#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 15:55:46 2019
@author: dweckler
"""
import numpy as np, matplotlib.pyplot as plt
from keras import backend as T
import time
import os
from .utilities import flat_avg
from dtmil.configuration.config_dtmil import get_json_config_data
from .prediction_data import Prediction_Data
import math
#%%class def
class Visualizer:
#TODO: Redesign this to work with multiple sources without depending on having all the data at once
def __init__(self, myData, myModel, sample_idx = None, guidelines = True, prediction_data = None, dataset_dir = None, input_json_data = None):
self.myData = myData
self.myModel = myModel
self._current_sample = sample_idx
##FIXME: make this update the visualization parameters every run (grab location of config file from myData?)
if (input_json_data is not None):
json_data = input_json_data
else:
_, json_data, _ = get_json_config_data(dataset_dir)
self.visualization_params = json_data['visualization']
        ##FIXME: make this easier to define manually
sf = 0.25
self.xvec_scale_factor = sf
self.xvec_timeline=np.arange((self.myData.maxlen-1)*sf,-sf,-sf)
        #this accounts for the extra value in the start and end indices; ideally it should be fixed at the source in the future
self.xvec_temp_time_lookup = np.copy(self.xvec_timeline)
self.xvec_temp_time_lookup = np.append(self.xvec_temp_time_lookup,self.xvec_timeline[-1])
        if sample_idx is None:
            print("sample index is set to None, using default value of 0")
            sample_idx = 0
if prediction_data:
self.prediction_data = prediction_data
else:
self.prediction_data = Prediction_Data(myData,myModel,sample_idx)
self.guidelines = guidelines
if (guidelines):
self.get_guidelines()
@classmethod
def frompredictiondata(cls, prediction_data, guidelines = True):
        #initialize from prediction data
return cls(prediction_data.myData, prediction_data.myModel, prediction_data.current_sample, prediction_data = prediction_data)
#%%plot sample timeline function
@property
def current_sample(self):
return self._current_sample
@current_sample.setter
def current_sample(self,value):
self._current_sample = value
self.prediction_data = Prediction_Data(self.myData,self.myModel,value)
def plot_sample_timeline(self, figure_size = None, saveFig = True):
myModel = self.myModel
model_output_directory = myModel.model_output_directory
xtest = myModel.xtest
if (saveFig):
plt.switch_backend('agg')
# function to get an intermediate layer's output (instance probabilities)
inst_layer_output_fn = T.function([myModel.model.layers[0].input],[myModel.model.layers[-2].output])
temp=xtest
L=inst_layer_output_fn([temp])[0]
nex=int(temp.shape[0]/2)
plt.figure(figsize=figure_size)
plt.subplot(2,1,1)
plt.plot(np.transpose(L[:nex,:,0]),'g')
plt.ylim([-0.1,1.1])
#plt.xlabel('Time to adverse event',fontsize=14)
#plt.xlabel('Sample timeline',fontsize=14)
plt.ylabel('Probability of \n adverse event',fontsize=14)
# plt.xticks([0,10,20],['1000 ft \n altitude', '10 mi', '20 mi'],rotation=0)
#plt.gca().invert_xaxis()
plt.subplot(2,1,2)
plt.plot(np.transpose(L[nex:,:,0]),'r')
plt.ylim([-0.1,1.1])
#plt.gca().invert_xaxis()
plt.xlabel('sample timeline',fontsize=14)
#plt.xticks([0,10,20],['1000 ft \n altitude', '10 mi', '20 mi'],rotation=0)
plt.ylabel('Probability of \n adverse event',fontsize=14)
temp=self.myData.xvalid
L=inst_layer_output_fn([temp])[0]
nex=int(temp.shape[0]/2)
        np.where(L[nex:,80:,0]>0.5)[0][:10]  #NOTE: leftover debugging expression; the result is not used
if(saveFig):
plt.savefig(os.path.join(model_output_directory,"timeline.png"))
#%%batch visualization function
#FIXME: text sizing
def visualize_sample_parameters(self,figure_size = None, saveFig = False, file_output_dir = "",file_output_type = "pdf",num_columns = 5, subplot_aspect_ratio = (1,1), subplot_size = 3.6):
myData = self.myData
# myModel = self.myModel
if (saveFig):
plt.switch_backend('agg')
#specify the variables to be included in the plot
correlated_states = myData.correlated_states.tolist()
trained_states = myData.parameter_selection.tolist()
parameters_to_plot=correlated_states + trained_states
correlated_indeces = len(correlated_states)
num_plots = len(parameters_to_plot) + 1
num_rows = math.ceil(float(num_plots)/float(num_columns))
if figure_size is None:
width = 4*num_columns
height = num_rows * 3.5
figure_size = (width,height)
fig, axs = plt.subplots(num_rows,num_columns, figsize= figure_size)
axs=axs.ravel()
starting_index = -1-myData.maxlen+1
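        # note: -1 - maxlen + 1 == -maxlen, i.e. the plotting window covers the last maxlen samples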
for pltIdx in np.arange(len(parameters_to_plot)):
selected_parameter = parameters_to_plot[pltIdx]
plot_title = "{}".format(myData.header[selected_parameter])
            #add holdout (H/O) to the title if the parameter is within the correlated indices
if (pltIdx < correlated_indeces):
plot_title = plot_title + "(H/O)"
self.plot_parameter(selected_parameter,axs[pltIdx],starting_index, plot_title = plot_title)
# plot precursor score in a separate subplot
pltIdx=pltIdx+1
self.plot_precursor_score(axs[pltIdx],'Precursor Score')
fig.tight_layout()
# save figure if needed
if saveFig:
suffix = "_{}".format(self.myData.get_filename(self.current_sample))
file_label, file_dataset_type = self.myData.get_grouping(self.current_sample)
filename = "{}_{}".format(file_label,file_dataset_type)
            save_figure(self.myModel,suffix,fig,file_output_dir,filename,file_output_type = file_output_type)
#self.save_figure(fig,file_output_dir)
def special_ranking_visualization(self, states_to_visualize,sorted_ranking_sums,figure_size = (10,10), saveFig = False, file_output_dir = "",file_output_type = "pdf"):
myData = self.myData
fig, axs = plt.subplots(3,3, figsize= figure_size)
axs=axs.ravel()
self.plot_precursor_score(axs[1],'Precursor Score')
for i in range(6):
selected_parameter = states_to_visualize[i]
plot_title = "{} ({})".format(myData.header[selected_parameter],sorted_ranking_sums[i])
            #plot each selected state with its ranking sum in the title
self.plot_parameter(selected_parameter,axs[i+3],0, plot_title = plot_title)
    #TODO: same as below, but with ordered ranking parameters and a variable number of columns
    #      output should include the ranking values
    #      clarify what the ranking values mean before reporting them
def visualize_top_ranking_parameters(self,ranking_group,feature_num_limit=None,num_columns = 4,displayfig = False):
file_output_dir = "feature_ranking"
myData = self.myData
if (not displayfig):
plt.switch_backend('agg')
#get as many as we can
#score_pair_lists = ranking_group.top_ranking_scores(1)
#response_windows_lists = ranking_group.top_response_windows(1)
response_windows_lists = ranking_group.ordered_response_windows_list
if(feature_num_limit is not None):
if len(response_windows_lists[0])> feature_num_limit:
response_windows_lists = [lst[0:feature_num_limit] for lst in response_windows_lists]
num_windows = len(response_windows_lists)
#print(feature_num_limit,len(response_windows_lists[0]),len(response_windows_lists[1]))
for idx,response_windows in enumerate(response_windows_lists):
parameter_selection = [window.attribute_index for window in response_windows]
# print([window.ranking_score for window in response_windows])
# print([window.most_important_sd_response for window in response_windows])
score_list = [round(window.ranking_score,3) for window in response_windows]
sd_response_list = []
for window in response_windows:
most_important_response = window.most_important_sd_response
if most_important_response is not None:
sd_response_list.append(str(most_important_response))
else:
sd_response_list.append("n/a")
#sd_response_list = [round(window.most_important_sd_response,3) for window in response_windows]
num_plots = len(response_windows) + 1
num_rows = math.ceil(float(num_plots)/float(num_columns))
width = 4*num_columns
height = num_rows * 3.5
figsize = (width,height)
fig, axs = plt.subplots(num_rows,num_columns, figsize= figsize)
axs=axs.ravel()
fig.tight_layout()
xvec_timeline = self.xvec_timeline
plot_idx = 0
axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=2,label = "Default")
axs[plot_idx].set_title("Precursor Score",fontsize=10)
axs[plot_idx].set_ylim([0,1])
axs[plot_idx].invert_xaxis()
if(self.guidelines):
axs[plot_idx].plot(self.xvec_timeline,self.precursor_score_guideline,'k--')
graph_colors = ['b','g','k','y','c','m','k','w']
color_idx = 0
sd_disturbances = ranking_group.parent.standard_deviation_disturbances
#TODO: condense everything below into one function (rather than writing the same code twice)
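            # look up the precomputed parameter window for each selected attribute index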
parameter_window_indeces = [ranking_group.parameter_list.index(i) for i in parameter_selection]
parameter_windows = [ranking_group.parameter_windows[i] for i in parameter_window_indeces]
#if this process isn't behind an if statement, the algorithm will output blank graphs
#furthermore, it will cause some of the following graphs to come out blank as well
#the cause of this is unknown, but may be useful to investigate in the future
if len(parameter_windows)>0:
#TODO: Figure out why this conditional became necessary and the one above stopped working? (maybe some revisions impacted it?)
if len(parameter_windows[0].start_indeces)>0:
start_index = parameter_windows[0].start_indeces[idx]
end_index = parameter_windows[0].end_indeces[idx]
window_start_idx = self.xvec_temp_time_lookup[start_index]
window_end_idx = self.xvec_temp_time_lookup[end_index]
axs[plot_idx].axvspan(window_start_idx, window_end_idx, alpha=0.1, color='k')
for index,window in enumerate(parameter_windows):
color_idx = 0
plot_idx = index+1
axs[plot_idx].invert_xaxis()
#axs[plot_idx].set(adjustable='box', aspect=1)
axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r', label = "Default",linewidth=2)
axs[plot_idx].axvspan(window_start_idx, window_end_idx, alpha=0.1, color='k')
for precursor_score in window.modified_precursor_scores:
selected_parameter = parameter_selection[index]
disturbance = sd_disturbances[color_idx]
if disturbance > 0:
label = "+ {} σ response".format(disturbance)
else:
label = "- {} σ response".format(abs(disturbance))
axs[plot_idx].plot(xvec_timeline,precursor_score,graph_colors[color_idx],linewidth=2,label = label)
axs[plot_idx].set_title("{} \n({}, {} σ response)".format(myData.header[selected_parameter],score_list[index],sd_response_list[index]),fontsize=10)
axs[plot_idx].set_ylim([0,1])
if(self.guidelines):
axs[plot_idx].plot(self.xvec_timeline,self.precursor_score_guideline,'k--')
color_idx += 1
if(plot_idx>1):
handles, labels = axs[plot_idx].get_legend_handles_labels()
fig.legend(handles, labels, loc='lower right')
#save the figure
plt.tight_layout()
file_label, file_dataset_type = self.myData.get_grouping(ranking_group.data_ID)
filename = "{}_{}_ranking".format(file_label,file_dataset_type)
suffix = "_{}".format(self.myData.get_filename(ranking_group.data_ID))
if num_windows > 1:
suffix = "{}_precursor_event_{}".format(suffix,idx)
save_figure(self.myModel,suffix,fig,file_output_dir,filename,output_time = False)
else:
#TODO:
print("Precursor score for {} does not cross threshold?".format(self.myData.get_filename(ranking_group.data_ID)))
else:
print("Precursor score for {} does not cross threshold!".format(self.myData.get_filename(ranking_group.data_ID)))
# def visualize_ranking_data(self,ranking_group, output_file = None, parameter_selection = None, num_columns = 7, subplot_aspect_ratio = (1,1), subplot_size = 3.6):
# myData = self.myData
# print("generating ranking data plot")
#
# if parameter_selection is None:
# parameter_selection = myData.parameter_selection.tolist()
#
# #all the paramaeters plus the precursor score in its own plot
# num_plots = len(parameter_selection) + 1
# num_rows = math.ceil(float(num_plots)/float(num_columns))
# dx, dy = subplot_aspect_ratio
# figsize = plt.figaspect(float(dy * num_rows) / float(dx * num_columns)) * subplot_size
#
# fig, axs = plt.subplots(num_rows,num_columns, figsize= figsize)
# #fig, axs = plt.subplots(numRows,numColumns)
# axs=axs.ravel()
# fig.tight_layout()
# #xvec_timeline=np.arange((myData.maxlen-1)*0.25,-0.25,-0.25)
#
# xvec_timeline = self.xvec_timeline
#
# axs[0].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=2)
# axs[0].set_title("Normal",fontsize=10)
# axs[0].set_ylim([0,1])
# axs[0].invert_xaxis()
#
# graph_colors = ['b','g','k','y']
# color_idx = 0
#
# parameter_window_indeces = [ranking_group.parameter_list.index(i) for i in parameter_selection]
# parameter_windows = [ranking_group.parameter_windows[i] for i in parameter_window_indeces]
#
# for index,window in enumerate(parameter_windows):
# color_idx = 0
# plot_idx = index+1
# axs[plot_idx].invert_xaxis()
#
# for precursor_score in window.modified_precursor_scores:
# selected_parameter = parameter_selection[index]
#
# axs[plot_idx].plot(xvec_timeline,precursor_score,graph_colors[color_idx],linewidth=2)
# axs[plot_idx].set_title("{} ({})".format(myData.header[selected_parameter],selected_parameter),fontsize=10)
# axs[plot_idx].set_ylim([0,1])
# axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=1)
# color_idx += 1
#%%save figure
def save_figure(self, fig,file_output_dir,file_output_type = 'pdf'):
        save_figure(self.myModel,self.current_sample,fig,file_output_dir,"parameters_graph",file_output_type = file_output_type)
#%%plot precursor score
def plot_precursor_score(self, plot_axis, plot_title = "Precursor Score", start_index = None, end_index = None):
precursor_score = self.prediction_data.precursor_score
plot_axis.plot(self.xvec_timeline[start_index:end_index], precursor_score[start_index:end_index],'r',linewidth=2)
if(self.guidelines):
plot_axis.plot(self.xvec_timeline[start_index:end_index],self.precursor_score_guideline[start_index:end_index],'k--')
plot_axis.invert_xaxis()
plot_axis.set_title(plot_title,fontsize=10)
plot_axis.set_ylim([0,1])
#%%plot individual parameter
def plot_parameter(self, selected_parameter, plot_axis,starting_index = 0,end_index = None,plot_title = "", precIdx = None):
        ##FIXME: make this easier to define manually
xvec_timeline=self.xvec_timeline
#FIXME: Make Prediction Data update states_orig ("visualization_sample")
parameter_values = self.prediction_data.visualization_window[starting_index:end_index,selected_parameter]
# plot time series variable
plot_axis.plot(xvec_timeline[starting_index:end_index],parameter_values,linewidth=2)
##plot the guidelines
# if discrete variable, use discrete nominal data as guideline, else use continuous nominal data
if selected_parameter in self.visualization_params["binary_parameters"]:
plot_axis.plot(xvec_timeline[starting_index:end_index],self.discrete_nominal_guideline[starting_index:end_index,selected_parameter],'k--',linewidth=2)
plot_axis.set_ylim([-0.1,1.1])
else:
plot_axis.plot(xvec_timeline[starting_index:end_index],self.nominal_guideline[0,starting_index:end_index,selected_parameter],'k--',linewidth=2)
plot_axis.plot(xvec_timeline[starting_index:end_index],self.nominal_guideline[1,starting_index:end_index,selected_parameter],'k--',linewidth=2)
        ##use this if we are dealing with multiple precursor score predictions, otherwise use the one generated upon class initialization
if (precIdx):
precursor_indeces = precIdx
else:
precursor_indeces = self.prediction_data.precursor_indeces
# plot precursor time instants as an overlay
if len(precursor_indeces)>0:
precursor_overlay_values = self.prediction_data.visualization_window[precursor_indeces,selected_parameter]
self.precursor_overlay_values = precursor_overlay_values
if(end_index):
if end_index >= precursor_indeces[0]:
precursor_end_index = (np.abs(precursor_indeces - (end_index))).argmin()
print(precursor_end_index,end_index)
plot_axis.plot(xvec_timeline[precursor_indeces][0:precursor_end_index],precursor_overlay_values[0:precursor_end_index],'ro', alpha = 0.4)
else:
plot_axis.plot(xvec_timeline[precursor_indeces],precursor_overlay_values,'ro', alpha = 0.4)
#
if plot_title == "":
plot_title = "{} ({})".format(self.myData.header[selected_parameter],selected_parameter)
plot_axis.set_title(plot_title,fontsize=10)
        # invert x-axis so that distance to touchdown decreases towards the right side of the plot
plot_axis.invert_xaxis()
#%%get guidelines
def get_guidelines(self):
myData = self.myData
optimal_values=myData.states_orig[:,np.concatenate((myData.I_opt,myData.I_opt_valid),axis=0)]
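        # gather the columns of states_orig corresponding to the nominal examples (I_opt and I_opt_valid)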
#determine guidelines
guideline_type = self.visualization_params["guideline_type"]
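        # guideline_type 1: band of mean +/- 2.5 standard deviations of the nominal data; otherwise: 10th/90th percentile band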
if guideline_type == 1:
optimal_standard_dev = np.std(optimal_values, axis=1)
optimal_mean = np.mean(optimal_values,axis = 1)
avg_guideline =flat_avg(optimal_mean)
sdev_guideline = flat_avg(optimal_standard_dev)
sdev_scale = 2.5
upper_guideline = avg_guideline + sdev_scale * sdev_guideline
lower_guideline = avg_guideline - sdev_scale * sdev_guideline
nominal_guideline = np.array([lower_guideline, upper_guideline])
else:
# get nominal percentiles for plotting
nominal_guideline=np.percentile(optimal_values,[10,90],axis=1)
self.nominal_guideline = nominal_guideline
# Get nominal values assuming binary (note that we will only use this if the variable is binary)
self.discrete_nominal_guideline=np.mean(optimal_values,axis=1)
self.precursor_score_guideline = np.full(optimal_values.shape[0],self.prediction_data.precursor_threshold)
def save_figure(myModel, figure_suffix, fig,file_output_dir,filename,file_output_type = 'pdf', output_time = True):
time_start = time.time()
print("Saving figure: {}".format(figure_suffix))
model_output_directory = myModel.model_output_directory
if model_output_directory != "":
model_output_directory = os.path.join(model_output_directory,file_output_dir)
if not os.path.exists(model_output_directory):
print(f"creating directory {model_output_directory}")
os.makedirs(model_output_directory)
filename = "{}{}.{}".format(filename,figure_suffix,"pdf")
filepath = os.path.join(model_output_directory,filename)
#print("Saving figure: {}".format(filepath))
fig.savefig(filepath,format= file_output_type)
# if(output_time):
# print("Total time to save figure: {}".format(time.time()-time_start))
def visualize(myData, myModel,sample_idx = 0, savefig = False):
vis = Visualizer(myData,myModel,sample_idx)
vis.plot_sample_timeline(figure_size = (8,6), saveFig = savefig)
print("Visualizing Sample {}".format(sample_idx))
vis.visualize_sample_parameters(figure_size=(32,24),saveFig = savefig)
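# Example usage (a sketch; `myData` and `myModel` are assumed to be data and model wrapper
# objects built elsewhere in the DT-MIL pipeline, and sample index 3 is an arbitrary choice):
#
#     vis = Visualizer(myData, myModel, sample_idx=3)
#     vis.plot_sample_timeline(figure_size=(8, 6), saveFig=False)
#     vis.visualize_sample_parameters(figure_size=(32, 24), saveFig=False)
#
# or, via the convenience function defined above:
#
#     visualize(myData, myModel, sample_idx=3, savefig=False)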
|
[
"numpy.abs",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"numpy.full",
"numpy.copy",
"numpy.std",
"numpy.transpose",
"os.path.exists",
"numpy.append",
"matplotlib.pyplot.subplots",
"dtmil.configuration.config_dtmil.get_json_config_data",
"matplotlib.pyplot.ylim",
"keras.backend.function",
"numpy.percentile",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.subplot",
"os.makedirs",
"time.time",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
from decimal import Decimal
import unittest, sys
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from unittest.mock import patch
from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv
class Test_Process_Raw_Data(unittest.TestCase):
#Test helper methods
def test_convert_datestring_array_to_datetime(self):
datestrings = ['2020-01-01 00:00:00', '2020-01-02 00:00:00', '2020-01-01 03:00:00']
expected_datetimes = [datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-02 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')]
self.assertEqual(expected_datetimes, convert_datestring_array_to_datetime(datestrings))
def test_create_expected_row(self):
input_row = [5,4,3,2,1]
expected_row = np.array([[1,2,3,4,1,2]])
actual_row = create_expected_row(input_row, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
#Test process_raw_data methods
def test_set_intervals(self):
intervals = [5, 5, 5]
set_intervals(intervals)
self.assertEqual(intervals, get_intervals())
def test_set_target_interval(self):
interval = timedelta(minutes=69)
set_target_interval(interval)
self.assertEqual(interval, get_target_interval())
def test_set_const_intervals(self):
expected_intervals = [3, 3, 3, 3, 3]
set_const_intervals(3, 5)
self.assertEqual(expected_intervals, get_intervals())
def test_set_max_input_minutes_missing(self):
minutes = 69
set_max_input_minutes_missing(minutes)
self.assertEqual(minutes, get_max_input_minutes_missing())
def test_set_market(self):
market = 'GBP/JPY'
set_market(market)
self.assertEqual(market, get_market())
def test_categorise_data(self):
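        # apply_category_label_binary returns 1 when the second value is greater than the first, and 0 otherwise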
self.assertEqual(1, apply_category_label_binary(1.2222, 1.2223))
self.assertEqual(0, apply_category_label_binary(1.2223, 1.2222))
@patch('forex_predictor.data_extraction.process_raw_data.pd')
def test_load_market_csv(self, mock_pd):
load_market_csv('EUR/GBP')
mock_pd.read_csv.assert_called_with('data/EUR_GBP.csv')
def test_get_dates(self):
intervals = [5, 5, 5]
set_intervals(intervals)
training_start = datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
validation_start = datetime.strptime('2020-01-01 01:00:00', '%Y-%m-%d %H:%M:%S')
test_start = datetime.strptime('2020-01-01 02:00:00', '%Y-%m-%d %H:%M:%S')
test_end = datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')
actual_training_dates, actual_validation_dates, actual_test_dates = get_dates(training_start, validation_start, test_start, test_end)
expected_training_dates = convert_datestring_array_to_datetime(['2020-01-01 00:00:00', '2020-01-01 00:15:00', '2020-01-01 00:30:00', '2020-01-01 00:45:00'])
expected_validation_dates = convert_datestring_array_to_datetime(['2020-01-01 01:00:00', '2020-01-01 01:15:00', '2020-01-01 01:30:00', '2020-01-01 01:45:00'])
expected_test_dates = convert_datestring_array_to_datetime(['2020-01-01 02:00:00', '2020-01-01 02:15:00', '2020-01-01 02:30:00', '2020-01-01 02:45:00'])
self.assertEqual(expected_training_dates, actual_training_dates)
self.assertEqual(expected_validation_dates, actual_validation_dates)
self.assertEqual(expected_test_dates, actual_test_dates)
@patch('forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates')
def test_get_relevant_data(self, mock_method):
set_intervals([15,15,15,15])
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
target_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
get_relevant_data(df, target_date)
start_date = datetime.strptime('2014-07-16 23:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 01:00:00', '%Y-%m-%d %H:%M:%S')
mock_method.assert_called_with(start_date, end_date, df)
def test_get_dataframe_from_dates(self):
original_df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 00:05:00', '%Y-%m-%d %H:%M:%S')
actual_df = get_dataframe_from_dates(start_date, end_date, original_df)
expected_df = original_df.iloc[74:79, :]
self.assertTrue(expected_df.equals(actual_df))
def test_find_start_date_index(self):
target_date = datetime.strptime('2014-07-18 08:46:00', '%Y-%m-%d %H:%M:%S')
df = pd.read_csv('tests/resources/dataframe_data.csv')
actual_index = find_start_date_index(df, target_date)
expected_index = 1994
self.assertEqual(expected_index, actual_index)
def test_process_input_data(self):
set_intervals([5, 5, 5])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
expected_input_data = pd.DataFrame(data=test_data)
actual_input_data = process_input_data(df)
self.assertTrue(expected_input_data.equals(actual_input_data))
def test_process_input_data_error(self):
set_intervals([5, 5, 5, 60])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
expected_error_message = 'Insufficient data to process for this number of intervals'
        with self.assertRaises(Exception) as context:
            process_input_data(df)
        self.assertEqual(expected_error_message, str(context.exception))
def test_create_row(self):
set_intervals([5,5,5])
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
input_values = pd.DataFrame(data=test_data)
expected_row = create_expected_row([0.79227, 0.79231, 0.79216, 0.79222, 0.79223, 0.79312, 0.79219, 0.79312, 0.79315, 0.79325, 0.79279, 0.79284], [1, 2])
actual_row = create_row(input_values, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
def test_create_relevant_data_row(self):
set_intervals([5,5,5])
set_target_interval(timedelta(minutes=5))
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2018, :]
expected_row = create_expected_row([0.79227, 0.79231, 0.79216, 0.79222, 0.79223, 0.79312, 0.79219, 0.79312, 0.79315, 0.79325, 0.79279, 0.79284], [0.79283, 0.79258])
actual_row = create_relevant_data_row(df, datetime.strptime('2014-07-18 09:04:00', '%Y-%m-%d %H:%M:%S'))
self.assertTrue(np.array_equal(expected_row, actual_row))
def test_get_open_and_close_for_period(self):
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-21 18:00:00', '%Y-%m-%d %H:%M:%S')
open, close = get_open_and_close_for_period(df, start_date)
self.assertEqual(0.79194, open)
self.assertEqual(0.79193, close)
def test_get_open_and_close_for_period_error(self):
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-21 19:00:00', '%Y-%m-%d %H:%M:%S')
expected_error_message = 'Open-close data unavailable for 2014-07-21 19:00:00 and interval of 60 minutes'
        with self.assertRaises(Exception) as context:
            get_open_and_close_for_period(df, start_date)
        self.assertEqual(expected_error_message, str(context.exception))
def convert_datestring_array_to_datetime(datestrings):
"""For readability when working with large amounts of datetimes
"""
datetimes = []
for datestring in datestrings:
datetimes.append(datetime.strptime(datestring, '%Y-%m-%d %H:%M:%S'))
return datetimes
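# e.g. convert_datestring_array_to_datetime(['2020-01-01 00:00:00']) -> [datetime(2020, 1, 1, 0, 0)]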
def create_expected_row(input_row, outputs):
"""Create a row similar to how it is done in process_raw_data.py but with the advantage that this takes inputs as a python list
making it much easier to test. Can then use it in more integrated test with expected dataframe values
"""
values = np.array([input_row])
start_value = values[0][0]
values = values[:, 1:]
for i in range(0, len(values[0])):
values[0][i] = Decimal(str(start_value)) - Decimal(str(values[0][i]))
return np.hstack((values, [outputs]))
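# Worked example (these values mirror test_create_expected_row above):
#   create_expected_row([5, 4, 3, 2, 1], [1, 2])
#   -> differences from the first value: [5-4, 5-3, 5-2, 5-1] = [1, 2, 3, 4]
#   -> with the outputs appended: [[1, 2, 3, 4, 1, 2]]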
|
[
"forex_predictor.data_extraction.process_raw_data.apply_category_label_binary",
"pandas.read_csv",
"forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates",
"forex_predictor.data_extraction.process_raw_data.set_const_intervals",
"sys.exc_info",
"forex_predictor.data_extraction.process_raw_data.set_market",
"pandas.DataFrame",
"forex_predictor.data_extraction.process_raw_data.get_relevant_data",
"forex_predictor.data_extraction.process_raw_data.set_max_input_minutes_missing",
"datetime.timedelta",
"forex_predictor.data_extraction.process_raw_data.get_target_interval",
"forex_predictor.data_extraction.process_raw_data.get_open_and_close_for_period",
"numpy.hstack",
"unittest.mock.patch",
"forex_predictor.data_extraction.process_raw_data.get_intervals",
"datetime.datetime.strptime",
"forex_predictor.data_extraction.process_raw_data.set_intervals",
"forex_predictor.data_extraction.process_raw_data.create_row",
"forex_predictor.data_extraction.process_raw_data.get_dates",
"forex_predictor.data_extraction.process_raw_data.get_max_input_minutes_missing",
"forex_predictor.data_extraction.process_raw_data.process_input_data",
"forex_predictor.data_extraction.process_raw_data.load_market_csv",
"numpy.array",
"forex_predictor.data_extraction.process_raw_data.set_target_interval",
"numpy.array_equal",
"forex_predictor.data_extraction.process_raw_data.find_start_date_index",
"forex_predictor.data_extraction.process_raw_data.get_market"
]
|
#!/usr/bin/env python
import numpy as np
np.random.seed(42)
import emcee
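# Descriptive note (added comment): this script fits a straight line y = m*x + c to
# noisy synthetic data with emcee's affine-invariant ensemble sampler, using a flat
# (constant) prior and the negative sum of squared residuals as the log-likelihood.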
def lnprior(params):
return 0.0
def lnlike(params, x, y):
model = params[0] * x + params[1]
residuals = y - model
return -np.sum(residuals ** 2)
def lnprob(params, x, y):
lnp = lnprior(params)
if np.isfinite(lnp):
return lnp + lnlike(params, x, y)
return -np.inf
if __name__ == '__main__':
real_m, real_c = 2, 5
real_x = np.sort(np.random.uniform(0, 10, 20))
real_y = real_m * real_x + real_c
noise = np.random.normal(0, 3, real_x.shape)
observed_y = real_y + noise
p0 = np.array([0, 0])
nwalkers = 10
niters = 100
sampler = emcee.EnsembleSampler(nwalkers, len(p0), lnprob,
args=(real_x, observed_y))
pos = np.array([p0 + 1E-5 * np.random.randn()
for _ in range(nwalkers)])
sampler.run_mcmc(pos, niters)
print(sampler.flatchain[::10, 0])
|
[
"numpy.random.uniform",
"numpy.sum",
"numpy.random.seed",
"numpy.random.randn",
"numpy.isfinite",
"numpy.array",
"numpy.random.normal"
] |
[((43, 61), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (57, 61), True, 'import numpy as np\n'), ((299, 315), 'numpy.isfinite', 'np.isfinite', (['lnp'], {}), '(lnp)\n', (310, 315), True, 'import numpy as np\n'), ((534, 570), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)', 'real_x.shape'], {}), '(0, 3, real_x.shape)\n', (550, 570), True, 'import numpy as np\n'), ((613, 629), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (621, 629), True, 'import numpy as np\n'), ((216, 238), 'numpy.sum', 'np.sum', (['(residuals ** 2)'], {}), '(residuals ** 2)\n', (222, 238), True, 'import numpy as np\n'), ((454, 482), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (471, 482), True, 'import numpy as np\n'), ((825, 842), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (840, 842), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file unittests/libtests/feassemble/data/ElasticityImplicit.py
## @brief Python application for generating C++ data files for testing
## C++ ElasticityImplicit object.
from pyre.components.Component import Component
import numpy
# ----------------------------------------------------------------------
# ElasticityImplicit class
class ElasticityImplicit(Component):
"""
Python application for generating C++ data files for testing C++
ElasticityImplicit object.
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="elasticityimplicit"):
"""
Constructor.
"""
Component.__init__(self, name, facility="formulation")
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def calculateResidual(self, integrator):
"""
Calculate contribution to residual of operator for integrator.
{r} = -[K]{u(t)}
"""
K = integrator._calculateStiffnessMat()
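    # Note: the residual is -[K] applied to the current solution estimate
    # (fieldT + fieldTIncr), matching the formula in the docstring above.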
residual = -numpy.dot(K, integrator.fieldT+integrator.fieldTIncr)
return residual.flatten()
def calculateJacobian(self, integrator):
"""
Calculate contribution to Jacobian matrix of operator for integrator.
[A] = [K]
"""
K = integrator._calculateStiffnessMat()
jacobian = K
return jacobian
# FACTORY //////////////////////////////////////////////////////////////
def formulation():
return ElasticityImplicit()
# End of file
|
[
"numpy.dot",
"pyre.components.Component.Component.__init__"
] |
[((1128, 1182), 'pyre.components.Component.Component.__init__', 'Component.__init__', (['self', 'name'], {'facility': '"""formulation"""'}), "(self, name, facility='formulation')\n", (1146, 1182), False, 'from pyre.components.Component import Component\n'), ((1485, 1540), 'numpy.dot', 'numpy.dot', (['K', '(integrator.fieldT + integrator.fieldTIncr)'], {}), '(K, integrator.fieldT + integrator.fieldTIncr)\n', (1494, 1540), False, 'import numpy\n')]
|
import numpy as np
from pprint import pprint
def cal_eigenvalues_and_eigenvectors(A):
"""
:param A: n x n Hermitian matrix
    :return: eigenvalues and the corresponding normalized eigenvectors of A
"""
eigenvalues, normed_eigenvectors = np.linalg.eig(A)
    # The two assignments below are redundant but kept for readability
lmd = eigenvalues
v = normed_eigenvectors
return lmd, v
def cal_determinant(M):
return np.linalg.det(M)
def check_lemma2():
"""
lmd: short for lambda, i.e., eigenvalues.
"lambda" is not a good choice in python so I use lmd instead
v : normed_eigenvectors
:return:
"""
n = np.random.randint(low=3, high=10) # Dimension of a Hermitian matrix
C = np.matrix(np.random.rand(n, n)) # Seed Matrix
A = (C.getH() + C) # Construct Hermitian matrix
pprint("Pick a {} x {} matrix".format(n, n))
pprint(A)
lmd, v = cal_eigenvalues_and_eigenvectors(A)
pprint("Lambda Shape : {}".format(lmd.shape))
pprint("V Shape: {}".format(v.shape))
# Now pick a dimension: i
i = np.random.randint(low=1, high=n)
pprint("Pick one dimension to check : {}".format(i))
# Now pick a dimension: j
    j = np.random.randint(low=1, high=n)
pprint("Pick one dimension to delete : {}".format(j))
# Now, let's compute left side of equation (2) in paper
    left = v[j - 1, i - 1] ** 2
for k in range(0, n):
if k == i - 1:
continue
left *= (lmd[i - 1] - lmd[k])
pprint("Left side equals to {}".format(left))
# Now, let's compute right side of the equation (2) in paper
right = 1
M = np.delete(A, (j - 1), axis=0)
M_j = np.delete(M, (j - 1), axis=1)
lmd_M_j, v_M_j = cal_eigenvalues_and_eigenvectors(M_j)
for k in range(0, n - 1):
right *= (lmd[i - 1] - lmd_M_j[k])
pprint("Right side equals to {}".format(right))
assert np.abs(left - right) < 1e-5, "left side {} does not equal to the right side {}.".format(left, right)
if __name__ == '__main__':
check_lemma2()
|
[
"numpy.abs",
"numpy.linalg.eig",
"numpy.random.randint",
"pprint.pprint",
"numpy.random.rand",
"numpy.linalg.det",
"numpy.delete"
] |
[((194, 210), 'numpy.linalg.eig', 'np.linalg.eig', (['A'], {}), '(A)\n', (207, 210), True, 'import numpy as np\n'), ((369, 385), 'numpy.linalg.det', 'np.linalg.det', (['M'], {}), '(M)\n', (382, 385), True, 'import numpy as np\n'), ((590, 623), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(3)', 'high': '(10)'}), '(low=3, high=10)\n', (607, 623), True, 'import numpy as np\n'), ((855, 864), 'pprint.pprint', 'pprint', (['A'], {}), '(A)\n', (861, 864), False, 'from pprint import pprint\n'), ((1046, 1078), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': 'n'}), '(low=1, high=n)\n', (1063, 1078), True, 'import numpy as np\n'), ((1175, 1207), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n'}), '(low=0, high=n)\n', (1192, 1207), True, 'import numpy as np\n'), ((1608, 1635), 'numpy.delete', 'np.delete', (['A', '(j - 1)'], {'axis': '(0)'}), '(A, j - 1, axis=0)\n', (1617, 1635), True, 'import numpy as np\n'), ((1648, 1675), 'numpy.delete', 'np.delete', (['M', '(j - 1)'], {'axis': '(1)'}), '(M, j - 1, axis=1)\n', (1657, 1675), True, 'import numpy as np\n'), ((677, 697), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (691, 697), True, 'import numpy as np\n'), ((1875, 1895), 'numpy.abs', 'np.abs', (['(left - right)'], {}), '(left - right)\n', (1881, 1895), True, 'import numpy as np\n')]
|
# yellowbrick.utils.helpers
# Helper functions and generic utilities for use in Yellowbrick code.
#
# Author: <NAME> <<EMAIL>>
# Created: Fri May 19 10:39:30 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: helpers.py [79cd8cf] <EMAIL> $
"""
Helper functions and generic utilities for use in Yellowbrick code.
"""
##########################################################################
## Imports
##########################################################################
import re
import numpy as np
from sklearn.pipeline import Pipeline
from .types import is_estimator
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Model and Feature Information
##########################################################################
def get_model_name(model):
"""
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
"""
if not is_estimator(model):
raise YellowbrickTypeError(
"Cannot detect the model name for non estimator: '{}'".format(
type(model)
)
)
else:
if isinstance(model, Pipeline):
return get_model_name(model.steps[-1][-1])
else:
return model.__class__.__name__
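# Illustrative usage (a sketch; LogisticRegression is just an example estimator and is
# not required by this module):
#
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.pipeline import Pipeline
#   get_model_name(LogisticRegression())                        # -> 'LogisticRegression'
#   get_model_name(Pipeline([('clf', LogisticRegression())]))   # -> 'LogisticRegression'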
def has_ndarray_int_columns(features, X):
""" Checks if numeric feature columns exist in ndarray """
_, ncols = X.shape
if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):
return False
ndarray_columns = np.arange(0, ncols)
feature_cols = np.unique([int(d) for d in features])
return all(np.in1d(feature_cols, ndarray_columns))
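# e.g. (illustrative) has_ndarray_int_columns(['0', '2'], np.zeros((5, 3))) -> True,
# since every feature name is a digit string and the array has columns 0..2.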
# Alias for closer name to isinstance and issubclass
hasndarrayintcolumns = has_ndarray_int_columns
def is_monotonic(a, increasing=True):
"""
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
        Test if the array is monotonically increasing, otherwise test if the
        array is monotonically decreasing.
"""
a = np.asarray(a) # ensure a is array-like
if a.ndim > 1:
        raise ValueError("not supported for multi-dimensional arrays")
if len(a) <= 1:
return True
if increasing:
return np.all(a[1:] >= a[:-1], axis=0)
return np.all(a[1:] <= a[:-1], axis=0)
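# e.g. (illustrative) is_monotonic([1, 2, 2, 3]) -> True,
#      is_monotonic([3, 2, 1], increasing=False) -> True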
##########################################################################
## Numeric Computations
##########################################################################
#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
def div_safe( numerator, denominator ):
"""
Ufunc-extension that returns 0 instead of nan when dividing numpy arrays
Parameters
----------
numerator: array-like
denominator: scalar or array-like that can be validly divided by the numerator
returns a numpy array
example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
"""
#First handle scalars
if np.isscalar(numerator):
raise ValueError("div_safe should only be used with an array-like numerator")
#Then numpy arrays
try:
with np.errstate(divide='ignore', invalid='ignore'):
            result = np.true_divide(numerator, denominator)
            result[~np.isfinite(result)] = 0  # -inf inf NaN
return result
except ValueError as e:
raise e
##########################################################################
## String Computations
##########################################################################
def slugify(text):
"""
Returns a slug of given text, normalizing unicode data for file-safe
strings. Used for deciding where to write images to disk.
Parameters
----------
text : string
The string to slugify
Returns
-------
slug : string
A normalized slug representation of the text
.. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/
"""
slug = re.sub(r'[^\w]+', ' ', text)
slug = "-".join(slug.lower().strip().split())
return slug
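# e.g. (illustrative) slugify("Hello, World!") -> 'hello-world'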
|
[
"numpy.true_divide",
"numpy.isscalar",
"numpy.asarray",
"numpy.isfinite",
"numpy.errstate",
"numpy.arange",
"re.sub",
"numpy.all",
"numpy.in1d"
] |
[((1986, 2005), 'numpy.arange', 'np.arange', (['(0)', 'ncols'], {}), '(0, ncols)\n', (1995, 2005), True, 'import numpy as np\n'), ((2589, 2602), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (2599, 2602), True, 'import numpy as np\n'), ((2838, 2869), 'numpy.all', 'np.all', (['(a[1:] <= a[:-1])'], {'axis': '(0)'}), '(a[1:] <= a[:-1], axis=0)\n', (2844, 2869), True, 'import numpy as np\n'), ((3525, 3547), 'numpy.isscalar', 'np.isscalar', (['numerator'], {}), '(numerator)\n', (3536, 3547), True, 'import numpy as np\n'), ((4553, 4581), 're.sub', 're.sub', (['"""[^\\\\w]+"""', '""" """', 'text'], {}), "('[^\\\\w]+', ' ', text)\n", (4559, 4581), False, 'import re\n'), ((2078, 2116), 'numpy.in1d', 'np.in1d', (['feature_cols', 'ndarray_columns'], {}), '(feature_cols, ndarray_columns)\n', (2085, 2116), True, 'import numpy as np\n'), ((2795, 2826), 'numpy.all', 'np.all', (['(a[1:] >= a[:-1])'], {'axis': '(0)'}), '(a[1:] >= a[:-1], axis=0)\n', (2801, 2826), True, 'import numpy as np\n'), ((3681, 3727), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (3692, 3727), True, 'import numpy as np\n'), ((3750, 3788), 'numpy.true_divide', 'np.true_divide', (['numerator', 'denominator'], {}), '(numerator, denominator)\n', (3764, 3788), True, 'import numpy as np\n'), ((3813, 3832), 'numpy.isfinite', 'np.isfinite', (['result'], {}), '(result)\n', (3824, 3832), True, 'import numpy as np\n')]
|
# Copyright (c) 2016, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gzip
import os
import unittest
from datetime import datetime, timedelta
from typing import List
import numpy as np
import pytz
from cerebralcortex.data_processor.signalprocessing.alignment import timestamp_correct
from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, \
generate_peak_valley, \
remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, \
filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position
from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
class TestPeakValleyComputation(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestPeakValleyComputation, cls).setUpClass()
tz = pytz.timezone('US/Eastern')
data = []
cls._sample_frequency = 21.33
cls._smoothing_factor = 5
cls._time_window = 8
cls._expiration_amplitude_threshold_perc = 0.10
cls._threshold_expiration_duration = 0.312
cls._max_amplitude_change_peak_correction = 30
cls._inspiration_amplitude_threshold_perc = 0.10
cls._min_neg_slope_count_peak_correction = 4
cls._minimum_peak_to_valley_time_diff = 0.31
cls._window_length = int(round(cls._time_window * cls._sample_frequency))
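        # ~8 s of data at ~21.33 Hz -> a window of about 171 samples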
with gzip.open(os.path.join(os.path.dirname(__file__), 'res/rip.csv.gz'), 'rt') as f:
for l in f:
values = list(map(int, l.split(',')))
data.append(
DataPoint.from_tuple(datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
cls._data_start_time_to_index = get_data_start_time_to_index_dic(data=data)
cls.rip_datastream = DataStream(None, None)
cls.rip_datastream.data = data
def test_smooth(self):
ds = DataStream(None, None)
ds.datapoints = self.rip_datastream.data
result_smooth = smooth(ds.datapoints, self._smoothing_factor)
sample_smooth_python = [i.sample for i in result_smooth[:5000]]
sample_smooth_matlab = np.genfromtxt(os.path.join(os.path.dirname(__file__), 'res/testmatlab_rip_smooth.csv'),
delimiter=',', )
self.assertTrue(np.alltrue(np.round(sample_smooth_matlab) == np.round(sample_smooth_python)))
def test_moving_average_curve(self):
ds = DataStream(None, None)
ds.datapoints = self.rip_datastream.data
data_smooth = smooth(ds.datapoints, self._smoothing_factor)
result = moving_average_curve(data_smooth, self._window_length)
sample_mac_python = [i.sample for i in result[:5000]]
sample_mac_matlab = np.genfromtxt(os.path.join(os.path.dirname(__file__), 'res/testmatlab_mac_sample.csv'),
delimiter=',', )
for i in range(0, len(sample_mac_matlab)):
self.assertAlmostEqual(sample_mac_matlab[i], sample_mac_python[i], delta=0.1)
def test_up_down_intercepts(self):
data_start_time_list = [0, 1, 2, 3, 4]
mac_start_time_list = [0, 1, 2, 3, 4]
data_sample_list = [10, 20, 30, 40, 50]
mac_sample_list = [11, 12, 31, 32, 52]
expected_up_intercepts_sample = [12, 32]
expected_down_intercepts_sample = [31, 52]
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time_list,
sample_list=data_sample_list)
mac_input = form_data_point_list_from_start_time_sample(start_time_list=mac_start_time_list,
sample_list=mac_sample_list)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
up_intercepts, down_intercepts = up_down_intercepts(data=data_input,
mac=mac_input,
data_start_time_to_index=data_start_time_to_index)
output_up_intercepts_sample = [i.sample for i in up_intercepts]
output_down_intercepts_sample = [i.sample for i in down_intercepts]
self.assertTrue(np.array_equal(expected_up_intercepts_sample, output_up_intercepts_sample))
self.assertTrue(np.array_equal(expected_down_intercepts_sample, output_down_intercepts_sample))
def test_filter_intercept_outlier(self):
# test cases
up_intercepts_case_list = []
down_intercepts_case_list = []
up_intercepts_expected_case_list = []
down_intercepts_expected_case_list = []
# first case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([9, 11, 21, 31, 41]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 41])
# second case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([8, 9, 11, 21, 31, 41, 42]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 42])
# third case
up_intercepts_case_list.append(
form_data_point_from_start_time_array([10, 20, 22, 23, 30, 32, 33, 40, 42, 43, 50, 52, 53]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([9, 11, 21, 31, 41]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 53])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 41])
# fourth case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array(
[7, 8, 9, 11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43, 51, 52, 53]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 13, 23, 33, 43])
# fifth case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 11, 12, 16, 17, 18, 22, 23, 24]))
down_intercepts_case_list.append(
form_data_point_from_start_time_array([7, 8, 9, 13, 14, 15, 19, 20, 21, 25, 26, 27]))
up_intercepts_expected_case_list.append([12, 18, 24])
down_intercepts_expected_case_list.append([9, 15, 21])
for i, up_intercepts_case in enumerate(up_intercepts_case_list):
up_intercepts = up_intercepts_case
down_intercepts = down_intercepts_case_list[i]
up_intercepts_output, down_intercepts_output = filter_intercept_outlier(up_intercepts, down_intercepts)
# test all are List[Datapoints]
self.assertIsInstance(up_intercepts_output, list)
self.assertIsInstance(down_intercepts_output, list)
# test output match for first case
up_intercepts_output_start_time = [i.start_time for i in up_intercepts_output]
self.assertTrue(np.array_equal(up_intercepts_output_start_time, up_intercepts_expected_case_list[i]))
down_intercepts_output_start_time = [i.start_time for i in down_intercepts_output]
self.assertTrue(np.array_equal(down_intercepts_output_start_time, down_intercepts_expected_case_list[i]))
def test_generate_peak_valley(self):
down_intercepts_start_time = [10, 20, 30, 40, 50]
up_intercepts_start_time = [15, 25, 35, 45, 55]
data_start_times = [11, 12, 13, 16, 17, 18, 21, 22, 23, 26, 27, 28, 31, 32, 33, 36, 37, 38, 41, 42, 43, 46, 47,
48, 51, 52, 53, 56, 57, 58]
data_samples = [1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10,
11, 12]
expected_valley_samples = [1, 1, 1, 1]
expected_peak_samples = [12, 12, 12, 12]
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_times,
sample_list=data_samples)
down_intercepts_input = form_data_point_from_start_time_array(start_time_list=down_intercepts_start_time)
        up_intercepts_input = form_data_point_from_start_time_array(start_time_list=up_intercepts_start_time)
        peaks_output, valleys_output = generate_peak_valley(up_intercepts=up_intercepts_input,
down_intercepts=down_intercepts_input, data=data_input)
output_peaks_sample = [i.sample for i in peaks_output]
output_valleys_sample = [i.sample for i in valleys_output]
self.assertTrue(np.array_equal(output_peaks_sample, expected_peak_samples))
self.assertTrue(np.array_equal(output_valleys_sample, expected_valley_samples))
def test_correct_valley_position(self):
valleys_start_time = [1, 21]
up_intercepts_start_time = [10, 30]
peaks_start_time = [20, 40]
valleys_samples = [100, 100]
up_intercepts_samples = [500, 500]
peaks_samples = [1000, 1000]
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
data_samples = [100, 110, 120, 130, 140, 100, 200, 300, 400, 500] + [100, 110, 120, 130, 140, 150, 160, 170,
180, 500]
expected_valleys_start_time = [6,
                                       21]  # the data is not monotonically increasing from start time 1 to 10, so the first valley moves right to 6, where the monotonic increase begins; the second valley is fine because the data increases monotonically from start time 21 to 30.
expected_valleys_samples = [100, 100]
peaks_input = form_data_point_list_from_start_time_sample(start_time_list=peaks_start_time,
sample_list=peaks_samples)
valleys_input = form_data_point_list_from_start_time_sample(start_time_list=valleys_start_time,
sample_list=valleys_samples)
up_intercepts_input = form_data_point_list_from_start_time_sample(start_time_list=up_intercepts_start_time,
sample_list=up_intercepts_samples)
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time,
sample_list=data_samples)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
valleys_corrected_ouput = correct_valley_position(peaks=peaks_input,
valleys=valleys_input,
up_intercepts=up_intercepts_input,
data=data_input,
data_start_time_to_index=data_start_time_to_index)
valleys_corrected_ouput_start_time = [i.start_time for i in valleys_corrected_ouput]
valleys_corrected_ouput_samples = [i.sample for i in valleys_corrected_ouput]
self.assertTrue(np.array_equal(valleys_corrected_ouput_start_time, expected_valleys_start_time))
self.assertTrue(np.array_equal(valleys_corrected_ouput_samples, expected_valleys_samples))
def test_correct_peak_position(self):
test_cases = []
        # test case - 0: the data decreases monotonically from the peak back to the up intercept, so the peak position will not be changed.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
valleys_start_time = [1]
up_intercepts_start_time = [5]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [50]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
        # test case - 1: from the up intercept to the peak, the signal increases from 50 to 90, then decreases from 90 to 60 over 3 consecutive decreasing steps,
        # which is fewer than 4 (self._min_neg_slope_count_peak_correction = 4), so the peak position will not be updated.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 40, 50, 90, 80, 70, 60, 100]
valleys_start_time = [1]
up_intercepts_start_time = [5]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [50]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
# test case - 2: from up_intercepts to peak, increases from 30 to 60, then decreases from 60 to 10 by 5 point count.
# which is greater than 4 (self._min_neg_slope_count_peak_correction = 4).
# new peak sample value is 60. previous peak is at sample 100. so, amplitude change from new peak to prev peak is = 80%.
# 80% is not less than 30% (self._max_amplitude_change_peak_correction = 30).
# so peak position will not be updated.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 60, 50, 40, 30, 20, 10, 100]
valleys_start_time = [1]
up_intercepts_start_time = [3]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [30]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
# test case - 3: from up_intercepts to peak, increases from 30 to 90, then decreases from 90 to 10 by 5 point count.
# which is greater than 4 (self._min_neg_slope_count_peak_correction = 4).
# new peak sample value is 90. previous peak is at sample 100. so, amplitude change from new peak to prev peak is = 12.5%.
# 12.5% is less than 30% (self._max_amplitude_change_peak_correction = 30).
# so peak position will be updated to new peak (sample = 90, start_time = 4)
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 90, 50, 40, 30, 20, 10, 100]
valleys_start_time = [1]
up_intercepts_start_time = [3]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [30]
peaks_samples = [100]
expected_peaks_start_time = [4]
expected_peaks_samples = [90]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
for i, item in enumerate(test_cases):
data_start_time = item['data_start_time']
data_samples = item['data_samples']
valleys_start_time = item['valleys_start_time']
up_intercepts_start_time = item['up_intercepts_start_time']
peaks_start_time = item['peaks_start_time']
valleys_samples = item['valleys_samples']
up_intercepts_samples = item['up_intercepts_samples']
peaks_samples = item['peaks_samples']
expected_peaks_start_time = item['expected_peaks_start_time']
expected_peaks_samples = item['expected_peaks_samples']
valleys_input = form_data_point_list_from_start_time_sample(start_time_list=valleys_start_time,
sample_list=valleys_samples)
up_intercepts_input = form_data_point_list_from_start_time_sample(start_time_list=up_intercepts_start_time,
sample_list=up_intercepts_samples)
peaks_input = form_data_point_list_from_start_time_sample(start_time_list=peaks_start_time,
sample_list=peaks_samples)
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time,
sample_list=data_samples)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
peaks_output = correct_peak_position(peaks=peaks_input,
valleys=valleys_input,
up_intercepts=up_intercepts_input,
data=data_input,
max_amplitude_change_peak_correction=self._max_amplitude_change_peak_correction,
min_neg_slope_count_peak_correction=self._min_neg_slope_count_peak_correction,
data_start_time_to_index=data_start_time_to_index)
peaks_output_samples = [i.sample for i in peaks_output]
peaks_output_start_time = [i.start_time for i in peaks_output]
self.assertTrue(np.array_equal(expected_peaks_start_time, peaks_output_start_time),
msg='Test failed for test case ' + str(i))
self.assertTrue(np.array_equal(expected_peaks_samples, peaks_output_samples),
msg='Test failed for test case ' + str(i))
def test_remove_close_valley_peak_pair(self):
valleys_start_time = form_time_delta_list_from_start_time_in_seconds([1, 2]) # time in seconds
peaks_start_time = form_time_delta_list_from_start_time_in_seconds(
[1 + self._minimum_peak_to_valley_time_diff + 0.1,
2 + self._minimum_peak_to_valley_time_diff - 0.1]) # time in seconds
expected_valleys_start_time = form_time_delta_list_from_start_time_in_seconds([1])
expected_peaks_start_time = form_time_delta_list_from_start_time_in_seconds(
[1 + self._minimum_peak_to_valley_time_diff + 0.1])
input_peaks = form_data_point_from_start_time_array(peaks_start_time)
input_valleys = form_data_point_from_start_time_array(valleys_start_time)
output_peaks, output_valleys = remove_close_valley_peak_pair(peaks=input_peaks, valleys=input_valleys,
minimum_peak_to_valley_time_diff=self._minimum_peak_to_valley_time_diff)
output_peaks_start_time = [i.start_time for i in output_peaks]
output_valleys_start_time = [i.start_time for i in output_valleys]
self.assertTrue(np.array_equal(expected_peaks_start_time, output_peaks_start_time))
self.assertTrue(np.array_equal(expected_valleys_start_time, output_valleys_start_time))
def test_filter_expiration_duration_outlier(self):
peaks_start_time = form_time_delta_list_from_start_time_in_seconds([1, 2, 3, 4, 5])
valleys_start_time = form_time_delta_list_from_start_time_in_seconds(
[0, 1 + self._threshold_expiration_duration + .1, 2 + self._threshold_expiration_duration - .1,
3 + self._threshold_expiration_duration + .1, 4 + self._threshold_expiration_duration - .1])
expected_peaks_start_time = form_time_delta_list_from_start_time_in_seconds([1, 3, 5])
expected_valleys_start_time = form_time_delta_list_from_start_time_in_seconds(
[0, 1 + self._threshold_expiration_duration + .1, 3 + self._threshold_expiration_duration + .1])
input_peaks = form_data_point_from_start_time_array(peaks_start_time)
input_valleys = form_data_point_from_start_time_array(valleys_start_time)
output_peaks, output_valleys = filter_expiration_duration_outlier(peaks=input_peaks, valleys=input_valleys,
threshold_expiration_duration=self._threshold_expiration_duration)
output_peaks_start_time = [i.start_time for i in output_peaks]
output_valleys_start_time = [i.start_time for i in output_valleys]
self.assertTrue(np.array_equal(expected_peaks_start_time, output_peaks_start_time))
self.assertTrue(np.array_equal(expected_valleys_start_time, output_valleys_start_time))
def test_filter_small_amp_inspiration_peak_valley(self):
valleys_sample = [1, 2, 3, 4, 5]
peak_sample = [21, 22, 23, 24, 5.5]
        # self._inspiration_amplitude_threshold_perc is 0.10; the average inspiration amplitude here is 16.1, so the threshold is 1.61. inspiration[4] = peak[4] - valley[4] = 0.5 < 1.61, so the last peak/valley pair is expected to be filtered out.
expected_valleys_sample = [1, 2, 3, 4]
expected_peaks_sample = [21, 22, 23, 24]
input_valleys = form_data_point_from_sample_array(sample_list=valleys_sample)
input_peaks = form_data_point_from_sample_array(sample_list=peak_sample)
output_peaks, output_valleys = filter_small_amp_inspiration_peak_valley(peaks=input_peaks,
valleys=input_valleys,
inspiration_amplitude_threshold_perc=0.1)
output_valleys_sample = [i.sample for i in output_valleys]
output_peaks_sample = [i.sample for i in output_peaks]
self.assertTrue(np.array_equal(expected_peaks_sample, output_peaks_sample))
self.assertTrue(np.array_equal(expected_valleys_sample, output_valleys_sample))
def test_filter_small_amp_expiration_peak_valley(self):
valleys_sample = [1, 2, 3, 4, 5]
peak_sample = [22, 23, 24, 5.5, 26]
        # self._expiration_amplitude_threshold_perc is 0.10; the average expiration amplitude here is 15.125, so the threshold is ~1.51. expiration = abs(valley(5) - peak(5.5)) = 0.5 < 1.51, so the pair peak = 5.5 / valley = 5 is expected to be filtered out.
expected_valleys_sample = [1, 2, 3, 4]
expected_peaks_sample = [22, 23, 24, 26]
input_valleys = form_data_point_from_sample_array(sample_list=valleys_sample)
input_peaks = form_data_point_from_sample_array(sample_list=peak_sample)
output_peaks, output_valleys = filter_small_amp_expiration_peak_valley(peaks=input_peaks, valleys=input_valleys,
expiration_amplitude_threshold_perc=0.1)
output_valleys_sample = [i.sample for i in output_valleys]
output_peaks_sample = [i.sample for i in output_peaks]
self.assertTrue(np.array_equal(expected_peaks_sample, output_peaks_sample))
self.assertTrue(np.array_equal(expected_valleys_sample, output_valleys_sample))
def test_timestamp_correct(self):
rip_corrected = timestamp_correct(datastream=self.rip_datastream, sampling_frequency=self._sample_frequency)
timestamp_corrected_rip_data_unique_start_time_count = len(set([i.start_time for i in rip_corrected.data]))
raw_rip_data_unique_start_time_count = len(set([i.start_time for i in self.rip_datastream.data]))
self.assertGreaterEqual(timestamp_corrected_rip_data_unique_start_time_count,
raw_rip_data_unique_start_time_count,
                                msg='Timestamp corrected rip data has duplicate start times. '
                                    'Check whether the raw rip data sample frequency mismatches the provided default rip sample frequency.')
def get_data_start_time_to_index_dic(data: List[DataPoint]) -> dict:
data_start_time_to_index = {}
for index, d in enumerate(data):
data_start_time_to_index[d.start_time] = index
return data_start_time_to_index
def form_data_point_from_start_time_array(start_time_list):
datapoints = []
for i in start_time_list:
datapoints.append(DataPoint.from_tuple(i, 0))
return datapoints
def form_data_point_list_from_start_time_sample(start_time_list,
sample_list):
datapoints = []
if len(start_time_list) == len(sample_list):
for i, start_time in enumerate(start_time_list):
datapoints.append(DataPoint.from_tuple(start_time, sample_list[i]))
else:
        raise Exception('Length of start_time list and sample list mismatch.')
return datapoints
def form_time_delta_list_from_start_time_in_seconds(start_time_list):
start_time_time_delta_list = []
for i in start_time_list:
start_time_time_delta_list.append(timedelta(seconds=i))
return start_time_time_delta_list
def form_data_point_from_sample_array(sample_list):
datapoints = []
for i in sample_list:
datapoints.append(DataPoint.from_tuple(start_time=datetime.now(), sample=i))
return datapoints
if __name__ == '__main__':
unittest.main()
|
[
"cerebralcortex.data_processor.signalprocessing.rip.correct_valley_position",
"cerebralcortex.data_processor.signalprocessing.rip.filter_intercept_outlier",
"cerebralcortex.data_processor.signalprocessing.rip.remove_close_valley_peak_pair",
"cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_inspiration_peak_valley",
"numpy.round",
"unittest.main",
"cerebralcortex.data_processor.signalprocessing.rip.generate_peak_valley",
"os.path.dirname",
"cerebralcortex.data_processor.signalprocessing.alignment.timestamp_correct",
"datetime.timedelta",
"datetime.datetime.now",
"cerebralcortex.data_processor.signalprocessing.rip.up_down_intercepts",
"cerebralcortex.data_processor.signalprocessing.vector.smooth",
"cerebralcortex.data_processor.signalprocessing.rip.filter_expiration_duration_outlier",
"datetime.datetime.fromtimestamp",
"cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_expiration_peak_valley",
"cerebralcortex.data_processor.signalprocessing.rip.correct_peak_position",
"cerebralcortex.data_processor.signalprocessing.vector.moving_average_curve",
"cerebralcortex.kernel.datatypes.datapoint.DataPoint.from_tuple",
"cerebralcortex.kernel.datatypes.datastream.DataStream",
"pytz.timezone",
"numpy.array_equal"
] |
[((28847, 28862), 'unittest.main', 'unittest.main', ([], {}), '()\n', (28860, 28862), False, 'import unittest\n'), ((2307, 2334), 'pytz.timezone', 'pytz.timezone', (['"""US/Eastern"""'], {}), "('US/Eastern')\n", (2320, 2334), False, 'import pytz\n'), ((3283, 3305), 'cerebralcortex.kernel.datatypes.datastream.DataStream', 'DataStream', (['None', 'None'], {}), '(None, None)\n', (3293, 3305), False, 'from cerebralcortex.kernel.datatypes.datastream import DataStream\n'), ((3386, 3408), 'cerebralcortex.kernel.datatypes.datastream.DataStream', 'DataStream', (['None', 'None'], {}), '(None, None)\n', (3396, 3408), False, 'from cerebralcortex.kernel.datatypes.datastream import DataStream\n'), ((3483, 3528), 'cerebralcortex.data_processor.signalprocessing.vector.smooth', 'smooth', (['ds.datapoints', 'self._smoothing_factor'], {}), '(ds.datapoints, self._smoothing_factor)\n', (3489, 3528), False, 'from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve\n'), ((3940, 3962), 'cerebralcortex.kernel.datatypes.datastream.DataStream', 'DataStream', (['None', 'None'], {}), '(None, None)\n', (3950, 3962), False, 'from cerebralcortex.kernel.datatypes.datastream import DataStream\n'), ((4035, 4080), 'cerebralcortex.data_processor.signalprocessing.vector.smooth', 'smooth', (['ds.datapoints', 'self._smoothing_factor'], {}), '(ds.datapoints, self._smoothing_factor)\n', (4041, 4080), False, 'from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve\n'), ((4098, 4152), 'cerebralcortex.data_processor.signalprocessing.vector.moving_average_curve', 'moving_average_curve', (['data_smooth', 'self._window_length'], {}), '(data_smooth, self._window_length)\n', (4118, 4152), False, 'from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve\n'), ((5383, 5489), 'cerebralcortex.data_processor.signalprocessing.rip.up_down_intercepts', 'up_down_intercepts', ([], {'data': 'data_input', 'mac': 'mac_input', 'data_start_time_to_index': 'data_start_time_to_index'}), '(data=data_input, mac=mac_input, data_start_time_to_index\n =data_start_time_to_index)\n', (5401, 5489), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((10143, 10260), 'cerebralcortex.data_processor.signalprocessing.rip.generate_peak_valley', 'generate_peak_valley', ([], {'up_intercepts': 'up_intercepts_inpput', 'down_intercepts': 'down_intercepts_input', 'data': 'data_input'}), '(up_intercepts=up_intercepts_inpput, down_intercepts=\n down_intercepts_input, data=data_input)\n', (10163, 10260), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((12517, 12693), 'cerebralcortex.data_processor.signalprocessing.rip.correct_valley_position', 'correct_valley_position', ([], {'peaks': 'peaks_input', 'valleys': 'valleys_input', 'up_intercepts': 'up_intercepts_input', 'data': 'data_input', 'data_start_time_to_index': 'data_start_time_to_index'}), '(peaks=peaks_input, valleys=valleys_input,\n 
up_intercepts=up_intercepts_input, data=data_input,\n data_start_time_to_index=data_start_time_to_index)\n', (12540, 12693), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((22193, 22341), 'cerebralcortex.data_processor.signalprocessing.rip.remove_close_valley_peak_pair', 'remove_close_valley_peak_pair', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'minimum_peak_to_valley_time_diff': 'self._minimum_peak_to_valley_time_diff'}), '(peaks=input_peaks, valleys=input_valleys,\n minimum_peak_to_valley_time_diff=self._minimum_peak_to_valley_time_diff)\n', (22222, 22341), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((23676, 23823), 'cerebralcortex.data_processor.signalprocessing.rip.filter_expiration_duration_outlier', 'filter_expiration_duration_outlier', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'threshold_expiration_duration': 'self._threshold_expiration_duration'}), '(peaks=input_peaks, valleys=input_valleys,\n threshold_expiration_duration=self._threshold_expiration_duration)\n', (23710, 23823), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((24938, 25067), 'cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_inspiration_peak_valley', 'filter_small_amp_inspiration_peak_valley', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'inspiration_amplitude_threshold_perc': '(0.1)'}), '(peaks=input_peaks, valleys=\n input_valleys, inspiration_amplitude_threshold_perc=0.1)\n', (24978, 25067), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((26219, 26346), 'cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_expiration_peak_valley', 'filter_small_amp_expiration_peak_valley', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'expiration_amplitude_threshold_perc': '(0.1)'}), '(peaks=input_peaks, valleys=\n input_valleys, expiration_amplitude_threshold_perc=0.1)\n', (26258, 26346), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((26788, 26885), 'cerebralcortex.data_processor.signalprocessing.alignment.timestamp_correct', 'timestamp_correct', ([], {'datastream': 'self.rip_datastream', 
'sampling_frequency': 'self._sample_frequency'}), '(datastream=self.rip_datastream, sampling_frequency=self.\n _sample_frequency)\n', (26805, 26885), False, 'from cerebralcortex.data_processor.signalprocessing.alignment import timestamp_correct\n'), ((5779, 5853), 'numpy.array_equal', 'np.array_equal', (['expected_up_intercepts_sample', 'output_up_intercepts_sample'], {}), '(expected_up_intercepts_sample, output_up_intercepts_sample)\n', (5793, 5853), True, 'import numpy as np\n'), ((5879, 5957), 'numpy.array_equal', 'np.array_equal', (['expected_down_intercepts_sample', 'output_down_intercepts_sample'], {}), '(expected_down_intercepts_sample, output_down_intercepts_sample)\n', (5893, 5957), True, 'import numpy as np\n'), ((8411, 8467), 'cerebralcortex.data_processor.signalprocessing.rip.filter_intercept_outlier', 'filter_intercept_outlier', (['up_intercepts', 'down_intercepts'], {}), '(up_intercepts, down_intercepts)\n', (8435, 8467), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((10472, 10530), 'numpy.array_equal', 'np.array_equal', (['output_peaks_sample', 'expected_peak_samples'], {}), '(output_peaks_sample, expected_peak_samples)\n', (10486, 10530), True, 'import numpy as np\n'), ((10556, 10618), 'numpy.array_equal', 'np.array_equal', (['output_valleys_sample', 'expected_valley_samples'], {}), '(output_valleys_sample, expected_valley_samples)\n', (10570, 10618), True, 'import numpy as np\n'), ((13123, 13202), 'numpy.array_equal', 'np.array_equal', (['valleys_corrected_ouput_start_time', 'expected_valleys_start_time'], {}), '(valleys_corrected_ouput_start_time, expected_valleys_start_time)\n', (13137, 13202), True, 'import numpy as np\n'), ((13228, 13301), 'numpy.array_equal', 'np.array_equal', (['valleys_corrected_ouput_samples', 'expected_valleys_samples'], {}), '(valleys_corrected_ouput_samples, expected_valleys_samples)\n', (13242, 13301), True, 'import numpy as np\n'), ((20280, 20633), 'cerebralcortex.data_processor.signalprocessing.rip.correct_peak_position', 'correct_peak_position', ([], {'peaks': 'peaks_input', 'valleys': 'valleys_input', 'up_intercepts': 'up_intercepts_input', 'data': 'data_input', 'max_amplitude_change_peak_correction': 'self._max_amplitude_change_peak_correction', 'min_neg_slope_count_peak_correction': 'self._min_neg_slope_count_peak_correction', 'data_start_time_to_index': 'data_start_time_to_index'}), '(peaks=peaks_input, valleys=valleys_input,\n up_intercepts=up_intercepts_input, data=data_input,\n max_amplitude_change_peak_correction=self.\n _max_amplitude_change_peak_correction,\n min_neg_slope_count_peak_correction=self.\n _min_neg_slope_count_peak_correction, data_start_time_to_index=\n data_start_time_to_index)\n', (20301, 20633), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((22579, 22645), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_start_time', 'output_peaks_start_time'], {}), '(expected_peaks_start_time, output_peaks_start_time)\n', (22593, 22645), 
True, 'import numpy as np\n'), ((22671, 22741), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_start_time', 'output_valleys_start_time'], {}), '(expected_valleys_start_time, output_valleys_start_time)\n', (22685, 22741), True, 'import numpy as np\n'), ((24066, 24132), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_start_time', 'output_peaks_start_time'], {}), '(expected_peaks_start_time, output_peaks_start_time)\n', (24080, 24132), True, 'import numpy as np\n'), ((24158, 24228), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_start_time', 'output_valleys_start_time'], {}), '(expected_valleys_start_time, output_valleys_start_time)\n', (24172, 24228), True, 'import numpy as np\n'), ((25379, 25437), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_sample', 'output_peaks_sample'], {}), '(expected_peaks_sample, output_peaks_sample)\n', (25393, 25437), True, 'import numpy as np\n'), ((25463, 25525), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_sample', 'output_valleys_sample'], {}), '(expected_valleys_sample, output_valleys_sample)\n', (25477, 25525), True, 'import numpy as np\n'), ((26577, 26635), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_sample', 'output_peaks_sample'], {}), '(expected_peaks_sample, output_peaks_sample)\n', (26591, 26635), True, 'import numpy as np\n'), ((26661, 26723), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_sample', 'output_valleys_sample'], {}), '(expected_valleys_sample, output_valleys_sample)\n', (26675, 26723), True, 'import numpy as np\n'), ((27866, 27892), 'cerebralcortex.kernel.datatypes.datapoint.DataPoint.from_tuple', 'DataPoint.from_tuple', (['i', '(0)'], {}), '(i, 0)\n', (27886, 27892), False, 'from cerebralcortex.kernel.datatypes.datapoint import DataPoint\n'), ((28545, 28565), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'i'}), '(seconds=i)\n', (28554, 28565), False, 'from datetime import datetime, timedelta\n'), ((3660, 3685), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3675, 3685), False, 'import os\n'), ((4271, 4296), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4286, 4296), False, 'import os\n'), ((8806, 8894), 'numpy.array_equal', 'np.array_equal', (['up_intercepts_output_start_time', 'up_intercepts_expected_case_list[i]'], {}), '(up_intercepts_output_start_time,\n up_intercepts_expected_case_list[i])\n', (8820, 8894), True, 'import numpy as np\n'), ((9015, 9107), 'numpy.array_equal', 'np.array_equal', (['down_intercepts_output_start_time', 'down_intercepts_expected_case_list[i]'], {}), '(down_intercepts_output_start_time,\n down_intercepts_expected_case_list[i])\n', (9029, 9107), True, 'import numpy as np\n'), ((21074, 21140), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_start_time', 'peaks_output_start_time'], {}), '(expected_peaks_start_time, peaks_output_start_time)\n', (21088, 21140), True, 'import numpy as np\n'), ((21241, 21301), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_samples', 'peaks_output_samples'], {}), '(expected_peaks_samples, peaks_output_samples)\n', (21255, 21301), True, 'import numpy as np\n'), ((28202, 28250), 'cerebralcortex.kernel.datatypes.datapoint.DataPoint.from_tuple', 'DataPoint.from_tuple', (['start_time', 'sample_list[i]'], {}), '(start_time, sample_list[i])\n', (28222, 28250), False, 'from cerebralcortex.kernel.datatypes.datapoint import DataPoint\n'), ((2898, 2923), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2913, 
2923), False, 'import os\n'), ((3818, 3848), 'numpy.round', 'np.round', (['sample_smooth_matlab'], {}), '(sample_smooth_matlab)\n', (3826, 3848), True, 'import numpy as np\n'), ((3852, 3882), 'numpy.round', 'np.round', (['sample_smooth_python'], {}), '(sample_smooth_python)\n', (3860, 3882), True, 'import numpy as np\n'), ((28764, 28778), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28776, 28778), False, 'from datetime import datetime, timedelta\n'), ((3104, 3156), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(values[0] / 1000000.0)'], {'tz': 'tz'}), '(values[0] / 1000000.0, tz=tz)\n', (3126, 3156), False, 'from datetime import datetime, timedelta\n')]
|
# evaluate a decision tree on the entire small dataset
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.tree import DecisionTreeClassifier
# define dataset
X, y = make_classification(n_samples=1000, n_features=3, n_informative=2, n_redundant=1, random_state=1)
# define model
model = DecisionTreeClassifier()
# define evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
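# 10 folds repeated 3 times -> cross_val_score below returns 30 accuracy estimates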
# evaluate model
scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
# report result
print('Mean Accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))
|
[
"numpy.std",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.RepeatedStratifiedKFold",
"sklearn.datasets.make_classification",
"sklearn.tree.DecisionTreeClassifier",
"numpy.mean"
] |
[((333, 434), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)', 'n_features': '(3)', 'n_informative': '(2)', 'n_redundant': '(1)', 'random_state': '(1)'}), '(n_samples=1000, n_features=3, n_informative=2,\n n_redundant=1, random_state=1)\n', (352, 434), False, 'from sklearn.datasets import make_classification\n'), ((454, 478), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (476, 478), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((514, 579), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(3)', 'random_state': '(1)'}), '(n_splits=10, n_repeats=3, random_state=1)\n', (537, 579), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((606, 672), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 'y'], {'scoring': '"""accuracy"""', 'cv': 'cv', 'n_jobs': '(-1)'}), "(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n", (621, 672), False, 'from sklearn.model_selection import cross_val_score\n'), ((727, 739), 'numpy.mean', 'mean', (['scores'], {}), '(scores)\n', (731, 739), False, 'from numpy import mean\n'), ((741, 752), 'numpy.std', 'std', (['scores'], {}), '(scores)\n', (744, 752), False, 'from numpy import std\n')]
|
import os
import tensorflow as tf
from PIL import Image
import numpy as np
import cv2
from preprocessing import preprocessing_factory
from google.protobuf import text_format
def main(_):
labels = []
'''
# Let's read our pbtxt file into a Graph protobuf
f = open("C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pbtxt", "r")
graph_protobuf = text_format.Parse(f.read(), tf.GraphDef())
# Import the graph protobuf into our new graph.
graph_clone = tf.Graph()
with graph_clone.as_default():
tf.import_graph_def(graph_def=graph_protobuf, name="")
# Display the graph inline.
graph_clone.as_graph_def()
'''
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile("C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pb", 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
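    # nodes from gr.pb are now part of the default graph and can be fetched by name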
# Create a list of labels.
with open('C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/classes.txt', 'rt') as lf:
for l in lf:
labels.append(l.strip())
# Load from a file
image = Image.open('C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/input_images/test/001.jpg')
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
'resnet_v1_50',
is_training=False)
eval_image_size = 72
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
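    # NB: the slim preprocessing fn builds TF ops, so `image` is a tensor here; it may need
    # to be evaluated to a numpy array before being used in the feed_dict below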
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
output_layer = 'prefetch_queue/fifo_queue:0'
input_node = 'prefetch_queue/fifo_queue:0'
'''
output_layer = 'resnet_v1_50/conv1/Relu:0' OR 'resnet_v1_50/block4/unit_3/bottleneck_v1/Relu:0'
input_node = 'resnet_v1_50/SpatialSqueeze:0'
'''
with tf.Session() as sess:
try:
prob_tensor = sess.graph.get_tensor_by_name(output_layer)
predictions, = sess.run(prob_tensor, {input_node: image})
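            # the trailing comma unpacks the first (and only) row of the returned probability batch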
except KeyError:
print("Couldn't find classification output layer: " + output_layer + ".")
exit(-1)
# Print the highest probability label
highest_probability_index = np.argmax(predictions)
print('Classified as: ' + labels[highest_probability_index])
print()
# Or you can print out all of the results mapping labels to probabilities.
label_index = 0
for p in predictions:
        truncated_probability = np.float64(np.round(p, 8))
        print(labels[label_index], truncated_probability)
label_index += 1
if __name__ == '__main__':
tf.app.run()
|
[
"numpy.argmax",
"tensorflow.Session",
"PIL.Image.open",
"preprocessing.preprocessing_factory.get_preprocessing",
"numpy.round",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.GraphDef",
"tensorflow.app.run",
"tensorflow.io.gfile.GFile"
] |
[((707, 730), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (728, 730), True, 'import tensorflow as tf\n'), ((1159, 1268), 'PIL.Image.open', 'Image.open', (['"""C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/input_images/test/001.jpg"""'], {}), "(\n 'C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/input_images/test/001.jpg'\n )\n", (1169, 1268), False, 'from PIL import Image\n'), ((1289, 1363), 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', (['"""resnet_v1_50"""'], {'is_training': '(False)'}), "('resnet_v1_50', is_training=False)\n", (1328, 1363), False, 'from preprocessing import preprocessing_factory\n'), ((2200, 2222), 'numpy.argmax', 'np.argmax', (['predictions'], {}), '(predictions)\n', (2209, 2222), True, 'import numpy as np\n'), ((2597, 2609), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2607, 2609), True, 'import tensorflow as tf\n'), ((741, 847), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['"""C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pb"""', '"""rb"""'], {}), "(\n 'C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pb',\n 'rb')\n", (758, 847), True, 'import tensorflow as tf\n'), ((897, 936), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (916, 936), True, 'import tensorflow as tf\n'), ((1817, 1829), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1827, 1829), True, 'import tensorflow as tf\n'), ((2468, 2482), 'numpy.round', 'np.round', (['p', '(8)'], {}), '(p, 8)\n', (2476, 2482), True, 'import numpy as np\n')]
|
import numpy as np
import pyqtgraph as pg
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton,\
QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem
from EGGS_labrad.clients.Widgets import TextChangingButton, QCustomGroupBox
_SHELL_FONT = 'MS Shell Dlg 2'
# todo: clean up display pyqtgraph
class stability_gui(QFrame):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setFrameStyle(0x0001 | 0x0030)
self.setFixedSize(400, 875)
self.makeLayout()
self.setWindowTitle("Stability Client")
def _makeStabilityTab(self):
"""
This tab displays the trap parameters and the resultant secular frequencies.
This is independent of ion number.
Part of the Parameters QTabWidget.
"""
# parameter box
stability_widget = QWidget()
stability_widget_layout = QGridLayout(stability_widget)
# l0_distance
l0_distance_label = QLabel("Length Scale (\u03BCm)")
self.l0_distance = QLabel("00.00")
self.l0_distance.setStyleSheet('color: blue')
# # record button
# self.record_button = TextChangingButton(('Stop Recording', 'Start Recording'))
# self.record_button.setMaximumHeight(25)
# a parameter
aparam_display_label = QLabel('a-parameter')
self.aparam_display = QLabel('0.0000')
# q parameter
qparam_display_label = QLabel('q-parameter')
self.qparam_display = QLabel('0.000')
# wsecr - radial
wsecr_display_label = QLabel('\u03C9 Radial (x2\u03C0 MHz)')
self.wsecr_display = QLabel('0.000')
# wsecz - radial
wsecz_display_label = QLabel('\u03C9 Axial (x2\u03C0 MHz)')
self.wsecz_display = QLabel('0.000')
# anharmonic_limit
anharmonic_limit_label = QLabel("Anharmonic Limit (%)")
self.anharmonic_limit = QLabel("00.00")
# configure display elements
for display in (self.l0_distance, self.aparam_display, self.qparam_display, self.wsecr_display,
self.wsecz_display, self.anharmonic_limit):
display.setFont(QFont(_SHELL_FONT, pointSize=22))
display.setAlignment(Qt.AlignRight)
display.setStyleSheet('color: blue')
for display_label in (l0_distance_label, aparam_display_label, qparam_display_label,
wsecr_display_label, wsecz_display_label, anharmonic_limit_label):
display_label.setAlignment(Qt.AlignRight)
# layout parameter box elements
stability_widget_layout.addWidget(anharmonic_limit_label, 1, 0, 1, 1)
stability_widget_layout.addWidget(self.anharmonic_limit, 2, 0, 1, 1)
stability_widget_layout.addWidget(aparam_display_label, 1, 1, 1, 1)
stability_widget_layout.addWidget(self.aparam_display, 2, 1, 1, 1)
stability_widget_layout.addWidget(qparam_display_label, 1, 2, 1, 1)
stability_widget_layout.addWidget(self.qparam_display, 2, 2, 1, 1)
stability_widget_layout.addWidget(wsecr_display_label, 3, 1, 1, 1)
stability_widget_layout.addWidget(self.wsecr_display, 4, 1, 1, 1)
stability_widget_layout.addWidget(wsecz_display_label, 3, 2, 1, 1)
stability_widget_layout.addWidget(self.wsecz_display, 4, 2, 1, 1)
stability_widget_layout.addWidget(l0_distance_label, 3, 0, 1, 1)
stability_widget_layout.addWidget(self.l0_distance, 4, 0, 1, 1)
return stability_widget
def _makeIonTab(self):
"""
This tab allows configuration of ion chain data to retrieve
mode values (i.e. eigenvector components and mode frequencies).
"""
# create holders
iontab_widget = QWidget()
iontab_widget_layout = QGridLayout(iontab_widget)
# total_ions
total_ion_label = QLabel("# of ions")
self.total_ions = QDoubleSpinBox()
self.total_ions.setRange(1, 10)
self.total_ions.setDecimals(0)
self.total_ions.setSingleStep(1)
self.total_ions.setKeyboardTracking(False)
# ion_num
ion_num_label = QLabel("Ion #")
self.ion_num = QComboBox()
# ion_mass
ion_mass_label = QLabel("Ion Mass (amu)")
self.ion_mass = QDoubleSpinBox()
self.ion_mass.setRange(1, 200)
self.ion_mass.setDecimals(1)
self.ion_mass.setSingleStep(1)
self.ion_mass.setKeyboardTracking(False)
# configure display elements
for display in (self.total_ions, self.ion_num, self.ion_mass):
try:
display.setFont(QFont(_SHELL_FONT, pointSize=18))
display.setAlignment(Qt.AlignRight)
except AttributeError:
pass
for display_label in (total_ion_label, ion_num_label, ion_mass_label):
display_label.setAlignment(Qt.AlignRight)
# lay out
iontab_widget_layout.addWidget(total_ion_label, 0, 0, 1, 1)
iontab_widget_layout.addWidget(self.total_ions, 1, 0, 1, 1)
iontab_widget_layout.addWidget(ion_num_label, 0, 1, 1, 1)
iontab_widget_layout.addWidget(self.ion_num, 1, 1, 1, 1)
iontab_widget_layout.addWidget(ion_mass_label, 0, 2, 1, 1)
iontab_widget_layout.addWidget(self.ion_mass, 1, 2, 1, 1)
# todo: integrate with andor
return iontab_widget
def _makeTrapTab(self):
"""
This tab allows configuration of dynamic trap parameters.
Part of the Parameters QTabWidget.
"""
# create holders
trap_widget = QWidget()
trap_widget_layout = QGridLayout(trap_widget)
# vrf
vrf_display_label = QLabel('VRF (Vpp)')
self.vrf_display = QDoubleSpinBox()
# vrf - offset
voff_display_label = QLabel('V_off (V)')
self.voff_display = QDoubleSpinBox()
# wrf
wrf_display_label = QLabel('\u03C9RF (x2\u03C0 MHz)')
self.wrf_display = QDoubleSpinBox()
# vdc
vdc_display_label = QLabel('VDC (V)')
self.vdc_display = QDoubleSpinBox()
# configure display elements
for display in (self.vrf_display, self.voff_display, self.wrf_display, self.vdc_display):
display.setFont(QFont(_SHELL_FONT, pointSize=12))
display.setAlignment(Qt.AlignRight)
display.setDecimals(3)
display.setSingleStep(1)
display.setRange(-100, 1000)
display.setKeyboardTracking(False)
for display_label in (vrf_display_label, voff_display_label,
wrf_display_label, vdc_display_label):
display_label.setAlignment(Qt.AlignRight)
# create radio buttons
radio_widget = QWidget()
radio_widget_layout = QHBoxLayout(radio_widget)
self.values_get = QRadioButton("Get Values from System")
self.values_set = QRadioButton("Manually Set Values")
radio_widget_layout.addWidget(self.values_get)
radio_widget_layout.addWidget(self.values_set)
self.values_set.setChecked(True)
# lay out
trap_widget_layout.addWidget(radio_widget, 0, 0, 1, 2)
trap_widget_layout.addWidget(vrf_display_label, 1, 0, 1, 1)
trap_widget_layout.addWidget(self.vrf_display, 2, 0, 1, 1)
trap_widget_layout.addWidget(wrf_display_label, 1, 1, 1, 1)
trap_widget_layout.addWidget(self.wrf_display, 2, 1, 1, 1)
trap_widget_layout.addWidget(vdc_display_label, 3, 0, 1, 1)
trap_widget_layout.addWidget(self.vdc_display, 4, 0, 1, 1)
trap_widget_layout.addWidget(voff_display_label, 3, 1, 1, 1)
trap_widget_layout.addWidget(self.voff_display, 4, 1, 1, 1)
return trap_widget
def _makeGeometryTab(self):
"""
This tab allows configuration of trap geometry parameters.
Part of the Parameters QTabWidget.
"""
# r0, kr, z0, kz
# create holders
geometry_widget = QWidget()
geometry_widget_layout = QGridLayout(geometry_widget)
# display labels
r0_display_label = QLabel('r0 (\u03BCm)')
kr_display_label = QLabel('\u03BAr')
z0_display_label = QLabel('z0 (\u03BCm)')
kz_display_label = QLabel('\u03BAz')
# spin boxes
self.r0_display = QDoubleSpinBox()
self.kr_display = QDoubleSpinBox()
self.z0_display = QDoubleSpinBox()
self.kz_display = QDoubleSpinBox()
# configure display elements
for spinbox in (self.r0_display, self.kr_display, self.z0_display, self.kz_display):
spinbox.setFont(QFont(_SHELL_FONT, pointSize=12))
spinbox.setAlignment(Qt.AlignRight)
for spinbox in (self.r0_display, self.z0_display):
spinbox.setRange(0, 10000)
spinbox.setDecimals(0)
spinbox.setSingleStep(1)
for spinbox in (self.kr_display, self.kz_display):
spinbox.setRange(0, 1)
spinbox.setDecimals(3)
spinbox.setSingleStep(1)
for display_label in (r0_display_label, kr_display_label, z0_display_label, kz_display_label):
display_label.setAlignment(Qt.AlignRight)
# lay out
geometry_widget_layout.addWidget(r0_display_label, 0, 0, 1, 1)
geometry_widget_layout.addWidget(self.r0_display, 1, 0, 1, 1)
geometry_widget_layout.addWidget(kr_display_label, 0, 1, 1, 1)
geometry_widget_layout.addWidget(self.kr_display, 1, 1, 1, 1)
geometry_widget_layout.addWidget(z0_display_label, 2, 0, 1, 1)
geometry_widget_layout.addWidget(self.z0_display, 3, 0, 1, 1)
geometry_widget_layout.addWidget(kz_display_label, 2, 1, 1, 1)
geometry_widget_layout.addWidget(self.kz_display, 3, 1, 1, 1)
return geometry_widget
def _makeMathieuDisplayTab(self):
"""
This tab draws the stability plot display.
Part of the Display QTabWidget
"""
# create holder widget
mathieu_widget = QWidget()
mathieu_widget_display = QGridLayout(mathieu_widget)
# create plotwidget for display
pg.setConfigOption('background', 'k')
self.stability_display = pg.PlotWidget(name='Mathieu Stability Display', border=True)
self.stability_display.showGrid(x=True, y=True, alpha=0.5)
self.stability_display.setRange(xRange=[0, 1], yRange=[0, 0.1])
self.stability_display.setLimits(xMin=-0.1, xMax=1, yMin=-0.1, yMax=0.1)
self.stability_display.setMaximumSize(400, 400)
self.stability_display.setMinimumSize(300, 300)
self.stability_display.setLabel('left', 'a')
self.stability_display.setLabel('bottom', 'q')
self.stability_point = self.stability_display.plot(symbol='o', symbolBrush=QColor(Qt.white))
# create stability boundaries for mathieu
# todo: cut off after intersection; also do negative
xarr = np.linspace(0, 1, 100)
yarr = 0.5 * np.power(xarr, 2)
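        # lowest-order approximation to the boundary of the first stability region: a = q**2 / 2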
self.stability_region = self.stability_display.plot(symbol=None, pen=QColor(Qt.red))
self.stability_region2 = self.stability_display.plot(xarr, yarr, symbol=None, pen=QColor(Qt.red))
# beta setting
beta_setting_display = QLabel('\u03B2')
beta_setting_display.setAlignment(Qt.AlignRight)
self.beta_setting = QDoubleSpinBox()
self.beta_setting.setFont(QFont('MS Shell Dlg 2', pointSize=14))
self.beta_setting.setDecimals(1)
self.beta_setting.setSingleStep(1)
self.beta_setting.setRange(0, 5)
self.beta_setting.setKeyboardTracking(False)
self.beta_setting.setAlignment(Qt.AlignRight)
# autoscale button
self.autoscale = QPushButton("Autoscale")
# lay out
mathieu_widget_display.addWidget(beta_setting_display, 0, 0, 1, 1)
mathieu_widget_display.addWidget(self.beta_setting, 1, 0, 1, 1)
mathieu_widget_display.addWidget(self.autoscale, 1, 1, 1, 1)
mathieu_widget_display.addWidget(self.stability_display, 2, 0, 3, 3)
return mathieu_widget
def _makeEigenTab(self):
"""
This tab displays the ion chain mode data.
Part of the Display QTabWidget.
"""
# create holders
eigen_widget = QWidget()
eigen_widget_layout = QGridLayout(eigen_widget)
# create widgets
self.eigenmode_axial_display = QTreeWidget()
self.eigenmode_axial_display.setHeaderLabels(["Mode Frequency (x2\u03C0 MHz)", "Ion Number", "Mode Amplitude"])
self.eigenmode_radial_display = QTreeWidget()
self.eigenmode_radial_display.setHeaderLabels(["Mode Frequency (x2\u03C0 MHz)", "Ion Number", "Mode Amplitude"])
# lay out
eigen_widget_layout.addWidget(QCustomGroupBox(self.eigenmode_axial_display, "Axial Modes"))
eigen_widget_layout.addWidget(QCustomGroupBox(self.eigenmode_radial_display, "Radial Modes"))
return eigen_widget
def makeLayout(self):
# create parameter tab widget
parameterTabWidget = QTabWidget()
chain_widget = QWidget()
chain_widget_layout = QVBoxLayout(chain_widget)
chain_widget_layout.addWidget(QCustomGroupBox(self._makeIonTab(), "Ion Chain"))
chain_widget_layout.addWidget(QCustomGroupBox(self._makeStabilityTab(), "Ion Stability"))
trap_widget = QWidget()
trap_widget_layout = QVBoxLayout(trap_widget)
trap_widget_layout.addWidget(QCustomGroupBox(self._makeTrapTab(), "Trap Parameter"))
trap_widget_layout.addWidget(QCustomGroupBox(self._makeGeometryTab(), "Trap Geometry"))
parameterTabWidget.addTab(chain_widget, "Ion Chain")
parameterTabWidget.addTab(trap_widget, "Trap")
# create display tab widget
display_tabs = {
'Mathieu': self._makeMathieuDisplayTab(),
'Eigenmode Data': self._makeEigenTab(),
}
displayTabWidget = QTabWidget()
for tab_name, tab_widget in display_tabs.items():
displayTabWidget.addTab(tab_widget, tab_name)
# title
title = QLabel('Stability Client')
title.setFont(QFont(_SHELL_FONT, pointSize=18))
title.setAlignment(Qt.AlignCenter)
# lay out
layout = QGridLayout(self)
layout.addWidget(title, 0, 0, 1, 4)
layout.addWidget(parameterTabWidget, 1, 0, 2, 4)
layout.addWidget(displayTabWidget, 4, 0, 3, 4)
def drawStability(self, beta=0.4):
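        # pseudopotential relation beta**2 ~ a + q**2/2 gives the iso-beta curve a(q) = beta**2 - q**2/2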
xarr = np.linspace(0, 1, 100)
yarr = np.power(beta, 2) - 0.5 * np.power(xarr, 2)
self.stability_region.setData(xarr, yarr)
if __name__ == "__main__":
from EGGS_labrad.clients import runGUI
runGUI(stability_gui)
|
[
"EGGS_labrad.clients.Widgets.QCustomGroupBox",
"PyQt5.QtGui.QColor",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QTabWidget",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QRadioButton",
"numpy.power",
"PyQt5.QtWidgets.QWidget.__init__",
"numpy.linspace",
"EGGS_labrad.clients.runGUI",
"PyQt5.QtWidgets.QComboBox",
"pyqtgraph.setConfigOption",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QTreeWidget",
"PyQt5.QtWidgets.QDoubleSpinBox",
"PyQt5.QtGui.QFont",
"pyqtgraph.PlotWidget"
] |
[((15426, 15447), 'EGGS_labrad.clients.runGUI', 'runGUI', (['stability_gui'], {}), '(stability_gui)\n', (15432, 15447), False, 'from EGGS_labrad.clients import runGUI\n'), ((533, 563), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (549, 563), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((998, 1007), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (1005, 1007), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1042, 1071), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['stability_widget'], {}), '(stability_widget)\n', (1053, 1071), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1122, 1149), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Length Scale (μm)"""'], {}), "('Length Scale (μm)')\n", (1128, 1149), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1182, 1197), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""00.00"""'], {}), "('00.00')\n", (1188, 1197), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1470, 1491), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""a-parameter"""'], {}), "('a-parameter')\n", (1476, 1491), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1522, 1538), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.0000"""'], {}), "('0.0000')\n", (1528, 1538), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1592, 1613), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""q-parameter"""'], {}), "('q-parameter')\n", (1598, 1613), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1644, 1659), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.000"""'], {}), "('0.000')\n", (1650, 1659), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1715, 1743), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""ω Radial (x2π MHz)"""'], {}), "('ω Radial (x2π MHz)')\n", (1721, 1743), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1783, 1798), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.000"""'], {}), "('0.000')\n", (1789, 1798), False, 'from PyQt5.QtWidgets 
import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1854, 1881), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""ω Axial (x2π MHz)"""'], {}), "('ω Axial (x2π MHz)')\n", (1860, 1881), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1921, 1936), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.000"""'], {}), "('0.000')\n", (1927, 1936), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1997, 2027), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Anharmonic Limit (%)"""'], {}), "('Anharmonic Limit (%)')\n", (2003, 2027), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((2060, 2075), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""00.00"""'], {}), "('00.00')\n", (2066, 2075), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4034, 4043), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (4041, 4043), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4075, 4101), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['iontab_widget'], {}), '(iontab_widget)\n', (4086, 4101), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4149, 4168), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""# of ions"""'], {}), "('# of ions')\n", (4155, 4168), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4195, 4211), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (4209, 4211), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4425, 4440), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Ion #"""'], {}), "('Ion #')\n", (4431, 4440), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4464, 4475), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (4473, 4475), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4520, 4544), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Ion Mass (amu)"""'], {}), "('Ion Mass (amu)')\n", (4526, 4544), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, 
QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4569, 4585), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (4583, 4585), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((5955, 5964), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (5962, 5964), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((5994, 6018), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['trap_widget'], {}), '(trap_widget)\n', (6005, 6018), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6061, 6080), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""VRF (Vpp)"""'], {}), "('VRF (Vpp)')\n", (6067, 6080), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6108, 6124), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6122, 6124), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6177, 6196), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""V_off (V)"""'], {}), "('V_off (V)')\n", (6183, 6196), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6225, 6241), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6239, 6241), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6284, 6307), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""ωRF (x2π MHz)"""'], {}), "('ωRF (x2π MHz)')\n", (6290, 6307), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6345, 6361), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6359, 6361), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6404, 6421), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""VDC (V)"""'], {}), "('VDC (V)')\n", (6410, 6421), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6449, 6465), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6463, 6465), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, 
QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7119, 7128), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (7126, 7128), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7159, 7184), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['radio_widget'], {}), '(radio_widget)\n', (7170, 7184), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7211, 7249), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['"""Get Values from System"""'], {}), "('Get Values from System')\n", (7223, 7249), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7276, 7311), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['"""Manually Set Values"""'], {}), "('Manually Set Values')\n", (7288, 7311), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8476, 8485), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (8483, 8485), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8519, 8547), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['geometry_widget'], {}), '(geometry_widget)\n', (8530, 8547), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8601, 8618), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""r0 (μm)"""'], {}), "('r0 (μm)')\n", (8607, 8618), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8651, 8663), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""κr"""'], {}), "('κr')\n", (8657, 8663), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8696, 8713), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""z0 (μm)"""'], {}), "('z0 (μm)')\n", (8702, 8713), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8746, 8758), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""κz"""'], {}), "('κz')\n", (8752, 8758), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8812, 8828), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8826, 8828), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, 
QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8855, 8871), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8869, 8871), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8898, 8914), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8912, 8914), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8941, 8957), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8955, 8957), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((10626, 10635), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (10633, 10635), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((10669, 10696), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['mathieu_widget'], {}), '(mathieu_widget)\n', (10680, 10696), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((10745, 10782), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""k"""'], {}), "('background', 'k')\n", (10763, 10782), True, 'import pyqtgraph as pg\n'), ((10816, 10876), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'name': '"""Mathieu Stability Display"""', 'border': '(True)'}), "(name='Mathieu Stability Display', border=True)\n", (10829, 10876), True, 'import pyqtgraph as pg\n'), ((11544, 11566), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (11555, 11566), True, 'import numpy as np\n'), ((11859, 11870), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""β"""'], {}), "('β')\n", (11865, 11870), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((11961, 11977), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (11975, 11977), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((12335, 12359), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Autoscale"""'], {}), "('Autoscale')\n", (12346, 12359), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((12937, 12946), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (12944, 12946), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((12977, 13002), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['eigen_widget'], {}), 
'(eigen_widget)\n', (12988, 13002), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13067, 13080), 'PyQt5.QtWidgets.QTreeWidget', 'QTreeWidget', ([], {}), '()\n', (13078, 13080), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13241, 13254), 'PyQt5.QtWidgets.QTreeWidget', 'QTreeWidget', ([], {}), '()\n', (13252, 13254), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13718, 13730), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (13728, 13730), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13755, 13764), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (13762, 13764), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13795, 13820), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['chain_widget'], {}), '(chain_widget)\n', (13806, 13820), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14030, 14039), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (14037, 14039), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14069, 14093), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['trap_widget'], {}), '(trap_widget)\n', (14080, 14093), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14605, 14617), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (14615, 14617), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14767, 14793), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Stability Client"""'], {}), "('Stability Client')\n", (14773, 14793), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14929, 14946), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self'], {}), '(self)\n', (14940, 14946), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((15218, 15240), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (15229, 15240), True, 'import numpy as np\n'), ((11588, 
11605), 'numpy.power', 'np.power', (['xarr', '(2)'], {}), '(xarr, 2)\n', (11596, 11605), True, 'import numpy as np\n'), ((12012, 12049), 'PyQt5.QtGui.QFont', 'QFont', (['"""MS Shell Dlg 2"""'], {'pointSize': '(14)'}), "('MS Shell Dlg 2', pointSize=14)\n", (12017, 12049), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((13432, 13492), 'EGGS_labrad.clients.Widgets.QCustomGroupBox', 'QCustomGroupBox', (['self.eigenmode_axial_display', '"""Axial Modes"""'], {}), "(self.eigenmode_axial_display, 'Axial Modes')\n", (13447, 13492), False, 'from EGGS_labrad.clients.Widgets import TextChangingButton, QCustomGroupBox\n'), ((13532, 13594), 'EGGS_labrad.clients.Widgets.QCustomGroupBox', 'QCustomGroupBox', (['self.eigenmode_radial_display', '"""Radial Modes"""'], {}), "(self.eigenmode_radial_display, 'Radial Modes')\n", (13547, 13594), False, 'from EGGS_labrad.clients.Widgets import TextChangingButton, QCustomGroupBox\n'), ((14816, 14848), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(18)'}), '(_SHELL_FONT, pointSize=18)\n', (14821, 14848), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((15256, 15273), 'numpy.power', 'np.power', (['beta', '(2)'], {}), '(beta, 2)\n', (15264, 15273), True, 'import numpy as np\n'), ((2314, 2346), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(22)'}), '(_SHELL_FONT, pointSize=22)\n', (2319, 2346), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((6630, 6662), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(12)'}), '(_SHELL_FONT, pointSize=12)\n', (6635, 6662), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((9117, 9149), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(12)'}), '(_SHELL_FONT, pointSize=12)\n', (9122, 9149), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((11400, 11416), 'PyQt5.QtGui.QColor', 'QColor', (['Qt.white'], {}), '(Qt.white)\n', (11406, 11416), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((11683, 11697), 'PyQt5.QtGui.QColor', 'QColor', (['Qt.red'], {}), '(Qt.red)\n', (11689, 11697), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((11789, 11803), 'PyQt5.QtGui.QColor', 'QColor', (['Qt.red'], {}), '(Qt.red)\n', (11795, 11803), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((15282, 15299), 'numpy.power', 'np.power', (['xarr', '(2)'], {}), '(xarr, 2)\n', (15290, 15299), True, 'import numpy as np\n'), ((4908, 4940), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(18)'}), '(_SHELL_FONT, pointSize=18)\n', (4913, 4940), False, 'from PyQt5.QtGui import QFont, QColor\n')]
|
#
# styleopt.py
# Artistic Style Transfer
# Optimisation method
# as defined in Gatys et al.
#
import os
import api
import numpy as np
import tensorflow as tf
import keras.backend as K
import matplotlib.pyplot as plt
import stylefn
from PIL import Image
from keras.models import Model, Sequential
from util import apply_settings
from tensorflow.contrib.opt import ScipyOptimizerInterface
from datetime import datetime
# Style transfer settings
# NOTE: the following are default settings and may be overriden
SETTINGS = {
"image_shape": (512, 512, 3),
# Optimisation settings
"learning_rate": 10,
"n_epochs": 100,
}
# Represents the computational graph that will perform style transfer using the
# optimisation method
class TransfuseGraph:
# Create a style transfer graph that caters the style and content images shapes
# with the given style transfer settings overrides & pastiche init value
def __init__(self, pastiche_init, settings):
self.settings = settings
# Define tensor shapes
self.style_shape = self.settings["image_shape"]
self.content_shape = self.settings["image_shape"]
self.pastiche_shape = self.settings["image_shape"]
self.build(pastiche_init)
# Build style transfer graph for the given pastiche_init value
def build(self, pastiche_init):
K.clear_session()
# Setup content and style tensors
self.content_op = K.placeholder(self.content_shape, name="content")
self.style_op = K.placeholder(self.style_shape, name="style")
# Setup pastiche tensor derieved from random noise
self.pastiche_op = K.variable(pastiche_init, name="pastiche")
# Build style transfer graph
self.loss_op = stylefn.build_loss(self.pastiche_op, self.content_op,
self.style_op, self.settings)
# Setup optimisation
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(
self.loss_op, method='L-BFGS-B', options={'maxiter': 20},
var_list=[self.pastiche_op])
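        # each minimize() call runs up to 20 L-BFGS-B iterations, optimising only the pastiche pixels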
# Setup tensorboard
self.summary_op = tf.summary.merge_all()
self.writer = tf.summary.FileWriter("./logs/{}-{}".format(
self.settings, datetime.now().strftime("%H:%M:%S")))
self.session = K.get_session()
# Perform one iteration of style transfer using the inputs in feed dic
def transfer(self, feed):
# Perform training setup
self.optimizer.minimize(self.session, feed_dict=feed)
# Callback for writing tensorboard information given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_tensorboard(graph, feed, i_epoch):
summary = graph.session.run(graph.summary_op, feed_dict=feed)
graph.writer.add_summary(summary, i_epoch)
# Callback for displaying progress information given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_progress(graph, feed, i_epoch):
loss = graph.session.run(graph.loss_op, feed_dict=feed)
print("[{}/{}] loss: {:e}".format(i_epoch, graph.settings["n_epochs"], loss))
# Callback to display current pastiche given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_pastiche(graph, feed, i_epoch):
pastiche = graph.session.run(graph.pastiche_op, feed_dict=feed)
pastiche_image = stylefn.deprocess_image(pastiche, graph.pastiche_shape)
# Display image as a plot
plt.imshow(np.asarray(pastiche_image))
plt.draw()
plt.pause(1e-6)
plt.clf()
# Perform style transfer using the optimisation method on the given content image
# using the style from the given style image, parameterised by settings
# Applys the given style transfer settings before performing style transfer
# Every callback_step number of epochs, will call the given callbacks
# Returns the pastiche, the results of performing style transfer
def transfer_style(content_image, style_image, settings={}, callbacks=[], callback_step=1):
# Apply setting overrides
settings = apply_settings(settings, SETTINGS)
print(settings)
# Preprocess image data
image_shape = settings["image_shape"]
content = stylefn.preprocess_image(content_image, image_shape)
style = stylefn.preprocess_image(style_image, image_shape)
# Define limits for generated pastiche
min_limits = - stylefn.IMG_BGR_MEAN
max_limits = 255.0 - stylefn.IMG_BGR_MEAN
# Build style transfer graph
pastiche_init = np.random.uniform(size=image_shape) * 255.0 - 127.5
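    # pastiche_init spans roughly [-127.5, 127.5], i.e. zero-centred noise in the mean-subtracted BGR space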
graph = TransfuseGraph(pastiche_init=pastiche_init, settings=settings)
session = graph.session
session.run(tf.global_variables_initializer())
# Optimise style transfer graph to perform style transfer
feed = {graph.content_op: content, graph.style_op: style}
n_epochs = settings["n_epochs"]
for i_epoch in range(1, n_epochs + 1):
        # Clip the pastiche to ensure values stay within limits
        clipped_pastiche_op = tf.clip_by_value(graph.pastiche_op,
                                               min_limits, max_limits)
        # assign() only builds the op in graph mode; it must be run for the clip to take effect
        session.run(graph.pastiche_op.assign(clipped_pastiche_op))
# Perform style transfer
graph.transfer(feed)
# Call callbacks
if i_epoch % callback_step == 0:
for callback in callbacks: callback(graph, feed, i_epoch)
# Deprocess style transfered image
pastiche = session.run(graph.pastiche_op, feed_dict=feed)
pastiche_image = stylefn.deprocess_image(pastiche, image_shape)
return pastiche_image
if __name__ == "__main__":
content_image = Image.open("data/Tuebingen_Neckarfront.jpg")
style_image = Image.open("data/stary_night.jpg")
settings = {
"image_shape": (32, 32, 3),
"n_epochs": 100
}
pastiche_image = transfer_style(content_image, style_image, settings=settings,
callbacks=[callback_pastiche, callback_progress,
callback_tensorboard],
callback_step=20)
pastiche_image.save("pastiche.jpg")
|
[
"tensorflow.clip_by_value",
"matplotlib.pyplot.clf",
"keras.backend.placeholder",
"matplotlib.pyplot.draw",
"stylefn.build_loss",
"datetime.datetime.now",
"matplotlib.pyplot.pause",
"tensorflow.summary.merge_all",
"keras.backend.clear_session",
"tensorflow.contrib.opt.ScipyOptimizerInterface",
"util.apply_settings",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"stylefn.deprocess_image",
"stylefn.preprocess_image",
"numpy.random.uniform",
"keras.backend.get_session",
"PIL.Image.open",
"keras.backend.variable"
] |
[((3469, 3524), 'stylefn.deprocess_image', 'stylefn.deprocess_image', (['pastiche', 'graph.pastiche_shape'], {}), '(pastiche, graph.pastiche_shape)\n', (3492, 3524), False, 'import stylefn\n'), ((3607, 3617), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3615, 3617), True, 'import matplotlib.pyplot as plt\n'), ((3622, 3638), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-06)'], {}), '(1e-06)\n', (3631, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3651), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3649, 3651), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4188), 'util.apply_settings', 'apply_settings', (['settings', 'SETTINGS'], {}), '(settings, SETTINGS)\n', (4168, 4188), False, 'from util import apply_settings\n'), ((4294, 4346), 'stylefn.preprocess_image', 'stylefn.preprocess_image', (['content_image', 'image_shape'], {}), '(content_image, image_shape)\n', (4318, 4346), False, 'import stylefn\n'), ((4359, 4409), 'stylefn.preprocess_image', 'stylefn.preprocess_image', (['style_image', 'image_shape'], {}), '(style_image, image_shape)\n', (4383, 4409), False, 'import stylefn\n'), ((5600, 5646), 'stylefn.deprocess_image', 'stylefn.deprocess_image', (['pastiche', 'image_shape'], {}), '(pastiche, image_shape)\n', (5623, 5646), False, 'import stylefn\n'), ((5726, 5770), 'PIL.Image.open', 'Image.open', (['"""data/Tuebingen_Neckarfront.jpg"""'], {}), "('data/Tuebingen_Neckarfront.jpg')\n", (5736, 5770), False, 'from PIL import Image\n'), ((5789, 5823), 'PIL.Image.open', 'Image.open', (['"""data/stary_night.jpg"""'], {}), "('data/stary_night.jpg')\n", (5799, 5823), False, 'from PIL import Image\n'), ((1369, 1386), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (1384, 1386), True, 'import keras.backend as K\n'), ((1456, 1505), 'keras.backend.placeholder', 'K.placeholder', (['self.content_shape'], {'name': '"""content"""'}), "(self.content_shape, name='content')\n", (1469, 1505), True, 'import keras.backend as K\n'), ((1530, 1575), 'keras.backend.placeholder', 'K.placeholder', (['self.style_shape'], {'name': '"""style"""'}), "(self.style_shape, name='style')\n", (1543, 1575), True, 'import keras.backend as K\n'), ((1671, 1713), 'keras.backend.variable', 'K.variable', (['pastiche_init'], {'name': '"""pastiche"""'}), "(pastiche_init, name='pastiche')\n", (1681, 1713), True, 'import keras.backend as K\n'), ((1775, 1863), 'stylefn.build_loss', 'stylefn.build_loss', (['self.pastiche_op', 'self.content_op', 'self.style_op', 'self.settings'], {}), '(self.pastiche_op, self.content_op, self.style_op, self.\n settings)\n', (1793, 1863), False, 'import stylefn\n'), ((1961, 2090), 'tensorflow.contrib.opt.ScipyOptimizerInterface', 'tf.contrib.opt.ScipyOptimizerInterface', (['self.loss_op'], {'method': '"""L-BFGS-B"""', 'options': "{'maxiter': 20}", 'var_list': '[self.pastiche_op]'}), "(self.loss_op, method='L-BFGS-B',\n options={'maxiter': 20}, var_list=[self.pastiche_op])\n", (1999, 2090), True, 'import tensorflow as tf\n'), ((2176, 2198), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (2196, 2198), True, 'import tensorflow as tf\n'), ((2355, 2370), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (2368, 2370), True, 'import keras.backend as K\n'), ((3575, 3601), 'numpy.asarray', 'np.asarray', (['pastiche_image'], {}), '(pastiche_image)\n', (3585, 3601), True, 'import numpy as np\n'), ((4766, 4799), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4797, 4799), 
True, 'import tensorflow as tf\n'), ((5103, 5162), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['graph.pastiche_op', 'min_limits', 'max_limits'], {}), '(graph.pastiche_op, min_limits, max_limits)\n', (5119, 5162), True, 'import tensorflow as tf\n'), ((4595, 4630), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'image_shape'}), '(size=image_shape)\n', (4612, 4630), True, 'import numpy as np\n'), ((2293, 2307), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2305, 2307), False, 'from datetime import datetime\n')]
|
# A simple python script to check mass conservation over time
# by comparing volume-integral changes with time-integrated fluxes and source
import numpy as np
import matplotlib.pyplot as plt
# output data for setup
M = 1.0
mu = 0.05
r = 300
symmetry = 4
# make the plot
fig = plt.figure()
# volume integral dataset out
data1 = np.loadtxt("VolumeIntegrals.dat")
timedata = data1[:,0]
dM = symmetry*data1[:,3] - symmetry*data1[0,3]
Source = symmetry*data1[:,4]
# flux dataset out
data1 = np.loadtxt("SurfaceIntegrals.dat")
labelstring = "integral(Flux * dt)"
timedata = data1[:,0]
dt = timedata[1] - timedata[0]
NetEiFlux = data1[:,3]
NetEoFlux = data1[:,6]
FEodt = np.zeros_like(timedata)
FEidt = np.zeros_like(timedata)
Source_dt = np.zeros_like(timedata)
for i, F in enumerate(timedata) :
if (i > 0) :
FEodt[i] += FEodt[i-1] + NetEoFlux[i] * dt
FEidt[i] += FEidt[i-1] + NetEiFlux[i] * dt
Source_dt[i] += Source_dt[i-1]+ Source[i] * dt
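# sanity check: M - M0 should equal the integrated (inner - outer) flux plus the integrated source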
plt.plot(timedata, FEodt, '-', lw = 1.0, label="Mdot outer dt")
plt.plot(timedata, FEidt, '-', lw = 1.0, label="Mdot inner dt")
plt.plot(timedata, Source_dt, '-', lw = 1.0, label="Source dt")
plt.plot(timedata, dM, '-', lw = 1.0, label="M-M0")
plt.plot(timedata, FEidt - FEodt + Source_dt, '--', lw = 1.0, label="check M-M0")
# make the plot look nice
plt.xlabel("time")
plt.ylabel("Change in Cloud Mom")
#plt.xlim(0, 100)
#plt.ylim(-10, 10)
plt.legend(loc=0)
plt.grid()
# save as png image
filename = "MvsT.png"
plt.savefig(filename)
|
[
"numpy.zeros_like",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((219, 231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (229, 231), True, 'import matplotlib.pyplot as plt\n'), ((271, 304), 'numpy.loadtxt', 'np.loadtxt', (['"""VolumeIntegrals.dat"""'], {}), "('VolumeIntegrals.dat')\n", (281, 304), True, 'import numpy as np\n'), ((431, 465), 'numpy.loadtxt', 'np.loadtxt', (['"""SurfaceIntegrals.dat"""'], {}), "('SurfaceIntegrals.dat')\n", (441, 465), True, 'import numpy as np\n'), ((609, 632), 'numpy.zeros_like', 'np.zeros_like', (['timedata'], {}), '(timedata)\n', (622, 632), True, 'import numpy as np\n'), ((641, 664), 'numpy.zeros_like', 'np.zeros_like', (['timedata'], {}), '(timedata)\n', (654, 664), True, 'import numpy as np\n'), ((677, 700), 'numpy.zeros_like', 'np.zeros_like', (['timedata'], {}), '(timedata)\n', (690, 700), True, 'import numpy as np\n'), ((907, 968), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'FEodt', '"""-"""'], {'lw': '(1.0)', 'label': '"""Mdot outer dt"""'}), "(timedata, FEodt, '-', lw=1.0, label='Mdot outer dt')\n", (915, 968), True, 'import matplotlib.pyplot as plt\n'), ((971, 1032), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'FEidt', '"""-"""'], {'lw': '(1.0)', 'label': '"""Mdot inner dt"""'}), "(timedata, FEidt, '-', lw=1.0, label='Mdot inner dt')\n", (979, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1035, 1096), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'Source_dt', '"""-"""'], {'lw': '(1.0)', 'label': '"""Source dt"""'}), "(timedata, Source_dt, '-', lw=1.0, label='Source dt')\n", (1043, 1096), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1148), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'dM', '"""-"""'], {'lw': '(1.0)', 'label': '"""M-M0"""'}), "(timedata, dM, '-', lw=1.0, label='M-M0')\n", (1107, 1148), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1230), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', '(FEidt - FEodt + Source_dt)', '"""--"""'], {'lw': '(1.0)', 'label': '"""check M-M0"""'}), "(timedata, FEidt - FEodt + Source_dt, '--', lw=1.0, label='check M-M0')\n", (1159, 1230), True, 'import matplotlib.pyplot as plt\n'), ((1260, 1278), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (1270, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1312), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Change in Cloud Mom"""'], {}), "('Change in Cloud Mom')\n", (1289, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1350, 1367), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (1360, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1368, 1378), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1376, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1443), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1433, 1443), True, 'import matplotlib.pyplot as plt\n')]
|
import os
import matplotlib.pyplot as plt
import numpy as np
import json
import seaborn as sns
from collections import deque
sns.set()
import glob2
import argparse
from cycler import cycler
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from matplotlib.font_manager import FontProperties
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter, MaxNLocator
def load_results(file, dtype=None):
if not os.path.exists(file):
return None
with open(file, 'r') as f:
lines = [line for line in f]
if len(lines) < 2:
return None
keys = [name.strip() for name in lines[0].split(',')]
if dtype is None:
data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
else:
data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0., dtype=dtype)
if data.ndim == 1:
data = data.reshape(1, -1)
assert data.ndim == 2
assert data.shape[-1] == len(keys)
result = {}
for idx, key in enumerate(keys):
result[key] = data[:, idx]
return result
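def _load_results_example(tmp_csv='/tmp/example_progress.csv'):
    """Hedged sketch (not part of the original script): load_results parses a
    comma-separated file whose first line is the header and returns a dict of
    1-D column arrays keyed by the stripped header names. The file name and
    column names here are made up for illustration."""
    with open(tmp_csv, 'w') as f:
        f.write("epoch, success_rate\n0, 0.1\n1, 0.4\n2, 0.8\n")
    res = load_results(tmp_csv)
    print(res['epoch'])          # -> [0. 1. 2.]
    print(res['success_rate'])   # -> [0.1 0.4 0.8]
    return res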
# def pad(xs, value=np.nan, maxlen=None):
# if maxlen is None:
# maxlen = np.max([len(x) for x in xs])
#
# padded_xs = []
# for x in xs:
# if x.shape[0] >= maxlen:
# padded_xs.append(x)
#
# padding = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value
# x_padded = np.concatenate([x, padding], axis=0)
# assert x_padded.shape[1:] == x.shape[1:]
# assert x_padded.shape[0] == maxlen
# padded_xs.append(x_padded)
# return np.array(padded_xs)
#
# def smooth_curve(x, y):
# halfwidth = int(np.ceil(len(x) / 60)) # Halfwidth of our smoothing convolution
# k = halfwidth
# xsmoo = x
# ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
# mode='same')
# return xsmoo, ysmoo
def prepare_data(paths):
inter_dict = {}
var_param_keys = set()
max_episodes = 0
for curr_path in paths:
if not os.path.isdir(curr_path):
continue
print('loading {}'.format(curr_path))
# results = load_results(os.path.join(curr_path, 'mask_records.csv'))
# if not results:
# print('skipping {}'.format(curr_path))
# continue
with open(os.path.join(curr_path, 'params.json'), 'r') as f:
params = json.load(f)
for k,v in params.items():
if k not in inter_dict.keys():
inter_dict[k] = [v]
if v not in inter_dict[k]:
inter_dict[k].append(v)
var_param_keys.add(k)
# max_episodes = max(max_episodes, len(results['episode']))
return var_param_keys
def plot_epochs_success(data, percent_to_achieve, fig_dir):
plt.clf()
# fig = plt.figure(figsize=(20, 8))
plt.figure(figsize=(9, 4.5))
new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
# plt.rc('axes', prop_cycle=(cycler('linestyle', ['-', '--', ':']) * cycler('color', new_colors)))
surf_plot_data = {}
uniform_sampling_epochs = []
none_sampling_epochs = []
kappa_s = set()
rg_s = set()
for config in sorted(data.keys()):
epochs = []
for d in data[config]:
try:
epoch = min(np.argwhere(d[1] > percent_to_achieve))[0]
except:
print("Not enough data for {}".format(config))
continue
epochs.append(epoch)
# epochs = [len(d[0]) for d in data[config]]
if 'curriculum_sampling: none' in config:
none_sampling_epochs += epochs
kappa_s.add(-1)
continue
median_epochs = np.median(epochs)
min_perc = np.nanpercentile(epochs, 25, axis=0)
max_perc = np.nanpercentile(epochs, 75, axis=0)
avg_epochs = np.mean(epochs)
n_runs = len(epochs)
std_epochs = np.std(epochs)
if 'stochastic3_' not in config:
continue
rg = float(config.split("stochastic3_")[1].split("_")[0])
rg_s.add(rg)
kappa = float(config.split("stochastic3_")[1].split("_")[2])
kappa_s.add(kappa)
if rg not in surf_plot_data.keys():
surf_plot_data[rg] = {}
if kappa == 0.0:
uniform_sampling_epochs += epochs
surf_plot_data[rg][kappa] = (avg_epochs, std_epochs, n_runs, median_epochs, min_perc, max_perc)
uniform_avg_epochs = np.mean(uniform_sampling_epochs)
none_avg_epochs = np.mean(none_sampling_epochs)
uniform_std_epochs = np.std(uniform_sampling_epochs)
none_std_epochs = np.std(none_sampling_epochs)
uniform_median_epochs = np.median(uniform_sampling_epochs)
none_median_epochs = np.median(none_sampling_epochs)
uniform_min_perc = np.nanpercentile(uniform_sampling_epochs, 25, axis=0)
none_min_perc = np.nanpercentile(none_sampling_epochs, 25, axis=0)
uniform_max_perc = np.nanpercentile(uniform_sampling_epochs, 75, axis=0)
none_max_perc = np.nanpercentile(none_sampling_epochs, 75, axis=0)
for rg in surf_plot_data.keys():
surf_plot_data[rg][0.0] = (
uniform_avg_epochs, uniform_std_epochs, len(uniform_sampling_epochs), uniform_median_epochs, uniform_min_perc,
uniform_max_perc)
surf_plot_data[rg][-1] = (
none_avg_epochs, none_std_epochs, len(none_sampling_epochs), none_median_epochs, none_min_perc,
none_max_perc)
kappa_s = sorted(list(kappa_s))
# kappa_s.insert(1,0)
rg_s = sorted(list(rg_s))
# surf_plot_data_arr = np.array(list(surf_plot_data.items()))
for idx, kappa in enumerate(kappa_s):
# label = "c={} -n: {}".format(c, len(surf_plot_data[0][kappa]))
# n_runs = ''
# n_runs = np.mean(0)
c_label = "$\kappa$={}".format(kappa)
if kappa== -1:
c_label = "no CGM"
continue
if kappa== 0:
c_label = "uniform GM"
continue
label = "{}".format(c_label)
xs = sorted(list(surf_plot_data.keys()))
xs = np.array([k for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        # ys = np.array([surf_plot_data[k][kappa][0] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
# std_ys = np.array([surf_plot_data[k][kappa][1] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
# min_vals = ys + std_ys
# max_vals = ys - std_ys
ys = np.array([surf_plot_data[k][kappa][3] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
n_runs = np.array([surf_plot_data[k][kappa][2] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
min_vals = np.array([surf_plot_data[k][kappa][4] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
max_vals = np.array([surf_plot_data[k][kappa][5] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
if np.array(xs).shape != ys.shape:
print("This data probably has not all kappas")
continue
color = new_colors[idx]
print("C {} has color {}".format(kappa,color))
# Add median points
plt.scatter(xs, ys, color=color)
# Add number of runs
# for d_idx, n in enumerate(n_runs):
# plt.gca().annotate(str(n), (xs[d_idx], ys[d_idx]))
# Add lines
plt.plot(xs, ys, label=label, color=color)
# Add quartiles
plt.plot(xs, min_vals, linestyle='dashed', color=color, alpha=0.25)
plt.plot(xs, max_vals, linestyle='dashed', color=color, alpha=0.25)
# break
# plt.fill_between(xs, min_vals, max_vals, alpha=0.25)
# plt.fill_between(xs, min_vals, max_vals, alpha=0.1)
# plt.legend(loc='upper left', bbox_to_anchor=(5.05,1.83))
ax = plt.gca()
# ax.set_xlim([0, 70])
ax.set_ylim([20, 80])
plt.xlabel('$c_g$')
plt.ylabel('epochs to achieve {}% success rate'.format(int(percent_to_achieve*100)))
plt.legend(loc='upper left')
# plt.title("Number of epochs to achieve {}% success rate".format(int(percent_to_achieve*100)), loc='center', pad=-20)
plt.savefig(os.path.join(fig_dir, 'penalty_hyperopt_.png'))
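def build_success_data(paths, csv_name='progress.csv',
                       epoch_key='epoch', success_key='test/success_rate'):
    """Hedged sketch (not part of the original script) of how the `data`
    argument of plot_epochs_success can be assembled: a dict mapping a config
    string to a list of (epoch_array, success_rate_array) pairs, one pair per
    run. The csv name, column names and config string format are assumptions
    inferred from how `data` is consumed above."""
    data = {}
    for curr_path in paths:
        if not os.path.isdir(curr_path):
            continue
        res = load_results(os.path.join(curr_path, csv_name))
        if not res:
            continue
        with open(os.path.join(curr_path, 'params.json'), 'r') as f:
            params = json.load(f)
        config = "curriculum_sampling: {}".format(params.get('curriculum_sampling', 'none'))
        data.setdefault(config, []).append((res[epoch_key], res[success_key]))
    return data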
if __name__ == '__main__':
matplotlib.rcParams['font.family'] = "serif"
matplotlib.rcParams['font.weight'] = 'normal'
new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
parallel_rollouts=4
training_rollout_cycles_per_epoch=64
eval_rollout_cycles_per_epoch = 10
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
parser.add_argument('--smooth', type=int, default=1)
args = parser.parse_args()
    plot_epochs_success(data, 0.5, args.dir)
|
[
"numpy.nanpercentile",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.gca",
"os.path.join",
"numpy.std",
"os.path.exists",
"numpy.genfromtxt",
"seaborn.set",
"numpy.median",
"matplotlib.pyplot.legend",
"numpy.argwhere",
"json.load",
"matplotlib.pyplot.plot",
"os.path.isdir",
"matplotlib.pyplot.scatter",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((126, 135), 'seaborn.set', 'sns.set', ([], {}), '()\n', (133, 135), True, 'import seaborn as sns\n'), ((2994, 3003), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3001, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3076), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4.5)'}), '(figsize=(9, 4.5))\n', (3058, 3076), True, 'import matplotlib.pyplot as plt\n'), ((4769, 4801), 'numpy.mean', 'np.mean', (['uniform_sampling_epochs'], {}), '(uniform_sampling_epochs)\n', (4776, 4801), True, 'import numpy as np\n'), ((4824, 4853), 'numpy.mean', 'np.mean', (['none_sampling_epochs'], {}), '(none_sampling_epochs)\n', (4831, 4853), True, 'import numpy as np\n'), ((4879, 4910), 'numpy.std', 'np.std', (['uniform_sampling_epochs'], {}), '(uniform_sampling_epochs)\n', (4885, 4910), True, 'import numpy as np\n'), ((4933, 4961), 'numpy.std', 'np.std', (['none_sampling_epochs'], {}), '(none_sampling_epochs)\n', (4939, 4961), True, 'import numpy as np\n'), ((4990, 5024), 'numpy.median', 'np.median', (['uniform_sampling_epochs'], {}), '(uniform_sampling_epochs)\n', (4999, 5024), True, 'import numpy as np\n'), ((5050, 5081), 'numpy.median', 'np.median', (['none_sampling_epochs'], {}), '(none_sampling_epochs)\n', (5059, 5081), True, 'import numpy as np\n'), ((5105, 5158), 'numpy.nanpercentile', 'np.nanpercentile', (['uniform_sampling_epochs', '(25)'], {'axis': '(0)'}), '(uniform_sampling_epochs, 25, axis=0)\n', (5121, 5158), True, 'import numpy as np\n'), ((5179, 5229), 'numpy.nanpercentile', 'np.nanpercentile', (['none_sampling_epochs', '(25)'], {'axis': '(0)'}), '(none_sampling_epochs, 25, axis=0)\n', (5195, 5229), True, 'import numpy as np\n'), ((5253, 5306), 'numpy.nanpercentile', 'np.nanpercentile', (['uniform_sampling_epochs', '(75)'], {'axis': '(0)'}), '(uniform_sampling_epochs, 75, axis=0)\n', (5269, 5306), True, 'import numpy as np\n'), ((5327, 5377), 'numpy.nanpercentile', 'np.nanpercentile', (['none_sampling_epochs', '(75)'], {'axis': '(0)'}), '(none_sampling_epochs, 75, axis=0)\n', (5343, 5377), True, 'import numpy as np\n'), ((8220, 8229), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8227, 8229), True, 'import matplotlib.pyplot as plt\n'), ((8287, 8306), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$c_g$"""'], {}), "('$c_g$')\n", (8297, 8306), True, 'import matplotlib.pyplot as plt\n'), ((8401, 8429), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (8411, 8429), True, 'import matplotlib.pyplot as plt\n'), ((9027, 9052), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9050, 9052), False, 'import argparse\n'), ((532, 552), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (546, 552), False, 'import os\n'), ((780, 849), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'skip_header': '(1)', 'filling_values': '(0.0)'}), "(file, delimiter=',', skip_header=1, filling_values=0.0)\n", (793, 849), True, 'import numpy as np\n'), ((874, 961), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'skip_header': '(1)', 'filling_values': '(0.0)', 'dtype': 'dtype'}), "(file, delimiter=',', skip_header=1, filling_values=0.0, dtype\n =dtype)\n", (887, 961), True, 'import numpy as np\n'), ((4008, 4025), 'numpy.median', 'np.median', (['epochs'], {}), '(epochs)\n', (4017, 4025), True, 'import numpy as np\n'), ((4045, 4081), 'numpy.nanpercentile', 'np.nanpercentile', (['epochs', '(25)'], {'axis': '(0)'}), '(epochs, 25, axis=0)\n', (4061, 
4081), True, 'import numpy as np\n'), ((4101, 4137), 'numpy.nanpercentile', 'np.nanpercentile', (['epochs', '(75)'], {'axis': '(0)'}), '(epochs, 75, axis=0)\n', (4117, 4137), True, 'import numpy as np\n'), ((4159, 4174), 'numpy.mean', 'np.mean', (['epochs'], {}), '(epochs)\n', (4166, 4174), True, 'import numpy as np\n'), ((4225, 4239), 'numpy.std', 'np.std', (['epochs'], {}), '(epochs)\n', (4231, 4239), True, 'import numpy as np\n'), ((7588, 7620), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'color': 'color'}), '(xs, ys, color=color)\n', (7599, 7620), True, 'import matplotlib.pyplot as plt\n'), ((7788, 7830), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {'label': 'label', 'color': 'color'}), '(xs, ys, label=label, color=color)\n', (7796, 7830), True, 'import matplotlib.pyplot as plt\n'), ((7863, 7930), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'min_vals'], {'linestyle': '"""dashed"""', 'color': 'color', 'alpha': '(0.25)'}), "(xs, min_vals, linestyle='dashed', color=color, alpha=0.25)\n", (7871, 7930), True, 'import matplotlib.pyplot as plt\n'), ((7939, 8006), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'max_vals'], {'linestyle': '"""dashed"""', 'color': 'color', 'alpha': '(0.25)'}), "(xs, max_vals, linestyle='dashed', color=color, alpha=0.25)\n", (7947, 8006), True, 'import matplotlib.pyplot as plt\n'), ((8569, 8615), 'os.path.join', 'os.path.join', (['fig_dir', '"""penalty_hyperopt_.png"""'], {}), "(fig_dir, 'penalty_hyperopt_.png')\n", (8581, 8615), False, 'import os\n'), ((2227, 2251), 'os.path.isdir', 'os.path.isdir', (['curr_path'], {}), '(curr_path)\n', (2240, 2251), False, 'import os\n'), ((2591, 2603), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2600, 2603), False, 'import json\n'), ((2519, 2557), 'os.path.join', 'os.path.join', (['curr_path', '"""params.json"""'], {}), "(curr_path, 'params.json')\n", (2531, 2557), False, 'import os\n'), ((7350, 7362), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (7358, 7362), True, 'import numpy as np\n'), ((3604, 3642), 'numpy.argwhere', 'np.argwhere', (['(d[1] > percent_to_achieve)'], {}), '(d[1] > percent_to_achieve)\n', (3615, 3642), True, 'import numpy as np\n')]
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import warnings
from math import ceil
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing.label import _encode, _encode_check_unknown
from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples
from sagemaker_sklearn_extension.impute import RobustImputer
class ThresholdOneHotEncoder(OneHotEncoder):
"""Encode categorical integer features as a one-hot numeric array, with optional restrictions on feature encoding.
This adds functionality to encode only if a feature appears more than ``threshold`` number of times. It also adds
functionality to bound the number of categories per feature to ``max_categories``.
This transformer is an extension of ``OneHotEncoder`` from the ``sklearn.preprocessing`` module.
Parameters
----------
categories : 'auto' or a list of lists/arrays of values (default = 'auto')
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith column. The passed categories should not
mix strings and numeric values within a single feature, and should be sorted in case of numeric values.
The used categories can be found in the ``categories_`` attribute.
drop : 'first' or a list/array of shape (n_features,) (default = None)
Specifies a methodology to use to drop one of the categories per feature. This is useful in situations where
perfectly collinear features cause problems, such as when feeding the resulting data into a neural network or
an unregularized regression.
- None : retain all features (the default).
- 'first' : drop the first category in each feature. If only one category is present, the feature will be
dropped entirely.
- array : ``drop[i]`` is the category in feature ``X[:, i]`` that should be dropped.
sparse : boolean (default = True)
Will return sparse matrix if set True else will return an array.
dtype : number type (default = np.float64)
Desired dtype of output.
    threshold : float (default = max(10, n_samples / 1000))
        The threshold for including a value in the encoding of the result. Default value is the maximum of `10` or
        `n_samples / 1000` where `n_samples` is the number of rows of input X. How this parameter is interpreted
        depends on whether it is greater than or equal to 1, or less than 1.
        - If `threshold` is greater than or equal to one, it represents the number of times a value must appear to be
        one hot encoded in the result.
        - If `threshold` is less than one, it represents the fraction of rows which must contain the value for it to be
        one hot encoded in the result. The value is rounded up, so if `threshold` is 0.255 and there are 100 rows, a
        value must appear at least 26 times to be included.
max_categories : int (default = 100)
Maximum number of categories to encode per feature. If the number of observed categories is greater than
``max_categories``, the encoder will take the top ``max_categories`` observed categories, sorted by count.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting (in order of the features in X and corresponding with
the output of ``transform``). This includes the category specified in ``drop`` (if any).
drop_idx_ : array of shape (n_features,)
``drop_idx_[i]`` is the index in ``categories_[i]`` of the category to be dropped for each feature. None if all
the transformed features will be retained.
"""
def __init__(self, categories=None, drop=None, sparse=True, dtype=np.float64, threshold=None, max_categories=100):
super().__init__(None, None, categories, drop, sparse, dtype, "ignore")
self.threshold = threshold
self.max_categories = max_categories
def fit(self, X, y=None):
"""Fit ThresholdOneHotEncoder to X.
Overrides self.categories_ under the following conditions:
- include values that appear at least ``threshold`` number of times
- include the top ``self.max_categories`` number of categories to encode
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to determine the categories of each feature.
Returns
-------
self : ThresholdOneHotEncoder
"""
super().fit(X, y)
assert self.max_categories >= 1
_, n_samples, n_features = self._check_X(X)
if not self.threshold:
threshold = max(10, n_samples / 1000)
elif self.threshold >= 1:
threshold = self.threshold
else:
threshold = ceil(self.threshold * n_samples)
n_features_completely_under_threshold = 0
for j in range(n_features):
# get unique values and their counts
items, counts = np.unique([row[j] for row in X], return_counts=True)
# add items that appear more than threshold times
self.categories_[j] = items[counts >= threshold].astype("O")
if self.categories_[j].size == 0:
n_features_completely_under_threshold += 1
# If no category is above the threshold, then create an unknown category to prevent
# self.transform() from raising an IndexError.
items.sort()
unknown_category = "{}___".format(items[-1])
# It's important to keep the dtype of `self.categories_[j]` as 'U' here because our `unknown_category`
# might end up being longer than any of the seen categories, and that changes the behavior of
# the `self._transform` method.
self.categories_[j] = np.asarray([unknown_category], dtype="U")
elif len(self.categories_[j]) > self.max_categories:
items_and_counts = dict(zip(items, counts))
self.categories_[j] = np.asarray(
sorted(items_and_counts, key=items_and_counts.get, reverse=True)[: self.max_categories], dtype="O"
)
if n_features_completely_under_threshold > 0:
times = "time" if self.threshold == 1 else "times"
warnings.warn(
"{} out of {} features do not have any categories appearing more than threshold={} {}.".format(
n_features_completely_under_threshold, n_features, self.threshold, times
)
)
return self
def _more_tags(self):
return {"X_types": ["categorical"]}
class RobustLabelEncoder(LabelEncoder):
"""Encode labels for seen and unseen labels.
Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
``self.fill_encoded_label_value`` with a default value of n_classes.
Similar to ``sklearn.preprocessing.LabelEncoder`` with additional features.
- ``RobustLabelEncoder`` encodes unseen values with ``fill_encoded_label_value`` or ``fill_label_value``
if ``fill_unseen_labels=True`` for ``transform`` or ``inverse_transform`` respectively
    - ``RobustLabelEncoder`` can use predetermined labels with the parameter ``labels``.
Examples
--------
>>> from sagemaker_sklearn_extension.preprocessing import RobustLabelEncoder
>>> rle = RobustLabelEncoder()
>>> rle.fit([1, 2, 2, 6])
RobustLabelEncoder(fill_encoded_label_value=None,
fill_label_value='<unseen_label>', fill_unseen_labels=True,
labels=None)
>>> rle.classes_
array([1, 2, 6])
>>> rle.transform([1, 1, 2, 6])
array([0, 0, 1, 2])
>>> rle.transform([1, 1, 2, 6, 1738])
array([ 0, 0, 1, 2, 3])
>>> rle.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
>>> rle.inverse_transform([-1738, 0, 0, 1, 2])
['<unseen_label>', 1, 1, 2, 6]
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> rle = RobustLabelEncoder()
>>> rle.fit(["hot dog", "hot dog", "banana"])
RobustLabelEncoder(fill_encoded_label_value=None,
fill_label_value='<unseen_label>', fill_unseen_labels=True,
labels=None)
>>> list(rle.classes_)
['banana', 'hot dog']
>>> rle.transform(["hot dog", "hot dog"])
array([1, 1])
>>> rle.transform(["banana", "llama"])
array([0, 2])
>>> list(rle.inverse_transform([2, 2, 1]))
['<unseen_label>', '<unseen_label>', 'hot dog']
Parameters
----------
labels : list of values (default = None)
List of unique values for label encoding. Overrides ``self.classes_``.
If ``labels`` is None, RobustLabelEncoder will automatically determine the labels.
fill_unseen_labels : boolean (default = True)
Whether or not to fill unseen values during transform or inverse_transform.
fill_encoded_label_value : int (default = n_classes)
Replacement value for unseen labels during ``transform``.
Default value is n_classes.
fill_label_value : str (default = '<unseen_label>')
Replacement value for unseen encoded labels during ``inverse_transform``.
Attributes
----------
classes_ : array of shape (n_classes,)
Holds the label for each class.
"""
def __init__(
self, labels=None, fill_unseen_labels=True, fill_encoded_label_value=None, fill_label_value="<unseen_label>"
):
super().__init__()
self.labels = labels
self.fill_unseen_labels = fill_unseen_labels
self.fill_encoded_label_value = fill_encoded_label_value
self.fill_label_value = fill_label_value
def fit(self, y):
"""Fit label encoder.
Parameters
----------
y : array-like of shape (n_samples,)
Label values.
Returns
-------
self : RobustLabelEncoder.
"""
y = column_or_1d(y, warn=True)
self.classes_ = self._check_labels_and_sort() or _encode(y)
return self
def _check_labels_and_sort(self):
if not self.labels:
return None
if self._is_sorted(self.labels):
return self.labels
warnings.warn("`labels` parameter is expected to be sorted. Sorting `labels`.")
return sorted(self.labels)
def _is_sorted(self, iterable):
return all(iterable[i] <= iterable[i + 1] for i in range(len(iterable) - 1))
def fit_transform(self, y):
"""Fit label encoder and return encoded labels.
``fill_unseen_labels=True`` does nothing in ``fit_transform`` because there will be no unseen labels.
Parameters
----------
y : array-like of shape [n_samples]
Label values.
Returns
-------
y_encoded : array-like of shape [n_samples]
Encoded label values.
"""
y = column_or_1d(y, warn=True)
sorted_labels = self._check_labels_and_sort()
self.classes_, y_encoded = (
_encode(y, uniques=sorted_labels, encode=True) if sorted_labels else _encode(y, encode=True)
)
return y_encoded
def transform(self, y):
"""Transform labels to normalized encoding.
If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_encoded_label_value`` for unseen values.
Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
``self.fill_encoded_label_value`` with a default value of n_classes.
Parameters
----------
y : array-like of shape [n_samples]
Label values.
Returns
-------
y_encoded : array-like of shape [n_samples]
Encoded label values.
"""
check_is_fitted(self, "classes_")
y = column_or_1d(y, warn=True)
# transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
if self.fill_unseen_labels:
_, mask = _encode_check_unknown(y, self.classes_, return_mask=True)
y_encoded = np.searchsorted(self.classes_, y)
fill_encoded_label_value = self.fill_encoded_label_value or len(self.classes_)
y_encoded[~mask] = fill_encoded_label_value
else:
_, y_encoded = _encode(y, uniques=self.classes_, encode=True)
return y_encoded
def inverse_transform(self, y):
"""Transform labels back to original encoding.
If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_label_value`` for unseen values.
Parameters
----------
y : numpy array of shape [n_samples]
Encoded label values.
Returns
-------
y_decoded : numpy array of shape [n_samples]
Label values.
"""
check_is_fitted(self, "classes_")
y = column_or_1d(y, warn=True)
if y.dtype.kind not in ("i", "u"):
try:
                y = y.astype(float).astype(int)
except ValueError:
raise ValueError("`y` contains values not convertible to integer.")
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
labels = np.arange(len(self.classes_))
diff = np.setdiff1d(y, labels)
        if diff.size > 0 and not self.fill_unseen_labels:
raise ValueError("y contains previously unseen labels: %s" % str(diff))
y_decoded = [self.classes_[idx] if idx in labels else self.fill_label_value for idx in y]
return y_decoded
class NALabelEncoder(BaseEstimator, TransformerMixin):
"""Encoder for transforming labels to NA values.
Uses `RobustImputer` on 1D inputs of labels
- Uses `is_finite_numeric` mask for encoding by default
- Only uses the `RobustImputer` strategy `constant` and fills using `np.nan`
- Default behavior encodes non-float and non-finite values as nan values in
the target column of a given regression dataset
Parameters
----------
mask_function : callable -> np.array, dtype('bool') (default=None)
A vectorized python function, accepts np.array, returns np.array
with dtype('bool')
For each value, if mask_function(val) == False, that value will
be imputed. mask_function is used to create a boolean mask that determines
which values in the input to impute.
Use np.vectorize to vectorize singular python functions.
"""
def __init__(self, mask_function=None):
self.mask_function = mask_function
def fit(self, y):
"""Fit the encoder on y.
Parameters
----------
y : {array-like}, shape (n_samples,)
Input column, where `n_samples` is the number of samples.
Returns
-------
self : NALabelEncoder
"""
self.model_ = RobustImputer(strategy="constant", fill_values=np.nan, mask_function=self.mask_function)
y = y.reshape(-1, 1)
self.model_.fit(X=y)
return self
def transform(self, y):
"""Encode all non-float and non-finite values in y as NA values.
Parameters
----------
y : {array-like}, shape (n_samples)
The input column to encode.
Returns
-------
yt : {ndarray}, shape (n_samples,)
The encoded input column.
"""
check_is_fitted(self, "model_")
y = y.reshape(-1, 1)
return self.model_.transform(y).flatten()
def inverse_transform(self, y):
"""Returns input column"""
return y
def _more_tags(self):
return {"X_types": ["1dlabels"]}
|
[
"sklearn.preprocessing.label._encode_check_unknown",
"math.ceil",
"numpy.asarray",
"numpy.setdiff1d",
"sklearn.preprocessing.label._encode",
"sklearn.utils.validation._num_samples",
"sklearn.utils.validation.check_is_fitted",
"numpy.searchsorted",
"sklearn.utils.validation.column_or_1d",
"sagemaker_sklearn_extension.impute.RobustImputer",
"numpy.array",
"warnings.warn",
"numpy.unique"
] |
[((10817, 10843), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (10829, 10843), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((11103, 11182), 'warnings.warn', 'warnings.warn', (['"""`labels` parameter is expected to be sorted. Sorting `labels`."""'], {}), "('`labels` parameter is expected to be sorted. Sorting `labels`.')\n", (11116, 11182), False, 'import warnings\n'), ((11800, 11826), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (11812, 11826), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((12684, 12717), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""classes_"""'], {}), "(self, 'classes_')\n", (12699, 12717), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((12730, 12756), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (12742, 12756), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((13761, 13794), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""classes_"""'], {}), "(self, 'classes_')\n", (13776, 13794), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((13807, 13833), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (13819, 13833), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((14251, 14274), 'numpy.setdiff1d', 'np.setdiff1d', (['y', 'labels'], {}), '(y, labels)\n', (14263, 14274), True, 'import numpy as np\n'), ((15884, 15977), 'sagemaker_sklearn_extension.impute.RobustImputer', 'RobustImputer', ([], {'strategy': '"""constant"""', 'fill_values': 'np.nan', 'mask_function': 'self.mask_function'}), "(strategy='constant', fill_values=np.nan, mask_function=self.\n mask_function)\n", (15897, 15977), False, 'from sagemaker_sklearn_extension.impute import RobustImputer\n'), ((16410, 16441), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""model_"""'], {}), "(self, 'model_')\n", (16425, 16441), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((5740, 5792), 'numpy.unique', 'np.unique', (['[row[j] for row in X]'], {'return_counts': '(True)'}), '([row[j] for row in X], return_counts=True)\n', (5749, 5792), True, 'import numpy as np\n'), ((10901, 10911), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {}), '(y)\n', (10908, 10911), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((11930, 11976), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {'uniques': 'sorted_labels', 'encode': '(True)'}), '(y, uniques=sorted_labels, encode=True)\n', (11937, 11976), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((11999, 12022), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {'encode': '(True)'}), '(y, encode=True)\n', (12006, 12022), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((12819, 12834), 'sklearn.utils.validation._num_samples', '_num_samples', (['y'], {}), '(y)\n', (12831, 12834), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((12860, 12872), 'numpy.array', 'np.array', 
(['[]'], {}), '([])\n', (12868, 12872), True, 'import numpy as np\n'), ((12932, 12989), 'sklearn.preprocessing.label._encode_check_unknown', '_encode_check_unknown', (['y', 'self.classes_'], {'return_mask': '(True)'}), '(y, self.classes_, return_mask=True)\n', (12953, 12989), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((13014, 13047), 'numpy.searchsorted', 'np.searchsorted', (['self.classes_', 'y'], {}), '(self.classes_, y)\n', (13029, 13047), True, 'import numpy as np\n'), ((13236, 13282), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {'uniques': 'self.classes_', 'encode': '(True)'}), '(y, uniques=self.classes_, encode=True)\n', (13243, 13282), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((14134, 14149), 'sklearn.utils.validation._num_samples', '_num_samples', (['y'], {}), '(y)\n', (14146, 14149), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((14175, 14187), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14183, 14187), True, 'import numpy as np\n'), ((5542, 5574), 'math.ceil', 'ceil', (['(self.threshold * n_samples)'], {}), '(self.threshold * n_samples)\n', (5546, 5574), False, 'from math import ceil\n'), ((6603, 6644), 'numpy.asarray', 'np.asarray', (['[unknown_category]'], {'dtype': '"""U"""'}), "([unknown_category], dtype='U')\n", (6613, 6644), True, 'import numpy as np\n')]
|
import netCDF4
import numpy
import vtk
from reader_base import ReaderBase
class LatLonReader(ReaderBase):
def __init__(self, filename, padding=0):
"""
Constructor
@param filename UM netCDF file
@param padding number of extra cells to add on the high end of longitudes
        @note padding adds extra cells on the high end of longitudes
"""
super(LatLonReader, self).__init__()
# read file
nc = netCDF4.Dataset(filename, 'r')
        lons_units = ''
        lats_units = ''
# gather all the latitudes and longitudes
lats, lons = None, None
lats_0, lons_0 = None, None
for varname in nc.variables:
var = nc.variables[varname]
if hasattr(var, 'standard_name'):
if var.standard_name == 'longitude':
if varname.find('_0') >= 0:
lons_0 = var[:]
else:
lons = var[:]
lons_units = var.units
elif var.standard_name == 'latitude':
if varname.find('_0') >= 0:
lats_0 = var[:]
else:
lats = var[:]
lats_units = var.units
ncells_lat, ncells_lon = len(lats_0), len(lons_0)
ncells = ncells_lat * (ncells_lon + padding)
# construct the unstructured grid as a collection of
# 2D cells
pointArray = numpy.zeros((4 * ncells, 3))
self.vtk['pointArray'] = pointArray
pointData = self.vtk['pointData']
pointData.SetNumberOfComponents(3)
pointData.SetNumberOfTuples(4 * ncells)
pointData.SetVoidArray(pointArray, 4 * ncells * 3, 1)
points = self.vtk['points']
points.SetNumberOfPoints(4 * ncells)
points.SetData(pointData)
grid = self.vtk['grid']
grid.Allocate(ncells, 1)
ptIds = vtk.vtkIdList()
ptIds.SetNumberOfIds(4)
periodicity_length = 360. # in deg
icell = 0
for j0 in range(ncells_lat):
j1 = j0 + 1
for i in range(ncells_lon + padding):
i0 = (i + 0) % ncells_lon
i1 = (i + 1) % ncells_lon
offset0 = periodicity_length * ((i + 0) // ncells_lon)
offset1 = periodicity_length * ((i + 1) // ncells_lon)
lon00, lat00 = lons[i0] + offset0, lats[j0]
lon10, lat10 = lons[i1] + offset1, lats[j0]
lon11, lat11 = lons[i1] + offset1, lats[j1]
lon01, lat01 = lons[i0] + offset0, lats[j1]
k0 = 4*icell
k1, k2, k3 = k0 + 1, k0 + 2, k0 + 3
# storing coords as lon, lat, 0
pointArray[k0, :] = lon00, lat00, 0.
pointArray[k1, :] = lon10, lat10, 0.
pointArray[k2, :] = lon11, lat11, 0.
pointArray[k3, :] = lon01, lat01, 0.
ptIds.SetId(0, k0)
ptIds.SetId(1, k1)
ptIds.SetId(2, k2)
ptIds.SetId(3, k3)
grid.InsertNextCell(vtk.VTK_QUAD, ptIds)
icell += 1
grid.SetPoints(points)
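def periodic_lon_cell(i, ncells_lon, lons, periodicity_length=360.):
    """Hedged standalone sketch (not part of the original reader) of the
    wrap-around arithmetic used in the loop above when `padding` extends the
    grid past the last longitude: the column index wraps modulo ncells_lon
    while the longitude value keeps growing by the periodicity length, so
    padded cells sit at lon >= 360 deg instead of jumping back to 0."""
    i0 = (i + 0) % ncells_lon
    i1 = (i + 1) % ncells_lon
    offset0 = periodicity_length * ((i + 0) // ncells_lon)
    offset1 = periodicity_length * ((i + 1) // ncells_lon)
    return lons[i0] + offset0, lons[i1] + offset1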
###############################################################################
def main():
import argparse
from numpy import pi, cos, sin, exp
parser = argparse.ArgumentParser(description='Read ugrid file')
parser.add_argument('-i', dest='input', default='ll.nc', help='Specify UM input netCDF file')
parser.add_argument('-p', dest='padding', type=int, default=0,
help='Specify by how much the grid should be padded on the high lon side')
parser.add_argument('-V', dest='vtk_file', default='lonlat.vtk', help='Save grid in VTK file')
parser.add_argument('-b', dest='binary', action='store_true', help='Write binary file')
parser.add_argument('-stream', dest='streamFunc', default='x',
help='Stream function as a function of x (longitude in rad) and y (latitude in rad)')
args = parser.parse_args()
reader = LatLonReader(filename=args.input, padding=args.padding)
if args.streamFunc:
# compute the edge velocity if user provides the stream function
x, y = reader.getLonLat()
streamData = eval(args.streamFunc)
edgeVel = reader.getEdgeFieldFromStreamData(streamData)
reader.setEdgeField('edge_integrated_velocity', edgeVel)
loopIntegrals = reader.getLoopIntegralsFromStreamData(streamData)
reader.setLoopIntegrals('cell_loop_integrals', loopIntegrals)
if args.vtk_file:
reader.saveToVtkFile(args.vtk_file, binary=args.binary)
if __name__ == '__main__':
main()
|
[
"netCDF4.Dataset",
"numpy.zeros",
"argparse.ArgumentParser",
"vtk.vtkIdList"
] |
[((3437, 3491), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Read ugrid file"""'}), "(description='Read ugrid file')\n", (3460, 3491), False, 'import argparse\n'), ((475, 505), 'netCDF4.Dataset', 'netCDF4.Dataset', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (490, 505), False, 'import netCDF4\n'), ((1508, 1536), 'numpy.zeros', 'numpy.zeros', (['(4 * ncells, 3)'], {}), '((4 * ncells, 3))\n', (1519, 1536), False, 'import numpy\n'), ((1975, 1990), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (1988, 1990), False, 'import vtk\n')]
|
import os, glob, tempfile, warnings
import numpy as np
from traitlets import (HasTraits,
                       Integer,
                       Unicode,
                       Float,
                       Instance,
                       Dict,
                       Bool,
                       default)
# Rpy
try:
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('library(knockoff); library(glmnet)')
from rpy2 import rinterface
except ImportError:
warnings.warn("rpy2 with knockoff and glmnet unavailable")
def null_print(x):
pass
# Knockoff selection
methods = {}
class generic_method(HasTraits):
need_CV = False
selectiveR_method = False
wide_ok = True # ok for p>= n?
# Traits
q = Float(0.2)
method_name = Unicode('Generic method')
model_target = Unicode()
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
(self.X,
self.Y,
self.l_theory,
self.l_min,
self.l_1se,
self.sigma_reid) = (X,
Y,
l_theory,
l_min,
l_1se,
sigma_reid)
def select(self):
raise NotImplementedError('abstract method')
@classmethod
def register(cls):
methods[cls.__name__] = cls
def selected_target(self, active, beta):
C = self.feature_cov[active]
Q = C[:,active]
return np.linalg.inv(Q).dot(C.dot(beta))
def full_target(self, active, beta):
return beta[active]
def get_target(self, active, beta):
if self.model_target not in ['selected', 'full', 'debiased']:
raise ValueError('Gaussian methods only have selected or full targets')
if self.model_target in ['full', 'debiased']:
return self.full_target(active, beta)
else:
return self.selected_target(active, beta)
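def _selected_target_example():
    """Hedged numeric sketch (not part of the original module) of the
    'selected' target computed above: for an active set E it is
    inv(Sigma[E, E]) @ Sigma[E, :] @ beta, i.e. the best linear approximation
    of the full signal using only the selected features. The matrices below
    are made up for illustration."""
    Sigma = np.array([[1.0, 0.5, 0.2],
                      [0.5, 1.0, 0.3],
                      [0.2, 0.3, 1.0]])
    beta = np.array([2.0, 0.0, -1.0])
    active = np.array([0, 2])
    C = Sigma[active]        # rows of the active set
    Q = C[:, active]         # Sigma restricted to the active block
    return np.linalg.inv(Q).dot(C.dot(beta))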
class lasso_glmnet(generic_method):
def select(self, CV=True, seed=0):
numpy2ri.activate()
rpy.r.assign('X', self.X.copy())
rpy.r.assign('Y', self.Y.copy())
rpy.r('X = as.matrix(X)')
rpy.r('Y = as.numeric(Y)')
rpy.r('set.seed(%d)' % seed)
rpy.r('cvG = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')
rpy.r("L1 = cvG[['lambda.min']]")
rpy.r("L2 = cvG[['lambda.1se']]")
if CV:
rpy.r("L = L1")
else:
rpy.r("L = 0.99 * L2")
rpy.r("G = glmnet(X, Y, intercept=FALSE, standardize=FALSE)")
n, p = self.X.shape
L = rpy.r('L')
rpy.r('B = as.numeric(coef(G, s=L, exact=TRUE, x=X, y=Y))[-1]')
B = np.asarray(rpy.r('B'))
selected = (B != 0)
if selected.sum():
V = np.nonzero(selected)[0]
return V, V
else:
return [], []
lasso_glmnet.register()
def factor_knockoffs(feature_cov, method='asdp'):
numpy2ri.activate()
rpy.r.assign('Sigma', feature_cov)
rpy.r.assign('method', method)
rpy.r('''
# Compute the Cholesky -- from create.gaussian
Sigma = as.matrix(Sigma)
diag_s = diag(switch(method, equi = create.solve_equi(Sigma),
sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))
if (is.null(dim(diag_s))) {
diag_s = diag(diag_s, length(diag_s))
}
SigmaInv_s = solve(Sigma, diag_s)
Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s
chol_k = chol(Sigma_k)
''')
knockoff_chol = np.asarray(rpy.r('chol_k'))
SigmaInv_s = np.asarray(rpy.r('SigmaInv_s'))
diag_s = np.asarray(rpy.r('diag_s'))
np.savez('.knockoff_factorizations/%s.npz' % (os.path.split(tempfile.mkstemp()[1])[1],),
method=method,
feature_cov=feature_cov,
knockoff_chol=knockoff_chol)
return knockoff_chol
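def sample_gaussian_knockoffs(X, knockoff_chol, SigmaInv_s, seed=0):
    """Hedged sketch (not part of the original module) of how the factors
    computed in factor_knockoffs can be used to draw model-X Gaussian
    knockoffs, assuming the rows of X are approximately centred Gaussian with
    covariance `feature_cov`: Xk | X ~ N(X - X @ SigmaInv_s, 2D - D Sigma^-1 D),
    where the conditional covariance enters through its upper-triangular
    Cholesky factor `knockoff_chol` (as returned by R's chol)."""
    rng = np.random.RandomState(seed)
    mu = X - X.dot(SigmaInv_s)              # conditional mean of the knockoffs
    Z = rng.standard_normal(X.shape)        # i.i.d. standard normal noise
    return mu + Z.dot(knockoff_chol)        # rows have covariance chol_k' chol_k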
def cv_glmnet_lam(X, Y, seed=0):
"""
Some calculations that can be reused by methods:
lambda.min, lambda.1se, lambda.theory and Reid et al. estimate of noise
"""
numpy2ri.activate()
rpy.r('set.seed(%d)' % seed)
rpy.r.assign('X', X.copy())
rpy.r.assign('Y', Y.copy())
rpy.r('X=as.matrix(X)')
rpy.r('Y=as.numeric(Y)')
rpy.r('set.seed(1)')
rpy.r('G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')
rpy.r("L = G[['lambda.min']]")
rpy.r("L1 = G[['lambda.1se']]")
L = rpy.r('L')
L1 = rpy.r('L1')
numpy2ri.deactivate()
return float(1.00001 * L[0]), float(1.00001 * L1[0]),
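def _select_with_glmnet_example(X, Y, feature_cov):
    """Hedged end-to-end sketch (not part of the original module, requires a
    working rpy2/glmnet install): compute the cross-validated lambdas, build a
    lasso_glmnet method object and run its selection. The l_theory and
    sigma_reid slots are filled with placeholders because select() does not
    use them."""
    l_min, l_1se = cv_glmnet_lam(X, Y)
    lasso_glmnet.setup(feature_cov)
    method = lasso_glmnet(X, Y, l_theory=None, l_min=l_min, l_1se=l_1se, sigma_reid=None)
    selected, _ = method.select(CV=True)
    return selected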
|
[
"rpy2.robjects.numpy2ri.activate",
"traitlets.Float",
"tempfile.mkstemp",
"rpy2.robjects.r",
"traitlets.Unicode",
"numpy.nonzero",
"rpy2.robjects.r.assign",
"numpy.linalg.inv",
"rpy2.robjects.numpy2ri.deactivate",
"warnings.warn"
] |
[((433, 476), 'rpy2.robjects.r', 'rpy.r', (['"""library(knockoff); library(glmnet)"""'], {}), "('library(knockoff); library(glmnet)')\n", (438, 476), True, 'import rpy2.robjects as rpy\n'), ((799, 809), 'traitlets.Float', 'Float', (['(0.2)'], {}), '(0.2)\n', (804, 809), False, 'from traitlets import HasTraits, Integer, Unicode, Float, Integer, Instance, Dict, Bool, default\n'), ((828, 853), 'traitlets.Unicode', 'Unicode', (['"""Generic method"""'], {}), "('Generic method')\n", (835, 853), False, 'from traitlets import HasTraits, Integer, Unicode, Float, Integer, Instance, Dict, Bool, default\n'), ((873, 882), 'traitlets.Unicode', 'Unicode', ([], {}), '()\n', (880, 882), False, 'from traitlets import HasTraits, Integer, Unicode, Float, Integer, Instance, Dict, Bool, default\n'), ((3115, 3134), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (3132, 3134), False, 'from rpy2.robjects import numpy2ri\n'), ((3139, 3173), 'rpy2.robjects.r.assign', 'rpy.r.assign', (['"""Sigma"""', 'feature_cov'], {}), "('Sigma', feature_cov)\n", (3151, 3173), True, 'import rpy2.robjects as rpy\n'), ((3178, 3208), 'rpy2.robjects.r.assign', 'rpy.r.assign', (['"""method"""', 'method'], {}), "('method', method)\n", (3190, 3208), True, 'import rpy2.robjects as rpy\n'), ((3213, 3671), 'rpy2.robjects.r', 'rpy.r', (['"""\n\n # Compute the Cholesky -- from create.gaussian\n\n Sigma = as.matrix(Sigma)\n diag_s = diag(switch(method, equi = create.solve_equi(Sigma), \n sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))\n if (is.null(dim(diag_s))) {\n diag_s = diag(diag_s, length(diag_s))\n }\n SigmaInv_s = solve(Sigma, diag_s)\n Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s\n chol_k = chol(Sigma_k)\n """'], {}), '(\n """\n\n # Compute the Cholesky -- from create.gaussian\n\n Sigma = as.matrix(Sigma)\n diag_s = diag(switch(method, equi = create.solve_equi(Sigma), \n sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))\n if (is.null(dim(diag_s))) {\n diag_s = diag(diag_s, length(diag_s))\n }\n SigmaInv_s = solve(Sigma, diag_s)\n Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s\n chol_k = chol(Sigma_k)\n """\n )\n', (3218, 3671), True, 'import rpy2.robjects as rpy\n'), ((4217, 4236), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (4234, 4236), False, 'from rpy2.robjects import numpy2ri\n'), ((4241, 4269), 'rpy2.robjects.r', 'rpy.r', (["('set.seed(%d)' % seed)"], {}), "('set.seed(%d)' % seed)\n", (4246, 4269), True, 'import rpy2.robjects as rpy\n'), ((4338, 4361), 'rpy2.robjects.r', 'rpy.r', (['"""X=as.matrix(X)"""'], {}), "('X=as.matrix(X)')\n", (4343, 4361), True, 'import rpy2.robjects as rpy\n'), ((4366, 4390), 'rpy2.robjects.r', 'rpy.r', (['"""Y=as.numeric(Y)"""'], {}), "('Y=as.numeric(Y)')\n", (4371, 4390), True, 'import rpy2.robjects as rpy\n'), ((4395, 4415), 'rpy2.robjects.r', 'rpy.r', (['"""set.seed(1)"""'], {}), "('set.seed(1)')\n", (4400, 4415), True, 'import rpy2.robjects as rpy\n'), ((4420, 4484), 'rpy2.robjects.r', 'rpy.r', (['"""G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)"""'], {}), "('G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')\n", (4425, 4484), True, 'import rpy2.robjects as rpy\n'), ((4489, 4519), 'rpy2.robjects.r', 'rpy.r', (['"""L = G[[\'lambda.min\']]"""'], {}), '("L = G[[\'lambda.min\']]")\n', (4494, 4519), True, 'import rpy2.robjects as rpy\n'), ((4524, 4555), 'rpy2.robjects.r', 'rpy.r', (['"""L1 = G[[\'lambda.1se\']]"""'], {}), '("L1 = G[[\'lambda.1se\']]")\n', (4529, 4555), True, 'import rpy2.robjects as 
rpy\n'), ((4564, 4574), 'rpy2.robjects.r', 'rpy.r', (['"""L"""'], {}), "('L')\n", (4569, 4574), True, 'import rpy2.robjects as rpy\n'), ((4584, 4595), 'rpy2.robjects.r', 'rpy.r', (['"""L1"""'], {}), "('L1')\n", (4589, 4595), True, 'import rpy2.robjects as rpy\n'), ((4600, 4621), 'rpy2.robjects.numpy2ri.deactivate', 'numpy2ri.deactivate', ([], {}), '()\n', (4619, 4621), False, 'from rpy2.robjects import numpy2ri\n'), ((533, 591), 'warnings.warn', 'warnings.warn', (['"""rpy2 with knockoff and glmnet unavailable"""'], {}), "('rpy2 with knockoff and glmnet unavailable')\n", (546, 591), False, 'import os, glob, tempfile, warnings\n'), ((2187, 2206), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (2204, 2206), False, 'from rpy2.robjects import numpy2ri\n'), ((2298, 2323), 'rpy2.robjects.r', 'rpy.r', (['"""X = as.matrix(X)"""'], {}), "('X = as.matrix(X)')\n", (2303, 2323), True, 'import rpy2.robjects as rpy\n'), ((2332, 2358), 'rpy2.robjects.r', 'rpy.r', (['"""Y = as.numeric(Y)"""'], {}), "('Y = as.numeric(Y)')\n", (2337, 2358), True, 'import rpy2.robjects as rpy\n'), ((2367, 2395), 'rpy2.robjects.r', 'rpy.r', (["('set.seed(%d)' % seed)"], {}), "('set.seed(%d)' % seed)\n", (2372, 2395), True, 'import rpy2.robjects as rpy\n'), ((2404, 2470), 'rpy2.robjects.r', 'rpy.r', (['"""cvG = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)"""'], {}), "('cvG = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')\n", (2409, 2470), True, 'import rpy2.robjects as rpy\n'), ((2479, 2512), 'rpy2.robjects.r', 'rpy.r', (['"""L1 = cvG[[\'lambda.min\']]"""'], {}), '("L1 = cvG[[\'lambda.min\']]")\n', (2484, 2512), True, 'import rpy2.robjects as rpy\n'), ((2521, 2554), 'rpy2.robjects.r', 'rpy.r', (['"""L2 = cvG[[\'lambda.1se\']]"""'], {}), '("L2 = cvG[[\'lambda.1se\']]")\n', (2526, 2554), True, 'import rpy2.robjects as rpy\n'), ((2655, 2716), 'rpy2.robjects.r', 'rpy.r', (['"""G = glmnet(X, Y, intercept=FALSE, standardize=FALSE)"""'], {}), "('G = glmnet(X, Y, intercept=FALSE, standardize=FALSE)')\n", (2660, 2716), True, 'import rpy2.robjects as rpy\n'), ((2757, 2767), 'rpy2.robjects.r', 'rpy.r', (['"""L"""'], {}), "('L')\n", (2762, 2767), True, 'import rpy2.robjects as rpy\n'), ((2776, 2839), 'rpy2.robjects.r', 'rpy.r', (['"""B = as.numeric(coef(G, s=L, exact=TRUE, x=X, y=Y))[-1]"""'], {}), "('B = as.numeric(coef(G, s=L, exact=TRUE, x=X, y=Y))[-1]')\n", (2781, 2839), True, 'import rpy2.robjects as rpy\n'), ((3693, 3708), 'rpy2.robjects.r', 'rpy.r', (['"""chol_k"""'], {}), "('chol_k')\n", (3698, 3708), True, 'import rpy2.robjects as rpy\n'), ((3738, 3757), 'rpy2.robjects.r', 'rpy.r', (['"""SigmaInv_s"""'], {}), "('SigmaInv_s')\n", (3743, 3757), True, 'import rpy2.robjects as rpy\n'), ((3783, 3798), 'rpy2.robjects.r', 'rpy.r', (['"""diag_s"""'], {}), "('diag_s')\n", (3788, 3798), True, 'import rpy2.robjects as rpy\n'), ((2582, 2597), 'rpy2.robjects.r', 'rpy.r', (['"""L = L1"""'], {}), "('L = L1')\n", (2587, 2597), True, 'import rpy2.robjects as rpy\n'), ((2624, 2646), 'rpy2.robjects.r', 'rpy.r', (['"""L = 0.99 * L2"""'], {}), "('L = 0.99 * L2')\n", (2629, 2646), True, 'import rpy2.robjects as rpy\n'), ((2863, 2873), 'rpy2.robjects.r', 'rpy.r', (['"""B"""'], {}), "('B')\n", (2868, 2873), True, 'import rpy2.robjects as rpy\n'), ((1630, 1646), 'numpy.linalg.inv', 'np.linalg.inv', (['Q'], {}), '(Q)\n', (1643, 1646), True, 'import numpy as np\n'), ((2946, 2966), 'numpy.nonzero', 'np.nonzero', (['selected'], {}), '(selected)\n', (2956, 2966), True, 'import numpy as np\n'), ((3864, 3882), 
'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (3880, 3882), False, 'import os, glob, tempfile, warnings\n')]
|
import numpy as np
import warnings
from copy import deepcopy
from scipy.signal import fftconvolve, medfilt
import astropy.units as u
import astropy.constants as cst
from astropy.io import fits, registry
from astropy.wcs import WCS
from astropy.nddata import NDDataArray, StdDevUncertainty, InverseVariance
from astropy.nddata.ccddata import _known_uncertainties
from astropy.nddata.ccddata import _unc_name_to_cls, _unc_cls_to_name, _uncertainty_unit_equivalent_to_parent
def forman(M):
"""Return Forman window.
The Forman window is defined in (E-4) [1]_.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
numpy.bartlett, numpy.blackman, numpy.hamming, numpy.kaiser, numpy.hanning
References
----------
    .. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, float)
n = np.arange(0, M)
return (1 - ((n - M / 2) / M) ** 2) ** 2
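def _forman_example(M=32):
    """Hedged usage sketch (not part of the original module): taper a
    synthetic interferogram with the Forman window before inversion. As
    implemented above, the taper peaks near the centre sample and decreases
    toward both ends of the array; the random interferogram is purely
    illustrative."""
    w = forman(M)
    itg = np.random.RandomState(0).normal(size=M)
    return itg * w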
class FTSData(NDDataArray):
"""Class to handle OPD or spectral FTS cubes.
Parameters
----------
data : `~numpy.ndarray` or `FTSData`
        The actual data contained in this `FTSData` object. Note that this
        will always be copied by *reference*, so you should make a copy of
        the ``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : `~numpy.ndarray`-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
hits : `~numpy.ndarray`-like, optional
Hit map for the data, given as a int Numpy array or any object that
can be converted to a int Numpy array with a shape
matching that of the data.
flags : `~numpy.ndarray`-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : `~astropy.wcs.WCS`, optional
WCS-object containing the world coordinate system for the data.
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
"""
__opd_idx = None
__freq_idx = None
hits = None
def __init__(self, *args, hits=None, **kwargs):
# Initialize with the parent...
super().__init__(*args, **kwargs)
        # Additional data
if hits is not None:
self.hits = np.array(hits).astype(int)
# Set Internal indexes on the wcs object
if self.wcs is not None:
opd_idx = np.argwhere("opd" == np.char.lower(self.wcs.wcs.ctype)).squeeze()
self.__opd_idx = opd_idx.item() if opd_idx.size == 1 else None
freq_idx = np.argwhere("freq" == np.char.lower(self.wcs.wcs.ctype)).squeeze()
self.__freq_idx = freq_idx.item() if freq_idx.size == 1 else None
@property
def __is_opd(self):
return self.__opd_idx is not None
@property
def __is_freq(self):
return self.__freq_idx is not None
@property
def opd_axis(self):
if self.__is_opd:
return self.wcs.sub([self.__opd_idx + 1]).pixel_to_world(np.arange(self.shape[0]))
@property
def spectral_axis(self):
if self.__is_freq:
return self.wcs.sub([self.__freq_idx + 1]).pixel_to_world(np.arange(self.shape[0]))
@property
def _is_doublesided(self):
"""Return True is the cube is double sided, also enforce positive increments."""
return (np.sum(self.wcs.sub([self.__opd_idx + 1]).all_pix2world([0, self.shape[0] - 1], 0)) == 0) & (
self.wcs.wcs.cdelt[self.__opd_idx] > 0
)
@property
def _is_onesided(self):
"""Return True is the cube is one sided, also enforce positive increments."""
return (np.sum(self.wcs.sub([self.__opd_idx + 1]).all_pix2world(0, 0)) == 0) & (
self.wcs.wcs.cdelt[self.__opd_idx] > 0
)
# from CCDData
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
def _extract_doublesided(self):
"""Return the largest doublesided OPD cube from the data.
Returns
-------
output : FTSData
A doublesided interferograms cube
"""
assert self.__is_opd, "Intput should be OPD cube"
opd_wcs = self.wcs.sub([self.__opd_idx + 1])
opds = opd_wcs.all_pix2world(np.arange(self.data.shape[0]), 0)[0]
_maxopd = np.min([-opds.min(), opds.max()])
signed = np.sign(opd_wcs.wcs.cdelt[0])
slice_idx = opd_wcs.all_world2pix([-signed * _maxopd, signed * _maxopd], 0)[0].astype(int)
slice_idx += [0, 1] # Inclusive end
_slice = slice(*slice_idx)
wcs = deepcopy(self.wcs)
wcs.wcs.crpix[self.__opd_idx] -= _slice.start
meta = deepcopy(self.meta)
meta["HISTORY"] = "extract_doublesided"
mask = self.mask[_slice] if self.mask is not None else None
hits = self.hits[_slice] if self.hits is not None else None
result = self.__class__(self.data[_slice], wcs=wcs, mask=mask, meta=meta, hits=hits)
return result
def _to_onesided(self):
"""Return a onesided OPD cube from the data.
Returns
-------
output : FTSData
A onesided interferograms cube
"""
zpd_idx = self.wcs.sub([self.__opd_idx + 1]).world_to_pixel(0 * self.wcs.wcs.cunit[self.__opd_idx]).astype(int)
extrema_opd = np.abs(self.wcs.sub([self.__opd_idx + 1]).pixel_to_world([0, self.shape[0] - 1]))
if extrema_opd[1] >= extrema_opd[0]:
# Positive single sided : longer right hand side...
# Or doublesided
extract_slice = slice(zpd_idx, None)
os_slice = slice(0, zpd_idx + 1)
db_slice = slice(zpd_idx, None, -1)
elif extrema_opd[1] < extrema_opd[0]:
# Negative single sided : longer left hand side...
# Or double sided
extract_slice = slice(zpd_idx, None, -1)
os_slice = slice(0, self.data.shape[0] - zpd_idx)
db_slice = slice(zpd_idx, None)
# TODO: self.mask ??
# Extract the longest part
onesided_itg = self.data[extract_slice].copy()
onesided_hits = self.hits[extract_slice].copy() if self.hits is not None else None
# Take the mean with the other half on the double sided part
onesided_itg[os_slice] += self.data[db_slice]
onesided_itg[os_slice] /= 2
if onesided_hits is not None:
onesided_hits[os_slice] += self.hits[db_slice]
onesided_hits[os_slice] /= 2
wcs = deepcopy(self.wcs)
wcs.wcs.crpix[self.__opd_idx] = 1
output = FTSData(onesided_itg, wcs=wcs, meta=self.meta, hits=onesided_hits)
return output
def __invert_doublesided(self, apodization_function=None):
"""Invert a doublesided interferograms cube.
Parameters
----------
apodization_function : func
Apodization function to be used on the interferograms (default: None)
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
        Choice can be made among the functions available in numpy at [1]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
assert self.__is_opd, "Intput should be OPD cube"
assert self._is_doublesided, "Not a doublesided interferogram cube"
cdelt_opd = self.wcs.wcs.cdelt[self.__opd_idx]
cunit_opd = u.Unit(self.wcs.wcs.cunit[self.__opd_idx])
naxis_opd = self.shape[0]
# freq = np.fft.fftfreq(naxis_opd, d=cdelt_opd * cunit_opd) * cst.c
if apodization_function is None:
apodization_function = np.ones
_cube = np.ma.array(self.data, mask=self.mask).filled(0) * np.expand_dims(
apodization_function(naxis_opd), tuple(np.arange(1, self.ndim))
)
# Spencer 2005 Eq 2.29, direct fft
spectra = np.fft.fft(np.fft.ifftshift(_cube, axes=0), axis=0)
# Factor of 2 because we used the fourier transform
spectra *= (4 * cdelt_opd * cunit_opd).decompose().value
spectra = np.fft.fftshift(spectra, axes=0)
# freq = np.fft.fftshift(freq)
# Build new wcs
wcs = deepcopy(self.wcs)
wcs.wcs.ctype[self.__opd_idx] = "FREQ"
wcs.wcs.cunit[self.__opd_idx] = "Hz"
        # TODO: (cst.c / (cdelt_opd * cunit_opd) / (naxis_opd-1)).to(u.Hz).value gives the 1/2L resolution, but fails in the tests
wcs.wcs.cdelt[self.__opd_idx] = (cst.c / (cdelt_opd * cunit_opd) / naxis_opd).to(u.Hz).value
wcs.wcs.crpix[self.__opd_idx] = (naxis_opd - 1) / 2 + 1
wcs.wcs.crval[self.__opd_idx] = 0
# TODO: Estimate uncertainty/hits
output = FTSData(spectra, meta=self.meta, wcs=wcs)
return output
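    # Editor's note (illustrative numbers, not from the original source): with
    # the convention above, the spectral channel spacing is
    # c / (naxis_opd * cdelt_opd). For example, cdelt_opd = 0.05 mm and
    # naxis_opd = 401 give c / (401 * 5e-5 m) ~ 1.5e10 Hz, i.e. roughly 15 GHz
    # per channel.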
def __invert_onesided(self, apodization_function=None):
"""Invert a onesided interferograms cube.
Parameters
----------
apodization_function : func
Apodization function to be used on the interferograms (default: None)
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
        Choice can be made among the functions available in numpy at [1]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
.. [1] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
        assert self.__is_opd, "Input should be OPD cube"
assert self._is_onesided, "Not a one sided interferogram cube"
cdelt_opd = self.wcs.wcs.cdelt[self.__opd_idx]
cunit_opd = u.Unit(self.wcs.wcs.cunit[self.__opd_idx])
naxis_opd = self.shape[0]
if apodization_function is None:
apodization_function = np.ones
_cube = np.ma.array(self.data, mask=self.mask).filled(0) * np.expand_dims(
apodization_function(2 * naxis_opd)[naxis_opd:], tuple(np.arange(1, self.ndim))
)
# Spencer 2005 Eq 2.29, direct fft
# Trick is to use the unnormalized irfft
output_shape = 2 * naxis_opd - 1
spectra = np.fft.irfft(_cube, n=output_shape, axis=0) * output_shape
# Factor of 2 because we used the fourier transform
spectra *= (4 * cdelt_opd * cunit_opd).decompose().value
spectra = np.fft.fftshift(spectra, axes=0)
# Build new wcs
wcs = deepcopy(self.wcs)
wcs.wcs.ctype[self.__opd_idx] = "FREQ"
wcs.wcs.cunit[self.__opd_idx] = "Hz"
        # (cst.c / (cdelt_opd * cunit_opd) / (output_shape-1)).to(u.Hz).value gives the 1/2L resolution, but fails in the tests
wcs.wcs.cdelt[self.__opd_idx] = (cst.c / (cdelt_opd * cunit_opd) / output_shape).to(u.Hz).value
wcs.wcs.crpix[self.__opd_idx] = naxis_opd
wcs.wcs.crval[self.__opd_idx] = 0
# TODO: Estimate uncertainty/hits
output = FTSData(spectra, meta=self.meta, wcs=wcs)
return output
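    # Editor's note (illustrative): the onesided inversion above produces
    # output_shape = 2 * naxis_opd - 1 spectral channels, e.g. 201 OPD samples
    # yield 401 channels, with zero frequency at crpix = naxis_opd after the
    # fftshift.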
def _get_phase_correction_function(
self,
niter=1,
doublesided_apodization=None,
medfilt_size=None,
deg=None,
fitting_func="polynomial",
pcf_apodization=None,
plot=False,
**kwargs
):
"""Compute the phase correction function for the current cube
        This follows the description in [1]_ with some additional features.
Parameters
----------
niter : [int], optional
number of iterations, by default 1
doublesided_apodization : [function], optional
apodization function for the double sided inversion, by default None, but see Notes
medfilt_size : [int], optional
size of the median filtering window to be applied (before polynomial fitting), by default None
deg : [int], optional
the polynomial degree to fit to the phase, by default None
        fitting_func : [str], ("polynomial"|"chebychev"), optional
            fitting function class, either polynomial or chebyshev, by default "polynomial"
pcf_apodization : [function], optional
apodization function for the phase correction function, by default None
plot : bool, optional
diagnostic plots, by default False
Returns
-------
array_like (cube shape)
the phase correction function to be used as convolution kernel for the interferograms
Notes
-----
        Choice of apodization function can be made among the functions available in numpy at [2]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
"""
if pcf_apodization is None:
pcf_apodization = np.ones
# Working copy
itg = deepcopy(self._extract_doublesided())
        # Reference interferogram
itg_ma = np.ma.array(itg.data, mask=itg.mask, copy=True).filled(0)
# Null starting phase (take only the upper part)
phase = np.zeros(((itg.shape[0] - 1) // 2 + 1, *itg.shape[1:]))
# Loop Here
for i in range(niter):
cube = itg._FTSData__invert_doublesided(apodization_function=doublesided_apodization)
# Spencer 2.39 , well actually phases are -pi/pi so arctan2 or angle
_phase = np.angle(cube.data[(itg.shape[0] - 1) // 2 :])
# Replace bad phase :
_phase[np.isnan(_phase)] = 0
if plot:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ncols=4)
(freq,) = cube.wcs.sub([self.__opd_idx + 1]).all_pix2world(np.arange(cube.shape[0]), 0)
axes[1].plot(freq, cube.data[:, :, 0])
axes[2].plot(freq, _phase[:, :, 0])
if medfilt_size is not None:
# Median filtering of the phases
_phase = medfilt(_phase, kernel_size=(medfilt_size, *(1,) * (len(itg.shape) - 1)))
if deg is not None:
if fitting_func == "polynomial":
polyfit, polyval = np.polynomial.polynomial.polyfit, np.polynomial.polynomial.polyval
elif fitting_func == "chebychev":
polyfit, polyval = np.polynomial.chebyshev.chebfit, np.polynomial.chebyshev.chebval
else:
raise ValueError('fitting_func should be in ("polynomial"|"chebychev")')
# polynomial fit on the phase, weighted by the intensity
p = []
idx = np.linspace(0, 1, _phase.shape[0])
                # np.polynomial.polynomial.polyfit does not accept a (`M`, `K`) array for the weights, so we need to loop....
for spec, weight in zip(
_phase.reshape(_phase.shape[0], -1).T,
np.abs(cube.data[(itg.shape[0] - 1) // 2 :]).reshape(_phase.shape[0], -1).T,
):
p.append(polyfit(idx, spec, deg, w=weight))
p = np.asarray(p).T
# evaluate the polynomal all at once :
_phase = polyval(idx, p).T.reshape(_phase.shape)
            # Wrap the phases back to [-pi, pi]; unnecessary, but just in case
_phase = (_phase + np.pi) % (2 * np.pi) - np.pi
"""
fit data also incorporates smoothing in the
out of band region to ensure zero phase and derivative discontinuities and zero amplitude at
zero and Nyquist frequency.
"""
if plot:
axes[2].plot(freq, _phase[:, :, 0], linestyle="--")
phase += _phase
# Spencer 3.30
# Using rfft leads pure real pcf and strangely could lead to wrong results
# phase_correction_function = np.fft.irfft(np.exp(-1j * phase), axis=0, n=2*(phase.shape[0]-1)+1)
phase_correction_function = np.fft.ifft(
np.exp(-1j * np.fft.fftshift(np.concatenate([-phase[:0:-1], phase]), axes=0)), axis=0
)
# Apodization of the PCF along the first axis
phase_correction_function = (
np.fft.fftshift(phase_correction_function, axes=0).T
* pcf_apodization(phase_correction_function.shape[0])
).T
if plot:
(x,) = itg.wcs.sub([3]).all_pix2world(np.arange(itg.shape[0]), 0)
axes[3].plot(x, phase_correction_function[:, :, 0])
axes[3].set_xlim(-1, 1)
axes[0].plot(x, itg.data[:, :, 0])
axes[0].set_xlim(-1, 1)
# Correct the initial dataset with the current phase for the next iteration
corrected_itg = fftconvolve(itg_ma, phase_correction_function, mode="same", axes=0).real
itg.data[:] = corrected_itg
return phase_correction_function
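    # Editor's sketch (hypothetical variable names, not part of the original
    # module): a typical call, using numpy window functions as apodization:
    #     pcf = itg_cube._get_phase_correction_function(
    #         niter=1,
    #         doublesided_apodization=np.hanning,
    #         medfilt_size=5,
    #         deg=2,
    #         pcf_apodization=np.hanning,
    #     )
    # `pcf` is then convolved with the interferograms, as done in `to_spectra`.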
def to_spectra(self, onesided_apodization=None, **kwargs):
"""Invert an interferograms cube using the (enhanced) Forman method.
        This follows the description in [1]_.
Parameters
----------
onesided_apodization : [function], optional
apodization function to be used on the one sided interferograms, by default None
niter : [int], optional
number of iterations, by default 1
doublesided_apodization : [function], optional
apodization function for the double sided inversion, by default None, but see Notes
medfilt_size : [int], optional
size of the median filtering window to be applied (before polynomial fitting), by default None
deg : [int], optional
the polynomial degree to fit to the phase, by default None
pcf_apodization : [function], optional
apodization function for the phase correction function, by default None
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
        Choice of apodization function can be made among the functions available in numpy at [2]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
.. [2] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
phase_correction_function = self._get_phase_correction_function(**kwargs)
# Convolved the interferograms and hits
itg = np.ma.array(self.data, mask=self.mask).filled(0)
corrected_itg = fftconvolve(itg, phase_correction_function, mode="same", axes=0).real
corrected_hits = None
if self.hits is not None:
hits = np.ma.array(self.hits, mask=self.mask).filled(0)
corrected_hits = fftconvolve(hits, phase_correction_function, mode="same", axes=0).real
corrected = FTSData(corrected_itg, wcs=self.wcs, hits=corrected_hits)
onesided = corrected._to_onesided()
return onesided.__invert_onesided(apodization_function=onesided_apodization)
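    # Editor's sketch (hypothetical variable names): an end-to-end inversion of
    # an interferogram cube `itg_cube` might look like
    #     spectra = itg_cube.to_spectra(
    #         onesided_apodization=np.hanning,
    #         niter=2,
    #         doublesided_apodization=np.hanning,
    #         medfilt_size=5,
    #         deg=3,
    #         pcf_apodization=np.hanning,
    #     )
    # where every keyword except `onesided_apodization` is forwarded to
    # `_get_phase_correction_function`.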
def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_hits="HITS",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
):
"""Creates an HDUList object from a FTSData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_hits : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty, ``'HITS'`` for hits and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a astropy uncertainty type.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.meta, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.meta.copy()
else:
header = fits.Header(self.meta)
if self.unit is not None and self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError("only uncertainties of type {} can be saved.".format(_known_uncertainties))
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Assuming uncertainty is an StdDevUncertainty save just the array
# this might be problematic if the Uncertainty has a unit differing
# from the data so abort for different units. This is important for
# astropy > 1.2
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None and self.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(uncertainty_cls, self.uncertainty.unit, self.unit):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_hits and self.hits is not None:
            # Always assuming that the hits are a np.ndarray (check that it has
            # a 'shape').
            if not hasattr(self.hits, "shape"):
                raise ValueError("only a numpy.ndarray hits can be saved.")
            # Cast the hits to uint16 for storage in the FITS extension.
hduHits = fits.ImageHDU(self.hits.astype(np.uint16), name=hdu_hits)
hdus.append(hduHits)
if hdu_flags and self.flags:
raise NotImplementedError("adding the flags to a HDU is not " "supported at this time.")
hdulist = fits.HDUList(hdus)
return hdulist
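    # Editor's sketch (hypothetical filename): serializing a cube to FITS:
    #     hdulist = cube.to_hdu()
    #     hdulist.writeto("fts_cube.fits", overwrite=True)
    # The MASK, UNCERT and HITS extensions are only appended when the
    # corresponding attributes are set.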
@classmethod
def from_array(cls, opd, data, hits=None, mask=None, **kwargs):
"""Construct FTS data from arrays.
Parameters
----------
opd : array_like or Quantity (M,)
the optical path difference, by default 'mm'
data : array_like (M, *)
the corresponding data, first dimension must match opd
        hits : array_like, optional
            the corresponding hits
        mask : array_like, optional
            the corresponding mask
Returns
-------
data : FTSData
the corresponding FTSData objects
"""
naxis = len(data.shape)
wcs = WCS(naxis=naxis)
if not isinstance(opd, u.Quantity):
opd = u.Quantity(opd, "mm")
zpd_idx = np.argmin(np.abs(opd))
if opd[zpd_idx] != 0:
print("Shifting opd by {} for 0".format(opd[zpd_idx]))
opd -= opd[zpd_idx]
dpd = np.diff(opd)
np.testing.assert_almost_equal(
            np.median(dpd).to(dpd.unit).value, dpd.value, err_msg="opd differences are not uniform; opd must be regularly sampled"
)
wcs.wcs.ctype[naxis - 1] = "OPD"
wcs.wcs.cunit[naxis - 1] = opd.unit
wcs.wcs.crpix[naxis - 1] = zpd_idx + 1
wcs.wcs.crval[naxis - 1] = opd[zpd_idx].value
wcs.wcs.cdelt[naxis - 1] = np.median(dpd).value
if mask is None:
mask = False
return cls(data, wcs=wcs, hits=hits, mask=mask | np.isnan(data), **kwargs)
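    # Editor's sketch (illustrative shapes and values): building a cube from a
    # regularly sampled OPD grid that contains zero:
    #     opd = np.linspace(-5, 5, 201) * u.mm   # 201 OPD samples, ZPD at 0
    #     data = np.zeros((201, 8, 8))            # (n_opd, ny, nx) interferograms
    #     cube = FTSData.from_array(opd, data)
    # `from_array` checks that the sampling is regular and shifts the grid so
    # the ZPD sample sits exactly at zero.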
def fits_ftsdata_writer(
fts_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_hits="HITS",
hdu_flags=None,
key_uncertainty_type="UTYPE",
**kwd
):
"""
    Write FTSData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_hits, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
        Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty, ``'HITS'`` for hits and
``None`` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = fts_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
hdu_hits=hdu_hits,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(FTSData):
# registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer("fits", FTSData, fits_ftsdata_writer)
registry.register_identifier("fits", FTSData, fits.connect.is_fits)
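# Editor's note (illustrative, not part of the original module): with the
# writer registered above, an FTSData cube can be written through astropy's
# unified I/O registry, e.g.
#     from astropy.io import registry
#     registry.write(cube, "fts_cube.fits", format="fits", overwrite=True)
# where `cube` is an FTSData instance, the filename is hypothetical, and
# `overwrite` is forwarded to `astropy.io.fits` through **kwd.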
|
[
"numpy.abs",
"numpy.angle",
"astropy.io.fits.PrimaryHDU",
"numpy.ones",
"numpy.isnan",
"astropy.io.fits.Header",
"numpy.arange",
"scipy.signal.fftconvolve",
"astropy.io.fits.HDUList",
"astropy.io.fits.ImageHDU",
"numpy.fft.ifftshift",
"astropy.io.registry.register_writer",
"numpy.fft.irfft",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"copy.deepcopy",
"astropy.units.Quantity",
"astropy.io.registry.register_identifier",
"numpy.median",
"astropy.io.registry.delay_doc_updates",
"numpy.asarray",
"numpy.fft.fftshift",
"numpy.char.lower",
"astropy.nddata.ccddata._uncertainty_unit_equivalent_to_parent",
"astropy.units.Unit",
"numpy.concatenate",
"numpy.zeros",
"astropy.wcs.WCS",
"numpy.ma.array",
"numpy.diff",
"numpy.array",
"numpy.sign"
] |
[((1238, 1253), 'numpy.arange', 'np.arange', (['(0)', 'M'], {}), '(0, M)\n', (1247, 1253), True, 'import numpy as np\n'), ((30442, 30477), 'astropy.io.registry.delay_doc_updates', 'registry.delay_doc_updates', (['FTSData'], {}), '(FTSData)\n', (30468, 30477), False, 'from astropy.io import fits, registry\n'), ((30555, 30617), 'astropy.io.registry.register_writer', 'registry.register_writer', (['"""fits"""', 'FTSData', 'fits_ftsdata_writer'], {}), "('fits', FTSData, fits_ftsdata_writer)\n", (30579, 30617), False, 'from astropy.io import fits, registry\n'), ((30622, 30689), 'astropy.io.registry.register_identifier', 'registry.register_identifier', (['"""fits"""', 'FTSData', 'fits.connect.is_fits'], {}), "('fits', FTSData, fits.connect.is_fits)\n", (30650, 30689), False, 'from astropy.io import fits, registry\n'), ((1169, 1181), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1177, 1181), True, 'import numpy as np\n'), ((1212, 1229), 'numpy.ones', 'np.ones', (['(1)', 'float'], {}), '(1, float)\n', (1219, 1229), True, 'import numpy as np\n'), ((6077, 6106), 'numpy.sign', 'np.sign', (['opd_wcs.wcs.cdelt[0]'], {}), '(opd_wcs.wcs.cdelt[0])\n', (6084, 6106), True, 'import numpy as np\n'), ((6301, 6319), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (6309, 6319), False, 'from copy import deepcopy\n'), ((6390, 6409), 'copy.deepcopy', 'deepcopy', (['self.meta'], {}), '(self.meta)\n', (6398, 6409), False, 'from copy import deepcopy\n'), ((8235, 8253), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (8243, 8253), False, 'from copy import deepcopy\n'), ((9383, 9425), 'astropy.units.Unit', 'u.Unit', (['self.wcs.wcs.cunit[self.__opd_idx]'], {}), '(self.wcs.wcs.cunit[self.__opd_idx])\n', (9389, 9425), True, 'import astropy.units as u\n'), ((10048, 10080), 'numpy.fft.fftshift', 'np.fft.fftshift', (['spectra'], {'axes': '(0)'}), '(spectra, axes=0)\n', (10063, 10080), True, 'import numpy as np\n'), ((10159, 10177), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (10167, 10177), False, 'from copy import deepcopy\n'), ((11662, 11704), 'astropy.units.Unit', 'u.Unit', (['self.wcs.wcs.cunit[self.__opd_idx]'], {}), '(self.wcs.wcs.cunit[self.__opd_idx])\n', (11668, 11704), True, 'import astropy.units as u\n'), ((12364, 12396), 'numpy.fft.fftshift', 'np.fft.fftshift', (['spectra'], {'axes': '(0)'}), '(spectra, axes=0)\n', (12379, 12396), True, 'import numpy as np\n'), ((12436, 12454), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (12444, 12454), False, 'from copy import deepcopy\n'), ((15251, 15306), 'numpy.zeros', 'np.zeros', (['((itg.shape[0] - 1) // 2 + 1, *itg.shape[1:])'], {}), '(((itg.shape[0] - 1) // 2 + 1, *itg.shape[1:]))\n', (15259, 15306), True, 'import numpy as np\n'), ((27210, 27228), 'astropy.io.fits.HDUList', 'fits.HDUList', (['hdus'], {}), '(hdus)\n', (27222, 27228), False, 'from astropy.io import fits, registry\n'), ((27925, 27941), 'astropy.wcs.WCS', 'WCS', ([], {'naxis': 'naxis'}), '(naxis=naxis)\n', (27928, 27941), False, 'from astropy.wcs import WCS\n'), ((28214, 28226), 'numpy.diff', 'np.diff', (['opd'], {}), '(opd)\n', (28221, 28226), True, 'import numpy as np\n'), ((9864, 9895), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['_cube'], {'axes': '(0)'}), '(_cube, axes=0)\n', (9880, 9895), True, 'import numpy as np\n'), ((12162, 12205), 'numpy.fft.irfft', 'np.fft.irfft', (['_cube'], {'n': 'output_shape', 'axis': '(0)'}), '(_cube, n=output_shape, axis=0)\n', (12174, 12205), True, 'import numpy as np\n'), ((15561, 15606), 
'numpy.angle', 'np.angle', (['cube.data[(itg.shape[0] - 1) // 2:]'], {}), '(cube.data[(itg.shape[0] - 1) // 2:])\n', (15569, 15606), True, 'import numpy as np\n'), ((20913, 20977), 'scipy.signal.fftconvolve', 'fftconvolve', (['itg', 'phase_correction_function'], {'mode': '"""same"""', 'axes': '(0)'}), "(itg, phase_correction_function, mode='same', axes=0)\n", (20924, 20977), False, 'from scipy.signal import fftconvolve, medfilt\n'), ((23537, 23559), 'astropy.io.fits.Header', 'fits.Header', (['self.meta'], {}), '(self.meta)\n', (23548, 23559), False, 'from astropy.io import fits, registry\n'), ((24504, 24538), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.data', 'header'], {}), '(self.data, header)\n', (24519, 24538), False, 'from astropy.io import fits, registry\n'), ((25623, 25636), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (25634, 25636), False, 'from astropy.io import fits, registry\n'), ((26472, 26548), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['self.uncertainty.array', 'hdr_uncertainty'], {'name': 'hdu_uncertainty'}), '(self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty)\n', (26485, 26548), False, 'from astropy.io import fits, registry\n'), ((28005, 28026), 'astropy.units.Quantity', 'u.Quantity', (['opd', '"""mm"""'], {}), "(opd, 'mm')\n", (28015, 28026), True, 'import astropy.units as u\n'), ((28056, 28067), 'numpy.abs', 'np.abs', (['opd'], {}), '(opd)\n', (28062, 28067), True, 'import numpy as np\n'), ((28595, 28609), 'numpy.median', 'np.median', (['dpd'], {}), '(dpd)\n', (28604, 28609), True, 'import numpy as np\n'), ((4443, 4467), 'numpy.arange', 'np.arange', (['self.shape[0]'], {}), '(self.shape[0])\n', (4452, 4467), True, 'import numpy as np\n'), ((4610, 4634), 'numpy.arange', 'np.arange', (['self.shape[0]'], {}), '(self.shape[0])\n', (4619, 4634), True, 'import numpy as np\n'), ((5969, 5998), 'numpy.arange', 'np.arange', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (5978, 5998), True, 'import numpy as np\n'), ((15119, 15166), 'numpy.ma.array', 'np.ma.array', (['itg.data'], {'mask': 'itg.mask', 'copy': '(True)'}), '(itg.data, mask=itg.mask, copy=True)\n', (15130, 15166), True, 'import numpy as np\n'), ((15662, 15678), 'numpy.isnan', 'np.isnan', (['_phase'], {}), '(_phase)\n', (15670, 15678), True, 'import numpy as np\n'), ((15783, 15804), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(4)'}), '(ncols=4)\n', (15795, 15804), True, 'import matplotlib.pyplot as plt\n'), ((16783, 16817), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '_phase.shape[0]'], {}), '(0, 1, _phase.shape[0])\n', (16794, 16817), True, 'import numpy as np\n'), ((18914, 18981), 'scipy.signal.fftconvolve', 'fftconvolve', (['itg_ma', 'phase_correction_function'], {'mode': '"""same"""', 'axes': '(0)'}), "(itg_ma, phase_correction_function, mode='same', axes=0)\n", (18925, 18981), False, 'from scipy.signal import fftconvolve, medfilt\n'), ((20840, 20878), 'numpy.ma.array', 'np.ma.array', (['self.data'], {'mask': 'self.mask'}), '(self.data, mask=self.mask)\n', (20851, 20878), True, 'import numpy as np\n'), ((21145, 21210), 'scipy.signal.fftconvolve', 'fftconvolve', (['hits', 'phase_correction_function'], {'mode': '"""same"""', 'axes': '(0)'}), "(hits, phase_correction_function, mode='same', axes=0)\n", (21156, 21210), False, 'from scipy.signal import fftconvolve, medfilt\n'), ((3703, 3717), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (3711, 3717), True, 'import numpy as np\n'), ((9638, 9676), 'numpy.ma.array', 'np.ma.array', 
(['self.data'], {'mask': 'self.mask'}), '(self.data, mask=self.mask)\n', (9649, 9676), True, 'import numpy as np\n'), ((9756, 9779), 'numpy.arange', 'np.arange', (['(1)', 'self.ndim'], {}), '(1, self.ndim)\n', (9765, 9779), True, 'import numpy as np\n'), ((11841, 11879), 'numpy.ma.array', 'np.ma.array', (['self.data'], {'mask': 'self.mask'}), '(self.data, mask=self.mask)\n', (11852, 11879), True, 'import numpy as np\n'), ((11975, 11998), 'numpy.arange', 'np.arange', (['(1)', 'self.ndim'], {}), '(1, self.ndim)\n', (11984, 11998), True, 'import numpy as np\n'), ((15880, 15904), 'numpy.arange', 'np.arange', (['cube.shape[0]'], {}), '(cube.shape[0])\n', (15889, 15904), True, 'import numpy as np\n'), ((17240, 17253), 'numpy.asarray', 'np.asarray', (['p'], {}), '(p)\n', (17250, 17253), True, 'import numpy as np\n'), ((18570, 18593), 'numpy.arange', 'np.arange', (['itg.shape[0]'], {}), '(itg.shape[0])\n', (18579, 18593), True, 'import numpy as np\n'), ((21067, 21105), 'numpy.ma.array', 'np.ma.array', (['self.hits'], {'mask': 'self.mask'}), '(self.hits, mask=self.mask)\n', (21078, 21105), True, 'import numpy as np\n'), ((26111, 26205), 'astropy.nddata.ccddata._uncertainty_unit_equivalent_to_parent', '_uncertainty_unit_equivalent_to_parent', (['uncertainty_cls', 'self.uncertainty.unit', 'self.unit'], {}), '(uncertainty_cls, self.uncertainty.\n unit, self.unit)\n', (26149, 26205), False, 'from astropy.nddata.ccddata import _unc_name_to_cls, _unc_cls_to_name, _uncertainty_unit_equivalent_to_parent\n'), ((28725, 28739), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (28733, 28739), True, 'import numpy as np\n'), ((18355, 18405), 'numpy.fft.fftshift', 'np.fft.fftshift', (['phase_correction_function'], {'axes': '(0)'}), '(phase_correction_function, axes=0)\n', (18370, 18405), True, 'import numpy as np\n'), ((28279, 28293), 'numpy.median', 'np.median', (['dpd'], {}), '(dpd)\n', (28288, 28293), True, 'import numpy as np\n'), ((3856, 3889), 'numpy.char.lower', 'np.char.lower', (['self.wcs.wcs.ctype'], {}), '(self.wcs.wcs.ctype)\n', (3869, 3889), True, 'import numpy as np\n'), ((4022, 4055), 'numpy.char.lower', 'np.char.lower', (['self.wcs.wcs.ctype'], {}), '(self.wcs.wcs.ctype)\n', (4035, 4055), True, 'import numpy as np\n'), ((18167, 18205), 'numpy.concatenate', 'np.concatenate', (['[-phase[:0:-1], phase]'], {}), '([-phase[:0:-1], phase])\n', (18181, 18205), True, 'import numpy as np\n'), ((17059, 17102), 'numpy.abs', 'np.abs', (['cube.data[(itg.shape[0] - 1) // 2:]'], {}), '(cube.data[(itg.shape[0] - 1) // 2:])\n', (17065, 17102), True, 'import numpy as np\n')]
|
import matrices_new_extended as mne
import numpy as np
import sympy as sp
from equality_check import Point
x, y, z = sp.symbols("x y z")
Point.base_point = np.array([x, y, z, 1])
class Test_Axis_3_xxx:
def test_matrix_3_xxx(self):
expected = Point([ z, x, y, 1])
calculated = Point.calculate(mne._matrix_3_xxx)
assert calculated == expected
def test_matrix_3_1_mtmHxx_ttt(self):
expected = Point([ z, 1+x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_1_mtmHxx_ttt)
assert calculated == expected
def test_matrix_3_1_HxmHxx_ttt(self):
expected = Point([ 1+z, x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_1_HxmHxx_ttt)
assert calculated == expected
def test_matrix_3_1_Hxtxx_ttt(self):
expected = Point([ 1+z, 1+x, y, 1])
calculated = Point.calculate(mne._matrix_3_1_Hxtxx_ttt)
assert calculated == expected
def test_matrix_3_xxx_hhh(self):
expected = Point([ 1+z, 1+x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_xxx_hhh)
assert calculated == expected
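# Editor's note: these tests are written for pytest; assuming the module is
# saved as, e.g., test_axis_3_xxx.py (hypothetical filename), they can be run
# with:
#     pytest test_axis_3_xxx.py -q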
|
[
"sympy.symbols",
"numpy.array",
"equality_check.Point.calculate",
"equality_check.Point"
] |
[((118, 137), 'sympy.symbols', 'sp.symbols', (['"""x y z"""'], {}), "('x y z')\n", (128, 137), True, 'import sympy as sp\n'), ((157, 179), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (165, 179), True, 'import numpy as np\n'), ((258, 277), 'equality_check.Point', 'Point', (['[z, x, y, 1]'], {}), '([z, x, y, 1])\n', (263, 277), False, 'from equality_check import Point\n'), ((300, 334), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_xxx'], {}), '(mne._matrix_3_xxx)\n', (315, 334), False, 'from equality_check import Point\n'), ((435, 462), 'equality_check.Point', 'Point', (['[z, 1 + x, 1 + y, 1]'], {}), '([z, 1 + x, 1 + y, 1])\n', (440, 462), False, 'from equality_check import Point\n'), ((481, 524), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_1_mtmHxx_ttt'], {}), '(mne._matrix_3_1_mtmHxx_ttt)\n', (496, 524), False, 'from equality_check import Point\n'), ((625, 652), 'equality_check.Point', 'Point', (['[1 + z, x, 1 + y, 1]'], {}), '([1 + z, x, 1 + y, 1])\n', (630, 652), False, 'from equality_check import Point\n'), ((671, 714), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_1_HxmHxx_ttt'], {}), '(mne._matrix_3_1_HxmHxx_ttt)\n', (686, 714), False, 'from equality_check import Point\n'), ((814, 841), 'equality_check.Point', 'Point', (['[1 + z, 1 + x, y, 1]'], {}), '([1 + z, 1 + x, y, 1])\n', (819, 841), False, 'from equality_check import Point\n'), ((860, 902), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_1_Hxtxx_ttt'], {}), '(mne._matrix_3_1_Hxtxx_ttt)\n', (875, 902), False, 'from equality_check import Point\n'), ((998, 1029), 'equality_check.Point', 'Point', (['[1 + z, 1 + x, 1 + y, 1]'], {}), '([1 + z, 1 + x, 1 + y, 1])\n', (1003, 1029), False, 'from equality_check import Point\n'), ((1046, 1084), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_xxx_hhh'], {}), '(mne._matrix_3_xxx_hhh)\n', (1061, 1084), False, 'from equality_check import Point\n')]
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division # Eliminate need for decimals on whole values
import sys
# As of 28 July 2019, python3.6 is the default "python3" in apt-get install python3
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
print("This script requires Python version 3.6")
sys.exit(1)
import configparser # config file parsing
import argparse # command line parsing
import os
from datetime import date, timedelta, datetime
from time import time # For performance timing
from math import acos, asin, atan, cos, sin, tan, degrees # Fast/precise math functions
import numpy as np
import logging
import string
from spacetrack import SpaceTrackClient
# These are necessary until <NAME> approves pull requests
# https://github.com/brandon-rhodes/python-sgp4/pull/35
sys.path.insert(1, '../python-sgp4')
# https://github.com/skyfielders/python-skyfield/pull/276
sys.path.insert(2, '/Users/chris/Dropbox/code/preMVP/python-skyfield')
# FIXME: Note python-skyfield is not currently compatible with cythonized python-SGP4
from skyfield.iokit import Loader, download, parse_tle
from skyfield import sgp4lib
# The following 5 lines are necessary until our modules are public
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
tle_path = os.path.join(parentdir, "sathunt-tle")
sys.path.insert(1,tle_path)
from tle_util import make_tle, append_tle_file
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / mag(vector)
def proj(v2, v1):
""" Returns the unit vector projection of v1 onto v2 """
b = np.dot(v2, v1)/np.dot(v2, v2)
temp = np.multiply(b, v2)
# Make unit vector
vp = unit_vector(temp)
return vp
def flat_proj(v1, v2):
""" Returns the flat projection of direction unit vector, v1 onto v2 """
temp1 = np.cross(v1, v2)
temp2 = np.cross(temp1, v1)
return proj(temp2, v2)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
Partially Ref: angle(vec1,vec2) in python-sgp4/ext.py
"""
small = 0.00000001
undefined = 999999.1
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
    magv1 = mag(v1)
    magv2 = mag(v2)
if (magv1 * magv2 > small * small):
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
else:
return undefined
def mag(v):
""" Computes the magnitude of a vector ||v||
Renamed from norm(v) used in original Scott Campbell code
to better correspond to function names in SGP4 code.
"""
mag = np.sqrt(np.dot(v, v))
return mag
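# Editor's note (illustrative values): quick sanity checks for the vector
# helpers above, shown doctest-style:
#     >>> mag(np.array([3.0, 4.0, 0.0]))
#     5.0
#     >>> degrees(angle_between((1, 0, 0), (0, 1, 0)))
#     90.0
#     >>> proj(np.array([1.0, 0.0, 0.0]), np.array([2.0, 3.0, 0.0]))
#     array([1., 0., 0.])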
def main():
""" Interactive tool for finding an unknown TLE object within a library of TLEs
TODO:
    - Implement argv[1] = unid.txt, argv[2] = refer.tle
    - Make non-interactive callable version
    - Make stand-alone version that uses python-SGP4 exclusively, not tle_util
- Incorporate logic to read the (first?) TLE from the UNID file
- Incorporate logic to warn/error the user if no TLEs found
- Incorporate Perr/Alpha inputs as command line/config flags
- Put in more compares for altitude, velocity, etc.
"""
t0 = time()
# Read commandline options
conf_parser = argparse.ArgumentParser(description='Utility to assist in ID of an unidentified (unid) satellite')
conf_parser.add_argument("-c", "--conf_file",
help="Specify configuration file. [Default configuration.ini]",
dest='conf_file',
nargs='?',
const=1,
default='configuration.ini',
type=str,
metavar="FILE")
conf_parser.add_argument("-d", "--datadir",
help="data directory [default ./data]",
dest='datadir',
default='./data',
nargs='?',
const=1,
type=str,
metavar="DIR")
conf_parser.add_argument("--tleref",
help="Specify TLE reference file. [Default refer.tle]",
dest='tle_ref',
nargs='?',
type=str,
metavar="REFTLE")
conf_parser.add_argument("--tleunid",
help="Specify TLE unid file. [Default unid.tle]",
dest='tle_unid',
nargs='?',
type=str,
metavar="UNID")
conf_parser.add_argument("--update", help="update TLEs from online sources",
action="store_true")
conf_parser.add_argument("-dbname", "--database",
help="database to USE",
dest='dbname',
default='opensatcat_dev',
nargs='?',
const=1,
type=str,
metavar="NAME")
conf_parser.add_argument("-H", "--hostname",
help="database hostname",
dest='dbhostname',
default='opensatcat.cvpypmmxjtv1.us-east-2.rds.amazonaws.com',
nargs='?',
const=1,
type=str,
metavar="HOSTNAME")
conf_parser.add_argument("-u", "--user",
help="database user name",
dest='dbusername',
nargs='?',
type=str,
metavar="USER")
conf_parser.add_argument("-p", "--password",
help="database user password",
dest='dbpassword',
nargs='?',
type=str,
metavar="PASSWD")
conf_parser.add_argument("-t", "--dbtype",
help="database type [INFILE, sqlserver, sqlite] \
default: INFILE",
dest='dbtype',
nargs='?',
choices=['INFILE', 'sqlserver', 'sqlite'],
default='INFILE',
type=str,
metavar="TYPE")
conf_parser.add_argument("-i", "--import", help="Import TLEs to database",
dest='importTLE',
action="store_true")
conf_parser.add_argument("-q", "--quiet", help="Suppress console output",
dest='quiet',
action="store_true")
conf_parser.add_argument("-V", "--verbose",
help="increase verbosity: 0 = only warnings, 1 = info, 2 = debug. No number means info. Default is no verbosity.",
const=1,
default=1,
type=int,
nargs="?")
# Process commandline options and parse configuration
cfg = configparser.ConfigParser(inline_comment_prefixes=('#', ';'))
args = conf_parser.parse_args()
log = logging.getLogger(__name__)
# make it print to the console.
console = logging.StreamHandler()
log.addHandler(console)
conf_file = args.conf_file
tle_ref = args.tle_ref
tle_unid = args.tle_unid
update = args.update
datadir = args.datadir
dbname = args.dbname
dbhostname = args.dbhostname
dbusername = args.dbusername
dbpassword = args.dbpassword
dbtype = args.dbtype
importTLE = args.importTLE
verbose = args.verbose
quiet = args.quiet
# Set our python-skyfield data directory
load = Loader(datadir)
ts = load.timescale()
if (quiet == False):
if verbose == 0:
log.setLevel(logging.WARN)
elif verbose == 1:
log.setLevel(logging.INFO)
elif verbose == 2:
log.setLevel(logging.DEBUG)
log.debug("Log level set to {}".format(log.level))
if verbose:
for arg in vars(args):
log.debug("%s : %s",arg, getattr(args, arg))
cfg.read([args.conf_file])
log.info("Reading config from: {}".format(args.conf_file))
# 1st arg in original version
if not (tle_ref):
try:
tle_ref = cfg.get('Common', 'tle_ref')
        except (configparser.NoSectionError, configparser.NoOptionError):
tle_ref = "refer.tle"
# 2nd arg in original version
if not (tle_unid):
try:
tle_unid = cfg.get('Common', 'tle_unid')
        except (configparser.NoSectionError, configparser.NoOptionError):
tle_unid = "unid.txt"
# # Read single (first?) TLE from UNIDentified TLE file
# TLE_UNID = tle_util.TLEFile(tle_unid,strict=False)
# for sat_num in TLE_UNID.Satellites: # Need learn a better method to get just the first/only record
# #// id_sat comparison variables
# #// Date t1(tle);
# #// Satellite id_sat(t1.jd, ii, om, ec, ww, ma, nn, bstar);
# UNIDsat = TLE_UNID.Satellites[sat_num]
# # echo tle to screen
# log.info("{LINE1}\n{LINE2}".format(LINE1=UNIDsat.line1, LINE2=UNIDsat.line2))
# # Most popular const used by TLEs
# whichconst = sgp4.earth_gravity.wgs72
# afspc_mode = False
# (satn, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno, xnodeo) = UNIDsat.satrec
# # id_satrec = sgp4init(whichconst, afspc_mode, satn, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno, xnodeo)
# # (rr,vv) = sgp4(id_satrec, tsince=0, whichconst=whichconst)
# id_sat = sgp4.io.twoline2rv(UNIDsat.line1, UNIDsat.line2, whichconst, afspc_mode=False)
# (year, monnth, day, hour, minute, second) = UNIDsat.epoch.timetuple()[:6]
UNIDtle = load.tle(url=tle_unid,reload=False)
# Make sure UNID satellite appears only once
# UNIDtle = set(UNIDtle.values())
if(not UNIDtle):
log.error("No TLEs found in file: {}".format(tle_unid))
log.error("Run elfind first?")
sys.exit()
# Get the single item out of the list
# [UNID] = UNIDtle
for satnum in UNIDtle: break
UNID = UNIDtle[satnum]
# t_unid = ts.ut1(jd=UNID.model.jdsatepoch)
t_unid = UNID.epoch
# Get r,v data at its own EPOCH
# (rr, vv) = id_sat.propagate(year, monnth, day, hour, minute, second)
(rr, vv, id_sat_err) = UNID._position_and_velocity_TEME_km(t_unid)
id_sat_rr = np.array(rr)
id_sat_vv = np.array(vv)
# print(id_sat_rr)
# print(id_sat_vv)
# flat projection of UNID satellite direction unit vector, vp1
vp1 = flat_proj(rr, vv)
# Set Perr error bound
err1 = input(" position error, degrees [2]: ")
err1 = err1 or 2
err1 = float(err1)
# Set alpha error bound
err2 = input(" track angle error, degrees [20]: ")
err2 = err2 or 20
err2 = float(err2)
# Read in REFERENCE element list, and loop through for potential solutions within error bounds
REFtle = load.tle(url=tle_ref,reload=False)
# Make sure REFtle satellites appears only once
REFtle = set(REFtle.values())
for ref_sat in REFtle:
# log.debug("Comparing against {}".format(sat_num))
# if(ref_sat.model.satnum == 26905):
# print("here")
# Get r,v data at UNID epoch
(rr, vv, ref_sat_err) = ref_sat._position_and_velocity_TEME_km(t_unid)
ref_sat_rr = np.array(rr)
ref_sat_vv = np.array(vv)
# delr - satellite delta r vector
delr = np.subtract(id_sat_rr, ref_sat_rr)
# delr - flat projection of delta r unit vector
delr = flat_proj(id_sat_rr, delr)
# alpha - angle between delr and id_sat.vv, radians
alpha = angle_between(delr, id_sat_vv)
        # Perr - angle between position unit vectors, radians
Perr = angle_between(ref_sat_rr, id_sat_rr)
# delta - magnitude of Perr in direction of id_sat.vv (UNID velocity), radians
delt = atan(tan(Perr) * cos(alpha))
# delta_t - time of flight to Closest Point of Approach (cpa) seconds
# rr, vv already in units of km, km/s. No need to convert.
delta_t = delt * mag(id_sat_rr) / mag(id_sat_vv)
# cpa - Closest Point of Approach (cpa), radians
cpa = asin(sin(alpha) * sin(Perr))
# vp2 - flat projection of REF satellite direction unit vector
vp2 = flat_proj(ref_sat_rr, ref_sat_vv)
# alpha - angle between direction unit vectors, radians
alpha = acos(np.dot(vp1, vp2))
# Calculate REF deltas from UNID
try:
alpha = acos(cos(alpha)/cos(delt))
except ValueError:
alpha = float('nan')
# Prepare for presentation to user
alpha = degrees(alpha) # angle between direction unit vectors
Perr = degrees(Perr) # angle between position unit vectors
# Compare UNID to REF using osculating elements (close enough)
if((Perr < err1) and (alpha < err2)):
# tle = None # epoch of elements in tle format
# ii = None # inclination, degrees
# om = None # right ascension of ascending node, degrees
# ec = None # eccentricity
# ww = None # argument of the perigee, degrees
# ma = None # mean anomaly, degrees
# nn = None # mean motion, revolutions/day
# uu = None # true longitude
# c2 = None # bstar coefficient
# bstar = None # BSTAR drag term
# name[81] = None
# visually check match parameters using advanced mean elements
# Write tle to screen
(tle_line0, tle_line1, tle_line2) = make_tle(
name=ref_sat.name,
ssn=ref_sat.model.satnum,
epoch_datetime=ref_sat.epoch.utc_datetime(),
xincl=ref_sat.model.inclo,
xnodeo=ref_sat.model.nodeo,
eo=ref_sat.model.ecco,
omegao=ref_sat.model.argpo,
xmo=ref_sat.model.mo,
xno=degrees(ref_sat.model.no_kozai*1440.0)/360.0,
deg=False)
log.info(" position error {:4.1f}".format(Perr))
log.info("track angle error {:4.1f}\n".format(alpha))
log.info(" time error {:4.0f}".format(delta_t))
log.info(" to closest point {:4.1f}\n".format(degrees(cpa)))
tle_file_path = os.path.join(datadir, tle_unid)
append_tle_file(tle_file_path, tle_line0, tle_line1, tle_line2)
get_continue = input("\n[Next]")
# // s_in("\n[Next]", buf);
# // } // if match
# // } // while
# // s_in("\n[Done]", buf);
get_continue = input("\n[Done]")
# // system(id_file);
# // } // end main
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"os.path.join",
"numpy.multiply",
"skyfield.iokit.Loader",
"os.path.dirname",
"tle_util.append_tle_file",
"math.cos",
"configparser.ConfigParser",
"logging.StreamHandler",
"numpy.cross",
"math.sin",
"inspect.currentframe",
"numpy.dot",
"math.degrees",
"sys.exit",
"numpy.subtract",
"math.tan",
"sys.path.insert",
"time.time",
"numpy.array",
"logging.getLogger"
] |
[((950, 986), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../python-sgp4"""'], {}), "(1, '../python-sgp4')\n", (965, 986), False, 'import sys\n'), ((1047, 1117), 'sys.path.insert', 'sys.path.insert', (['(2)', '"""/Users/chris/Dropbox/code/preMVP/python-skyfield"""'], {}), "(2, '/Users/chris/Dropbox/code/preMVP/python-skyfield')\n", (1062, 1117), False, 'import sys\n'), ((1480, 1507), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (1495, 1507), False, 'import os\n'), ((1520, 1558), 'os.path.join', 'os.path.join', (['parentdir', '"""sathunt-tle"""'], {}), "(parentdir, 'sathunt-tle')\n", (1532, 1558), False, 'import os\n'), ((1560, 1588), 'sys.path.insert', 'sys.path.insert', (['(1)', 'tle_path'], {}), '(1, tle_path)\n', (1575, 1588), False, 'import sys\n'), ((359, 370), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (367, 370), False, 'import sys\n'), ((1887, 1905), 'numpy.multiply', 'np.multiply', (['b', 'v2'], {}), '(b, v2)\n', (1898, 1905), True, 'import numpy as np\n'), ((2094, 2110), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (2102, 2110), True, 'import numpy as np\n'), ((2124, 2143), 'numpy.cross', 'np.cross', (['temp1', 'v1'], {}), '(temp1, v1)\n', (2132, 2143), True, 'import numpy as np\n'), ((3722, 3728), 'time.time', 'time', ([], {}), '()\n', (3726, 3728), False, 'from time import time\n'), ((3780, 3883), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Utility to assist in ID of an unidentified (unid) satellite"""'}), "(description=\n 'Utility to assist in ID of an unidentified (unid) satellite')\n", (3803, 3883), False, 'import argparse\n'), ((8321, 8382), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'inline_comment_prefixes': "('#', ';')"}), "(inline_comment_prefixes=('#', ';'))\n", (8346, 8382), False, 'import configparser\n'), ((8431, 8458), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (8448, 8458), False, 'import logging\n'), ((8513, 8536), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8534, 8536), False, 'import logging\n'), ((9016, 9031), 'skyfield.iokit.Loader', 'Loader', (['datadir'], {}), '(datadir)\n', (9022, 9031), False, 'from skyfield.iokit import Loader, download, parse_tle\n'), ((11763, 11775), 'numpy.array', 'np.array', (['rr'], {}), '(rr)\n', (11771, 11775), True, 'import numpy as np\n'), ((11793, 11805), 'numpy.array', 'np.array', (['vv'], {}), '(vv)\n', (11801, 11805), True, 'import numpy as np\n'), ((1845, 1859), 'numpy.dot', 'np.dot', (['v2', 'v1'], {}), '(v2, v1)\n', (1851, 1859), True, 'import numpy as np\n'), ((1860, 1874), 'numpy.dot', 'np.dot', (['v2', 'v2'], {}), '(v2, v2)\n', (1866, 1874), True, 'import numpy as np\n'), ((3116, 3128), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (3122, 3128), True, 'import numpy as np\n'), ((11343, 11353), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11351, 11353), False, 'import sys\n'), ((12771, 12783), 'numpy.array', 'np.array', (['rr'], {}), '(rr)\n', (12779, 12783), True, 'import numpy as np\n'), ((12806, 12818), 'numpy.array', 'np.array', (['vv'], {}), '(vv)\n', (12814, 12818), True, 'import numpy as np\n'), ((12882, 12916), 'numpy.subtract', 'np.subtract', (['id_sat_rr', 'ref_sat_rr'], {}), '(id_sat_rr, ref_sat_rr)\n', (12893, 12916), True, 'import numpy as np\n'), ((14155, 14169), 'math.degrees', 'degrees', (['alpha'], {}), '(alpha)\n', (14162, 14169), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((14225, 14238), 
'math.degrees', 'degrees', (['Perr'], {}), '(Perr)\n', (14232, 14238), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((1441, 1463), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1461, 1463), False, 'import inspect\n'), ((13905, 13921), 'numpy.dot', 'np.dot', (['vp1', 'vp2'], {}), '(vp1, vp2)\n', (13911, 13921), True, 'import numpy as np\n'), ((15939, 15970), 'os.path.join', 'os.path.join', (['datadir', 'tle_unid'], {}), '(datadir, tle_unid)\n', (15951, 15970), False, 'import os\n'), ((15984, 16047), 'tle_util.append_tle_file', 'append_tle_file', (['tle_file_path', 'tle_line0', 'tle_line1', 'tle_line2'], {}), '(tle_file_path, tle_line0, tle_line1, tle_line2)\n', (15999, 16047), False, 'from tle_util import make_tle, append_tle_file\n'), ((2828, 2846), 'numpy.dot', 'np.dot', (['v1_u', 'v2_u'], {}), '(v1_u, v2_u)\n', (2834, 2846), True, 'import numpy as np\n'), ((13358, 13367), 'math.tan', 'tan', (['Perr'], {}), '(Perr)\n', (13361, 13367), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((13370, 13380), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (13373, 13380), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((13669, 13679), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (13672, 13679), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((13682, 13691), 'math.sin', 'sin', (['Perr'], {}), '(Perr)\n', (13685, 13691), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((14008, 14018), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (14011, 14018), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((14019, 14028), 'math.cos', 'cos', (['delt'], {}), '(delt)\n', (14022, 14028), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((15893, 15905), 'math.degrees', 'degrees', (['cpa'], {}), '(cpa)\n', (15900, 15905), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((15556, 15596), 'math.degrees', 'degrees', (['(ref_sat.model.no_kozai * 1440.0)'], {}), '(ref_sat.model.no_kozai * 1440.0)\n', (15563, 15596), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/juano2310/CarND-Behavioral-Cloning-P3-Juan/blob/master/model.py
#https://github.com/udacity/self-driving-car/blob/master/steering-models/community-models/rambo/train.py
import os
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
import sklearn
from sklearn.model_selection import train_test_split
samples = []
with open('../../../output/conde_gazebo/interpolated.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
sklearn.utils.shuffle(samples)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
print("Number of traing samples: ",len(train_samples))
print("Number of validation samples: ",len(validation_samples))
#index,timestamp,width,height,frame_id,filename,angle,speed
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
#print(batch_samples)
images = []
angles = []
for batch_sample in batch_samples:
if batch_sample[5] != "filename":
path = os.path.normpath(batch_sample[5]).split(os.path.sep)
name = '../../../output/conde_gazebo/center/'+path[1].split('\\')[-1]
center_image = cv2.imread(name)
center_image = cv2.resize(center_image, (320,180)) #resize from 720x1280 to 180x320
#plt.imshow(left_image)
#plt.show()
angle = float(batch_sample[6])
images.append(center_image)
angles.append(angle)
flip_image = np.fliplr(center_image)
flip_angle = -1 * angle
images.append(flip_image)
angles.append(flip_angle)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
batch_size_value = 32
n_epoch = 150
train_generator = generator(train_samples, batch_size=batch_size_value)
validation_generator = generator(validation_samples, batch_size=batch_size_value)
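# Optional sanity check (editor's sketch, not part of the original script):
# pull one batch from the generator and confirm the shapes before training.
# Each sample also yields a horizontally flipped copy, so a batch of 32
# samples produces up to 64 images of shape (180, 320, 3).
#     X_batch, y_batch = next(train_generator)
#     print(X_batch.shape, y_batch.shape)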
model = Sequential()
# trim image to only see section with road
model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(180,320,3)))
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
#Nvidia model
model.add(Convolution2D(24, (5, 5), activation="relu", name="conv_1", strides=(2, 2)))
model.add(Convolution2D(36, (5, 5), activation="relu", name="conv_2", strides=(2, 2)))
model.add(Convolution2D(48, (5, 5), activation="relu", name="conv_3", strides=(2, 2)))
model.add(SpatialDropout2D(.5, dim_ordering='default'))
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_4", strides=(1, 1)))
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_5", strides=(1, 1)))
model.add(Flatten())
model.add(Dense(1164))
model.add(Dropout(.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(50, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.summary()
# checkpoint
filepath="../../weights/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1)
callbacks_list = [checkpoint]
# Fit the model
history_object = model.fit_generator(train_generator, steps_per_epoch=(len(train_samples) / batch_size_value), validation_data=validation_generator, validation_steps=(len(validation_samples)/batch_size_value), callbacks=callbacks_list, epochs=n_epoch)
# Save model
model.save('model.h5')
with open('model.json', 'w') as output_json:
output_json.write(model.to_json())
# Save TensorFlow model
tf.train.write_graph(K.get_session().graph.as_graph_def(), logdir='.', name='model.pb', as_text=False)
# Plot the training and validation loss for each epoch
print('Generating loss chart...')
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('model.png')
# Done
print('Done.')
|
[
"matplotlib.pyplot.title",
"csv.reader",
"keras.layers.Cropping2D",
"sklearn.model_selection.train_test_split",
"keras.layers.core.Flatten",
"keras.layers.core.SpatialDropout2D",
"os.path.normpath",
"keras.layers.core.Dropout",
"cv2.resize",
"keras.layers.core.Dense",
"keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.legend",
"numpy.fliplr",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"keras.backend.get_session",
"cv2.imread",
"keras.layers.Lambda",
"keras.layers.convolutional.Convolution2D",
"numpy.array",
"keras.models.Sequential",
"sklearn.utils.shuffle",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((952, 982), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['samples'], {}), '(samples)\n', (973, 982), False, 'import sklearn\n'), ((1019, 1059), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (1035, 1059), False, 'from sklearn.model_selection import train_test_split\n'), ((2883, 2895), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2893, 2895), False, 'from keras.models import Model, Sequential\n'), ((4092, 4197), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""auto"""', 'period': '(1)'}), "(filepath, monitor='val_loss', verbose=1, save_best_only=\n True, mode='auto', period=1)\n", (4107, 4197), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4831, 4871), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (4839, 4871), True, 'import matplotlib.pyplot as plt\n'), ((4872, 4916), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (4880, 4916), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4959), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (4926, 4959), True, 'import matplotlib.pyplot as plt\n'), ((4960, 4997), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (4970, 4997), True, 'import matplotlib.pyplot as plt\n'), ((4998, 5017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (5008, 5017), True, 'import matplotlib.pyplot as plt\n'), ((5018, 5083), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (5028, 5083), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5108), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""model.png"""'], {}), "('model.png')\n", (5095, 5108), True, 'import matplotlib.pyplot as plt\n'), ((876, 895), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (886, 895), False, 'import csv\n'), ((2950, 3016), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((50, 20), (0, 0))', 'input_shape': '(180, 320, 3)'}), '(cropping=((50, 20), (0, 0)), input_shape=(180, 320, 3))\n', (2960, 3016), False, 'from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D\n'), ((3104, 3137), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {}), '(lambda x: x / 255.0 - 0.5)\n', (3110, 3137), False, 'from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D\n'), ((3166, 3241), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5, 5)'], {'activation': '"""relu"""', 'name': '"""conv_1"""', 'strides': '(2, 2)'}), "(24, (5, 5), activation='relu', name='conv_1', strides=(2, 2))\n", (3179, 3241), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3253, 3328), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(5, 5)'], {'activation': '"""relu"""', 'name': '"""conv_2"""', 'strides': '(2, 2)'}), "(36, (5, 5), activation='relu', name='conv_2', strides=(2, 2))\n", (3266, 3328), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3340, 3415), 
'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(48)', '(5, 5)'], {'activation': '"""relu"""', 'name': '"""conv_3"""', 'strides': '(2, 2)'}), "(48, (5, 5), activation='relu', name='conv_3', strides=(2, 2))\n", (3353, 3415), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3427, 3472), 'keras.layers.core.SpatialDropout2D', 'SpatialDropout2D', (['(0.5)'], {'dim_ordering': '"""default"""'}), "(0.5, dim_ordering='default')\n", (3443, 3472), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3484, 3559), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'name': '"""conv_4"""', 'strides': '(1, 1)'}), "(64, (3, 3), activation='relu', name='conv_4', strides=(1, 1))\n", (3497, 3559), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3571, 3646), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'name': '"""conv_5"""', 'strides': '(1, 1)'}), "(64, (3, 3), activation='relu', name='conv_5', strides=(1, 1))\n", (3584, 3646), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3659, 3668), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (3666, 3668), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3681, 3692), 'keras.layers.core.Dense', 'Dense', (['(1164)'], {}), '(1164)\n', (3686, 3692), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3704, 3716), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3711, 3716), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3727, 3756), 'keras.layers.core.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (3732, 3756), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3768, 3780), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3775, 3780), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3791, 3819), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (3796, 3819), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3831, 3843), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3838, 3843), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3854, 3882), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3859, 3882), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3894, 3906), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3901, 3906), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3917, 3925), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3922, 3925), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((2493, 2509), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2501, 2509), True, 'import numpy as np\n'), ((2533, 2549), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (2541, 2549), True, 'import numpy as np\n'), ((2583, 2622), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2604, 2622), False, 'import sklearn\n'), ((4659, 4674), 
'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (4672, 4674), True, 'from keras import backend as K\n'), ((1895, 1911), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (1905, 1911), False, 'import cv2\n'), ((1948, 1984), 'cv2.resize', 'cv2.resize', (['center_image', '(320, 180)'], {}), '(center_image, (320, 180))\n', (1958, 1984), False, 'import cv2\n'), ((2289, 2312), 'numpy.fliplr', 'np.fliplr', (['center_image'], {}), '(center_image)\n', (2298, 2312), True, 'import numpy as np\n'), ((1710, 1743), 'os.path.normpath', 'os.path.normpath', (['batch_sample[5]'], {}), '(batch_sample[5])\n', (1726, 1743), False, 'import os\n')]
|
"""Symbolic model code generation.
Improvement ideas
-----------------
* Add compiled code to linecache so that tracebacks can be produced, like done
in the `IPython.core.compilerop` module.
"""
import abc
import collections
import collections.abc
import contextlib
import functools
import inspect
import itertools
import re
import types
import attrdict
import numpy as np
import jinja2
import sympy
from . import function, printing, utils, var
class Variables(var.SymbolObject):
"""Represents code generation model variables."""
pass
class Base:
"""Code generation model base."""
def __init__(self):
self.variables = Variables(self={})
"""Model variables dictionary."""
self.derivatives = {}
"""Dictionary of model derivatives, to optimize higher order diff."""
def __getattribute__(self, name):
"""Overloaded method to bind SymbolicSubsFunction objects."""
attr = super().__getattribute__(name)
if isinstance(attr, function.SymbolicSubsFunction) and attr.ismethod:
return functools.partial(attr, self)
else:
return attr
def _compute_derivative(self, fname, wrt):
assert isinstance(wrt, tuple)
if wrt == ():
return self.default_function_output(fname)
# See if the derivative is registered
dname = self.derivatives.get((fname,) + wrt)
if dname is not None:
return self.default_function_output(dname)
expr = self._compute_derivative(fname, wrt[1:])
wrt_array = self.variables[wrt[0]]
return utils.ndexpr_diff(expr, wrt_array)
def add_derivative(self, fname, wrt, dname):
if utils.isstr(wrt):
wrt = (wrt,)
elif not isinstance(wrt, tuple):
raise TypeError("argument wrt must be string or tuple")
args = self.function_codegen_arguments(fname, include_self=True)
expr = self._compute_derivative(fname, wrt)
deriv = function.SymbolicSubsFunction(args, expr)
setattr(self, dname, deriv)
self.derivatives[(fname,) + wrt] = dname
def set_default_members(self):
for key, val in self.variables['self'].items():
setattr(self, key, val)
@contextlib.contextmanager
def using_default_members(self):
"""Context manager that sets default attributes temporarily."""
set_members = {}
unset_members = []
# Get the values of the members before the entering the context
for k in self.variables['self'].keys():
try:
set_members[k] = getattr(self, k)
except AttributeError:
unset_members.append(k)
try:
# Set the members to their "default" values
self.set_default_members()
yield
finally:
# Restore previous values
for key, val in set_members.items():
setattr(self, key, val)
for key in unset_members:
delattr(self, key)
def function_codegen_arguments(self, fname, include_self=False):
f = getattr(self, fname)
param_names = inspect.signature(f).parameters.keys()
if include_self:
param_names = ['self', *param_names]
return function.Arguments((n,self.variables[n]) for n in param_names)
@utils.cached_method
def default_function_output(self, fname):
"""Function output for the default arguments."""
f = getattr(self, fname)
if isinstance(f, functools.partial):
if isinstance(f.func, function.SymbolicSubsFunction):
return f.func.default_output
args = self.function_codegen_arguments(fname)
with self.using_default_members():
return np.asarray(f(*args.values()))
def print_code(self, **options):
model_printer = ModelPrinter(self, **options)
return model_printer.print_class()
def compile_class(self, **options):
model_printer = ModelPrinter(self, **options)
return model_printer.class_obj()
def print_class(model, **options):
model_printer = ModelPrinter(model, **options)
return model_printer.print_class()
def compile_class(model, **options):
model_printer = ModelPrinter(model, **options)
return model_printer.class_obj()
model_template_src = '''\
# Model imports
import numpy as {{printer.numpy_alias}}
{% for import in m.imports -%}
import {{ import }}
{% endfor %}
class {{m.name}}({{ m.bases | join(', ') }}, metaclass={{m.metaclass}}):
"""Generated code for {{m.name}} from symbolic model."""
{% for method in m.methods %}
{{ method | indent }}
{% endfor %}
{% for name, value in m.assignments.items() -%}
{% if isndarray(value) -%}
{{ printer.print_ndarray(value, assign_to=name) }}
{% else -%}
{{ name }} = {{ value }}
{% endif -%}
{% endfor %}
'''
class ModelPrinter:
"""Generates numpy code for symbolic models."""
@utils.cached_class_property
def template(cls):
return jinja2.Template(model_template_src)
def __init__(self, model, **options):
self.model = model
"""The underlying symbolic model."""
self.options = options
"""Model printer options."""
try:
functions = options['functions']
except KeyError:
functions = getattr(model, 'generate_functions', [])
f_specs = []
for fname in functions:
output = self.model.default_function_output(fname)
arguments = self.model.function_codegen_arguments(fname, True)
f_specs.append((fname, output, arguments))
self._f_specs = f_specs
"""Function generation specifications."""
@property
def name(self):
"""Name of the generated class."""
return (getattr(self.model, 'generated_name', None)
or self.options.get('name', None)
or f'Generated{type(self.model).__name__}')
@property
def assignments(self):
"""Mapping of simple assignments to be made in the class code."""
try:
return self.options['assignments']
except KeyError:
return getattr(self.model, 'generate_assignments', {})
@property
def imports(self):
"""List of imports to include in the generated class code."""
try:
return self.options['imports']
except KeyError:
return getattr(self.model, 'generate_imports', [])
@property
def bases(self):
"""List of names of base classes for the generated model class."""
try:
return self.options['bases']
except KeyError:
return getattr(self.model, 'generated_bases', ['object'])
@property
def metaclass(self):
"""Metaclass for the generated model class."""
try:
return self.options['metaclass']
except KeyError:
return getattr(self.model, 'generated_metaclass', 'type')
@property
def methods(self):
for fname, output, arguments in self._f_specs:
fdef = function.print_function(fname, output, arguments)
yield fdef
def print_class(self):
isndarray = lambda var: isinstance(var, np.ndarray)
context = dict(m=self, printer=printing.Printer(), isndarray=isndarray)
return self.template.render(context)
def class_obj(self):
env = {}
exec(compile(self.print_class(), '<string>', 'exec'), env)
return env[self.name]
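# Hedged sketch of the linecache idea from the module docstring (not part of the
# original code): register the generated source under a synthetic filename so that
# tracebacks raised inside the compiled class can show source lines, similar to
# what IPython.core.compilerop does. The helper name and the synthetic filename
# are assumptions; ModelPrinter.class_obj above does not call it.
def _register_generated_source(src, filename='<generated model>'):
    """Add generated source to linecache so tracebacks can display it."""
    import linecache
    linecache.cache[filename] = (len(src), None, src.splitlines(True), filename)
    return filename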
def collect_symbols(f):
sig = inspect.signature(f)
if len(sig.parameters) < 2:
raise ValueError(f"method {f.__name__} should have at least 2 "
"parameters, 'self' and the collected symbols")
params = list(sig.parameters.values())
collected_symbols_arg_name = params[-1].name
new_sig = sig.replace(parameters=params[:-1])
nargs_wrapped = len(params) - 1
@functools.wraps(f)
def wrapper(self, *args):
# Validate arguments
nargs_in = len(args) + 1
if nargs_in != nargs_wrapped:
raise TypeError(f"{f.__name__} takes {nargs_wrapped} arguments "
f"but got only {nargs_in}")
# Create substitution dictionary
subs = self.variables['self'].subs_map(self)
for param, value in zip(params[1:-1], args):
subs.update(self.variables[param.name].subs_map(value))
# Create collected symbols AttrDict
collected_symbols = attrdict.AttrDict()
for var, expr in subs.items():
collected_symbols[var.name] = expr
ret = f(self, *args, **{collected_symbols_arg_name: collected_symbols})
# Ensure function return is an ndarray
return np.asarray(ret, object)
wrapper.__signature__ = new_sig
return wrapper
|
[
"jinja2.Template",
"functools.partial",
"numpy.asarray",
"inspect.signature",
"functools.wraps",
"attrdict.AttrDict"
] |
[((7770, 7790), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (7787, 7790), False, 'import inspect\n'), ((8156, 8174), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (8171, 8174), False, 'import functools\n'), ((5161, 5196), 'jinja2.Template', 'jinja2.Template', (['model_template_src'], {}), '(model_template_src)\n', (5176, 5196), False, 'import jinja2\n'), ((8743, 8762), 'attrdict.AttrDict', 'attrdict.AttrDict', ([], {}), '()\n', (8760, 8762), False, 'import attrdict\n'), ((9000, 9023), 'numpy.asarray', 'np.asarray', (['ret', 'object'], {}), '(ret, object)\n', (9010, 9023), True, 'import numpy as np\n'), ((1085, 1114), 'functools.partial', 'functools.partial', (['attr', 'self'], {}), '(attr, self)\n', (1102, 1114), False, 'import functools\n'), ((3242, 3262), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (3259, 3262), False, 'import inspect\n')]
|
#!/usr/bin/env python3
# coding: utf-8
"""
@author: <NAME> <EMAIL>
@last modified by: <NAME>
@file:qc.py
@time:2021/03/26
"""
from scipy.sparse import issparse
import numpy as np
def cal_qc(data):
"""
    calculate three QC indices: the number of genes expressed in the count matrix, the total counts per cell,
and the percentage of counts in mitochondrial genes.
:param data: the StereoExpData object.
:return: StereoExpData object storing quality control results.
"""
exp_matrix = data.exp_matrix
total_count = cal_total_counts(exp_matrix)
n_gene_by_count = cal_n_genes_by_counts(exp_matrix)
pct_counts_mt = cal_pct_counts_mt(data, exp_matrix, total_count)
data.cells.total_counts = total_count
data.cells.pct_counts_mt = pct_counts_mt
data.cells.n_genes_by_counts = n_gene_by_count
return data
def cal_total_counts(exp_matrix):
"""
    calculate the total gene counts per cell.
    :param exp_matrix: the expression matrix.
:return:
"""
total_count = np.array(exp_matrix.sum(1)).reshape(-1)
return total_count
def cal_per_gene_counts(exp_matrix):
"""
    calculate the total counts per gene.
    :param exp_matrix: the expression matrix.
:return:
"""
gene_counts = np.array(exp_matrix.sum(axis=0)).reshape(-1)
return gene_counts
def cal_n_cells_by_counts(exp_matrix):
"""
total counts of each gene.
    :param exp_matrix: the expression matrix.
:return:
"""
n_cells_by_counts = np.array(exp_matrix.sum(0)).reshape(-1)
return n_cells_by_counts
def cal_n_cells(exp_matrix):
"""
    Number of cells in which each gene is expressed.
    :param exp_matrix: the expression matrix.
:return:
"""
n_cells = exp_matrix.getnnz(axis=0) if issparse(exp_matrix) else np.count_nonzero(exp_matrix, axis=0)
return n_cells
def cal_n_genes_by_counts(exp_matrix):
n_genes_by_counts = exp_matrix.getnnz(axis=1) if issparse(exp_matrix) else np.count_nonzero(exp_matrix, axis=1)
return n_genes_by_counts
def cal_pct_counts_mt(data, exp_matrix, total_count):
if total_count is None:
total_count = cal_total_counts(exp_matrix)
mt_index = np.char.startswith(np.char.lower(data.gene_names), prefix='mt-')
mt_count = np.array(exp_matrix[:, mt_index].sum(1)).reshape(-1)
pct_counts_mt = mt_count / total_count * 100
return pct_counts_mt
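# Hedged usage sketch (not part of the original module): a tiny dense expression
# matrix with two cells and three genes, just to show what the per-cell metrics
# look like; cal_pct_counts_mt is skipped because it needs a StereoExpData object.
if __name__ == "__main__":
    demo_matrix = np.array([[1, 0, 4], [2, 3, 0]])
    print(cal_total_counts(demo_matrix))       # [5 5] total counts per cell
    print(cal_n_genes_by_counts(demo_matrix))  # [2 2] genes detected per cell
    print(cal_per_gene_counts(demo_matrix))    # [3 3 4] total counts per gene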
|
[
"scipy.sparse.issparse",
"numpy.char.lower",
"numpy.count_nonzero"
] |
[((1770, 1790), 'scipy.sparse.issparse', 'issparse', (['exp_matrix'], {}), '(exp_matrix)\n', (1778, 1790), False, 'from scipy.sparse import issparse\n'), ((1796, 1832), 'numpy.count_nonzero', 'np.count_nonzero', (['exp_matrix'], {'axis': '(0)'}), '(exp_matrix, axis=0)\n', (1812, 1832), True, 'import numpy as np\n'), ((1946, 1966), 'scipy.sparse.issparse', 'issparse', (['exp_matrix'], {}), '(exp_matrix)\n', (1954, 1966), False, 'from scipy.sparse import issparse\n'), ((1972, 2008), 'numpy.count_nonzero', 'np.count_nonzero', (['exp_matrix'], {'axis': '(1)'}), '(exp_matrix, axis=1)\n', (1988, 2008), True, 'import numpy as np\n'), ((2207, 2237), 'numpy.char.lower', 'np.char.lower', (['data.gene_names'], {}), '(data.gene_names)\n', (2220, 2237), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
def load_cancer():
# data, target, feature_names
result_dict = {'features': np.array(["Clump Thickness",
"Uniformity of Cell Size",
"Uniformity of Cell Shape",
"Marginal Adhesion",
"Single Epithelial Cell Size",
"Bare Nuclei",
"Bland Chromatin",
"Normal Nucleoli",
"Mitoses"])}
df_dict = pd.read_csv('breast_cancer_wisconsin.csv', header=0).to_dict('split')
df_data = np.array(df_dict['data'])
result_dict['data'] = df_data[:, :-1]
result_dict['target'] = df_data[:, -1]
return result_dict
def load_hepatitis():
result_dict = {'features': np.array(["AGE",
"SEX",
"STEROID",
"ANTIVIRAL",
"FATIGUE",
"MALAISE",
"ANOREXIA",
"LIVER BIG",
"LIVER FIRM",
"SPLEEN PALPABLE",
"SPIDERS",
"ASCITES",
"VARICES",
"BILIRUBIN",
"ALK PHOSPHATE",
"SGOT",
"ALBUMIN",
"PROTIME",
"HISTOLOGY"])
}
df_dict = pd.read_csv('hepatitis.csv', header=0).to_dict('split')
df_data = np.array(df_dict['data'])
result_dict['data'] = df_data[:, 1:]
result_dict['target'] = (df_data[:, 0]).astype(int)
return result_dict
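# Hedged usage sketch (not part of the original module): both loaders return a
# dict with 'features', 'data' and 'target' keys; the CSV files are assumed to be
# present in the working directory, as the loaders above expect.
if __name__ == "__main__":
    cancer = load_cancer()
    X, y = cancer['data'], cancer['target']
    print(cancer['features'].shape, X.shape, y.shape)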
|
[
"pandas.read_csv",
"numpy.array"
] |
[((754, 779), 'numpy.array', 'np.array', (["df_dict['data']"], {}), "(df_dict['data'])\n", (762, 779), True, 'import numpy as np\n'), ((2022, 2047), 'numpy.array', 'np.array', (["df_dict['data']"], {}), "(df_dict['data'])\n", (2030, 2047), True, 'import numpy as np\n'), ((125, 337), 'numpy.array', 'np.array', (["['Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',\n 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei',\n 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses']"], {}), "(['Clump Thickness', 'Uniformity of Cell Size',\n 'Uniformity of Cell Shape', 'Marginal Adhesion',\n 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin',\n 'Normal Nucleoli', 'Mitoses'])\n", (133, 337), True, 'import numpy as np\n'), ((943, 1189), 'numpy.array', 'np.array', (["['AGE', 'SEX', 'STEROID', 'ANTIVIRAL', 'FATIGUE', 'MALAISE', 'ANOREXIA',\n 'LIVER BIG', 'LIVER FIRM', 'SPLEEN PALPABLE', 'SPIDERS', 'ASCITES',\n 'VARICES', 'BILIRUBIN', 'ALK PHOSPHATE', 'SGOT', 'ALBUMIN', 'PROTIME',\n 'HISTOLOGY']"], {}), "(['AGE', 'SEX', 'STEROID', 'ANTIVIRAL', 'FATIGUE', 'MALAISE',\n 'ANOREXIA', 'LIVER BIG', 'LIVER FIRM', 'SPLEEN PALPABLE', 'SPIDERS',\n 'ASCITES', 'VARICES', 'BILIRUBIN', 'ALK PHOSPHATE', 'SGOT', 'ALBUMIN',\n 'PROTIME', 'HISTOLOGY'])\n", (951, 1189), True, 'import numpy as np\n'), ((670, 722), 'pandas.read_csv', 'pd.read_csv', (['"""breast_cancer_wisconsin.csv"""'], {'header': '(0)'}), "('breast_cancer_wisconsin.csv', header=0)\n", (681, 722), True, 'import pandas as pd\n'), ((1952, 1990), 'pandas.read_csv', 'pd.read_csv', (['"""hepatitis.csv"""'], {'header': '(0)'}), "('hepatitis.csv', header=0)\n", (1963, 1990), True, 'import pandas as pd\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock PyReach Color Camera."""
from typing import Callable, Optional
import numpy as np # type: ignore
from pyreach import calibration as cal
from pyreach import color_camera
from pyreach import core
from pyreach.mock import calibration_mock as cal_mock
class ColorFrameMock(color_camera.ColorFrame):
"""A single color camera frame taken at a specific time.
Attributes:
time: The time in seconds of the frame since 1970.
sequence: The sequence number of the color frame.
device_type: The JSON device type string.
device_name: The JSON device name string.
color_image: A color image as a (DX,DY,3) array of uint8's.
calibration: The calibration when the image is captured.
"""
def __init__(self, time: float, sequence: int,
device_type: str, device_name: str,
color_image: np.ndarray,
calibration: Optional[cal.Calibration]) -> None:
"""Initialize a MockColorFrame."""
self._time: float = time
self._sequence = sequence
self._device_type: str = device_type
self._device_name: str = device_name
self._color_image: np.ndarray = color_image
self._calibration: Optional[cal.Calibration] = calibration
@property
def time(self) -> float:
"""Return timestamp of the ColorFrame."""
return self._time
@property
def sequence(self) -> int:
"""Sequence number of the ColorFrame."""
return self._sequence
@property
def device_type(self) -> str:
"""Return the Reach device type."""
return self._device_type
@property
def device_name(self) -> str:
"""Return the Reach device name."""
return self._device_name
@property
def color_image(self) -> np.ndarray:
"""Return the color image as a (DX,DY,3)."""
return self._color_image
@property
def calibration(self) -> Optional[cal.Calibration]:
"""Return the Calibration for for the ColorFrame."""
return self._calibration
def pose(self) -> Optional[core.Pose]:
"""Return the pose of the camera when the image is taken."""
raise NotImplementedError
class ColorCameraMock(color_camera.ColorCamera):
"""Mock ColorCamera class."""
def __init__(self) -> None:
"""Init a MockColorCamera."""
pass
def add_update_callback(
self,
callback: Callable[[color_camera.ColorFrame], bool],
finished_callback: Optional[Callable[[],
None]] = None) -> Callable[[], None]:
"""Add a callback function to be invoked when a new frame is available.
Args:
callback: A function to be invoked when a new frame is available. Returns
False to continue receiving new images. Returns True to stop further
update.
finished_callback: Optional callback, called when the callback is stopped
or if the camera is closed.
Returns:
A function that when called stops the callbacks.
"""
raise NotImplementedError
def start_streaming(self, request_period: float = 0.1) -> None:
"""Start streaming of camera images.
Args:
request_period: The number of seconds between frames. Defaults to .1
second between frames.
"""
pass
def stop_streaming(self) -> None:
"""Stop streaming camera images."""
raise NotImplementedError
def supports_tagged_request(self) -> bool:
"""Return True if tagged requests are supported."""
raise NotImplementedError
def enable_tagged_request(self) -> None:
"""Enable tagged requests."""
raise NotImplementedError
def disable_tagged_request(self) -> None:
"""Disable tagged requests."""
raise NotImplementedError
def image(self) -> Optional[color_camera.ColorFrame]:
"""Return the latest image if it exists."""
color_frame_mock: ColorFrameMock = ColorFrameMock(
1.0, 0, "device_type", "device_name", np.zeros((3, 5, 3),
dtype=np.uint8),
cal_mock.CalibrationMock("device_type", "device_name",
"color_camera_link_name"))
color_frame: color_camera.ColorFrame = color_frame_mock
return color_frame
def fetch_image(self,
timeout: float = 15.0) -> Optional[color_camera.ColorFrame]:
"""Fetch a new image or possibly times out.
Args:
timeout: The optional amount of time to wait for a camera frame. If not
specified, 15 seconds is the default timeout.
Returns:
Returns the color image or None for a timeout.
"""
raise NotImplementedError
def async_fetch_image(self,
callback: Optional[Callable[[color_camera.ColorFrame],
None]] = None,
error_callback: Optional[Callable[[core.PyReachStatus],
None]] = None,
timeout: float = 30) -> None:
"""Fetch a new image asynchronously.
    The callback function will be invoked when a new image is available.
Args:
callback: A callback function that is called when an image arrives. If the
camera fails to load an image, the callback is not called.
error_callback: Optional callback that is called if there is an error.
timeout: Timeout for the fetch, defaults to 30 seconds.
"""
raise NotImplementedError
@property
def pose(self) -> Optional[core.Pose]:
"""Return the latest pose of the camera."""
raise NotImplementedError
|
[
"pyreach.mock.calibration_mock.CalibrationMock",
"numpy.zeros"
] |
[((4429, 4464), 'numpy.zeros', 'np.zeros', (['(3, 5, 3)'], {'dtype': 'np.uint8'}), '((3, 5, 3), dtype=np.uint8)\n', (4437, 4464), True, 'import numpy as np\n'), ((4529, 4614), 'pyreach.mock.calibration_mock.CalibrationMock', 'cal_mock.CalibrationMock', (['"""device_type"""', '"""device_name"""', '"""color_camera_link_name"""'], {}), "('device_type', 'device_name', 'color_camera_link_name'\n )\n", (4553, 4614), True, 'from pyreach.mock import calibration_mock as cal_mock\n')]
|
import numpy as np
from scipy.fftpack import rfft, irfft, rfftfreq
from ....routines import rescale
def fourier_filter(data: np.ndarray, fs: float,
lp_freq: float = None, hp_freq: float = None, bs_freqs: list = [],
trans_width: float = 1, band_width: float = 1) -> np.ndarray:
"""
    Fourier filter along the last axis of ``data`` with lowpass, highpass and bandstop options.
Parameters
----------
``data`` : np.ndarray
``fs``: float
sampling frequency
``lp_freq``: float, optional
lowpass frequency (default is None)
``hp_freq``: float, optional
highpass frequency (default is None)
``bs_freqs``: list, optional
bandstop frequencies (default is [])
``trans_width``: float, optional
width of the transition region between bands (default is 1)
in frequency units
``band_width``: float, optional
width of the band to remove (default is 1)
in frequency units
Returns
-------
np.ndarray
filtered ``data``
"""
T = data.shape[-1]
d = 1. / fs
freq = rfftfreq(T, d)
f_data = rfft(data, axis=-1)
freq_resp = create_freq_resp(freq, lp_freq,
hp_freq, bs_freqs,
trans_width, band_width)
f_data = np.apply_along_axis(lambda x: x * freq_resp, -1, f_data)
data_filtered = irfft(f_data)
return data_filtered
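# Hedged usage sketch (not part of the original module, and the signal is made
# up): a 10 Hz sine plus a 60 Hz hum, sampled at 500 Hz and lowpass filtered at
# 30 Hz. Only the lowpass response is exercised here.
def _lowpass_demo():
    fs = 500.0
    t = np.arange(0, 1.0, 1.0 / fs)
    signal = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)
    return fourier_filter(signal, fs, lp_freq=30.0)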
def create_freq_resp(freq: np.ndarray, lp_freq: float,
hp_freq: float, bs_freqs: list,
trans_width: float, band_width: float) -> np.ndarray:
"""Calculates frequency responce for given ``freq``
Parameters
----------
``freq``: np.ndarray, shape=(N)
frequency array
``lp_freq``: float
lowpass frequency
``hp_freq``: float
highpass frequency
``bs_freqs``: list
bandstop frequencies
``trans_width``: float
width of the transition region between bands
``band_width``: float
width of the band to remove
Returns
--------
np.ndarray, shape=(N)
        frequency response array with values ranging from 0 to 1
"""
freq_resp = np.ones_like(freq)
if lp_freq is not None:
freq_resp *= FR_lowpass(freq, lp_freq, trans_width)
if hp_freq is not None:
freq_resp *= FR_highpass(freq, hp_freq, trans_width)
for bs_freq in bs_freqs:
freq_resp *= FR_bandstop(freq, bs_freq, trans_width, band_width)
return freq_resp
def FR_lowpass(freq: np.ndarray, lp_freq: float,
trans_width: float) -> np.ndarray:
"""Frequency responce for lowpass filter
Parameters
----------
``freq``: np.ndarray
frequency array
``lp_freq``: float
lowpass frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
sigma = trans_width / 6.
return 1 / (1 + np.exp((freq - lp_freq) / sigma))
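# Added remark (not from the original author): with sigma = trans_width / 6, the
# logistic response above moves from about 0.953 at lp_freq - trans_width/2 down
# to about 0.047 at lp_freq + trans_width/2 (a +/-3 sigma swing), so trans_width
# covers essentially the whole transition of the filter edge.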
def FR_highpass(freq: np.ndarray, hp_freq: float,
trans_width: float) -> np.ndarray:
"""Frequency responce for highpass filter
Parameters
----------
``freq``: np.ndarray
frequency array
``hp_freq``: float
highpass frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
sigma = trans_width / 6.
return 1 / (1 + np.exp((hp_freq - freq) / sigma))
def FR_bandstop(freq: np.ndarray, bs_freq: float,
trans_width: float, band_width: float) -> np.ndarray:
"""Frequency responce for bandstop filter
Parameters
----------
``freq``: np.ndarray
frequency array
``bs_freq``: float
bandstop frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
left = FR_lowpass(freq, bs_freq - band_width / 2., trans_width)
right = FR_highpass(freq, bs_freq + band_width / 2., trans_width)
return rescale(left + right)
|
[
"scipy.fftpack.rfftfreq",
"scipy.fftpack.rfft",
"numpy.ones_like",
"numpy.apply_along_axis",
"numpy.exp",
"scipy.fftpack.irfft"
] |
[((1143, 1157), 'scipy.fftpack.rfftfreq', 'rfftfreq', (['T', 'd'], {}), '(T, d)\n', (1151, 1157), False, 'from scipy.fftpack import rfft, irfft, rfftfreq\n'), ((1171, 1190), 'scipy.fftpack.rfft', 'rfft', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (1175, 1190), False, 'from scipy.fftpack import rfft, irfft, rfftfreq\n'), ((1372, 1428), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: x * freq_resp)', '(-1)', 'f_data'], {}), '(lambda x: x * freq_resp, -1, f_data)\n', (1391, 1428), True, 'import numpy as np\n'), ((1449, 1462), 'scipy.fftpack.irfft', 'irfft', (['f_data'], {}), '(f_data)\n', (1454, 1462), False, 'from scipy.fftpack import rfft, irfft, rfftfreq\n'), ((2265, 2283), 'numpy.ones_like', 'np.ones_like', (['freq'], {}), '(freq)\n', (2277, 2283), True, 'import numpy as np\n'), ((3115, 3147), 'numpy.exp', 'np.exp', (['((freq - lp_freq) / sigma)'], {}), '((freq - lp_freq) / sigma)\n', (3121, 3147), True, 'import numpy as np\n'), ((3656, 3688), 'numpy.exp', 'np.exp', (['((hp_freq - freq) / sigma)'], {}), '((hp_freq - freq) / sigma)\n', (3662, 3688), True, 'import numpy as np\n')]
|
import numpy as np
import unittest
from numpy.testing import assert_array_less
from GPyOpt.core.errors import InvalidConfigError
from GPyOpt.core.task.space import Design_space
from GPyOpt.experiment_design import initial_design
class TestInitialDesign(unittest.TestCase):
def setUp(self):
self.space = [
{'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1},
{'name': 'var_2', 'type': 'discrete', 'domain': (0,1,2,3)},
{'name': 'var_3', 'type': 'categorical', 'domain': (0,1,2)}
]
self.design_space = Design_space(self.space)
self.bandit_variable = {'name': 'stations', 'type': 'bandit', 'domain': np.array([[1, 1], [2, 2], [3, 3], [4, 4]])}
def assert_samples_against_space(self, samples):
lower_bound_var1 = self.design_space.name_to_variable['var_1'].domain[0]
upper_bound_var1 = self.design_space.name_to_variable['var_1'].domain[1]
self.assertTrue((samples[:,0] >= lower_bound_var1).all())
self.assertTrue((samples[:,0] <= upper_bound_var1).all())
var2_values = self.design_space.name_to_variable['var_2'].domain
self.assertTrue(np.in1d(samples[:,1], var2_values).all())
var3_values = self.design_space.name_to_variable['var_3'].domain
self.assertTrue(np.in1d(samples[:,2], var3_values).all())
def test_grid_design(self):
init_points_count = 3
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
init_points_count = 1000
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_grid_design_with_multiple_continuous_variables(self):
self.space.extend([
{'name': 'var_5', 'type': 'continuous', 'domain':(0,5), 'dimensionality': 2},
{'name': 'var_6', 'type': 'continuous', 'domain':(-5,5), 'dimensionality': 1}
])
self.design_space = Design_space(self.space)
init_points_count = 10
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), 1)
init_points_count = 100
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), 3**4)
def test_random_design(self):
init_points_count = 10
samples = initial_design('random', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_random_design_with_constraints(self):
constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
self.design_space = Design_space(self.space, constraints=constraints)
initial_points_count = 10
samples = initial_design('random', self.design_space, initial_points_count)
self.assert_samples_against_space(samples)
self.assertTrue((samples[:,0]**2 - 1 < 0).all())
def test_random_design_with_bandit_only(self):
space = [self.bandit_variable]
self.design_space = Design_space(space)
initial_points_count = 3
samples = initial_design('random', self.design_space, initial_points_count)
self.assertEqual(len(samples), initial_points_count)
def test_nonrandom_designs_with_constrains(self):
constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
self.design_space = Design_space(self.space, constraints=constraints)
initial_points_count = 10
with self.assertRaises(InvalidConfigError):
initial_design('grid', self.design_space, initial_points_count)
with self.assertRaises(InvalidConfigError):
initial_design('latin', self.design_space, initial_points_count)
with self.assertRaises(InvalidConfigError):
initial_design('sobol', self.design_space, initial_points_count)
def test_latin_design(self):
init_points_count = 10
samples = initial_design('latin', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_latin_design_with_multiple_continuous_variables(self):
self.space.extend([
{'name': 'var_5', 'type': 'continuous', 'domain':(0,5), 'dimensionality': 2},
{'name': 'var_6', 'type': 'continuous', 'domain':(-5,5), 'dimensionality': 1}
])
self.design_space = Design_space(self.space)
init_points_count = 10
samples = initial_design('latin', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_sobol_design(self):
init_points_count = 10
samples = initial_design('sobol', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
|
[
"GPyOpt.core.task.space.Design_space",
"numpy.array",
"GPyOpt.experiment_design.initial_design",
"numpy.in1d"
] |
[((592, 616), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {}), '(self.space)\n', (604, 616), False, 'from GPyOpt.core.task.space import Design_space\n'), ((1451, 1511), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (1465, 1511), False, 'from GPyOpt.experiment_design import initial_design\n'), ((1673, 1733), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (1687, 1733), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2158, 2182), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {}), '(self.space)\n', (2170, 2182), False, 'from GPyOpt.core.task.space import Design_space\n'), ((2233, 2293), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (2247, 2293), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2387, 2447), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (2401, 2447), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2577, 2639), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""random"""', 'self.design_space', 'init_points_count'], {}), "('random', self.design_space, init_points_count)\n", (2591, 2639), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2904, 2953), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {'constraints': 'constraints'}), '(self.space, constraints=constraints)\n', (2916, 2953), False, 'from GPyOpt.core.task.space import Design_space\n'), ((3007, 3072), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""random"""', 'self.design_space', 'initial_points_count'], {}), "('random', self.design_space, initial_points_count)\n", (3021, 3072), False, 'from GPyOpt.experiment_design import initial_design\n'), ((3301, 3320), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['space'], {}), '(space)\n', (3313, 3320), False, 'from GPyOpt.core.task.space import Design_space\n'), ((3373, 3438), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""random"""', 'self.design_space', 'initial_points_count'], {}), "('random', self.design_space, initial_points_count)\n", (3387, 3438), False, 'from GPyOpt.experiment_design import initial_design\n'), ((3659, 3708), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {'constraints': 'constraints'}), '(self.space, constraints=constraints)\n', (3671, 3708), False, 'from GPyOpt.core.task.space import Design_space\n'), ((4215, 4276), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""latin"""', 'self.design_space', 'init_points_count'], {}), "('latin', self.design_space, init_points_count)\n", (4229, 4276), False, 'from GPyOpt.experiment_design import initial_design\n'), ((4702, 4726), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {}), '(self.space)\n', (4714, 4726), False, 'from GPyOpt.core.task.space import Design_space\n'), ((4777, 4838), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""latin"""', 'self.design_space', 'init_points_count'], {}), "('latin', 
self.design_space, init_points_count)\n", (4791, 4838), False, 'from GPyOpt.experiment_design import initial_design\n'), ((5031, 5092), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""sobol"""', 'self.design_space', 'init_points_count'], {}), "('sobol', self.design_space, init_points_count)\n", (5045, 5092), False, 'from GPyOpt.experiment_design import initial_design\n'), ((698, 740), 'numpy.array', 'np.array', (['[[1, 1], [2, 2], [3, 3], [4, 4]]'], {}), '([[1, 1], [2, 2], [3, 3], [4, 4]])\n', (706, 740), True, 'import numpy as np\n'), ((3808, 3871), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'initial_points_count'], {}), "('grid', self.design_space, initial_points_count)\n", (3822, 3871), False, 'from GPyOpt.experiment_design import initial_design\n'), ((3937, 4001), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""latin"""', 'self.design_space', 'initial_points_count'], {}), "('latin', self.design_space, initial_points_count)\n", (3951, 4001), False, 'from GPyOpt.experiment_design import initial_design\n'), ((4067, 4131), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""sobol"""', 'self.design_space', 'initial_points_count'], {}), "('sobol', self.design_space, initial_points_count)\n", (4081, 4131), False, 'from GPyOpt.experiment_design import initial_design\n'), ((1188, 1223), 'numpy.in1d', 'np.in1d', (['samples[:, 1]', 'var2_values'], {}), '(samples[:, 1], var2_values)\n', (1195, 1223), True, 'import numpy as np\n'), ((1328, 1363), 'numpy.in1d', 'np.in1d', (['samples[:, 2]', 'var3_values'], {}), '(samples[:, 2], var3_values)\n', (1335, 1363), True, 'import numpy as np\n')]
|
import time
import argparse
import numpy as np
import json
import os
import sys
# from matplotlib import pyplot
from torch.utils.data import DataLoader
from preprocessing import Constants
from util import construct_data_from_json
from dgl_treelstm.KNN import KNN
from dgl_treelstm.nn_models import *
from dgl_treelstm.metric import *
from preprocessing import Vocab
from preprocessing import varTree
from dgl_treelstm.dgl_dataset import dgl_dataset
from check_time import process_data
from train import pad_feature_batcher, batcher
from preprocessing.Vector_Dataset import Vector_Dataset
from preprocessing.Tree_Dataset import Tree_Dataset
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve
import warnings
from dataset_filename_seperation import get_dataset_seperation
warnings.filterwarnings('ignore')
# simulation for different models
class Simulation:
def __init__(self, model, time_selection="adjust", threshold=200):
self.model = model
        # this threshold can be adaptive; it is updated during the simulation to find a better boundary for timeouts
self.threshold = threshold
# save data for adaptive threshold
self.time_record = {"timeout":[], "solvable":[]}
        # this is the actual timeout setting, which is fixed for comparison with the ground truth
self.time_out_setting = 200
self.time_selection = time_selection
if isinstance(self.model, TreeLSTM):
self.model_type = "TreeLSTM"
self.preprocess = Tree_Dataset.generate_feature_dataset
elif isinstance(self.model, KNN):
self.model_type = "KNN"
self.preprocess = Vector_Dataset.generate_feature_dataset
elif isinstance(self.model, LSTM):
self.model_type = "LSTM"
self.preprocess = Vector_Dataset.generate_feature_dataset
def load_model(self, input):
raise NotImplementedError
if self.model_type == "KNN":
dataset = th.load("data/gnucore/fv2/gnucore_train")
x = [i.feature for i in dataset]
y = [1 if i.gettime("adjust") > 300 else 0 for i in dataset]
self.model.fit(x, y)
elif self.model_type == "LSTM":
model = th.load("checkpoints/gnucore/pad_feature_l_z.pkl")["model"]
self.model.load_state_dict(model)
elif self.model_type == "TreeLSTM":
model = th.load("checkpoints/g_tree_feature_t_z_r_200.pkl")["model"]
self.model.load_state_dict(model)
def script_to_feature(self, data):
raise NotImplementedError
# feature = self.preprocess(script)
if isinstance(data, varTree):
dataloader = dgl_dataset([data], None)
iterator = iter(dataloader)
data = next(iterator)
feature = data.logic_tree
solve_time = data.gettime("original")
elif self.model_type == "LSTM":
dataloader = DataLoader(dataset=[data], batch_size=1, collate_fn=pad_feature_batcher('cpu', 'original'),
shuffle=False, num_workers=0)
iterator = iter(dataloader)
data = next(iterator)
feature = rnn_utils.pack_padded_sequence(data.feature, data.data_len, enforce_sorted=False,
batch_first=True)
solve_time = data.label
else:
feature, solve_time = data.logic_tree, data.gettime("original")
return feature, solve_time
def predict(self, feature, truth):
raise NotImplementedError
if self.model_type == "KNN":
predict_result = self.model.incremental_predict(feature, truth)
skip = predict_result
elif self.model_type == "LSTM":
self.model.eval()
with th.no_grad():
predict_result = self.model(feature)
skip = predict_result > self.threshold
else:
self.model.eval()
with th.no_grad():
h = th.zeros((1, 150))
c = th.zeros((1, 150))
predict_result = self.model(feature, h, c)
skip = predict_result > self.threshold
return predict_result, skip
def modify_threshold(self, result, truth):
if self.model_type == "KNN":
return
if result < self.threshold and truth > self.time_out_setting:
self.time_record["timeout"].append(result)
elif result < self.threshold and truth < self.time_out_setting:
self.time_record["solvable"].append(result)
if result < self.threshold and truth > self.time_out_setting:
self.dynamic_threshold()
print("decrease threshold to ", str(self.threshold))
return
def dynamic_threshold(self):
timeout_list = np.array(self.time_record["timeout"])
solvable_list = self.time_record["solvable"]
try:
solvable_limit = max(np.percentile(solvable_list, 95), np.mean(solvable_list), 60)
suitable_timeout = list(filter(lambda x: x > solvable_limit, timeout_list))
if len(suitable_timeout) == 0:
suitable_timeout = [solvable_limit]
suitable_min_timeout = min(suitable_timeout)
suitable_min_timeout = min(suitable_min_timeout, self.threshold)
if isinstance(suitable_min_timeout, th.Tensor):
suitable_min_timeout = suitable_min_timeout.item()
max_solvable = max(filter(lambda x:x <= suitable_min_timeout, self.time_record["solvable"]))
if isinstance(max_solvable, th.Tensor):
max_solvable = max_solvable.item()
self.threshold = max(suitable_min_timeout - 1, (suitable_min_timeout + max_solvable) / 2,
self.threshold - 50, 60)
except (IndexError,ValueError):
pass
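# Hedged worked example (not part of the original module) of the adaptive
# threshold above; the numbers are made up. Setting model_type by hand mirrors
# what make_PCC_output does below for its Simulation(None) instance.
def _adaptive_threshold_demo():
    sim = Simulation(None)
    sim.model_type = "DNN"
    sim.time_out_setting = 200   # the constructor default, made explicit
    # two solvable queries, then a 120s prediction whose query actually times out
    for pred, truth in [(30, 10), (50, 40), (120, 300)]:
        sim.modify_threshold(pred, truth)
    # threshold drops from 200 to 150 = max(119, 85, 200 - 50, 60)
    return sim.threshold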
class KNN_Simulation(Simulation):
def __init__(self, model, time_selection="adjust", threshold=200):
super(KNN_Simulation, self).__init__(model, time_selection, threshold)
self.model_type = "KNN"
self.preprocess = Vector_Dataset.generate_feature_dataset
self.separate_test = False
def load_model(self, input):
# dataset = th.load(input)
dataset = construct_data_from_json(input)
# test_filename = ["echo", "ginstall", "expr", "tail", "seq", "split", "test", "yes", "chgrp", "date", "expand",
# "head", "nohup", "printf", "sha1sum", "stat", "timeout", "uniq", "nice", "pr"]
# test_filename = ["expand"]
# dataset = list(filter(lambda x:x.filename not in test_filename, dataset))
x = [i.feature for i in dataset]
if "smt-comp" in input:
fn = [x.filename.split("_")[0] for x in dataset]
else:
fn = [i.filename for i in dataset]
y = [1 if i.gettime(self.time_selection) > self.time_out_setting else 0 for i in dataset]
self.model.fit(x, y)
self.model.filename = np.array(fn)
def script_to_feature(self, data):
if not self.separate_test:
if ".smt2" in data.filename:
fn = data.filename.split("_")[0]
else:
fn = data.filename
self.model.remove_test(fn)
self.separate_test = True
feature, solve_time = data.feature, data.gettime(self.time_selection)
return feature, solve_time
def predict(self, feature, truth):
predict_result = self.model.incremental_predict(feature, truth)
skip = predict_result
return predict_result, skip
class LSTM_Simulation(Simulation):
def __init__(self, model, time_selection="adjust", threshold=200):
super(LSTM_Simulation, self).__init__(model, time_selection, threshold)
self.model_type = "LSTM"
self.preprocess = Vector_Dataset.generate_feature_dataset
def load_model(self, input):
model = th.load(input, map_location='cpu')["model"]
self.model.load_state_dict(model)
def script_to_feature(self, data):
dataloader = DataLoader(dataset=[data], batch_size=1, collate_fn=pad_feature_batcher('cpu', self.time_selection),
shuffle=False, num_workers=0)
iterator = iter(dataloader)
data = next(iterator)
feature = rnn_utils.pack_padded_sequence(data.feature, data.data_len, enforce_sorted=False,
batch_first=True)
solve_time = data.label.item()
return feature, solve_time
def predict(self, feature, truth):
self.model.eval()
with th.no_grad():
predict_result = self.model(feature)
skip = predict_result > self.threshold
return predict_result, skip
class TreeLSTM_Simulation(Simulation):
def __init__(self, model, time_selection="adjust", threshold=200):
super(TreeLSTM_Simulation, self).__init__(model, time_selection, threshold)
self.model_type = "TreeLSTM"
self.preprocess = Tree_Dataset.generate_feature_dataset
def load_model(self, input):
model = th.load(input, map_location='cpu')["model"]
# model = th.load("checkpoints/g_tree+feature_t_z_r_200.pkl")["model"]
self.model.load_state_dict(model)
def script_to_feature(self, data):
smt_vocab_file = './data/gnucore/smt.vocab'
smt_vocab = Vocab(filename=smt_vocab_file,
data=[Constants.UNK_WORD])
data = dgl_dataset([data], None, vocab=smt_vocab, time_selection=self.time_selection, time_threshold=self.threshold)
dataloader = DataLoader(dataset=data, batch_size=1, collate_fn=batcher("cpu"),
shuffle=False, num_workers=0)
iterator = iter(dataloader)
data = next(iterator)
feature = data.graph
solve_time = data.label[0].item()
return data, solve_time
def predict(self, feature, truth):
self.model.eval()
n = feature.graph.number_of_nodes()
with th.no_grad():
h = th.zeros((n, 150))
c = th.zeros((n, 150))
predict_result = self.model(feature, h, c)
skip = predict_result[0] > self.threshold
return predict_result[0], skip
# result saving structure
class Evalution:
def __init__(self, pred=np.array([]), truth=np.array([]), time_out_setting=200):
self.pred = self.get_numpy(pred)
self.truth = self.get_numpy(truth)
self.classify_result = np.array([])
self.time_out_setting = time_out_setting
def get_numpy(self, data):
if isinstance(data, th.Tensor):
data = data.cpu().numpy()
else:
data = data
return data
def add(self, pred, truth, classify_result):
self.pred = np.append(self.pred, self.get_numpy(pred))
self.truth = np.append(self.truth, self.get_numpy(truth))
self.classify_result = np.append(self.classify_result, self.get_numpy(classify_result))
def score(self):
truth = [1 if x > self.time_out_setting else 0 for x in self.truth]
acc = accuracy_score(truth, self.classify_result)
pre = precision_score(truth, self.classify_result)
rec = recall_score(truth, self.classify_result)
f1 = f1_score(truth, self.classify_result)
return acc, pre, rec, f1
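# Hedged worked example (not part of the original module): with a 200s timeout
# setting, a 100s query kept and a 300s query skipped are both classified
# correctly, so all four metrics come out as 1.0. The numbers are made up.
def _evalution_demo():
    ev = Evalution(time_out_setting=200)
    ev.add(pred=np.array([150.0, 250.0]), truth=np.array([100.0, 300.0]),
           classify_result=np.array([0.0, 1.0]))
    return ev.score()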
# time calculation
class Time_Section:
def __init__(self):
self.original_time = 0
self.predict_time = 0
        # overall time for the simulation comparison (without solving phase 1, which is added manually)
self.final_time = 0
self.preprocessing_time = 0
def update(self, predict_result, solve_time):
self.original_time += solve_time
# for the first solving phase t1=1s
self.final_time += 1
# skip if predicted timeout
if not predict_result:
self.final_time += solve_time
def add_prediction_time(self, predict_used_time, preprocessing_time):
self.preprocessing_time = preprocessing_time
self.predict_time = predict_used_time
self.final_time = self.final_time + predict_used_time + preprocessing_time
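# Hedged worked example (not part of the original module): one query solved in 20s
# and one 300s query that the predictor skips. Each query still pays the 1s
# first-phase cost, and the predictor's own time is added at the end.
def _time_section_demo():
    ts = Time_Section()
    ts.update(predict_result=False, solve_time=20)   # kept: 1s phase-1 + 20s solve
    ts.update(predict_result=True, solve_time=300)   # skipped: only the 1s phase-1
    ts.add_prediction_time(predict_used_time=0.5, preprocessing_time=2.0)
    return ts.original_time, ts.final_time           # (320, 24.5)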
# load the test data and turn scripts into features just like in training; we do not save the result because of the
# number of programs, and because we want to include the preprocessing time in the final time
def load_data(model, input):
dataset = None
if model == "Tree-LSTM":
dataset = Tree_Dataset(treeforassert=True, feature_number_limit=100)
elif model == "lstm":
dataset = Vector_Dataset(feature_number_limit=50)
elif model == "KNN":
dataset = Vector_Dataset(feature_number_limit=2)
else:
dataset = Tree_Dataset(feature_number_limit=100)
if "smt-comp" in input:
test_filename = input.split("/")[-1]
input = "/".join(input.split("/")[:-1])
dataset.fs_list = dataset.generate_feature_dataset(input, fileprefix=test_filename)
if len(dataset.fs_list) == 0:
print("smt-comp file are not separated with filename, but please use the similar structure, more information in simulation_smt-comp.md")
# test_filename1 = [x.filename for x in dataset.fs_list]
# test_file = list(filter(lambda x:x.split("_")[0] == test_filename, test_filename1))
# dataset.fs_list = dataset.split_with_filename(test_file)[1]
input = input + "/" + test_filename
else:
if "klee" in input:
            # the klee preprocessing is time-consuming because of the SMT script structure, so we save the result for next time
            # for the other datasets we extract features every time the simulation runs.
data_input = "data/klee/" + input.split("/")[-1] + model_name
try:
if model == "KNN":
dataset = construct_data_from_json(data_input)
else:
dataset = th.load(data_input)
except (TypeError,FileNotFoundError):
dataset.generate_feature_dataset(input)
if model != "KNN":
th.save(dataset, data_input)
else:
dataset.generate_feature_dataset(input)
return dataset.fs_list, input
# mainly for cross-dataset prediction with the adaptive KNN model; relies on my model naming pattern
def identify_dataset(input, dataset):
for i in ["busybox", "smt-comp", "klee"]:
if i in input:
return i
if "g_" in input or "gnucore/" in input:
return "gnucore"
if "b_" in input:
return "busybox"
if "s_" in input:
return "smt-comp"
if "k_" in input:
return "klee"
return "gnucore"
# our baseline result, not usable without result from PCC
def make_PCC_output(input, output_result):
if os.path.exists(input):
with open(input, "r") as f:
data = json.load(f)
serial_result = sorted(data["result"], key=lambda x:(len(x[0]), x[0]))
else:
serial_result = []
for i in range(1,4):
with open(input[:-5] + "_" + str(i) + ".json", "r") as f:
data = json.load(f)
serial_result.extend(sorted(data["result"], key=lambda x: (len(x[0]), x[0])))
od = serial_result
for i in ["arch", "chgrp", "csplit", "dirname", "fmt", "id", "md5sum", "mv", "pinky", "readlink", "seq",
"sleep", "tac", "tsort", "uptime", "base64", "chmod", "cut", "du", "fold", "join", "mkdir",
"nice", "pr", "rm", "setuidgid", "sort", "tail", "tty", "users", "basename", "chroot", "date", "expand", "ginstall",
"link", "mkfifo", "nl", "printenv", "rmdir", "sha1sum", "split", "test", "uname", "vdir",
"cat", "comm", "df", "expr", "head", "ln", "mknod", "od", "printf", "runcon", "shred", "stat", "touch", "unexpand", "wc",
"chcon", "cp", "dir", "factor", "hostname", "ls", "mktemp", "pathchk", "ptx", "shuf", "su",
"tr", "unlink", "who", "ifconfig", "rpm", "Sage2", "klogd", "mcm", "lfsr"]:
serial_result = list(filter(lambda x: x[0].startswith(i), od))
if len(serial_result) == 0:
continue
print(i)
truth = [x[2] for x in serial_result]
if isinstance(truth[0], list):
truth = list(map(lambda x:0 if x[0] else 300, truth))
pred = [x[1] for x in serial_result]
dt_simulation = Simulation(None)
dt_simulation.model_type = "DNN"
if isinstance(pred[0], int):
classify_result = pred
else:
threshold_list = []
for i in range(len(truth)):
dt_simulation.modify_threshold(pred[i], truth[i])
threshold_list.append(dt_simulation.threshold)
classify_result = [1.0 if pred[i] > threshold_list[i] else 0.0 for i in range(len(pred))]
# classify_result = [1.0 if x > data["time_limit_setting"] else 0.0 for x in pred]
original_time = sum(truth)
pred_truth_tuple = list(
zip(range(len(pred)), pred, truth, classify_result))
pred_truth_diff_tuple = list(filter(lambda a: a[3] != (a[2] > data["time_limit_setting"]), pred_truth_tuple))
pred_truth_tuple = list(filter(lambda a: a[3] != 0, pred_truth_tuple))
final_time = original_time - sum([x[2] for x in pred_truth_tuple])
truth = [1 if x > data["time_limit_setting"] else 0 for x in truth]
acc = accuracy_score(truth, classify_result)
pre = precision_score(truth, classify_result)
rec = recall_score(truth, classify_result)
f1 = f1_score(truth, classify_result)
print_output = {"train_dataset": "gnucore", "test_dataset": "gnucore", "pred_truth_diff_tuple": pred_truth_diff_tuple,
"original_time": original_time,
"total_time": final_time, "input": input, "pos_num":sum(truth), "tp": sum(truth)*rec,
"fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec}
print(print_output)
output = {"train_dataset": "gnucore", "test_dataset": "gnucore", "predicted_result": pred,
"acutal_solving_time": truth, "original_time": original_time, "total_time": final_time,
"metrics": {"acc": acc, "pre": pre, "rec": rec, "f1": f1, "pos_num":sum(truth), "tp": sum(truth)*rec,
"fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec},
"time_out_setting": data["time_limit_setting"],
"model": "PCC", "input": input}
output = json.dumps(output, indent=4)
# print(print_output)
print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
# fpr, tpr, thresholds = roc_curve(truth, pred)
# pyplot.plot(fpr, tpr, lw=1, label="lstm")
# # print(fpr, tpr, thresholds)
# pyplot.xlim([0.00, 1.0])
# pyplot.ylim([0.00, 1.0])
# pyplot.xlabel("False Positive Rate")
# pyplot.ylabel("True Positive Rate")
# pyplot.title("ROC")
# pyplot.legend(loc="lower right")
# pyplot.savefig(r"./ROC.png")
# pyplot.show()
if output_result:
try:
outpur_path = "_".join(["gnucore", input.split("/")[-1], "DNN"]) + ".json"
with open("simulation_result/" + outpur_path, "w")as f:
f.write(output)
except:
with open("simulation_result/output.json", "w")as f:
f.write(output)
# output the result for a single program
# to do: adaptive threshold is not supported for the regression simulation
def make_output(dsn1, dsn2, input, simulation, result, time_section, output_result=True, plot_picture=True):
pred_truth_tuple = list(zip(range(len(result.pred)), result.pred.tolist(), result.truth.tolist(), result.classify_result))
pred_truth_tuple = list(filter(lambda a:a[3] != (a[2] > simulation.time_out_setting), pred_truth_tuple))
truth = [1 if x > simulation.time_out_setting else 0 for x in result.truth]
acc = accuracy_score(truth, result.classify_result)
pre = precision_score(truth, result.classify_result)
rec = recall_score(truth, result.classify_result)
f1 = f1_score(truth, result.classify_result)
confusion_matrix = np.zeros((2, 2))
for t, p in zip(truth, result.classify_result):
confusion_matrix[t][int(p)] += 1
# print_output = {"train_dataset": dsn1, "test_dataset": dsn2, "pred_truth_diff_tuple": pred_truth_tuple,
# "original_time": time_section.original_time,
# "predict_time":time_section.predict_time + time_section.preprocessing_time,
# "total_time": time_section.final_time, "input":input, "pos_num":sum(truth), "tp": sum(truth)*rec,
# "fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec}
print_output = {"timeout_query_num":sum(truth), "true-positive number": confusion_matrix[1][1],
"false-negative number": confusion_matrix[1][0], "false-positive number": confusion_matrix[0][1]}
output = {"train_dataset": dsn1, "test_dataset": dsn2, "predicted_result": result.pred.tolist(),
"acutal_solving_time": result.truth.tolist(), "original_time": time_section.original_time, "predict_time":
time_section.predict_time + time_section.preprocessing_time, "total_time": time_section.final_time,
"metrics":{"acc": acc, "pre": pre, "rec": rec, "f1": f1}, "time_out_setting": simulation.time_out_setting,
"model":simulation.model_type, "input":input, "pos_num":sum(truth), "tp": confusion_matrix[1][1],
"fn": confusion_matrix[1][0], "fp": confusion_matrix[0][1]}
if not len(result.truth):
return
output = json.dumps(output, indent=4)
print("train dataset:" + dsn1)
# print("test dataset:" + dsn2)
print("test program:" + input)
print("prediction truth difference tuple(index, predicted result, truth, classification result):")
print(pred_truth_tuple)
print("original solving time:" + str(int(time_section.original_time)) + "s")
print("prediction time:" + str(int(time_section.predict_time + time_section.preprocessing_time)) + "s")
print("solving time with the predictor:" + str(int(time_section.final_time)) + "s")
print(print_output)
print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
# if simulation.model_type != 'KNN':
# fpr, tpr, thresholds = roc_curve(result.truth > simulation.time_out_setting, result.pred)
# pyplot.plot(fpr, tpr, lw=1, label=simulation.model_type)
# # print(fpr, tpr, thresholds)
# pyplot.xlim([0.00, 1.0])
# pyplot.ylim([0.00, 1.0])
# pyplot.xlabel("False Positive Rate")
# pyplot.ylabel("True Positive Rate")
# pyplot.title("ROC")
# pyplot.legend(loc="lower right")
# pyplot.savefig(r"./ROC.png")
# pyplot.show()
if output_result:
try:
if args.model_name == "KNN":
identify = ""
elif args.classification:
identify = "_c"
elif args.adapt:
identify = "_m"
else:
identify = "_r"
            output_path = "_".join([dsn1, input.split("/")[-1], simulation.model_type]) + identify + ".json"
            with open("simulation_result/" + output_path, "w") as f:
f.write(output)
except:
            with open("simulation_result/output.json", "w") as f:
f.write(output)
# automatic partition selection: cross validation produces three result partitions per model,
# so pick the one that matches the test program (used for the hardcoded switch below)
def choose_input(dataset, input, load_path):
fn = get_dataset_seperation(dataset)
f1, f2, f3 = fn[0], fn[1], fn[2]
input = input.split("/")[-1]
if dataset == "smt-comp":
input = input.split("_")[0]
if os.path.exists(load_path):
return load_path
if input in f1:
load_path = ".".join([load_path.split(".")[0] + "_0", load_path.split(".")[1]])
elif input in f2:
load_path = ".".join([load_path.split(".")[0] + "_1", load_path.split(".")[1]])
elif input in f3:
load_path = ".".join([load_path.split(".")[0] + "_2", load_path.split(".")[1]])
else:
load_path = ""
return load_path
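# Illustrative example only (not part of the original logic): with the suffixing above, a load path such as
# "checkpoints/simulation/g_tree+feature_t_z_r_200.pkl" becomes
# "checkpoints/simulation/g_tree+feature_t_z_r_200_1.pkl" when the test program falls into the second fold (f2).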
# Simulate solving in the original query order: queries predicted to time out are skipped,
# and the time difference is counted as time saved.
# The simulation may not reflect the real situation exactly, since wrongly skipping a path changes path selection; but if
# skipped paths are merely given a low priority they are only deferred, so more paths can be executed in the same time budget.
def simulation_for_single_program(test_directory, args):
s = time.time()
input_index = args.input_index
load_path = args.load_file
    # some setup; all simulation variants share this single entry point
if not args.regression:
regression = False
else:
input_list[int(input_index)] = input_list[int(input_index)].replace("_r_", "_c_")
regression = True
if model_name == "KNN":
knn = KNN()
simulation = KNN_Simulation(knn, time_selection=args.time_selection)
if not input_index:
input_index = 8
elif model_name == "lstm":
lstm = LSTM(150, regression, False)
simulation = LSTM_Simulation(lstm, time_selection=args.time_selection)
if not input_index:
input_index = 0
else:
tree_lstm = TreeLSTM(133, 150, 150, 1, 0.5, regression, False, cell_type='childsum', pretrained_emb=None)
simulation = TreeLSTM_Simulation(tree_lstm, time_selection=args.time_selection)
if not input_index:
input_index = 2
    # set the timeout threshold
    # for the 'original' time labels the data was collected with a 100s timeout, so a larger threshold is not useful
simulation.time_out_setting = args.threshold
    if test_directory is None:
test_directory = input_list[int(input_index)]
serial_data, test_input = load_data(model_name, test_directory)
time_section = Time_Section()
result = Evalution(time_out_setting=args.threshold)
    # identify dataset names (used for cross-project evaluation)
dsn1 = identify_dataset(input_list[int(input_index)], None)
dsn2 = identify_dataset(test_input, serial_data)
    # load the model for the selected approach
    if load_path is None:
load_path = input_list[int(input_index)]
if model_name != "KNN":
load_path = choose_input(dsn1, test_input, load_path)
simulation.load_model(load_path)
s1 = time.time()
aindex = 0
    # simulated solving loop: recorded solving times are replayed rather than re-solved, since actual solving is expensive and its time may vary
for data in serial_data:
data_index = len(result.truth)
feature, solve_time = simulation.script_to_feature(data)
predict_result, skip = simulation.predict(feature, 1 if solve_time > simulation.time_out_setting else 0)
if len(result.pred) % 500 == 0:
print(len(result.pred))
        if model_name != "KNN" and regression and args.adapt:
            simulation.modify_threshold(predict_result, solve_time)
if model_name != "KNN" and not regression:
pred = th.argmax(F.log_softmax(predict_result), 1)
skip = pred == 1
predict_result = 1 if skip else 0
time_section.update(skip, solve_time)
result.add(predict_result, solve_time, skip)
aindex += 1
e = time.time()
time_section.add_prediction_time(e - s1, s1 - s)
make_output(dsn1, dsn2, test_directory, simulation, result, time_section, True, True)
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default="KNN", help="model type, allow 'lstm', 'tree-lstm', 'KNN'")
parser.add_argument('--test_directory', default=None, help="the script saving directory for test program")
parser.add_argument('--load_file', default=None, help="the path to model for evaluation")
    parser.add_argument('--input_index', type=int, default=8, help="shortcut for switching the evaluation model, "
                                                               "hardcoded, not recommended to change")
    parser.add_argument('--time_selection', default='original', help="the time label you want to use, allow "
                "'original', 'adjust'; 'adjust' stands for 'z3' for now, modify when experimenting with other solvers")
    parser.add_argument('--regression', action='store_true', help="use for time prediction (regression), "
                "not for timeout constraint classification")
parser.add_argument('--adapt', action='store_true', help="an adaptive time threshold for neural network "
"models used for regression, because the predicted timeout threshold varies for different programs")
parser.add_argument('--threshold', type=int, default=200, help="the timeout threshold for solving")
parser.add_argument('--batch-size', type=int, default=64, help="some lstm setting in case you change the model")
parser.add_argument('--x-size', type=int, default=300)
parser.add_argument('--h-size', type=int, default=150)
parser.add_argument('--epochs', type=int, default=40)
parser.add_argument('--num_classes', type=float, default=2)
args = parser.parse_args()
print()
print("Simulation start:")
print(args)
return args
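# A minimal invocation sketch (the script file name below is illustrative only):
#   python simulation.py --model_name lstm --test_directory data/example/arch --threshold 200
#   python simulation.py --model_name KNN --input_index 8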
if __name__ == '__main__':
args = parse_arg()
model_name = args.model_name
input_index = args.input_index
    # hardcoded shortcuts for switching the evaluation model
input_list = ["checkpoints/simulation/g_serial_pad_feature_l_z_r_200.pkl",#0
"checkpoints/simulation/g_serial_tree_feature_t_z_r_200.pkl",#1
"checkpoints/simulation/g_tree+feature_t_z_r_200.pkl",#2
"checkpoints/simulation/b_serial_pad_feature_l_z_r_200.pkl",#3
"checkpoints/simulation/b_serial_tree_feature_t_z_r_200.pkl",#4
"checkpoints/simulation/b_tree+feature_t_z_r_200.pkl",#5
"checkpoints/simulation/s_serial_pad_feature_l_z_r_200.pkl",#6
"checkpoints/simulation/s_tree_feature_t_z_r_200.pkl",#7
"data/gnucore/fv2_serial/train",#8
"data/busybox/fv2_serial/train",#9
"data/smt-comp/fv2_serial/train",#10
"data/klee/fv2_serial/train",#11
"checkpoints/simulation/k_serial_pad_feature_l_z_r_200.pkl",#12
"checkpoints/simulation/k_serial_tree_feature_l_z_r_200.pkl"]#13
    if args.load_file is None and (args.input_index > 13 or args.input_index < 0):
        print("these paths are hardcoded shortcuts for specific directory names")
print(input_list)
exit(0)
# test for all programs in a dataset, the home directory is "data/gnucore/single_test"
# test_input_list = []
# for root, dir, files in os.walk("data/gnucore/single_test"):
# if not root.endswith("single_test"):
# test_input_list.append(root)
# for i in test_input_list:
# input = i
# simulation_for_single_program(test_directory, input_index)
if args.test_directory:
test_directory = args.test_directory
else:
test_directory = "data/example/arch"
# some test
# test_directory = "data/smt-comp/QF_BV/Sage"
# test_directory = "data/klee/arch-43200/solver-queries.smt2"
simulation_for_single_program(test_directory, args)
# make_PCC_output("data/PCC_result/mcm_c.json", False)
    # regression simulation (legacy; details not fully remembered), evaluated with different time thresholds
# input = "checkpoints/smt-comp/serial_pad_feature_evaluation_c.pkl"
# if os.path.exists(input):
# serial_result = th.load(input)["result"]
# else:
# serial_result = []
# for i in range(1, 4):
# a = th.load(input[:-4] + "_" + str(i) + ".pkl")["result"]
# serial_result.extend(a)
# result = serial_result
# pred = np.array(list(map(lambda x:x[0], result)))
# truth = np.array(list(map(lambda x:x[1], result)))
# for a in [40,50,60,100,150,200,250]:
# if truth.dtype == "int64":
# t, p = truth, pred
# else:
# t, p = truth > a, pred > a
# print("threshold", a)
# acc = accuracy_score(t, p)
# pre = precision_score(t, p)
# rec = recall_score(t, p)
# f1 = f1_score(t, p)
# print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
# if truth.dtype == "int64":
# break
# try:
# fpr, tpr, thresholds = precision_recall_curve(truth > a, pred)
# pyplot.plot(tpr, fpr, lw=1, label="lstm")
# # print(fpr)
# # print(tpr)
# # print(thresholds)
# i = np.searchsorted(thresholds, a)
# print(fpr[i], tpr[i], thresholds[i])
# pyplot.xlim([0.00, 1.0])
# pyplot.ylim([0.00, 1.0])
# pyplot.xlabel("False Positive Rate")
# pyplot.ylabel("True Positive Rate")
# pyplot.title("ROC")
# pyplot.legend(loc="lower right")
# pyplot.savefig(r"./ROC.png")
# pyplot.show()
# except (IndexError, ValueError):
# pass
|
[
"argparse.ArgumentParser",
"sklearn.metrics.accuracy_score",
"json.dumps",
"sklearn.metrics.f1_score",
"numpy.mean",
"preprocessing.Tree_Dataset.Tree_Dataset",
"preprocessing.Vector_Dataset.Vector_Dataset",
"os.path.exists",
"train.batcher",
"util.construct_data_from_json",
"preprocessing.Vocab",
"sklearn.metrics.recall_score",
"numpy.percentile",
"json.load",
"dgl_treelstm.dgl_dataset.dgl_dataset",
"warnings.filterwarnings",
"numpy.zeros",
"time.time",
"dgl_treelstm.KNN.KNN",
"numpy.array",
"sklearn.metrics.precision_score",
"train.pad_feature_batcher",
"dataset_filename_seperation.get_dataset_seperation"
] |
[((858, 891), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (881, 891), False, 'import warnings\n'), ((14925, 14946), 'os.path.exists', 'os.path.exists', (['input'], {}), '(input)\n', (14939, 14946), False, 'import os\n'), ((20244, 20289), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20258, 20289), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20300, 20346), 'sklearn.metrics.precision_score', 'precision_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20315, 20346), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20357, 20400), 'sklearn.metrics.recall_score', 'recall_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20369, 20400), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20410, 20449), 'sklearn.metrics.f1_score', 'f1_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20418, 20449), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20473, 20489), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (20481, 20489), True, 'import numpy as np\n'), ((21996, 22024), 'json.dumps', 'json.dumps', (['output'], {'indent': '(4)'}), '(output, indent=4)\n', (22006, 22024), False, 'import json\n'), ((24024, 24055), 'dataset_filename_seperation.get_dataset_seperation', 'get_dataset_seperation', (['dataset'], {}), '(dataset)\n', (24046, 24055), False, 'from dataset_filename_seperation import get_dataset_seperation\n'), ((24199, 24224), 'os.path.exists', 'os.path.exists', (['load_path'], {}), '(load_path)\n', (24213, 24224), False, 'import os\n'), ((25090, 25101), 'time.time', 'time.time', ([], {}), '()\n', (25099, 25101), False, 'import time\n'), ((26914, 26925), 'time.time', 'time.time', ([], {}), '()\n', (26923, 26925), False, 'import time\n'), ((27837, 27848), 'time.time', 'time.time', ([], {}), '()\n', (27846, 27848), False, 'import time\n'), ((28023, 28048), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (28046, 28048), False, 'import argparse\n'), ((4911, 4948), 'numpy.array', 'np.array', (["self.time_record['timeout']"], {}), "(self.time_record['timeout'])\n", (4919, 4948), True, 'import numpy as np\n'), ((6384, 6415), 'util.construct_data_from_json', 'construct_data_from_json', (['input'], {}), '(input)\n', (6408, 6415), False, 'from util import construct_data_from_json\n'), ((7116, 7128), 'numpy.array', 'np.array', (['fn'], {}), '(fn)\n', (7124, 7128), True, 'import numpy as np\n'), ((9527, 9584), 'preprocessing.Vocab', 'Vocab', ([], {'filename': 'smt_vocab_file', 'data': '[Constants.UNK_WORD]'}), '(filename=smt_vocab_file, data=[Constants.UNK_WORD])\n', (9532, 9584), False, 'from preprocessing import Vocab\n'), ((9626, 9740), 'dgl_treelstm.dgl_dataset.dgl_dataset', 'dgl_dataset', (['[data]', 'None'], {'vocab': 'smt_vocab', 'time_selection': 'self.time_selection', 'time_threshold': 'self.threshold'}), '([data], None, vocab=smt_vocab, time_selection=self.\n time_selection, time_threshold=self.threshold)\n', 
(9637, 9740), False, 'from dgl_treelstm.dgl_dataset import dgl_dataset\n'), ((10477, 10489), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10485, 10489), True, 'import numpy as np\n'), ((10497, 10509), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10505, 10509), True, 'import numpy as np\n'), ((10649, 10661), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10657, 10661), True, 'import numpy as np\n'), ((11266, 11309), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11280, 11309), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((11324, 11368), 'sklearn.metrics.precision_score', 'precision_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11339, 11368), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((11383, 11424), 'sklearn.metrics.recall_score', 'recall_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11395, 11424), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((11438, 11475), 'sklearn.metrics.f1_score', 'f1_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11446, 11475), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((12602, 12660), 'preprocessing.Tree_Dataset.Tree_Dataset', 'Tree_Dataset', ([], {'treeforassert': '(True)', 'feature_number_limit': '(100)'}), '(treeforassert=True, feature_number_limit=100)\n', (12614, 12660), False, 'from preprocessing.Tree_Dataset import Tree_Dataset\n'), ((17558, 17596), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17572, 17596), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((17611, 17650), 'sklearn.metrics.precision_score', 'precision_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17626, 17650), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((17665, 17701), 'sklearn.metrics.recall_score', 'recall_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17677, 17701), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((17715, 17747), 'sklearn.metrics.f1_score', 'f1_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17723, 17747), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((18710, 18738), 'json.dumps', 'json.dumps', (['output'], {'indent': '(4)'}), '(output, indent=4)\n', (18720, 18738), False, 'import json\n'), ((25453, 25458), 'dgl_treelstm.KNN.KNN', 'KNN', ([], {}), '()\n', (25456, 25458), False, 'from dgl_treelstm.KNN import KNN\n'), ((2753, 2778), 'dgl_treelstm.dgl_dataset.dgl_dataset', 'dgl_dataset', (['[data]', 'None'], {}), '([data], None)\n', (2764, 2778), False, 'from dgl_treelstm.dgl_dataset import 
dgl_dataset\n'), ((12705, 12744), 'preprocessing.Vector_Dataset.Vector_Dataset', 'Vector_Dataset', ([], {'feature_number_limit': '(50)'}), '(feature_number_limit=50)\n', (12719, 12744), False, 'from preprocessing.Vector_Dataset import Vector_Dataset\n'), ((15003, 15015), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15012, 15015), False, 'import json\n'), ((5048, 5080), 'numpy.percentile', 'np.percentile', (['solvable_list', '(95)'], {}), '(solvable_list, 95)\n', (5061, 5080), True, 'import numpy as np\n'), ((5082, 5104), 'numpy.mean', 'np.mean', (['solvable_list'], {}), '(solvable_list)\n', (5089, 5104), True, 'import numpy as np\n'), ((8250, 8297), 'train.pad_feature_batcher', 'pad_feature_batcher', (['"""cpu"""', 'self.time_selection'], {}), "('cpu', self.time_selection)\n", (8269, 8297), False, 'from train import pad_feature_batcher, batcher\n'), ((9807, 9821), 'train.batcher', 'batcher', (['"""cpu"""'], {}), "('cpu')\n", (9814, 9821), False, 'from train import pad_feature_batcher, batcher\n'), ((12788, 12826), 'preprocessing.Vector_Dataset.Vector_Dataset', 'Vector_Dataset', ([], {'feature_number_limit': '(2)'}), '(feature_number_limit=2)\n', (12802, 12826), False, 'from preprocessing.Vector_Dataset import Vector_Dataset\n'), ((12855, 12893), 'preprocessing.Tree_Dataset.Tree_Dataset', 'Tree_Dataset', ([], {'feature_number_limit': '(100)'}), '(feature_number_limit=100)\n', (12867, 12893), False, 'from preprocessing.Tree_Dataset import Tree_Dataset\n'), ((15254, 15266), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15263, 15266), False, 'import json\n'), ((13964, 14000), 'util.construct_data_from_json', 'construct_data_from_json', (['data_input'], {}), '(data_input)\n', (13988, 14000), False, 'from util import construct_data_from_json\n'), ((3058, 3096), 'train.pad_feature_batcher', 'pad_feature_batcher', (['"""cpu"""', '"""original"""'], {}), "('cpu', 'original')\n", (3077, 3096), False, 'from train import pad_feature_batcher, batcher\n')]
|
import numpy as np
import pandas as pd
students = 250
nr_to_label = {0: 'bike', 1: 'car', 2: 'bus 40', 3: 'bus 240'}
label_to_nr = {v: k for k, v in nr_to_label.items()}
def choice(income, distance, lazy):
"""
Generate a choice based on the params
"""
if income < 500:
if distance < 8 and distance * lazy * lazy < 120:
return label_to_nr['bike']
elif income > 350:
return label_to_nr['bus 40']
else:
return label_to_nr['bus 240']
if lazy < 3:
return label_to_nr['bus 40']
return label_to_nr['car']
# generate some random numbers
idc = np.array([np.round(np.random.normal(300, 200, size=students).clip(min=0)),
np.random.poisson(8, size=students),
np.random.randint(1, 10, size=students)]).T
# get their favourite mode of transport
idct = np.hstack((idc, np.array([[choice(*row) for row in idc]]).T))
# add some randomness by shuffling some labels
replace = np.where(np.random.random(size=students) < 0.15)[0]
idct[replace, 3] = np.random.randint(0, 4, size=replace.size)
# store result
df = pd.DataFrame(idct, columns=['income', 'distance', 'lazy', 'transport'])
df['transport'] = df['transport'].map(nr_to_label)
df.to_csv('transport.csv', sep=';', encoding='utf-8')
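# A minimal sketch of reading the generated file back (assumes the same separator and encoding):
# df_loaded = pd.read_csv('transport.csv', sep=';', encoding='utf-8', index_col=0)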
|
[
"pandas.DataFrame",
"numpy.random.randint",
"numpy.random.random",
"numpy.random.poisson",
"numpy.random.normal"
] |
[((1058, 1100), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': 'replace.size'}), '(0, 4, size=replace.size)\n', (1075, 1100), True, 'import numpy as np\n'), ((1122, 1193), 'pandas.DataFrame', 'pd.DataFrame', (['idct'], {'columns': "['income', 'distance', 'lazy', 'transport']"}), "(idct, columns=['income', 'distance', 'lazy', 'transport'])\n", (1134, 1193), True, 'import pandas as pd\n'), ((722, 757), 'numpy.random.poisson', 'np.random.poisson', (['(8)'], {'size': 'students'}), '(8, size=students)\n', (739, 757), True, 'import numpy as np\n'), ((775, 814), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': 'students'}), '(1, 10, size=students)\n', (792, 814), True, 'import numpy as np\n'), ((996, 1027), 'numpy.random.random', 'np.random.random', ([], {'size': 'students'}), '(size=students)\n', (1012, 1027), True, 'import numpy as np\n'), ((650, 691), 'numpy.random.normal', 'np.random.normal', (['(300)', '(200)'], {'size': 'students'}), '(300, 200, size=students)\n', (666, 691), True, 'import numpy as np\n')]
|
# test function gradient
def limetr_gradient():
import numpy as np
from limetr.__init__ import LimeTr
ok = True
# setup test problem
# -------------------------------------------------------------------------
model = LimeTr.testProblem(use_trimming=True,
use_constraints=True,
use_regularizer=True,
use_uprior=True,
use_gprior=True,
know_obs_std=False,
share_obs_std=True)
tol = 1e-6
# test the gradient
# -------------------------------------------------------------------------
x = np.random.randn(model.k)
x[model.idx_gamma] = 0.1
x[model.idx_delta] = 0.1
tr_grad = model.gradient(x, use_ad=True)
my_grad = model.gradient(x)
err = np.linalg.norm(tr_grad - my_grad)
ok = ok and err < tol
if not ok:
print('err', err)
print('tr_grad', tr_grad)
print('my_grad', my_grad)
return ok
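# A minimal way to run the check directly (a sketch, not part of the original test harness):
# if __name__ == '__main__':
#     assert limetr_gradient()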
|
[
"limetr.__init__.LimeTr.testProblem",
"numpy.linalg.norm",
"numpy.random.randn"
] |
[((244, 408), 'limetr.__init__.LimeTr.testProblem', 'LimeTr.testProblem', ([], {'use_trimming': '(True)', 'use_constraints': '(True)', 'use_regularizer': '(True)', 'use_uprior': '(True)', 'use_gprior': '(True)', 'know_obs_std': '(False)', 'share_obs_std': '(True)'}), '(use_trimming=True, use_constraints=True, use_regularizer\n =True, use_uprior=True, use_gprior=True, know_obs_std=False,\n share_obs_std=True)\n', (262, 408), False, 'from limetr.__init__ import LimeTr\n'), ((715, 739), 'numpy.random.randn', 'np.random.randn', (['model.k'], {}), '(model.k)\n', (730, 739), True, 'import numpy as np\n'), ((887, 920), 'numpy.linalg.norm', 'np.linalg.norm', (['(tr_grad - my_grad)'], {}), '(tr_grad - my_grad)\n', (901, 920), True, 'import numpy as np\n')]
|
from common.vec_env.vec_logger import VecLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
GAMMA = 0.99
TAU = 1.00
N_STEPS = 5
CLIP_GRAD = 50
COEF_VALUE = 0.5
COEF_ENTROPY = 0.01
def train(args, venv, model, path, device):
N = args.num_processes
net = model(venv.observation_space.shape[0], venv.action_space.n).to(device)
net.train()
optimizer = optim.Adam(net.parameters(), lr=args.lr, amsgrad=args.amsgrad)
vlogger = VecLogger(N=N, path=path)
vlogger.add_model(net)
state = venv.reset()
state_v = torch.from_numpy(state).float().to(device)
hx = torch.zeros(N, 512).to(device)
cx = torch.zeros(N, 512).to(device)
t = 0
while t < args.num_timesteps:
# Reset gradients
loss_value_v = torch.zeros(1, 1).to(device)
loss_policy_v = torch.zeros(1, 1).to(device)
loss_entropy_v = torch.zeros(1, 1).to(device)
gae_v = torch.zeros(N, 1).to(device)
hx.detach_()
cx.detach_()
reward_vs = []
done_vs = []
value_vs = []
log_prob_action_vs = []
entropy_vs = []
for step in range(N_STEPS):
# Perform action according to policy
value_v, logit_v, (hx, cx) = net(state_v, (hx, cx))
prob_v = F.softmax(logit_v, dim=1)
action_v = prob_v.multinomial(num_samples=1)
action = action_v.data.cpu().numpy()
log_prob_v = F.log_softmax(logit_v, dim=1)
log_prob_action_v = log_prob_v.gather(1, action_v)
entropy_v = -(log_prob_v * prob_v).sum(dim=1, keepdim=True)
# Receive reward and new state
state, reward, done, info = venv.step(action)
t += N
reward = np.expand_dims(reward, axis=1)
done = np.expand_dims(done, axis=1)
info = np.expand_dims(info, axis=1)
vlogger.log(t, reward, info)
state_v = torch.from_numpy(state).float().to(device)
reward_v = torch.from_numpy(reward).float().to(device)
done_v = torch.from_numpy(done.astype('int')).float().to(device)
reward_vs.append(reward_v)
done_vs.append(done_v)
value_vs.append(value_v)
log_prob_action_vs.append(log_prob_action_v)
entropy_vs.append(entropy_v)
# Reset the LSTM state if done
hx = (1 - done_v) * hx
cx = (1 - done_v) * cx
        # Bootstrap the return R from the value of the final state (zeroed if the episode is done)
R_v = (1 - done_v) * net(state_v, (hx, cx))[0]
value_vs.append(R_v)
for i in reversed(range(len(reward_vs))):
R_v = (1 - done_vs[i]) * GAMMA * R_v + reward_vs[i]
# Accumulate gradients
adv_v = R_v.detach() - value_vs[i]
            # Generalized Advantage Estimation
delta_t = reward_vs[i] + (1 - done_vs[i]) * GAMMA * value_vs[i + 1] - value_vs[i]
gae_v = gae_v * (1 - done_vs[i]) * GAMMA * TAU + delta_t
loss_value_v += (0.5 * adv_v.pow(2)).sum()
            loss_policy_v -= (log_prob_action_vs[i] * gae_v.detach()).sum()  # caution: detach() so policy gradients do not flow through the advantage estimate
loss_entropy_v -= (entropy_vs[i]).sum()
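        # For reference, a sketch of the recurrences implemented in the loop above (comments only):
        #   R_t     = r_t + (1 - done_t) * GAMMA * R_{t+1}
        #   delta_t = r_t + (1 - done_t) * GAMMA * V(s_{t+1}) - V(s_t)
        #   gae_t   = delta_t + (1 - done_t) * GAMMA * TAU * gae_{t+1}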
net.zero_grad()
loss_v = COEF_VALUE * loss_value_v + loss_policy_v + COEF_ENTROPY * loss_entropy_v
loss_v.backward()
nn.utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
optimizer.step()
venv.close()
|
[
"numpy.expand_dims",
"torch.nn.functional.softmax",
"common.vec_env.vec_logger.VecLogger",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.from_numpy"
] |
[((521, 546), 'common.vec_env.vec_logger.VecLogger', 'VecLogger', ([], {'N': 'N', 'path': 'path'}), '(N=N, path=path)\n', (530, 546), False, 'from common.vec_env.vec_logger import VecLogger\n'), ((666, 685), 'torch.zeros', 'torch.zeros', (['N', '(512)'], {}), '(N, 512)\n', (677, 685), False, 'import torch\n'), ((706, 725), 'torch.zeros', 'torch.zeros', (['N', '(512)'], {}), '(N, 512)\n', (717, 725), False, 'import torch\n'), ((1350, 1375), 'torch.nn.functional.softmax', 'F.softmax', (['logit_v'], {'dim': '(1)'}), '(logit_v, dim=1)\n', (1359, 1375), True, 'import torch.nn.functional as F\n'), ((1508, 1537), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logit_v'], {'dim': '(1)'}), '(logit_v, dim=1)\n', (1521, 1537), True, 'import torch.nn.functional as F\n'), ((1816, 1846), 'numpy.expand_dims', 'np.expand_dims', (['reward'], {'axis': '(1)'}), '(reward, axis=1)\n', (1830, 1846), True, 'import numpy as np\n'), ((1866, 1894), 'numpy.expand_dims', 'np.expand_dims', (['done'], {'axis': '(1)'}), '(done, axis=1)\n', (1880, 1894), True, 'import numpy as np\n'), ((1914, 1942), 'numpy.expand_dims', 'np.expand_dims', (['info'], {'axis': '(1)'}), '(info, axis=1)\n', (1928, 1942), True, 'import numpy as np\n'), ((832, 849), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (843, 849), False, 'import torch\n'), ((885, 902), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (896, 902), False, 'import torch\n'), ((939, 956), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (950, 956), False, 'import torch\n'), ((984, 1001), 'torch.zeros', 'torch.zeros', (['N', '(1)'], {}), '(N, 1)\n', (995, 1001), False, 'import torch\n'), ((614, 637), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (630, 637), False, 'import torch\n'), ((2007, 2030), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (2023, 2030), False, 'import torch\n'), ((2073, 2097), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (2089, 2097), False, 'import torch\n')]
|
"""Training and testing the Pairwise Differentiable Gradient Descent (PDGD) algorithm for unbiased learning to rank.
See the following paper for more information on the Pairwise Differentiable Gradient Descent (PDGD) algorithm.
* Oosterhuis, Harrie, and <NAME>. "Differentiable unbiased online learning to rank." In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pp. 1293-1302. ACM, 2018.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
import copy
import itertools
from six.moves import zip
from tensorflow import dtypes
from ultra.learning_algorithm.base_algorithm import BaseAlgorithm
import ultra.utils as utils
import ultra
class PDGD(BaseAlgorithm):
"""The Pairwise Differentiable Gradient Descent (PDGD) algorithm for unbiased learning to rank.
This class implements the Pairwise Differentiable Gradient Descent (PDGD) algorithm based on the input layer
feed. See the following paper for more information on the algorithm.
* Oosterhuis, Harrie, and <NAME>. "Differentiable unbiased online learning to rank." In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pp. 1293-1302. ACM, 2018.
"""
def __init__(self, data_set, exp_settings, forward_only=False):
"""Create the model.
Args:
data_set: (Raw_data) The dataset used to build the input layer.
exp_settings: (dictionary) The dictionary containing the model settings.
forward_only: Set true to conduct prediction only, false to conduct training.
"""
print('Build Pairwise Differentiable Gradient Descent (PDGD) algorithm.')
self.hparams = ultra.utils.hparams.HParams(
learning_rate=0.05, # Learning rate (\mu).
# Scalar for the probability distribution.
tau=1,
max_gradient_norm=1.0, # Clip gradients to this norm.
# Set strength for L2 regularization.
l2_loss=0.005,
grad_strategy='ada', # Select gradient strategy
)
print(exp_settings['learning_algorithm_hparams'])
self.hparams.parse(exp_settings['learning_algorithm_hparams'])
self.exp_settings = exp_settings
self.model = None
self.max_candidate_num = exp_settings['max_candidate_num']
self.feature_size = data_set.feature_size
self.learning_rate = tf.Variable(
float(self.hparams.learning_rate), trainable=False)
# Feeds for inputs.
self.is_training = tf.placeholder(tf.bool, name="is_train")
self.docid_inputs = [] # a list of top documents
self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size],
name="letor_features") # the letor features for the documents
self.labels = [] # the labels for the documents (e.g., clicks)
for i in range(self.max_candidate_num):
self.docid_inputs.append(tf.placeholder(tf.int64, shape=[None],
name="docid_input{0}".format(i)))
self.labels.append(tf.placeholder(tf.float32, shape=[None],
name="label{0}".format(i)))
self.global_step = tf.Variable(0, trainable=False)
self.output = tf.concat(
self.get_ranking_scores(
self.docid_inputs,
is_training=self.is_training,
scope='ranking_model'),
1)
# reshape from [rank_list_size, ?] to [?, rank_list_size]
reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels))
pad_removed_output = self.remove_padding_for_metric_eval(
self.docid_inputs, self.output)
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
metric_value = ultra.utils.make_ranking_metric_fn(
metric, topn)(reshaped_labels, pad_removed_output, None)
tf.summary.scalar(
'%s_%d' %
(metric, topn), metric_value, collections=['eval'])
# Build model
if not forward_only:
self.rank_list_size = exp_settings['train_list_cutoff']
self.train_output = self.ranking_model(
self.rank_list_size, scope='ranking_model')
train_labels = self.labels[:self.rank_list_size]
# reshape from [rank_list_size, ?] to [?, rank_list_size]
reshaped_train_labels = tf.transpose(
tf.convert_to_tensor(train_labels))
pad_removed_output = self.remove_padding_for_metric_eval(
self.docid_inputs, self.train_output)
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
metric_value = ultra.utils.make_ranking_metric_fn(metric, topn)(
reshaped_train_labels, pad_removed_output, None)
tf.summary.scalar(
'%s_%d' %
(metric, topn), metric_value, collections=['train_eval'])
# Build training pair inputs only when it is training
self.positive_docid_inputs = tf.placeholder(
tf.int64, shape=[None], name="positive_docid_input")
self.negative_docid_inputs = tf.placeholder(
tf.int64, shape=[None], name="negative_docid_input")
self.pair_weights = tf.placeholder(
tf.float32, shape=[None], name="pair_weight")
# Build ranking loss
pair_scores = self.get_ranking_scores(
[self.positive_docid_inputs,
self.negative_docid_inputs], is_training=self.is_training, scope='ranking_model'
)
self.loss = tf.reduce_sum(
tf.math.multiply(
#self.pairwise_cross_entropy_loss(pair_scores[0], pair_scores[1]),
tf.reduce_sum(-tf.exp(pair_scores[0]) / (
tf.exp(pair_scores[0]) + tf.exp(pair_scores[1])), 1),
self.pair_weights
)
)
params = tf.trainable_variables()
if self.hparams.l2_loss > 0:
for p in params:
self.loss += self.hparams.l2_loss * tf.nn.l2_loss(p)
# Select optimizer
self.optimizer_func = tf.train.AdagradOptimizer
if self.hparams.grad_strategy == 'sgd':
self.optimizer_func = tf.train.GradientDescentOptimizer
# Gradients and SGD update operation for training the model.
opt = self.optimizer_func(self.hparams.learning_rate)
self.gradients = tf.gradients(self.loss, params)
if self.hparams.max_gradient_norm > 0:
self.clipped_gradients, self.norm = tf.clip_by_global_norm(self.gradients,
self.hparams.max_gradient_norm)
self.updates = opt.apply_gradients(zip(self.clipped_gradients, params),
global_step=self.global_step)
tf.summary.scalar(
'Gradient Norm',
self.norm,
collections=['train'])
else:
self.norm = None
self.updates = opt.apply_gradients(zip(self.gradients, params),
global_step=self.global_step)
tf.summary.scalar(
'Learning Rate',
self.learning_rate,
collections=['train'])
tf.summary.scalar('Loss', self.loss, collections=['train'])
self.train_summary = tf.summary.merge_all(key='train')
self.train_eval_summary = tf.summary.merge_all(key='train_eval')
self.eval_summary = tf.summary.merge_all(key='eval')
self.saver = tf.train.Saver(tf.global_variables())
def step(self, session, input_feed, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: (tf.Session) tensorflow session to use.
input_feed: (dictionary) A dictionary containing all the input feed data.
forward_only: whether to do the backward step (False) or only forward (True).
Returns:
A triple consisting of the loss, outputs (None if we do backward),
and a tf.summary containing related information about the step.
"""
if not forward_only:
# Run the model to get ranking scores
input_feed[self.is_training.name] = False
rank_outputs = session.run(
[self.train_output, self.train_eval_summary], input_feed)
            # subtract the per-row maximum score to avoid numerical overflow in the exponentials below
rank_outputs[0] = np.array(rank_outputs[0])
rank_outputs[0] = rank_outputs[0] - \
np.amax(rank_outputs[0], axis=1, keepdims=True)
exp_ranking_scores = np.exp(self.hparams.tau * rank_outputs[0])
# Remove scores for padding documents
letor_features_length = len(input_feed[self.letor_features.name])
for i in range(len(input_feed[self.labels[0].name])):
for j in range(self.rank_list_size):
# not a valid doc
if input_feed[self.docid_inputs[j].name][i] == letor_features_length:
exp_ranking_scores[i][j] = 0.0
# Compute denominator for each position
denominators = np.cumsum(
exp_ranking_scores[:, ::-1], axis=1)[:, ::-1]
sum_log_denominators = np.sum(
np.log(
denominators,
out=np.zeros_like(denominators),
where=denominators > 0),
axis=1)
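            # (Explanatory note: each cumulative sum above is the softmax denominator over the document
            #  at that position and all documents ranked after it; the log-sums feed the pair-weight
            #  computation below in a numerically safer log domain.)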
# Create training pairs based on the ranking scores and the labels
positive_docids, negative_docids, pair_weights = [], [], []
for i in range(len(input_feed[self.labels[0].name])):
# Generate pairs and compute weights
for j in range(self.rank_list_size):
l = self.rank_list_size - 1 - j
# not a valid doc
if input_feed[self.docid_inputs[l].name][i] == letor_features_length:
continue
if input_feed[self.labels[l].name][i] > 0: # a clicked doc
for k in range(l + 2):
# find a negative/unclicked doc
if k < self.rank_list_size and input_feed[self.labels[k]
.name][i] < input_feed[self.labels[l].name][i]:
# not a valid doc
if input_feed[self.docid_inputs[k]
.name][i] == letor_features_length:
continue
positive_docids.append(
input_feed[self.docid_inputs[l].name][i])
negative_docids.append(
input_feed[self.docid_inputs[k].name][i])
flipped_exp_scores = np.copy(
exp_ranking_scores[i])
flipped_exp_scores[k] = exp_ranking_scores[i][l]
flipped_exp_scores[l] = exp_ranking_scores[i][k]
flipped_denominator = np.cumsum(
flipped_exp_scores[::-1])[::-1]
sum_log_flipped_denominator = np.sum(
np.log(
flipped_denominator,
out=np.zeros_like(flipped_denominator),
where=flipped_denominator > 0))
                            # p_r  = np.prod(rank_prob[i][min_i:max_i + 1])        (original list)
                            # p_rs = np.prod(flipped_rank_prob[min_i:max_i + 1])   (list with l and k swapped)
                            # weight = p_rs / (p_r + p_rs) = 1 / (1 + d_rs / d_r)
                            #        = 1 / (1 + exp(log_drs - log_dr))
weight = 1.0 / \
(1.0 +
np.exp(min(sum_log_flipped_denominator -
sum_log_denominators[i], 20)))
if np.isnan(weight):
print('SOMETHING WRONG!!!!!!!')
print(
'sum_log_denominators[i] is nan: ' + str(np.isnan(sum_log_denominators[i])))
print('sum_log_flipped_denominator is nan ' +
str(np.isnan(sum_log_flipped_denominator)))
pair_weights.append(weight)
input_feed[self.positive_docid_inputs.name] = positive_docids
input_feed[self.negative_docid_inputs.name] = negative_docids
input_feed[self.pair_weights.name] = pair_weights
# Train the model
input_feed[self.is_training.name] = True
train_outputs = session.run([
self.updates, # Update Op that does SGD.
self.loss, # Loss for this batch.
self.train_summary # Summarize statistics.
], input_feed)
summary = utils.merge_TFSummary(
[rank_outputs[-1], train_outputs[-1]], [0.5, 0.5])
# loss, no outputs, summary.
return train_outputs[1], rank_outputs, summary
else:
input_feed[self.is_training.name] = False
output_feed = [
self.eval_summary, # Summarize statistics.
self.output # Model outputs
]
outputs = session.run(output_feed, input_feed)
return None, outputs[1], outputs[0] # loss, outputs, summary.
|
[
"tensorflow.trainable_variables",
"numpy.isnan",
"ultra.utils.hparams.HParams",
"six.moves.zip",
"tensorflow.global_variables",
"tensorflow.Variable",
"numpy.exp",
"tensorflow.clip_by_global_norm",
"numpy.zeros_like",
"numpy.copy",
"tensorflow.placeholder",
"numpy.cumsum",
"tensorflow.exp",
"tensorflow.gradients",
"tensorflow.summary.merge_all",
"tensorflow.summary.scalar",
"ultra.utils.merge_TFSummary",
"tensorflow.convert_to_tensor",
"numpy.amax",
"ultra.utils.make_ranking_metric_fn",
"numpy.array",
"tensorflow.nn.l2_loss"
] |
[((1873, 1991), 'ultra.utils.hparams.HParams', 'ultra.utils.hparams.HParams', ([], {'learning_rate': '(0.05)', 'tau': '(1)', 'max_gradient_norm': '(1.0)', 'l2_loss': '(0.005)', 'grad_strategy': '"""ada"""'}), "(learning_rate=0.05, tau=1, max_gradient_norm=\n 1.0, l2_loss=0.005, grad_strategy='ada')\n", (1900, 1991), False, 'import ultra\n'), ((2757, 2797), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_train"""'}), "(tf.bool, name='is_train')\n", (2771, 2797), True, 'import tensorflow as tf\n'), ((2886, 2973), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.feature_size]', 'name': '"""letor_features"""'}), "(tf.float32, shape=[None, self.feature_size], name=\n 'letor_features')\n", (2900, 2973), True, 'import tensorflow as tf\n'), ((3510, 3541), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (3521, 3541), True, 'import tensorflow as tf\n'), ((8084, 8117), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""train"""'}), "(key='train')\n", (8104, 8117), True, 'import tensorflow as tf\n'), ((8152, 8190), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""train_eval"""'}), "(key='train_eval')\n", (8172, 8190), True, 'import tensorflow as tf\n'), ((8219, 8251), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""eval"""'}), "(key='eval')\n", (8239, 8251), True, 'import tensorflow as tf\n'), ((3855, 3888), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.labels'], {}), '(self.labels)\n', (3875, 3888), True, 'import tensorflow as tf\n'), ((5521, 5588), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]', 'name': '"""positive_docid_input"""'}), "(tf.int64, shape=[None], name='positive_docid_input')\n", (5535, 5588), True, 'import tensorflow as tf\n'), ((5647, 5714), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]', 'name': '"""negative_docid_input"""'}), "(tf.int64, shape=[None], name='negative_docid_input')\n", (5661, 5714), True, 'import tensorflow as tf\n'), ((5764, 5824), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""pair_weight"""'}), "(tf.float32, shape=[None], name='pair_weight')\n", (5778, 5824), True, 'import tensorflow as tf\n'), ((6478, 6502), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6500, 6502), True, 'import tensorflow as tf\n'), ((7035, 7066), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'params'], {}), '(self.loss, params)\n', (7047, 7066), True, 'import tensorflow as tf\n'), ((7855, 7932), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Learning Rate"""', 'self.learning_rate'], {'collections': "['train']"}), "('Learning Rate', self.learning_rate, collections=['train'])\n", (7872, 7932), True, 'import tensorflow as tf\n'), ((7994, 8053), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'self.loss'], {'collections': "['train']"}), "('Loss', self.loss, collections=['train'])\n", (8011, 8053), True, 'import tensorflow as tf\n'), ((8288, 8309), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (8307, 8309), True, 'import tensorflow as tf\n'), ((9200, 9225), 'numpy.array', 'np.array', (['rank_outputs[0]'], {}), '(rank_outputs[0])\n', (9208, 9225), True, 'import numpy as np\n'), ((9373, 9415), 'numpy.exp', 'np.exp', (['(self.hparams.tau * rank_outputs[0])'], {}), '(self.hparams.tau * 
rank_outputs[0])\n', (9379, 9415), True, 'import numpy as np\n'), ((14032, 14104), 'ultra.utils.merge_TFSummary', 'utils.merge_TFSummary', (['[rank_outputs[-1], train_outputs[-1]]', '[0.5, 0.5]'], {}), '([rank_outputs[-1], train_outputs[-1]], [0.5, 0.5])\n', (14053, 14104), True, 'import ultra.utils as utils\n'), ((4271, 4350), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('%s_%d' % (metric, topn))", 'metric_value'], {'collections': "['eval']"}), "('%s_%d' % (metric, topn), metric_value, collections=['eval'])\n", (4288, 4350), True, 'import tensorflow as tf\n'), ((4821, 4855), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['train_labels'], {}), '(train_labels)\n', (4841, 4855), True, 'import tensorflow as tf\n'), ((7170, 7240), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['self.gradients', 'self.hparams.max_gradient_norm'], {}), '(self.gradients, self.hparams.max_gradient_norm)\n', (7192, 7240), True, 'import tensorflow as tf\n'), ((7501, 7569), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Gradient Norm"""', 'self.norm'], {'collections': "['train']"}), "('Gradient Norm', self.norm, collections=['train'])\n", (7518, 7569), True, 'import tensorflow as tf\n'), ((9292, 9339), 'numpy.amax', 'np.amax', (['rank_outputs[0]'], {'axis': '(1)', 'keepdims': '(True)'}), '(rank_outputs[0], axis=1, keepdims=True)\n', (9299, 9339), True, 'import numpy as np\n'), ((9926, 9972), 'numpy.cumsum', 'np.cumsum', (['exp_ranking_scores[:, ::-1]'], {'axis': '(1)'}), '(exp_ranking_scores[:, ::-1], axis=1)\n', (9935, 9972), True, 'import numpy as np\n'), ((4142, 4190), 'ultra.utils.make_ranking_metric_fn', 'ultra.utils.make_ranking_metric_fn', (['metric', 'topn'], {}), '(metric, topn)\n', (4176, 4190), False, 'import ultra\n'), ((5278, 5368), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('%s_%d' % (metric, topn))", 'metric_value'], {'collections': "['train_eval']"}), "('%s_%d' % (metric, topn), metric_value, collections=[\n 'train_eval'])\n", (5295, 5368), True, 'import tensorflow as tf\n'), ((7367, 7402), 'six.moves.zip', 'zip', (['self.clipped_gradients', 'params'], {}), '(self.clipped_gradients, params)\n', (7370, 7402), False, 'from six.moves import zip\n'), ((7733, 7760), 'six.moves.zip', 'zip', (['self.gradients', 'params'], {}), '(self.gradients, params)\n', (7736, 7760), False, 'from six.moves import zip\n'), ((5135, 5183), 'ultra.utils.make_ranking_metric_fn', 'ultra.utils.make_ranking_metric_fn', (['metric', 'topn'], {}), '(metric, topn)\n', (5169, 5183), False, 'import ultra\n'), ((6633, 6649), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['p'], {}), '(p)\n', (6646, 6649), True, 'import tensorflow as tf\n'), ((10124, 10151), 'numpy.zeros_like', 'np.zeros_like', (['denominators'], {}), '(denominators)\n', (10137, 10151), True, 'import numpy as np\n'), ((6282, 6304), 'tensorflow.exp', 'tf.exp', (['pair_scores[0]'], {}), '(pair_scores[0])\n', (6288, 6304), True, 'import tensorflow as tf\n'), ((6333, 6355), 'tensorflow.exp', 'tf.exp', (['pair_scores[0]'], {}), '(pair_scores[0])\n', (6339, 6355), True, 'import tensorflow as tf\n'), ((6358, 6380), 'tensorflow.exp', 'tf.exp', (['pair_scores[1]'], {}), '(pair_scores[1])\n', (6364, 6380), True, 'import tensorflow as tf\n'), ((11713, 11743), 'numpy.copy', 'np.copy', (['exp_ranking_scores[i]'], {}), '(exp_ranking_scores[i])\n', (11720, 11743), True, 'import numpy as np\n'), ((13000, 13016), 'numpy.isnan', 'np.isnan', (['weight'], {}), '(weight)\n', (13008, 13016), True, 'import numpy as np\n'), ((11997, 12032), 
'numpy.cumsum', 'np.cumsum', (['flipped_exp_scores[::-1]'], {}), '(flipped_exp_scores[::-1])\n', (12006, 12032), True, 'import numpy as np\n'), ((12296, 12330), 'numpy.zeros_like', 'np.zeros_like', (['flipped_denominator'], {}), '(flipped_denominator)\n', (12309, 12330), True, 'import numpy as np\n'), ((13210, 13243), 'numpy.isnan', 'np.isnan', (['sum_log_denominators[i]'], {}), '(sum_log_denominators[i])\n', (13218, 13243), True, 'import numpy as np\n'), ((13374, 13411), 'numpy.isnan', 'np.isnan', (['sum_log_flipped_denominator'], {}), '(sum_log_flipped_denominator)\n', (13382, 13411), True, 'import numpy as np\n')]
|
"""
# 3D high-res brain mesh
Showing an ultra-high-resolution mesh of a human brain, acquired with a 7 Tesla MRI.
The data is not yet publicly available.
Data courtesy of <NAME> et al.:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME> (2020)
*7 Tesla MRI Followed by Histological 3D Reconstructions in Whole-Brain Specimens*
Front. Neuroanat. 14:536838
doi: 10.3389/fnana.2020.536838
Acknowledgements to <NAME> and <NAME> for data access.
"""
from pathlib import Path
import numpy as np
from datoviz import canvas, run, colormap
c = canvas(show_fps=True, width=1024, height=768)
panel = c.panel(controller='arcball')
visual = panel.visual('mesh', transform='auto')
ROOT = Path(__file__).parent.parent.parent.parent
pos = np.load(ROOT / "data/mesh/brain_highres.vert.npy")
faces = np.load(ROOT / "data/mesh/brain_highres.faces.npy")
assert pos.ndim == 2
assert pos.shape[1] == 3
assert faces.ndim == 2
assert faces.shape[1] == 3
print(f"Mesh has {len(faces)} triangles and {len(pos)} vertices")
visual.data('pos', pos)
visual.data('index', faces.ravel())
visual.data('clip', np.array([0, 0, 1, 1]))
gui = c.gui("GUI")
@gui.control("slider_float", "clip", vmin=-1, vmax=+1, value=+1)
def on_change(value):
visual.data('clip', np.array([0, 0, 1, value]))
run()
|
[
"numpy.load",
"datoviz.canvas",
"datoviz.run",
"pathlib.Path",
"numpy.array"
] |
[((628, 673), 'datoviz.canvas', 'canvas', ([], {'show_fps': '(True)', 'width': '(1024)', 'height': '(768)'}), '(show_fps=True, width=1024, height=768)\n', (634, 673), False, 'from datoviz import canvas, run, colormap\n'), ((817, 867), 'numpy.load', 'np.load', (["(ROOT / 'data/mesh/brain_highres.vert.npy')"], {}), "(ROOT / 'data/mesh/brain_highres.vert.npy')\n", (824, 867), True, 'import numpy as np\n'), ((876, 927), 'numpy.load', 'np.load', (["(ROOT / 'data/mesh/brain_highres.faces.npy')"], {}), "(ROOT / 'data/mesh/brain_highres.faces.npy')\n", (883, 927), True, 'import numpy as np\n'), ((1357, 1362), 'datoviz.run', 'run', ([], {}), '()\n', (1360, 1362), False, 'from datoviz import canvas, run, colormap\n'), ((1173, 1195), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (1181, 1195), True, 'import numpy as np\n'), ((1328, 1354), 'numpy.array', 'np.array', (['[0, 0, 1, value]'], {}), '([0, 0, 1, value])\n', (1336, 1354), True, 'import numpy as np\n'), ((768, 782), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (772, 782), False, 'from pathlib import Path\n')]
|
import math
from typing import List, Union, Sequence
from pyrep.backend import sim
from pyrep.objects.object import Object, object_type_to_class
import numpy as np
from pyrep.const import ObjectType, PerspectiveMode, RenderMode
class VisionSensor(Object):
"""A camera-type sensor, reacting to light, colors and images.
"""
def __init__(self, name_or_handle: Union[str, int]):
super().__init__(name_or_handle)
self.resolution = sim.simGetVisionSensorResolution(self._handle)
@staticmethod
def create(resolution: List[int], explicit_handling=False,
perspective_mode=True, show_volume_not_detecting=True,
show_volume_detecting=True, passive=False,
use_local_lights=False, show_fog=True,
near_clipping_plane=1e-2, far_clipping_plane=10.0,
view_angle=60.0, ortho_size=1.0, sensor_size=None,
render_mode=RenderMode.OPENGL3,
position=None, orientation=None) -> 'VisionSensor':
""" Create a Vision Sensor
:param resolution: List of the [x, y] resolution.
:param explicit_handling: Sensor will be explicitly handled.
:param perspective_mode: Sensor will be operated in Perspective Mode.
Orthographic mode if False.
:param show_volume_not_detecting: Sensor volume will be shown when not
detecting anything.
:param show_volume_detecting: Sensor will be shown when detecting.
:param passive: Sensor will be passive (use an external image).
:param use_local_lights: Sensor will use local lights.
:param show_fog: Sensor will show fog (if enabled).
:param near_clipping_plane: Near clipping plane.
:param far_clipping_plane: Far clipping plane.
:param view_angle: Perspective angle (in degrees) if in Perspective Mode.
:param ortho_size: Orthographic projection size [m] if in Orthographic
Mode.
:param sensor_size: Size [x, y, z] of the Vision Sensor object.
:param render_mode: Sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
:param position: The [x, y, z] position, if specified.
:param orientation: The [x, y, z] orientation in radians, if specified.
:return: The created Vision Sensor.
"""
options = 0
if explicit_handling:
options |= 1
if perspective_mode:
options |= 2
if not show_volume_not_detecting:
options |= 4
if not show_volume_detecting:
options |= 8
if passive:
options |= 16
if use_local_lights:
options |= 32
if not show_fog:
options |= 64
int_params = [
resolution[0], # 0
resolution[1], # 1
0, # 2
0 # 3
]
if sensor_size is None:
sensor_size = [0.01, 0.01, 0.03]
float_params = [
near_clipping_plane, # 0
far_clipping_plane, # 1
math.radians(view_angle) if perspective_mode else ortho_size, # 2
sensor_size[0], # 3
sensor_size[1], # 4
sensor_size[2], # 5
0.0, # 6
0.0, # 7
0.0, # 8
0.0, # 9
0.0, # 10
]
vs = VisionSensor(
sim.simCreateVisionSensor(options, int_params, float_params, None)
)
vs.set_render_mode(render_mode)
if position is not None:
vs.set_position(position)
if orientation is not None:
vs.set_orientation(orientation)
return vs
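    # A minimal usage sketch (illustrative values only; assumes a PyRep-launched scene is running):
    #   cam = VisionSensor.create([128, 128], view_angle=60.0)
    #   rgb = cam.capture_rgb()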
def _get_requested_type(self) -> ObjectType:
return ObjectType.VISION_SENSOR
def handle_explicitly(self) -> None:
"""Handle sensor explicitly.
This enables capturing image (e.g., capture_rgb())
without PyRep.step().
"""
if not self.get_explicit_handling():
raise RuntimeError('The explicit_handling is disabled. '
'Call set_explicit_handling(value=1) to enable explicit_handling first.')
sim.simHandleVisionSensor(self._handle)
def capture_rgb(self) -> np.ndarray:
"""Retrieves the rgb-image of a vision sensor.
:return: A numpy array of size (width, height, 3)
"""
return sim.simGetVisionSensorImage(self._handle, self.resolution)
def capture_depth(self, in_meters=False) -> np.ndarray:
"""Retrieves the depth-image of a vision sensor.
:param in_meters: Whether the depth should be returned in meters.
:return: A numpy array of size (width, height)
"""
return sim.simGetVisionSensorDepthBuffer(
self._handle, self.resolution, in_meters)
def capture_pointcloud(self) -> np.ndarray:
"""Retrieves point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
d = self.capture_depth(in_meters=True)
return self.pointcloud_from_depth(d)
def pointcloud_from_depth(self, depth: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
intrinsics = self.get_intrinsic_matrix()
return VisionSensor.pointcloud_from_depth_and_camera_params(
depth, self.get_matrix(), intrinsics)
@staticmethod
def pointcloud_from_depth_and_camera_params(
depth: np.ndarray, extrinsics: np.ndarray,
intrinsics: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
upc = _create_uniform_pixel_coords_image(depth.shape)
pc = upc * np.expand_dims(depth, -1)
C = np.expand_dims(extrinsics[:3, 3], 0).T
R = extrinsics[:3, :3]
R_inv = R.T # inverse of rot matrix is transpose
R_inv_C = np.matmul(R_inv, C)
extrinsics = np.concatenate((R_inv, -R_inv_C), -1)
cam_proj_mat = np.matmul(intrinsics, extrinsics)
cam_proj_mat_homo = np.concatenate(
[cam_proj_mat, [np.array([0, 0, 0, 1])]])
cam_proj_mat_inv = np.linalg.inv(cam_proj_mat_homo)[0:3]
world_coords_homo = np.expand_dims(_pixel_to_world_coords(
pc, cam_proj_mat_inv), 0)
world_coords = world_coords_homo[..., :-1][0]
return world_coords
def get_intrinsic_matrix(self):
res = np.array(self.get_resolution())
pp_offsets = res / 2
ratio = res[0] / res[1]
pa_x = pa_y = math.radians(self.get_perspective_angle())
if ratio > 1:
pa_y = 2 * np.arctan(np.tan(pa_y / 2) / ratio)
elif ratio < 1:
pa_x = 2 * np.arctan(np.tan(pa_x / 2) * ratio)
persp_angles = np.array([pa_x, pa_y])
focal_lengths = -res / (2 * np.tan(persp_angles / 2))
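        # Assembled below is the standard pinhole intrinsic matrix (explanatory comment only):
        #   K = [[fx, 0, cx],
        #        [0, fy, cy],
        #        [0,  0,  1]]
        # with fx, fy the (signed) focal lengths in pixels and (cx, cy) the principal point at the image centre.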
return np.array(
[[focal_lengths[0], 0., pp_offsets[0]],
[0., focal_lengths[1], pp_offsets[1]],
[0., 0., 1.]])
def get_resolution(self) -> List[int]:
""" Return the Sensor's resolution.
:return: Resolution [x, y]
"""
return sim.simGetVisionSensorResolution(self._handle)
def set_resolution(self, resolution: List[int]) -> None:
""" Set the Sensor's resolution.
:param resolution: New resolution [x, y]
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_x, resolution[0]
)
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_y, resolution[1]
)
self.resolution = resolution
def get_perspective_mode(self) -> PerspectiveMode:
""" Retrieve the Sensor's perspective mode.
:return: The current PerspectiveMode.
"""
perspective_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
)
return PerspectiveMode(perspective_mode)
def set_perspective_mode(self, perspective_mode: PerspectiveMode) -> None:
""" Set the Sensor's perspective mode.
:param perspective_mode: The new perspective mode, one of:
PerspectiveMode.ORTHOGRAPHIC
PerspectiveMode.PERSPECTIVE
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
perspective_mode.value
)
def get_render_mode(self) -> RenderMode:
""" Retrieves the Sensor's rendering mode
:return: RenderMode for the current rendering mode.
"""
render_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode
)
return RenderMode(render_mode)
def set_render_mode(self, render_mode: RenderMode) -> None:
""" Set the Sensor's rendering mode
:param render_mode: The new sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode,
render_mode.value
)
def get_windowed_size(self) -> Sequence[int]:
"""Get the size of windowed rendering.
:return: The (x, y) resolution of the window. 0 for full-screen.
"""
size_x = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x)
size_y = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y)
return size_x, size_y
def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
"""Set the size of windowed rendering.
:param resolution: The (x, y) resolution of the window.
0 for full-screen.
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x,
resolution[0])
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y,
resolution[1])
def get_perspective_angle(self) -> float:
""" Get the Sensor's perspective angle.
:return: The sensor's perspective angle (in degrees).
"""
return math.degrees(sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle
))
def set_perspective_angle(self, angle: float) -> None:
""" Set the Sensor's perspective angle.
:param angle: New perspective angle (in degrees)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle,
math.radians(angle)
)
def get_orthographic_size(self) -> float:
""" Get the Sensor's orthographic size.
:return: The sensor's orthographic size (in metres).
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size
)
def set_orthographic_size(self, ortho_size: float) -> None:
""" Set the Sensor's orthographic size.
        :param ortho_size: New orthographic size (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size, ortho_size
)
def get_near_clipping_plane(self) -> float:
""" Get the Sensor's near clipping plane.
:return: Near clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping
)
def set_near_clipping_plane(self, near_clipping: float) -> None:
""" Set the Sensor's near clipping plane.
:param near_clipping: New near clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping, near_clipping
)
def get_far_clipping_plane(self) -> float:
""" Get the Sensor's far clipping plane.
        :return: Far clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping
)
def set_far_clipping_plane(self, far_clipping: float) -> None:
""" Set the Sensor's far clipping plane.
:param far_clipping: New far clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping, far_clipping
)
def set_entity_to_render(self, entity_to_render: int) -> None:
""" Set the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 to render all objects in scene.
:param entity_to_render: Handle of the entity to render
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render, entity_to_render
)
    def get_entity_to_render(self) -> int:
        """ Get the entity to render to the Sensor; this can be an object or, more usefully, a collection.
-1 if all objects in scene are rendered.
:return: Handle of the entity to render
"""
return sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render
)
def _create_uniform_pixel_coords_image(resolution: np.ndarray):
pixel_x_coords = np.reshape(
np.tile(np.arange(resolution[1]), [resolution[0]]),
(resolution[0], resolution[1], 1)).astype(np.float32)
pixel_y_coords = np.reshape(
np.tile(np.arange(resolution[0]), [resolution[1]]),
(resolution[1], resolution[0], 1)).astype(np.float32)
pixel_y_coords = np.transpose(pixel_y_coords, (1, 0, 2))
uniform_pixel_coords = np.concatenate(
(pixel_x_coords, pixel_y_coords, np.ones_like(pixel_x_coords)), -1)
return uniform_pixel_coords
def _transform(coords, trans):
h, w = coords.shape[:2]
coords = np.reshape(coords, (h * w, -1))
coords = np.transpose(coords, (1, 0))
transformed_coords_vector = np.matmul(trans, coords)
transformed_coords_vector = np.transpose(
transformed_coords_vector, (1, 0))
return np.reshape(transformed_coords_vector,
(h, w, -1))
def _pixel_to_world_coords(pixel_coords, cam_proj_mat_inv):
h, w = pixel_coords.shape[:2]
pixel_coords = np.concatenate(
[pixel_coords, np.ones((h, w, 1))], -1)
world_coords = _transform(pixel_coords, cam_proj_mat_inv)
world_coords_homo = np.concatenate(
[world_coords, np.ones((h, w, 1))], axis=-1)
return world_coords_homo
object_type_to_class[ObjectType.VISION_SENSOR] = VisionSensor
|
[
"pyrep.const.PerspectiveMode",
"pyrep.backend.sim.simGetVisionSensorResolution",
"numpy.ones",
"numpy.arange",
"pyrep.backend.sim.simGetObjectFloatParameter",
"math.radians",
"numpy.transpose",
"numpy.tan",
"numpy.reshape",
"numpy.ones_like",
"pyrep.backend.sim.simSetObjectInt32Parameter",
"pyrep.backend.sim.simGetVisionSensorDepthBuffer",
"numpy.linalg.inv",
"pyrep.backend.sim.simCreateVisionSensor",
"numpy.concatenate",
"pyrep.backend.sim.simHandleVisionSensor",
"pyrep.backend.sim.simSetObjectFloatParameter",
"numpy.expand_dims",
"pyrep.backend.sim.simGetVisionSensorImage",
"pyrep.const.RenderMode",
"numpy.array",
"pyrep.backend.sim.simGetObjectInt32Parameter",
"numpy.matmul"
] |
[((14652, 14691), 'numpy.transpose', 'np.transpose', (['pixel_y_coords', '(1, 0, 2)'], {}), '(pixel_y_coords, (1, 0, 2))\n', (14664, 14691), True, 'import numpy as np\n'), ((14917, 14948), 'numpy.reshape', 'np.reshape', (['coords', '(h * w, -1)'], {}), '(coords, (h * w, -1))\n', (14927, 14948), True, 'import numpy as np\n'), ((14962, 14990), 'numpy.transpose', 'np.transpose', (['coords', '(1, 0)'], {}), '(coords, (1, 0))\n', (14974, 14990), True, 'import numpy as np\n'), ((15023, 15047), 'numpy.matmul', 'np.matmul', (['trans', 'coords'], {}), '(trans, coords)\n', (15032, 15047), True, 'import numpy as np\n'), ((15080, 15127), 'numpy.transpose', 'np.transpose', (['transformed_coords_vector', '(1, 0)'], {}), '(transformed_coords_vector, (1, 0))\n', (15092, 15127), True, 'import numpy as np\n'), ((15148, 15197), 'numpy.reshape', 'np.reshape', (['transformed_coords_vector', '(h, w, -1)'], {}), '(transformed_coords_vector, (h, w, -1))\n', (15158, 15197), True, 'import numpy as np\n'), ((458, 504), 'pyrep.backend.sim.simGetVisionSensorResolution', 'sim.simGetVisionSensorResolution', (['self._handle'], {}), '(self._handle)\n', (490, 504), False, 'from pyrep.backend import sim\n'), ((4612, 4651), 'pyrep.backend.sim.simHandleVisionSensor', 'sim.simHandleVisionSensor', (['self._handle'], {}), '(self._handle)\n', (4637, 4651), False, 'from pyrep.backend import sim\n'), ((4835, 4893), 'pyrep.backend.sim.simGetVisionSensorImage', 'sim.simGetVisionSensorImage', (['self._handle', 'self.resolution'], {}), '(self._handle, self.resolution)\n', (4862, 4893), False, 'from pyrep.backend import sim\n'), ((5169, 5244), 'pyrep.backend.sim.simGetVisionSensorDepthBuffer', 'sim.simGetVisionSensorDepthBuffer', (['self._handle', 'self.resolution', 'in_meters'], {}), '(self._handle, self.resolution, in_meters)\n', (5202, 5244), False, 'from pyrep.backend import sim\n'), ((6473, 6492), 'numpy.matmul', 'np.matmul', (['R_inv', 'C'], {}), '(R_inv, C)\n', (6482, 6492), True, 'import numpy as np\n'), ((6514, 6551), 'numpy.concatenate', 'np.concatenate', (['(R_inv, -R_inv_C)', '(-1)'], {}), '((R_inv, -R_inv_C), -1)\n', (6528, 6551), True, 'import numpy as np\n'), ((6575, 6608), 'numpy.matmul', 'np.matmul', (['intrinsics', 'extrinsics'], {}), '(intrinsics, extrinsics)\n', (6584, 6608), True, 'import numpy as np\n'), ((7355, 7377), 'numpy.array', 'np.array', (['[pa_x, pa_y]'], {}), '([pa_x, pa_y])\n', (7363, 7377), True, 'import numpy as np\n'), ((7455, 7566), 'numpy.array', 'np.array', (['[[focal_lengths[0], 0.0, pp_offsets[0]], [0.0, focal_lengths[1], pp_offsets\n [1]], [0.0, 0.0, 1.0]]'], {}), '([[focal_lengths[0], 0.0, pp_offsets[0]], [0.0, focal_lengths[1],\n pp_offsets[1]], [0.0, 0.0, 1.0]])\n', (7463, 7566), True, 'import numpy as np\n'), ((7804, 7850), 'pyrep.backend.sim.simGetVisionSensorResolution', 'sim.simGetVisionSensorResolution', (['self._handle'], {}), '(self._handle)\n', (7836, 7850), False, 'from pyrep.backend import sim\n'), ((8024, 8125), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_resolution_x', 'resolution[0]'], {}), '(self._handle, sim.\n sim_visionintparam_resolution_x, resolution[0])\n', (8054, 8125), False, 'from pyrep.backend import sim\n'), ((8151, 8252), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_resolution_y', 'resolution[1]'], {}), '(self._handle, sim.\n sim_visionintparam_resolution_y, resolution[1])\n', (8181, 8252), False, 'from 
pyrep.backend import sim\n'), ((8501, 8596), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_perspective_operation'], {}), '(self._handle, sim.\n sim_visionintparam_perspective_operation)\n', (8531, 8596), False, 'from pyrep.backend import sim\n'), ((8630, 8663), 'pyrep.const.PerspectiveMode', 'PerspectiveMode', (['perspective_mode'], {}), '(perspective_mode)\n', (8645, 8663), False, 'from pyrep.const import ObjectType, PerspectiveMode, RenderMode\n'), ((8960, 9079), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_perspective_operation', 'perspective_mode.value'], {}), '(self._handle, sim.\n sim_visionintparam_perspective_operation, perspective_mode.value)\n', (8990, 9079), False, 'from pyrep.backend import sim\n'), ((9300, 9385), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_render_mode'], {}), '(self._handle, sim.sim_visionintparam_render_mode\n )\n', (9330, 9385), False, 'from pyrep.backend import sim\n'), ((9418, 9441), 'pyrep.const.RenderMode', 'RenderMode', (['render_mode'], {}), '(render_mode)\n', (9428, 9441), False, 'from pyrep.const import ObjectType, PerspectiveMode, RenderMode\n'), ((9926, 10030), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_render_mode', 'render_mode.value'], {}), '(self._handle, sim.\n sim_visionintparam_render_mode, render_mode.value)\n', (9956, 10030), False, 'from pyrep.backend import sim\n'), ((10261, 10350), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_x'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_x)\n', (10291, 10350), False, 'from pyrep.backend import sim\n'), ((10376, 10465), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_y'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_y)\n', (10406, 10465), False, 'from pyrep.backend import sim\n'), ((10745, 10849), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_x', 'resolution[0]'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_x, resolution[0])\n', (10775, 10849), False, 'from pyrep.backend import sim\n'), ((10878, 10982), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_y', 'resolution[1]'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_y, resolution[1])\n', (10908, 10982), False, 'from pyrep.backend import sim\n'), ((11827, 11913), 'pyrep.backend.sim.simGetObjectFloatParameter', 'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_ortho_size'], {}), '(self._handle, sim.\n sim_visionfloatparam_ortho_size)\n', (11857, 11913), False, 'from pyrep.backend import sim\n'), ((12121, 12219), 'pyrep.backend.sim.simSetObjectFloatParameter', 'sim.simSetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_ortho_size', 'ortho_size'], {}), '(self._handle, sim.\n sim_visionfloatparam_ortho_size, ortho_size)\n', (12151, 12219), False, 'from pyrep.backend import sim\n'), ((12410, 12499), 'pyrep.backend.sim.simGetObjectFloatParameter', 
'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_near_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_near_clipping)\n', (12440, 12499), False, 'from pyrep.backend import sim\n'), ((12724, 12828), 'pyrep.backend.sim.simSetObjectFloatParameter', 'sim.simSetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_near_clipping', 'near_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_near_clipping, near_clipping)\n', (12754, 12828), False, 'from pyrep.backend import sim\n'), ((13017, 13105), 'pyrep.backend.sim.simGetObjectFloatParameter', 'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_far_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_far_clipping)\n', (13047, 13105), False, 'from pyrep.backend import sim\n'), ((13325, 13427), 'pyrep.backend.sim.simSetObjectFloatParameter', 'sim.simSetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_far_clipping', 'far_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_far_clipping, far_clipping)\n', (13355, 13427), False, 'from pyrep.backend import sim\n'), ((13746, 13854), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_entity_to_render', 'entity_to_render'], {}), '(self._handle, sim.\n sim_visionintparam_entity_to_render, entity_to_render)\n', (13776, 13854), False, 'from pyrep.backend import sim\n'), ((14147, 14237), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_entity_to_render'], {}), '(self._handle, sim.\n sim_visionintparam_entity_to_render)\n', (14177, 14237), False, 'from pyrep.backend import sim\n'), ((3824, 3890), 'pyrep.backend.sim.simCreateVisionSensor', 'sim.simCreateVisionSensor', (['options', 'int_params', 'float_params', 'None'], {}), '(options, int_params, float_params, None)\n', (3849, 3890), False, 'from pyrep.backend import sim\n'), ((6289, 6314), 'numpy.expand_dims', 'np.expand_dims', (['depth', '(-1)'], {}), '(depth, -1)\n', (6303, 6314), True, 'import numpy as np\n'), ((6327, 6363), 'numpy.expand_dims', 'np.expand_dims', (['extrinsics[:3, 3]', '(0)'], {}), '(extrinsics[:3, 3], 0)\n', (6341, 6363), True, 'import numpy as np\n'), ((6734, 6766), 'numpy.linalg.inv', 'np.linalg.inv', (['cam_proj_mat_homo'], {}), '(cam_proj_mat_homo)\n', (6747, 6766), True, 'import numpy as np\n'), ((11201, 11294), 'pyrep.backend.sim.simGetObjectFloatParameter', 'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_perspective_angle'], {}), '(self._handle, sim.\n sim_visionfloatparam_perspective_angle)\n', (11231, 11294), False, 'from pyrep.backend import sim\n'), ((11613, 11632), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (11625, 11632), False, 'import math\n'), ((14776, 14804), 'numpy.ones_like', 'np.ones_like', (['pixel_x_coords'], {}), '(pixel_x_coords)\n', (14788, 14804), True, 'import numpy as np\n'), ((15374, 15392), 'numpy.ones', 'np.ones', (['(h, w, 1)'], {}), '((h, w, 1))\n', (15381, 15392), True, 'import numpy as np\n'), ((15524, 15542), 'numpy.ones', 'np.ones', (['(h, w, 1)'], {}), '((h, w, 1))\n', (15531, 15542), True, 'import numpy as np\n'), ((3386, 3410), 'math.radians', 'math.radians', (['view_angle'], {}), '(view_angle)\n', (3398, 3410), False, 'import math\n'), ((7414, 7438), 'numpy.tan', 'np.tan', (['(persp_angles / 2)'], {}), '(persp_angles / 2)\n', (7420, 7438), True, 'import numpy as np\n'), ((6681, 6703), 
'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (6689, 6703), True, 'import numpy as np\n'), ((14370, 14394), 'numpy.arange', 'np.arange', (['resolution[1]'], {}), '(resolution[1])\n', (14379, 14394), True, 'import numpy as np\n'), ((14525, 14549), 'numpy.arange', 'np.arange', (['resolution[0]'], {}), '(resolution[0])\n', (14534, 14549), True, 'import numpy as np\n'), ((7223, 7239), 'numpy.tan', 'np.tan', (['(pa_y / 2)'], {}), '(pa_y / 2)\n', (7229, 7239), True, 'import numpy as np\n'), ((7306, 7322), 'numpy.tan', 'np.tan', (['(pa_x / 2)'], {}), '(pa_x / 2)\n', (7312, 7322), True, 'import numpy as np\n')]
|
import os
import numpy as np
import re
import sys
try:
import h5py
except ImportError:
h5py = None
'''
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
'''
from .. import logger, logging
from .base import MFPackage, MissingFile
from .name import Modflow
_re_fmtin = re.compile(
r'\((?P<body>(?P<rep>\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\d+)(\.(?P<d>\d+))?'
r'|FREE|BINARY)\)')
class MFFileReader(object):
"""MODFLOW file reader"""
_parent_class = MFPackage
def __init__(self, f=None, parent=None):
"""Initialize with a file and an instance of a parent class
Parameters
----------
f : str, file-like object or None
            A path to a file, or a file-like reader with a 'readlines'
method, such as BytesIO. If None, then it is obtained from
parent.fpath, or parent.fname
parent : instance of MFPackage
"""
# Set up logger
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(logger.level)
if parent is None:
parent = self._parent_class()
if not isinstance(parent, self._parent_class):
self.logger.error(
"'parent' should be an instance of a %r object; found %r",
self._parent_class.__name__, parent.__class__.__name__)
self.parent = parent
if f is None:
if getattr(parent, 'fpath', None) is not None:
f = parent.fpath
elif getattr(parent, 'fname', None) is not None:
f = parent.fname
else:
raise ValueError('unsure how to open file')
# Read data
if hasattr(f, 'readlines'):
# it is a file reader object, e.g. BytesIO
self.fname = f.__class__.__name__
self.lines = f.readlines()
else:
self.fpath = self.parent.fpath = f
if getattr(self, 'fname', None) is None:
self.fname = os.path.split(self.parent.fpath)[1]
# Read whole file at once, then close it
with open(self.parent.fpath, 'r') as fp:
self.lines = fp.readlines()
if self.parent.nam is None:
self.parent.nam = Modflow()
try:
self.parent.nam.ref_dir = os.path.dirname(self.fpath)
except:
pass
self.logger.info("read file '%s' with %d lines",
self.fname, len(self.lines))
self.lineno = 0
self.data_set_num = None
def __len__(self):
"""Returns number of lines"""
return len(self.lines)
def location_exception(self, e):
"""Use to show location of exception while reading file
Example:
fp = _MFFileReader(fpath, self)
try:
fp.read_text(0)
...
fp.check_end()
except Exception as e:
exec(fp.location_exception(e))
"""
location = '%s:%s:%s:Data set %s:' % \
(self.parent.__class__.__name__, self.fname, self.lineno,
self.data_set_num)
if sys.version_info[0] < 3:
return "import sys; raise type(e), type(e)('" + location + "' + " \
"str(e)), sys.exc_info()[2]"
else:
return "import sys; raise type(e)(str(e) + '" + location + "' + " \
"str(e)).with_traceback(sys.exc_info()[2])"
def check_end(self):
"""Check end of file and show messages in logger on status"""
if len(self) == self.lineno:
self.logger.info("finished reading %d lines", self.lineno)
elif len(self) > self.lineno:
remain = len(self) - self.lineno
a, b = 's', ''
if remain == 1:
b, a = a, b
self.logger.warn(
"finished reading %d lines, but %d line%s remain%s",
self.lineno, remain, a, b)
else:
raise ValueError("%d > %d ?" % (self.lineno, len(self)))
@property
def curinfo(self):
"""Returns line and data set number info"""
return str(self.lineno) + ':Data set ' + str(self.data_set_num)
@property
def not_eof(self):
"""Reader is not at the end of file (EOF)"""
return self.lineno < len(self.lines)
@property
def curline(self):
"""Return the current line"""
try:
if self.lineno == 0:
return ''
else:
return self.lines[self.lineno - 1]
except IndexError:
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
def nextline(self, data_set_num=None):
"""Get next line, setting data set number and increment lineno"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug('%s:using nextline', self.curinfo)
self.lineno += 1
try:
line = self.lines[self.lineno - 1]
except IndexError:
self.lineno -= 1
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
if data_set_num is not None:
self.logger.debug(
'%s:returning line with length %d:%r',
self.curinfo, len(line), line)
return line
def readline(self):
"""Alias for nextline()"""
return self.nextline()
def conv(self, item, fmt, name=None):
"""Convert item to format fmt
Parameters
----------
item : str
fmt : str, default ('s')
's' for string or no conversion (default)
'i' for integer
'f' for float
name : str or None
Optional name to provide context information for debugging
"""
try:
if type(fmt) == np.dtype:
return fmt.type(item)
elif fmt == 's': # string
return item
elif fmt == 'i': # integer
return int(item)
elif fmt == 'f': # any floating-point number
# typically either a REAL or DOUBLE PRECISION
return self.parent._float_type.type(item)
else:
raise ValueError('Unknown fmt code %r' % (fmt,))
except ValueError:
if name is not None:
msg = 'Cannot cast %r of %r to type %r' % (name, item, fmt)
else:
msg = 'Cannot cast %r to type %r' % (item, fmt)
raise ValueError(msg)
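    # Illustrative behaviour (fp being an MFFileReader, 'NLAY' a hypothetical name):
    #   fp.conv('12', 'i') returns 12
    #   fp.conv('3.5', 'f') returns 3.5 as the parent's float type
    #   fp.conv('abc', 'i', name='NLAY') raises ValueError mentioning 'NLAY'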
def get_items(self, data_set_num=None, num_items=None, fmt='s',
multiline=False):
"""Get items from one or more lines (if multiline) into a list
If num_items is defined, then only this count will be returned and any
remaining items from the line will be ignored. If there are too few
items on the line, the values will be some form of "zero", such as
0, 0.0 or ''.
However, if `multiline=True`, then multiple lines can be read to reach
num_items.
If fmt is defined, it must be:
- 's' for string or no conversion (default)
- 'i' for integer
- 'f' for float, as defined by parent._float_type
"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug(
'%s:using get_items for num_items=%s',
self.curinfo, num_items)
startln = self.lineno + 1
fill_missing = False
if num_items is None or not multiline:
items = self.nextline().split()
if num_items is not None and len(items) > num_items:
items = items[:num_items]
if (not multiline and num_items is not None and
len(items) < num_items):
fill_missing = (num_items - len(items))
else:
assert isinstance(num_items, int), type(num_items)
assert num_items > 0, num_items
items = []
while len(items) < num_items:
items += self.nextline().split()
if len(items) > num_items: # trim off too many
items = items[:num_items]
if fmt == 's':
res = items
else:
res = [self.conv(x, fmt) for x in items]
if fill_missing:
if fmt == 's':
fill_value = ''
else:
fill_value = '0'
res += [self.conv(fill_value, fmt)] * fill_missing
if data_set_num is not None:
if multiline:
toline = ' to %s' % (self.lineno,)
else:
toline = ''
self.logger.debug('%s:read %d items from line %d%s',
self.data_set_num, num_items, startln, toline)
return res
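    # Illustrative behaviour: with fmt='i' and num_items=6, a single line "1 2 3"
    # yields [1, 2, 3, 0, 0, 0] (missing values filled with converted zeros); with
    # multiline=True further lines are read until 6 items have been collected.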
def get_named_items(self, data_set_num, names, fmt='s'):
"""Get items into dict. See get_items for fmt usage"""
items = self.get_items(data_set_num, len(names), fmt)
res = {}
for name, item in zip(names, items):
if fmt != 's':
item = self.conv(item, fmt, name)
res[name] = item
return res
def read_named_items(self, data_set_num, names, fmt='s'):
"""Read items into parent. See get_items for fmt usage"""
startln = self.lineno + 1
items = self.get_named_items(data_set_num, names, fmt)
for name in items.keys():
setattr(self.parent, name, items[name])
self.logger.debug('%s:read %d items from line %d',
self.data_set_num, len(items), startln)
def read_text(self, data_set_num=0):
"""Reads 0 or more text (comment) for lines that start with '#'"""
startln = self.lineno + 1
self.parent.text = []
while True:
try:
line = self.nextline(data_set_num)
except IndexError:
break
if line.startswith('#'):
line = line[1:].strip()
self.parent.text.append(line)
else:
self.lineno -= 1 # scroll back one?
break
self.logger.debug('%s:read %d lines of text from line %d to %d',
self.data_set_num,
len(self.parent.text), startln, self.lineno)
def read_options(self, data_set_num, process_aux=True):
"""Read options, and optionally process auxiliary variables"""
line = self.nextline(data_set_num)
self.parent.Options = line.upper().split()
if hasattr(self.parent, 'valid_options'):
for opt in self.parent.Options:
                if opt not in self.parent.valid_options:
self.logger.warn("%s:unrecognised option %r",
self.data_set_num, opt)
if process_aux:
raise NotImplementedError
else:
self.logger.debug('%s:read %d options from line %d:%s',
self.data_set_num, len(self.parent.Options),
self.lineno, self.parent.Options)
def read_parameter(self, data_set_num, names):
"""Read [PARAMETER values]
This optional item must start with the word "PARAMETER". If not found,
then names are set to 0.
Parameter names are provided in a list, and are stored as integers
to the parent object.
"""
startln = self.lineno + 1
line = self.nextline(data_set_num)
self.lineno -= 1
if line.upper().startswith('PARAMETER'):
items = self.get_items(num_items=len(names) + 1)
assert items[0].upper() == 'PARAMETER', items[0]
for name, item in zip(names, items[1:]):
value = self.conv(item, 'i', name)
setattr(self.parent, name, value)
else:
for name in names:
setattr(self.parent, name, 0)
self.logger.debug('%s:read %d parameters from line %d',
self.data_set_num, len(names), startln)
def get_array(self, data_set_num, shape, dtype, return_dict=False):
"""Returns array data, similar to array reading utilities U2DREL,
U2DINT, and U1DREL. If return_dict=True, a dict is returned with all
other attributes.
Inputs:
data_set_num - number
shape - 1D array, e.g. 10, or 2D array (20, 30)
dtype - e.g. np.float32 or 'f'
        See page 8-57 of the MODFLOW-2005 manual for details.
"""
startln = self.lineno + 1
res = {}
first_line = self.nextline(data_set_num)
# Comments are considered after a '#' character on the first line
if '#' in first_line:
res['text'] = first_line[(first_line.find('#') + 1):].strip()
num_type = np.dtype(dtype).type
res['array'] = ar = np.empty(shape, dtype=dtype)
num_items = ar.size
def read_array_data(obj, fmtin):
'''Helper subroutine to actually read array data'''
fmt = _re_fmtin.search(fmtin.upper())
if not fmt:
raise ValueError(
'cannot understand Fortran format: ' + repr(fmtin))
fmt = fmt.groupdict()
if fmt['body'] == 'BINARY':
data_size = ar.size * ar.dtype.itemsize
if hasattr(obj, 'read'):
data = obj.read(data_size)
else:
raise NotImplementedError(
"not sure how to 'read' from " + repr(obj))
iar = np.fromstring(data, dtype)
else: # ASCII
items = []
if not hasattr(obj, 'readline'):
raise NotImplementedError(
"not sure how to 'readline' from " + repr(obj))
if fmt['body'] == 'FREE':
while len(items) < num_items:
items += obj.readline().split()
else: # interpret Fortran format
if fmt['rep']:
rep = int(fmt['rep'])
else:
rep = 1
width = int(fmt['w'])
while len(items) < num_items:
line = obj.readline()
pos = 0
for n in range(rep):
try:
item = line[pos:pos + width].strip()
pos += width
if item:
items.append(item)
except IndexError:
break
iar = np.fromiter(items, dtype=dtype)
if iar.size != ar.size:
raise ValueError('expected size %s, but found %s' %
(ar.size, iar.size))
return iar
# First, assume using more modern free-format control line
control_line = first_line
dat = control_line.split()
# First item is the control word
res['cntrl'] = cntrl = dat[0].upper()
if cntrl == 'CONSTANT':
# CONSTANT CNSTNT
if len(dat) < 2:
raise ValueError(
'expecting to find at least 2 items for CONSTANT')
res['cnstnt'] = cnstnt = dat[1]
if len(dat) > 2 and 'text' not in res:
st = first_line.find(cnstnt) + len(cnstnt)
res['text'] = first_line[st:].strip()
ar.fill(cnstnt)
elif cntrl == 'INTERNAL':
# INTERNAL CNSTNT FMTIN [IPRN]
if len(dat) < 3:
raise ValueError(
'expecting to find at least 3 items for INTERNAL')
res['cnstnt'] = cnstnt = dat[1]
res['fmtin'] = fmtin = dat[2]
if len(dat) >= 4:
res['iprn'] = iprn = dat[3] # not used
if len(dat) > 4 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
iar = read_array_data(self, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'EXTERNAL':
# EXTERNAL Nunit CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for EXTERNAL')
res['nunit'] = nunit = int(dat[1])
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4] # not used
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
# Needs a reference to nam[nunit]
if self.parent.nam is None:
raise AttributeError(
"reference to 'nam' required for EXTERNAL array")
try:
obj = self.parent.nam[nunit]
except KeyError:
raise KeyError("nunit %s not in nam", nunit)
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'OPEN/CLOSE':
# OPEN/CLOSE FNAME CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for OPEN/CLOSE')
res['fname'] = fname = dat[1]
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4]
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
with open(fname, 'rb') as fp:
iar = read_array_data(fp, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'HDF5':
# GMS extension: http://www.xmswiki.com/xms/GMS:MODFLOW_with_HDF5
if not h5py:
raise ImportError('h5py module required to read HDF5 data')
# HDF5 CNSTNT IPRN "FNAME" "pathInFile" nDim start1 nToRead1 ...
file_ch = r'\w/\.\-\+_\(\)'
dat = re.findall('([' + file_ch + ']+|"[' + file_ch + ' ]+")',
control_line)
if len(dat) < 8:
raise ValueError('expecting to find at least 8 '
'items for HDF5; found ' + str(len(dat)))
assert dat[0].upper() == 'HDF5', dat[0]
res['cnstnt'] = cnstnt = dat[1]
try:
cnstnt_val = num_type(cnstnt)
except ValueError: # e.g. 1.0 as int 1
cnstnt_val = num_type(float(cnstnt))
res['iprn'] = dat[2]
res['fname'] = fname = dat[3].strip('"')
res['pathInFile'] = pathInFile = dat[4].strip('"')
nDim = int(dat[5])
nDim_len = {1: 8, 2: 10, 3: 12}
if nDim not in nDim_len:
                raise ValueError('expecting nDim to be one of 1, 2, or 3; '
'found ' + str(nDim))
elif len(dat) < nDim_len[nDim]:
raise ValueError(
('expecting to find at least %d items for HDF5 with '
'%d dimensions; found %d') %
(nDim_len[nDim], nDim, len(dat)))
elif len(dat) > nDim_len[nDim]:
token = dat[nDim_len[nDim]]
st = first_line.find(token) + len(token)
res['text'] = first_line[st:].strip()
if nDim >= 1:
start1, nToRead1 = int(dat[6]), int(dat[7])
slice1 = slice(start1, start1 + nToRead1)
if nDim >= 2:
start2, nToRead2 = int(dat[8]), int(dat[9])
slice2 = slice(start2, start2 + nToRead2)
if nDim == 3:
start3, nToRead3 = int(dat[10]), int(dat[11])
slice3 = slice(start3, start3 + nToRead3)
fpath = os.path.join(self.parent.nam.ref_dir, fname)
if not os.path.isfile(fpath):
raise MissingFile("cannot find file '%s'" % (fpath,))
h5 = h5py.File(fpath, 'r')
ds = h5[pathInFile]
if nDim == 1:
iar = ds[slice1]
elif nDim == 2:
iar = ds[slice1, slice2]
elif nDim == 3:
iar = ds[slice1, slice2, slice3]
h5.close()
ar[:] = iar.reshape(shape) * cnstnt_val
elif len(control_line) > 20: # FIXED-FORMAT CONTROL LINE
# LOCAT CNSTNT FMTIN IPRN
del res['cntrl'] # control word was not used for fixed-format
try:
res['locat'] = locat = int(control_line[0:10])
res['cnstnt'] = cnstnt = control_line[10:20].strip()
if len(control_line) > 20:
res['fmtin'] = fmtin = control_line[20:40].strip().upper()
if len(control_line) > 40:
res['iprn'] = iprn = control_line[40:50].strip()
except ValueError:
raise ValueError('fixed-format control line not '
'understood: ' + repr(control_line))
if len(control_line) > 50 and 'text' not in res:
res['text'] = first_line[50:].strip()
if locat == 0: # all elements are set equal to cnstnt
ar.fill(cnstnt)
else:
nunit = abs(locat)
if self.parent.nunit == nunit:
obj = self
elif self.parent.nam is None:
obj = self
else:
obj = self.parent.nam[nunit]
if locat < 0:
fmtin = '(BINARY)'
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
else:
raise ValueError('array control line not understood: ' +
repr(control_line))
if 'text' in res:
withtext = ' with text "' + res['text'] + '"'
else:
withtext = ''
self.logger.debug(
'%s:read %r array with shape %s from line %d to %d%s',
self.data_set_num, ar.dtype.char, ar.shape,
startln, self.lineno, withtext)
if return_dict:
return res
else:
return ar
|
[
"h5py.File",
"numpy.empty",
"os.path.dirname",
"numpy.dtype",
"os.path.isfile",
"re.findall",
"numpy.fromiter",
"os.path.split",
"os.path.join",
"numpy.fromstring",
"re.compile"
] |
[((333, 449), 're.compile', 're.compile', (['"""\\\\((?P<body>(?P<rep>\\\\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\\\\d+)(\\\\.(?P<d>\\\\d+))?|FREE|BINARY)\\\\)"""'], {}), "(\n '\\\\((?P<body>(?P<rep>\\\\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\\\\d+)(\\\\.(?P<d>\\\\d+))?|FREE|BINARY)\\\\)'\n )\n", (343, 449), False, 'import re\n'), ((13151, 13179), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (13159, 13179), True, 'import numpy as np\n'), ((13102, 13117), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (13110, 13117), True, 'import numpy as np\n'), ((2382, 2409), 'os.path.dirname', 'os.path.dirname', (['self.fpath'], {}), '(self.fpath)\n', (2397, 2409), False, 'import os\n'), ((13871, 13897), 'numpy.fromstring', 'np.fromstring', (['data', 'dtype'], {}), '(data, dtype)\n', (13884, 13897), True, 'import numpy as np\n'), ((15022, 15053), 'numpy.fromiter', 'np.fromiter', (['items'], {'dtype': 'dtype'}), '(items, dtype=dtype)\n', (15033, 15053), True, 'import numpy as np\n'), ((2061, 2093), 'os.path.split', 'os.path.split', (['self.parent.fpath'], {}), '(self.parent.fpath)\n', (2074, 2093), False, 'import os\n'), ((18679, 18749), 're.findall', 're.findall', (['(\'([\' + file_ch + \']+|"[\' + file_ch + \' ]+")\')', 'control_line'], {}), '(\'([\' + file_ch + \']+|"[\' + file_ch + \' ]+")\', control_line)\n', (18689, 18749), False, 'import re\n'), ((20516, 20560), 'os.path.join', 'os.path.join', (['self.parent.nam.ref_dir', 'fname'], {}), '(self.parent.nam.ref_dir, fname)\n', (20528, 20560), False, 'import os\n'), ((20690, 20711), 'h5py.File', 'h5py.File', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (20699, 20711), False, 'import h5py\n'), ((20580, 20601), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (20594, 20601), False, 'import os\n')]
|
import numpy as np
import scipy
import scipy.stats
import csv
scores = np.load('regional_avgScore_nAD.npy')
print(scores.shape)
pool = [[0 for _ in range(scores.shape[1])] for _ in range(scores.shape[1])]
for i in range(scores.shape[1]-1):
for j in range(i+1, scores.shape[1]):
corr, _ = scipy.stats.pearsonr(scores[:, i], scores[:, j])
pool[i][j] = corr
pool[j][i] = corr
print(pool)
regions = \
['hippoR',
'hippoL',
'tempoR',
'tempoL',
'cerebeR',
'cerebeL',
'brainstem',
'insulaR',
'insulaL',
'occiR',
'occiL',
'frontR',
'frontL',
'parieR',
'parieL',
'ventri']
with open('nAD_correlation.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow([''] + regions)
for i in range(len(regions)):
spamwriter.writerow([regions[i]] + pool[i])
|
[
"numpy.load",
"csv.writer",
"scipy.stats.pearsonr"
] |
[((72, 108), 'numpy.load', 'np.load', (['"""regional_avgScore_nAD.npy"""'], {}), "('regional_avgScore_nAD.npy')\n", (79, 108), True, 'import numpy as np\n'), ((679, 755), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""" """', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (689, 755), False, 'import csv\n'), ((303, 351), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['scores[:, i]', 'scores[:, j]'], {}), '(scores[:, i], scores[:, j])\n', (323, 351), False, 'import scipy\n')]
|
import os
import argparse
import numpy as np
import pymatgen
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("mp_dir", help="Root directory with Materials Project dataset")
parser.add_argument("radial_cutoff", type=float, help="Radius of sphere that decides neighborhood")
args = parser.parse_args()
mp_dir = args.mp_dir
r_cut = args.radial_cutoff
index = np.load(os.path.join(mp_dir, 'meta_derived', f'index_connected_{r_cut}.npy'))
mp_cif_dir = os.path.join(mp_dir, "cif")
mp_save_dir = os.path.join(mp_dir, f"derived_radial_cutoff_{r_cut}")
def get_max_atomic_number(cif_paths):
max_atomic_number = -1
for cif_path in tqdm(cif_paths):
structure = pymatgen.Structure.from_file(cif_path)
max_atomic_number = max(max_atomic_number, max(structure.atomic_numbers))
return max_atomic_number
def process_cif(cif_path):
structure = pymatgen.Structure.from_file(cif_path)
return np.array(structure.atomic_numbers)
cif_paths = [os.path.join(mp_cif_dir, filename) for filename in index]
max_atomic_number = get_max_atomic_number(cif_paths)
atom_type_mask = np.zeros((len(cif_paths), max_atomic_number+1), dtype=np.bool)
for i, cif_path in enumerate(tqdm(cif_paths)):
atom_type_mask[i, process_cif(cif_path)] = True
np.save(os.path.join(mp_save_dir, "atom_type_mask.npy"), atom_type_mask)
|
[
"tqdm.tqdm",
"argparse.ArgumentParser",
"pymatgen.Structure.from_file",
"numpy.array",
"os.path.join"
] |
[((93, 118), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (116, 118), False, 'import argparse\n'), ((479, 506), 'os.path.join', 'os.path.join', (['mp_dir', '"""cif"""'], {}), "(mp_dir, 'cif')\n", (491, 506), False, 'import os\n'), ((521, 575), 'os.path.join', 'os.path.join', (['mp_dir', 'f"""derived_radial_cutoff_{r_cut}"""'], {}), "(mp_dir, f'derived_radial_cutoff_{r_cut}')\n", (533, 575), False, 'import os\n'), ((396, 464), 'os.path.join', 'os.path.join', (['mp_dir', '"""meta_derived"""', 'f"""index_connected_{r_cut}.npy"""'], {}), "(mp_dir, 'meta_derived', f'index_connected_{r_cut}.npy')\n", (408, 464), False, 'import os\n'), ((662, 677), 'tqdm.tqdm', 'tqdm', (['cif_paths'], {}), '(cif_paths)\n', (666, 677), False, 'from tqdm import tqdm\n'), ((898, 936), 'pymatgen.Structure.from_file', 'pymatgen.Structure.from_file', (['cif_path'], {}), '(cif_path)\n', (926, 936), False, 'import pymatgen\n'), ((948, 982), 'numpy.array', 'np.array', (['structure.atomic_numbers'], {}), '(structure.atomic_numbers)\n', (956, 982), True, 'import numpy as np\n'), ((998, 1032), 'os.path.join', 'os.path.join', (['mp_cif_dir', 'filename'], {}), '(mp_cif_dir, filename)\n', (1010, 1032), False, 'import os\n'), ((1219, 1234), 'tqdm.tqdm', 'tqdm', (['cif_paths'], {}), '(cif_paths)\n', (1223, 1234), False, 'from tqdm import tqdm\n'), ((1302, 1349), 'os.path.join', 'os.path.join', (['mp_save_dir', '"""atom_type_mask.npy"""'], {}), "(mp_save_dir, 'atom_type_mask.npy')\n", (1314, 1349), False, 'import os\n'), ((699, 737), 'pymatgen.Structure.from_file', 'pymatgen.Structure.from_file', (['cif_path'], {}), '(cif_path)\n', (727, 737), False, 'import pymatgen\n')]
|
"""
Copyright (c) 2018, University of Oxford, Rama Cont and ETH Zurich, <NAME>
This module provides the helper functions and the class LOBSTERReader, a subclass of OBReader to read in limit order book data in lobster format.
"""
######
# Imports
######
import csv
import math
import warnings
import numpy as np
from lobpy.datareader.orderbook import *
# LOBSTER specific file name functions
def _split_lobster_filename(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
filename2,_ = filename.split(".")
ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels = filename2.split("_")
return ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels
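# Illustrative LOBSTER-style name (Ticker_Date_TimeStart_TimeEnd_Type_Levels), e.g.
#   split_lobster_filename("AMZN_2012-06-21_34200000_57600000_orderbook_10.csv")
# returns ('AMZN', '2012-06-21', '34200000', '57600000', 'orderbook', '10').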
def split_lobster_filename(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
return _split_lobster_filename(filename)
def _split_lobster_filename_core(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
filename2, _ = filename.split(".")
ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels = filename2.split("_")
return ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels
def _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels):
return "_".join((ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels))
def create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels):
return _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels)
def _get_time_stamp_before(time_stamps, time_stamp):
''' Returns the value and index of the last time point in time_stamps before or equal time_stamp '''
time = time_stamps[0]
index = int(0)
if time == time_stamp:
# time_stamp found at index 0
return time, index
if time > time_stamp:
raise LookupError("Time stamp data start at {} which is after time_stamps: {}".format(time, time_stamp))
for ctr, time_now in enumerate(time_stamps[1:]):
if time_now > time_stamp:
return time, ctr
time = time_now
return time, ctr+1
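# Example: _get_time_stamp_before([1.0, 2.5, 4.0], 3.0) returns (2.5, 1), i.e.
# the last recorded time point at or before 3.0 together with its index.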
class LOBSTERReader(OBReader):
"""
OBReader object specified for using LOBSTER files
----------
params:
ticker_str,
date_str,
time_start_str,
time_end_str,
num_levels_str,
time_start_calc_str,
time_end_calc_str
Example usage:
to create an object
>>> lobreader = LOBSTERReader("SYMBOL", "2012-06-21", "34200000", "57600000", "10")
read market depth on uniform time grid with num_observation number of observations
>>> dt, time_stamps, depth_bid, depth_ask = lobreader.load_marketdepth(num_observations)
read price process on that time grid specified above
>>> dt2, time_stamps2, price_mid, price_bid, price_ask = lobreader.load_marketdepth(None)
"""
def __init__(
self,
ticker_str,
date_str,
time_start_str,
time_end_str,
num_levels_str,
time_start_calc_str="",
time_end_calc_str="",
num_levels_calc_str=""
):
self.ticker_str = ticker_str
self.date_str = date_str
self.lobfilename = _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, ORDERBOOK_FILE_ID, num_levels_str)
self.msgfilename = _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, MESSAGE_FILE_ID, num_levels_str)
self.time_start = int(time_start_str)
self.time_end = int(time_end_str)
self.num_levels = int(num_levels_str)
self.time_start_calc = int(time_start_str)
self.time_end_calc = int(time_end_str)
self.num_levels_calc = int(num_levels_str)
if not (num_levels_calc_str == ""):
self.num_levels_calc = int(num_levels_calc_str)
self.data = dict()
if not (time_start_calc_str == ""):
self.time_start_calc = int(time_start_calc_str)
if not (time_end_calc_str == ""):
self.time_end_calc = int(time_end_calc_str)
def set_timecalc(self, time_start_calc_str, time_end_calc_str):
self.time_start_calc = int(time_start_calc_str)
self.time_end_calc = int(time_end_calc_str)
return True
def create_filestr(self, identifier_str, num_levels=None):
""" Creates lobster type file string """
if num_levels is None:
num_levels = self.num_levels
return _create_lobster_filename(self.ticker_str, self.date_str, str(self.time_start_calc), str(self.time_end_calc), identifier_str, str(num_levels))
def average_profile_tt(self, num_levels_calc_str="" , write_outputfile = False):
""" Computes the average order book profile, averaged over trading time, from the csv sourcefile. To avoid numerical errors by summing up large numbers, the Kahan Summation algorithm is used for mean computation
----------
args:
            num_levels_calc_str: number of levels which should be considered for the output
            write_outputfile: if True, then the average order book profile is stored as a csv file
----------
output:
(mean_bid, mean_ask) in format of numpy arrays
"""
print("Starting computation of average order book profile in file %s."%self.lobfilename)
num_levels_calc = self.num_levels
if not(num_levels_calc_str == ""):
num_levels_calc = int(num_levels_calc_str)
if self.num_levels < num_levels_calc:
raise DataRequestError("Number of levels in data ({0}) is smaller than number of levels requested for calculation ({1}).".format(self.num_levels, num_levels_calc))
tempval1 = 0.0
tempval2 = 0.0
comp = np.zeros(num_levels_calc * 2) # compensator for lost low-order bits
mean = np.zeros(num_levels_calc * 2) # running mean
with open(self.lobfilename+".csv", newline='') as csvfile:
lobdata = csv.reader(csvfile, delimiter=',')
num_lines = sum(1 for row in lobdata)
print("Loaded successfully. Number of lines: " + str(num_lines))
csvfile.seek(0) # reset iterator to beginning of the file
print("Start calculation.")
for row in lobdata: # data are read as list of strings
                currorders = np.fromiter(row[1:(4*num_levels_calc + 1):2], np.float) # parse to float
for ctr, currorder in enumerate(currorders):
#print(lobstate)
tempval1 = currorder / num_lines - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
print("Calculation finished.")
# Add data to self.data
self.add_data("--".join(("ttime-"+AV_ORDERBOOK_FILE_ID, "bid")), mean[1::2])
self.add_data("--".join(("ttime-"+AV_ORDERBOOK_FILE_ID, "ask")), mean[0::2])
if not write_outputfile:
return mean[1::2], mean[0::2] # LOBster format: bid data at odd * 2, LOBster format: ask data at even * 2
print("Write output file.")
outfilename = self.create_filestr("-".join(("ttime",AV_ORDERBOOK_FILE_ID)) , str(num_levels_calc))
outfilename = ".".join((outfilename,'csv'))
with open(outfilename, 'w') as outfile:
wr = csv.writer(outfile)
wr.writerow(mean[1::2]) # LOBster format: bid data at odd * 2
wr.writerow(mean[0::2]) # LOBster format: ask data at even * 2
print("Average order book saved as %s."%outfilename)
return mean[1::2], mean[0::2]
def average_profile(
self,
num_levels_calc_str="",
write_outputfile = False
):
""" Returns the average oder book profile from the csv sourcefile, averaged in real time. To avoid numerical errors by summing up large numbers, the Kahan Summation algorithm is used for mean computation """
if num_levels_calc_str == "":
num_levels_calc = self.num_levels_calc
else:
num_levels_calc = int(num_levels_calc_str)
if int(self.num_levels) < num_levels_calc:
raise DataRequestError("Number of levels in data ({0}) is smaller than number of levels requested for calculation ({1}).".format(self.num_level, num_levels_calc))
time_start = float(self.time_start_calc / 1000.)
time_end = float(self.time_end_calc / 1000.)
mean = np.zeros(num_levels_calc * 2) # running mean
tempval1 = 0.0
tempval2 = 0.0
linectr = 0
comp = np.zeros(num_levels_calc * 2) # compensator for lost low-order bits
flag = 0
with open(".".join((self.lobfilename, 'csv')), newline='') as orderbookfile, open(".".join((self.msgfilename, 'csv')), newline='') as messagefile:
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
nexttime = float(rowMES[0]) # t(0)
if time_end < nexttime:
# In this case there are no entries in the file for the selected time interval. Array of 0s is returned
warnings.warn("The first entry in the data files is after the end of the selected time period. Arrays of 0s will be returned as mean.")
return mean[1::2], mean[0::2]
            currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to float, extract bucket volumes only at t(0)
if time_start <= nexttime:
flag = 1
for rowLOB, rowMES in zip(lobdata,messagedata): # data are read as list of string, iterator now starts at second entry (since first has been exhausted above)
currtime = nexttime #(t(i))
nexttime = float(rowMES[0]) #(t(i+1))
if flag == 0:
if time_start <= nexttime:
# Start calculation
flag = 1
currtime = time_start
for ctr, currbucket in enumerate(currprofile):
tempval1 = (nexttime - currtime) / float(time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
else:
if time_end < nexttime:
# Finish calculation
nexttime = time_end
for ctr, currbucket in enumerate(currprofile):
#print(currprofile)
tempval1 = (nexttime - currtime) / float(time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
if time_end == nexttime:
# Finish calculation
break
## Update order book to time t(i+1)
                currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2],np.float) # parse to float, extract bucket volumes only
        else: # executed only when the loop was not exited via break, i.e. time_end >= last time stamp in the file; in this case we extrapolate constantly
warnings.warn("Extrapolated order book data since time_end exceed time at end of the file by %f seconds."%(time_end - nexttime))
currtime = nexttime
nexttime = time_end
for ctr, currbucket in enumerate(currprofile):
#print(lobstate)
tempval1 = (nexttime - currtime) / (time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
print("Calculation finished.")
# Add data to self.data
self.add_data("--".join((AV_ORDERBOOK_FILE_ID, "bid")), mean[1::2])
self.add_data("--".join((AV_ORDERBOOK_FILE_ID, "ask")), mean[0::2])
if not write_outputfile:
return mean[1::2], mean[0::2] # LOBster format: bid data at odd * 2, LOBster format: ask data at even * 2
print("Write output file.")
outfilename = self.create_filestr(AV_ORDERBOOK_FILE_ID , str(num_levels_calc))
outfilename = ".".join((outfilename,'csv'))
with open(outfilename, 'w') as outfile:
wr = csv.writer(outfile)
wr.writerow(mean[1::2]) # LOBster format: bid data at odd * 2
wr.writerow(mean[0::2]) # LOBster format: ask data at even * 2
print("Average order book saved as %s."%outfilename)
return mean[1::2], mean[0::2]
def _load_ordervolume(
self,
num_observations,
num_levels_calc,
profile2vol_fct=np.sum
):
        ''' Extracts the volume of orders in the first num_levels_calc buckets at a uniform time grid of num_observations observations from the interval [time_start_calc, time_end_calc]. The volume process is extrapolated constantly from the last value in the file, in case time_end_calc is larger than the last time stamp in the file. profile2vol_fct specifies how the volume is summarized from the profile; typical choices are np.sum or np.mean.
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
        ctr_obs = 0 # counter for the outer loop over observation times
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
volume_bid = np.zeros(num_observations)
volume_ask = np.zeros(num_observations)
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float)
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of volume in the file. Keep processes constant
if (ctr_obs > 0):
volume_bid[ctr_obs] = volume_bid[ctr_obs-1]
volume_ask[ctr_obs] = volume_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
volume_bid[ctr_obs] = 0.
volume_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# extract order volume from profile
volume_bid[ctr_obs] = profile2vol_fct(currprofile[1::2])
volume_ask[ctr_obs] = profile2vol_fct(currprofile[0::2])
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update currprofile and time_file
                    currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to float, extract bucket volumes only
time_file = float(rowMES[0])
if (file_ended_line < num_observations):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
return dt, time_stamps, volume_bid, volume_ask
def _load_ordervolume_levelx(
self,
num_observations,
level
):
        ''' Extracts the bid and ask volume of orders at price level 'level' at a uniform time grid of num_observations observations from the interval [time_start_calc, time_end_calc]. The volume process is extrapolated constantly from the last value in the file, in case time_end_calc is larger than the last time stamp in the file.
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
        ctr_obs = 0 # counter for the outer loop over observation times
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
volume_bid = np.zeros(num_observations)
volume_ask = np.zeros(num_observations)
# Ask level x is at position (x-1)*4 + 1, bid level x is at position (x-1)*4 + 3
x_bid = (int(level) - 1) * 4 + 3
x_ask = (int(level) - 1) * 4 + 1
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
#currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float)
currbid = float(rowLOB[x_bid])
currask = float(rowLOB[x_ask])
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of volume in the file. Keep processes constant
if (ctr_obs > 0):
volume_bid[ctr_obs] = volume_bid[ctr_obs-1]
volume_ask[ctr_obs] = volume_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
volume_bid[ctr_obs] = 0.
volume_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# extract order volume from profile
volume_bid[ctr_obs] = currbid
volume_ask[ctr_obs] = currask
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update currprofile and time_file
#currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to integer, extract bucket volumes only
currbid = float(rowLOB[x_bid])
currask = float(rowLOB[x_ask])
time_file = float(rowMES[0])
if (file_ended_line < num_observations):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
return dt, time_stamps, volume_bid, volume_ask
def _load_ordervolume_full(
self,
num_levels_calc,
profile2vol_fct=np.sum,
ret_np=True
):
        ''' Extracts the volume of orders in the first num_levels_calc buckets from the interval [time_start_calc, time_end_calc]. profile2vol_fct specifies how the volume is summarized from the profile; typical choices are np.sum or np.mean. If ret_np is False the outputs are lists, otherwise numpy arrays.
        Note: Due to the possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
time_stamps = []
volume_bid = []
volume_ask = []
index_start = -1
index_end = -1
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
for ctrRow, (rowLOB, rowMES) in enumerate(zip(lobdata, messagedata)):
time_now = float(rowMES[0])
if (index_start == -1) and (time_now >= time_start_calc):
index_start = ctrRow
if (index_end == -1) and (time_now > time_end_calc):
index_end = ctrRow
break
time_stamps.append(time_now)
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to integer, extract bucket volumes only
volume_bid.append(profile2vol_fct(currprofile[1::2]))
volume_ask.append(profile2vol_fct(currprofile[0::2]))
if index_end == -1:
#file end reached
index_end = len(time_stamps)
if ret_np:
return np.array(time_stamps[index_start:index_end]), np.array(volume_bid[index_start:index_end]), np.array(volume_ask[index_start:index_end])
return time_stamps[index_start:index_end], volume_bid[index_start:index_end], volume_ask[index_start:index_end]
def _load_prices(
self,
num_observations
):
''' private method to implement how the price data are loaded from the files '''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
        ctr_obs = 0 # counter for the outer loop over the observation time stamps
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
prices_bid = np.empty(num_observations)
prices_ask = np.empty(num_observations)
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of prices in the file. Keep processes constant
if (ctr_obs > 0):
prices_bid[ctr_obs] = prices_bid[ctr_obs-1]
prices_ask[ctr_obs] = prices_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
prices_bid[ctr_obs] = 0.
prices_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# LOBster stores best ask and bid price in resp. 1st and 3rd column, price in unit USD*10000
prices_bid[ctr_obs] = float(rowLOB[2]) / float(10000)
prices_ask[ctr_obs] = float(rowLOB[0]) / float(10000)
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update time_file
time_file = float(rowMES[0])
        if (file_ended_line < num_observations-1):
            warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
            while ctr_obs < (num_observations-1):
                # extrapolate the last observed prices constantly to the end of the grid
                prices_bid[ctr_obs+1] = prices_bid[ctr_obs]
                prices_ask[ctr_obs+1] = prices_ask[ctr_obs]
                ctr_obs += 1
return dt, time_stamps, prices_bid, prices_ask
def _load_profile_snapshot_lobster(
self,
time_stamp,
num_levels_calc=None
):
        ''' Returns four numpy arrays with a snapshot of the bid and ask sides of the order book at a given time stamp
Output:
bid_prices, bid_volume, ask_prices, ask_volume
'''
#convert time from msec to sec
time_stamp = float(time_stamp) / 1000.
if num_levels_calc is None:
num_levels_calc = self.num_levels_calc
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
time_file = float(rowMES[0])
if time_file > time_stamp:
                raise LookupError("Time data in the file start at {} which is after time_stamp: {}".format(time_file, time_stamp))
if time_file == time_stamp:
# file format is [ask level, ask volume, bid level, bid volume, ask level, ....]
#conversion of price levels to USD
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
for rowMES in messagedata:
time_file = float(rowMES[0])
if time_file > time_stamp:
# file format is [ask level, ask volume, bid level, bid volume, ask level, ....]
#conversion of price levels to USD
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
break
rowLOB = next(lobdata)
else:
# time in file did not exceed time stamp to the end. Return last entries of the file
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
return bid_prices, bid_volume, ask_prices, ask_volume
def load_profile_snapshot(
self,
time_stamp,
num_levels_calc=None
):
        ''' Returns four numpy arrays with a snapshot of the bid and ask sides of the order book at a given time stamp
Output:
bid_prices, bid_volume, ask_prices, ask_volume
'''
return self._load_profile_snapshot_lobster(time_stamp, num_levels_calc)
# END LOBSTERReader
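# Minimal usage sketch (editor's addition): the LOBSTERReader constructor is not shown in this
# excerpt, so the arguments below are assumptions; the instance is expected to carry the LOBSTER
# csv basenames (lobfilename, msgfilename) and a [time_start_calc, time_end_calc] window in
# milliseconds.
#
#     reader = LOBSTERReader(...)                       # construct with file basenames + time window
#     dt, t, pb, pa = reader._load_prices(1000)         # best bid/ask prices on 1000 uniform grid points
#     bp, bv, ap, av = reader.load_profile_snapshot(34200000, num_levels_calc=10)  # snapshot at 09:30:00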
|
[
"csv.reader",
"csv.writer",
"numpy.empty",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"numpy.fromiter",
"warnings.warn"
] |
[((6227, 6256), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (6235, 6256), True, 'import numpy as np\n'), ((6314, 6343), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (6322, 6343), True, 'import numpy as np\n'), ((9228, 9257), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (9236, 9257), True, 'import numpy as np\n'), ((9357, 9386), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (9365, 9386), True, 'import numpy as np\n'), ((15093, 15168), 'numpy.linspace', 'np.linspace', (['time_start_calc', 'time_end_calc', 'num_observations'], {'retstep': '(True)'}), '(time_start_calc, time_end_calc, num_observations, retstep=True)\n', (15104, 15168), True, 'import numpy as np\n'), ((15192, 15218), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (15200, 15218), True, 'import numpy as np\n'), ((15240, 15266), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (15248, 15266), True, 'import numpy as np\n'), ((18846, 18921), 'numpy.linspace', 'np.linspace', (['time_start_calc', 'time_end_calc', 'num_observations'], {'retstep': '(True)'}), '(time_start_calc, time_end_calc, num_observations, retstep=True)\n', (18857, 18921), True, 'import numpy as np\n'), ((18945, 18971), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (18953, 18971), True, 'import numpy as np\n'), ((18993, 19019), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (19001, 19019), True, 'import numpy as np\n'), ((24748, 24823), 'numpy.linspace', 'np.linspace', (['time_start_calc', 'time_end_calc', 'num_observations'], {'retstep': '(True)'}), '(time_start_calc, time_end_calc, num_observations, retstep=True)\n', (24759, 24823), True, 'import numpy as np\n'), ((24847, 24873), 'numpy.empty', 'np.empty', (['num_observations'], {}), '(num_observations)\n', (24855, 24873), True, 'import numpy as np\n'), ((24895, 24921), 'numpy.empty', 'np.empty', (['num_observations'], {}), '(num_observations)\n', (24903, 24921), True, 'import numpy as np\n'), ((6452, 6486), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (6462, 6486), False, 'import csv\n'), ((9628, 9668), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (9638, 9668), False, 'import csv\n'), ((9695, 9733), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (9705, 9733), False, 'import csv\n'), ((10323, 10381), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (10334, 10381), True, 'import numpy as np\n'), ((13738, 13757), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (13748, 13757), False, 'import csv\n'), ((15441, 15481), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (15451, 15481), False, 'import csv\n'), ((15508, 15546), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (15518, 15546), False, 'import csv\n'), ((15788, 15846), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (15799, 15846), True, 'import numpy as np\n'), ((17626, 
17773), 'warnings.warn', 'warnings.warn', (["('End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line))", 'RuntimeWarning'], {}), "(\n 'End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line), RuntimeWarning)\n", (17639, 17773), False, 'import warnings\n'), ((19375, 19415), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (19385, 19415), False, 'import csv\n'), ((19442, 19480), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (19452, 19480), False, 'import csv\n'), ((21697, 21844), 'warnings.warn', 'warnings.warn', (["('End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line))", 'RuntimeWarning'], {}), "(\n 'End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line), RuntimeWarning)\n", (21710, 21844), False, 'import warnings\n'), ((22937, 22977), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (22947, 22977), False, 'import csv\n'), ((23004, 23042), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (23014, 23042), False, 'import csv\n'), ((25096, 25136), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (25106, 25136), False, 'import csv\n'), ((25163, 25201), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (25173, 25201), False, 'import csv\n'), ((27049, 27196), 'warnings.warn', 'warnings.warn', (["('End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line))", 'RuntimeWarning'], {}), "(\n 'End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line), RuntimeWarning)\n", (27062, 27196), False, 'import warnings\n'), ((28085, 28125), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (28095, 28125), False, 'import csv\n'), ((28152, 28190), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (28162, 28190), False, 'import csv\n'), ((6836, 6891), 'numpy.fromiter', 'np.fromiter', (['row[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(row[1:4 * num_levels_calc + 1:2], np.float)\n', (6847, 6891), True, 'import numpy as np\n'), ((7989, 8008), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (7999, 8008), False, 'import csv\n'), ((10102, 10247), 'warnings.warn', 'warnings.warn', (['"""The first entry in the data files is after the end of the selected time period. Arrays of 0s will be returned as mean."""'], {}), "(\n 'The first entry in the data files is after the end of the selected time period. 
Arrays of 0s will be returned as mean.'\n )\n", (10115, 10247), False, 'import warnings\n'), ((12231, 12289), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (12242, 12289), True, 'import numpy as np\n'), ((12514, 12654), 'warnings.warn', 'warnings.warn', (["('Extrapolated order book data since time_end exceed time at end of the file by %f seconds.'\n % (time_end - nexttime))"], {}), "(\n 'Extrapolated order book data since time_end exceed time at end of the file by %f seconds.'\n % (time_end - nexttime))\n", (12527, 12654), False, 'import warnings\n'), ((23619, 23677), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (23630, 23677), True, 'import numpy as np\n'), ((24028, 24072), 'numpy.array', 'np.array', (['time_stamps[index_start:index_end]'], {}), '(time_stamps[index_start:index_end])\n', (24036, 24072), True, 'import numpy as np\n'), ((24074, 24117), 'numpy.array', 'np.array', (['volume_bid[index_start:index_end]'], {}), '(volume_bid[index_start:index_end])\n', (24082, 24117), True, 'import numpy as np\n'), ((24119, 24162), 'numpy.array', 'np.array', (['volume_ask[index_start:index_end]'], {}), '(volume_ask[index_start:index_end])\n', (24127, 24162), True, 'import numpy as np\n'), ((28934, 28988), 'numpy.fromiter', 'np.fromiter', (['rowLOB[3:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[3:4 * num_levels_calc:4], np.float)\n', (28945, 28988), True, 'import numpy as np\n'), ((29168, 29222), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc:4], np.float)\n', (29179, 29222), True, 'import numpy as np\n'), ((30320, 30374), 'numpy.fromiter', 'np.fromiter', (['rowLOB[3:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[3:4 * num_levels_calc:4], np.float)\n', (30331, 30374), True, 'import numpy as np\n'), ((30554, 30608), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc:4], np.float)\n', (30565, 30608), True, 'import numpy as np\n'), ((17381, 17439), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (17392, 17439), True, 'import numpy as np\n'), ((28835, 28889), 'numpy.fromiter', 'np.fromiter', (['rowLOB[2:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[2:4 * num_levels_calc:4], np.float)\n', (28846, 28889), True, 'import numpy as np\n'), ((29069, 29123), 'numpy.fromiter', 'np.fromiter', (['rowLOB[0:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[0:4 * num_levels_calc:4], np.float)\n', (29080, 29123), True, 'import numpy as np\n'), ((29674, 29728), 'numpy.fromiter', 'np.fromiter', (['rowLOB[3:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[3:4 * num_levels_calc:4], np.float)\n', (29685, 29728), True, 'import numpy as np\n'), ((29920, 29974), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc:4], np.float)\n', (29931, 29974), True, 'import numpy as np\n'), ((30221, 30275), 'numpy.fromiter', 'np.fromiter', (['rowLOB[2:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[2:4 * num_levels_calc:4], np.float)\n', (30232, 30275), True, 'import numpy as np\n'), ((30455, 30509), 'numpy.fromiter', 'np.fromiter', (['rowLOB[0:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[0:4 * 
num_levels_calc:4], np.float)\n', (30466, 30509), True, 'import numpy as np\n'), ((16391, 16505), 'warnings.warn', 'warnings.warn', (['"""Data do not contain beginning of the monitoring period. Values set to 0."""', 'RuntimeWarning'], {}), "(\n 'Data do not contain beginning of the monitoring period. Values set to 0.',\n RuntimeWarning)\n", (16404, 16505), False, 'import warnings\n'), ((20413, 20527), 'warnings.warn', 'warnings.warn', (['"""Data do not contain beginning of the monitoring period. Values set to 0."""', 'RuntimeWarning'], {}), "(\n 'Data do not contain beginning of the monitoring period. Values set to 0.',\n RuntimeWarning)\n", (20426, 20527), False, 'import warnings\n'), ((25903, 26017), 'warnings.warn', 'warnings.warn', (['"""Data do not contain beginning of the monitoring period. Values set to 0."""', 'RuntimeWarning'], {}), "(\n 'Data do not contain beginning of the monitoring period. Values set to 0.',\n RuntimeWarning)\n", (25916, 26017), False, 'import warnings\n'), ((29571, 29625), 'numpy.fromiter', 'np.fromiter', (['rowLOB[2:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[2:4 * num_levels_calc:4], np.float)\n', (29582, 29625), True, 'import numpy as np\n'), ((29817, 29871), 'numpy.fromiter', 'np.fromiter', (['rowLOB[0:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[0:4 * num_levels_calc:4], np.float)\n', (29828, 29871), True, 'import numpy as np\n')]
|
from .array import TensorTrainArray
from .slice import TensorTrainSlice
from .dispatch import implement_function
from ..raw import find_balanced_cluster,trivial_decomposition
import numpy as np
def _get_cluster_chi_array(shape,cluster,chi):
if cluster is None:
cluster=find_balanced_cluster(shape)
if isinstance(chi,int):
chi=[chi]*(len(cluster)-1)
chi=tuple([1]+list(chi)+[1])
return cluster,chi
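# Worked example (editor's addition): for shape=(2, 3, 4) and chi=5, and assuming
# find_balanced_cluster assigns one mode per core, cluster becomes ((2,), (3,), (4,)) and chi is
# padded with the trivial boundary bonds to (1, 5, 5, 1) -- one bond dimension per internal cut
# plus the two ends, which are always 1 for an array.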
def _get_cluster_chi_slice(shape,cluster,chi):
if len(shape)<2:
raise ValueError("TensorTrainSlice has at least 2 dimensions.")
if cluster is None:
cluster=find_balanced_cluster(shape[1:-1])
if isinstance(chi,int):
chi=[chi]*(len(cluster)-1)
chi=tuple([shape[0]]+list(chi)+[shape[-1]])
return cluster,chi
@implement_function("empty","array")
def empty(shape,dtype=np.float64, cluster=None,chi=1):
'''
Create an empty TensorTrainArray
'''
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
ms=[np.empty([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainArray.frommatrices(ms)
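# Resulting core shapes (editor's sketch): empty((2, 3, 4), cluster=((2,), (3,), (4,)), chi=5)
# allocates three cores of shapes (1, 2, 5), (5, 3, 5) and (5, 4, 1); the outermost bond
# dimensions of a TensorTrainArray are always 1.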
@implement_function("empty","slice")
def empty_slice(shape,dtype=np.float64, cluster=None,chi=1):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
ms=[np.empty([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def empty_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return empty(shape,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return empty_slice(shape,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("zeros","array")
def zeros(shape,dtype=np.float64,cluster=None,chi=1):
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainArray.frommatrices(ms)
@implement_function("zeros","slice")
def zeros_slice(shape,dtype=np.float64,cluster=None,chi=1):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def zeros_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return zeros(shape,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return zeros_slice(shape,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("ones","array")
def ones(shape,dtype=np.float64,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
for m in ms:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
return TensorTrainArray.frommatrices(ms)
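# How ones() works (editor's note): every core is all zeros except its (0, ..., 0) bond block,
# which is filled with ones, so contracting the train reproduces the all-ones tensor exactly;
# any extra bond dimension requested via chi is simply padded with zeros.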
@implement_function("ones","slice")
def ones_slice(shape,dtype=np.float64,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
if len(ms)==1:
ms[0]=np.ones(ms[0].shape,dtype)
else:
ms[0][...,0]=np.ones(ms[0].shape[:-1],dtype)
ms[-1][0,...]=np.ones(ms[-1].shape[1:],dtype)
for m in ms[1:-1]:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def ones_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return ones(shape,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return ones_slice(shape,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("full","array")
def full(shape,fill_value,dtype=None,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
if dtype is None:
dtype=np.array(fill_value).dtype
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
for m in ms:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
ms[-1]*=fill_value
return TensorTrainArray.frommatrices(ms)
@implement_function("full","slice")
def full_slice(shape,fill_value,dtype=None,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
if dtype is None:
dtype=np.array(fill_value).dtype
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
if len(ms)==1:
ms[0]=np.ones(ms[0].shape,dtype)
else:
ms[0][...,0]=np.ones(ms[0].shape[:-1],dtype)
ms[-1][0,...]=np.ones(ms[-1].shape[1:],dtype)
for m in ms[1:-1]:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
ms[-1]*=fill_value
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def full_like(prototype, fill_value, dtype=None, shape=None, cluster=None, chi=1):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return full(shape,fill_value,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return full_slice(shape,fill_value,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("eye","array")
def eye(N, M=None, k=0, dtype=np.float64, cluster=None):
    if M is not None:
raise NotImplementedError("not implemented yet ...")
return diag(ones((N,),dtype=dtype,cluster=cluster),k)
@implement_function("identity","array")
def identity(n,dtype=None,cluster=None):
return eye(N=n,dtype=dtype,cluster=cluster)
@implement_function("diag","array")
def diag(v,k=0):
pass
@implement_function("array","array")
def array(ar, dtype=None, cluster=None, copy=True,*,ndim=0):
if isinstance(ar,TensorTrainArray):
#copy if necessary, recluster if necessary
pass
elif isinstance(ar,TensorTrainSlice):
#just recast, clustering is then a bit weird, recluster afterwards
arm=ar.asmatrices()
if not copy:
arm[0]=arm[0][None,...]
else:
pass
ret=TensorTrainArray.frommatrices(arm)
if cluster is not None:
ret.recluster(cluster)
return ret
else:
return TensorTrainArray.fromdense(ar,dtype,cluster)
@implement_function("array","slice")
def slice(ar, dtype=None, cluster=None, copy=True,*,ndim=0):
if isinstance(ar,TensorTrainSlice):
#copy if necessary
pass
elif isinstance(ar,TensorTrainArray):
#recluster then recast
pass
else:
return TensorTrainSlice.fromdense(ar,dtype,cluster)
@implement_function("asarray","array")
def asarray(ar, dtype=None,cluster=None):
return array(ar,dtype,cluster=cluster,copy=False)
@implement_function("asarray","slice")
def asslice(ar, dtype=None,cluster=None):
return slice(ar,dtype,cluster=cluster,copy=False)
@implement_function("asanyarray","array")
def asanyarray(ar, dtype=None,cluster=None):
return array(ar,dtype,cluster=cluster,copy=False)
@implement_function("asanyarray","slice")
def asanyslice(ar, dtype=None,cluster=None):
return slice(ar,dtype,cluster=cluster,copy=False)
@implement_function("frombuffer","array")
def frombuffer(buffer, dtype=float, count=- 1, offset=0, cluster=None):
return array(np.frombuffer(buffer,dtype,count,offset),dtype=dtype,cluster=cluster)
@implement_function("fromiter","array")
def fromiter(iter, dtype, count=- 1, cluster=None):
return array(np.fromiter(iter,dtype,count),dtype=dtype,cluster=cluster)
@implement_function("fromfunction","array")
def fromfunction(function, shape, dtype=float, cluster=None, **kwargs):
'''
Should be upgraded to support ttcross eventually, so might change behavior if function is not sane
'''
return array(np.fromfunction(function,shape,dtype=dtype,**kwargs),dtype=dtype,cluster=cluster)
@implement_function("fromfunction","slice")
def fromfunction_slice(function, shape, dtype=float, cluster=None, **kwargs):
'''
Should be upgraded to support ttcross eventually, so might change behavior if function is not sane
'''
return slice(np.fromfunction(function,shape,dtype=dtype,**kwargs),dtype=dtype,cluster=cluster)
@implement_function()
def copy(a,*,order=None,subok=None):
if isinstance(a,TensorTrainArray) or isinstance(a,TensorTrainSlice):
return a.copy()
else:
return NotImplemented
@implement_function("arange","array")
def arange(*args, **kwargs):
#wild hack to deal with optional arguments
    if len(args)==5:
        return array(np.arange(*args[:-1],**kwargs),cluster=args[-1])
    elif "cluster" in kwargs.keys():
        cluster=kwargs["cluster"]
        del kwargs["cluster"]
        return array(np.arange(*args,**kwargs),cluster=cluster)
    else:
        return array(np.arange(*args,**kwargs))
@implement_function("linspace","array")
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0,cluster=None):
    return array(np.linspace(todense(start),todense(stop),num,endpoint,retstep,dtype,axis),cluster=cluster)
# @implement_function("linspace","slice")
# def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0,cluster=None):
# slice(np.linspace(todense(start),todense(stop),num,endpoint,retstep,dtype,axis),cluster=cluster)
@implement_function("logspace","array")
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0,cluster=None):
raise NotImplementedError("not yet")
# @implement_function("logspace","slice")
# def logspace_slice(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0,cluster=None):
# slice(np.logspace(todense(start),todense(stop),num,endpoint,base,dtype,axis),cluster=cluster)
@implement_function("geomspace","array")
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0,cluster=None):
raise NotImplementedError("not yet")
# @implement_function("geomspace","slice")
# def geomspace_slice(start, stop, num=50, endpoint=True, dtype=None, axis=0,cluster=None):
# slice(np.geomspace(todense(start),todense(stop),num,endpoint,dtype,axis),cluster=cluster)
# def fromdense(ar,dtype=None,cluster=None):
# return TensorTrainArray.fromdense(ar,dtype,cluster)
# def fromdense_slice(ar,dtype,cluster):
# return TensorTrainSlice.fromdense(ar,dtype,cluster)
def todense(ttar):
return ttar.todense()
@implement_function("asfarray","array")
def asfarray(ttar,dtype=None):
if not np.issubdtype(dtype,np.inexact):
dtype=float
return asarray(ttar,dtype=dtype)
@implement_function("asfarray","slice")
def asfslice(ttar,dtype=None):
if not np.issubdtype(dtype,np.inexact):
dtype=float
return asslice(ttar,dtype=dtype)
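# Minimal usage sketch (editor's addition, assuming the creation routines mirror their dense
# numpy counterparts as the dispatch decorators suggest):
#
#     a = zeros((4, 4, 4), chi=2)        # all-zero TT array with bond dimension 2
#     b = ones_like(a)                   # same shape/cluster/chi, representing the all-ones tensor
#     c = full((4, 4, 4), 3.0, chi=2)    # constant tensor; only the last core is scaled by 3.0
#     d = todense(c)                     # convert back to a dense numpy array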
|
[
"numpy.frombuffer",
"numpy.ones",
"numpy.array",
"numpy.arange",
"numpy.fromiter",
"numpy.fromfunction",
"numpy.issubdtype"
] |
[((3271, 3300), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (3278, 3300), True, 'import numpy as np\n'), ((3633, 3660), 'numpy.ones', 'np.ones', (['ms[0].shape', 'dtype'], {}), '(ms[0].shape, dtype)\n', (3640, 3660), True, 'import numpy as np\n'), ((3691, 3723), 'numpy.ones', 'np.ones', (['ms[0].shape[:-1]', 'dtype'], {}), '(ms[0].shape[:-1], dtype)\n', (3698, 3723), True, 'import numpy as np\n'), ((3745, 3777), 'numpy.ones', 'np.ones', (['ms[-1].shape[1:]', 'dtype'], {}), '(ms[-1].shape[1:], dtype)\n', (3752, 3777), True, 'import numpy as np\n'), ((3819, 3848), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (3826, 3848), True, 'import numpy as np\n'), ((4744, 4773), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (4751, 4773), True, 'import numpy as np\n'), ((5196, 5223), 'numpy.ones', 'np.ones', (['ms[0].shape', 'dtype'], {}), '(ms[0].shape, dtype)\n', (5203, 5223), True, 'import numpy as np\n'), ((5254, 5286), 'numpy.ones', 'np.ones', (['ms[0].shape[:-1]', 'dtype'], {}), '(ms[0].shape[:-1], dtype)\n', (5261, 5286), True, 'import numpy as np\n'), ((5308, 5340), 'numpy.ones', 'np.ones', (['ms[-1].shape[1:]', 'dtype'], {}), '(ms[-1].shape[1:], dtype)\n', (5315, 5340), True, 'import numpy as np\n'), ((5382, 5411), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (5389, 5411), True, 'import numpy as np\n'), ((8067, 8110), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'dtype', 'count', 'offset'], {}), '(buffer, dtype, count, offset)\n', (8080, 8110), True, 'import numpy as np\n'), ((8247, 8278), 'numpy.fromiter', 'np.fromiter', (['iter', 'dtype', 'count'], {}), '(iter, dtype, count)\n', (8258, 8278), True, 'import numpy as np\n'), ((8564, 8619), 'numpy.fromfunction', 'np.fromfunction', (['function', 'shape'], {'dtype': 'dtype'}), '(function, shape, dtype=dtype, **kwargs)\n', (8579, 8619), True, 'import numpy as np\n'), ((8909, 8964), 'numpy.fromfunction', 'np.fromfunction', (['function', 'shape'], {'dtype': 'dtype'}), '(function, shape, dtype=dtype, **kwargs)\n', (8924, 8964), True, 'import numpy as np\n'), ((11232, 11264), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.inexact'], {}), '(dtype, np.inexact)\n', (11245, 11264), True, 'import numpy as np\n'), ((11407, 11439), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.inexact'], {}), '(dtype, np.inexact)\n', (11420, 11439), True, 'import numpy as np\n'), ((4593, 4613), 'numpy.array', 'np.array', (['fill_value'], {}), '(fill_value)\n', (4601, 4613), True, 'import numpy as np\n'), ((5048, 5068), 'numpy.array', 'np.array', (['fill_value'], {}), '(fill_value)\n', (5056, 5068), True, 'import numpy as np\n'), ((9338, 9369), 'numpy.arange', 'np.arange', (['*args[:-1]'], {}), '(*args[:-1], **kwargs)\n', (9347, 9369), True, 'import numpy as np\n'), ((9502, 9528), 'numpy.arange', 'np.arange', (['*args'], {}), '(*args, **kwargs)\n', (9511, 9528), True, 'import numpy as np\n'), ((9569, 9595), 'numpy.arange', 'np.arange', (['*args'], {}), '(*args, **kwargs)\n', (9578, 9595), True, 'import numpy as np\n')]
|
import os, csv
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
from scipy import signal
class ProcessSignalData(object):
def __init__(self):
# path to video data from signal_output.py
self.dir = './processed_new/videos'
self.full_path = ''
self.dataframe = pd.DataFrame()
self.real_data = pd.DataFrame()
self.fake_data = pd.DataFrame()
self.dataset = pd.DataFrame()
self.real_data_mean = {}
self.fake_data_mean = {}
self.real_data_var = {}
self.fake_data_var = {}
self.real_data_std = {}
self.fake_data_std = {}
self.real_data_psd = {}
self.fake_data_psd = {}
self.real_data_csd = {}
self.fake_data_csd = {}
self.real_data_f1 = {}
self.fake_data_f1 = {}
self.real_data_test = {}
self.fake_data_test = {}
self.real_data_RCCE = {}
self.real_data_LCCE = {}
self.real_data_LCRC = {}
self.fake_data_RCCE = {}
self.fake_data_LCCE = {}
self.fake_data_LCRC = {}
self.real_count = 0
self.fake_count = 0
self.vid_count = 0
self.data_path_lcce = './lcce250.csv'
self.data_path_lcrc = './lcrc250.csv'
self.data_path_rcce = './rcce250.csv'
self.data_path_m = './mean_data16.csv'
self.data_path_v = './new_chrom/var_data16.csv'
self.data_path_s = './new_chrom/std_data16.csv'
self.data_path_p = './new_chrom/psd_data16.csv'
        self.data_path_c = './new_chrom/csd_data_128.csv'
        self.data_path_f1 = './f1_data_128.csv'
self.log_path = './process_log.csv'
self.test_data_lcce_path = './new_chrom/test_lcce.csv'
self.test_data_lcrc_path = './new_chrom/test_lcrc.csv'
self.test_data_rcce_path = './new_chrom/test_rcce.csv'
self.train_data_lcce_path = './new_chrom/train_lcce.csv'
self.train_data_lcrc_path = './new_chrom/train_lcrc.csv'
self.train_data_rcce_path = './new_chrom/train_rcce.csv'
self.test_data_v_path = './new_chrom/train_data_v32c.csv'
self.train_data_v_path = './new_chrom/test_data_v32c.csv'
self.test_data_m_path = './new_chrom/train_data_m32c.csv'
self.train_data_m_path = './new_chrom/test_data_m32c.csv'
self.test_data_s_path = './new_chrom/train_data_s32c.csv'
self.train_data_s_path = './new_chrom/test_data_s32c.csv'
self.test_data_p_path = './new_chrom/train_data_p128c.csv'
self.train_data_p_path = './new_chrom/test_data_p128c.csv'
self.test_data_c_path = './train_data_c128c.csv'
self.train_data_c_path = './test_data_c128c.csv'
self.test_data_f1_path = './train_data_f1-128c.csv'
self.train_data_f1_path = './test_data_f1-128c.csv'
self.test_data_test_path = './train_data_test.csv'
self.train_data_test_path = './test_data_test.csv'
self.main()
def new_chrom(self, red, green, blue):
# calculation of new X and Y
Xcomp = 3 * red - 2 * green
Ycomp = (1.5 * red) + green - (1.5 * blue)
# standard deviations
sX = np.std(Xcomp)
sY = np.std(Ycomp)
alpha = sX / sY
# -- rPPG signal
bvp = Xcomp - alpha * Ycomp
return bvp
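    # Note (editor's addition): X = 3R - 2G and Y = 1.5R + G - 1.5B follow the chrominance-based
    # rPPG ("CHROM") construction; scaling Y by the ratio of standard deviations before the
    # subtraction suppresses illumination and motion components common to both projections.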
def main(self):
# length of video in frames to process
sample_length = 250
# interval for mean, var, std
group_size = 32
#window for psd
psd_size = 128
for paths, subdir, files in os.walk(self.dir):
for file in files:
if file.endswith('.csv'):
self.full_path = os.path.join(paths, file)
if 'rejected' in self.full_path.lower() or '.txt' in self.full_path.lower() or 'imposter' in self.full_path.lower():
pass
else:
print(self.full_path)
self.dataset = pd.read_csv(self.full_path)
right_R = self.dataset['RC-R'].iloc[:sample_length]
left_R = self.dataset['LC-R'].iloc[:sample_length]
chin_R = self.dataset['C-R'].iloc[:sample_length]
forehead_R = self.dataset['F-R'].iloc[:sample_length]
outerR_R = self.dataset['OR-R'].iloc[:sample_length]
outerL_R = self.dataset['OL-R'].iloc[:sample_length]
center_R = self.dataset['CE-R'].iloc[:sample_length]
right_G = self.dataset['RC-G'].iloc[:sample_length]
left_G = self.dataset['LC-G'].iloc[:sample_length]
chin_G = self.dataset['C-G'].iloc[:sample_length]
forehead_G = self.dataset['F-G'].iloc[:sample_length]
outerR_G = self.dataset['OR-G'].iloc[:sample_length]
outerL_G = self.dataset['OL-G'].iloc[:sample_length]
center_G = self.dataset['CE-G'].iloc[:sample_length]
right_B = self.dataset['RC-B'].iloc[:sample_length]
left_B = self.dataset['LC-B'].iloc[:sample_length]
chin_B = self.dataset['C-B'].iloc[:sample_length]
forehead_B = self.dataset['F-B'].iloc[:sample_length]
outerR_B = self.dataset['OR-B'].iloc[:sample_length]
outerL_B = self.dataset['OL-B'].iloc[:sample_length]
center_B = self.dataset['CE-B'].iloc[:sample_length]
right_C = self.dataset['RC-chrom'].iloc[:sample_length]
left_C = self.dataset['LC-Chrom'].iloc[:sample_length]
chin_C = self.dataset['C-chrom'].iloc[:sample_length]
forehead_C = self.dataset['F-chrom'].iloc[:sample_length]
outerR_C = self.dataset['OR-chrom'].iloc[:sample_length]
outerL_C = self.dataset['OL-chrom'].iloc[:sample_length]
center_C = self.dataset['CE-chrom'].iloc[:sample_length]
chrom_R = right_C
chrom_L = left_C
chrom_CE = center_C
chrom_OL = outerL_C
chrom_OR = outerR_C
#chrom_R = self.new_chrom(right_R, right_G, right_B)
#chrom_L = self.new_chrom(left_R, left_G, left_B)
chrom_C = self.new_chrom(chin_R, chin_G, chin_B)
chrom_F = self.new_chrom(forehead_R, forehead_G, forehead_B)
#chrom_OR = self.new_chrom(outerR_R, outerR_G, outerR_B)
#chrom_OL = self.new_chrom(outerL_R, outerL_G, outerL_B)
#chrom_CE = self.new_chrom(center_R, center_G, center_B)
difg_LCRC = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['LC-G'].iloc[:sample_length]).abs()
difc_LCRC = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['LC-Chrom'].iloc[:sample_length]).abs()
difg_o1 = (self.dataset['C-G'].iloc[:sample_length] - self.dataset['F-G'].iloc[:sample_length]).abs()
difc_o1 = (self.dataset['C-chrom'].iloc[:sample_length] - self.dataset['F-chrom'].iloc[:sample_length]).abs()
difg_o2 = (self.dataset['OR-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
difc_o2 = (self.dataset['OR-chrom'].iloc[:sample_length] - self.dataset['OL-chrom'].iloc[:sample_length]).abs()
difc_LCCe = (self.dataset['LC-Chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[
:sample_length]).abs()
difc_RCCe = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[
:sample_length]).abs()
difc_LCRC = (chrom_R.iloc[:sample_length] - chrom_L.iloc[:sample_length]).abs()
difc_LCCe = (chrom_L.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
difc_RCCe = (chrom_R.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
difc_LCOL = (chrom_L.iloc[:sample_length] - chrom_OL.iloc[:sample_length]).abs()
difc_RCOR = (chrom_R.iloc[:sample_length] - chrom_OR.iloc[:sample_length]).abs()
difg_LCOL = (self.dataset['LC-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
difg_RCOR = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['OR-G'].iloc[:sample_length]).abs()
# green channel features
# right cheek - left cheek
difg_LCRC_lst = [difg_LCRC.iloc[i:i + group_size] for i in
range(0, len(difg_LCRC) - group_size + 1, group_size)]
# forehead - chin
difg_o1_lst = [difg_o1.iloc[i:i + group_size] for i in
range(0, len(difg_o1) - group_size + 1, group_size)]
# outer right - outer left
difg_o2_lst = [difg_o2.iloc[i:i + group_size] for i in
range(0, len(difg_o2) - group_size + 1, group_size)]
# chrominance features
# right cheek - left cheek
difc_LCRC_lst = [difc_LCRC.iloc[i:i + group_size] for i in
range(0, len(difc_LCRC) - group_size + 1, group_size)]
# forehead - chin
difc_o1_lst = [difc_o1.iloc[i:i + group_size] for i in
range(0, len(difc_o1) - group_size + 1, group_size)]
# outer right - outer left
difc_o2_lst = [difc_o2.iloc[i:i + group_size] for i in
range(0, len(difc_o2) - group_size + 1, group_size)]
# mean
difg_LCRC_mean = np.array([difg_LCRC_lst[i].mean() for i in range(len(difg_LCRC_lst))])
difc_LCRC_mean = np.array([difc_LCRC_lst[i].mean() for i in range(len(difc_LCRC_lst))])
print("MEAN")
print(difc_LCRC_mean)
difg_o1_mean = np.array([difg_o1_lst[i].mean() for i in range(len(difg_o1_lst))])
difc_o1_mean = np.array([difc_o1_lst[i].mean() for i in range(len(difc_o1_lst))])
difg_o2_mean = np.array([difg_o2_lst[i].mean() for i in range(len(difg_o2_lst))])
difc_o2_mean = np.array([difc_o2_lst[i].mean() for i in range(len(difc_o2_lst))])
# variance
difg_LCRC_var = np.array([difg_LCRC_lst[i].var() for i in range(len(difg_LCRC_lst))])
difc_LCRC_var = np.array([difc_LCRC_lst[i].var() for i in range(len(difc_LCRC_lst))])
print("VAR")
print(difc_LCRC_var)
difg_o1_var = np.array([difg_o1_lst[i].var() for i in range(len(difg_o1_lst))])
difc_o1_var = np.array([difc_o1_lst[i].var() for i in range(len(difc_o1_lst))])
difg_o2_var = np.array([difg_o2_lst[i].var() for i in range(len(difg_o2_lst))])
difc_o2_var = np.array([difc_o2_lst[i].var() for i in range(len(difc_o2_lst))])
# standard deviation
difg_LCRC_std = np.array([difg_LCRC_lst[i].std() for i in range(len(difg_LCRC_lst))])
difc_LCRC_std = np.array([difc_LCRC_lst[i].std() for i in range(len(difc_LCRC_lst))])
print("STD")
print(difc_LCRC_std)
difg_o1_std = np.array([difg_o1_lst[i].std() for i in range(len(difg_o1_lst))])
difc_o1_std = np.array([difc_o1_lst[i].std() for i in range(len(difc_o1_lst))])
difg_o2_std = np.array([difg_o2_lst[i].std() for i in range(len(difg_o2_lst))])
difc_o2_std = np.array([difc_o2_lst[i].std() for i in range(len(difc_o2_lst))])
# power spectral density
f, difg_LCRC_psd = signal.welch(difg_LCRC, nperseg=psd_size)
f, difc_LCCe_psd = signal.welch(difc_LCCe, nperseg=psd_size)
f, difc_RCCe_psd = signal.welch(difc_RCCe, nperseg=psd_size)
f, difc_LCRC_psd = signal.welch(difc_LCRC, nperseg=psd_size)
print("PSD")
print(difc_LCRC_psd)
f, difg_o1_psd = signal.welch(difg_o1, nperseg=psd_size)
f, difc_o1_psd = signal.welch(difc_o1, nperseg=psd_size)
f, difg_o2_psd = signal.welch(difg_o2, nperseg=psd_size)
f, difc_o2_psd = signal.welch(difc_o2, nperseg=psd_size)
# cross power spectral density
left_C.fillna(0, inplace=True)
center_C.fillna(0, inplace=True)
right_C.fillna(0, inplace=True)
outerL_C.fillna(0, inplace=True)
outerR_C.fillna(0, inplace=True)
f, difc_LCCe_v_csd = signal.csd(left_C, center_C, nperseg=128)
f, difc_LCRC_v_csd = signal.csd(left_C, right_C, nperseg=128)
f, difc_RCCe_v_csd = signal.csd(right_C, center_C, nperseg=128)
f, difc_LCOL_v_csd = signal.csd(left_C, outerL_C, nperseg=128)
f, difc_RCOR_v_csd =signal.csd(right_C, outerR_C, nperseg=128)
difc_LCCe_csd_0 = []
difc_LCRC_csd_0 = []
difc_RCCe_csd_0 = []
difc_LCOL_csd_0 = []
difc_RCOR_csd_0 = []
difc_LCCe_csd_1 = []
difc_LCRC_csd_1 = []
difc_RCCe_csd_1 = []
difc_LCOL_csd_1 = []
difc_RCOR_csd_1 = []
for i in range(len(difc_LCCe_v_csd)):
difc_LCCe_csd_0.append(difc_LCCe_v_csd[i].real)
difc_LCCe_csd_1.append(difc_LCCe_v_csd[i].imag)
for i in range(len(difc_LCRC_v_csd)):
difc_LCRC_csd_0.append(difc_LCRC_v_csd[i].real)
difc_LCRC_csd_1.append(difc_LCRC_v_csd[i].imag)
for i in range(len(difc_RCCe_v_csd)):
difc_RCCe_csd_0.append(difc_RCCe_v_csd[i].real)
difc_RCCe_csd_1.append(difc_RCCe_v_csd[i].imag)
for i in range(len(difc_LCOL_v_csd)):
difc_LCOL_csd_0.append(difc_LCOL_v_csd[i].real)
difc_LCOL_csd_1.append(difc_LCOL_v_csd[i].imag)
for i in range(len(difc_RCOR_v_csd)):
difc_RCOR_csd_0.append(difc_RCOR_v_csd[i].real)
difc_RCOR_csd_1.append(difc_RCOR_v_csd[i].imag)
csd2_LCCe = []
csd2_LCRC = []
csd2_RCCe = []
for i in range(len(difc_RCCe_csd_0)):
csd2_LCCe.append((difc_LCCe_csd_0[i], difc_LCCe_csd_1[i]))
csd2_LCRC.append((difc_LCRC_csd_0[i], difc_LCRC_csd_1[i]))
csd2_RCCe.append((difc_RCCe_csd_0[i], difc_RCCe_csd_1[i]))
# f1 feature
t = np.abs(difc_LCCe_v_csd)
j = np.argmax(t)
max_cLCCe = (difc_LCCe_csd_0[j], difc_LCCe_csd_1[j])
mean_cLCCe = [np.mean(np.asarray(difc_LCCe_csd_0)), np.mean(np.asarray(difc_LCCe_csd_1))]
f1LCCe = np.array([max_cLCCe[0], max_cLCCe[1], mean_cLCCe[0], mean_cLCCe[1]])
t = np.abs(difc_LCRC_v_csd)
j = np.argmax(t)
max_cLCRC = (difc_LCRC_csd_0[j], difc_LCRC_csd_1[j])
mean_cLCRC = [np.mean(np.asarray(difc_LCRC_csd_0)), np.mean(np.asarray(difc_LCRC_csd_1))]
f1LCRC = np.array([max_cLCRC[0], max_cLCRC[1], mean_cLCRC[0], mean_cLCRC[1]])
t = np.abs(difc_RCCe_v_csd)
j = np.argmax(t)
max_cRCCe = (difc_RCCe_csd_0[j], difc_RCCe_csd_1[j])
mean_cRCCe = [np.mean(np.asarray(difc_RCCe_csd_0)), np.mean(np.asarray(difc_RCCe_csd_1))]
f1RCCe = np.array([max_cRCCe[0], max_cRCCe[1], mean_cRCCe[0], mean_cRCCe[1]])
t = np.abs(difc_LCOL_v_csd)
j = np.argmax(t)
max_cLCOL = (difc_LCOL_csd_0[j], difc_LCOL_csd_1[j])
mean_cLCOL = [np.mean(np.asarray(difc_LCOL_csd_0)), np.mean(np.asarray(difc_LCOL_csd_1))]
f1LCOL = np.array([max_cLCOL[0], max_cLCOL[1], mean_cLCOL[0], mean_cLCOL[1]])
t = np.abs(difc_RCOR_v_csd)
j = np.argmax(t)
max_cRCOR = (difc_RCOR_csd_0[j], difc_RCOR_csd_1[j])
mean_cRCOR = [np.mean(np.asarray(difc_RCOR_csd_0)), np.mean(np.asarray(difc_RCOR_csd_1))]
f1RCOR = np.array([max_cRCOR[0], max_cRCOR[1], mean_cRCOR[0], mean_cRCOR[1]])
derived_data_mean = np.concatenate([difg_LCRC_mean, difc_LCRC_mean, difg_o1_mean, difc_o1_mean,
difg_o2_mean, difc_o2_mean])
derived_data_var = np.concatenate([difg_LCRC_var, difc_LCRC_var, difg_o1_var, difc_o1_var,
difg_o2_var, difc_o2_var])
derived_data_std = np.concatenate([difg_LCRC_std, difc_LCRC_std, difg_o1_std, difc_o1_std,
difg_o2_std, difc_o2_std])
derived_data_psd = np.concatenate([difc_LCCe_psd, difc_LCRC_psd, difc_RCCe_psd])
derived_data_csd = np.concatenate([difc_LCCe_csd_0, difc_LCCe_csd_1, difc_LCRC_csd_0, difc_LCRC_csd_1, difc_RCCe_csd_0, difc_RCCe_csd_1])
derived_data_rcsd = np.concatenate([difc_LCCe_csd_0, difc_LCRC_csd_0, difc_RCCe_csd_0])
derived_data_f1 = np.concatenate([f1LCCe, f1LCRC, f1RCCe])
derived_data_test = np.concatenate([f1LCCe, f1LCRC, f1RCCe, f1LCOL, f1RCOR, difc_LCRC_std, difc_LCRC_var, difc_LCRC_psd, difc_LCRC_mean])
chrom_data = self.dataset['RC-chrom'].iloc[50] - self.dataset['C-chrom'].iloc[50]
if 'fake' in self.full_path.lower():
self.fake_data_LCCE[self.fake_count] = difc_LCCe
self.fake_data_LCRC[self.fake_count] = difc_LCRC
self.fake_data_RCCE[self.fake_count] = difc_RCCe
self.fake_data_mean[self.fake_count] = derived_data_mean
self.fake_data_var[self.fake_count] = derived_data_var
self.fake_data_std[self.fake_count] = derived_data_std
self.fake_data_psd[self.fake_count] = derived_data_psd
self.fake_data_csd[self.fake_count] = derived_data_csd
self.fake_data_f1[self.fake_count] = derived_data_f1
self.fake_data_test[self.fake_count] = derived_data_test
self.fake_count += 1
else:
self.real_data_LCCE[self.real_count] = difc_LCCe
self.real_data_LCRC[self.real_count] = difc_LCRC
self.real_data_RCCE[self.real_count] = difc_RCCe
self.real_data_mean[self.real_count] = derived_data_mean
self.real_data_var[self.real_count] = derived_data_var
self.real_data_std[self.real_count] = derived_data_std
self.real_data_psd[self.real_count] = derived_data_psd
self.real_data_csd[self.real_count] = derived_data_csd
self.real_data_f1[self.real_count] = derived_data_f1
self.real_data_test[self.real_count] = derived_data_test
self.real_count += 1
self.vid_count += 1
self.real_df_LCCE = pd.DataFrame(self.real_data_LCCE)
self.real_df_LCRC = pd.DataFrame(self.real_data_LCRC)
self.real_df_RCCE = pd.DataFrame(self.real_data_RCCE)
self.fake_df_LCCE = pd.DataFrame(self.fake_data_LCCE)
self.fake_df_LCRC = pd.DataFrame(self.fake_data_LCRC)
self.fake_df_RCCE = pd.DataFrame(self.fake_data_RCCE)
self.real_df_m = pd.DataFrame(self.real_data_mean)
self.fake_df_m = pd.DataFrame(self.fake_data_mean)
self.real_df_v = pd.DataFrame(self.real_data_var)
self.fake_df_v = pd.DataFrame(self.fake_data_var)
self.real_df_s = pd.DataFrame(self.real_data_std)
self.fake_df_s = pd.DataFrame(self.fake_data_std)
self.real_df_p = pd.DataFrame(self.real_data_psd)
self.fake_df_p = pd.DataFrame(self.fake_data_psd)
self.real_df_csp = pd.DataFrame(self.real_data_csd)
self.fake_df_csp = pd.DataFrame(self.fake_data_csd)
self.real_df_f1 = pd.DataFrame(self.real_data_f1)
self.fake_df_f1 = pd.DataFrame(self.fake_data_f1)
self.real_df_test = pd.DataFrame(self.real_data_test)
self.fake_df_test = pd.DataFrame(self.fake_data_test)
r_lcce = self.real_df_LCCE.transpose()
r_lcrc = self.real_df_LCRC.transpose()
r_rcce = self.real_df_RCCE.transpose()
f_lcce = self.fake_df_LCCE.transpose()
f_lcrc = self.fake_df_LCRC.transpose()
f_rcce = self.fake_df_RCCE.transpose()
r_m = self.real_df_m.transpose()
f_m = self.fake_df_m.transpose()
r_v = self.real_df_v.transpose()
f_v = self.fake_df_v.transpose()
r_s = self.real_df_s.transpose()
f_s = self.fake_df_s.transpose()
        r_p = self.real_df_p.transpose()
        f_p = self.fake_df_p.transpose()
r_c = self.real_df_csp.transpose()
f_c = self.fake_df_csp.transpose()
r_f = self.real_df_f1.transpose()
f_f = self.fake_df_f1.transpose()
r_t = self.real_df_test.transpose()
f_t = self.fake_df_test.transpose()
r_f.to_csv("./real_f1.csv", index=False)
f_f.to_csv("./fake_f1.csv", index=False)
r_lcce['Target'] = 1
r_lcrc['Target'] = 1
r_rcce['Target'] = 1
f_lcce['Target'] = 0
f_lcrc['Target'] = 0
f_rcce['Target'] = 0
r_m['Target'] = 1
f_m['Target'] = 0
r_v['Target'] = 1
f_v['Target'] = 0
r_s['Target'] = 1
f_s['Target'] = 0
r_p['Target'] = 1
f_p['Target'] = 0
r_c['Target'] = 1
f_c['Target'] = 0
r_f['Target'] = 1
f_f['Target'] = 0
r_t['Target'] = 1
f_t['Target'] = 0
rf_lcce = r_lcce.append(f_lcce)
rf_lcrc = r_lcrc.append(f_lcrc)
rf_rcce = r_rcce.append(f_rcce)
rf_m = r_m.append(f_m)
rf_v = r_v.append(f_v)
rf_s = r_s.append(f_s)
rf_p = r_p.append(f_p)
rf_c = r_c.append(f_c)
rf_f = r_f.append(f_f)
rf_t = r_t.append(f_t)
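        # Note (editor's addition): sklearn's train_test_split returns (train, test), so with this
        # unpacking the variables named test_* hold the larger 80% split and train_* the 20% split.
        # The swapped train/test file names for the v/m/s/p/c/f1/test paths in __init__ appear to
        # compensate for this, while the lcce/lcrc/rcce splits are written to files whose names do
        # not match the split sizes.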
test_v, train_v = train_test_split(rf_v, test_size=0.2)
test_m, train_m = train_test_split(rf_m, test_size=0.2)
test_s, train_s = train_test_split(rf_s, test_size=0.2)
test_p, train_p = train_test_split(rf_p, test_size=0.2)
test_c, train_c = train_test_split(rf_c, test_size=0.2)
test_f, train_f = train_test_split(rf_f, test_size=0.2)
test_t, train_t = train_test_split(rf_t, test_size=0.2)
test_lcce, train_lcce = train_test_split(rf_lcce, test_size=0.2)
test_lcrc, train_lcrc = train_test_split(rf_lcrc, test_size=0.2)
test_rcce, train_rcce = train_test_split(rf_rcce, test_size=0.2)
train_lcce.to_csv(self.train_data_lcce_path, index=False)
train_lcrc.to_csv(self.train_data_lcrc_path, index=False)
train_rcce.to_csv(self.train_data_rcce_path, index=False)
test_lcce.to_csv(self.test_data_lcce_path, index=False)
test_lcrc.to_csv(self.test_data_lcrc_path, index=False)
test_rcce.to_csv(self.test_data_rcce_path, index=False)
train_s.to_csv(self.train_data_s_path, index=False)
test_s.to_csv(self.test_data_s_path, index=False)
train_v.to_csv(self.train_data_v_path, index=False)
test_v.to_csv(self.test_data_v_path, index=False)
train_m.to_csv(self.train_data_m_path, index=False)
test_m.to_csv(self.test_data_m_path, index=False)
train_p.to_csv(self.train_data_p_path, index=False)
test_p.to_csv(self.test_data_p_path, index=False)
train_c.to_csv(self.train_data_c_path, index=False)
test_c.to_csv(self.test_data_c_path, index=False)
train_f.to_csv(self.train_data_f1_path, index=False)
test_f.to_csv(self.test_data_f1_path, index=False)
train_t.to_csv(self.train_data_test_path, index=False)
test_t.to_csv(self.test_data_test_path, index=False)
r_c.to_csv("./csd_real128.csv", index=False)
f_c.to_csv("./csd_fake128.csv", index=False)
p = ProcessSignalData()
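# Note (editor's addition): instantiating ProcessSignalData at import time runs the whole
# feature-extraction and csv-export pipeline; wrapping this call in an
# `if __name__ == "__main__":` guard would avoid that side effect on import.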
|
[
"pandas.DataFrame",
"numpy.abs",
"scipy.signal.welch",
"numpy.argmax",
"numpy.std",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"os.walk",
"numpy.asarray",
"numpy.array",
"os.path.join",
"scipy.signal.csd",
"numpy.concatenate"
] |
[((364, 378), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (376, 378), True, 'import pandas as pd\n'), ((404, 418), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (416, 418), True, 'import pandas as pd\n'), ((444, 458), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (456, 458), True, 'import pandas as pd\n'), ((482, 496), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (494, 496), True, 'import pandas as pd\n'), ((3230, 3243), 'numpy.std', 'np.std', (['Xcomp'], {}), '(Xcomp)\n', (3236, 3243), True, 'import numpy as np\n'), ((3257, 3270), 'numpy.std', 'np.std', (['Ycomp'], {}), '(Ycomp)\n', (3263, 3270), True, 'import numpy as np\n'), ((3623, 3640), 'os.walk', 'os.walk', (['self.dir'], {}), '(self.dir)\n', (3630, 3640), False, 'import os, csv\n'), ((20621, 20654), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_LCCE'], {}), '(self.real_data_LCCE)\n', (20633, 20654), True, 'import pandas as pd\n'), ((20683, 20716), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_LCRC'], {}), '(self.real_data_LCRC)\n', (20695, 20716), True, 'import pandas as pd\n'), ((20745, 20778), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_RCCE'], {}), '(self.real_data_RCCE)\n', (20757, 20778), True, 'import pandas as pd\n'), ((20807, 20840), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_LCCE'], {}), '(self.fake_data_LCCE)\n', (20819, 20840), True, 'import pandas as pd\n'), ((20869, 20902), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_LCRC'], {}), '(self.fake_data_LCRC)\n', (20881, 20902), True, 'import pandas as pd\n'), ((20931, 20964), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_RCCE'], {}), '(self.fake_data_RCCE)\n', (20943, 20964), True, 'import pandas as pd\n'), ((20990, 21023), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_mean'], {}), '(self.real_data_mean)\n', (21002, 21023), True, 'import pandas as pd\n'), ((21049, 21082), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_mean'], {}), '(self.fake_data_mean)\n', (21061, 21082), True, 'import pandas as pd\n'), ((21108, 21140), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_var'], {}), '(self.real_data_var)\n', (21120, 21140), True, 'import pandas as pd\n'), ((21166, 21198), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_var'], {}), '(self.fake_data_var)\n', (21178, 21198), True, 'import pandas as pd\n'), ((21224, 21256), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_std'], {}), '(self.real_data_std)\n', (21236, 21256), True, 'import pandas as pd\n'), ((21282, 21314), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_std'], {}), '(self.fake_data_std)\n', (21294, 21314), True, 'import pandas as pd\n'), ((21340, 21372), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_psd'], {}), '(self.real_data_psd)\n', (21352, 21372), True, 'import pandas as pd\n'), ((21398, 21430), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_psd'], {}), '(self.fake_data_psd)\n', (21410, 21430), True, 'import pandas as pd\n'), ((21458, 21490), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_csd'], {}), '(self.real_data_csd)\n', (21470, 21490), True, 'import pandas as pd\n'), ((21518, 21550), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_csd'], {}), '(self.fake_data_csd)\n', (21530, 21550), True, 'import pandas as pd\n'), ((21577, 21608), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_f1'], {}), '(self.real_data_f1)\n', (21589, 21608), True, 'import pandas as pd\n'), ((21635, 21666), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_f1'], {}), 
'(self.fake_data_f1)\n', (21647, 21666), True, 'import pandas as pd\n'), ((21695, 21728), 'pandas.DataFrame', 'pd.DataFrame', (['self.real_data_test'], {}), '(self.real_data_test)\n', (21707, 21728), True, 'import pandas as pd\n'), ((21757, 21790), 'pandas.DataFrame', 'pd.DataFrame', (['self.fake_data_test'], {}), '(self.fake_data_test)\n', (21769, 21790), True, 'import pandas as pd\n'), ((23658, 23695), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_v'], {'test_size': '(0.2)'}), '(rf_v, test_size=0.2)\n', (23674, 23695), False, 'from sklearn.model_selection import train_test_split\n'), ((23722, 23759), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_m'], {'test_size': '(0.2)'}), '(rf_m, test_size=0.2)\n', (23738, 23759), False, 'from sklearn.model_selection import train_test_split\n'), ((23786, 23823), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_s'], {'test_size': '(0.2)'}), '(rf_s, test_size=0.2)\n', (23802, 23823), False, 'from sklearn.model_selection import train_test_split\n'), ((23850, 23887), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_p'], {'test_size': '(0.2)'}), '(rf_p, test_size=0.2)\n', (23866, 23887), False, 'from sklearn.model_selection import train_test_split\n'), ((23914, 23951), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_c'], {'test_size': '(0.2)'}), '(rf_c, test_size=0.2)\n', (23930, 23951), False, 'from sklearn.model_selection import train_test_split\n'), ((23978, 24015), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_f'], {'test_size': '(0.2)'}), '(rf_f, test_size=0.2)\n', (23994, 24015), False, 'from sklearn.model_selection import train_test_split\n'), ((24042, 24079), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_t'], {'test_size': '(0.2)'}), '(rf_t, test_size=0.2)\n', (24058, 24079), False, 'from sklearn.model_selection import train_test_split\n'), ((24112, 24152), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_lcce'], {'test_size': '(0.2)'}), '(rf_lcce, test_size=0.2)\n', (24128, 24152), False, 'from sklearn.model_selection import train_test_split\n'), ((24185, 24225), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_lcrc'], {'test_size': '(0.2)'}), '(rf_lcrc, test_size=0.2)\n', (24201, 24225), False, 'from sklearn.model_selection import train_test_split\n'), ((24258, 24298), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rf_rcce'], {'test_size': '(0.2)'}), '(rf_rcce, test_size=0.2)\n', (24274, 24298), False, 'from sklearn.model_selection import train_test_split\n'), ((3752, 3777), 'os.path.join', 'os.path.join', (['paths', 'file'], {}), '(paths, file)\n', (3764, 3777), False, 'import os, csv\n'), ((4035, 4062), 'pandas.read_csv', 'pd.read_csv', (['self.full_path'], {}), '(self.full_path)\n', (4046, 4062), True, 'import pandas as pd\n'), ((12409, 12450), 'scipy.signal.welch', 'signal.welch', (['difg_LCRC'], {'nperseg': 'psd_size'}), '(difg_LCRC, nperseg=psd_size)\n', (12421, 12450), False, 'from scipy import signal\n'), ((12490, 12531), 'scipy.signal.welch', 'signal.welch', (['difc_LCCe'], {'nperseg': 'psd_size'}), '(difc_LCCe, nperseg=psd_size)\n', (12502, 12531), False, 'from scipy import signal\n'), ((12571, 12612), 'scipy.signal.welch', 'signal.welch', (['difc_RCCe'], {'nperseg': 'psd_size'}), '(difc_RCCe, nperseg=psd_size)\n', (12583, 12612), False, 'from scipy import signal\n'), ((12652, 12693), 'scipy.signal.welch', 
'signal.welch', (['difc_LCRC'], {'nperseg': 'psd_size'}), '(difc_LCRC, nperseg=psd_size)\n', (12664, 12693), False, 'from scipy import signal\n'), ((12805, 12844), 'scipy.signal.welch', 'signal.welch', (['difg_o1'], {'nperseg': 'psd_size'}), '(difg_o1, nperseg=psd_size)\n', (12817, 12844), False, 'from scipy import signal\n'), ((12882, 12921), 'scipy.signal.welch', 'signal.welch', (['difc_o1'], {'nperseg': 'psd_size'}), '(difc_o1, nperseg=psd_size)\n', (12894, 12921), False, 'from scipy import signal\n'), ((12959, 12998), 'scipy.signal.welch', 'signal.welch', (['difg_o2'], {'nperseg': 'psd_size'}), '(difg_o2, nperseg=psd_size)\n', (12971, 12998), False, 'from scipy import signal\n'), ((13036, 13075), 'scipy.signal.welch', 'signal.welch', (['difc_o2'], {'nperseg': 'psd_size'}), '(difc_o2, nperseg=psd_size)\n', (13048, 13075), False, 'from scipy import signal\n'), ((13432, 13473), 'scipy.signal.csd', 'signal.csd', (['left_C', 'center_C'], {'nperseg': '(128)'}), '(left_C, center_C, nperseg=128)\n', (13442, 13473), False, 'from scipy import signal\n'), ((13515, 13555), 'scipy.signal.csd', 'signal.csd', (['left_C', 'right_C'], {'nperseg': '(128)'}), '(left_C, right_C, nperseg=128)\n', (13525, 13555), False, 'from scipy import signal\n'), ((13597, 13639), 'scipy.signal.csd', 'signal.csd', (['right_C', 'center_C'], {'nperseg': '(128)'}), '(right_C, center_C, nperseg=128)\n', (13607, 13639), False, 'from scipy import signal\n'), ((13681, 13722), 'scipy.signal.csd', 'signal.csd', (['left_C', 'outerL_C'], {'nperseg': '(128)'}), '(left_C, outerL_C, nperseg=128)\n', (13691, 13722), False, 'from scipy import signal\n'), ((13763, 13805), 'scipy.signal.csd', 'signal.csd', (['right_C', 'outerR_C'], {'nperseg': '(128)'}), '(right_C, outerR_C, nperseg=128)\n', (13773, 13805), False, 'from scipy import signal\n'), ((15706, 15729), 'numpy.abs', 'np.abs', (['difc_LCCe_v_csd'], {}), '(difc_LCCe_v_csd)\n', (15712, 15729), True, 'import numpy as np\n'), ((15754, 15766), 'numpy.argmax', 'np.argmax', (['t'], {}), '(t)\n', (15763, 15766), True, 'import numpy as np\n'), ((15981, 16049), 'numpy.array', 'np.array', (['[max_cLCCe[0], max_cLCCe[1], mean_cLCCe[0], mean_cLCCe[1]]'], {}), '([max_cLCCe[0], max_cLCCe[1], mean_cLCCe[0], mean_cLCCe[1]])\n', (15989, 16049), True, 'import numpy as np\n'), ((16075, 16098), 'numpy.abs', 'np.abs', (['difc_LCRC_v_csd'], {}), '(difc_LCRC_v_csd)\n', (16081, 16098), True, 'import numpy as np\n'), ((16123, 16135), 'numpy.argmax', 'np.argmax', (['t'], {}), '(t)\n', (16132, 16135), True, 'import numpy as np\n'), ((16350, 16418), 'numpy.array', 'np.array', (['[max_cLCRC[0], max_cLCRC[1], mean_cLCRC[0], mean_cLCRC[1]]'], {}), '([max_cLCRC[0], max_cLCRC[1], mean_cLCRC[0], mean_cLCRC[1]])\n', (16358, 16418), True, 'import numpy as np\n'), ((16444, 16467), 'numpy.abs', 'np.abs', (['difc_RCCe_v_csd'], {}), '(difc_RCCe_v_csd)\n', (16450, 16467), True, 'import numpy as np\n'), ((16492, 16504), 'numpy.argmax', 'np.argmax', (['t'], {}), '(t)\n', (16501, 16504), True, 'import numpy as np\n'), ((16719, 16787), 'numpy.array', 'np.array', (['[max_cRCCe[0], max_cRCCe[1], mean_cRCCe[0], mean_cRCCe[1]]'], {}), '([max_cRCCe[0], max_cRCCe[1], mean_cRCCe[0], mean_cRCCe[1]])\n', (16727, 16787), True, 'import numpy as np\n'), ((16813, 16836), 'numpy.abs', 'np.abs', (['difc_LCOL_v_csd'], {}), '(difc_LCOL_v_csd)\n', (16819, 16836), True, 'import numpy as np\n'), ((16861, 16873), 'numpy.argmax', 'np.argmax', (['t'], {}), '(t)\n', (16870, 16873), True, 'import numpy as np\n'), ((17088, 17156), 'numpy.array', 
'np.array', (['[max_cLCOL[0], max_cLCOL[1], mean_cLCOL[0], mean_cLCOL[1]]'], {}), '([max_cLCOL[0], max_cLCOL[1], mean_cLCOL[0], mean_cLCOL[1]])\n', (17096, 17156), True, 'import numpy as np\n'), ((17182, 17205), 'numpy.abs', 'np.abs', (['difc_RCOR_v_csd'], {}), '(difc_RCOR_v_csd)\n', (17188, 17205), True, 'import numpy as np\n'), ((17230, 17242), 'numpy.argmax', 'np.argmax', (['t'], {}), '(t)\n', (17239, 17242), True, 'import numpy as np\n'), ((17457, 17525), 'numpy.array', 'np.array', (['[max_cRCOR[0], max_cRCOR[1], mean_cRCOR[0], mean_cRCOR[1]]'], {}), '([max_cRCOR[0], max_cRCOR[1], mean_cRCOR[0], mean_cRCOR[1]])\n', (17465, 17525), True, 'import numpy as np\n'), ((17567, 17675), 'numpy.concatenate', 'np.concatenate', (['[difg_LCRC_mean, difc_LCRC_mean, difg_o1_mean, difc_o1_mean, difg_o2_mean,\n difc_o2_mean]'], {}), '([difg_LCRC_mean, difc_LCRC_mean, difg_o1_mean, difc_o1_mean,\n difg_o2_mean, difc_o2_mean])\n', (17581, 17675), True, 'import numpy as np\n'), ((17767, 17869), 'numpy.concatenate', 'np.concatenate', (['[difg_LCRC_var, difc_LCRC_var, difg_o1_var, difc_o1_var, difg_o2_var,\n difc_o2_var]'], {}), '([difg_LCRC_var, difc_LCRC_var, difg_o1_var, difc_o1_var,\n difg_o2_var, difc_o2_var])\n', (17781, 17869), True, 'import numpy as np\n'), ((17961, 18063), 'numpy.concatenate', 'np.concatenate', (['[difg_LCRC_std, difc_LCRC_std, difg_o1_std, difc_o1_std, difg_o2_std,\n difc_o2_std]'], {}), '([difg_LCRC_std, difc_LCRC_std, difg_o1_std, difc_o1_std,\n difg_o2_std, difc_o2_std])\n', (17975, 18063), True, 'import numpy as np\n'), ((18156, 18217), 'numpy.concatenate', 'np.concatenate', (['[difc_LCCe_psd, difc_LCRC_psd, difc_RCCe_psd]'], {}), '([difc_LCCe_psd, difc_LCRC_psd, difc_RCCe_psd])\n', (18170, 18217), True, 'import numpy as np\n'), ((18258, 18380), 'numpy.concatenate', 'np.concatenate', (['[difc_LCCe_csd_0, difc_LCCe_csd_1, difc_LCRC_csd_0, difc_LCRC_csd_1,\n difc_RCCe_csd_0, difc_RCCe_csd_1]'], {}), '([difc_LCCe_csd_0, difc_LCCe_csd_1, difc_LCRC_csd_0,\n difc_LCRC_csd_1, difc_RCCe_csd_0, difc_RCCe_csd_1])\n', (18272, 18380), True, 'import numpy as np\n'), ((18418, 18485), 'numpy.concatenate', 'np.concatenate', (['[difc_LCCe_csd_0, difc_LCRC_csd_0, difc_RCCe_csd_0]'], {}), '([difc_LCCe_csd_0, difc_LCRC_csd_0, difc_RCCe_csd_0])\n', (18432, 18485), True, 'import numpy as np\n'), ((18525, 18565), 'numpy.concatenate', 'np.concatenate', (['[f1LCCe, f1LCRC, f1RCCe]'], {}), '([f1LCCe, f1LCRC, f1RCCe])\n', (18539, 18565), True, 'import numpy as np\n'), ((18607, 18728), 'numpy.concatenate', 'np.concatenate', (['[f1LCCe, f1LCRC, f1RCCe, f1LCOL, f1RCOR, difc_LCRC_std, difc_LCRC_var,\n difc_LCRC_psd, difc_LCRC_mean]'], {}), '([f1LCCe, f1LCRC, f1RCCe, f1LCOL, f1RCOR, difc_LCRC_std,\n difc_LCRC_var, difc_LCRC_psd, difc_LCRC_mean])\n', (18621, 18728), True, 'import numpy as np\n'), ((15883, 15910), 'numpy.asarray', 'np.asarray', (['difc_LCCe_csd_0'], {}), '(difc_LCCe_csd_0)\n', (15893, 15910), True, 'import numpy as np\n'), ((15921, 15948), 'numpy.asarray', 'np.asarray', (['difc_LCCe_csd_1'], {}), '(difc_LCCe_csd_1)\n', (15931, 15948), True, 'import numpy as np\n'), ((16252, 16279), 'numpy.asarray', 'np.asarray', (['difc_LCRC_csd_0'], {}), '(difc_LCRC_csd_0)\n', (16262, 16279), True, 'import numpy as np\n'), ((16290, 16317), 'numpy.asarray', 'np.asarray', (['difc_LCRC_csd_1'], {}), '(difc_LCRC_csd_1)\n', (16300, 16317), True, 'import numpy as np\n'), ((16621, 16648), 'numpy.asarray', 'np.asarray', (['difc_RCCe_csd_0'], {}), '(difc_RCCe_csd_0)\n', (16631, 16648), True, 'import numpy as np\n'), 
((16659, 16686), 'numpy.asarray', 'np.asarray', (['difc_RCCe_csd_1'], {}), '(difc_RCCe_csd_1)\n', (16669, 16686), True, 'import numpy as np\n'), ((16990, 17017), 'numpy.asarray', 'np.asarray', (['difc_LCOL_csd_0'], {}), '(difc_LCOL_csd_0)\n', (17000, 17017), True, 'import numpy as np\n'), ((17028, 17055), 'numpy.asarray', 'np.asarray', (['difc_LCOL_csd_1'], {}), '(difc_LCOL_csd_1)\n', (17038, 17055), True, 'import numpy as np\n'), ((17359, 17386), 'numpy.asarray', 'np.asarray', (['difc_RCOR_csd_0'], {}), '(difc_RCOR_csd_0)\n', (17369, 17386), True, 'import numpy as np\n'), ((17397, 17424), 'numpy.asarray', 'np.asarray', (['difc_RCOR_csd_1'], {}), '(difc_RCOR_csd_1)\n', (17407, 17424), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from utils.test_images_generator.generator_config import AVAILABLE_SHAPES_DICT
from utils.test_images_generator.generator_utils import generate_random_color, generate_random_image_points
def generate_random_image(width, height):
# ToDo generate white image
# https://numpy.org/doc/1.18/reference/generated/numpy.full.html
generated_image = np.zeros((height, width, 3), dtype=np.uint8)
# ToDo choose random number of shapes from AVAILABLE_SHAPES_DICT
# https://numpy.org/doc/1.18/reference/random/generated/numpy.random.randint.html
# https://numpy.org/doc/1.18/reference/random/generated/numpy.random.choice.html
chosen_shapes = []
for shape in chosen_shapes:
if shape == AVAILABLE_SHAPES_DICT['LINE']:
_draw_random_line(generated_image)
elif shape == AVAILABLE_SHAPES_DICT['TRIANGLE']:
_draw_random_triangle(generated_image)
elif shape == AVAILABLE_SHAPES_DICT['RECTANGLE']:
_draw_random_rectangle(generated_image)
elif shape == AVAILABLE_SHAPES_DICT['CIRCLE']:
_draw_random_circle(generated_image)
return generated_image
def _draw_random_line(generated_image):
# ToDo draw random line (use _generate_random_image_points and _generate_random_color)
# https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
return
def _draw_random_triangle(generated_image):
# ToDo draw random triangle (use _generate_random_image_points and _generate_random_color)
# https://docs.opencv.org/3.1.0/dc/da5/tutorial_py_drawing_functions.html
# https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga3069baf93b51565e386c8e591f8418e6
    # format for triangle points: reshape((-1, 1, 2))
# https://numpy.org/doc/1.18/reference/generated/numpy.reshape.html
return
def _draw_random_rectangle(generated_image):
    # ToDo draw random rectangle (use _generate_random_image_points and _generate_random_color)
# https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
return
def _draw_random_circle(generated_image):
    # ToDo draw random circle (use _generate_random_image_points and _generate_random_color)
# https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
return
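# A minimal sketch (added) of how one of the ToDo stubs above could be completed. It draws
# directly with OpenCV and NumPy rather than with generate_random_color /
# generate_random_image_points, whose signatures are not shown in this file; a helper-based
# version would follow the same pattern.
def _draw_random_line_sketch(generated_image):
    height, width = generated_image.shape[:2]
    # two random endpoints inside the image, as plain (x, y) integer tuples
    pt1 = (int(np.random.randint(0, width)), int(np.random.randint(0, height)))
    pt2 = (int(np.random.randint(0, width)), int(np.random.randint(0, height)))
    # random BGR color; cv2 expects a tuple of Python ints
    color = tuple(int(c) for c in np.random.randint(0, 256, size=3))
    cv2.line(generated_image, pt1, pt2, color, thickness=2)
    return generated_image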
|
[
"numpy.zeros"
] |
[((385, 429), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'np.uint8'}), '((height, width, 3), dtype=np.uint8)\n', (393, 429), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 19 11:30:56 2022
@author: adowa
"""
import numpy as np
import tensorflow as tf
from utils import (build_logistic_regression,
compile_logistic_regression)
from tensorflow.keras import regularizers
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
if __name__ == "__main__":
n_epochs = int(1e3) # just a large number
print_train = True
# clear memory states
tf.keras.backend.clear_session()
# generate random test data
X,y = make_classification(n_samples = 150,
n_features = 100,
n_informative = 3,
n_redundant = 10,
n_classes = 2,
n_clusters_per_class = 4,
flip_y = .01,
class_sep = .75,# how easy to separate the two classes
shuffle = True,
random_state = 12345,
)
# one-hot encoding for softmax
y = y.reshape((-1,1))
y = np.hstack([y,1-y])
# split the data into train, validation, and test
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = .1,random_state = 12345)
X_train,X_valid,y_train,y_valid = train_test_split(X_train,y_train,test_size = .1,random_state = 12345)
# add some 0.5 labeled data - don't use too much
X_noise = np.random.normal(X_train.mean(),X_train.std(),size = (int(X_train.shape[0]/4),100))
y_noise = np.array([[0.5,0.5]] * int(X_train.shape[0]/4))
X_train = np.concatenate([X_train,X_noise])
y_train = np.concatenate([y_train,y_noise])
# X_noise = np.random.normal(X_test.mean(),X_test.std(),size = (int(X_test.shape[0]/2),100))
# y_noise = np.array([[0.5,0.5]] * int(X_test.shape[0]/2))
# X_test = np.concatenate([X_test,X_noise])
# y_test = np.concatenate([y_test,y_noise])
# build the model
tf.random.set_seed(12345)
logistic_regression = build_logistic_regression(
input_size = X_train.shape[1],
output_size = 2,
special = False,
kernel_regularizer = regularizers.L2(l2 = 1e-3),
activity_regularizer = regularizers.L1(l1 = 1e-3),
print_model = True,
)
# compile the model
logistic_regression,callbacks = compile_logistic_regression(
logistic_regression,
model_name = 'temp.h5',
optimizer = None,
loss_function = None,
metric = None,
callbacks = None,
learning_rate = 1e-3,
tol = 1e-4,
patience = 10,
)
# train and validate the model
logistic_regression.fit(
X_train,
y_train,
batch_size = 4,
epochs = n_epochs,
verbose = print_train,
callbacks = callbacks,
validation_data = (X_valid,y_valid),
shuffle = True,
class_weight = None,# tf has this but I don't think it is the same as sklearn
)
y_pred = logistic_regression.predict(X_test)
print(roc_auc_score(y_test,y_pred,))
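    # --- Added illustration (not part of the original script) ---
    # Why the extra [0.5, 0.5]-labeled rows act as a regularizer: for a soft target
    # t = [0.5, 0.5], the cross-entropy -sum(t * log(p)) over the two classes is minimized
    # when the predicted distribution is itself [0.5, 0.5], so those noise rows pull the
    # model toward maximally uncertain outputs on noise-like inputs.
    for p in (0.1, 0.5, 0.9):
        ce = -(0.5 * np.log(p) + 0.5 * np.log(1 - p))
        print(f"cross-entropy of [0.5, 0.5] target vs prediction [{p}, {1 - p:.1f}]: {ce:.3f}")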
|
[
"tensorflow.random.set_seed",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.backend.clear_session",
"sklearn.datasets.make_classification",
"tensorflow.keras.regularizers.L1",
"numpy.hstack",
"sklearn.metrics.roc_auc_score",
"tensorflow.keras.regularizers.L2",
"numpy.concatenate",
"utils.compile_logistic_regression"
] |
[((557, 589), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (587, 589), True, 'import tensorflow as tf\n'), ((632, 823), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(150)', 'n_features': '(100)', 'n_informative': '(3)', 'n_redundant': '(10)', 'n_classes': '(2)', 'n_clusters_per_class': '(4)', 'flip_y': '(0.01)', 'class_sep': '(0.75)', 'shuffle': '(True)', 'random_state': '(12345)'}), '(n_samples=150, n_features=100, n_informative=3,\n n_redundant=10, n_classes=2, n_clusters_per_class=4, flip_y=0.01,\n class_sep=0.75, shuffle=True, random_state=12345)\n', (651, 823), False, 'from sklearn.datasets import make_classification\n'), ((1347, 1368), 'numpy.hstack', 'np.hstack', (['[y, 1 - y]'], {}), '([y, 1 - y])\n', (1356, 1368), True, 'import numpy as np\n'), ((1458, 1515), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'random_state': '(12345)'}), '(X, y, test_size=0.1, random_state=12345)\n', (1474, 1515), False, 'from sklearn.model_selection import train_test_split\n'), ((1554, 1623), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.1)', 'random_state': '(12345)'}), '(X_train, y_train, test_size=0.1, random_state=12345)\n', (1570, 1623), False, 'from sklearn.model_selection import train_test_split\n'), ((1851, 1885), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_noise]'], {}), '([X_train, X_noise])\n', (1865, 1885), True, 'import numpy as np\n'), ((1899, 1933), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_noise]'], {}), '([y_train, y_noise])\n', (1913, 1933), True, 'import numpy as np\n'), ((2222, 2247), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(12345)'], {}), '(12345)\n', (2240, 2247), True, 'import tensorflow as tf\n'), ((2805, 2994), 'utils.compile_logistic_regression', 'compile_logistic_regression', (['logistic_regression'], {'model_name': '"""temp.h5"""', 'optimizer': 'None', 'loss_function': 'None', 'metric': 'None', 'callbacks': 'None', 'learning_rate': '(0.001)', 'tol': '(0.0001)', 'patience': '(10)'}), "(logistic_regression, model_name='temp.h5',\n optimizer=None, loss_function=None, metric=None, callbacks=None,\n learning_rate=0.001, tol=0.0001, patience=10)\n", (2832, 2994), False, 'from utils import build_logistic_regression, compile_logistic_regression\n'), ((4086, 4115), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4099, 4115), False, 'from sklearn.metrics import roc_auc_score\n'), ((2545, 2570), 'tensorflow.keras.regularizers.L2', 'regularizers.L2', ([], {'l2': '(0.001)'}), '(l2=0.001)\n', (2560, 2570), False, 'from tensorflow.keras import regularizers\n'), ((2627, 2652), 'tensorflow.keras.regularizers.L1', 'regularizers.L1', ([], {'l1': '(0.001)'}), '(l1=0.001)\n', (2642, 2652), False, 'from tensorflow.keras import regularizers\n')]
|
import scipy
import scipy.sparse.csgraph
import wall_generation, mesh
import mesh_operations
import utils
import triangulation, filters
from mesh_utilities import SurfaceSampler, tubeRemesh
import numpy as np
def meshComponents(m, cutEdges):
"""
Get the connected components of triangles of a mesh cut along the edges `cutEdges`.
Parameters
----------
m
The mesh to split
cutEdges
The edges (vertex index pairs) splitting the mesh up into disconnected regions
Returns
-------
ncomponents
Number of connected components
components
The component index for each mesh triangle.
"""
cutEdgeSet = set([(min(fs), max(fs)) for fs in cutEdges])
tri_sets = [set(t) for t in m.triangles()]
# Build dual graph, excluding dual edges that cross the fused segments.
def includeDualEdge(u, v):
common_vertices = tri_sets[u] & tri_sets[v]
return (len(common_vertices) == 2) and ((min(common_vertices), max(common_vertices)) not in cutEdgeSet)
dual_edges = [(u, v) for u in range(len(tri_sets))
for v in m.trisAdjTri(u)
if includeDualEdge(u, v)]
adj = scipy.sparse.coo_matrix((np.ones(len(dual_edges)), np.transpose(dual_edges))).tocsc()
return scipy.sparse.csgraph.connected_components(adj)
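# Added illustration (not part of the original module): the dual-graph idea used in
# meshComponents, shown on a hard-coded toy example. Triangles become graph nodes, adjacency
# across a non-cut shared edge becomes a graph edge, and connected_components recovers the
# regions separated by the cuts. Here four "triangles" form a chain 0-1-2-3 and the 1-2
# adjacency is omitted, as if its shared edge were fused, leaving two components.
def _demo_dual_graph_components():
    dual_edges = [(0, 1), (2, 3)]
    adj = scipy.sparse.coo_matrix((np.ones(len(dual_edges)), np.transpose(dual_edges)),
                                 shape=(4, 4)).tocsc()
    return scipy.sparse.csgraph.connected_components(adj)  # -> (2, array([0, 0, 1, 1]))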
def wallMeshComponents(sheet, distinctTubeComponents = False):
"""
Get the connected wall components of a sheet's mesh (assigning the tubes "component" -1 by default or components -1, -2, ... if distinctTubeComponents is True).
"""
m = sheet.mesh()
nt = m.numTris()
iwt = np.array([sheet.isWallTri(ti) for ti in range(nt)], dtype=np.bool)
dual_edges = [(u, v) for u in range(nt)
for v in m.trisAdjTri(u)
if iwt[u] == iwt[v]]
adj = scipy.sparse.coo_matrix((np.ones(len(dual_edges)), np.transpose(dual_edges))).tocsc()
numComponents, components = scipy.sparse.csgraph.connected_components(adj)
wallLabels = components[iwt].copy()
renumber = np.empty(numComponents, dtype=np.int)
renumber[:] = -1 # This assigns all non-wall triangles the "component" -1
uniqueWallLabels = np.unique(wallLabels)
numWallComponents = len(uniqueWallLabels)
renumber[uniqueWallLabels] = np.arange(numWallComponents, dtype=np.int)
if distinctTubeComponents:
uniqueTubeLabels = np.unique(components[~iwt])
renumber[uniqueTubeLabels] = -1 - np.arange(len(uniqueTubeLabels), dtype=np.int)
components = renumber[components]
return numWallComponents, components
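# Added note: the renumbering above is the standard NumPy lookup-table idiom for relabeling
# integer arrays. A tiny standalone sketch of the same trick (the labels are made up):
def _demo_label_renumbering():
    components = np.array([4, 4, 7, 2, 7])           # arbitrary component ids
    keep = np.array([4, 7])                         # ids to renumber to 0, 1, ...
    renumber = np.full(components.max() + 1, -1)    # everything else maps to -1
    renumber[keep] = np.arange(len(keep))
    return renumber[components]                      # -> array([ 0,  0,  1, -1,  1])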
def remeshWallRegions(m, fuseMarkers, fuseSegments, pointSetLiesInWall, permitWallInteriorVertices = False, pointsLieInHole = None):
"""
Take an initial mesh of the sheet and determine the connected triangle components
that correspond to fused regions. Also remesh these regions so as not to have
interior wall vertices (if requested).
Parameters
----------
m
The initial sheet mesh
fuseMarkers
Fused vertices of the original sheet mesh
fuseSegments
Fused edges of the original sheet mesh
pointSetLiesInWall
Function for testing whether a given point set lies within a wall region
permitWallInteriorVertices
Whether to permit Triangle to add vertices inside the wall regions.
(This should usually be `False`, since these vertices permit the walls to crumple.)
pointsLieInHole
If provided, this function is used to test whether a given mesh
component is actually a hole.
Returns
-------
remeshedSheet
A `MeshFEM` triangle mesh of the top sheet ready for inflation simulation.
isWallVtx
Per-vertex boolean array specifying whether each vertex is part of the wall region
(interior or boundary).
isWallBdryVtx
Per-vertex boolean array specifying whether each vertex is part of the wall boundary.
If `permitWallInteriorVertices` is `False`, then this is the same as `isWallVtx`.
"""
ncomponents, components = meshComponents(m, fuseSegments)
############################################################################
# Determine which connected components are walls.
############################################################################
triCenters = m.vertices()[m.triangles()].mean(axis=1)
numWalls = 0
wallLabels = -np.ones(m.numTris(), dtype=np.int)
for c in range(ncomponents):
component_tris = np.flatnonzero(components == c)
centers = triCenters[component_tris]
# Discard the entire component if it is actually a hole of the flattened input mesh.
if (pointsLieInHole is not None):
if (pointsLieInHole(centers)):
wallLabels[component_tris] = -2 # only labels -1 (tube) and >= 0 (wall) are kept
if (pointSetLiesInWall(centers)):
wallLabels[component_tris] = numWalls
numWalls = numWalls + 1
############################################################################
# Separately remesh each wall sub-mesh, preserving the non-wall component.
############################################################################
origV = m.vertices()
origF = m.triangles()
meshes = [(origV, origF[wallLabels == -1])]
remeshFlags = 'Y' + ('S0' if not permitWallInteriorVertices else '')
for wall in range(numWalls):
# Extract the original wall mesh.
wallMesh = mesh.Mesh(*mesh_operations.submesh(origV, origF, wallLabels == wall))
# Perform the initial remeshing of the wall's boundary segments.
wallMesh, wmFuseMarkers, wmFusedEdges = wall_generation.triangulate_channel_walls(*mesh_operations.removeDanglingVertices(wallMesh.vertices()[:, 0:2], wallMesh.boundaryElements()), triArea=float('inf'), flags=remeshFlags)
# Note: if the wall mesh encloses holes, Triangle will also have triangulated the holes;
# we must detect these and remove them from the output.
# We decompose the remeshed wall into connected components (after
# cutting away the original wall boundary segments) and keep only the one
# inside the wall region. Exactly one component should remain after this
# process.
nc, wallMeshComponents = meshComponents(wallMesh, wmFusedEdges)
if (nc != 1):
wmV = wallMesh.vertices()
wmF = wallMesh.triangles()
triCenters = wmV[wmF].mean(axis=1)
keepTri = np.zeros(wallMesh.numTris(), dtype=np.bool)
keptComponents = 0
for c in range(nc):
component_tris = np.flatnonzero(wallMeshComponents == c)
if (pointSetLiesInWall(triCenters[component_tris])):
keepTri[component_tris] = True
keptComponents += 1
if (keptComponents != 1): raise Exception('Should have kept exactly one component of the remeshed wall')
# Extract only the kept component.
wallMesh = mesh.Mesh(*mesh_operations.removeDanglingVertices(wmV, wmF[keepTri]))
meshes.append(wallMesh)
mergedV, mergedF = mesh_operations.mergedMesh(meshes)
remeshedSheet = mesh.Mesh(mergedV, mergedF)
############################################################################
# Determine wall vertices and wall boundary vertices.
############################################################################
wallVertices = set()
wallBoundaryVertices = set()
def addVertices(vtxSet, V):
for v in V: vtxSet.add(tuple(v))
rsV = remeshedSheet.vertices()
addVertices(wallBoundaryVertices, rsV[remeshedSheet.boundaryVertices()])
for wallmesh in meshes[1:]:
wmV = wallmesh.vertices()
addVertices(wallBoundaryVertices, wmV[wallmesh.boundaryVertices()])
addVertices(wallVertices, wmV)
wallVertices = wallVertices | wallBoundaryVertices
isWallVtx = np.array([tuple(v) in wallVertices for v in rsV])
isWallBdryVtx = np.array([tuple(v) in wallBoundaryVertices for v in rsV])
return remeshedSheet, isWallVtx, isWallBdryVtx
# Consider a point set to lie within a hole if over half of its points are within a hole (to a given tolerance)
class HoleTester:
def __init__(self, meshVertices, meshSampler):
self.V = meshVertices
self.sampler = meshSampler
self.eps = 1e-12 * utils.bbox_dims(meshVertices).max()
def dist(self, X):
"""
Compute the distance of each point in "X" to the sampler mesh.
"""
# This could be more efficient if SurfaceSampler had a method to get a
# distance to the sampled mesh...
if (X.shape[1] == 2):
X = np.pad(X, [(0, 0), (0, 1)])
closestPts = self.sampler.sample(X, self.V)
#print(closestPts)
return np.linalg.norm(X - closestPts, axis=1)
def pointWithinHole(self, X):
"""
Check whether each point in X individually lies with a hole.
"""
return self.dist(X) > self.eps
def __call__(self, X):
"""
Check whether a point set generally lies within a hole (i.e. if more
than half of its points are within a hole).
"""
return np.count_nonzero(self.pointWithinHole(X)) >= (X.shape[0] / 2)
# Note: if `targetEdgeSpacing` is set too low relative to `triArea`, `triangle`
# will insert new boundary points that fall in the interior of the flattened
# target surface (in strictly convex regions) when refining the triangulation.
#
# If the parametrization is subsequently used to lift the boundary points to
# 3D, these lifted points will not lie on the target surface's boundary. E.g.,
# they may lift off the ground plane even if all boundary vertices of the
# target surface lie on the ground plane.
def generateSheetMesh(sdfVertices, sdfTris, sdf, triArea, permitWallInteriorVertices = False, targetEdgeSpacing = 0.5, minContourLen = 0.75):
"""
Extract the channel walls described by a signed distance function and use
    them to generate a high-quality triangle mesh of the inflatable sheet.
Parameters
----------
sdfVertices
Vertices for the wall SDF domain mesh.
sdfTris
Triangles for the wall SDF domain mesh.
sdf
Per-vertex signed distances to the channel walls
triArea
Maximum triangle area (passed to Triangle)
permitWallInteriorVertices
Whether to permit Triangle to add vertices inside the wall regions.
(This should usually be `False`, since these vertices permit the walls to crumple.)
targetEdgeSpacing
The approximate resolution at which the extracted contours of the SDF are resampled
to generate the wall boundary curves.
minContourLen
The length threshold below which extracted contours are discarded.
Returns
-------
remeshedSheet
A `MeshFEM` triangle mesh of the top sheet ready for inflation simulation.
isWallVtx
Per-vertex boolean array specifying whether each vertex is part of the wall region
(interior or boundary).
isWallBdryVtx
Per-vertex boolean array specifying whether each vertex is part of the wall boundary.
If `permitWallInteriorVertices` is `False`, then this is the same as `isWallVtx`.
"""
pts, edges = wall_generation.extract_contours(sdfVertices, sdfTris, sdf,
targetEdgeSpacing=targetEdgeSpacing,
minContourLen=minContourLen)
m, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(pts[:,0:2], edges, triArea)
sdfSampler = SurfaceSampler(sdfVertices, sdfTris)
# Note: the inside/outside test for some sample points of a connected component may disagree due to the limited
# precision at which we extracted the contours (and the contour resampling), so we take a vote.
pointsAreInWall = lambda X: np.mean(sdfSampler.sample(X, sdf)) < 0
pointsLieInHole = HoleTester(sdfVertices, sdfSampler)
return remeshWallRegions(m, fuseMarkers, fuseSegments, pointsAreInWall, permitWallInteriorVertices, pointsLieInHole=pointsLieInHole)
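# A hedged usage sketch (added): the domain-mesh arguments are caller-supplied placeholders,
# not defined in this module. It builds a simple analytic wall layout (fused horizontal
# stripes of width 1 repeating every 5 units, marked by negative SDF values, as
# generateSheetMesh expects) and meshes the corresponding sheet.
def _example_striped_sheet(sdfVertices, sdfTris, triArea=1.0):
    y = sdfVertices[:, 1]
    sdf = np.abs((y % 5.0) - 2.5) - 0.5   # negative inside 1-unit-wide bands, i.e. the walls
    return generateSheetMesh(sdfVertices, sdfTris, sdf, triArea=triArea,
                             permitWallInteriorVertices=False)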
def generateSheetMeshCustomEdges(sdfVertices, sdfTris, sdf, customPts, customEdges, triArea, permitWallInteriorVertices = False):
m, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(customPts[:,0:2], customEdges, triArea)
sdfSampler = SurfaceSampler(sdfVertices, sdfTris)
# Note: the inside/outside test for some sample points of a connected component may disagree due to the limited
# precision at which we extracted the contours (and the contour resampling), so we take a vote.
pointsAreInWall = lambda X: np.mean(sdfSampler.sample(X, sdf)) < 0
pointsLieInHole = HoleTester(sdfVertices, sdfSampler)
return remeshWallRegions(m, fuseMarkers, fuseSegments, pointsAreInWall, permitWallInteriorVertices, pointsLieInHole=pointsLieInHole)
def meshWallsAndTubes(fusing_V, fusing_E, m, isWallTri, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles):
"""
Create a high quality mesh of the wall and tube regions enclosed by given fusing curves.
Parameters
----------
fusing_V, fusing_E
PSLG to be triangulated representing the fusing curves.
m
An initial mesh of the sheet region (with any hole triangles removed!) used to obtain the intersection of the wall regions with the sheet boundary.
isWallTri
        Boolean array holding whether each triangle of `m` is a wall triangle
holePoints, tubePoints, wallPoints
Lists of points within the hole, tube, and wall regions.
triArea
Maximum triangle area for the triangulation
permitWallInteriorVertices
Whether wall regions get interior vertices.
Returns
-------
remeshedSheet, isWallVtx, isWallBdryVtx
"""
############################################################################
# 1. Create a quality mesh of the air tubes.
############################################################################
# print(f"fusing_V.shape: {fusing_V.shape}")
# print(f"fusing_E.shape: {fusing_E.shape}")
# print(f"wallPoints: {np.array(wallPoints).shape}")
# print(f"holePoints: {np.array(holePoints).shape}")
mTubes, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(fusing_V[:, 0:2], fusing_E, holePoints=wallPoints + holePoints, triArea=triArea, omitQualityFlag=False, flags="j") # jettison vertices that got eaten by holes...
#utils.save((mTubes, fuseMarkers, fuseSegments), 'tubes_and_markers.pkl.gz')
if avoidSpuriousFusedTriangles:
try:
mTubes = tubeRemesh(mTubes, fuseMarkers, fuseSegments, minRelEdgeLen=0.3) # retriangulate where necessary to avoid spurious fused triangles in the tubes
except:
utils.save((mTubes, fuseMarkers, fuseSegments), utils.freshPath('tubeRemeshFailure', suffix='.pkl.gz'))
raise
fuseMarkers += [0 for i in range(mTubes.numVertices() - len(fuseMarkers))]
#mTubes.save('remeshedTubes.msh')
# For meshes without finite-thickness wall regions, we are done
# (and all vertices in fusing_V are wall/wall boundary vertices.)
if not np.any(isWallTri):
isWallVtx = np.array(fuseMarkers, dtype=np.bool)
return mTubes, isWallVtx, isWallVtx
############################################################################
# 2. Triangulate the wall meshes without inserting any Steiner points.
############################################################################
# We need to triangulate the new collection of boundary segments, which consists of the
# boundary segments from the new tube mesh along with the original mesh boundary segments
# that border wall regions.
boundarySegments = [(mTubes.vertices(), mTubes.boundaryElements())]
#print(boundarySegments)
# mesh.save("tube_bdry.obj", *boundarySegments[0])
wallBoundaryElements = m.boundaryElements()[isWallTri[m.elementsAdjacentBoundary()]]
if len(wallBoundaryElements) > 0:
boundarySegments.append((m.vertices(), wallBoundaryElements))
# mesh.save("sheet_bdry_intersect_walls.obj", *boundarySegments[1])
newPts, newEdges = mesh_operations.mergedMesh(boundarySegments)
# mesh.save("new_contour.obj", newPts, newEdges)
wallmeshFlags = 'Y' + ('S0' if not permitWallInteriorVertices else '')
mWall, _, _ = wall_generation.triangulate_channel_walls(newPts[:,0:2], newEdges, holePoints=tubePoints + holePoints, triArea=triArea if permitWallInteriorVertices else float('inf'), omitQualityFlag=False, flags="j" + wallmeshFlags) # jettison vertices that got eaten by holes...
# mWall.save("walls.obj")
############################################################################
# 3. Merge the tube and wall meshes
############################################################################
mFinal = mesh.Mesh(*mesh_operations.mergedMesh([mTubes, mWall]), embeddingDimension=3)
# mFinal.save("final.obj")
############################################################################
# 4. Determine wall vertices and wall boundary vertices.
############################################################################
wallVertices = set()
wallBoundaryVertices = set()
def addVertices(vtxSet, V):
for v in V: vtxSet.add(tuple(v))
finalV = mFinal.vertices()
addVertices(wallBoundaryVertices, finalV[mFinal.boundaryVertices()])
wmV = mWall.vertices()
addVertices(wallBoundaryVertices, wmV[mWall.boundaryVertices()])
addVertices(wallVertices, wmV)
# Also include fused vertices marked inside the tube mesh (i.e., those
# fused by zero-width curves)
addVertices(wallBoundaryVertices, mTubes.vertices()[np.array(fuseMarkers, dtype=np.bool)])
wallVertices = wallVertices | wallBoundaryVertices
isWallVtx = np.array([tuple(v) in wallVertices for v in finalV])
isWallBdryVtx = np.array([tuple(v) in wallBoundaryVertices for v in finalV])
return mFinal, isWallVtx, isWallBdryVtx
def newMeshingAlgorithm(sdfVertices, sdfTris, sdf, customPts, customEdges, triArea, permitWallInteriorVertices = False, avoidSpuriousFusedTriangles = True):
############################################################################
# 1. Perform an initial, low quality triangulation used only to segment the
# design domain into tube and wall regions.
############################################################################
m, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(customPts[:, 0:2], customEdges, triArea=float('inf'), omitQualityFlag=True, flags="YY")
# m.save('initial_triangulation.msh')
############################################################################
# 2. Determine the wall components/hole points.
############################################################################
triCenters = m.vertices()[m.triangles()].mean(axis=1)
sdfSampler = SurfaceSampler(sdfVertices, sdfTris)
numWalls = 0
wallPoints = []
tubePoints = []
holePoints = []
ncomponents, components = meshComponents(m, fuseSegments)
# First detect and remove holes
pointsLieInHole = HoleTester(sdfVertices, sdfSampler)
for c in range(ncomponents):
component_tris = components == c
centers = triCenters[component_tris]
p = centers[0, 0:2] # Todo: pick center of largest area triangle?
if (pointsLieInHole(centers)):
components[component_tris] = -1 #mark for deletion
holePoints.append(p)
continue
if len(holePoints) > 0:
print(f'Detected {len(holePoints)} holes')
# Note: there shouldn't be any dangling vertices since no new vertices
# are inserted inside the holes.
m = mesh.Mesh(m.vertices(), m.elements()[components >= 0])
ncomponents, components = meshComponents(m, fuseSegments)
triCenters = m.vertices()[m.triangles()].mean(axis=1)
# m.save('without_holes.msh')
wallLabels = -np.ones(m.numTris(), dtype=np.int) # assign -1 to tubes
# Next, pick a point within each air tube
for c in range(ncomponents):
component_tris = components == c
centers = triCenters[component_tris]
p = centers[0, 0:2] # Todo: pick center of largest area triangle?
# Note: the inside/outside test for some sample points of a connected component may disagree due to the limited
# precision at which we extracted the contours (and the contour resampling), so we take a vote.
triCentersAreInWall = np.mean(sdfSampler.sample(centers, sdf)) < 0
if (triCentersAreInWall):
wallPoints.append(p)
wallLabels[component_tris] = numWalls
numWalls = numWalls + 1
else:
tubePoints.append(p)
return meshWallsAndTubes(customPts, customEdges, m, wallLabels >= 0, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles)
def generateSheetMeshNewAlgorithm(sdfVertices, sdfTris, sdf, triArea, permitWallInteriorVertices = False, targetEdgeSpacing = 0.5, minContourLen = 0.75, avoidSpuriousFusedTriangles=True):
pts, edges = wall_generation.extract_contours(sdfVertices, sdfTris, sdf,
targetEdgeSpacing=targetEdgeSpacing,
minContourLen=minContourLen)
return newMeshingAlgorithm(sdfVertices, sdfTris, sdf, pts, edges, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles)
def remeshSheet(isheet, triArea, permitWallInteriorVertices = False, omitWallsContainingPoints=[]):
"""
Remesh an inflatable sheet design with a high quality triangulation
(leaving the fusing curves unchanged).
We can omit certain walls by passing a nonempty point set for the `omitWallsContainingPoints` argument.
Returns
-------
remeshedSheet, isWallVtx, isWallBdryVtx
"""
nwmc, wmc = wallMeshComponents(isheet, distinctTubeComponents=True)
im = isheet.mesh()
imV = im.vertices()
imF = im.triangles()
# Convert walls specified by `omitWallsContainingPoints` into tube regions.
ssampler = SurfaceSampler(imV, imF)
omittedWallComponents = []
if (len(omitWallsContainingPoints) > 0):
tris, _ = ssampler.closestTriAndBaryCoords(np.array(omitWallsContainingPoints))
omittedWallComponents = np.unique(wmc[tris])
if (np.any(omittedWallComponents < 0)): raise Exception("omitWallsContainingPoints contains non-wall points.")
wmc[tris] = -1 # Reassign omitted walls to the first tube region.
# Generate tube and wall points (one in each tube/wall component)
tubePoints = []
wallPoints = []
triCenters = imV[:, 0:2][imF].mean(axis=1)
#print(np.unique(wmc))
for c in range(np.min(wmc), nwmc):
#print(f'Component: {c}')
if c in omittedWallComponents: continue
p = triCenters[np.where(wmc == c)[0][0]]
if (c < 0): tubePoints.append(p)
else : wallPoints.append(p)
# Generate hole points inside each internal boundary loop; this
# requires a low-quality triangulation of the boundary loops.
sheetBoundary = mesh_operations.removeDanglingVertices(imV, im.boundaryElements())
mHoleDetect, mFuseMarkers, mFuseSegments = wall_generation.triangulate_channel_walls(sheetBoundary[0][:, 0:2], sheetBoundary[1], triArea=float('inf'), omitQualityFlag=True, flags="YY")
nholeDetectComponents, holeDetectComponents = meshComponents(mHoleDetect, mFuseSegments)
holeTest = HoleTester(imV, ssampler)
holePoints = np.array([triCenters[np.where(holeDetectComponents == c)[0][0]] for c in range(nholeDetectComponents)])
holePoints = list(holePoints[holeTest.pointWithinHole(holePoints)])
# Get all design curves for remeshing. These consist of the union of two disjoint sets of curves:
# - Boundaries of the wall regions.
# - The intersection of the tube and sheet boundaries.
wm = mesh.Mesh(*mesh_operations.mergedMesh([(imV, imF[wmc == i]) for i in range(nwmc)])) # Mesh of walls only.
sheetBoundaryIntersectTubes = mesh_operations.removeDanglingVertices(imV, im.boundaryElements()[wmc[im.elementsAdjacentBoundary()] < 0])
fusing_V, fusing_E = mesh_operations.mergedMesh([(wm.vertices(), wm.boundaryElements()), sheetBoundaryIntersectTubes])
isWallTri = (wmc >= 0)
return meshWallsAndTubes(fusing_V, fusing_E, im, isWallTri, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles=True)
import triangulation, field_sampler
def forward_design_mesh(V, E, fusedPts, holePts, triArea):
"""
Create an inflatable sheet mesh from a collection of curves and points indicating
whether the closed curve containing them should be considered a wall or a hole
(instead of a tube/pillow).
"""
sdfV, sdfF, pointMarkers, edgeMarkers = triangulation.triangulate(V[:, 0:2], E, holePts=holePts, triArea=1e8, omitQualityFlag=True, outputPointMarkers=True, outputEdgeMarkers=True)
minit = mesh.Mesh(sdfV[:, 0:2], sdfF)
# Create a SDF field indicating the wall regions (the data needed by newMeshingAlgorithm)
nc, c = meshComponents(minit, edgeMarkers)
sdf = c
fs = field_sampler.FieldSampler(minit)
if len(fusedPts) > 0:
        fusedComponents = np.array(np.unique(fs.sample(fusedPts, c)), dtype=np.int)
        # Use np.isin so that multiple fused components are handled correctly; an elementwise
        # '==' against an array of component ids relies on broadcasting and fails in general.
        sdf[np.isin(c, fusedComponents)] = -1
return newMeshingAlgorithm(sdfV, sdfF, sdf, V, E, triArea=triArea)
|
[
"mesh_utilities.tubeRemesh",
"numpy.empty",
"utils.freshPath",
"numpy.arange",
"scipy.sparse.csgraph.connected_components",
"numpy.linalg.norm",
"numpy.unique",
"numpy.pad",
"utils.bbox_dims",
"field_sampler.FieldSampler",
"numpy.transpose",
"mesh.Mesh",
"mesh_utilities.SurfaceSampler",
"numpy.min",
"mesh_operations.submesh",
"wall_generation.extract_contours",
"numpy.flatnonzero",
"mesh_operations.removeDanglingVertices",
"numpy.any",
"triangulation.triangulate",
"numpy.where",
"numpy.array",
"mesh_operations.mergedMesh",
"wall_generation.triangulate_channel_walls"
] |
[((1304, 1350), 'scipy.sparse.csgraph.connected_components', 'scipy.sparse.csgraph.connected_components', (['adj'], {}), '(adj)\n', (1345, 1350), False, 'import scipy\n'), ((1983, 2029), 'scipy.sparse.csgraph.connected_components', 'scipy.sparse.csgraph.connected_components', (['adj'], {}), '(adj)\n', (2024, 2029), False, 'import scipy\n'), ((2086, 2123), 'numpy.empty', 'np.empty', (['numComponents'], {'dtype': 'np.int'}), '(numComponents, dtype=np.int)\n', (2094, 2123), True, 'import numpy as np\n'), ((2225, 2246), 'numpy.unique', 'np.unique', (['wallLabels'], {}), '(wallLabels)\n', (2234, 2246), True, 'import numpy as np\n'), ((2326, 2368), 'numpy.arange', 'np.arange', (['numWallComponents'], {'dtype': 'np.int'}), '(numWallComponents, dtype=np.int)\n', (2335, 2368), True, 'import numpy as np\n'), ((7225, 7259), 'mesh_operations.mergedMesh', 'mesh_operations.mergedMesh', (['meshes'], {}), '(meshes)\n', (7251, 7259), False, 'import mesh_operations\n'), ((7280, 7307), 'mesh.Mesh', 'mesh.Mesh', (['mergedV', 'mergedF'], {}), '(mergedV, mergedF)\n', (7289, 7307), False, 'import wall_generation, mesh\n'), ((11433, 11562), 'wall_generation.extract_contours', 'wall_generation.extract_contours', (['sdfVertices', 'sdfTris', 'sdf'], {'targetEdgeSpacing': 'targetEdgeSpacing', 'minContourLen': 'minContourLen'}), '(sdfVertices, sdfTris, sdf,\n targetEdgeSpacing=targetEdgeSpacing, minContourLen=minContourLen)\n', (11465, 11562), False, 'import wall_generation, mesh\n'), ((11694, 11764), 'wall_generation.triangulate_channel_walls', 'wall_generation.triangulate_channel_walls', (['pts[:, 0:2]', 'edges', 'triArea'], {}), '(pts[:, 0:2], edges, triArea)\n', (11735, 11764), False, 'import wall_generation, mesh\n'), ((11782, 11818), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['sdfVertices', 'sdfTris'], {}), '(sdfVertices, sdfTris)\n', (11796, 11818), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((12468, 12554), 'wall_generation.triangulate_channel_walls', 'wall_generation.triangulate_channel_walls', (['customPts[:, 0:2]', 'customEdges', 'triArea'], {}), '(customPts[:, 0:2], customEdges,\n triArea)\n', (12509, 12554), False, 'import wall_generation, mesh\n'), ((12568, 12604), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['sdfVertices', 'sdfTris'], {}), '(sdfVertices, sdfTris)\n', (12582, 12604), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((14509, 14674), 'wall_generation.triangulate_channel_walls', 'wall_generation.triangulate_channel_walls', (['fusing_V[:, 0:2]', 'fusing_E'], {'holePoints': '(wallPoints + holePoints)', 'triArea': 'triArea', 'omitQualityFlag': '(False)', 'flags': '"""j"""'}), "(fusing_V[:, 0:2], fusing_E,\n holePoints=wallPoints + holePoints, triArea=triArea, omitQualityFlag=\n False, flags='j')\n", (14550, 14674), False, 'import wall_generation, mesh\n'), ((16450, 16494), 'mesh_operations.mergedMesh', 'mesh_operations.mergedMesh', (['boundarySegments'], {}), '(boundarySegments)\n', (16476, 16494), False, 'import mesh_operations\n'), ((19273, 19309), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['sdfVertices', 'sdfTris'], {}), '(sdfVertices, sdfTris)\n', (19287, 19309), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((21522, 21651), 'wall_generation.extract_contours', 'wall_generation.extract_contours', (['sdfVertices', 'sdfTris', 'sdf'], {'targetEdgeSpacing': 'targetEdgeSpacing', 'minContourLen': 'minContourLen'}), '(sdfVertices, sdfTris, sdf,\n targetEdgeSpacing=targetEdgeSpacing, 
minContourLen=minContourLen)\n', (21554, 21651), False, 'import wall_generation, mesh\n'), ((22533, 22557), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['imV', 'imF'], {}), '(imV, imF)\n', (22547, 22557), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((25286, 25443), 'triangulation.triangulate', 'triangulation.triangulate', (['V[:, 0:2]', 'E'], {'holePts': 'holePts', 'triArea': '(100000000.0)', 'omitQualityFlag': '(True)', 'outputPointMarkers': '(True)', 'outputEdgeMarkers': '(True)'}), '(V[:, 0:2], E, holePts=holePts, triArea=\n 100000000.0, omitQualityFlag=True, outputPointMarkers=True,\n outputEdgeMarkers=True)\n', (25311, 25443), False, 'import triangulation, field_sampler\n'), ((25442, 25471), 'mesh.Mesh', 'mesh.Mesh', (['sdfV[:, 0:2]', 'sdfF'], {}), '(sdfV[:, 0:2], sdfF)\n', (25451, 25471), False, 'import wall_generation, mesh\n'), ((25635, 25668), 'field_sampler.FieldSampler', 'field_sampler.FieldSampler', (['minit'], {}), '(minit)\n', (25661, 25668), False, 'import triangulation, field_sampler\n'), ((2428, 2455), 'numpy.unique', 'np.unique', (['components[~iwt]'], {}), '(components[~iwt])\n', (2437, 2455), True, 'import numpy as np\n'), ((4548, 4579), 'numpy.flatnonzero', 'np.flatnonzero', (['(components == c)'], {}), '(components == c)\n', (4562, 4579), True, 'import numpy as np\n'), ((8936, 8974), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - closestPts)'], {'axis': '(1)'}), '(X - closestPts, axis=1)\n', (8950, 8974), True, 'import numpy as np\n'), ((15426, 15443), 'numpy.any', 'np.any', (['isWallTri'], {}), '(isWallTri)\n', (15432, 15443), True, 'import numpy as np\n'), ((15465, 15501), 'numpy.array', 'np.array', (['fuseMarkers'], {'dtype': 'np.bool'}), '(fuseMarkers, dtype=np.bool)\n', (15473, 15501), True, 'import numpy as np\n'), ((22754, 22774), 'numpy.unique', 'np.unique', (['wmc[tris]'], {}), '(wmc[tris])\n', (22763, 22774), True, 'import numpy as np\n'), ((22787, 22820), 'numpy.any', 'np.any', (['(omittedWallComponents < 0)'], {}), '(omittedWallComponents < 0)\n', (22793, 22820), True, 'import numpy as np\n'), ((23172, 23183), 'numpy.min', 'np.min', (['wmc'], {}), '(wmc)\n', (23178, 23183), True, 'import numpy as np\n'), ((8814, 8841), 'numpy.pad', 'np.pad', (['X', '[(0, 0), (0, 1)]'], {}), '(X, [(0, 0), (0, 1)])\n', (8820, 8841), True, 'import numpy as np\n'), ((14864, 14928), 'mesh_utilities.tubeRemesh', 'tubeRemesh', (['mTubes', 'fuseMarkers', 'fuseSegments'], {'minRelEdgeLen': '(0.3)'}), '(mTubes, fuseMarkers, fuseSegments, minRelEdgeLen=0.3)\n', (14874, 14928), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((17165, 17208), 'mesh_operations.mergedMesh', 'mesh_operations.mergedMesh', (['[mTubes, mWall]'], {}), '([mTubes, mWall])\n', (17191, 17208), False, 'import mesh_operations\n'), ((18021, 18057), 'numpy.array', 'np.array', (['fuseMarkers'], {'dtype': 'np.bool'}), '(fuseMarkers, dtype=np.bool)\n', (18029, 18057), True, 'import numpy as np\n'), ((22685, 22720), 'numpy.array', 'np.array', (['omitWallsContainingPoints'], {}), '(omitWallsContainingPoints)\n', (22693, 22720), True, 'import numpy as np\n'), ((5548, 5605), 'mesh_operations.submesh', 'mesh_operations.submesh', (['origV', 'origF', '(wallLabels == wall)'], {}), '(origV, origF, wallLabels == wall)\n', (5571, 5605), False, 'import mesh_operations\n'), ((6710, 6749), 'numpy.flatnonzero', 'np.flatnonzero', (['(wallMeshComponents == c)'], {}), '(wallMeshComponents == c)\n', (6724, 6749), True, 'import numpy as np\n'), ((1257, 1281), 'numpy.transpose', 
'np.transpose', (['dual_edges'], {}), '(dual_edges)\n', (1269, 1281), True, 'import numpy as np\n'), ((1916, 1940), 'numpy.transpose', 'np.transpose', (['dual_edges'], {}), '(dual_edges)\n', (1928, 1940), True, 'import numpy as np\n'), ((7109, 7166), 'mesh_operations.removeDanglingVertices', 'mesh_operations.removeDanglingVertices', (['wmV', 'wmF[keepTri]'], {}), '(wmV, wmF[keepTri])\n', (7147, 7166), False, 'import mesh_operations\n'), ((8493, 8522), 'utils.bbox_dims', 'utils.bbox_dims', (['meshVertices'], {}), '(meshVertices)\n', (8508, 8522), False, 'import utils\n'), ((15084, 15138), 'utils.freshPath', 'utils.freshPath', (['"""tubeRemeshFailure"""'], {'suffix': '""".pkl.gz"""'}), "('tubeRemeshFailure', suffix='.pkl.gz')\n", (15099, 15138), False, 'import utils\n'), ((23297, 23315), 'numpy.where', 'np.where', (['(wmc == c)'], {}), '(wmc == c)\n', (23305, 23315), True, 'import numpy as np\n'), ((23989, 24024), 'numpy.where', 'np.where', (['(holeDetectComponents == c)'], {}), '(holeDetectComponents == c)\n', (23997, 24024), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn import linear_model
np.random.seed(123)
np.set_printoptions(suppress=True, linewidth=120)
X = np.random.random([10, 5]).astype(np.float)
y = np.random.random(10).astype(np.float)
# sklearn
linear = linear_model.LinearRegression()
linear.fit(X, y)
# NumPy: ordinary least squares via the normal equations, w = (X^T X)^{-1} X^T y
X = np.hstack([np.ones([10, 1]), X])  # prepend a column of ones for the intercept term
IX = np.linalg.inv(np.matmul(X.T, X))   # (X^T X)^{-1}
XIX = np.matmul(X, IX)                   # X (X^T X)^{-1}
w = np.matmul(y, XIX)                    # y X (X^T X)^{-1}  ==  ((X^T X)^{-1} X^T y)^T
print("----- Code Output -----")
print("sklearn coef", linear.coef_)
print("sklearn intercept", linear.intercept_)
print("numpy coef", w[1:])
print("numpy intercept", w[0])
"""
----- Code Output -----
sklearn coef [ 0.49571807 -0.4013861 0.67121452 -0.4458699 -0.68057386]
sklearn intercept 0.767935574124093
numpy coef [ 0.49571807 -0.4013861 0.67121452 -0.4458699 -0.68057386]
numpy intercept 0.7679355741241028
"""
|
[
"numpy.set_printoptions",
"numpy.random.seed",
"numpy.ones",
"sklearn.linear_model.LinearRegression",
"numpy.random.random",
"numpy.matmul"
] |
[((53, 72), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (67, 72), True, 'import numpy as np\n'), ((73, 122), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'linewidth': '(120)'}), '(suppress=True, linewidth=120)\n', (92, 122), True, 'import numpy as np\n'), ((233, 264), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (262, 264), False, 'from sklearn import linear_model\n'), ((379, 395), 'numpy.matmul', 'np.matmul', (['X', 'IX'], {}), '(X, IX)\n', (388, 395), True, 'import numpy as np\n'), ((400, 417), 'numpy.matmul', 'np.matmul', (['y', 'XIX'], {}), '(y, XIX)\n', (409, 417), True, 'import numpy as np\n'), ((354, 371), 'numpy.matmul', 'np.matmul', (['X.T', 'X'], {}), '(X.T, X)\n', (363, 371), True, 'import numpy as np\n'), ((128, 153), 'numpy.random.random', 'np.random.random', (['[10, 5]'], {}), '([10, 5])\n', (144, 153), True, 'import numpy as np\n'), ((175, 195), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (191, 195), True, 'import numpy as np\n'), ((312, 328), 'numpy.ones', 'np.ones', (['[10, 1]'], {}), '([10, 1])\n', (319, 328), True, 'import numpy as np\n')]
|
import numpy as np
import ray
import pyspiel
from open_spiel.python.algorithms.psro_v2.ars_ray.shared_noise import *
from open_spiel.python.algorithms.psro_v2.ars_ray.utils import rewards_combinator
from open_spiel.python.algorithms.psro_v2 import rl_policy
from open_spiel.python import rl_environment
import tensorflow.compat.v1 as tf
import random
# Function that loads the game.
# @ray.remote
# def worker(env_name):
# game = pyspiel.load_game_as_turn_based(env_name,
# {"players": pyspiel.GameParameter(
# 2)})
# env = rl_environment.Environment(game)
# return env.name
#
# SB worker
# @ray.remote
# class Worker(object):
# def __init__(self,
# env_name,
# env_seed=2,
# deltas=None,
# slow_oracle_kargs=None,
# fast_oracle_kargs=None
# ):
# pass
#
# def output(self):
# import sys
# return sys.path
@ray.remote
class Worker(object):
"""
Object class for parallel rollout generation.
"""
def __init__(self,
env_name,
env_seed=2,
deltas=None,
slow_oracle_kargs=None,
fast_oracle_kargs=None
):
# initialize rl environment.
from open_spiel.python import rl_environment
import pyspiel
self._num_players = 2
game = pyspiel.load_game_as_turn_based(env_name,
{"players": pyspiel.GameParameter(
self._num_players)})
self._env = rl_environment.Environment(game)
# Each worker gets access to the shared noise table
# with independent random streams for sampling
# from the shared noise table.
self.deltas = SharedNoiseTable(deltas, env_seed + 7)
self._policies = [[] for _ in range(self._num_players)]
self._slow_oracle_kargs = slow_oracle_kargs
self._fast_oracle_kargs = fast_oracle_kargs
self._delta_std = self._fast_oracle_kargs['noise']
self._sess = tf.get_default_session()
if self._sess is None:
self._sess = tf.Session()
if self._slow_oracle_kargs is not None:
self._slow_oracle_kargs['session'] = self._sess
def sample_episode(self,
unused_time_step,
agents,
is_evaluation=False,
noise=None,
chosen_player=None):
"""
Sample an episode and get the cumulative rewards. Notice that we do not
update the agents during this sampling.
:param unused_time_step: placeholder for openspiel.
:param agents: a list of policies, one per player.
:param is_evaluation: evaluation flag.
:param noise: noise to be added to current policy.
        :param chosen_player: id of the agent being trained (the one whose policy gets the noise).
:return: a list of returns, one per player.
"""
time_step = self._env.reset()
cumulative_rewards = 0.0
while not time_step.last():
if time_step.is_simultaneous_move():
action_list = []
for i, agent in enumerate(agents):
if i == chosen_player:
output = agent.step(time_step,
is_evaluation=is_evaluation,
noise=noise)
else:
output = agent.step(time_step, is_evaluation=is_evaluation)
action_list.append(output.action)
time_step = self._env.step(action_list)
cumulative_rewards += np.array(time_step.rewards)
else:
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(
time_step, is_evaluation=is_evaluation)
action_list = [agent_output.action]
time_step = self._env.step(action_list)
cumulative_rewards += np.array(time_step.rewards)
# No agents update at this step. This step may not be necessary.
if not is_evaluation:
for agent in agents:
agent.step(time_step)
return cumulative_rewards
def do_sample_episode(self,
probabilities_of_playing_policies,
chosen_player,
num_rollouts = 1,
is_evaluation = False):
"""
Generate multiple rollouts using noisy policies.
"""
with self._sess:
rollout_rewards = [[] for _ in range(self._num_players)]
deltas_idx = []
for _ in range(num_rollouts):
agents = self.sample_agents(probabilities_of_playing_policies, chosen_player)
if is_evaluation:
deltas_idx.append(-1)
reward = self.sample_episode(None, agents, is_evaluation)
for i, rew in enumerate(reward):
rollout_rewards[i].append(rew)
else:
# The idx marks the beginning of a sequence of noise with length dim.
# Refer to shared_noise.py
idx, delta = self.deltas.get_delta(agents[chosen_player].get_weights().size)
delta = (self._delta_std * delta).reshape(agents[chosen_player].get_weights().shape)
deltas_idx.append(idx)
# compute reward used for positive perturbation rollout. List, one reward per player.
pos_reward = self.sample_episode(None, agents, is_evaluation, delta, chosen_player)
                    # compute reward used for negative perturbation rollout. List, one reward per player.
neg_reward = self.sample_episode(None, agents, is_evaluation, -delta, chosen_player)
                    # rollout_rewards is a list of lists, one per player. For each player, each row
                    # holds a [positive reward, negative reward] pair, one row per noise sample.
rollout_rewards = rewards_combinator(rollout_rewards, pos_reward, neg_reward)
return {'deltas_idx': deltas_idx, 'rollout_rewards': rollout_rewards}
def freeze_all(self):
"""Freezes all policies within policy_per_player.
Args:
policies_per_player: List of list of number of policies.
"""
for policies in self._policies:
for pol in policies:
pol.freeze()
# def sync_total_policies(self, extra_policies_weights, policies_types, chosen_player):
# with self._sess:
# if chosen_player is not None:
# self._policies[chosen_player][-1].set_weights(extra_policies_weights[chosen_player][-1])
# else:
# for player in range(self._num_players):
# for i, policy_type in enumerate(policies_types[player]):
# new_pol = self.best_responder(policy_type, player)
# new_pol.set_weights(extra_policies_weights[player][i])
# self._policies[player].append(new_pol)
def get_num_policies(self):
return len(self._policies[0])
# def best_responder(self, policy_type, player):
# if policy_type == "DQN":
# agent_class = rl_policy.DQNPolicy
# assert self._slow_oracle_kargs is not None
# new_pol = agent_class(self._env, player, **self._slow_oracle_kargs)
# elif policy_type == "PG":
# agent_class = rl_policy.PGPolicy
# assert self._slow_oracle_kargs is not None
# new_pol = agent_class(self._env, player, **self._slow_oracle_kargs)
# elif policy_type == "ARS_parallel":
# agent_class = rl_policy.ARSPolicy_parallel
# new_pol = agent_class(self._env, player, **self._fast_oracle_kargs)
# else:
# raise ValueError("Agent class not supported in workers")
#
# return new_pol
def sample_agents(self, probabilities_of_playing_policies, chosen_player):
agents = self.sample_strategy_marginal(self._policies, probabilities_of_playing_policies)
agents[chosen_player] = self._policies[chosen_player][-1]
return agents
def sample_strategy_marginal(self, total_policies, probabilities_of_playing_policies):
"""Samples strategies given marginal probabilities.
        Each player's policy is sampled independently according to that player's marginal probabilities.
Args:
total_policies: A list, each element a list of each player's policies.
probabilities_of_playing_policies: This is a list, with the k-th element
also a list specifying the play probabilities of the k-th player's
policies.
Returns:
sampled_policies: A list specifying a single sampled joint strategy.
"""
num_players = len(total_policies)
sampled_policies = []
for k in range(num_players):
current_policies = total_policies[k]
current_probabilities = probabilities_of_playing_policies[k]
sampled_policy_k = self.random_choice(current_policies, current_probabilities)
sampled_policies.append(sampled_policy_k)
return sampled_policies
def random_choice(self, outcomes, probabilities):
"""Samples from discrete probability distribution.
    `numpy.random.choice` is not optimized for repeated calls; this implementation
    performed better in that setting.
Args:
outcomes: List of categorical outcomes.
      probabilities: Discrete probability distribution as list of floats.
Returns:
Entry of `outcomes` sampled according to the distribution.
"""
cumsum = np.cumsum(probabilities)
return outcomes[np.searchsorted(cumsum / cumsum[-1], random.random())]
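    # Hedged usage sketch (illustrative values): random_choice(['a', 'b', 'c'],
    # [0.2, 0.3, 0.5]) builds cumsum = [0.2, 0.5, 1.0], normalises it and uses
    # np.searchsorted with a uniform draw, so 'c' is returned about half the time.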
  def output(self):
    # Placeholder/debug method; returns a dummy string.
    return "asdf"
|
[
"pyspiel.GameParameter",
"open_spiel.python.algorithms.psro_v2.ars_ray.utils.rewards_combinator",
"numpy.cumsum",
"tensorflow.compat.v1.Session",
"random.random",
"numpy.array",
"open_spiel.python.rl_environment.Environment",
"tensorflow.compat.v1.get_default_session"
] |
[((1739, 1771), 'open_spiel.python.rl_environment.Environment', 'rl_environment.Environment', (['game'], {}), '(game)\n', (1765, 1771), False, 'from open_spiel.python import rl_environment\n'), ((2240, 2264), 'tensorflow.compat.v1.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2262, 2264), True, 'import tensorflow.compat.v1 as tf\n'), ((10247, 10271), 'numpy.cumsum', 'np.cumsum', (['probabilities'], {}), '(probabilities)\n', (10256, 10271), True, 'import numpy as np\n'), ((2321, 2333), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (2331, 2333), True, 'import tensorflow.compat.v1 as tf\n'), ((1624, 1664), 'pyspiel.GameParameter', 'pyspiel.GameParameter', (['self._num_players'], {}), '(self._num_players)\n', (1645, 1664), False, 'import pyspiel\n'), ((3887, 3914), 'numpy.array', 'np.array', (['time_step.rewards'], {}), '(time_step.rewards)\n', (3895, 3914), True, 'import numpy as np\n'), ((4264, 4291), 'numpy.array', 'np.array', (['time_step.rewards'], {}), '(time_step.rewards)\n', (4272, 4291), True, 'import numpy as np\n'), ((10333, 10348), 'random.random', 'random.random', ([], {}), '()\n', (10346, 10348), False, 'import random\n'), ((6514, 6573), 'open_spiel.python.algorithms.psro_v2.ars_ray.utils.rewards_combinator', 'rewards_combinator', (['rollout_rewards', 'pos_reward', 'neg_reward'], {}), '(rollout_rewards, pos_reward, neg_reward)\n', (6532, 6573), False, 'from open_spiel.python.algorithms.psro_v2.ars_ray.utils import rewards_combinator\n')]
|
# credit card default dataset: https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients
# kaggle link: https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset
import pandas as pd
from fim import fpgrowth#,fim
import numpy as np
#import math
#from itertools import chain, combinations
import itertools
from numpy.random import random
#from scipy import sparse
from bisect import bisect_left
from random import sample
#from scipy.stats.distributions import poisson, gamma, beta, bernoulli, binom
from time import time
#import scipy
#from sklearn.preprocessing import binarize
import operator
#from collections import Counter, defaultdict
from scipy.sparse import csc_matrix
from sklearn.ensemble import RandomForestClassifier#, AdaBoostClassifier
class hyb(object):
def __init__(self, binary_data,Y,Yb):
"""
:param binary_data: X_train? excludes labels?
:param Y: is this the y labels for the data predicted by interpretable model? no, is correct label
:param Yb: is this the y labels for the data to be predicted by the black box model? no, is all predictions from the black box model
:return: None
"""
self.df = binary_data
self.Y = Y
# no. of training examples
self.N = float(len(Y))
self.Yb = Yb
def set_parameters(self, alpha = 1, beta = 0.1):
"""
initialise weights in objective function
        :param alpha: weight of interpretability (number of rules) in the objective function
        :param beta: weight of transparency (fraction of examples left to the black-box model) in the objective function
:return: None
"""
        # alpha and beta are scalar weights
self.alpha = alpha
self.beta = beta
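        # Hedged illustration (mirrors the call at the bottom of this file):
        #   model.set_parameters(alpha=0.01, beta=0.95)
        # penalises each extra rule only lightly but strongly rewards covering
        # more examples with rules instead of the black box.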
def generate_rulespace(self,supp,maxlen,N, need_negcode = False,njobs = 5, method = 'fpgrowth',criteria = 'IG',add_rules = []):
"""
generates initial rulespace, from which rules are taken
then screens the rules using self.screen_rules
:param supp: (int, according to arg of fpgrowth)
:param maxlen: (int)
:param N: (int)
:param add_rules: seems to be useless??
"""
print('generating rulespace...')
if method == 'fpgrowth':
if need_negcode:
df = 1-self.df
df.columns = [name.strip() + 'neg' for name in self.df.columns]
df = pd.concat([self.df,df],axis = 1)
else:
df = 1 - self.df
# [0] needed to get first dimension (others empty)
pindex = np.where(self.Y==1)[0]
nindex = np.where(self.Y!=1)[0]
itemMatrix = [[item for item in df.columns if row[item] ==1] for i,row in df.iterrows() ]
# are the supp arguments for fpgrowth supposed to be different? according to the lower bounds (minsupp) in the paper
prules= fpgrowth([itemMatrix[i] for i in pindex],supp = supp,zmin = 1,zmax = maxlen)
prules = [np.sort(x[0]).tolist() for x in prules]
nrules= fpgrowth([itemMatrix[i] for i in nindex],supp = supp,zmin = 1,zmax = maxlen)
nrules = [np.sort(x[0]).tolist() for x in nrules]
else:
print('Using random forest to generate rules ...')
prules = []
for length in range(2,maxlen+1,1):
n_estimators = 250*length# min(5000,int(min(comb(df.shape[1], length, exact=True),10000/maxlen)))
clf = RandomForestClassifier(n_estimators = n_estimators,max_depth = length)
clf.fit(self.df,self.Y)
for n in range(n_estimators):
prules.extend(extract_rules(clf.estimators_[n],self.df.columns))
prules = [list(x) for x in set(tuple(np.sort(x)) for x in prules)]
nrules = []
for length in range(2,maxlen+1,1):
n_estimators = 250*length# min(5000,int(min(comb(df.shape[1], length, exact=True),10000/maxlen)))
clf = RandomForestClassifier(n_estimators = n_estimators,max_depth = length)
clf.fit(self.df,1-self.Y)
for n in range(n_estimators):
nrules.extend(extract_rules(clf.estimators_[n],self.df.columns))
nrules = [list(x) for x in set(tuple(np.sort(x)) for x in nrules)]
df = 1-self.df
df.columns = [name.strip() + 'neg' for name in self.df.columns]
df = pd.concat([self.df,df],axis = 1)
print('unpruned prules (' + str(len(prules)) + '):\n' + str(prules))
print()
print('unpruned nrules (' + str(len(nrules)) + '):\n' + str(nrules))
self.prules, self.pRMatrix, self.psupp, self.pprecision, self.perror = self.screen_rules(prules,df,self.Y,N,supp)
self.nrules, self.nRMatrix, self.nsupp, self.nprecision, self.nerror = self.screen_rules(nrules,df,1-self.Y,N,supp)
print('rulespace generated')
# print '\tTook %0.3fs to generate %d rules' % (self.screen_time, len(self.rules))
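        # Hedged usage sketch (mirrors the call at the bottom of this file):
        #   model.generate_rulespace(supp=30, maxlen=10, N=model.N, method='rf')
        # populates self.prules / self.nrules and their coverage matrices
        # self.pRMatrix / self.nRMatrix.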
def screen_rules(self,rules,df,y,N,supp,criteria = 'precision',njobs = 5,add_rules = []):
"""
screens rules??? how????
helper, used by self.generate_rulespace
"""
print ('screening rules')
start_time = time() #removed time. and changed import statement above
itemInd = {}
        # map each column name to its index, used to build the sparse rule matrix below
for i,name in enumerate(df.columns):
itemInd[name] = int(i)
len_rules = [len(rule) for rule in rules]
# chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
# array of indices corresponding to the features in the rules e.g. [r1a r1b r2a r3a]
indices = np.array(list(itertools.chain.from_iterable([[itemInd[x] for x in rule] for rule in rules])))
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
indptr =list(accumulate(len_rules))
indptr.insert(0,0)
indptr = np.array(indptr)
data = np.ones(len(indices))
# standard CSC representation where the row indices for column i are stored in indices[indptr[i]:indptr[i+1]] and their corresponding values are stored in data[indptr[i]:indptr[i+1]]
# csc_matrix helps expand the compressed representation (data, indices, indptr), which ignores many of the zeros in the expanded matrix
ruleMatrix = csc_matrix((data,indices,indptr),shape = (len(df.columns),len(rules)))
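        # Illustrative sketch (hypothetical rules): with df.columns = ['a', 'b'] and
        # rules = [['a'], ['a', 'b']], len_rules = [1, 2], indices = [0, 0, 1] and
        # indptr = [0, 1, 3], so ruleMatrix[feature, rule] = 1 iff the feature appears
        # in that rule; Z below then flags the rows satisfying every condition of a rule.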
# mat = sparse.csr_matrix.dot(df,ruleMatrix)
mat = np.matrix(df)*ruleMatrix
lenMatrix = np.matrix([len_rules for i in range(df.shape[0])])
Z = (mat ==lenMatrix).astype(int)
Zpos = [Z[i] for i in np.where(y>0)][0]
TP = np.array(np.sum(Zpos,axis=0).tolist()[0])
supp_select = np.where(TP>=supp*sum(y)/100)[0]
# if len(supp_select)<=N:
# rules = [rules[i] for i in supp_select]
# RMatrix = np.array(Z[:,supp_select])
# rules_len = [len(set([name.split('_')[0] for name in rule])) for rule in rules]
# supp = np.array(np.sum(Z,axis=0).tolist()[0])[supp_select]
# else:
FP = np.array(np.sum(Z,axis = 0))[0] - TP
# TN = len(y) - np.sum(self.Y) - FP
# FN = np.sum(y) - TP
p1 = TP.astype(float)/(TP+FP)
# p2 = FN.astype(float)/(FN+TN)
# pp = (TP+FP).astype(float)/(TP+FP+TN+FN)
supp_select = np.array([i for i in supp_select if p1[i]>np.mean(y)])
select = np.argsort(p1[supp_select])[::-1][:N].tolist()
ind = list(supp_select[select])
rules = [rules[i] for i in ind]
RMatrix = np.array(Z[:,ind])
rules_len = [len(set([name.split('_')[0] for name in rule])) for rule in rules]
supp = np.array(np.sum(Z,axis=0).tolist()[0])[ind]
return rules, RMatrix, supp, p1[ind], FP[ind]
def train(self, Niteration = 5000, print_message=True, interpretability = 'size'):
"""
        Trains the hybrid rule set with simulated annealing over the rule space.
        Returns (self.maps, accuracy at the best objective, coverage mask at the best objective).
"""
print('training hybrid...')
self.maps = []
int_flag = int(interpretability =='size')
T0 = 0.01
nprules = len(self.prules)
pnrules = len(self.nrules)
prs_curr = sample(list(range(nprules)),3)
nrs_curr = sample(list(range(pnrules)),3)
obj_curr = 1000000000
obj_min = obj_curr
self.maps.append([-1,obj_curr,prs_curr,nrs_curr,[]])
p = np.sum(self.pRMatrix[:,prs_curr],axis = 1)>0
n = np.sum(self.nRMatrix[:,nrs_curr],axis = 1)>0
overlap_curr = np.multiply(p,n)
pcovered_curr = p ^ overlap_curr
ncovered_curr = n ^ overlap_curr
covered_curr = np.logical_xor(p,n)
Yhat_curr,TP,FP,TN,FN = self.compute_obj(pcovered_curr,covered_curr)
print(Yhat_curr,TP,FP,TN,FN)
nfeatures = len(np.unique([con.split('_')[0] for i in prs_curr for con in self.prules[i]])) + len(np.unique([con.split('_')[0] for i in nrs_curr for con in self.nrules[i]]))
obj_curr = ( FN + FP)/self.N +self.alpha*(int_flag *(len(prs_curr) + len(nrs_curr))+(1-int_flag)*nfeatures)+ self.beta * sum(~covered_curr)/self.N
self.actions = []
for iter in range(Niteration):
if iter >0.75 * Niteration:
prs_curr,nrs_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr = prs_opt[:],nrs_opt[:],pcovered_opt[:],ncovered_opt[:],overlap_opt[:],covered_opt[:], Yhat_opt[:]
prs_new,nrs_new , pcovered_new,ncovered_new,overlap_new,covered_new= self.propose_rs(prs_curr,nrs_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr, obj_min,print_message)
self.covered1 = covered_new[:]
self.Yhat_curr = Yhat_curr
# if sum(covered_new)<len(self.Y):
# # bbmodel.fit(self.df.iloc[~covered_new],self.Y[~covered_new])
# bbmodel.fit(self.df,self.Y)
Yhat_new,TP,FP,TN,FN = self.compute_obj(pcovered_new,covered_new)
self.Yhat_new = Yhat_new
nfeatures = len(np.unique([con.split('_')[0] for i in prs_new for con in self.prules[i]])) + len(np.unique([con.split('_')[0] for i in nrs_new for con in self.nrules[i]]))
obj_new = (FP + FN)/self.N +self.alpha*(int_flag *(len(prs_new) + len(nrs_new))+(1-int_flag)*nfeatures)+ self.beta * sum(~covered_new)/self.N
T = T0**(iter/Niteration)
alpha = np.exp(float(-obj_new +obj_curr)/T) # minimize
if obj_new < self.maps[-1][1]:
prs_opt,nrs_opt,obj_opt,pcovered_opt,ncovered_opt,overlap_opt,covered_opt, Yhat_opt = prs_new[:],nrs_new[:],obj_new,pcovered_new[:],ncovered_new[:],overlap_new[:],covered_new[:], Yhat_new[:]
perror, nerror, oerror, berror = self.diagnose(pcovered_new,ncovered_new,overlap_new,covered_new,Yhat_new)
accuracy_min = float(TP+TN)/self.N
explainability_min = sum(covered_new)/self.N
covered_min = covered_new
print('\n** max at iter = {} ** \n {}(obj) = {}(error) + {}(nrules) + {}(exp)\n accuracy = {}, explainability = {}, nfeatures = {}\n perror = {}, nerror = {}, oerror = {}, berror = {}\n '.format(iter,round(obj_new,3),(FP+FN)/self.N, self.alpha*(len(prs_new) + len(nrs_new)), self.beta*sum(~covered_new)/self.N, (TP+TN+0.0)/self.N,sum(covered_new)/self.N,nfeatures,perror,nerror,oerror,berror ))
self.maps.append([iter,obj_new,prs_new,nrs_new])
if print_message:
perror, nerror, oerror, berror = self.diagnose(pcovered_new,ncovered_new,overlap_new,covered_new,Yhat_new)
if print_message:
print('\niter = {}, alpha = {}, {}(obj) = {}(error) + {}(nrules) + {}(exp)\n accuracy = {}, explainability = {}, nfeatures = {}\n perror = {}, nerror = {}, oerror = {}, berror = {}\n '.format(iter,round(alpha,2),round(obj_new,3),(FP+FN)/self.N, self.alpha*(len(prs_new) + len(nrs_new)), self.beta*sum(~covered_new)/self.N, (TP+TN+0.0)/self.N,sum(covered_new)/self.N, nfeatures,perror,nerror,oerror,berror ))
print('prs = {}, nrs = {}'.format(prs_new, nrs_new))
if random() <= alpha:
prs_curr,nrs_curr,obj_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr = prs_new[:],nrs_new[:],obj_new,pcovered_new[:],ncovered_new[:],overlap_new[:],covered_new[:], Yhat_new[:]
self.prs_min = prs_opt
self.nrs_min = nrs_opt
print('training complete')
return self.maps,accuracy_min,covered_min
def diagnose(self, pcovered, ncovered, overlapped, covered, Yhat):
"""
returns sums of the misclassification errors
helper, used in self.train
what is "~"???? invert/complement function, https://stackoverflow.com/questions/8305199/the-tilde-operator-in-python
integers ~x become (-x) - 1
"""
perror = sum(self.Y[pcovered]!=Yhat[pcovered])
nerror = sum(self.Y[ncovered]!=Yhat[ncovered])
oerror = sum(self.Y[overlapped]!=Yhat[overlapped])
        # caveat: this assumes `covered` is a boolean mask; if it arrives as a 0/1
        # integer array, "~" flips the integers instead of negating the mask
berror = sum(self.Y[~covered]!=Yhat[~covered])
return perror, nerror, oerror, berror
def compute_obj(self,pcovered,covered):
"""
helper, used in self.train
"""
Yhat = np.zeros(int(self.N))
Yhat[pcovered] = 1
Yhat[~covered] = self.Yb[~covered] #self.Y[~covered]#
TP,FP,TN,FN = getConfusion(Yhat,self.Y)
return Yhat,TP,FP,TN,FN
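        # Hedged illustration: a row covered by a positive rule gets Yhat = 1, a row
        # covered only by negative rules keeps Yhat = 0, and any uncovered row falls
        # back to the black-box prediction stored in self.Yb.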
def propose_rs(self, prs,nrs,pcovered,ncovered,overlapped, covered,Yhat, vt,print_message = False):
"""
helper, used in self.train
"""
        incorr = np.where(Yhat[covered]!=self.Y[covered])[0]  # misclassified by the interpretable rules
        incorrb = np.where(Yhat[~covered]!=self.Y[~covered])[0]  # misclassified by the black box
overlapped_ind = np.where(overlapped)[0]
p = np.sum(self.pRMatrix[:,prs],axis = 1)
n = np.sum(self.nRMatrix[:,nrs],axis = 1)
ex = -1
if sum(covered) ==self.N: # covering all examples.
if print_message:
print('===== already covering all examples ===== ')
# print('0')
move = ['cut']
self.actions.append(0)
if len(prs)==0:
sign = [0]
elif len(nrs)==0:
sign = [1]
else:
sign = [int(random()<0.5)]
elif len(incorr) ==0 and (len(incorrb)==0 or len(overlapped) ==self.N) or sum(overlapped) > sum(covered):
if print_message:
print(' ===== 1 ===== ')
self.actions.append(1)
# print('1')
move = ['cut']
sign = [int(random()<0.5)]
# elif (len(incorr) == 0 and (sum(covered)>0)) or len(incorr)/sum(covered) >= len(incorrb)/sum(~covered):
# if print_message:
# print(' ===== 2 ===== ')
# self.actions.append(2)
# ex = sample(list(np.where(~covered)[0]) + list(np.where(overlapped)[0]),1)[0]
# if overlapped[ex] or len(prs) + len(nrs) >= (vt + self.beta)/self.alpha:
# # print('2')
# move = ['cut']
# sign = [int(random()<0.5)]
# else:
# # print('3')
# move = ['expand']
# sign = [int(random()<0.5)]
else:
# if sum(overlapped)/sum(pcovered)>.5 or sum(overlapped)/sum(ncovered)>.5:
# if print_message:
# print(' ===== 3 ===== ')
# # print('4')
# move = ['cut']
# sign = [int(len(prs)>len(nrs))]
# else:
t = random()
if t< 1./3: # try to decrease errors
self.actions.append(3)
if print_message:
print(' ===== decrease error ===== ')
ex = sample(list(incorr) + list(incorrb),1)[0]
if ex in incorr: # incorrectly classified by the interpretable model
rs_indicator = (pcovered[ex]).astype(int) # covered by prules
if random()<0.5:
# print('7')
move = ['cut']
sign = [rs_indicator]
else:
# print('8')
move = ['cut','add']
sign = [rs_indicator,rs_indicator]
# elif overlapped[ex]:
# if random()<0.5 :
# # print('5')
# move = ['cut']
# sign = [1 - self.Y[ex]]
# else:
# # print('6')
# move = ['cut','add']
# sign = [1 - self.Y[ex],1 - self.Y[ex]]
else: # incorrectly classified by the black box model
# print('9')
move = ['add']
sign = [int(self.Y[ex]==1)]
elif t<2./3: # decrease coverage
self.actions.append(4)
if print_message:
print(' ===== decrease size ===== ')
move = ['cut']
sign = [round(random())]
else: # increase coverage
self.actions.append(5)
if print_message:
print(' ===== increase coverage ===== ')
move = ['expand']
sign = [round(random())]
# if random()<0.5:
# move.append('add')
# sign.append(1-rs_indicator)
# else:
# move.extend(['cut','add'])
# sign.extend([1-rs_indicator,1-rs_indicator])
for j in range(len(move)):
if sign[j]==1:
prs = self.action(move[j],sign[j],ex,prs,Yhat,pcovered)
else:
nrs = self.action(move[j],sign[j],ex,nrs,Yhat,ncovered)
p = np.sum(self.pRMatrix[:,prs],axis = 1)>0
n = np.sum(self.nRMatrix[:,nrs],axis = 1)>0
o = np.multiply(p,n)
return prs, nrs,p,n^o,o, np.logical_xor(p,n) + o
def action(self,move, rs_indicator, ex, rules,Yhat,covered):
"""
        Applies a single 'cut', 'add' or 'expand' move to the given rule list and returns it.
        Helper, used in self.propose_rs.
"""
if rs_indicator==1:
RMatrix = self.pRMatrix
# error = self.perror
supp = self.psupp
else:
RMatrix = self.nRMatrix
# error = self.nerror
supp = self.nsupp
Y = self.Y if rs_indicator else 1- self.Y
if move=='cut' and len(rules)>0:
# print('======= cut =======')
""" cut """
if random()<0.25 and ex >=0:
candidate = list(set(np.where(RMatrix[ex,:]==1)[0]).intersection(rules))
if len(candidate)==0:
candidate = rules
cut_rule = sample(candidate,1)[0]
else:
p = []
all_sum = np.sum(RMatrix[:,rules],axis = 1)
for index,rule in enumerate(rules):
Yhat= ((all_sum - np.array(RMatrix[:,rule]))>0).astype(int)
TP,FP,TN,FN = getConfusion(Yhat,Y)
p.append(TP.astype(float)/(TP+FP+1))
# p.append(log_betabin(TP,TP+FP,self.alpha_1,self.beta_1) + log_betabin(FN,FN+TN,self.alpha_2,self.beta_2))
p = [x - min(p) for x in p]
p = np.exp(p)
p = np.insert(p,0,0)
p = np.array(list(accumulate(p)))
if p[-1]==0:
cut_rule = sample(rules,1)[0]
else:
p = p/p[-1]
index = find_lt(p,random())
cut_rule = rules[index]
rules.remove(cut_rule)
elif move == 'add' and ex>=0:
# print('======= add =======')
""" add """
score_max = -self.N *10000000
if self.Y[ex]*rs_indicator + (1 - self.Y[ex])*(1 - rs_indicator)==1:
# select = list(np.where(RMatrix[ex] & (error +self.alpha*self.N < self.beta * supp))[0]) # fix
select = list(np.where(RMatrix[ex])[0])
else:
# select = list(np.where( ~RMatrix[ex]& (error +self.alpha*self.N < self.beta * supp))[0])
select = list(np.where( ~RMatrix[ex])[0])
self.select = select
if len(select)>0:
if random()<0.25:
add_rule = sample(select,1)[0]
else:
# cover = np.sum(RMatrix[(~covered)&(~covered2), select],axis = 0)
# =============== Use precision as a criteria ===============
# Yhat_neg_index = np.where(np.sum(RMatrix[:,rules],axis = 1)<1)[0]
# mat = np.multiply(RMatrix[Yhat_neg_index.reshape(-1,1),select].transpose(),Y[Yhat_neg_index])
# TP = np.sum(mat,axis = 1)
# FP = np.array(np.sum(RMatrix[Yhat_neg_index.reshape(-1,1),select],axis = 0) - TP)
# TN = np.sum(Y[Yhat_neg_index]==0)-FP
# FN = sum(Y[Yhat_neg_index]) - TP
# p = (TP.astype(float)/(TP+FP+1)) + self.alpha * supp[select]
# add_rule = select[sample(list(np.where(p==max(p))[0]),1)[0]]
# =============== Use objective function as a criteria ===============
for ind in select:
z = np.logical_or(RMatrix[:,ind],Yhat)
TP,FP,TN,FN = getConfusion(z,self.Y)
score = FP+FN -self.beta * sum(RMatrix[~covered ,ind])
if score > score_max:
score_max = score
add_rule = ind
if add_rule not in rules:
rules.append(add_rule)
else: # expand
# print(['======= expand =======', len(rules)])
# candidates = np.where(error < self.beta * supp-self.alpha*self.N)[0] # fix
candidates = [x for x in range(RMatrix.shape[1])]
            # the candidate set is the same regardless of rule sign
            select = list(set(candidates).difference(rules))
# self.error = error
self.supp = supp
self.select = select
self.candidates = candidates
self.rules = rules
if random()<0.25:
add_rule = sample(select, 1)[0]
else:
# Yhat_neg_index = np.where(np.sum(RMatrix[:,rules],axis = 1)<1)[0]
Yhat_neg_index = np.where(~covered)[0]
mat = np.multiply(RMatrix[Yhat_neg_index.reshape(-1,1),select].transpose(),Y[Yhat_neg_index])
# TP = np.array(np.sum(mat,axis = 0).tolist()[0])
TP = np.sum(mat,axis = 1)
FP = np.array(np.sum(RMatrix[Yhat_neg_index.reshape(-1,1),select],axis = 0) - TP)
TN = np.sum(Y[Yhat_neg_index]==0)-FP
FN = sum(Y[Yhat_neg_index]) - TP
score = (FP + FN)+ self.beta * (TN + FN)
# score = (TP.astype(float)/(TP+FP+1)) + self.alpha * supp[select] # using precision as the criteria
add_rule = select[sample(list(np.where(score==min(score))[0]),1)[0]]
if add_rule not in rules:
rules.append(add_rule)
return rules
def print_rules(self, rules_max):
"""
unused
"""
for rule_index in rules_max:
print(self.rules[rule_index])
def predict_text(self,df,Y,Yb):
"""
unused
"""
prules = [self.prules[i] for i in self.prs_min]
nrules = [self.nrules[i] for i in self.nrs_min]
if len(prules):
p = [[] for rule in prules]
for i,rule in enumerate(prules):
p[i] = np.array((np.sum(df[:,list(rule)],axis=1)==len(rule)).flatten().tolist()[0]).astype(int)
p = (np.sum(p,axis=0)>0).astype(int)
else:
p = np.zeros(len(Y))
if len(nrules):
n = [[] for rule in nrules]
for i,rule in enumerate(nrules):
n[i] = np.array((np.sum(df[:,list(rule)],axis=1)==len(rule)).flatten().tolist()[0]).astype(int)
n = (np.sum(n,axis=0)>0).astype(int)
else:
n = np.zeros(len(Y))
pind = list(np.where(p)[0])
nind = list(np.where(n)[0])
covered = [x for x in range(len(Y)) if x in pind or x in nind]
Yhat = Yb
Yhat[nind] = 0
Yhat[pind] = 1
return Yhat,covered,Yb
def predict(self, df, Y,Yb ):
"""
        Predicts with the learned hybrid model: rule predictions where covered by a
        rule, black-box predictions (Yb) everywhere else. Used on the test set below.
"""
prules = [self.prules[i] for i in self.prs_min]
nrules = [self.nrules[i] for i in self.nrs_min]
# if isinstance(self.df, scipy.sparse.csc.csc_matrix)==False:
        dfn = 1-df  # negated copy of the features, used by rules on 'neg' columns
dfn.columns = [name.strip() + 'neg' for name in df.columns]
df_test = pd.concat([df,dfn],axis = 1)
if len(prules):
p = [[] for rule in prules]
for i,rule in enumerate(prules):
p[i] = (np.sum(df_test[list(rule)],axis=1)==len(rule)).astype(int)
p = (np.sum(p,axis=0)>0).astype(int)
else:
p = np.zeros(len(Y))
if len(nrules):
n = [[] for rule in nrules]
for i,rule in enumerate(nrules):
n[i] = (np.sum(df_test[list(rule)],axis=1)==len(rule)).astype(int)
n = (np.sum(n,axis=0)>0).astype(int)
else:
n = np.zeros(len(Y))
pind = list(np.where(p)[0])
nind = list(np.where(n)[0])
covered = [x for x in range(len(Y)) if x in pind or x in nind]
Yhat = np.array([i for i in Yb])
Yhat[nind] = 0
Yhat[pind] = 1
return Yhat,covered,Yb
def accumulate(iterable, func=operator.add):
"""
helper, used in hyb.action, hyb.screen_rules
"""
'Return running totals'
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
total = next(it)
yield total
for element in it:
total = func(total, element)
yield total
def find_lt(a, x):
"""
hepler, used in hyb.action
"""
""" Find rightmost value less than x"""
i = bisect_left(a, x)
if i:
return int(i-1)
else:
return 0
def getConfusion(Yhat,Y):
"""
    helper, used in hyb.compute_obj and hyb.action
"""
if len(Yhat)!=len(Y):
        raise ValueError('Yhat and Y have different lengths')
TP = np.dot(np.array(Y),np.array(Yhat))
FP = np.sum(Yhat) - TP
TN = len(Y) - np.sum(Y)-FP
FN = len(Yhat) - np.sum(Yhat) - TN
return TP,FP,TN,FN
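# Hedged worked example (hypothetical labels): Y = [1, 0, 1, 0], Yhat = [1, 1, 0, 0]
# gives TP = 1, FP = 1, TN = 1, FN = 1.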
def extract_rules(tree, feature_names):
"""
helper, used in hyb.generate_rulespace, when using random forest to generate rulespace
"""
left = tree.tree_.children_left
right = tree.tree_.children_right
threshold = tree.tree_.threshold
features = [feature_names[i] for i in tree.tree_.feature]
    # get ids of the leaf nodes
idx = np.argwhere(left == -1)[:,0]
def recurse(left, right, child, lineage=None):
if lineage is None:
lineage = []
if child in left:
parent = np.where(left == child)[0].item()
suffix = 'neg'
else:
parent = np.where(right == child)[0].item()
suffix = ''
# lineage.append((parent, split, threshold[parent], features[parent]))
lineage.append((features[parent].strip()+suffix))
if parent == 0:
lineage.reverse()
return lineage
else:
return recurse(left, right, parent, lineage)
rules = []
for child in idx:
rule = []
for node in recurse(left, right, child):
rule.append(node)
rules.append(rule)
return rules
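# Hedged illustration: each extracted rule is the list of feature names on the
# path from the root to a leaf, with a 'neg' suffix appended when the path takes
# the left (feature absent) branch, e.g. ['SEX', 'AGE_geq_2qneg'] (hypothetical rule).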
def binary_code(df,collist,Nlevel, length):
"""
preprocessing
converts a column of continuous values to binary format, into Nlevel number of parts
modifies df in place
:param df: dataframe to be modified
:param collist: list of names of columns with continuous values to be modified
:param Nlevel: number of parts to split the column into (will create new N-1 columns, deleting the original)
:return: None
"""
for col in collist:
for q in range(1,Nlevel,1):
threshold = df[col].quantile(float(q)/Nlevel)
df[col+'_geq_'+str(int(q))+'q'] = (df[col] >= threshold).astype(float)
print(col)
if length - len(df) > 500:
raise Exception('exit')
length = len(df)
print('length: ' + str(length))
df.drop(collist,axis = 1, inplace = True)
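# Hedged illustration (hypothetical column): with Nlevel = 4, a continuous column
# 'AGE' is replaced by 'AGE_geq_1q', 'AGE_geq_2q' and 'AGE_geq_3q', each flagging
# whether the value reaches the 25th / 50th / 75th percentile of that column.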
# =============================================================================
#
# =============================================================================
import os
# =============================================================================
# preprocessing
# =============================================================================
df = pd.read_excel('default of credit card clients.xls', sheet_name='Data', header=1)
length = len(df)
print('length: ' + str(length))
# drop duplicates
df.drop_duplicates(subset=[x for x in df.columns if x != 'ID'], inplace=True)
print('drop_duplicates')
if length - len(df) > 500:
raise Exception('exit')
length = len(df)
print('length: ' + str(length))
# drop weird values
dict_correct_vals = {'EDUCATION': [1, 2, 3, 4],
'MARRIAGE': [1, 2, 3]}
cols_payment_hist = ['PAY_'+str(x) for x in range(7) if x!= 1]
# removed this filter because it drops too many rows (half the samples have 0 in PAY_0, a value the paper leaves unspecified)
# =============================================================================
# for col in cols_payment_hist:
# dict_correct_vals[col] = [x for x in range(-1,10,1) if x!=0]
# =============================================================================
for col in dict_correct_vals.keys():
df[col] = df[col].apply(lambda x: x if x in dict_correct_vals[col] else 0)
df = df[df[col] != 0]
print(col)
if length - len(df) > 500:
raise Exception('exit')
length = len(df)
print('length: ' + str(length))
cols_bill_amt = ['BILL_AMT' + str(x) for x in range(1,7)]
cols_past_payment = ['PAY_AMT'+str(x) for x in range(1,7)]
binary_code(df, ['LIMIT_BAL', 'EDUCATION', 'AGE'] + cols_payment_hist + cols_bill_amt + cols_past_payment, 4, length=length)
# one-hot encode (OHE) the MARRIAGE column
df = pd.get_dummies(df, columns=['MARRIAGE'])
df['SEX'] = df['SEX'].apply(lambda x: 0 if x == 2 else x)  # recode SEX values of 2 to 0 so SEX is a 0/1 indicator
# =============================================================================
# black box models
# =============================================================================
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import f1_score, roc_auc_score, average_precision_score, precision_recall_curve
import matplotlib.pyplot as plt
X, y = df.drop(labels=['ID', 'default payment next month'], axis='columns'), df.loc[:,'default payment next month']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)
from time import time
from scipy.stats import randint as sp_randint
# =============================================================================
# # build a classifier
# clf = RandomForestClassifier(n_estimators=20)
#
#
# # Utility function to report best scores
# def report(results, n_top=3):
# for i in range(1, n_top + 1):
# candidates = np.flatnonzero(results['rank_test_score'] == i)
# for candidate in candidates:
# print("Model with rank: {0}".format(i))
# print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
# results['mean_test_score'][candidate],
# results['std_test_score'][candidate]))
# print("Parameters: {0}".format(results['params'][candidate]))
# print("")
#
#
# # specify parameters and distributions to sample from
# param_dist = {"n_estimators":sp_randint(10, 1000), # added this argument, removed the n_estimators argument above
# "max_depth": [3, None],
# "max_features": sp_randint(1, 11),
# "min_samples_split": sp_randint(2, 11),
# "bootstrap": [True, False],
# "criterion": ["gini", "entropy"]}
#
# # run randomized search
# n_iter_search = 20
# random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
# n_iter=n_iter_search, cv=10, iid=False)
#
# start = time()
# random_search.fit(X_train, y_train)
# print("RandomizedSearchCV took %.2f seconds for %d candidates"
# " parameter settings." % ((time() - start), n_iter_search))
# report(random_search.cv_results_)
# =============================================================================
clf = RandomForestClassifier(n_estimators=20, random_state=0, **{'bootstrap': True, 'criterion': 'entropy', 'max_depth': None, 'max_features': 9, 'min_samples_split': 10})
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print('rf accuracy: ' + str(acc))
yb = clf.predict(X_test)
model = hyb(X_train, y_train, yb)
model.set_parameters(alpha=1, beta=0.1)
model.set_parameters(alpha=0.01, beta=0.95) # added after running
model.generate_rulespace(supp=30, maxlen=10, N=model.N, method='rf')
maps,accuracy_min,covered_min = model.train()
Yhat,covered,Yb = model.predict(X_test, y_test, yb)
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_true=y_test, y_pred=Yhat)
coverage = len(covered)/len(Yhat)
print('accuracy: ' + str(accuracy))
print('coverage: ' + str(coverage))
|
[
"numpy.sum",
"random.sample",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"numpy.argsort",
"numpy.mean",
"numpy.exp",
"numpy.multiply",
"fim.fpgrowth",
"numpy.insert",
"numpy.logical_xor",
"pandas.concat",
"sklearn.ensemble.RandomForestClassifier",
"pandas.get_dummies",
"pandas.read_excel",
"numpy.sort",
"numpy.argwhere",
"numpy.matrix",
"bisect.bisect_left",
"time.time",
"numpy.where",
"numpy.array",
"numpy.random.random",
"numpy.logical_or",
"itertools.chain.from_iterable"
] |
[((29338, 29423), 'pandas.read_excel', 'pd.read_excel', (['"""default of credit card clients.xls"""'], {'sheet_name': '"""Data"""', 'header': '(1)'}), "('default of credit card clients.xls', sheet_name='Data', header=1\n )\n", (29351, 29423), True, 'import pandas as pd\n'), ((30790, 30830), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': "['MARRIAGE']"}), "(df, columns=['MARRIAGE'])\n", (30804, 30830), True, 'import pandas as pd\n'), ((31877, 31942), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)', 'stratify': 'y'}), '(X, y, test_size=0.2, random_state=0, stratify=y)\n', (31893, 31942), False, 'from sklearn.model_selection import train_test_split, RandomizedSearchCV\n'), ((33670, 33844), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(20)', 'random_state': '(0)'}), "(n_estimators=20, random_state=0, **{'bootstrap': \n True, 'criterion': 'entropy', 'max_depth': None, 'max_features': 9,\n 'min_samples_split': 10})\n", (33692, 33844), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n'), ((34317, 34359), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_test', 'y_pred': 'Yhat'}), '(y_true=y_test, y_pred=Yhat)\n', (34331, 34359), False, 'from sklearn.metrics import accuracy_score\n'), ((26529, 26546), 'bisect.bisect_left', 'bisect_left', (['a', 'x'], {}), '(a, x)\n', (26540, 26546), False, 'from bisect import bisect_left\n'), ((5287, 5293), 'time.time', 'time', ([], {}), '()\n', (5291, 5293), False, 'from time import time\n'), ((5959, 5975), 'numpy.array', 'np.array', (['indptr'], {}), '(indptr)\n', (5967, 5975), True, 'import numpy as np\n'), ((7620, 7639), 'numpy.array', 'np.array', (['Z[:, ind]'], {}), '(Z[:, ind])\n', (7628, 7639), True, 'import numpy as np\n'), ((8520, 8537), 'numpy.multiply', 'np.multiply', (['p', 'n'], {}), '(p, n)\n', (8531, 8537), True, 'import numpy as np\n'), ((8642, 8662), 'numpy.logical_xor', 'np.logical_xor', (['p', 'n'], {}), '(p, n)\n', (8656, 8662), True, 'import numpy as np\n'), ((13867, 13904), 'numpy.sum', 'np.sum', (['self.pRMatrix[:, prs]'], {'axis': '(1)'}), '(self.pRMatrix[:, prs], axis=1)\n', (13873, 13904), True, 'import numpy as np\n'), ((13917, 13954), 'numpy.sum', 'np.sum', (['self.nRMatrix[:, nrs]'], {'axis': '(1)'}), '(self.nRMatrix[:, nrs], axis=1)\n', (13923, 13954), True, 'import numpy as np\n'), ((18099, 18116), 'numpy.multiply', 'np.multiply', (['p', 'n'], {}), '(p, n)\n', (18110, 18116), True, 'import numpy as np\n'), ((25157, 25185), 'pandas.concat', 'pd.concat', (['[df, dfn]'], {'axis': '(1)'}), '([df, dfn], axis=1)\n', (25166, 25185), True, 'import pandas as pd\n'), ((25920, 25945), 'numpy.array', 'np.array', (['[i for i in Yb]'], {}), '([i for i in Yb])\n', (25928, 25945), True, 'import numpy as np\n'), ((26797, 26808), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (26805, 26808), True, 'import numpy as np\n'), ((26809, 26823), 'numpy.array', 'np.array', (['Yhat'], {}), '(Yhat)\n', (26817, 26823), True, 'import numpy as np\n'), ((26834, 26846), 'numpy.sum', 'np.sum', (['Yhat'], {}), '(Yhat)\n', (26840, 26846), True, 'import numpy as np\n'), ((27316, 27339), 'numpy.argwhere', 'np.argwhere', (['(left == -1)'], {}), '(left == -1)\n', (27327, 27339), True, 'import numpy as np\n'), ((2901, 2974), 'fim.fpgrowth', 'fpgrowth', (['[itemMatrix[i] for i in pindex]'], {'supp': 'supp', 'zmin': '(1)', 'zmax': 'maxlen'}), '([itemMatrix[i] 
for i in pindex], supp=supp, zmin=1, zmax=maxlen)\n', (2909, 2974), False, 'from fim import fpgrowth\n'), ((3060, 3133), 'fim.fpgrowth', 'fpgrowth', (['[itemMatrix[i] for i in nindex]'], {'supp': 'supp', 'zmin': '(1)', 'zmax': 'maxlen'}), '([itemMatrix[i] for i in nindex], supp=supp, zmin=1, zmax=maxlen)\n', (3068, 3133), False, 'from fim import fpgrowth\n'), ((4454, 4486), 'pandas.concat', 'pd.concat', (['[self.df, df]'], {'axis': '(1)'}), '([self.df, df], axis=1)\n', (4463, 4486), True, 'import pandas as pd\n'), ((6507, 6520), 'numpy.matrix', 'np.matrix', (['df'], {}), '(df)\n', (6516, 6520), True, 'import numpy as np\n'), ((8395, 8437), 'numpy.sum', 'np.sum', (['self.pRMatrix[:, prs_curr]'], {'axis': '(1)'}), '(self.pRMatrix[:, prs_curr], axis=1)\n', (8401, 8437), True, 'import numpy as np\n'), ((8452, 8494), 'numpy.sum', 'np.sum', (['self.nRMatrix[:, nrs_curr]'], {'axis': '(1)'}), '(self.nRMatrix[:, nrs_curr], axis=1)\n', (8458, 8494), True, 'import numpy as np\n'), ((13668, 13710), 'numpy.where', 'np.where', (['(Yhat[covered] != self.Y[covered])'], {}), '(Yhat[covered] != self.Y[covered])\n', (13676, 13710), True, 'import numpy as np\n'), ((13760, 13804), 'numpy.where', 'np.where', (['(Yhat[~covered] != self.Y[~covered])'], {}), '(Yhat[~covered] != self.Y[~covered])\n', (13768, 13804), True, 'import numpy as np\n'), ((13831, 13851), 'numpy.where', 'np.where', (['overlapped'], {}), '(overlapped)\n', (13839, 13851), True, 'import numpy as np\n'), ((17995, 18032), 'numpy.sum', 'np.sum', (['self.pRMatrix[:, prs]'], {'axis': '(1)'}), '(self.pRMatrix[:, prs], axis=1)\n', (18001, 18032), True, 'import numpy as np\n'), ((18047, 18084), 'numpy.sum', 'np.sum', (['self.nRMatrix[:, nrs]'], {'axis': '(1)'}), '(self.nRMatrix[:, nrs], axis=1)\n', (18053, 18084), True, 'import numpy as np\n'), ((26870, 26879), 'numpy.sum', 'np.sum', (['Y'], {}), '(Y)\n', (26876, 26879), True, 'import numpy as np\n'), ((26904, 26916), 'numpy.sum', 'np.sum', (['Yhat'], {}), '(Yhat)\n', (26910, 26916), True, 'import numpy as np\n'), ((2415, 2447), 'pandas.concat', 'pd.concat', (['[self.df, df]'], {'axis': '(1)'}), '([self.df, df], axis=1)\n', (2424, 2447), True, 'import pandas as pd\n'), ((2583, 2604), 'numpy.where', 'np.where', (['(self.Y == 1)'], {}), '(self.Y == 1)\n', (2591, 2604), True, 'import numpy as np\n'), ((2627, 2648), 'numpy.where', 'np.where', (['(self.Y != 1)'], {}), '(self.Y != 1)\n', (2635, 2648), True, 'import numpy as np\n'), ((3483, 3550), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'length'}), '(n_estimators=n_estimators, max_depth=length)\n', (3505, 3550), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n'), ((4011, 4078), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'length'}), '(n_estimators=n_estimators, max_depth=length)\n', (4033, 4078), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n'), ((5741, 5818), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[[itemInd[x] for x in rule] for rule in rules]'], {}), '([[itemInd[x] for x in rule] for rule in rules])\n', (5770, 5818), False, 'import itertools\n'), ((12143, 12151), 'numpy.random.random', 'random', ([], {}), '()\n', (12149, 12151), False, 'from numpy.random import random\n'), ((15679, 15687), 'numpy.random.random', 'random', ([], {}), '()\n', (15685, 
15687), False, 'from numpy.random import random\n'), ((18149, 18169), 'numpy.logical_xor', 'np.logical_xor', (['p', 'n'], {}), '(p, n)\n', (18163, 18169), True, 'import numpy as np\n'), ((19026, 19059), 'numpy.sum', 'np.sum', (['RMatrix[:, rules]'], {'axis': '(1)'}), '(RMatrix[:, rules], axis=1)\n', (19032, 19059), True, 'import numpy as np\n'), ((19497, 19506), 'numpy.exp', 'np.exp', (['p'], {}), '(p)\n', (19503, 19506), True, 'import numpy as np\n'), ((19527, 19545), 'numpy.insert', 'np.insert', (['p', '(0)', '(0)'], {}), '(p, 0, 0)\n', (19536, 19545), True, 'import numpy as np\n'), ((24548, 24559), 'numpy.where', 'np.where', (['p'], {}), '(p)\n', (24556, 24559), True, 'import numpy as np\n'), ((24584, 24595), 'numpy.where', 'np.where', (['n'], {}), '(n)\n', (24592, 24595), True, 'import numpy as np\n'), ((25782, 25793), 'numpy.where', 'np.where', (['p'], {}), '(p)\n', (25790, 25793), True, 'import numpy as np\n'), ((25818, 25829), 'numpy.where', 'np.where', (['n'], {}), '(n)\n', (25826, 25829), True, 'import numpy as np\n'), ((6677, 6692), 'numpy.where', 'np.where', (['(y > 0)'], {}), '(y > 0)\n', (6685, 6692), True, 'import numpy as np\n'), ((7149, 7166), 'numpy.sum', 'np.sum', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (7155, 7166), True, 'import numpy as np\n'), ((18718, 18726), 'numpy.random.random', 'random', ([], {}), '()\n', (18724, 18726), False, 'from numpy.random import random\n'), ((18936, 18956), 'random.sample', 'sample', (['candidate', '(1)'], {}), '(candidate, 1)\n', (18942, 18956), False, 'from random import sample\n'), ((22554, 22562), 'numpy.random.random', 'random', ([], {}), '()\n', (22560, 22562), False, 'from numpy.random import random\n'), ((22971, 22990), 'numpy.sum', 'np.sum', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (22977, 22990), True, 'import numpy as np\n'), ((3000, 3013), 'numpy.sort', 'np.sort', (['x[0]'], {}), '(x[0])\n', (3007, 3013), True, 'import numpy as np\n'), ((3159, 3172), 'numpy.sort', 'np.sort', (['x[0]'], {}), '(x[0])\n', (3166, 3172), True, 'import numpy as np\n'), ((6717, 6737), 'numpy.sum', 'np.sum', (['Zpos'], {'axis': '(0)'}), '(Zpos, axis=0)\n', (6723, 6737), True, 'import numpy as np\n'), ((7445, 7455), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (7452, 7455), True, 'import numpy as np\n'), ((7475, 7502), 'numpy.argsort', 'np.argsort', (['p1[supp_select]'], {}), '(p1[supp_select])\n', (7485, 7502), True, 'import numpy as np\n'), ((19654, 19670), 'random.sample', 'sample', (['rules', '(1)'], {}), '(rules, 1)\n', (19660, 19670), False, 'from random import sample\n'), ((19765, 19773), 'numpy.random.random', 'random', ([], {}), '()\n', (19771, 19773), False, 'from numpy.random import random\n'), ((20515, 20523), 'numpy.random.random', 'random', ([], {}), '()\n', (20521, 20523), False, 'from numpy.random import random\n'), ((22596, 22613), 'random.sample', 'sample', (['select', '(1)'], {}), '(select, 1)\n', (22602, 22613), False, 'from random import sample\n'), ((22752, 22770), 'numpy.where', 'np.where', (['(~covered)'], {}), '(~covered)\n', (22760, 22770), True, 'import numpy as np\n'), ((23111, 23141), 'numpy.sum', 'np.sum', (['(Y[Yhat_neg_index] == 0)'], {}), '(Y[Yhat_neg_index] == 0)\n', (23117, 23141), True, 'import numpy as np\n'), ((24132, 24149), 'numpy.sum', 'np.sum', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (24138, 24149), True, 'import numpy as np\n'), ((24449, 24466), 'numpy.sum', 'np.sum', (['n'], {'axis': '(0)'}), '(n, axis=0)\n', (24455, 24466), True, 'import numpy as np\n'), ((25395, 25412), 'numpy.sum', 'np.sum', 
(['p'], {'axis': '(0)'}), '(p, axis=0)\n', (25401, 25412), True, 'import numpy as np\n'), ((25683, 25700), 'numpy.sum', 'np.sum', (['n'], {'axis': '(0)'}), '(n, axis=0)\n', (25689, 25700), True, 'import numpy as np\n'), ((27497, 27520), 'numpy.where', 'np.where', (['(left == child)'], {}), '(left == child)\n', (27505, 27520), True, 'import numpy as np\n'), ((27593, 27617), 'numpy.where', 'np.where', (['(right == child)'], {}), '(right == child)\n', (27601, 27617), True, 'import numpy as np\n'), ((7751, 7768), 'numpy.sum', 'np.sum', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (7757, 7768), True, 'import numpy as np\n'), ((14684, 14692), 'numpy.random.random', 'random', ([], {}), '()\n', (14690, 14692), False, 'from numpy.random import random\n'), ((16121, 16129), 'numpy.random.random', 'random', ([], {}), '()\n', (16127, 16129), False, 'from numpy.random import random\n'), ((20224, 20245), 'numpy.where', 'np.where', (['RMatrix[ex]'], {}), '(RMatrix[ex])\n', (20232, 20245), True, 'import numpy as np\n'), ((20405, 20427), 'numpy.where', 'np.where', (['(~RMatrix[ex])'], {}), '(~RMatrix[ex])\n', (20413, 20427), True, 'import numpy as np\n'), ((20561, 20578), 'random.sample', 'sample', (['select', '(1)'], {}), '(select, 1)\n', (20567, 20578), False, 'from random import sample\n'), ((21566, 21602), 'numpy.logical_or', 'np.logical_or', (['RMatrix[:, ind]', 'Yhat'], {}), '(RMatrix[:, ind], Yhat)\n', (21579, 21602), True, 'import numpy as np\n'), ((3774, 3784), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (3781, 3784), True, 'import numpy as np\n'), ((4304, 4314), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (4311, 4314), True, 'import numpy as np\n'), ((14373, 14381), 'numpy.random.random', 'random', ([], {}), '()\n', (14379, 14381), False, 'from numpy.random import random\n'), ((17234, 17242), 'numpy.random.random', 'random', ([], {}), '()\n', (17240, 17242), False, 'from numpy.random import random\n'), ((17481, 17489), 'numpy.random.random', 'random', ([], {}), '()\n', (17487, 17489), False, 'from numpy.random import random\n'), ((18781, 18810), 'numpy.where', 'np.where', (['(RMatrix[ex, :] == 1)'], {}), '(RMatrix[ex, :] == 1)\n', (18789, 18810), True, 'import numpy as np\n'), ((19150, 19176), 'numpy.array', 'np.array', (['RMatrix[:, rule]'], {}), '(RMatrix[:, rule])\n', (19158, 19176), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy
import warnings
import operator
import collections
from sagar.crystal.structure import Cell
from sagar.element.base import get_symbol
def read_vasp(filename='POSCAR'):
"""
    Import a POSCAR/CONTCAR file or a file with the .vasp suffix.
parameter:
filename: string, the filename
return: Cell object.
"""
    # TODO: read velocities; not supported yet (may not be needed).
with open(filename, "r") as f:
# _read_string return Cell object.
return _read_string(f.read())
def _read_string(data):
"""
    _read_string makes the io logic easy to test.
parameter: string of vasp input
return: Cell object
"""
lines = [l for l in data.split('\n') if l.rstrip()]
name = lines[0]
lattice_scale = float(lines[1].split()[0])
# lattice vectors
lattice = []
for i in [2, 3, 4]:
s = lines[i].split()
vec = float(s[0]), float(s[1]), float(s[2])
lattice.append(vec)
lattice = numpy.array(lattice)
if lattice_scale < 0:
# In vasp , a negative scale factor is treated as a volume.
# http://pymatgen.org/_modules/pymatgen/io/vasp/inputs.html#POSCAR
vol = abs(numpy.linalg.det(lattice))
lattice *= (-lattice_scale / vol) ** (1 / 3)
else:
lattice *= lattice_scale
# atoms
vasp5 = False
_fifth_line = lines[5].split()
    # VASP 5.x uses the fifth line for the atomic symbols
try:
for i in _fifth_line:
int(i)
numofatoms = _fifth_line
except ValueError:
vasp5 = True
atomtypes = _fifth_line
numofatoms = lines[6].split() # list of string here
if not vasp5:
warnings.warn("symbols of elements in fifth line are missing, "
"all atoms are init to NaN_i (i=0,1,2...)", UserWarning, stacklevel=2)
atomtypes = [str("NaN_{:}".format(i)) for i in range(len(numofatoms))]
atoms = []
for i, num in enumerate(numofatoms):
# https://gitlab.com/ase/ase/blob/master/ase/io/vasp.py
numofatoms[i] = int(num)
[atoms.append(atomtypes[i]) for na in range(numofatoms[i])]
if not vasp5:
line_coortype = 6
else:
line_coortype = 7
# TODO: Supporting Cartesian coordinates vasp input
coortype = lines[line_coortype].split()[0]
if coortype[0] in "sS":
warnings.warn("Sorry! Selective dynamics "
"are not supported now", FutureWarning, stacklevel=2)
line_coortype += 1
coortype = lines[line_coortype].split()[0]
if coortype[0] in "cCkK":
line_first_pos = line_coortype + 1
iscart=True
else:
iscart =False
if coortype[0] in "dD":
line_first_pos = line_coortype + 1
positions = []
total_atoms = sum(numofatoms)
for i in range(line_first_pos, line_first_pos + total_atoms):
s = lines[i].split()
vec = float(s[0]), float(s[1]), float(s[2])
positions.append(vec)
if iscart:
positions = numpy.dot(numpy.array(positions),numpy.linalg.inv(lattice))
return Cell(lattice, positions, atoms)
def write_vasp(cell, filename='POSCAR', suffix='.vasp', long_format=True):
"""
write vasp POSCAR type into file, vasp5 format only.
always write atoms sorted POSCAR.
parameters:
cell: Cell object, the Cell that you wanna write into vasp POSCAR.
filename: string, filename of output file, default='POSCAR'
suffix: string, suffix of filename, default='.vasp'
long_format: bool, if True format %.16f will be write, else %.6f
ref: https://gitlab.com/ase/ase/blob/master/ase/io/vasp.py
if optional parameters (filename and suffix) are not set,
the filename will be 'POSCAR.vasp'
"""
# TODO: write Cartesian coor POSCAR
filname_suffix = ''.join([filename, suffix])
with open(filname_suffix, "w") as f:
f.write(_write_string(cell, long_format))
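# Hedged usage sketch (hypothetical file names):
#   cell = read_vasp('POSCAR')                             # -> Cell object
#   write_vasp(cell, filename='relaxed', suffix='.vasp')   # writes 'relaxed.vasp'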
def _write_string(cell, long_format, print_vacc=False):
"""
    _write_string makes the io logic easy to test.
    return: string representing the POSCAR content
"""
    # Merge and sort the atom types; this determines the system name, the
    # per-species atom counts, and the correct ordering of the coordinates.
# sorted is a list of tuple(atom, na)
atoms_dict = collections.Counter(cell.atoms)
if not print_vacc:
del atoms_dict[0]
sorted_symbols = sorted(atoms_dict.items(), key=operator.itemgetter(0))
list_symbols = ["{:}{:}".format(get_symbol(atom), na)
for atom, na in sorted_symbols]
comment = ' '.join(list_symbols)
comment += '\n'
scale = '{:9.6f}'.format(1.0)
scale += '\n'
lattice_string = ""
if long_format:
latt_form = '21.16f'
else:
latt_form = '11.6f'
for vec in cell.lattice:
lattice_string += ' '
for v in vec:
lattice_string += '{:{form}}'.format(v, form=latt_form)
lattice_string += '\n'
# atom types and their numbers
atom_types = ' '.join([get_symbol(i[0]) for i in sorted_symbols])
atom_types += '\n'
atom_numbers = ' '.join([str(i[1]) for i in sorted_symbols])
atom_numbers += '\n'
# TODO: write Cartesian coor
coor_type = 'Direct\n'
# argsort atoms and resort coor
idx = numpy.argsort(cell.atoms)
coord = cell.positions[idx]
atoms = cell.atoms[idx]
positions_string = ""
if long_format:
pos_form = '19.16f'
else:
pos_form = '9.6f'
for i, vec in enumerate(coord):
if atoms[i] == 0:
continue
positions_string += ' '
for v in vec:
positions_string += '{:{form}}'.format(v, form=pos_form)
positions_string += ' ' + get_symbol(atoms[i])
positions_string += '\n'
poscar_string = ''.join([comment,
scale,
lattice_string,
atom_types,
atom_numbers,
coor_type,
positions_string])
return poscar_string
|
[
"sagar.crystal.structure.Cell",
"numpy.argsort",
"sagar.element.base.get_symbol",
"numpy.linalg.det",
"numpy.array",
"numpy.linalg.inv",
"collections.Counter",
"operator.itemgetter",
"warnings.warn"
] |
[((996, 1016), 'numpy.array', 'numpy.array', (['lattice'], {}), '(lattice)\n', (1007, 1016), False, 'import numpy\n'), ((3122, 3153), 'sagar.crystal.structure.Cell', 'Cell', (['lattice', 'positions', 'atoms'], {}), '(lattice, positions, atoms)\n', (3126, 3153), False, 'from sagar.crystal.structure import Cell\n'), ((4222, 4253), 'collections.Counter', 'collections.Counter', (['cell.atoms'], {}), '(cell.atoms)\n', (4241, 4253), False, 'import collections\n'), ((5222, 5247), 'numpy.argsort', 'numpy.argsort', (['cell.atoms'], {}), '(cell.atoms)\n', (5235, 5247), False, 'import numpy\n'), ((1711, 1852), 'warnings.warn', 'warnings.warn', (['"""symbols of elements in fifth line are missing, all atoms are init to NaN_i (i=0,1,2...)"""', 'UserWarning'], {'stacklevel': '(2)'}), "(\n 'symbols of elements in fifth line are missing, all atoms are init to NaN_i (i=0,1,2...)'\n , UserWarning, stacklevel=2)\n", (1724, 1852), False, 'import warnings\n'), ((2390, 2487), 'warnings.warn', 'warnings.warn', (['"""Sorry! Selective dynamics are not supported now"""', 'FutureWarning'], {'stacklevel': '(2)'}), "('Sorry! Selective dynamics are not supported now',\n FutureWarning, stacklevel=2)\n", (2403, 2487), False, 'import warnings\n'), ((1205, 1230), 'numpy.linalg.det', 'numpy.linalg.det', (['lattice'], {}), '(lattice)\n', (1221, 1230), False, 'import numpy\n'), ((3061, 3083), 'numpy.array', 'numpy.array', (['positions'], {}), '(positions)\n', (3072, 3083), False, 'import numpy\n'), ((3084, 3109), 'numpy.linalg.inv', 'numpy.linalg.inv', (['lattice'], {}), '(lattice)\n', (3100, 3109), False, 'import numpy\n'), ((4355, 4377), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (4374, 4377), False, 'import operator\n'), ((4416, 4432), 'sagar.element.base.get_symbol', 'get_symbol', (['atom'], {}), '(atom)\n', (4426, 4432), False, 'from sagar.element.base import get_symbol\n'), ((4957, 4973), 'sagar.element.base.get_symbol', 'get_symbol', (['i[0]'], {}), '(i[0])\n', (4967, 4973), False, 'from sagar.element.base import get_symbol\n'), ((5659, 5679), 'sagar.element.base.get_symbol', 'get_symbol', (['atoms[i]'], {}), '(atoms[i])\n', (5669, 5679), False, 'from sagar.element.base import get_symbol\n')]
|
from datetime import datetime
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
import src.config as cfg
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
def main():
connection_rw_size = cfg.CONNECTION_RW_SIZE
    time_rw_size_min = cfg.TIME_RW_SIZE
    features_csv = f'../../data/processed/full_ft_netflow_crw_{connection_rw_size}_trw_{time_rw_size_min}_2.csv'
features_df = pd.read_csv(features_csv)
label = np.array(features_df['VPN'])
baseline_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'VPN']
comb_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'VPN','Tot Pkts', 'TotLen']
time_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'Tot Pkts', 'TotLen', 'VPN']
conn_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'Tot Pkts', 'TotLen', 'VPN']
time_features = features_df.loc[:, 'Time Flow count': 'Time-Rev Pkt Len Tot'].columns
conn_features = features_df.loc[:, 'Con Flow count': 'Conn-Rev Pkt Len Tot'].columns
for tft in time_features:
baseline_drop_table.append(tft)
conn_drop_table.append(tft)
for ctf in conn_features:
baseline_drop_table.append(ctf)
time_drop_table.append(ctf)
rf_baseline_ft = features_df.drop(baseline_drop_table, axis=1)
rf_time_ft = features_df.drop(time_drop_table, axis=1)
rf_conn_ft = features_df.drop(conn_drop_table, axis=1)
rf_comb_ft = features_df.drop(comb_drop_table,axis=1)
baseline_ft_name = list(rf_baseline_ft.columns)
time_ft_name=list(rf_time_ft.columns)
conn_ft_name=list(rf_conn_ft.columns)
comb_ft_name=list(rf_comb_ft.columns)
rf_baseline_ft_array=np.array(rf_baseline_ft)
rf_time_ft_array=np.array(rf_time_ft)
rf_conn_ft_array=np.array(rf_conn_ft)
rf_comb_ft_array=np.array(rf_comb_ft)
baseline_predictions, baseline_y_test, baseline_model=random_forest_classifyer(rf_baseline_ft_array,label)
time_predictions, time_y_test, time_model = random_forest_classifyer(rf_time_ft_array, label)
conn_predictions, conn_y_test, conn_model = random_forest_classifyer(rf_conn_ft_array, label)
comb_predictions, comb_y_test, comb_model = random_forest_classifyer(rf_comb_ft_array, label)
print('///////////////// Baseline /////////////////')
print('\n')
evaluate_rf_results(baseline_predictions,baseline_y_test,baseline_model, rf_baseline_ft_array, label)
feature_importance(baseline_model, baseline_ft_name)
print('///////////////// Time /////////////////')
print('\n')
evaluate_rf_results(time_predictions,time_y_test, time_model, rf_time_ft_array, label)
print('///////////////// Connection /////////////////')
print('\n')
evaluate_rf_results(conn_predictions,conn_y_test,conn_model,rf_conn_ft_array,label)
print('///////////////// Combined /////////////////')
print('\n')
evaluate_rf_results(comb_predictions, comb_y_test, comb_model, rf_comb_ft_array, label)
feature_importance(comb_model,comb_ft_name)
def random_forest_classifyer(feature_data, label, test_size=0.3, random_state=None):
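    """
    Splits the data into train/test sets, fits a RandomForestClassifier and
    returns (predictions on the test split, the test labels, the fitted model).
    """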
x_train, x_test, y_train, y_test = train_test_split(
feature_data, label, test_size=test_size, random_state=random_state)
random_forest = RandomForestClassifier(n_jobs=2, random_state=random_state)
start_time=datetime.now()
random_forest.fit(x_train, y_train)
print(datetime.now()-start_time)
start_time = datetime.now()
predictions = random_forest.predict(x_test)
print(datetime.now() - start_time)
return predictions, y_test, random_forest
def evaluate_rf_results(predictions, y_test, model, feature_data, label):
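    """
    Prints 10-fold cross-validated ROC-AUC and accuracy scores, the confusion
    matrix and the classification report for the fitted model, and returns them.
    """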
start_time = datetime.now()
rfc_cv_score = cross_val_score(model, feature_data, label, cv=10, scoring='roc_auc')
print(datetime.now() - start_time)
start_time = datetime.now()
rfc_cv_score_acc = cross_val_score(model, feature_data, label, cv=10, scoring='accuracy')
print(datetime.now() - start_time)
print("=== Confusion Matrix ===")
start_time = datetime.now()
cf_matrix = confusion_matrix(y_test, predictions)
print(datetime.now() - start_time)
print(cf_matrix)
print('\n')
print("=== Classification Report ===")
start_time = datetime.now()
class_report = classification_report(y_test, predictions)
print(datetime.now() - start_time)
print(class_report)
print('\n')
print("=== All AUC Scores ===")
print(rfc_cv_score)
print('\n')
print("=== Mean AUC Score ===")
print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean())
print('\n')
print("=== All ACC Scores ===")
print(rfc_cv_score_acc)
print('\n')
print("=== Mean ACC Score ===")
print("Mean ACC Score - Random Forest: ", rfc_cv_score_acc.mean())
return cf_matrix, class_report, rfc_cv_score, rfc_cv_score.mean()
def feature_importance(model, feature_names):
fi = pd.DataFrame({'feature': feature_names,
'importance': model.feature_importances_}). \
sort_values('importance', ascending=False)
print(fi)
return fi
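# Note: feature_importance returns a two-column DataFrame ('feature', 'importance')
# holding the fitted forest's impurity-based importances, sorted from most to least
# important.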
if __name__ == '__main__':
main()
# print(features_connection)
# connection_train_ft, connection_test_ft, connection_train_labels, conection_test_labels=train_test_split(features_connection, label, test_size=0.3, random_state=42)
#
# print(f'Training Features Shape: {connection_train_ft.shape}' )
# print(f'Training Labels Shape:{connection_test_ft.shape}' )
# print(f'Testing Features Shape:{connection_train_labels.shape}')
# print(f'Testing Labels Shape:{conection_test_labels.shape}')
#
# accuracy_list=[]
#
# random_forest= RandomForestClassifier(n_jobs=2)
# # t = tqdm(total=1000)
# # for xx in range(0,1000):
#
# random_forest.fit(connection_train_ft,connection_train_labels)
#
# predictions=random_forest.predict(connection_test_ft)
#
#
#
# matches=0
#
# # print(int(predictions[20]))
# # print(int(conection_test_labels[20]))
#
#
# for x in range(0, predictions.shape[0]):
# if int(predictions[x])==int(conection_test_labels[x]):
# matches+=1
#
# # print(matches/predictions.shape[0])
# accuracy_list.append(matches/predictions.shape[0])
# # t.update(1)
#
# print('///////////////////////////////////')
# print(max(accuracy_list))
# print(min(accuracy_list))
# print(sum(accuracy_list)/len(accuracy_list))
#
# rfc_cv_score = cross_val_score(random_forest, features_connection, label, cv=10, scoring='roc_auc')
# print("=== Confusion Matrix ===")
# print(confusion_matrix(conection_test_labels, predictions))
# print('\n')
# print("=== Classification Report ===")
# print(classification_report(conection_test_labels, predictions))
# print('\n')
# print("=== All AUC Scores ===")
# print(rfc_cv_score)
# print('\n')
# print("=== Mean AUC Score ===")
# print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean())
#
# fi=pd.DataFrame({'feature': features_connection_names,
# 'importance': random_forest.feature_importances_}).\
# sort_values('importance', ascending = False)
#
# print(fi)
# print("/////////////")
# print(fi.head())
|
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"sklearn.metrics.classification_report",
"numpy.array",
"sklearn.metrics.confusion_matrix",
"datetime.datetime.now"
] |
[((636, 661), 'pandas.read_csv', 'pd.read_csv', (['features_csv'], {}), '(features_csv)\n', (647, 661), True, 'import pandas as pd\n'), ((675, 703), 'numpy.array', 'np.array', (["features_df['VPN']"], {}), "(features_df['VPN'])\n", (683, 703), True, 'import numpy as np\n'), ((1911, 1935), 'numpy.array', 'np.array', (['rf_baseline_ft'], {}), '(rf_baseline_ft)\n', (1919, 1935), True, 'import numpy as np\n'), ((1957, 1977), 'numpy.array', 'np.array', (['rf_time_ft'], {}), '(rf_time_ft)\n', (1965, 1977), True, 'import numpy as np\n'), ((1999, 2019), 'numpy.array', 'np.array', (['rf_conn_ft'], {}), '(rf_conn_ft)\n', (2007, 2019), True, 'import numpy as np\n'), ((2041, 2061), 'numpy.array', 'np.array', (['rf_comb_ft'], {}), '(rf_comb_ft)\n', (2049, 2061), True, 'import numpy as np\n'), ((3376, 3466), 'sklearn.model_selection.train_test_split', 'train_test_split', (['feature_data', 'label'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(feature_data, label, test_size=test_size, random_state=\n random_state)\n', (3392, 3466), False, 'from sklearn.model_selection import train_test_split\n'), ((3491, 3550), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(2)', 'random_state': 'random_state'}), '(n_jobs=2, random_state=random_state)\n', (3513, 3550), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3566, 3580), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3578, 3580), False, 'from datetime import datetime\n'), ((3676, 3690), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3688, 3690), False, 'from datetime import datetime\n'), ((3917, 3931), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3929, 3931), False, 'from datetime import datetime\n'), ((3951, 4020), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'feature_data', 'label'], {'cv': '(10)', 'scoring': '"""roc_auc"""'}), "(model, feature_data, label, cv=10, scoring='roc_auc')\n", (3966, 4020), False, 'from sklearn.model_selection import cross_val_score\n'), ((4077, 4091), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4089, 4091), False, 'from datetime import datetime\n'), ((4115, 4185), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'feature_data', 'label'], {'cv': '(10)', 'scoring': '"""accuracy"""'}), "(model, feature_data, label, cv=10, scoring='accuracy')\n", (4130, 4185), False, 'from sklearn.model_selection import cross_val_score\n'), ((4280, 4294), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4292, 4294), False, 'from datetime import datetime\n'), ((4311, 4348), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4327, 4348), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((4485, 4499), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4497, 4499), False, 'from datetime import datetime\n'), ((4519, 4561), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4540, 4561), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((3631, 3645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3643, 3645), False, 'from datetime import datetime\n'), ((3749, 3763), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3761, 3763), False, 'from datetime import datetime\n'), ((4031, 4045), 'datetime.datetime.now', 'datetime.now', ([], {}), 
'()\n', (4043, 4045), False, 'from datetime import datetime\n'), ((4196, 4210), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4208, 4210), False, 'from datetime import datetime\n'), ((4359, 4373), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4371, 4373), False, 'from datetime import datetime\n'), ((4572, 4586), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4584, 4586), False, 'from datetime import datetime\n'), ((5152, 5239), 'pandas.DataFrame', 'pd.DataFrame', (["{'feature': feature_names, 'importance': model.feature_importances_}"], {}), "({'feature': feature_names, 'importance': model.\n feature_importances_})\n", (5164, 5239), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
#read generator parameters into DataFrame
df_gen = pd.read_excel('NEISO_data_file/generators.xlsx',header=0)
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('NEISO_data_file/paths.csv',header=0)
#list zones
zones = ['CT', 'ME', 'NH', 'NEMA', 'RI', 'SEMA', 'VT', 'WCMA']
##time series of load for each zone
df_load_all = pd.read_csv('../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load_all = df_load_all[zones]
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/NEISO_dispatchable_hydro.csv',header=0)
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_excel('NEISO_data_file/must_run.xlsx',header=0)
# must run generation
must_run_CT = []
must_run_ME = []
must_run_NEMA = []
must_run_NH = []
must_run_RI = []
must_run_SEMA = []
must_run_VT = []
must_run_WCMA = []
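# each zone's constant must-run capacity is expanded below into a flat 8760-hour series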
must_run_CT = np.ones((8760,1))*df_must.loc[0,'CT']
must_run_ME = np.ones((8760,1))*df_must.loc[0,'ME']
must_run_NEMA = np.ones((8760,1))*df_must.loc[0,'NEMA']
must_run_NH = np.ones((8760,1))*df_must.loc[0,'NH']
must_run_RI = np.ones((8760,1))*df_must.loc[0,'RI']
must_run_SEMA = np.ones((8760,1))*df_must.loc[0,'SEMA']
must_run_VT = np.ones((8760,1))*df_must.loc[0,'VT']
must_run_WCMA = np.ones((8760,1))*df_must.loc[0,'WCMA']
must_run = np.column_stack((must_run_CT,must_run_ME,must_run_NEMA,must_run_NH,must_run_RI,must_run_SEMA,must_run_VT,must_run_WCMA))
df_total_must_run = pd.DataFrame(must_run,columns=('CT','ME','NEMA','NH','RI','SEMA','VT','WCMA'))
df_total_must_run.to_csv('NEISO_data_file/must_run_hourly.csv')
#natural gas prices
df_ng_all = pd.read_excel('../Time_series_data/Gas_prices/NG.xlsx', header=0)
df_ng_all = df_ng_all[zones]
#oil prices
df_oil_all = pd.read_excel('../Time_series_data/Oil_prices/Oil_prices.xlsx', header=0)
df_oil_all = df_oil_all[zones]
# time series of offshore wind generation for each zone
df_offshore_wind_all = pd.read_excel('../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx',header=0)
# time series of solar generation
df_solar = pd.read_excel('NEISO_data_file/hourly_solar_gen.xlsx',header=0)
solar_caps = pd.read_excel('NEISO_data_file/solar_caps.xlsx',header=0)
# time series of onshore wind generation
df_onshore_wind = pd.read_excel('NEISO_data_file/hourly_onshore_wind_gen.xlsx',header=0)
onshore_wind_caps = pd.read_excel('NEISO_data_file/wind_onshore_caps.xlsx',header=0)
def setup(year, Hub_height, Offshore_capacity):
##time series of natural gas prices for each zone
df_ng = globals()['df_ng_all'].copy()
df_ng = df_ng.reset_index()
##time series of oil prices for each zone
df_oil = globals()['df_oil_all'].copy()
df_oil = df_oil.reset_index()
##time series of load for each zone
df_load = globals()['df_load_all'].loc[year*8760:year*8760+8759].copy()
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
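    # reserves are assumed to be 4% of the hourly system-wide load (summed over all zones)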
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
##daily time series of dispatchable imports by path
df_imports = pd.read_csv('Path_setup/NEISO_dispatchable_imports.csv',header=0)
##hourly time series of exports by zone
df_exports = pd.read_csv('Path_setup/NEISO_exports.csv',header=0)
# time series of offshore wind generation for each zone
df_offshore_wind = globals()['df_offshore_wind_all'].loc[:, Hub_height].copy()
df_offshore_wind = df_offshore_wind.loc[year*8760:year*8760+8759]
df_offshore_wind = df_offshore_wind.reset_index()
offshore_wind_caps = pd.read_excel('NEISO_data_file/wind_offshore_caps.xlsx')
############
# sets #
############
#write data.dat file
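    # the data commands written below use 'set'/'param' blocks terminated by ';'
    # (AMPL/Pyomo .dat syntax, presumably read by the copied dispatch model)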
import os
from shutil import copy
from pathlib import Path
path = str(Path.cwd().parent) + str(Path('/UCED/LR/NEISO' +'_'+ str(Hub_height) +'_'+ str(Offshore_capacity) +'_'+ str(year)))
os.makedirs(path,exist_ok=True)
generators_file='NEISO_data_file/generators.xlsx'
dispatch_file='../UCED/NEISO_dispatch.py'
dispatchLP_file='../UCED/NEISO_dispatchLP.py'
wrapper_file='../UCED/NEISO_wrapper.py'
simulation_file='../UCED/NEISO_simulation.py'
copy(dispatch_file,path)
copy(wrapper_file,path)
copy(simulation_file,path)
copy(dispatchLP_file,path)
copy(generators_file,path)
filename = path + '/data.dat'
#write data.dat file
# filename = 'NEISO_data_file/data.dat'
with open(filename, 'w') as f:
# generator sets by zone
for z in zones:
# zone string
z_int = zones.index(z)
f.write('set Zone%dGenerators :=\n' % (z_int+1))
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z:
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_CT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYCT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_WCMA :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYWCMA_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_VT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYVT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# HQ imports
f.write('set HQ_Imports_VT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'HQVT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NB imports
f.write('set NB_Imports_ME :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NBME_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# generator sets by type
# coal
f.write('set Coal :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'coal':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# # oil
# f.write('set Oil :=\n')
# # pull relevant generators
# for gen in range(0,len(df_gen)):
# if df_gen.loc[gen,'typ'] == 'oil':
# unit_name = df_gen.loc[gen,'name']
# unit_name = unit_name.replace(' ','_')
# f.write(unit_name + ' ')
# f.write(';\n\n')
# Slack
f.write('set Slack :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'slack':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Hydro
f.write('set Hydro :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Ramping
f.write('set Ramping :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro' or df_gen.loc[gen,'typ'] == 'imports':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# gas generator sets by zone and type
for z in zones:
# zone string
z_int = zones.index(z)
# Natural Gas
# find relevant generators
trigger = 0
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
trigger = 1
if trigger > 0:
# pull relevant generators
f.write('set Zone%dGas :=\n' % (z_int+1))
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# oil generator sets by zone and type
for z in zones:
# zone string
z_int = zones.index(z)
# find relevant generators
trigger = 0
for gen in range(0,len(df_gen)):
if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
trigger = 1
if trigger > 0:
# pull relevant generators
f.write('set Zone%dOil :=\n' % (z_int+1))
for gen in range(0,len(df_gen)):
if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# zones
f.write('set zones :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
# sources
f.write('set sources :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
# sinks
f.write('set sinks :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
################
# parameters #
################
# simulation details
SimHours = 8760
f.write('param SimHours := %d;' % SimHours)
f.write('\n')
f.write('param SimDays:= %d;' % int(SimHours/24))
f.write('\n\n')
HorizonHours = 48
f.write('param HorizonHours := %d;' % HorizonHours)
f.write('\n\n')
HorizonDays = int(HorizonHours/24)
f.write('param HorizonDays := %d;' % HorizonDays)
f.write('\n\n')
# create parameter matrix for transmission paths (source and sink connections)
f.write('param:' + '\t' + 'limit' + '\t' +'hurdle :=' + '\n')
for z in zones:
for x in zones:
f.write(z + '\t' + x + '\t')
match = 0
for p in range(0,len(df_paths)):
source = df_paths.loc[p,'start_zone']
sink = df_paths.loc[p,'end_zone']
if source == z and sink == x:
match = 1
p_match = p
if match > 0:
f.write(str(round(df_paths.loc[p_match,'limit'],3)) + '\t' + str(round(df_paths.loc[p_match,'hurdle'],3)) + '\n')
else:
f.write('0' + '\t' + '0' + '\n')
f.write(';\n\n')
# create parameter matrix for generators
f.write('param:' + '\t')
for c in df_gen.columns:
if c != 'name':
f.write(c + '\t')
f.write(':=\n\n')
for i in range(0,len(df_gen)):
for c in df_gen.columns:
if c == 'name':
unit_name = df_gen.loc[i,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + '\t')
elif c == 'typ' or c == 'zone':
f.write(str(df_gen.loc[i,c]) + '\t')
else:
f.write(str(round(df_gen.loc[i,c],3)) + '\t')
f.write('\n')
f.write(';\n\n')
# times series data
# zonal (hourly)
f.write('param:' + '\t' + 'SimDemand' + '\t' + 'SimOffshoreWind' \
+ '\t' + 'SimSolar' + '\t' + 'SimOnshoreWind' + '\t' + 'SimMustRun:=' + '\n')
for z in zones:
wz = offshore_wind_caps.loc[0,z]
sz = solar_caps.loc[0,z]
owz = onshore_wind_caps.loc[0,z]
for h in range(0,len(df_load)):
f.write(z + '\t' + str(h+1) + '\t' + str(round(df_load.loc[h,z],3))\
+ '\t' + str(round(df_offshore_wind.loc[h,Hub_height]*wz,3))\
+ '\t' + str(round(df_solar.loc[h,'Solar_Output_MWh']*sz,3))\
+ '\t' + str(round(df_onshore_wind.loc[h,'Onshore_Output_MWh']*owz,3))\
+ '\t' + str(round(df_total_must_run.loc[h,z],3)) + '\n')
f.write(';\n\n')
# zonal (daily)
f.write('param:' + '\t' + 'SimGasPrice' + '\t' + 'SimOilPrice:=' + '\n')
for z in zones:
for d in range(0,int(SimHours/24)):
f.write(z + '\t' + str(d+1) + '\t' + str(round(df_ng.loc[d,z], 3)) + '\t' + str(round(df_oil.loc[d,z], 3)) + '\n')
f.write(';\n\n')
#system wide (daily)
f.write('param:' + '\t' + 'SimNY_imports_CT' + '\t' + 'SimNY_imports_VT' + '\t' + 'SimNY_imports_WCMA' + '\t' + 'SimNB_imports_ME' + '\t' + 'SimHQ_imports_VT' + '\t' + 'SimCT_hydro' + '\t' + 'SimME_hydro' + '\t' + 'SimNH_hydro' + '\t' + 'SimNEMA_hydro' + '\t' + 'SimRI_hydro' + '\t' + 'SimVT_hydro' + '\t' + 'SimWCMA_hydro:=' + '\n')
for d in range(0,len(df_imports)):
f.write(str(d+1) + '\t' + str(round(df_imports.loc[d,'NY_imports_CT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_VT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_WCMA'],3)) + '\t' + str(round(df_imports.loc[d,'NB_imports_ME'],3)) + '\t' + str(round(df_imports.loc[d,'HQ_imports_VT'],3)) + '\t' + str(round(df_hydro.loc[d,'CT'],3)) + '\t' + str(round(df_hydro.loc[d,'ME'],3)) + '\t' + str(round(df_hydro.loc[d,'NH'],3)) + '\t' + str(round(df_hydro.loc[d,'NEMA'],3)) + '\t' + str(round(df_hydro.loc[d,'RI'],3)) + '\t' + str(round(df_hydro.loc[d,'VT'],3)) + '\t' + str(round(df_hydro.loc[d,'WCMA'],3)) + '\n')
f.write(';\n\n')
#system wide (hourly)
f.write('param:' + '\t' + 'SimCT_exports_NY' + '\t' + 'SimWCMA_exports_NY' + '\t' + 'SimVT_exports_NY' + '\t' + 'SimVT_exports_HQ' + '\t' + 'SimME_exports_NB' + '\t' + 'SimReserves:=' + '\n')
for h in range(0,len(df_load)):
f.write(str(h+1) + '\t' + str(round(df_exports.loc[h,'CT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'WCMA_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_HQ'],3)) + '\t' + str(round(df_exports.loc[h,'ME_exports_NB'],3)) + '\t' + str(round(df_reserves.loc[h,'reserves'],3)) + '\n')
f.write(';\n\n')
return None
|
[
"pandas.DataFrame",
"numpy.sum",
"os.makedirs",
"pandas.read_csv",
"numpy.ones",
"pandas.read_excel",
"numpy.column_stack",
"pathlib.Path.cwd",
"shutil.copy"
] |
[((177, 235), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/generators.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/generators.xlsx', header=0)\n", (190, 235), True, 'import pandas as pd\n'), ((297, 347), 'pandas.read_csv', 'pd.read_csv', (['"""NEISO_data_file/paths.csv"""'], {'header': '(0)'}), "('NEISO_data_file/paths.csv', header=0)\n", (308, 347), True, 'import pandas as pd\n'), ((474, 574), 'pandas.read_csv', 'pd.read_csv', (['"""../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv"""'], {'header': '(0)'}), "(\n '../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv',\n header=0)\n", (485, 574), True, 'import pandas as pd\n'), ((643, 708), 'pandas.read_csv', 'pd.read_csv', (['"""Hydro_setup/NEISO_dispatchable_hydro.csv"""'], {'header': '(0)'}), "('Hydro_setup/NEISO_dispatchable_hydro.csv', header=0)\n", (654, 708), True, 'import pandas as pd\n'), ((762, 818), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/must_run.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/must_run.xlsx', header=0)\n", (775, 818), True, 'import pandas as pd\n'), ((1428, 1559), 'numpy.column_stack', 'np.column_stack', (['(must_run_CT, must_run_ME, must_run_NEMA, must_run_NH, must_run_RI,\n must_run_SEMA, must_run_VT, must_run_WCMA)'], {}), '((must_run_CT, must_run_ME, must_run_NEMA, must_run_NH,\n must_run_RI, must_run_SEMA, must_run_VT, must_run_WCMA))\n', (1443, 1559), True, 'import numpy as np\n'), ((1569, 1659), 'pandas.DataFrame', 'pd.DataFrame', (['must_run'], {'columns': "('CT', 'ME', 'NEMA', 'NH', 'RI', 'SEMA', 'VT', 'WCMA')"}), "(must_run, columns=('CT', 'ME', 'NEMA', 'NH', 'RI', 'SEMA',\n 'VT', 'WCMA'))\n", (1581, 1659), True, 'import pandas as pd\n'), ((1745, 1810), 'pandas.read_excel', 'pd.read_excel', (['"""../Time_series_data/Gas_prices/NG.xlsx"""'], {'header': '(0)'}), "('../Time_series_data/Gas_prices/NG.xlsx', header=0)\n", (1758, 1810), True, 'import pandas as pd\n'), ((1866, 1939), 'pandas.read_excel', 'pd.read_excel', (['"""../Time_series_data/Oil_prices/Oil_prices.xlsx"""'], {'header': '(0)'}), "('../Time_series_data/Oil_prices/Oil_prices.xlsx', header=0)\n", (1879, 1939), True, 'import pandas as pd\n'), ((2051, 2156), 'pandas.read_excel', 'pd.read_excel', (['"""../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx"""'], {'header': '(0)'}), "(\n '../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx',\n header=0)\n", (2064, 2156), True, 'import pandas as pd\n'), ((2193, 2257), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/hourly_solar_gen.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/hourly_solar_gen.xlsx', header=0)\n", (2206, 2257), True, 'import pandas as pd\n'), ((2270, 2328), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/solar_caps.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/solar_caps.xlsx', header=0)\n", (2283, 2328), True, 'import pandas as pd\n'), ((2388, 2459), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/hourly_onshore_wind_gen.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/hourly_onshore_wind_gen.xlsx', header=0)\n", (2401, 2459), True, 'import pandas as pd\n'), ((2479, 2544), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/wind_onshore_caps.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/wind_onshore_caps.xlsx', header=0)\n", (2492, 2544), True, 'import pandas as pd\n'), ((1003, 1021), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1010, 1021), True, 'import numpy as np\n'), ((1055, 1073), 'numpy.ones', 
'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1062, 1073), True, 'import numpy as np\n'), ((1109, 1127), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1116, 1127), True, 'import numpy as np\n'), ((1163, 1181), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1170, 1181), True, 'import numpy as np\n'), ((1215, 1233), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1222, 1233), True, 'import numpy as np\n'), ((1269, 1287), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1276, 1287), True, 'import numpy as np\n'), ((1323, 1341), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1330, 1341), True, 'import numpy as np\n'), ((1377, 1395), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1384, 1395), True, 'import numpy as np\n'), ((3238, 3260), 'pandas.DataFrame', 'pd.DataFrame', (['reserves'], {}), '(reserves)\n', (3250, 3260), True, 'import pandas as pd\n'), ((3378, 3444), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/NEISO_dispatchable_imports.csv"""'], {'header': '(0)'}), "('Path_setup/NEISO_dispatchable_imports.csv', header=0)\n", (3389, 3444), True, 'import pandas as pd\n'), ((3510, 3563), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/NEISO_exports.csv"""'], {'header': '(0)'}), "('Path_setup/NEISO_exports.csv', header=0)\n", (3521, 3563), True, 'import pandas as pd\n'), ((3864, 3920), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/wind_offshore_caps.xlsx"""'], {}), "('NEISO_data_file/wind_offshore_caps.xlsx')\n", (3877, 3920), True, 'import pandas as pd\n'), ((4226, 4258), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (4237, 4258), False, 'import os\n'), ((4516, 4541), 'shutil.copy', 'copy', (['dispatch_file', 'path'], {}), '(dispatch_file, path)\n', (4520, 4541), False, 'from shutil import copy\n'), ((4545, 4569), 'shutil.copy', 'copy', (['wrapper_file', 'path'], {}), '(wrapper_file, path)\n', (4549, 4569), False, 'from shutil import copy\n'), ((4573, 4600), 'shutil.copy', 'copy', (['simulation_file', 'path'], {}), '(simulation_file, path)\n', (4577, 4600), False, 'from shutil import copy\n'), ((4604, 4631), 'shutil.copy', 'copy', (['dispatchLP_file', 'path'], {}), '(dispatchLP_file, path)\n', (4608, 4631), False, 'from shutil import copy\n'), ((4635, 4662), 'shutil.copy', 'copy', (['generators_file', 'path'], {}), '(generators_file, path)\n', (4639, 4662), False, 'from shutil import copy\n'), ((3200, 3216), 'numpy.sum', 'np.sum', (['rv[i, :]'], {}), '(rv[i, :])\n', (3206, 3216), True, 'import numpy as np\n'), ((4106, 4116), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4114, 4116), False, 'from pathlib import Path\n')]
|
import numpy as np
# dictionary describing options available to tune this algorithm
options = {
"peak_size": {"purpose": "Estimate of the peak size, in pixels. If 'auto', attempts to determine automatically. Otherwise, this should be an integer.",
"default": "auto",
"type": "int",
"has_auto": True},
"refine_positions": {"purpose": "TODO",
"default": False,
"type": "bool"},
"progress_object": {"purpose": "Object used to present a progress bar to the user. For definition, see UI_interface folder.",
"default": None},
}
def run(data):
# TODO: need to actually implement this peak finder.
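    # Placeholder until implemented: returns a dummy 4x2 array (presumably four (x, y) peak positions).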
return np.zeros((4,2))
|
[
"numpy.zeros"
] |
[((750, 766), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (758, 766), True, 'import numpy as np\n')]
|
from plume.perceptron import PerceptronClassifier
import numpy as np
x_train = np.array([[3, 3], [4, 3], [1, 1]])
y_train = np.array([1, 1, -1])
clf = PerceptronClassifier(dual=False)
clf.fit(x_train, y_train)
print(clf.get_model())
print(clf.predict(x_train))
clf1 = PerceptronClassifier()
clf1.fit(x_train, y_train)
print(clf1.get_model())
print(clf1.predict(x_train))
|
[
"plume.perceptron.PerceptronClassifier",
"numpy.array"
] |
[((80, 114), 'numpy.array', 'np.array', (['[[3, 3], [4, 3], [1, 1]]'], {}), '([[3, 3], [4, 3], [1, 1]])\n', (88, 114), True, 'import numpy as np\n'), ((125, 145), 'numpy.array', 'np.array', (['[1, 1, -1]'], {}), '([1, 1, -1])\n', (133, 145), True, 'import numpy as np\n'), ((153, 185), 'plume.perceptron.PerceptronClassifier', 'PerceptronClassifier', ([], {'dual': '(False)'}), '(dual=False)\n', (173, 185), False, 'from plume.perceptron import PerceptronClassifier\n'), ((272, 294), 'plume.perceptron.PerceptronClassifier', 'PerceptronClassifier', ([], {}), '()\n', (292, 294), False, 'from plume.perceptron import PerceptronClassifier\n')]
|
from transformers import BertForTokenClassification
import torch
from transformers import BertTokenizer
import numpy as np
import nltk.data
nltk.download('punkt')
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertConfig, AutoModelForTokenClassification, AutoConfig
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from transformers import BertForTokenClassification, AdamW
from transformers import get_linear_schedule_with_warmup
from seqeval.metrics import accuracy_score
from sklearn.metrics import f1_score, classification_report, precision_score, recall_score
import torch.nn as nn
from tqdm import trange
import numpy as np
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize
import os
##########################################################
# import wandb
# from transformers import TrainingArguments, Trainer
# wandb.init(project="project", entity="3rd_year_project")
##########################################################
class NER_BERT(object):
device = torch.device("cuda")
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tag2idx = {'O':0, 'ID':1, 'PHI':2, 'NAME':3, 'CONTACT':4, 'DATE':5, 'AGE':6, 'PROFESSION':7, 'LOCATION':8, 'PAD': 9}
tag_values = ["O","ID", "PHI", "NAME", "CONTACT", "DATE", "AGE", "PROFESSION", "LOCATION", "PAD"]
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', num_labels=len(tag2idx), do_lower_case=False)
MAX_LEN = 75 # max length of sequence, needs for padding
bs = 32 # batch size
"""Abstract class that other NER plugins should implement"""
def __init__(self):
#config = AutoConfig.from_pretrained('ArishkaBelovishka/bert-i2b2')
#self.model = AutoModelForTokenClassification.from_pretrained('ArishkaBelovishka/bert-i2b2', config = config)
# Uncomment the following if you want to load your fine-tuned model from Models folder.
# If you just want to run NER use hugging-face repository where fine-tuned on half of i2b2 data model lives.
if os.path.exists("Models/BERT_epoch-10.pt"):
print("Loading model")
state_dict = torch.load("Models/BERT_epoch-10.pt", map_location=torch.device('cuda'))
print("Loaded model")
self.model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
state_dict = state_dict,
num_labels=len(NER_BERT.tag2idx),
output_attentions = True,
output_hidden_states = True
)
else:
self.model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
num_labels=len(NER_BERT.tag2idx),
output_attentions = True,
output_hidden_states = True
)
def perform_NER(self,text):
"""Implementation of the method that should perform named entity recognition"""
# tokenizer to divide data into sentences (thanks, nltk)
list_of_sents = sent_tokenize(text)
list_of_tuples_by_sent = []
for sent in list_of_sents:
# , truncation=True
tokenized_sentence = self.tokenizer.encode(sent, truncation=True) # BERT tokenizer is clever, it will internally divide the sentence by words, so all we need to provide there is sentence and it will return an array where each token is either special token/word/subword, refer to BERT WordPiece tokenizer approach
# truncation=True to comply with 512 length of the sentence
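            # For illustration (hypothetical input): a word such as "Arina" is split into the
            # WordPiece tokens "Ari" and "##na", so labels are predicted per piece and merged back below.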
input_ids = torch.tensor([tokenized_sentence])
with torch.no_grad():
# Run inference/classification
output = self.model(input_ids)
label_indices = np.argmax(output[0].to("cpu").numpy(), axis=2)
tokens = self.tokenizer.convert_ids_to_tokens(input_ids.to('cpu').numpy()[0])
new_tokens, new_labels = [], []
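            # merge WordPiece sub-tokens ("##...") back into whole words, keeping the label
            # predicted for the first piece of each word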
for token, label_idx in zip(tokens, label_indices[0]):
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(self.tag_values[label_idx])
new_tokens.append(token)
list_of_tuples = []
for token, label in zip(new_tokens, new_labels):
list_of_tuples.append((token, label))
#print("{}\t{}".format(label, token))
list_of_tuples_by_sent.append(list_of_tuples)
# remove [CLS] and [SEP] tokens to comply wth xml structure
for i in range(len(list_of_tuples_by_sent)):
for tag in self.tag_values:
if ('[CLS]', tag) in list_of_tuples_by_sent[i]:
list_of_tuples_by_sent[i].remove(('[CLS]', tag))
if ('[SEP]', tag) in list_of_tuples_by_sent[i]:
list_of_tuples_by_sent[i].remove(('[SEP]', tag))
return list_of_tuples_by_sent
# Needed for transform_sequences
def tokenize_and_preserve_labels(self, sentence, text_labels):
tokenized_sentence = []
labels = []
for word, label in zip(sentence, text_labels):
# Tokenize the word and count # of subwords the word is broken into
tokenized_word = NER_BERT.tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
# Add the tokenized word to the final tokenized word list
tokenized_sentence.extend(tokenized_word)
# Add the same label to the new list of labels `n_subwords` times
labels.extend([label] * n_subwords)
return tokenized_sentence, labels
def transform_sequences(self,tokens_labels):
"""method that transforms sequences of (token,label) into feature sequences. Returns two sequence lists for X and Y"""
print("I am in transform seq")
        # result - one document, result[i] is a sentence in the document, result[i][j] is a word in that sentence
tokenized_sentences = []
labels = []
for index, sentence in enumerate(tokens_labels):
text_labels = []
sentence_to_feed = []
for word_label in sentence:
text_labels.append(word_label[1])
sentence_to_feed.append(word_label[0])
a, b = self.tokenize_and_preserve_labels(sentence_to_feed, text_labels)
tokenized_sentences.append(a)
labels.append(b)
# Now need to split long tokenized sequences into subsequences of length less than 512 tokens
        # not to lose valuable information in NER, basically not to cut sentences
# i2b2 docs are very ugly and sentences in them are usually way too long as doctors forget to put full stops...
        # tokenized_sentences AND labels have the same structure of 2d arrays
        # I need to take care not to split the beginning of a word from its end, e.g.
# Arina is tokenized as "Ari" and "##na", thus I cannot separate the two, otherwise it will not make sense
distributed_tokenized_sentences, distributed_labels = [], []
for sent, label in zip(tokenized_sentences, labels):
if len(sent) > NER_BERT.MAX_LEN:
while len(sent) > NER_BERT.MAX_LEN:
#print("I am in while loop to truncate sequence")
index = NER_BERT.MAX_LEN - 2
for i in range(NER_BERT.MAX_LEN - 2, 0, -1):
if sent[i][:2] == "##":
index = index - 1
else:
break
                new_sent = sent[:index] # keep at most MAX_LEN - 2 tokens per chunk (room reserved for special tokens)
new_label = label[:index]
sent = sent[index:] # update given sent
label = label[index:]
distributed_tokenized_sentences.append(new_sent)
distributed_labels.append(new_label)
distributed_tokenized_sentences.append(sent)
distributed_labels.append(label)
#print(sent)
else:
distributed_tokenized_sentences.append(sent)
distributed_labels.append(label)
input_ids = pad_sequences([NER_BERT.tokenizer.convert_tokens_to_ids(txt) for txt in distributed_tokenized_sentences],
maxlen=NER_BERT.MAX_LEN, dtype="long", value=0.0,
truncating="post", padding="post")
tags = pad_sequences([[NER_BERT.tag2idx.get(l) for l in lab] for lab in distributed_labels],
maxlen=NER_BERT.MAX_LEN, value=NER_BERT.tag2idx["PAD"], padding="post",
dtype="long", truncating="post")
# Result is pair X (array of sentences, where each sentence is an array of words) and Y (array of labels)
return input_ids, tags
def learn(self, X_train,Y_train, epochs=1):
"""Function that actually train the algorithm"""
# if torch.cuda.is_available():
# self.model.cuda()
tr_masks = [[float(i != 0.0) for i in ii] for ii in X_train]
print("READY TO CREATE SOME TENZORS!!!!!!!!!!!!!!!!!!!!!!!!!!")
tr_inputs = torch.tensor(X_train).type(torch.long)
tr_tags = torch.tensor(Y_train).type(torch.long)
tr_masks = torch.tensor(tr_masks).type(torch.long)
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=NER_BERT.bs)
print("READY TO PREPARE OPTIMIZER!!!!!!!!!!!!!!!!!!!!!!!!!!")
        # Weight decay in Adam optimiser (adaptive gradient algorithm) is a regularisation technique which is extensively discussed in this paper:
# https://arxiv.org/abs/1711.05101
# (Like L2 for SGD but different)
        # regularisation of the model objective function in order to prevent overfitting of the model.
FULL_FINETUNING = True
if FULL_FINETUNING:
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01}, # in AdamW implementation (default: 1e-2)
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
else:
param_optimizer = list(self.model.classifier.named_parameters())
optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
# TODO: change to new implementation of AdamW: torch.optim.AdamW(...)
optimizer = AdamW(
optimizer_grouped_parameters,
lr=3e-5,
eps=1e-8
)
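        # A minimal sketch for the TODO above (not used here): torch.optim.AdamW accepts the same
        # grouped parameters but reads per-group decay from a 'weight_decay' key, e.g.
        # optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=3e-5, eps=1e-8)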
max_grad_norm = 1.0
# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
        # The scheduler linearly decays the learning rate over training (after an optional warmup),
        # which stabilises the later epochs and helps the model converge.
# https://sajjjadayobi.github.io/blog/markdown/2021/05/23/adamw-warmup.html
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
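        # with num_warmup_steps=0 the learning rate simply decays linearly from lr to 0 over total_steps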
print("START TRAINING!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
## Store the average loss after each epoch so we can plot them.
loss_values, validation_loss_values = [], []
# just for intermediate model save naming
epoch_num = 3
for _ in trange(epochs, desc="Epoch"):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
# clean the cache not to fail with video memory
# if torch.cuda.is_available():
# torch.cuda.empty_cache()
# just for intermediate model save naming
epoch_num += 1
# Put the model into training mode.
self.model.train()
# Reset the total loss for this epoch.
total_loss = 0
print("Start backprop and optimisation!!! Epoch has passed!!!!!!!!!!!!!!!!!!!!!!!")
# Training loop
for step, batch in enumerate(train_dataloader):
print("We are in the batch!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# add batch to gpu
batch = tuple(b.to(NER_BERT.device) for b in batch)
b_input_ids, b_input_mask, b_labels = batch
# Always clear any previously calculated gradients before performing a backward pass.
self.model.zero_grad()
# forward pass
# This will return the loss (rather than the model output)
# because we have provided the `labels`.
outputs = self.model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# get the loss
loss = outputs[0]
# Perform a backward pass to calculate the gradients.
loss.backward()
# track train loss
total_loss += loss.item()
# Clip the norm of the gradient
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(parameters=self.model.parameters(), max_norm=max_grad_norm)
# update parameters
optimizer.step()
# Update the learning rate.
scheduler.step()
print("We processed one batch!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_dataloader)
print("Average train loss: {}".format(avg_train_loss))
# Store the loss value for plotting the learning curve.
loss_values.append(avg_train_loss)
# Save intermediate weights of the model, i.e. if computer goes crazy and drops the training or you
# want to test the performance from different epochs
torch.save(self.model.state_dict(), os.path.join("Models_intermediate/", 'BERT_epoch-{}.pt'.format(epoch_num)))
#Plot the learning curve.
plt.figure()
plt.plot(loss_values, 'b-o', label="training loss")
# Label the plot.
plt.title("Learning curve")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
def evaluate(self, X_test,Y_test):
if torch.cuda.is_available():
self.model.cuda()
"""Function to evaluate algorithm"""
val_masks = [[float(i != 0.0) for i in ii] for ii in X_test]
val_inputs = torch.tensor(X_test).type(torch.long)
val_tags = torch.tensor(Y_test).type(torch.long)
val_masks = torch.tensor(val_masks).type(torch.long)
valid_data = TensorDataset(val_inputs, val_masks, val_tags)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=NER_BERT.bs)
# seed
# for _ in range(2):
#valid_dataloader = DataLoader(valid_data, shuffle=True, batch_size=NER_BERT.bs)
# for one random seed of valid_dataloader:
# ...
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
# Put the model into evaluation mode to set dropout and batch normalization layers to evaluation mode to have consistent results
self.model.eval()
# Reset the validation loss for this epoch.
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
predictions , true_labels = [], []
for batch in valid_dataloader:
batch = tuple(t.to(self.device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients,
# saving memory and speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions.
# This will return the logits rather than the loss because we have not provided labels.
outputs = self.model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# Move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences.
eval_loss += outputs[0].mean().item()
predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
true_labels.extend(label_ids)
eval_loss = eval_loss / len(valid_dataloader)
print("Validation loss: {}".format(eval_loss))
pred_tags = [NER_BERT.tag_values[p_i] for p, l in zip(predictions, true_labels)
for p_i, l_i in zip(p, l) if NER_BERT.tag_values[l_i] != "PAD"]
###############################################################################
# reconstruct given text for purposes of algorithms' performance comparison
# our X_test is again a list of sentences, i.e. 2d array
tokens = [self.tokenizer.convert_ids_to_tokens(sent) for sent in X_test]
# Unpack tokens into 1d array to be able to go through it with labels
# [PAD] and not just PAD because that is what BERT actually puts
tokens_flat = [item for sublist in tokens for item in sublist if item != "[PAD]"]
#for sentence in tokens:
new_tokens, new_labels = [], []
for token, pred in zip(tokens_flat, pred_tags):
#print("{}\t{}".format(token, pred))
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(pred)
new_tokens.append(token)
###############################################################################
valid_tags = [NER_BERT.tag_values[l_i] for l in true_labels
for l_i in l if NER_BERT.tag_values[l_i] != "PAD"]
print("Validation Accuracy: {}".format(accuracy_score(valid_tags, pred_tags))) # was other way around, why?
print("Validation F1-Score: {}".format(f1_score(valid_tags, pred_tags, average='weighted'))) # correct
print("Validation precision: {}".format(precision_score(valid_tags, pred_tags, average='weighted')))
print("Validation recall: {}".format(recall_score(valid_tags, pred_tags, average='weighted')))
labels = ["ID", "PHI", "NAME", "CONTACT", "DATE", "AGE",
"PROFESSION", "LOCATION"]
print(classification_report(valid_tags, pred_tags, digits=4, labels=labels))
print()
###############################################################################
# to evaluate union/intersection of algorithms
# for t, l in zip(new_tokens, new_labels):
# print("{}\t{}".format(t, l))
return new_labels
# # Use plot styling from seaborn.
# sns.set(style='darkgrid')
# # Increase the plot size and font size.
# sns.set(font_scale=1.5)
# plt.rcParams["figure.figsize"] = (12,6)
# # Plot the learning curve.
# plt.plot(loss_values, 'b-o', label="training loss")
# plt.plot(validation_loss_values, 'r-o', label="validation loss")
# # Label the plot.
# plt.title("Learning curve")
# plt.xlabel("Epoch")
# plt.ylabel("Loss")
# plt.legend()
# plt.show()
def save(self, model_path):
"""
Function to save model. Models are saved as h5 files in Models directory. Name is passed as argument
:param model_path: Name of the model file
:return: Doesn't return anything
"""
torch.save(self.model.state_dict(), "Models/"+model_path+".pt")
print("Saved model to disk")
|
[
"matplotlib.pyplot.title",
"seqeval.metrics.accuracy_score",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"sklearn.metrics.f1_score",
"torch.utils.data.TensorDataset",
"torch.device",
"torch.no_grad",
"torch.utils.data.DataLoader",
"os.path.exists",
"torch.utils.data.SequentialSampler",
"matplotlib.pyplot.show",
"tqdm.trange",
"matplotlib.pyplot.legend",
"sklearn.metrics.recall_score",
"transformers.get_linear_schedule_with_warmup",
"transformers.AdamW",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"nltk.tokenize.sent_tokenize",
"sklearn.metrics.precision_score",
"matplotlib.pyplot.xlabel",
"torch.tensor"
] |
[((1162, 1182), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1174, 1182), False, 'import torch\n'), ((2192, 2233), 'os.path.exists', 'os.path.exists', (['"""Models/BERT_epoch-10.pt"""'], {}), "('Models/BERT_epoch-10.pt')\n", (2206, 2233), False, 'import os\n'), ((3178, 3197), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (3191, 3197), False, 'from nltk.tokenize import sent_tokenize\n'), ((9640, 9683), 'torch.utils.data.TensorDataset', 'TensorDataset', (['tr_inputs', 'tr_masks', 'tr_tags'], {}), '(tr_inputs, tr_masks, tr_tags)\n', (9653, 9683), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((9708, 9733), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (9721, 9733), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((9761, 9830), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'NER_BERT.bs'}), '(train_data, sampler=train_sampler, batch_size=NER_BERT.bs)\n', (9771, 9830), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((11073, 11129), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': '(3e-05)', 'eps': '(1e-08)'}), '(optimizer_grouped_parameters, lr=3e-05, eps=1e-08)\n', (11078, 11129), False, 'from transformers import BertForTokenClassification, AdamW\n'), ((11636, 11734), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'total_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=total_steps)\n', (11667, 11734), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((12054, 12082), 'tqdm.trange', 'trange', (['epochs'], {'desc': '"""Epoch"""'}), "(epochs, desc='Epoch')\n", (12060, 12082), False, 'from tqdm import trange\n'), ((14939, 14951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14949, 14951), True, 'import matplotlib.pyplot as plt\n'), ((14960, 15011), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_values', '"""b-o"""'], {'label': '"""training loss"""'}), "(loss_values, 'b-o', label='training loss')\n", (14968, 15011), True, 'import matplotlib.pyplot as plt\n'), ((15046, 15073), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning curve"""'], {}), "('Learning curve')\n", (15055, 15073), True, 'import matplotlib.pyplot as plt\n'), ((15082, 15101), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (15092, 15101), True, 'import matplotlib.pyplot as plt\n'), ((15110, 15128), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (15120, 15128), True, 'import matplotlib.pyplot as plt\n'), ((15137, 15149), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15147, 15149), True, 'import matplotlib.pyplot as plt\n'), ((15159, 15169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15167, 15169), True, 'import matplotlib.pyplot as plt\n'), ((15221, 15246), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15244, 15246), False, 'import torch\n'), ((15591, 15637), 'torch.utils.data.TensorDataset', 'TensorDataset', (['val_inputs', 'val_masks', 'val_tags'], {}), '(val_inputs, val_masks, val_tags)\n', (15604, 15637), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((15662, 
15691), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['valid_data'], {}), '(valid_data)\n', (15679, 15691), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((15719, 15788), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'sampler': 'valid_sampler', 'batch_size': 'NER_BERT.bs'}), '(valid_data, sampler=valid_sampler, batch_size=NER_BERT.bs)\n', (15729, 15788), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3724, 3758), 'torch.tensor', 'torch.tensor', (['[tokenized_sentence]'], {}), '([tokenized_sentence])\n', (3736, 3758), False, 'import torch\n'), ((19642, 19711), 'sklearn.metrics.classification_report', 'classification_report', (['valid_tags', 'pred_tags'], {'digits': '(4)', 'labels': 'labels'}), '(valid_tags, pred_tags, digits=4, labels=labels)\n', (19663, 19711), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((3777, 3792), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3790, 3792), False, 'import torch\n'), ((9463, 9484), 'torch.tensor', 'torch.tensor', (['X_train'], {}), '(X_train)\n', (9475, 9484), False, 'import torch\n'), ((9520, 9541), 'torch.tensor', 'torch.tensor', (['Y_train'], {}), '(Y_train)\n', (9532, 9541), False, 'import torch\n'), ((9578, 9600), 'torch.tensor', 'torch.tensor', (['tr_masks'], {}), '(tr_masks)\n', (9590, 9600), False, 'import torch\n'), ((15413, 15433), 'torch.tensor', 'torch.tensor', (['X_test'], {}), '(X_test)\n', (15425, 15433), False, 'import torch\n'), ((15470, 15490), 'torch.tensor', 'torch.tensor', (['Y_test'], {}), '(Y_test)\n', (15482, 15490), False, 'import torch\n'), ((15528, 15551), 'torch.tensor', 'torch.tensor', (['val_masks'], {}), '(val_masks)\n', (15540, 15551), False, 'import torch\n'), ((16879, 16894), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16892, 16894), False, 'import torch\n'), ((19128, 19165), 'seqeval.metrics.accuracy_score', 'accuracy_score', (['valid_tags', 'pred_tags'], {}), '(valid_tags, pred_tags)\n', (19142, 19165), False, 'from seqeval.metrics import accuracy_score\n'), ((19245, 19296), 'sklearn.metrics.f1_score', 'f1_score', (['valid_tags', 'pred_tags'], {'average': '"""weighted"""'}), "(valid_tags, pred_tags, average='weighted')\n", (19253, 19296), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((19357, 19415), 'sklearn.metrics.precision_score', 'precision_score', (['valid_tags', 'pred_tags'], {'average': '"""weighted"""'}), "(valid_tags, pred_tags, average='weighted')\n", (19372, 19415), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((19463, 19518), 'sklearn.metrics.recall_score', 'recall_score', (['valid_tags', 'pred_tags'], {'average': '"""weighted"""'}), "(valid_tags, pred_tags, average='weighted')\n", (19475, 19518), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((2346, 2366), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2358, 2366), False, 'import torch\n'), ((17531, 17556), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(2)'}), '(logits, axis=2)\n', (17540, 17556), True, 'import numpy as np\n')]
|
'''
The Normal CDF
100xp
Now that you have a feel for how the Normal PDF looks, let's consider its CDF. Using the
samples you generated in the last exercise (in your namespace as samples_std1, samples_std3,
and samples_std10), generate and plot the CDFs.
Instructions
-Use your ecdf() function to generate x and y values for CDFs: x_std1, y_std1, x_std3, y_std3
and x_std10, y_std10, respectively.
-Plot all three CDFs as dots (do not forget the marker and linestyle keyword arguments!).
-Make a 2% margin in your plot.
-Hit submit to make a legend, showing which standard deviations you used, and to show your plot.
There is no need to label the axes because we have not defined what is being described by the Normal distribution; we are just looking at shapes of CDFs.
'''
import numpy as np
import matplotlib.pyplot as plt
def ecdf(data):
"""Compute ECDF for a one-dimensional array of measurements."""
# Number of data points: n
n = len(data)
# x-data for the ECDF: x
x = np.sort(data)
# y-data for the ECDF: y
y = np.arange(1, n+1) / n
return x, y
# Seed random number generator
np.random.seed(42)
# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3,
# samples_std10
samples_std1 = np.random.normal(20, 1, size=100000)
samples_std3 = np.random.normal(20, 3, size=100000)
samples_std10 = np.random.normal(20, 10, size=100000)
# Generate CDFs
x_std1, y_std1 = ecdf(samples_std1)
x_std3, y_std3 = ecdf(samples_std3)
x_std10, y_std10 = ecdf(samples_std10)
# Plot CDFs
_ = plt.plot(x_std1, y_std1, marker='.', linestyle='none')
_ = plt.plot(x_std3, y_std3, marker='.', linestyle='none')
_ = plt.plot(x_std10, y_std10, marker='.', linestyle='none')
# Make 2% margin
plt.margins(0.02)
# Make a legend and show the plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')
plt.show()
|
[
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.legend",
"numpy.sort",
"numpy.arange",
"numpy.random.normal"
] |
[((1123, 1141), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1137, 1141), True, 'import numpy as np\n'), ((1274, 1310), 'numpy.random.normal', 'np.random.normal', (['(20)', '(1)'], {'size': '(100000)'}), '(20, 1, size=100000)\n', (1290, 1310), True, 'import numpy as np\n'), ((1326, 1362), 'numpy.random.normal', 'np.random.normal', (['(20)', '(3)'], {'size': '(100000)'}), '(20, 3, size=100000)\n', (1342, 1362), True, 'import numpy as np\n'), ((1379, 1416), 'numpy.random.normal', 'np.random.normal', (['(20)', '(10)'], {'size': '(100000)'}), '(20, 10, size=100000)\n', (1395, 1416), True, 'import numpy as np\n'), ((1562, 1616), 'matplotlib.pyplot.plot', 'plt.plot', (['x_std1', 'y_std1'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_std1, y_std1, marker='.', linestyle='none')\n", (1570, 1616), True, 'import matplotlib.pyplot as plt\n'), ((1621, 1675), 'matplotlib.pyplot.plot', 'plt.plot', (['x_std3', 'y_std3'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_std3, y_std3, marker='.', linestyle='none')\n", (1629, 1675), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1736), 'matplotlib.pyplot.plot', 'plt.plot', (['x_std10', 'y_std10'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_std10, y_std10, marker='.', linestyle='none')\n", (1688, 1736), True, 'import matplotlib.pyplot as plt\n'), ((1755, 1772), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.02)'], {}), '(0.02)\n', (1766, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1812, 1877), 'matplotlib.pyplot.legend', 'plt.legend', (["('std = 1', 'std = 3', 'std = 10')"], {'loc': '"""lower right"""'}), "(('std = 1', 'std = 3', 'std = 10'), loc='lower right')\n", (1822, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1886, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1000, 1013), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (1007, 1013), True, 'import numpy as np\n'), ((1052, 1071), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1061, 1071), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Description
-----------
This module defines the :obj:`ParaMol.Tasks.parametrization.Parametrization` class, which is a ParaMol task that performs force field parametrization.
"""
import numpy as np
import logging
# ParaMol libraries
from .task import *
from ..Optimizers.optimizer import *
from ..Parameter_space.parameter_space import *
from ..Objective_function.objective_function import *
from ..Utils.interface import *
# ------------------------------------------------------------
# #
# PARAMETRIZATION TASK #
# #
# ------------------------------------------------------------
class Parametrization(Task):
"""
ParaMol parametrization task.
"""
def __init__(self):
pass
# ---------------------------------------------------------- #
# #
# PUBLIC METHODS #
# #
# ---------------------------------------------------------- #
def run_task(self, settings, systems, parameter_space=None, objective_function=None, optimizer=None, interface=None, adaptive_parametrization=False, apply_charge_correction=False, restart=False):
"""
Method that performs the standard ParaMol parametrization.
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
systems : list of :obj:`ParaMol.System.system.ParaMolSystem`
List containing instances of ParaMol systems.
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instances of ParameterSpace.
objective_function : :obj:`ParaMol.Objective_function.objective_function.ObjectiveFunction`
Instance of the objective function.
optimizer : one of the optimizers defined in the subpackage :obj:`ParaMol.Optimizers`
Instance of the optimizer.
        interface : :obj:`ParaMol.Utils.interface.ParaMolInterface`
            ParaMol interface instance.
adaptive_parametrization: bool
            Flag that signals whether this parametrization is being done inside an adaptive parametrization loop. If `True`, the system xml file is not written in this method (default is `False`).
apply_charge_correction : bool
Whether or not to apply charge correction. Important if charges are being optimized.
restart : bool
Flag that controls whether or not to perform a restart.
Returns
-------
systems, parameter_space, objective_function, optimizer
"""
print("!=================================================================================!")
print("! PARAMETRIZATION !")
print("!=================================================================================!")
for system in systems:
# Perform basic assertions
self._perform_assertions(settings, system)
# Create force field optimizable for every system
system.force_field.create_force_field_optimizable()
# Create IO Interface
if interface is None:
interface = ParaMolInterface()
else:
assert type(interface) is ParaMolInterface
# Create ParameterSpace
if parameter_space is None:
parameter_space = self.create_parameter_space(settings, systems, interface, restart=restart)
else:
assert type(parameter_space) is ParameterSpace
# Create properties and objective function
if objective_function is None:
properties = self.create_properties(settings.properties, settings.parameter_space, systems, parameter_space)
objective_function = self.create_objective_function(settings.objective_function, settings.restart, parameter_space, properties, systems)
else:
assert type(objective_function) is ObjectiveFunction
if settings.objective_function["parallel"]:
# Number of structures might have been changed and therefore it is necessary to re-initialize the parallel objective function
objective_function.init_parallel()
# Recalculate variance in case reference data has changed.
if objective_function.properties is not None:
for property in objective_function.properties:
property.calculate_variance()
'''
for prop in objective_function.properties:
if prop.name == "REGULARIZATION":
# TODO: if commented, reg in adaptive parametrization is done w.r.t. to the initial parameters at iter 0
#prop.set_initial_parameters_values(parameter_space.initial_optimizable_parameters_values_scaled)
pass
'''
# Print Initial Info of Objective Function
objective_function.f(parameter_space.optimizable_parameters_values_scaled, opt_mode=False)
# Create optimizer
if optimizer is None:
optimizer = self.create_optimizer(settings.optimizer["method"],
settings.optimizer[settings.optimizer["method"].lower()])
else:
assert type(optimizer) is Optimizer
# ================================================================================= #
# APPLY CHARGE CORRECTION #
# ================================================================================= #
if apply_charge_correction:
for system in systems:
# Apply charge correction
self._apply_charge_correction(system)
# Create optimizable force field
system.force_field.create_force_field_optimizable()
# Get optimizable parameters
parameter_space.get_optimizable_parameters(systems)
            # Calculate prior widths, scaling constants and apply jacobi preconditioning (they may have changed if the charges changed).
            # Otherwise, we may assume that the change is so small that it has no effect (a good approximation), hence these lines may remain commented out.
# parameter_space.calculate_scaling_constants()
# parameter_space.calculate_prior_widths()
parameter_space.jacobi_preconditioning()
# Update the OpenMM context
parameter_space.update_systems(systems, parameter_space.optimizable_parameters_values_scaled)
# ================================================================================= #
# END APPLY CHARGE CORRECTION #
# ================================================================================= #
# ================================================================================= #
        #                                 PARAMETERS OPTIMIZATION                           #
# ================================================================================= #
# Perform Optimization
print("Using {} structures in the optimization.".format(np.sum([system.n_structures for system in systems])))
parameters_values = self._perform_optimization(settings, optimizer, objective_function, parameter_space)
# Update the parameters in the force field
parameter_space.update_systems(systems, parameters_values)
# Print Final Info of Objective Function
objective_function.f(parameter_space.optimizable_parameters_values_scaled, opt_mode=False)
# Write ParameterSpace restart file
self.write_restart_pickle(settings.restart, interface, "restart_parameter_space_file", parameter_space.__dict__)
# Write final system to xml file
if not adaptive_parametrization:
for system in systems:
system.engine.write_system_xml("{}_reparametrized.xml".format(system.name))
print("!=================================================================================!")
print("! PARAMETRIZATION TERMINATED SUCCESSFULLY :) !")
print("!=================================================================================!")
return systems, parameter_space, objective_function, optimizer
# -----------------------------------------------------------#
# #
# PRIVATE METHODS #
# #
# -----------------------------------------------------------#
def _perform_optimization(self, settings, optimizer, objective_function, parameter_space):
"""
Method that wraps the functions used to perform the optimization of the parameters.
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instance of parameter space.
objective_function : :obj:`ParaMol.Objective_function.objective_function.ObjectiveFunction`
Instance of objective function.
optimizer : :obj:`ParaMol.Optimizers.optimizer.Optimizer`
Instance of optimizer.
Returns
-------
parameters_values: list
List of optimized parameters
"""
# Determine whether to perform constrained or unconstrained optimization
constrained = False
for parameter in parameter_space.optimizable_parameters:
if parameter.param_key == "charge":
# If charges are present in the optimizable parameters, perform constrained optimization
constrained = True
break
print("Number of parameters to be optimized: {}.".format(len(parameter_space.optimizable_parameters_values_scaled)))
if constrained:
print("ParaMol will perform constrained optimization.")
constraints = self._get_constraints(scipy_method=settings.optimizer["scipy"]["method"],
parameter_space=parameter_space)
parameters_values = optimizer.run_optimization(f=objective_function.f,
parameters_values=parameter_space.optimizable_parameters_values_scaled,
constraints=constraints)
else:
print("ParaMol will perform unconstrained optimization.")
parameters_values = optimizer.run_optimization(f=objective_function.f,
parameters_values=parameter_space.optimizable_parameters_values_scaled)
return parameters_values
def _apply_charge_correction(self, system):
"""
Method that applies charge correction to the system.
Parameters
----------
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Notes
        -----
Due to numerical errors, the numerical total charge of the system may not be equal to the real total charge of the system.
        Hence, in order to overcome this problem, which causes unexpected behaviour especially when constraints are being applied, the excess or deficiency of charge is shared equally amongst all atoms.
This usually changes the charge in each atom by a very small (negligible) amount.
Note that this method only changes the charges in the ParaMol ForceField of the ParaMolSystem. Therefore, it is required to update the OpenMM systems after this method is called.
Returns
-------
total_charge : float
Final total charge of the system.
"""
if "NonbondedForce" in system.force_field.force_field:
# Get total charge and calculate charge correction
total_charge = self._get_total_charge(system)
logging.info("Applying charge correction.")
logging.info("Total charge before correction: {}e .".format(total_charge))
charge_correction = total_charge / system.n_atoms
logging.info("Charge correction {}e per atom.".format(charge_correction))
# Add charge correction to all atoms
for sub_force in system.force_field.force_field["NonbondedForce"]:
for nonbonded_term in sub_force:
nonbonded_term.parameters["charge"].value -= charge_correction
total_charge = self._get_total_charge(system)
logging.info("Total charge after correction: {}e .\n".format(total_charge))
return total_charge
else:
logging.info("Not applying charge correction.")
return 1
# -----------------------------------------------------------#
# #
# STATIC METHODS #
# #
# -----------------------------------------------------------#
@staticmethod
def _get_total_charge(system):
"""
Method that gets the system's total charge as in the ParaMol ForceField of the ParaMolSystem.
Parameters
----------
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Returns
-------
total_charge : float
Final total charge of the system.
"""
total_charge = 0.0
if "NonbondedForce" in system.force_field.force_field:
for sub_force in system.force_field.force_field["NonbondedForce"]:
for nonbonded_term in sub_force:
total_charge += nonbonded_term.parameters["charge"].value
return total_charge
@staticmethod
def _perform_assertions(settings, system):
"""
        Method that asserts that the parametrization requested by the user contains the necessary data (coordinates, forces, energies, esp).
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Returns
-------
True
"""
assert system.ref_coordinates is not None, "Conformations data was not set."
if settings.properties["include_energies"]:
assert system.ref_energies is not None, "Energies were not set."
if settings.properties["include_forces"]:
assert system.ref_forces is not None, "Forces were not set."
if settings.properties["include_esp"]:
assert system.ref_esp is not None, "ESP was not set."
assert system.ref_esp_grid is not None, "ESP was not set."
return True
@staticmethod
def _get_constraints(scipy_method, parameter_space, total_charge=0.0, threshold=1e-8):
"""
Method that gets the constraints to be passed into the SciPy optimizer.
Parameters
----------
scipy_method : str
            SciPy method. Should be "COBYLA", "SLSQP" or "trust-constr".
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instance of parameter space.
total_charge : float
System's total charge
threshold : float
Constraint's threshold.
Returns
-------
list
List with constraints.
"""
if scipy_method == "COBYLA":
# Constraint functions must all be >=0 (a single function if only 1 constraint).
# Each function takes the parameters x as its first argument, and it can return either a single number or an array or list of numbers.
constraint_vector_charges = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
constraints = [
{'type': 'ineq', 'fun': lambda x, b: x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) - total_charge + threshold, 'args': (constraint_vector_charges,)},
{'type': 'ineq', 'fun': lambda x, b: -x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) + total_charge + threshold, 'args': (constraint_vector_charges,)}]
return constraints
elif scipy_method == "SLSQP":
            # Total charge constraint: the equality is enforced as a pair of inequalities within the threshold
constraint_vector_charges = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
constraints = [
{'type': 'ineq', 'fun': lambda x, b: x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) - total_charge + threshold, 'args': (constraint_vector_charges,)},
{'type': 'ineq', 'fun': lambda x, b: -x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) + total_charge + threshold, 'args': (constraint_vector_charges,)}]
return constraints
elif scipy_method == "trust-constr":
from scipy.optimize import LinearConstraint
constraint_vector = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
return LinearConstraint(constraint_vector, [total_charge - threshold], [total_charge + threshold])
else:
raise NotImplementedError("SciPy method {} does not support constraints.".format(scipy_method))
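# A minimal usage sketch (comments only, not part of this module): the task is
# normally driven from a ParaMol run script. `paramol_settings` and
# `paramol_system` below are placeholders standing in for a fully configured
# ParaMol setup, not objects defined here.
#
#   from ParaMol.Tasks.parametrization import Parametrization
#
#   parametrization = Parametrization()
#   systems, parameter_space, objective_function, optimizer = parametrization.run_task(
#       settings=paramol_settings, systems=[paramol_system], apply_charge_correction=True)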
|
[
"logging.info",
"scipy.optimize.LinearConstraint",
"numpy.sum",
"numpy.asarray"
] |
[((12519, 12562), 'logging.info', 'logging.info', (['"""Applying charge correction."""'], {}), "('Applying charge correction.')\n", (12531, 12562), False, 'import logging\n'), ((13266, 13313), 'logging.info', 'logging.info', (['"""Not applying charge correction."""'], {}), "('Not applying charge correction.')\n", (13278, 13313), False, 'import logging\n'), ((7538, 7589), 'numpy.sum', 'np.sum', (['[system.n_structures for system in systems]'], {}), '([system.n_structures for system in systems])\n', (7544, 7589), True, 'import numpy as np\n'), ((17981, 18077), 'scipy.optimize.LinearConstraint', 'LinearConstraint', (['constraint_vector', '[total_charge - threshold]', '[total_charge + threshold]'], {}), '(constraint_vector, [total_charge - threshold], [\n total_charge + threshold])\n', (17997, 18077), False, 'from scipy.optimize import LinearConstraint\n'), ((16661, 16674), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (16671, 16674), True, 'import numpy as np\n'), ((16854, 16867), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (16864, 16867), True, 'import numpy as np\n'), ((17358, 17371), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (17368, 17371), True, 'import numpy as np\n'), ((17551, 17564), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (17561, 17564), True, 'import numpy as np\n')]
|
import sys
sys.path.append('../')
import torchnet as tnt
from torch.autograd import Variable
import torch.nn.functional as F
from model_utils.load_utils import load_model, SAVE_ROOT
from model_utils.model_utils import get_layer_names
MODEL_NAME='mobilenetv2_imagenet'
model_init,model = load_model(MODEL_NAME)
layer_names, conv_layer_mask = get_layer_names(model,'conv')
layer_names_bn, bn_layer_mask = get_layer_names(model,'batchnorm')
fc_layer_mask = (1 - conv_layer_mask).astype(bool)
print(model)
bs = 64
from tensor_compression import get_compressed_model
import copy
import torch
import os
import numpy as np
CONV_SPLIT = 3
n_layers = len(layer_names)
n_layers_bn = len(layer_names_bn)
#decomposition_conv = 'cp3'
decomposition_conv = 'tucker2'
#X_FACTOR used (how much each layer will be compressed):
WEAKEN_FACTOR = None
X_FACTOR = 1.71
rank_selection_suffix = "{}x".format(X_FACTOR)
#specify rank of each layer
ranks_conv = [None if not (name.endswith('conv.2') or name.endswith('0.0') ) else -X_FACTOR
for name in layer_names[conv_layer_mask]]
ranks_fc = [-X_FACTOR] * (len(layer_names[fc_layer_mask]))
ranks_conv[0] = None
ranks_conv[1] = None
ranks_conv[2] = -X_FACTOR
ranks = np.array([None] * len(layer_names))
ranks[conv_layer_mask] = ranks_conv
decompositions = np.array([None] * len(layer_names))
decompositions[conv_layer_mask] = decomposition_conv
SPLIT_FACTOR = CONV_SPLIT
save_dir = "{}/models_finetuned/{}/{}/{}/layer_groups:{}".format(SAVE_ROOT,MODEL_NAME,
decomposition_conv,
rank_selection_suffix,
SPLIT_FACTOR)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
device = 'cuda'
split_tuples = np.array_split(np.arange(n_layers)[conv_layer_mask], CONV_SPLIT)[::-1]
split_tuples.reverse()
compressed_model = copy.deepcopy(model)
print(ranks)
for local_iter, tupl in enumerate(split_tuples):
lname,lname_bn, rank, decomposition = layer_names[tupl], layer_names_bn[tupl],ranks[tupl], decompositions[tupl]
if isinstance(tupl[0], np.ndarray):
print(lname, tupl[0])
compressed_model = get_compressed_model(MODEL_NAME,compressed_model,
ranks=rank, layer_names = lname, layer_names_bn = lname_bn,
decompositions = decomposition,
vbmf_weaken_factor = WEAKEN_FACTOR,return_ranks=True)
print(compressed_model)
#
filename = "{}/mobilenetv2_hooi.pth.tar".format(save_dir)
torch.save(compressed_model,filename)
print(filename)
def test(model,test_loader):
model.eval()
test_loss = tnt.meter.AverageValueMeter()
correct = 0
with torch.no_grad():
for data, target,index in test_loader:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
loss=F.cross_entropy(output, target)
test_loss.add(loss.item()) # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss.value()[0], correct, len(test_loader.sampler),  # report the meter's mean, not the last batch loss
100. * float(correct) / len(test_loader.sampler)))
return float(correct) / float(len(test_loader.sampler))
from collections import defaultdict
def count_params(model):
n_params = defaultdict()
for name, param in model.named_parameters():
n_params[name] = param.numel()
return n_params
def count_params_by_layers(params_count_dict):
params_count_dict_modif = defaultdict()
for k, v in params_count_dict.items():
if '-' not in k:
            # str.strip removes a set of characters, not a suffix; drop the final ".weight"/".bias" component instead
            k_head = k.rsplit('.', 1)[0]
try:
params_count_dict_modif[k_head] += params_count_dict[k]
except:
params_count_dict_modif[k_head] = params_count_dict[k]
else:
k_head = '.'.join(k.split('-')[0].split('.')[:-1])
try:
params_count_dict_modif[k_head] += params_count_dict[k]
except:
params_count_dict_modif[k_head] = params_count_dict[k]
return params_count_dict_modif
params_count_dict_m = count_params(model)
params_count_dict_cm = count_params(compressed_model)
params_count_dict_m_init = count_params(model_init)
num_parameters = sum([param.nelement() for param in compressed_model.parameters()])
num_parameters1 = sum([param.nelement() for param in model.parameters()])
num_parameters2 = sum([param.nelement() for param in model_init.parameters()])
print('Params, a:initial, b:pruned, c:decomposed ')
x1=sum(params_count_dict_m.values())/sum(params_count_dict_cm.values())
x11=sum(params_count_dict_m_init.values())/sum(params_count_dict_cm.values())
print('a: '+str(sum(params_count_dict_m_init.values())))
print('a: '+str(num_parameters2))
print('b: '+str(sum(params_count_dict_m.values())))
print('b: '+str(num_parameters1))
print('c: '+str(sum(params_count_dict_cm.values())))
print('c: '+str(num_parameters))
print('Params ratio, a:initial/decomposed, b:pruned/decomposed')
print('a: '+str(x11))
print('b: '+str(x1))
print('a: '+str(num_parameters2/num_parameters))
print('b: '+str(num_parameters1/num_parameters))
print('Params pruned, a:decomposed to initial, b:decomposed to pruned')
print('a: '+str(1-num_parameters/num_parameters2))
print('b: '+str(1-num_parameters/num_parameters1))
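# Per-layer breakdown (a sketch; the script defines count_params_by_layers above
# but never calls it): compare how many parameters each layer group keeps after
# decomposition. Layer groups whose names change during decomposition are simply
# skipped by the membership check.
per_layer_m = count_params_by_layers(params_count_dict_m)
per_layer_cm = count_params_by_layers(params_count_dict_cm)
for layer_name, n_before in per_layer_m.items():
    if layer_name in per_layer_cm:
        print(layer_name, n_before, '->', per_layer_cm[layer_name])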
#
import sys
sys.path.append("../")
from flopco import FlopCo
model.cpu()
model_init.cpu()
compressed_model.cpu()
flopco_m = FlopCo(model, img_size=(1, 3, 224, 224), device='cpu')
flopco_m_init = FlopCo(model_init, img_size=(1, 3, 224, 224), device='cpu')
flopco_cm = FlopCo(compressed_model, img_size=(1, 3, 224, 224), device='cpu')
print('FLOPs a:init/decomposed, b:pruned/decomposed')
print('a: '+str(flopco_m_init.total_flops / flopco_cm.total_flops))
print('b: '+str(flopco_m.total_flops / flopco_cm.total_flops))
print('FLOPs pruned, a:decomposed to initial, b:decomposed to pruned')
print('a: '+str(1-flopco_cm.total_flops/flopco_m_init.total_flops) )
print('b: '+str(1-flopco_cm.total_flops/flopco_m.total_flops) )
|
[
"sys.path.append",
"tensor_compression.get_compressed_model",
"copy.deepcopy",
"os.makedirs",
"torch.autograd.Variable",
"flopco.FlopCo",
"os.path.exists",
"torchnet.meter.AverageValueMeter",
"torch.nn.functional.cross_entropy",
"model_utils.model_utils.get_layer_names",
"torch.save",
"collections.defaultdict",
"numpy.arange",
"torch.no_grad",
"model_utils.load_utils.load_model"
] |
[((16, 38), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (31, 38), False, 'import sys\n'), ((307, 329), 'model_utils.load_utils.load_model', 'load_model', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (317, 329), False, 'from model_utils.load_utils import load_model, SAVE_ROOT\n'), ((364, 394), 'model_utils.model_utils.get_layer_names', 'get_layer_names', (['model', '"""conv"""'], {}), "(model, 'conv')\n", (379, 394), False, 'from model_utils.model_utils import get_layer_names\n'), ((426, 461), 'model_utils.model_utils.get_layer_names', 'get_layer_names', (['model', '"""batchnorm"""'], {}), "(model, 'batchnorm')\n", (441, 461), False, 'from model_utils.model_utils import get_layer_names\n'), ((2097, 2117), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2110, 2117), False, 'import copy\n'), ((2790, 2828), 'torch.save', 'torch.save', (['compressed_model', 'filename'], {}), '(compressed_model, filename)\n', (2800, 2828), False, 'import torch\n'), ((5879, 5901), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (5894, 5901), False, 'import sys\n'), ((5998, 6052), 'flopco.FlopCo', 'FlopCo', (['model'], {'img_size': '(1, 3, 224, 224)', 'device': '"""cpu"""'}), "(model, img_size=(1, 3, 224, 224), device='cpu')\n", (6004, 6052), False, 'from flopco import FlopCo\n'), ((6069, 6128), 'flopco.FlopCo', 'FlopCo', (['model_init'], {'img_size': '(1, 3, 224, 224)', 'device': '"""cpu"""'}), "(model_init, img_size=(1, 3, 224, 224), device='cpu')\n", (6075, 6128), False, 'from flopco import FlopCo\n'), ((6143, 6208), 'flopco.FlopCo', 'FlopCo', (['compressed_model'], {'img_size': '(1, 3, 224, 224)', 'device': '"""cpu"""'}), "(compressed_model, img_size=(1, 3, 224, 224), device='cpu')\n", (6149, 6208), False, 'from flopco import FlopCo\n'), ((1888, 1912), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1902, 1912), False, 'import os\n'), ((1918, 1939), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1929, 1939), False, 'import os\n'), ((2399, 2597), 'tensor_compression.get_compressed_model', 'get_compressed_model', (['MODEL_NAME', 'compressed_model'], {'ranks': 'rank', 'layer_names': 'lname', 'layer_names_bn': 'lname_bn', 'decompositions': 'decomposition', 'vbmf_weaken_factor': 'WEAKEN_FACTOR', 'return_ranks': '(True)'}), '(MODEL_NAME, compressed_model, ranks=rank, layer_names=\n lname, layer_names_bn=lname_bn, decompositions=decomposition,\n vbmf_weaken_factor=WEAKEN_FACTOR, return_ranks=True)\n', (2419, 2597), False, 'from tensor_compression import get_compressed_model\n'), ((2907, 2936), 'torchnet.meter.AverageValueMeter', 'tnt.meter.AverageValueMeter', ([], {}), '()\n', (2934, 2936), True, 'import torchnet as tnt\n'), ((3796, 3809), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (3807, 3809), False, 'from collections import defaultdict\n'), ((3998, 4011), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (4009, 4011), False, 'from collections import defaultdict\n'), ((2962, 2977), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2975, 2977), False, 'import torch\n'), ((1992, 2011), 'numpy.arange', 'np.arange', (['n_layers'], {}), '(n_layers)\n', (2001, 2011), True, 'import numpy as np\n'), ((3190, 3221), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (3205, 3221), True, 'import torch.nn.functional as F\n'), ((3107, 3121), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (3115, 3121), False, 'from 
torch.autograd import Variable\n'), ((3123, 3139), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (3131, 3139), False, 'from torch.autograd import Variable\n')]
|
"""Example program to show how to read a multi-channel time series from LSL."""
import math
import threading
# import pygame
from random import random
from sklearn.preprocessing import OneHotEncoder
from pylsl import StreamInlet, resolve_stream
import numpy as np
import pandas as pd
import time
from sklearn import model_selection
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import warnings
from statistics import mode
from datetime import datetime
import sys
import os
import models
import pywt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings('error')
def handle_keyboard_chunk(chunk, keys):
''' Returns the button statuses from the LSL keyboard chunk '''
ks, times = chunk
new_chunk = [[], []]
for i in range(len(ks)):
if ks[i][0] in ('LCONTROL pressed', 'LCONTROL released', 'RCONTROL pressed', 'RCONTROL released'):
new_chunk[0].append(ks[i])
new_chunk[1].append(times[i])
chunk = tuple(new_chunk)
if not chunk[0]:
if keys is None:
return [[0, 0, 0]], False
return keys, False
if keys is None:
        keys = [0, 0]  # no previous state: both control keys start released (matches the keys[-1][:2] branch below)
else:
keys = list(keys[-1][:2])
    out = np.zeros((0, 3))  # data should be appended in the format LCONTROL, RCONTROL, TIME
for i in range(len(chunk[0])):
action = chunk[0][i][0]
timestamp = chunk[1][i]
if action == 'LCONTROL pressed':
keys[0] = 1
elif action == 'LCONTROL released':
keys[0] = 0
elif action == 'RCONTROL pressed':
keys[1] = 1
elif action == 'RCONTROL released':
keys[1] = 0
else:
continue
out = np.append(out, [keys + [timestamp]], axis=0)
if len(out) == 0:
return keys, False
return out, True
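# A minimal illustration (synthetic chunk, not recorded LSL data) of the expected
# input/output: pull_chunk() yields a (markers, timestamps) pair, and each output
# row is [left_ctrl, right_ctrl, timestamp].
_example_chunk = ([['LCONTROL pressed'], ['LCONTROL released']], [0.10, 0.35])
_example_keys, _example_changed = handle_keyboard_chunk(_example_chunk, [[0, 0, 0.0]])
# -> array([[1., 0., 0.10], [0., 0., 0.35]]) and True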
def normalise_list(x):
x = np.array(x)
try:
out = ((x -x.min()) / (x.max() - x.min())).tolist()
except Warning:
out = [np.zeros(len(x[0])).tolist()]
return out
def normalise_eeg(eeg):
return [normalise_list(eeg[i::8]) for i in range(8)]
def my_filter(x, y, a=None, b=None):
# b = [0.9174, -0.7961, 0.9174]
# a = [-1, 0.7961, -0.8347]
# Parameters for a 40-hz low-pass filter
if a is None:
a = [-1, 0.331]
if b is None:
b = [0.3345, 0.3345]
if len(y) > len(a):
for col in range(len(y[-1][:-3])):
y[-1][col] = sum(a[i]*y[-1-i][col] + b[i]*x[-1-i][col] for i in range(len(a)))
# for i in range(len(a)):
# y[-1][col] += a[i]*y[-1-i][col] + b[i]*x[-1-i][col]
return y
def fir_filter(x, y, a=None):
if a is None:
a = [1.4, -0.8, 1.4] # 50 Hz notch filter
# a = [1] # do nothing
if len(x) >= len(a):
for col in range(len(y[-1][:-3])):
y[-1][col] = sum([a[i]*x[-1-i][col] for i in range(len(a))])
# print(y[-1][col])
return y
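# Reading of the two helpers above (comments only, no new behaviour): both rewrite
# only the newest row of `y`, leaving the trailing [timestamp, left, right] entries
# untouched. my_filter applies
#     y[n][col] = sum_i ( a[i] * y[n-i][col] + b[i] * x[n-i][col] )
# where the newest y row is zero-initialised in eeg_sample, so the i = 0 feedback
# term contributes nothing; fir_filter applies the purely feed-forward
#     y[n][col] = sum_i a[i] * x[n-i][col]
# with the default coefficients documented above (low-pass / 50 Hz notch).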
class EEG:
def __init__(self, user_id, game, data_length=100, ignore_lsl=False, ignore_BCI=False):
# first resolve an EEG stream on the lab network
self.user_id = user_id
self.game = game
self.data_length = data_length
if not ignore_lsl:
print("looking for an Keyboard stream...")
self.keyboard = resolve_stream('name', 'Keyboard')
print(self.keyboard)
self.keyboard_inlet = StreamInlet(self.keyboard[0])
if not ignore_lsl and not ignore_BCI:
print("looking for an EEG stream...")
self.eeg = resolve_stream('type', 'EEG')
print(self.eeg)
self.eeg_inlet = StreamInlet(self.eeg[0])
self.eeg_dataset = [] # of the format [channel0, c1, ..., timestamp, left_shift, right_shift]
self.filtered = []
self.fft = []
self.keys = None
self.running = False
self.clf = None
self.acc = 0
@property
def prev_dl(self):
return np.array([item[:-3] for item in self.filtered[-1:-1-self.data_length:-1]]).T.tolist()
def eeg_sample(self, data=None):
if data is None:
sample, timestamp = self.eeg_inlet.pull_sample()
data = [sample + [timestamp] + list(self.keys[-1][:2])]
self.eeg_dataset += data
self.filtered += [[0]*8 + list(data[0][-3:])]
# self.filtered = my_filter(self.eeg_dataset, self.filtered)
# self.filtered = my_filter(self.eeg_dataset, self.filtered, a=[-1, 1.452, -0.4523], b=[0.2737, 0, -0.2737])
self.filtered = my_filter(self.eeg_dataset, self.filtered,
b=[float(i) for i in '0.3749 -0.2339 0 0.2339 -0.3749'.split()],
a=[-1*float(i) for i in '1.0000 -1.8173 1.9290 -1.3011 0.2154'.split()]) # this one also works well!
# self.filtered = fir_filter(self.eeg_dataset, self.filtered) # this works well!
if len(self.filtered) > self.data_length:
norm = normalise_eeg(self.prev_dl)
fft = np.array([np.abs(np.fft.fft(n)) for n in norm]).flatten().tolist()
# fft = normalise_list(np.array([pywt.dwt(n, 'db2') for n in norm])[:100].flatten())
self.fft += [fft + self.filtered[-1][-2:]]
def mi_to_fft(self):
hist_mi = [f for f in os.listdir('users/data') if 'mi_' + self.user_id == f[:5]]
hist_fft = [f for f in os.listdir('users/data') if 'fft_' + self.user_id == f[:6]]
needed_hist_fft = []
for fmi in hist_mi:
if 'fft_' + fmi[3:] not in hist_fft:
needed_hist_fft.append(fmi)
print('need to convert to fft:', needed_hist_fft)
print('loading {}'.format(needed_hist_fft))
for mi_file in needed_hist_fft:
loaded_data = np.load('users/data/' + mi_file)
self.eeg_dataset = []
self.filtered = []
self.fft = []
t0 = time.time()
for row in range(len(loaded_data)):
data = [loaded_data[row]]
self.eeg_sample(data)
if row % 1000 == 500:
tr = (time.time() - t0) * (len(loaded_data) - row) / row
print('time remaining: {}'.format(tr))
print()
fft_name = 'users/data/fft_' + mi_file[3:]
print('outputting to', fft_name)
np.save(fft_name, self.fft)
# print(pd.DataFrame(self.fft))
# good = 'users/data/good_' + mi_file[3:]
# good = np.load(good)
# print(pd.DataFrame(good))
#
# print(f'{np.array_equal(self.fft, good) = }')
def gather_data(self):
thread = threading.Thread(target=self.__gather)
thread.start()
return thread
def __gather(self):
self.running = True
self.eeg_dataset = []
self.filtered = []
self.fft = []
while self.running:
# get a new sample (you can also omit the timestamp part if you're not interested in it)
chunk = self.keyboard_inlet.pull_chunk()
self.keys, is_new = handle_keyboard_chunk(chunk, self.keys)
self.eeg_sample() # get and process the latest sample from the EEG headset
self.save_training()
def train(self, classifier='KNN', include_historical=False, **kwargs):
thread = threading.Thread(target=self.__train, args=(classifier, include_historical), kwargs=kwargs)
thread.start()
return thread
def __train(self, classifier='KNN', include_historical=False, **kwargs):
print('data recording complete. building model... (this may take a few moments)')
# hist_fft = [f for f in os.listdir('users/data') if 'fft_' + self.user_id in f and 'npy' in f] # grab historical data for user
#
# # take only the most recent data if we don't include_historical
# if not include_historical or classifier == 'ANN':
# print('ignoring historical data...')
# hist_fft = [hist_fft[-1]]
#
# print('loading {}'.format(hist_fft))
# data = [np.load('users/data/' + f).tolist()[::5] for f in hist_fft]
#
# # X = [dat[:][:-2] for dat in data]
# # Y_i = [dat[:][-2:] for dat in data]
# # Y_o = []
# # X_o = []
# data_o = []
#
# # merge historical data together
# for i in range(len(data)):
# # Y_o += Y_i[i]
# # X_o += X[i]
# print('data', i, 'shape', np.array(data[i]).shape)
# data_o += data[i]
def flatten(t):
return [item for sublist in t for item in sublist]
def get_fmi_dl(index, data, length=100):
np_fmi = np.array(data[index:index + length])
x = flatten(np_fmi[:, :-3].tolist())
y = np_fmi[-1, -2:].tolist()
return [x + y]
data = self.filtered
data_o = []
for line in range(len(data)-100):
data_o += get_fmi_dl(line, data)
# data_o = data
print('balancing data')
# print(data_o)
print('data shape:', np.array(data_o).shape)
fft_df = pd.DataFrame(data_o, columns=['c' + str(i) for i in range(802)])
fft_df['y'] = fft_df.apply(lambda row: row.c800 + 2 * row.c801, axis=1)
fft_df = fft_df.loc[fft_df['y'] != 3].reset_index(drop=True)
m = min(fft_df.y.value_counts()) # grab the count of the least common y value (left, right, or none)
y_vals = fft_df.y.unique()
print('got min={}, unique={}'.format(m, y_vals))
randomized_df = fft_df.sample(frac=1).reset_index(drop=True)
out = np.zeros((m*3, 803))
for i, y in enumerate(y_vals):
arr = randomized_df.loc[randomized_df['y'] == y].head(m).to_numpy()
out[i*m:i*m + m] = arr
print('consolidated data')
randomized_df = pd.DataFrame(out)
randomized_df = randomized_df.sample(frac=1).reset_index(drop=True)
print('reordered data')
Y = randomized_df[[800, 801]].to_numpy()
del randomized_df[800], randomized_df[801], randomized_df[802]
X = randomized_df.to_numpy()
print('created X and Y. X.shape={}, Y.shape={}'.format(X.shape, Y.shape))
# y =
# one hot encoding for Y values
# Y_i = list(Y_o)
Y_i = [[0], [1], [2], [3]] + [[2*Y[i][-2] + Y[i][-1]] for i in range(len(Y))]
enc = OneHotEncoder()
print('fitting one hot encoder')
enc.fit(Y_i)
# X = X_o
Y = enc.transform(Y_i).toarray()[4:]
if len(X) == 0 or len(Y) == 0:
print('no training data provided')
return
def train_test_split(X, Y, test_size):
stop_idx = int(len(Y) * test_size)
return X[:stop_idx], X[stop_idx:], Y[:stop_idx], Y[stop_idx:]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
# if classifier == 'KNN':
# self.clf = models.KNN(n_neighbors=3, **kwargs)
# elif classifier == "LDA":
# self.clf = models.LDA()
# elif classifier == "SVM":
# self.clf = models.SVM(**kwargs)
# elif classifier == "ANN":
# self.clf = models.ANN(**kwargs)
# elif classifier == "RNN":
# self.clf = models.RNN(**kwargs)
# elif classifier == "CNN":
# self.clf = models.CNN(**kwargs)
# else:
# print('no valid classifier provided ({}). Using KNN'.format(classifier))
# self.clf = models.KNN(n_neighbors=3)
print('training model ({} classifier)...'.format(self.clf))
self.clf.fit(X_train, Y_train)
print('analysing model...')
preds = self.clf.predict(X_test)
acc = accuracy_score(Y_test, preds)
print('combined acc:', acc)
self.acc = round(acc, 4)
print('combined acc:', self.acc)
print()
print('model complete.')
def build_model(self, classifier, **kwargs):
thread = threading.Thread(target=self._build_model, args=(classifier, ), kwargs=kwargs)
thread.start()
return thread
def _build_model(self, classifier, **kwargs):
if classifier == 'KNN':
self.clf = models.KNN(n_neighbors=3, **kwargs)
elif classifier == "LDA":
self.clf = models.LDA()
elif classifier == "SVM":
self.clf = models.SVM(**kwargs)
elif classifier == "ANN":
self.clf = models.ANN(**kwargs)
elif classifier == "RNN":
self.clf = models.RNN(**kwargs)
elif classifier == "CNN":
self.clf = models.CNN2(transfer=True, **kwargs)
else:
print(f'no valid classifier provided ({classifier}). Using KNN')
self.clf = models.KNN(n_neighbors=3)
def save_training(self):
suffix = '_' + datetime.today().strftime('%d%m%y_%H%M%S') + '.npy'
print('saving eeg data:', np.array(self.eeg_dataset).shape)
eeg_file = './users/data/mi_' + self.user_id + suffix
np.save(eeg_file, self.eeg_dataset)
print('saving filtered eeg data:', np.array(self.filtered).shape)
filt_eeg_file = './users/data/fmi_' + self.user_id + suffix
np.save(filt_eeg_file, self.filtered)
print('saving filtered fft data:', np.array(self.fft).shape)
fft_eeg_file = './users/data/fft_' + self.user_id + suffix
np.save(fft_eeg_file, self.fft)
def test(self, send_to=None):
thread = threading.Thread(target=self.__test, args=(send_to, ))
thread.start()
return thread
def __test(self, send_to=None):
assert self.clf
self.running = True
self.eeg_dataset = []
self.filtered = []
self.fft = []
last_preds = []
def flatten(t):
return [item for sublist in t for item in sublist]
def get_fmi_dl(index, data, length=100):
np_fmi = np.array(data[index:index + length])
x = flatten(np_fmi[:, :-3].tolist())
return [x]
while self.running:
self.eeg_sample()
if len(self.filtered) > self.data_length:
pred = self.clf.predict(get_fmi_dl(-101, self.filtered))
# if pred[0][2]:
# last_preds += [1]
# elif pred[0][1]:
# last_preds += [-1]
# else:
# last_preds += [0]
# if len(last_preds) >= 25:
# last_preds = last_preds[1:]
# avg = sum(last_preds) / len(last_preds)
# left = avg < -0.25
# right = avg > 0.25
left = pred[0][0] or pred[0][2]
right = pred[0][1] or pred[0][2]
if send_to:
send_to((left, right))
elif send_to:
send_to((0, 0))
def close(self):
print('closing eeg and keyboard streams')
if hasattr(self, 'eeg_inlet'):
self.eeg_inlet.close_stream()
if hasattr(self, 'keyboard_inlet'):
self.keyboard_inlet.close_stream()
def main(user_id, train_time=30, test_time=30, classifier='CNN', model=''):
import motor_bci_game
while len(user_id) != 2:
user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
if len(user_id) == 2:
print('user_id={}'.format(user_id))
break
print('user ID must be 2 digits, you put', len(user_id))
game = motor_bci_game.Game()
eeg = EEG(user_id, game)
gathering = eeg.gather_data() # runs in background
eeg.build_model(classifier=classifier, model=model)#, model_location="cnn_model_8_11_22_32") # runs in background
game.run_keyboard(run_time=train_time) # runs in foreground
eeg.running = False
while gathering.is_alive(): pass
print(game.e.scores)
game.e.scores = [0]
training = eeg.train(classifier=classifier, include_historical=False)#, model_location='cnn_model_8_11_22_32') #, decision_function_shape="ovo")
while training.is_alive(): pass
eeg.running = False # stop eeg gathering once game completes
time.sleep(5)
print('testing')
testing = eeg.test(send_to=game.p1.handle_keys)
game.run_eeg(test_time)
eeg.running = False
while testing.is_alive():
pass
eeg.close()
print('scores:', game.e.scores)
print('acc:', eeg.acc)
game.quit()
sys.exit()
def main_game_2(user_id, train_time=30, test_time=30, classifier='CNN'):
import game_2
while len(user_id) != 2:
user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
if len(user_id) == 2:
print('user_id={}'.format(user_id))
break
print('user ID must be 2 digits, you put', len(user_id))
game = game_2.Game()
eeg = EEG(user_id, game)
gathering = eeg.gather_data() # runs in background
eeg.build_model(classifier=classifier)#, model_location="cnn_model_8_11_22_32") # runs in background
game.run_keyboard(run_time=train_time) # runs in foreground
eeg.running = False
while gathering.is_alive(): pass
training = eeg.train(classifier=classifier, include_historical=False)#, model_location='cnn_model_8_11_22_32') #, decision_function_shape="ovo")
while training.is_alive(): pass
eeg.running = False # stop eeg gathering once game completes
print('scores:', game.block.scores)
game.block.scores = [0]
# time.sleep(5)
print('testing')
testing = eeg.test(send_to=game.block.handle_keys)
game.run_eeg(test_time)
eeg.running = False
while testing.is_alive():
pass
eeg.close()
print('scores:', game.block.scores)
total = sum(game.block.scores) + len(game.block.scores) - 1
print('total blocks:', total)
print('percent caught:', sum(game.block.scores) / total)
game.quit()
sys.exit()
def train_test(user_id):
import motor_bci_game
# while len(user_id) != 2:
# user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
# if len(user_id) == 2:
# print(f'{user_id=}')
# break
# print('user ID must be 2 digits, you put', len(user_id))
game = motor_bci_game.Game()
eeg = EEG(user_id, game, ignore_lsl=True)
training = eeg.train(classifier='CNN', include_historical=False, model='new_test') #, decision_function_shape="ovo")
while training.is_alive(): pass
eeg.close()
print('scores:', game.e.scores)
game.quit()
sys.exit()
def convert_mi_to_fft(user_id):
import motor_bci_game
# user_id = '00'# + str(i)
print('user_id={}'.format(user_id))
game = motor_bci_game.Game()
eeg = EEG(user_id, game, ignore_lsl=True)
eeg.mi_to_fft()
eeg.close()
game.quit()
sys.exit()
if __name__ == '__main__':
user_id = '-5' # -9 is cameron post-training recordings, -8 is same for kevin
mode = 2
if mode == 1:
good = np.load('users/data/fmi_01_300921_211231.npy')
print(pd.DataFrame(good))
elif mode == 2:
main(user_id=user_id,
train_time=30,
test_time=30,
model='models/p00_models/cnn_model_2_200',
classifier='LDA'
)
elif mode == 3:
convert_mi_to_fft(user_id)
elif mode == 4:
train_test(user_id)
elif mode == 5:
main_game_2(user_id=user_id,
train_time=30,
test_time=30)
print('done?')
|
[
"motor_bci_game.Game",
"numpy.load",
"models.CNN2",
"sklearn.metrics.accuracy_score",
"models.RNN",
"models.LDA",
"pandas.DataFrame",
"models.KNN",
"numpy.fft.fft",
"pylsl.resolve_stream",
"pylsl.StreamInlet",
"numpy.append",
"threading.Thread",
"numpy.save",
"datetime.datetime.today",
"sklearn.preprocessing.OneHotEncoder",
"time.sleep",
"os.listdir",
"sys.exit",
"warnings.filterwarnings",
"models.SVM",
"game_2.Game",
"numpy.zeros",
"time.time",
"numpy.array",
"models.ANN"
] |
[((592, 624), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (615, 624), False, 'import warnings\n'), ((1244, 1260), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (1252, 1260), True, 'import numpy as np\n'), ((1889, 1900), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1897, 1900), True, 'import numpy as np\n'), ((15694, 15715), 'motor_bci_game.Game', 'motor_bci_game.Game', ([], {}), '()\n', (15713, 15715), False, 'import motor_bci_game\n'), ((16352, 16365), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (16362, 16365), False, 'import time\n'), ((16635, 16645), 'sys.exit', 'sys.exit', ([], {}), '()\n', (16643, 16645), False, 'import sys\n'), ((17051, 17064), 'game_2.Game', 'game_2.Game', ([], {}), '()\n', (17062, 17064), False, 'import game_2\n'), ((18132, 18142), 'sys.exit', 'sys.exit', ([], {}), '()\n', (18140, 18142), False, 'import sys\n'), ((18505, 18526), 'motor_bci_game.Game', 'motor_bci_game.Game', ([], {}), '()\n', (18524, 18526), False, 'import motor_bci_game\n'), ((18805, 18815), 'sys.exit', 'sys.exit', ([], {}), '()\n', (18813, 18815), False, 'import sys\n'), ((18959, 18980), 'motor_bci_game.Game', 'motor_bci_game.Game', ([], {}), '()\n', (18978, 18980), False, 'import motor_bci_game\n'), ((19084, 19094), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19092, 19094), False, 'import sys\n'), ((1739, 1783), 'numpy.append', 'np.append', (['out', '[keys + [timestamp]]'], {'axis': '(0)'}), '(out, [keys + [timestamp]], axis=0)\n', (1748, 1783), True, 'import numpy as np\n'), ((6727, 6765), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__gather'}), '(target=self.__gather)\n', (6743, 6765), False, 'import threading\n'), ((7409, 7504), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__train', 'args': '(classifier, include_historical)', 'kwargs': 'kwargs'}), '(target=self.__train, args=(classifier, include_historical),\n kwargs=kwargs)\n', (7425, 7504), False, 'import threading\n'), ((9736, 9758), 'numpy.zeros', 'np.zeros', (['(m * 3, 803)'], {}), '((m * 3, 803))\n', (9744, 9758), True, 'import numpy as np\n'), ((9971, 9988), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {}), '(out)\n', (9983, 9988), True, 'import pandas as pd\n'), ((10517, 10532), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (10530, 10532), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((11864, 11893), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'preds'], {}), '(Y_test, preds)\n', (11878, 11893), False, 'from sklearn.metrics import accuracy_score\n'), ((12121, 12198), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._build_model', 'args': '(classifier,)', 'kwargs': 'kwargs'}), '(target=self._build_model, args=(classifier,), kwargs=kwargs)\n', (12137, 12198), False, 'import threading\n'), ((13168, 13203), 'numpy.save', 'np.save', (['eeg_file', 'self.eeg_dataset'], {}), '(eeg_file, self.eeg_dataset)\n', (13175, 13203), True, 'import numpy as np\n'), ((13355, 13392), 'numpy.save', 'np.save', (['filt_eeg_file', 'self.filtered'], {}), '(filt_eeg_file, self.filtered)\n', (13362, 13392), True, 'import numpy as np\n'), ((13538, 13569), 'numpy.save', 'np.save', (['fft_eeg_file', 'self.fft'], {}), '(fft_eeg_file, self.fft)\n', (13545, 13569), True, 'import numpy as np\n'), ((13622, 13675), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__test', 'args': '(send_to,)'}), '(target=self.__test, args=(send_to,))\n', (13638, 13675), False, 'import 
threading\n'), ((19253, 19299), 'numpy.load', 'np.load', (['"""users/data/fmi_01_300921_211231.npy"""'], {}), "('users/data/fmi_01_300921_211231.npy')\n", (19260, 19299), True, 'import numpy as np\n'), ((3341, 3375), 'pylsl.resolve_stream', 'resolve_stream', (['"""name"""', '"""Keyboard"""'], {}), "('name', 'Keyboard')\n", (3355, 3375), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((3443, 3472), 'pylsl.StreamInlet', 'StreamInlet', (['self.keyboard[0]'], {}), '(self.keyboard[0])\n', (3454, 3472), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((3592, 3621), 'pylsl.resolve_stream', 'resolve_stream', (['"""type"""', '"""EEG"""'], {}), "('type', 'EEG')\n", (3606, 3621), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((3679, 3703), 'pylsl.StreamInlet', 'StreamInlet', (['self.eeg[0]'], {}), '(self.eeg[0])\n', (3690, 3703), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((5817, 5849), 'numpy.load', 'np.load', (["('users/data/' + mi_file)"], {}), "('users/data/' + mi_file)\n", (5824, 5849), True, 'import numpy as np\n'), ((5958, 5969), 'time.time', 'time.time', ([], {}), '()\n', (5967, 5969), False, 'import time\n'), ((6407, 6434), 'numpy.save', 'np.save', (['fft_name', 'self.fft'], {}), '(fft_name, self.fft)\n', (6414, 6434), True, 'import numpy as np\n'), ((8793, 8829), 'numpy.array', 'np.array', (['data[index:index + length]'], {}), '(data[index:index + length])\n', (8801, 8829), True, 'import numpy as np\n'), ((12351, 12386), 'models.KNN', 'models.KNN', ([], {'n_neighbors': '(3)'}), '(n_neighbors=3, **kwargs)\n', (12361, 12386), False, 'import models\n'), ((14074, 14110), 'numpy.array', 'np.array', (['data[index:index + length]'], {}), '(data[index:index + length])\n', (14082, 14110), True, 'import numpy as np\n'), ((19314, 19332), 'pandas.DataFrame', 'pd.DataFrame', (['good'], {}), '(good)\n', (19326, 19332), True, 'import pandas as pd\n'), ((5340, 5364), 'os.listdir', 'os.listdir', (['"""users/data"""'], {}), "('users/data')\n", (5350, 5364), False, 'import os\n'), ((5430, 5454), 'os.listdir', 'os.listdir', (['"""users/data"""'], {}), "('users/data')\n", (5440, 5454), False, 'import os\n'), ((9193, 9209), 'numpy.array', 'np.array', (['data_o'], {}), '(data_o)\n', (9201, 9209), True, 'import numpy as np\n'), ((12444, 12456), 'models.LDA', 'models.LDA', ([], {}), '()\n', (12454, 12456), False, 'import models\n'), ((13064, 13090), 'numpy.array', 'np.array', (['self.eeg_dataset'], {}), '(self.eeg_dataset)\n', (13072, 13090), True, 'import numpy as np\n'), ((13248, 13271), 'numpy.array', 'np.array', (['self.filtered'], {}), '(self.filtered)\n', (13256, 13271), True, 'import numpy as np\n'), ((13437, 13455), 'numpy.array', 'np.array', (['self.fft'], {}), '(self.fft)\n', (13445, 13455), True, 'import numpy as np\n'), ((4010, 4086), 'numpy.array', 'np.array', (['[item[:-3] for item in self.filtered[-1:-1 - self.data_length:-1]]'], {}), '([item[:-3] for item in self.filtered[-1:-1 - self.data_length:-1]])\n', (4018, 4086), True, 'import numpy as np\n'), ((12514, 12534), 'models.SVM', 'models.SVM', ([], {}), '(**kwargs)\n', (12524, 12534), False, 'import models\n'), ((12592, 12612), 'models.ANN', 'models.ANN', ([], {}), '(**kwargs)\n', (12602, 12612), False, 'import models\n'), ((12978, 12994), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (12992, 12994), False, 'from datetime import datetime\n'), ((12670, 12690), 'models.RNN', 'models.RNN', ([], {}), '(**kwargs)\n', (12680, 12690), False, 'import models\n'), ((6164, 6175), 
'time.time', 'time.time', ([], {}), '()\n', (6173, 6175), False, 'import time\n'), ((12748, 12784), 'models.CNN2', 'models.CNN2', ([], {'transfer': '(True)'}), '(transfer=True, **kwargs)\n', (12759, 12784), False, 'import models\n'), ((12899, 12924), 'models.KNN', 'models.KNN', ([], {'n_neighbors': '(3)'}), '(n_neighbors=3)\n', (12909, 12924), False, 'import models\n'), ((5082, 5095), 'numpy.fft.fft', 'np.fft.fft', (['n'], {}), '(n)\n', (5092, 5095), True, 'import numpy as np\n')]
|
"""
Tests brusselator
"""
import numpy as np
from pymgrit.brusselator.brusselator import Brusselator
from pymgrit.brusselator.brusselator import VectorBrusselator
def test_brusselator_constructor():
"""
Test constructor
"""
brusselator = Brusselator(t_start=0, t_stop=1, nt=11)
np.testing.assert_equal(brusselator.a, 1)
np.testing.assert_equal(brusselator.b, 3)
np.testing.assert_equal(True, isinstance(brusselator.vector_template, VectorBrusselator))
np.testing.assert_equal(True, isinstance(brusselator.vector_t_start, VectorBrusselator))
np.testing.assert_equal(brusselator.vector_t_start.get_values(), np.array([0, 1]))
def test_brusselator_step():
"""
Test step()
"""
brusselator = Brusselator(t_start=0, t_stop=1, nt=11)
brusselator_res = brusselator.step(u_start=VectorBrusselator(), t_start=0, t_stop=0.1)
np.testing.assert_almost_equal(brusselator_res.get_values(), np.array([0.08240173, 0.01319825]))
def test_vector_brusselator_constructor():
"""
Test constructor
"""
vector_brusselator = VectorBrusselator()
np.testing.assert_equal(vector_brusselator.value[0], 0)
np.testing.assert_equal(vector_brusselator.value[1], 0)
def test_vector_brusselator_add():
"""
Test __add__
"""
vector_brusselator_1 = VectorBrusselator()
vector_brusselator_1.value = np.ones(2)
vector_brusselator_2 = VectorBrusselator()
vector_brusselator_2.value = 2 * np.ones(2)
vector_brusselator_res = vector_brusselator_1 + vector_brusselator_2
np.testing.assert_equal(vector_brusselator_res.value, 3 * np.ones(2))
vector_brusselator_res += vector_brusselator_1
np.testing.assert_equal(vector_brusselator_res.value, 4 * np.ones(2))
def test_vector_brusselator_sub():
"""
Test __sub__
"""
vector_brusselator_1 = VectorBrusselator()
vector_brusselator_1.value = np.ones(2)
vector_brusselator_2 = VectorBrusselator()
vector_brusselator_2.value = 2 * np.ones(2)
vector_brusselator_res = vector_brusselator_2 - vector_brusselator_1
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2))
vector_brusselator_res -= vector_brusselator_2
np.testing.assert_equal(vector_brusselator_res.value, -np.ones(2))
def test_vector_brusselator_mul():
"""
Test __mul__
"""
vector_brusselator_1 = VectorBrusselator()
vector_brusselator_1.value = np.ones(2)
vector_brusselator_res = vector_brusselator_1 * 2
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*2)
vector_brusselator_res = 3 * vector_brusselator_1
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*3)
vector_brusselator_res *= 2
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*6)
def test_vector_brusselator_norm():
"""
Test norm()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator.value = np.array([1, 2])
np.testing.assert_equal(np.linalg.norm(np.array([1, 2])), vector_brusselator.norm())
def test_vector_brusselator_clone_zero():
"""
Test clone_zero()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator_clone = vector_brusselator.clone_zero()
np.testing.assert_equal(True, isinstance(vector_brusselator_clone, VectorBrusselator))
np.testing.assert_equal(vector_brusselator_clone.value, np.zeros(2))
def test_vector_brusselator_clone_rand():
"""
Test clone_rand()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator_clone = vector_brusselator.clone_rand()
np.testing.assert_equal(True, isinstance(vector_brusselator_clone, VectorBrusselator))
def test_vector_brusselator_set_values():
"""
Test the set_values()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator.set_values(np.array([1, 2]))
np.testing.assert_equal(vector_brusselator.value, np.array([1, 2]))
def test_vector_brusselator_get_values():
"""
Test get_values()
"""
vector_brusselator = VectorBrusselator()
np.testing.assert_equal(vector_brusselator.get_values(), np.zeros(2))
def test_vector_brusselator_plot_solution():
"""
Test get_values()
"""
vector_brusselator = VectorBrusselator()
np.testing.assert_equal(vector_brusselator.plot_solution(), None)
|
[
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.testing.assert_equal",
"pymgrit.brusselator.brusselator.VectorBrusselator",
"pymgrit.brusselator.brusselator.Brusselator"
] |
[((257, 296), 'pymgrit.brusselator.brusselator.Brusselator', 'Brusselator', ([], {'t_start': '(0)', 't_stop': '(1)', 'nt': '(11)'}), '(t_start=0, t_stop=1, nt=11)\n', (268, 296), False, 'from pymgrit.brusselator.brusselator import Brusselator\n'), ((302, 343), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['brusselator.a', '(1)'], {}), '(brusselator.a, 1)\n', (325, 343), True, 'import numpy as np\n'), ((348, 389), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['brusselator.b', '(3)'], {}), '(brusselator.b, 3)\n', (371, 389), True, 'import numpy as np\n'), ((746, 785), 'pymgrit.brusselator.brusselator.Brusselator', 'Brusselator', ([], {'t_start': '(0)', 't_stop': '(1)', 'nt': '(11)'}), '(t_start=0, t_stop=1, nt=11)\n', (757, 785), False, 'from pymgrit.brusselator.brusselator import Brusselator\n'), ((1086, 1105), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1103, 1105), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1110, 1165), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['vector_brusselator.value[0]', '(0)'], {}), '(vector_brusselator.value[0], 0)\n', (1133, 1165), True, 'import numpy as np\n'), ((1170, 1225), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['vector_brusselator.value[1]', '(0)'], {}), '(vector_brusselator.value[1], 0)\n', (1193, 1225), True, 'import numpy as np\n'), ((1323, 1342), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1340, 1342), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1376, 1386), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1383, 1386), True, 'import numpy as np\n'), ((1414, 1433), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1431, 1433), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1853, 1872), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1870, 1872), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1906, 1916), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1913, 1916), True, 'import numpy as np\n'), ((1944, 1963), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1961, 1963), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((2375, 2394), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (2392, 2394), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((2428, 2438), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2435, 2438), True, 'import numpy as np\n'), ((2892, 2911), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (2909, 2911), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((2943, 2959), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (2951, 2959), True, 'import numpy as np\n'), ((3156, 3175), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (3173, 3175), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((3513, 3532), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (3530, 3532), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((3800, 3819), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', 
(3817, 3819), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((4051, 4070), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (4068, 4070), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((4255, 4274), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (4272, 4274), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((647, 663), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (655, 663), True, 'import numpy as np\n'), ((943, 977), 'numpy.array', 'np.array', (['[0.08240173, 0.01319825]'], {}), '([0.08240173, 0.01319825])\n', (951, 977), True, 'import numpy as np\n'), ((1471, 1481), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1478, 1481), True, 'import numpy as np\n'), ((2001, 2011), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2008, 2011), True, 'import numpy as np\n'), ((2144, 2154), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2151, 2154), True, 'import numpy as np\n'), ((3393, 3404), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3401, 3404), True, 'import numpy as np\n'), ((3854, 3870), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3862, 3870), True, 'import numpy as np\n'), ((3926, 3942), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3934, 3942), True, 'import numpy as np\n'), ((4132, 4143), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4140, 4143), True, 'import numpy as np\n'), ((833, 852), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (850, 852), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1618, 1628), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1625, 1628), True, 'import numpy as np\n'), ((1744, 1754), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1751, 1754), True, 'import numpy as np\n'), ((2267, 2277), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2274, 2277), True, 'import numpy as np\n'), ((2552, 2562), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2559, 2562), True, 'import numpy as np\n'), ((2679, 2689), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2686, 2689), True, 'import numpy as np\n'), ((2784, 2794), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2791, 2794), True, 'import numpy as np\n'), ((3003, 3019), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3011, 3019), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy as np
import mixem
from mixem.distribution import MultivariateNormalDistribution
def generate_data():
dist_params = [
(np.array([4]), np.diag([1])),
(np.array([1]), np.diag([0.5]))
]
weights = [0.3, 0.7]
n_data = 5000
data = np.zeros((n_data, 1))
for i in range(n_data):
dpi = np.random.choice(range(len(dist_params)), p=weights)
dp = dist_params[dpi]
data[i] = np.random.multivariate_normal(dp[0], dp[1])
return data
def recover(data):
mu = np.mean(data)
sigma = np.var(data)
init_params = [
(np.array([mu + 0.1]), np.diag([sigma])),
(np.array([mu - 0.1]), np.diag([sigma]))
]
weight, distributions, ll = mixem.em(data, [MultivariateNormalDistribution(mu, sigma) for mu, sigma in init_params])
print(weight, distributions, ll)
if __name__ == '__main__':
data = generate_data()
recover(data)
|
[
"numpy.zeros",
"mixem.distribution.MultivariateNormalDistribution",
"numpy.mean",
"numpy.array",
"numpy.random.multivariate_normal",
"numpy.diag",
"numpy.var"
] |
[((301, 322), 'numpy.zeros', 'np.zeros', (['(n_data, 1)'], {}), '((n_data, 1))\n', (309, 322), True, 'import numpy as np\n'), ((558, 571), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (565, 571), True, 'import numpy as np\n'), ((584, 596), 'numpy.var', 'np.var', (['data'], {}), '(data)\n', (590, 596), True, 'import numpy as np\n'), ((466, 509), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['dp[0]', 'dp[1]'], {}), '(dp[0], dp[1])\n', (495, 509), True, 'import numpy as np\n'), ((169, 182), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (177, 182), True, 'import numpy as np\n'), ((184, 196), 'numpy.diag', 'np.diag', (['[1]'], {}), '([1])\n', (191, 196), True, 'import numpy as np\n'), ((208, 221), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (216, 221), True, 'import numpy as np\n'), ((223, 237), 'numpy.diag', 'np.diag', (['[0.5]'], {}), '([0.5])\n', (230, 237), True, 'import numpy as np\n'), ((627, 647), 'numpy.array', 'np.array', (['[mu + 0.1]'], {}), '([mu + 0.1])\n', (635, 647), True, 'import numpy as np\n'), ((649, 665), 'numpy.diag', 'np.diag', (['[sigma]'], {}), '([sigma])\n', (656, 665), True, 'import numpy as np\n'), ((677, 697), 'numpy.array', 'np.array', (['[mu - 0.1]'], {}), '([mu - 0.1])\n', (685, 697), True, 'import numpy as np\n'), ((699, 715), 'numpy.diag', 'np.diag', (['[sigma]'], {}), '([sigma])\n', (706, 715), True, 'import numpy as np\n'), ((772, 813), 'mixem.distribution.MultivariateNormalDistribution', 'MultivariateNormalDistribution', (['mu', 'sigma'], {}), '(mu, sigma)\n', (802, 813), False, 'from mixem.distribution import MultivariateNormalDistribution\n')]
|
import unittest
import numpy as np
from quasimodo.assertion_fusion.gaussian_nb_with_missing_values import GaussianNBWithMissingValues
class TestFilterObject(unittest.TestCase):
def test_gaussian2(self):
std = -0.1339048038303071
mean = -0.1339048038303071
x = 150.10086283379565
temp = self.gaussian_nb.get_gaussian(x, mean, std)
self.assertAlmostEqual(temp, 0)
def test_prior(self):
y = np.array([1] * 10 + [0] * 5)
self.gaussian_nb.set_unique_y(y)
self.gaussian_nb.set_prior(y)
prior = self.gaussian_nb.prior
self.assertAlmostEqual(prior[0], 0.33, places=2)
self.assertAlmostEqual(prior[1], 0.67, places=2)
def test_means_standard_deviations(self):
x = [[0, 0],
[0, 0],
[1, -1],
[1, 0],
[1, 0],
[2, 3]]
y = [0, 0, 0, 1, 1, 1]
x = np.array(x)
y = np.array(y)
self.gaussian_nb.fit(x, y)
means = self.gaussian_nb.means
standard_deviations = self.gaussian_nb.standard_deviations
self.assertAlmostEqual(means[0, 0], 0.33, places=2)
self.assertAlmostEqual(means[0, 1], -0.33, places=2)
self.assertAlmostEqual(means[1, 0], 1.33, places=2)
self.assertAlmostEqual(means[1, 1], 1, places=2)
self.assertAlmostEqual(standard_deviations[0, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[0, 1] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 1] ** 2, 3, places=2)
def test_means_standard_deviations_with_nan(self):
self.gaussian_nb.fit(self.x, self.y)
means = self.gaussian_nb.means
standard_deviations = self.gaussian_nb.standard_deviations
self.assertAlmostEqual(means[0, 0], 0.33, places=2)
self.assertAlmostEqual(means[0, 1], -0.33, places=2)
self.assertAlmostEqual(means[1, 0], 1.33, places=2)
self.assertAlmostEqual(means[1, 1], 1, places=2)
self.assertAlmostEqual(standard_deviations[0, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[0, 1] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 1] ** 2, 3, places=2)
def test_likelihoods(self):
self.gaussian_nb.fit(self.x, self.y)
x_in = np.array([1, 0])
likelihoods = self.gaussian_nb.get_all_likelihoods(x_in)
self.assertNotAlmostEqual(likelihoods[0], 0, places=2)
self.assertNotAlmostEqual(likelihoods[0], 1, places=2)
self.assertNotAlmostEqual(likelihoods[1], 0, places=2)
self.assertNotAlmostEqual(likelihoods[1], 1, places=2)
def setUp(self):
self.x = [[0, np.nan], # 0
[np.nan, 0], # 0
[0, 0], # 0
[1, -1], # 0
[1, np.nan], # 1
[np.nan, 0], # 1
[1, 0], # 1
[2, 3]] # 1
self.y = [0, 0, 0, 0, 1, 1, 1, 1]
self.y_uniq = [0, 1]
self.x = np.array(self.x)
self.y = np.array(self.y)
self.y_uniq = np.array(self.y_uniq)
self.gaussian_nb = GaussianNBWithMissingValues()
def test_predict_proba(self):
clf = GaussianNBWithMissingValues()
clf.fit(self.x, self.y)
x_in = np.array([[1, 0]])
proba = clf.predict_proba(x_in)
self.assertNotAlmostEqual(proba[0][0], 0, places=2)
self.assertNotAlmostEqual(proba[0][0], 1, places=2)
self.assertNotAlmostEqual(proba[0][1], 0, places=2)
self.assertNotAlmostEqual(proba[0][1], 1, places=2)
self.assertGreater(proba[0][1], proba[0][0])
def test_gaussian(self):
gaussian = self.gaussian_nb.get_gaussian(0.441, 1, 0.447213595)
self.assertAlmostEqual(gaussian, 0.40842, places=2)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"quasimodo.assertion_fusion.gaussian_nb_with_missing_values.GaussianNBWithMissingValues",
"numpy.array"
] |
[((4045, 4060), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4058, 4060), False, 'import unittest\n'), ((450, 478), 'numpy.array', 'np.array', (['([1] * 10 + [0] * 5)'], {}), '([1] * 10 + [0] * 5)\n', (458, 478), True, 'import numpy as np\n'), ((928, 939), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (936, 939), True, 'import numpy as np\n'), ((952, 963), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (960, 963), True, 'import numpy as np\n'), ((2507, 2523), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2515, 2523), True, 'import numpy as np\n'), ((3220, 3236), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (3228, 3236), True, 'import numpy as np\n'), ((3254, 3270), 'numpy.array', 'np.array', (['self.y'], {}), '(self.y)\n', (3262, 3270), True, 'import numpy as np\n'), ((3293, 3314), 'numpy.array', 'np.array', (['self.y_uniq'], {}), '(self.y_uniq)\n', (3301, 3314), True, 'import numpy as np\n'), ((3342, 3371), 'quasimodo.assertion_fusion.gaussian_nb_with_missing_values.GaussianNBWithMissingValues', 'GaussianNBWithMissingValues', ([], {}), '()\n', (3369, 3371), False, 'from quasimodo.assertion_fusion.gaussian_nb_with_missing_values import GaussianNBWithMissingValues\n'), ((3421, 3450), 'quasimodo.assertion_fusion.gaussian_nb_with_missing_values.GaussianNBWithMissingValues', 'GaussianNBWithMissingValues', ([], {}), '()\n', (3448, 3450), False, 'from quasimodo.assertion_fusion.gaussian_nb_with_missing_values import GaussianNBWithMissingValues\n'), ((3498, 3516), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (3506, 3516), True, 'import numpy as np\n')]
|
from itertools import cycle
import numpy as np
from scipy import sparse
import h5py
from evaluation import load_nuswide, normalize
rng = np.random.RandomState(1701)
transformer = []
batch_size = 100
def load():
_, label, _, label_name, _, data = load_nuswide('nuswide-decaf.npz', 'train')
data = data.toarray()
data = normalize(data, axis = 1)
label = label.tolil()
return data, label, label_name
def save(fname, I, W):
h5out = h5py.File(fname + '.h5', 'w')
Iset = h5out.create_dataset('I', data = I)
Wset = h5out.create_dataset('W', data = W)
h5out.close()
def projection(X):
norm_X = np.linalg.norm(X, axis = 1)
for i in range(len(norm_X)):
if norm_X[i] > 1:
X[i, :] *= 1. / norm_X[i]
return X
def initialize_word_embeddings(label_name, embed_dim):
import gensim
model = gensim.models.Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
assert model.syn0.shape[1] == embed_dim
W = []
for name in label_name:
W.append(model[name])
return np.asarray(W)
def train(I, W, data, label, lr_I = 0.001, lr_W = 0.001, maxIter = None):
it = 0
loss = 0
sampleIter = cycle(rng.permutation(label.shape[0]))
universe = set(range(label.shape[1]))
I = projection(I)
W = projection(W)
print('Start training with lr_I {}, lr_W {}, maxIter {}'.format(lr_I, lr_W, maxIter))
while True:
# update
        sampleId = next(sampleIter)
feat = np.dot(data[sampleId], I)
# obtain label and vlabel (violate label)
l = label.rows[sampleId]
if len(l) == 0:
continue
vl = list(universe.difference(l))
vllen = len(vl)
delta_feat = np.zeros(feat.shape)
delta_W = np.zeros(W.shape)
for y in l:
score = np.dot(W[y, :], feat)
margin = -1
esN = 0
while margin <= 0 and esN < (vllen - 1):
vy = vl[rng.randint(vllen)]
vscore = np.dot(W[vy, :], feat)
margin = vscore - score + 1
esN += 1
if margin > 0:
                rank = transformer[(vllen - 1) // esN]
loss += rank * margin
# gradient
delta_feat += (W[y, :] - W[vy, :]) * rank
temp = feat * rank
delta_W[y, :] += temp
delta_W[vy, :] -= temp
I += np.tensordot(data[sampleId], delta_feat, axes = 0) * (lr_I / len(l))
W += delta_W * (lr_W / len(l))
if lr_I > 0.:
I = projection(I)
if lr_W > 0.:
W = projection(W)
it += 1
if maxIter is not None and it == maxIter:
print('Finished training at iteration {} with loss: {}'.format(it, loss / ((it - 1) % batch_size + 1)))
break
if it % batch_size == 0:
print('\titer: {}\tloss: {}'.format(it, loss / batch_size))
loss = 0
# save
if it % label.shape[0] == 0:
print('saving model...')
save('models/wsabie_model_iter_{}'.format(it), I, W)
return I, W
if __name__ == '__main__':
embed_dim = 300
random_init_W = True
# load data
data, label, label_name = load()
print('Data shape: {}'.format(data.shape))
print('Label shape: {}'.format(label.shape))
# initialize transformer
transformer = [0] * (label.shape[1] + 1)
for i in range(label.shape[1]):
transformer[i + 1] = transformer[i] + 1. / (i + 1)
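    # transformer[k] is the k-th harmonic number (1 + 1/2 + ... + 1/k); train() uses it as a
    # WARP-style rank weight: the fewer negatives sampled before a violation is found (esN),
    # the higher the estimated rank of the true label and the larger the applied weight.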
# initialize model
I = rng.rand(data.shape[1], embed_dim).astype(data.dtype)
if random_init_W:
W = rng.rand(label.shape[1], embed_dim).astype(data.dtype)
else:
W = initialize_word_embeddings(label_name, embed_dim)
# train loop
I, W = train(I, W, data, label, lr_I = 0.001, lr_W = 0.00001,
maxIter = 2 * data.shape[0])
# save to hdf5 file
save('models/wsabie_model', I, W)
|
[
"h5py.File",
"gensim.models.Word2Vec.load_word2vec_format",
"numpy.tensordot",
"evaluation.normalize",
"numpy.asarray",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.linalg.norm",
"numpy.dot",
"evaluation.load_nuswide"
] |
[((138, 165), 'numpy.random.RandomState', 'np.random.RandomState', (['(1701)'], {}), '(1701)\n', (159, 165), True, 'import numpy as np\n'), ((250, 292), 'evaluation.load_nuswide', 'load_nuswide', (['"""nuswide-decaf.npz"""', '"""train"""'], {}), "('nuswide-decaf.npz', 'train')\n", (262, 292), False, 'from evaluation import load_nuswide, normalize\n'), ((326, 349), 'evaluation.normalize', 'normalize', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (335, 349), False, 'from evaluation import load_nuswide, normalize\n'), ((443, 472), 'h5py.File', 'h5py.File', (["(fname + '.h5')", '"""w"""'], {}), "(fname + '.h5', 'w')\n", (452, 472), False, 'import h5py\n'), ((610, 635), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (624, 635), True, 'import numpy as np\n'), ((816, 915), 'gensim.models.Word2Vec.load_word2vec_format', 'gensim.models.Word2Vec.load_word2vec_format', (['"""GoogleNews-vectors-negative300.bin"""'], {'binary': '(True)'}), "(\n 'GoogleNews-vectors-negative300.bin', binary=True)\n", (859, 915), False, 'import gensim\n'), ((1023, 1036), 'numpy.asarray', 'np.asarray', (['W'], {}), '(W)\n', (1033, 1036), True, 'import numpy as np\n'), ((1429, 1454), 'numpy.dot', 'np.dot', (['data[sampleId]', 'I'], {}), '(data[sampleId], I)\n', (1435, 1454), True, 'import numpy as np\n'), ((1641, 1661), 'numpy.zeros', 'np.zeros', (['feat.shape'], {}), '(feat.shape)\n', (1649, 1661), True, 'import numpy as np\n'), ((1676, 1693), 'numpy.zeros', 'np.zeros', (['W.shape'], {}), '(W.shape)\n', (1684, 1693), True, 'import numpy as np\n'), ((1724, 1745), 'numpy.dot', 'np.dot', (['W[y, :]', 'feat'], {}), '(W[y, :], feat)\n', (1730, 1745), True, 'import numpy as np\n'), ((2220, 2268), 'numpy.tensordot', 'np.tensordot', (['data[sampleId]', 'delta_feat'], {'axes': '(0)'}), '(data[sampleId], delta_feat, axes=0)\n', (2232, 2268), True, 'import numpy as np\n'), ((1878, 1900), 'numpy.dot', 'np.dot', (['W[vy, :]', 'feat'], {}), '(W[vy, :], feat)\n', (1884, 1900), True, 'import numpy as np\n')]
|
import numpy as np
from ..AShape import AShape, AShape
class TileInfo:
"""
Tile info.
arguments
shape AShape
tiles Iterable of ints
errors during the construction:
ValueError
result:
.o_shape AShape
.axes_slices list of slice() to fetch original shape
from o_shape for each tile
"""
__slots__ = ['o_shape', 'axes_slices']
def __init__(self, shape, tiles):
if len(tiles) != shape.ndim:
raise ValueError(f'tiles should match shape.ndim {shape.ndim}')
self.o_shape = AShape(dim*tiles[i] for i,dim in enumerate(shape))
c = [0]*shape.ndim
axes_offsets = []
for n in range(np.prod(tiles)):
axes_offsets.append( c.copy() )
for i in range(shape.ndim-1,-1,-1):
c[i] += 1
if c[i] < tiles[i]:
break
c[i] = 0
axes_slices = []
for axes_offset in axes_offsets:
sl = []
for axis,tile in enumerate(axes_offset):
axis_size = shape[axis]
sl.append( slice(axis_size*tile, axis_size*(tile+1)) )
axes_slices.append(tuple(sl))
self.axes_slices = tuple(axes_slices)
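# Hypothetical usage sketch (not part of the class above): assuming AShape behaves like a
# tuple of dimension sizes, tiling a (2, 3) shape by tiles=(2, 2) gives o_shape (4, 6) and
# four slice tuples, one per tile, each addressing an original-sized (2, 3) block:
#
#   info = TileInfo(AShape((2, 3)), tiles=(2, 2))
#   big = np.zeros(info.o_shape)                 # (4, 6) buffer holding all tiles
#   for sl in info.axes_slices:                  # 4 tuples of slice objects
#       big[sl] = np.arange(6).reshape(2, 3)     # write the original block into each tile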
|
[
"numpy.prod"
] |
[((738, 752), 'numpy.prod', 'np.prod', (['tiles'], {}), '(tiles)\n', (745, 752), True, 'import numpy as np\n')]
|
import random
import pickle
import numpy as np
import torch
M = 2**32 - 1
def init_fn(worker):
seed = torch.LongTensor(1).random_().item()
seed = (seed + worker) % M
np.random.seed(seed)
random.seed(seed)
def add_mask(x, mask, dim=1):
mask = mask.unsqueeze(dim)
shape = list(x.shape); shape[dim] += 21
new_x = x.new(*shape).zero_()
new_x = new_x.scatter_(dim, mask, 1.0)
s = [slice(None)]*len(shape)
s[dim] = slice(21, None)
    new_x[tuple(s)] = x
return new_x
def sample(x, size):
#https://gist.github.com/yoavram/4134617
i = random.sample(range(x.shape[0]), size)
return torch.tensor(x[i], dtype=torch.int16)
#x = np.random.permutation(x)
#return torch.tensor(x[:size])
def pkload(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
_shape = (240, 240, 155)
def get_all_coords(stride):
return torch.tensor(
np.stack([v.reshape(-1) for v in
np.meshgrid(
*[stride//2 + np.arange(0, s, stride) for s in _shape],
indexing='ij')],
-1), dtype=torch.int16)
_zero = torch.tensor([0])
def gen_feats():
x, y, z = 240, 240, 155
feats = np.stack(
np.meshgrid(
np.arange(x), np.arange(y), np.arange(z),
indexing='ij'), -1).astype('float32')
shape = np.array([x, y, z])
feats -= shape/2.0
feats /= shape
return feats
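# Minimal usage sketch (the tensors below are illustrative stand-ins, not project data):
# gen_feats() returns a (240, 240, 155, 3) float32 grid of voxel coordinates, centred by
# half the volume size and divided by it; add_mask() prepends 21 one-hot label channels:
#
#   x = torch.zeros(1, 4, 8, 8)                # (batch, channels, H, W)
#   mask = torch.randint(0, 21, (1, 8, 8))     # integer labels in [0, 21)
#   y = add_mask(x, mask, dim=1)               # (1, 25, 8, 8): 21 one-hot + 4 original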
|
[
"numpy.random.seed",
"torch.LongTensor",
"pickle.load",
"numpy.array",
"random.seed",
"numpy.arange",
"torch.tensor"
] |
[((1122, 1139), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (1134, 1139), False, 'import torch\n'), ((180, 200), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (194, 200), True, 'import numpy as np\n'), ((205, 222), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (216, 222), False, 'import random\n'), ((627, 664), 'torch.tensor', 'torch.tensor', (['x[i]'], {'dtype': 'torch.int16'}), '(x[i], dtype=torch.int16)\n', (639, 664), False, 'import torch\n'), ((1357, 1376), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1365, 1376), True, 'import numpy as np\n'), ((802, 816), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (813, 816), False, 'import pickle\n'), ((108, 127), 'torch.LongTensor', 'torch.LongTensor', (['(1)'], {}), '(1)\n', (124, 127), False, 'import torch\n'), ((1249, 1261), 'numpy.arange', 'np.arange', (['x'], {}), '(x)\n', (1258, 1261), True, 'import numpy as np\n'), ((1263, 1275), 'numpy.arange', 'np.arange', (['y'], {}), '(y)\n', (1272, 1275), True, 'import numpy as np\n'), ((1277, 1289), 'numpy.arange', 'np.arange', (['z'], {}), '(z)\n', (1286, 1289), True, 'import numpy as np\n'), ((998, 1021), 'numpy.arange', 'np.arange', (['(0)', 's', 'stride'], {}), '(0, s, stride)\n', (1007, 1021), True, 'import numpy as np\n')]
|
"""Provide the constant elasticity of substitution function."""
import numpy as np
from copulpy.config_copulpy import IS_DEBUG
from copulpy.clsMeta import MetaCls
class CESCls(MetaCls):
"""CES class."""
def __init__(self, alpha, y_weight, discount_factor):
"""Initialize class."""
self.attr = dict()
self.attr['discount_factor'] = discount_factor
self.attr['y_weight'] = y_weight
self.attr['alpha'] = alpha
self._check_attributes()
def evaluate(self, v_1, v_2):
"""Evaluate the CES function."""
self._additional_checks('evaluate_in', v_1, v_2)
y_weight, discount_factor, alpha = self.get_attr('y_weight', 'discount_factor', 'alpha')
rslt = (v_1 ** alpha + y_weight * v_2 ** alpha) ** (1 / alpha)
rslt = discount_factor * rslt
        self._additional_checks('evaluate_out', rslt)
return rslt
def _check_attributes(self):
"""Check the attributes of the class."""
alpha, y_weights, discount_factors = self.get_attr('alpha', 'y_weight', 'discount_factor')
np.testing.assert_equal(alpha >= 0, True)
np.testing.assert_equal(np.all(y_weights >= 0), True)
np.testing.assert_equal(np.all(discount_factors >= 0), True)
@staticmethod
def _additional_checks(label, *args):
"""Perform some additional checks on selected features of the class instance."""
# We only run these tests during debugging as otherwise the performance deteriorates.
if not IS_DEBUG:
return
if label in ['evaluate_in']:
for var in args:
np.testing.assert_equal(np.all(var >= 0), True)
elif label in ['evaluate_out']:
rslt, = args
np.testing.assert_equal(np.all(0.0 <= rslt), True)
else:
raise NotImplementedError
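# Minimal usage sketch (parameter values are illustrative, not a calibration):
# with alpha=0.5, y_weight=1.0 and discount_factor=0.9, evaluating at v_1 = v_2 = 4 gives
# 0.9 * (4**0.5 + 1.0 * 4**0.5) ** (1 / 0.5) = 0.9 * 16 = 14.4.
#
#   ces = CESCls(alpha=0.5, y_weight=1.0, discount_factor=0.9)
#   ces.evaluate(4.0, 4.0)   # -> 14.4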
|
[
"numpy.all",
"numpy.testing.assert_equal"
] |
[((1094, 1135), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(alpha >= 0)', '(True)'], {}), '(alpha >= 0, True)\n', (1117, 1135), True, 'import numpy as np\n'), ((1168, 1190), 'numpy.all', 'np.all', (['(y_weights >= 0)'], {}), '(y_weights >= 0)\n', (1174, 1190), True, 'import numpy as np\n'), ((1230, 1259), 'numpy.all', 'np.all', (['(discount_factors >= 0)'], {}), '(discount_factors >= 0)\n', (1236, 1259), True, 'import numpy as np\n'), ((1662, 1678), 'numpy.all', 'np.all', (['(var >= 0)'], {}), '(var >= 0)\n', (1668, 1678), True, 'import numpy as np\n'), ((1787, 1806), 'numpy.all', 'np.all', (['(0.0 <= rslt)'], {}), '(0.0 <= rslt)\n', (1793, 1806), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorrt as trt
from .utils import common, calibrator
class TRTModel:
def __init__(self, onnx_path, plan_path, mode="fp16", calibration_cache="calibration.cache",
calibration_dataset="", calibration_image_size="",
calibration_mean=[], calibration_std=[]):
"""
:param onnx_path: local path of onnx file.
:param plan_path: trt plan file to read/save.
:param mode: inference mode, fp16/int8.
:param calibration_cache: int8 cache file of calibration.
:param calibration_dataset: dataset.txt for calibration.
:param calibration_image_size: iamge size (w, h) for calibration.
:param calibration_mean: image mean for calibration.
:param calibration_std: image std for calibration.
"""
self.trt_logger = trt.Logger()
self.onnx_path = onnx_path
self.plan_path = plan_path
self.mode = mode
# for int8 calibration
if self.mode == "int8":
self.calib = self._get_calibrator(calibration_cache, calibration_dataset,
calibration_image_size, calibration_mean, calibration_std)
# init
self.engine = self._get_engine()
self.execution_context = self.engine.create_execution_context()
self.inputs, self.outputs, self.bindings, self.stream = common.allocate_buffers(self.engine)
def _get_calibrator(self, cache, dataset, size, mean, std):
if not os.path.exists(dataset):
raise Exception("Calibration dataset: {} not exist!".format(self.calibration_dataset))
calib = calibrator.EntropyCalibrator(dataset, cache, size, mean, std)
return calib
def _check_network(self, network):
"""check network
:param network: INetworkDefinition
"""
if not network.num_outputs:
raise Exception("No output node found!")
input_nodes = [network.get_input(i) for i in range(network.num_inputs)]
output_nodes = [network.get_output(i) for i in range(network.num_outputs)]
print("Network description")
for i, inp in enumerate(input_nodes):
print("Input node {} | Name {} | Shape {}".format(i, inp.name, inp.shape))
print("Total layers: {}".format(network.num_layers))
for i in range(network.num_layers):
layer = network.get_layer(i)
print("index {}, layer name: {}".format(i, layer.name))
for i, out in enumerate(output_nodes):
print("Output node {} | Name {} | Shape {}".format(i, out.name, out.shape))
def _parse_onnx(self):
"""takes an ONNX file and creates a TensorRT engine to run inference with
"""
dynamic = False
flag = common.EXPLICIT_BATCH
with trt.Builder(self.trt_logger) as builder, builder.create_network(flag) as network, builder.create_builder_config() as config, trt.OnnxParser(network, self.trt_logger) as parser, trt.Runtime(self.trt_logger) as runtime:
config.max_workspace_size = common.GiB(1)
builder.max_batch_size = 1
if self.mode == "fp16":
config.set_flag(trt.BuilderFlag.FP16)
print("set FP16 mode.")
if self.mode == "int8":
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = self.calib
print("set INT8 mode.")
# Parse model file
print('Loading ONNX file from path {}...'.format(self.onnx_path))
with open(self.onnx_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
print('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print(parser.get_error(error))
return None
print('Completed parsing of ONNX file')
# check netowrk
self._check_network(network)
# build engine
print('Building an engine from file {}; this may take a while...'.format(self.onnx_path))
plan = builder.build_serialized_network(network, config)
engine = runtime.deserialize_cuda_engine(plan)
print("Completed creating Engine")
# save engine
with open(self.plan_path, "wb") as f:
f.write(plan)
return engine
def _get_engine(self):
"""generate tensorrt runtime engine
"""
if os.path.exists(self.plan_path):
print('Load trt plan from: {}'.format(self.plan_path))
with open(self.plan_path, "rb") as f, trt.Runtime(self.trt_logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
if os.path.exists(self.onnx_path):
return self._parse_onnx()
else:
raise Exception("ONNX model file {} not exist!".format(self.onnx_path))
def forward(self, image_tensors):
"""do infernece
:param image_tensors: list, inputs tensor of model.
:return outputs: list, outputs tensor of model.
"""
for i, image_tensor in enumerate(image_tensors):
image = np.array([image_tensor], dtype=np.float32, order='C')
self.inputs[i].host = image
trt_outputs = common.do_inference_v2(self.execution_context,
bindings=self.bindings,
inputs=self.inputs,
outputs=self.outputs,
stream=self.stream)
return trt_outputs
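# Hypothetical usage sketch (file names and shapes are placeholders, not shipped assets):
#
#   model = TRTModel(onnx_path="model.onnx", plan_path="model.plan", mode="fp16")
#   dummy = np.random.rand(3, 224, 224).astype(np.float32)   # one CHW input tensor
#   outputs = model.forward([dummy])                          # list of flat output arrays
#
# forward() copies each input into the host buffers allocated from the engine bindings at
# construction time, so input shapes must match what the ONNX model declares.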
|
[
"tensorrt.Logger",
"tensorrt.OnnxParser",
"os.path.exists",
"tensorrt.Builder",
"numpy.array",
"tensorrt.Runtime"
] |
[((862, 874), 'tensorrt.Logger', 'trt.Logger', ([], {}), '()\n', (872, 874), True, 'import tensorrt as trt\n'), ((4532, 4562), 'os.path.exists', 'os.path.exists', (['self.plan_path'], {}), '(self.plan_path)\n', (4546, 4562), False, 'import os\n'), ((1538, 1561), 'os.path.exists', 'os.path.exists', (['dataset'], {}), '(dataset)\n', (1552, 1561), False, 'import os\n'), ((2855, 2883), 'tensorrt.Builder', 'trt.Builder', (['self.trt_logger'], {}), '(self.trt_logger)\n', (2866, 2883), True, 'import tensorrt as trt\n'), ((2980, 3020), 'tensorrt.OnnxParser', 'trt.OnnxParser', (['network', 'self.trt_logger'], {}), '(network, self.trt_logger)\n', (2994, 3020), True, 'import tensorrt as trt\n'), ((3032, 3060), 'tensorrt.Runtime', 'trt.Runtime', (['self.trt_logger'], {}), '(self.trt_logger)\n', (3043, 3060), True, 'import tensorrt as trt\n'), ((4817, 4847), 'os.path.exists', 'os.path.exists', (['self.onnx_path'], {}), '(self.onnx_path)\n', (4831, 4847), False, 'import os\n'), ((5266, 5319), 'numpy.array', 'np.array', (['[image_tensor]'], {'dtype': 'np.float32', 'order': '"""C"""'}), "([image_tensor], dtype=np.float32, order='C')\n", (5274, 5319), True, 'import numpy as np\n'), ((4682, 4710), 'tensorrt.Runtime', 'trt.Runtime', (['self.trt_logger'], {}), '(self.trt_logger)\n', (4693, 4710), True, 'import tensorrt as trt\n')]
|
#!/usr/bin/env python
import nltk
from nltk.corpus import brown
import numpy as np
from math import log
from config import *
"""
convert word list file to a map from word to id
"""
def word2map(filename):
word2idx = {};
with open(filename) as f:
for line in f:
word2idx[line.strip('\n')] = len(word2idx);
return word2idx;
if __name__ == "__main__":
    # add nltk search path
nltk.data.path.append(DATA_HOME);
# get brown text stream
print ("getting text stream...")
brown_text = list(filter(lambda x: x.isalpha(), map(lambda x: x.lower(), brown.words())));
M = len(brown_text);
# mapping word to index
print ("generating word map...")
V2id = word2map(DATA_HOME + "V.txt");
C2id = word2map(DATA_HOME + "C.txt");
print (V2id);
print (C2id);
# prepare for the calculation of Pr(c) and Pr(c|w)
# use ones to apply laplace smoothing
print ("counting context appearance...");
window_count = np.ones((V_SIZE, C_SIZE));
core_count = np.ones((1, C_SIZE));
for i in range(M):
w = brown_text[i];
        if w not in V2id:
continue;
wid = V2id[w];
for j in range(i - HALF_WINDOW, i + HALF_WINDOW + 1):
if j < 0 or j >= M or j == i:
continue;
c = brown_text[j];
if c not in C2id:
continue;
cid = C2id[c];
window_count[wid][cid] += 1;
core_count[0][cid] += 1;
#print (window_count)
#print (core_count)
# calculate Pr(c) and Pr(c|w)
print ("calculating probability...");
pcw, pc = window_count, core_count;
for i in range(len(pcw)):
pcw[i] = pcw[i] / pcw[i].sum();
pc = pc / pc.sum();
# calculate pointwise mutual information
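    # each entry is the positive PMI: phi[w][c] = max(0, log(Pr(c|w) / Pr(c))), so a word's
    # representation is its (clipped) association strength with every context word in C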
phi = np.zeros((V_SIZE, C_SIZE));
for i in range(V_SIZE):
for j in range(C_SIZE):
phi[i][j] = max(0, log(pcw[i][j] / pc[0][j]));
# save representation matrix to file
print ("saving representation...");
np.save("representation-" + str(C_SIZE) + ".npy", phi);
|
[
"nltk.corpus.brown.words",
"nltk.data.path.append",
"numpy.zeros",
"numpy.ones",
"math.log"
] |
[((416, 448), 'nltk.data.path.append', 'nltk.data.path.append', (['DATA_HOME'], {}), '(DATA_HOME)\n', (437, 448), False, 'import nltk\n'), ((985, 1010), 'numpy.ones', 'np.ones', (['(V_SIZE, C_SIZE)'], {}), '((V_SIZE, C_SIZE))\n', (992, 1010), True, 'import numpy as np\n'), ((1029, 1049), 'numpy.ones', 'np.ones', (['(1, C_SIZE)'], {}), '((1, C_SIZE))\n', (1036, 1049), True, 'import numpy as np\n'), ((1822, 1848), 'numpy.zeros', 'np.zeros', (['(V_SIZE, C_SIZE)'], {}), '((V_SIZE, C_SIZE))\n', (1830, 1848), True, 'import numpy as np\n'), ((593, 606), 'nltk.corpus.brown.words', 'brown.words', ([], {}), '()\n', (604, 606), False, 'from nltk.corpus import brown\n'), ((1941, 1966), 'math.log', 'log', (['(pcw[i][j] / pc[0][j])'], {}), '(pcw[i][j] / pc[0][j])\n', (1944, 1966), False, 'from math import log\n')]
|
import os
import csv
import collections
import numpy as np
class StatsTracker(collections.defaultdict):
"""Keep track of mean values"""
def __init__(self):
super().__init__(float)
self.step = 1
def update(self, data):
for key, val in data.items():
if key.endswith('_min'):
val = np.min(val)
self[key] = min(self.get(key, val), val)
elif key.endswith('_max'):
val = np.max(val)
self[key] = max(self.get(key, val), val)
else:
val = np.mean(val)
self[key] += (val - self[key]) / self.step
self.step += 1
class CSVWriter:
"""CSV Writer"""
def __init__(self, fields, fileobj):
self.fileobj = fileobj
self.writer = csv.DictWriter(fileobj, fieldnames=fields)
self.writer.writeheader()
def write(self, **kwargs):
self.writer.writerow(kwargs)
self.fileobj.flush()
def ensure_dir(filepath):
dirpath = os.path.dirname(filepath)
os.makedirs(dirpath, exist_ok=True)
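# Minimal usage sketch (field names and the output path are illustrative):
#
#   tracker = StatsTracker()
#   tracker.update({"loss": [0.5, 0.7], "grad_max": 0.9})   # running mean / running max
#   ensure_dir("runs/stats.csv")
#   with open("runs/stats.csv", "w", newline="") as f:
#       writer = CSVWriter(fields=["step", "loss"], fileobj=f)
#       writer.write(step=tracker.step, loss=tracker["loss"])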
|
[
"os.makedirs",
"os.path.dirname",
"numpy.max",
"numpy.min",
"numpy.mean",
"csv.DictWriter"
] |
[((1032, 1057), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (1047, 1057), False, 'import os\n'), ((1062, 1097), 'os.makedirs', 'os.makedirs', (['dirpath'], {'exist_ok': '(True)'}), '(dirpath, exist_ok=True)\n', (1073, 1097), False, 'import os\n'), ((815, 857), 'csv.DictWriter', 'csv.DictWriter', (['fileobj'], {'fieldnames': 'fields'}), '(fileobj, fieldnames=fields)\n', (829, 857), False, 'import csv\n'), ((347, 358), 'numpy.min', 'np.min', (['val'], {}), '(val)\n', (353, 358), True, 'import numpy as np\n'), ((477, 488), 'numpy.max', 'np.max', (['val'], {}), '(val)\n', (483, 488), True, 'import numpy as np\n'), ((586, 598), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (593, 598), True, 'import numpy as np\n')]
|
"""
Copyright (C) 2022 <NAME>
This work is released under the MIT License.
See the file LICENSE for details
Utility functions
"""
from math import sqrt
from typing import List
import numpy as np
import carla
import io
def loc_dist(a, b):
return sqrt((a.x - b.x)**2 + (a.y - b.y)**2 + (a.z - b.z)**2)
def vector_normalize(v:carla.Vector3D):
    norm = sqrt(v.x**2 + v.y**2 + v.z**2)
new = carla.Vector3D(x=v.x/norm, y=v.y/norm, z=v.z/norm)
return new
def vector_from_to(a:carla.Vector3D, b:carla.Vector3D):
dx = b.x - a.x
dy = b.y - a.y
dz = b.z - a.z
return carla.Vector3D(dx, dy, dz)
def scalar_product(a:carla.Vector3D, b:carla.Vector3D):
return a.x*b.x + a.y*b.y + a.z*b.z
def vector_dist(a, b):
return np.linalg.norm(a-b)
def normalize_numpy_vector(x: np.ndarray):
n = np.linalg.norm(x)
if n > 0.00001:
return x / n
else:
return None
# long_str(2) -> '0002'
# long_str(42, 3) -> '042'
def long_str(i:int, N:int=4, padding='0'):
s = str(i)
n = len(s)
if n < N:
s = padding*(N-n) + s
return s
# Removes 'intro' from left part of 'text', raises error if not found
def good_lstrip(text, intro):
assert(len(intro) <= len(text))
l = len(intro)
first = text[:l]
assert(first == intro)
return text[l:]
def intr(x):
return int(round(float(x)))
# Projective flattening, scales homogeneous coordinates so that last coordinate is always one
def pflat(x):
if len(x.shape) == 1:
x /= x[-1]
else:
x /= x[-1, :]
return x
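# Example: the homogeneous point (4, 6, 2) maps to (2, 3, 1); for a 3xN matrix the same
# division is applied to every column:
#   pflat(np.array([4.0, 6.0, 2.0]))   # -> array([2., 3., 1.])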
def print_table(row_names:List[str], col_names:List[str], matrix:np.ndarray,
decimals=2):
matrix = np.around(matrix, decimals=decimals)
row_names = np.array(row_names, dtype=str).reshape((len(row_names), 1))
matrix = np.hstack([row_names, matrix])
col_names = np.array(['', *col_names], dtype=str)
col_names = col_names.reshape((1, len(col_names)))
matrix = np.vstack([col_names, matrix])
max_len = max([len(v) for v in matrix.flatten()])
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
val = matrix[i,j]
matrix[i, j] = long_str(val, max_len, padding=' ')
print(np.array2string(matrix, max_line_width=200))
|
[
"math.sqrt",
"numpy.array2string",
"numpy.hstack",
"numpy.around",
"numpy.linalg.norm",
"numpy.array",
"numpy.vstack",
"carla.Vector3D"
] |
[((272, 332), 'math.sqrt', 'sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (276, 332), False, 'from math import sqrt\n'), ((414, 470), 'carla.Vector3D', 'carla.Vector3D', ([], {'x': '(v.x / norm)', 'y': '(v.y / norm)', 'z': '(v.z / norm)'}), '(x=v.x / norm, y=v.y / norm, z=v.z / norm)\n', (428, 470), False, 'import carla\n'), ((606, 632), 'carla.Vector3D', 'carla.Vector3D', (['dx', 'dy', 'dz'], {}), '(dx, dy, dz)\n', (620, 632), False, 'import carla\n'), ((764, 785), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (778, 785), True, 'import numpy as np\n'), ((836, 853), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (850, 853), True, 'import numpy as np\n'), ((1711, 1747), 'numpy.around', 'np.around', (['matrix'], {'decimals': 'decimals'}), '(matrix, decimals=decimals)\n', (1720, 1747), True, 'import numpy as np\n'), ((1842, 1872), 'numpy.hstack', 'np.hstack', (['[row_names, matrix]'], {}), '([row_names, matrix])\n', (1851, 1872), True, 'import numpy as np\n'), ((1889, 1926), 'numpy.array', 'np.array', (["['', *col_names]"], {'dtype': 'str'}), "(['', *col_names], dtype=str)\n", (1897, 1926), True, 'import numpy as np\n'), ((1995, 2025), 'numpy.vstack', 'np.vstack', (['[col_names, matrix]'], {}), '([col_names, matrix])\n', (2004, 2025), True, 'import numpy as np\n'), ((2263, 2306), 'numpy.array2string', 'np.array2string', (['matrix'], {'max_line_width': '(200)'}), '(matrix, max_line_width=200)\n', (2278, 2306), True, 'import numpy as np\n'), ((1769, 1799), 'numpy.array', 'np.array', (['row_names'], {'dtype': 'str'}), '(row_names, dtype=str)\n', (1777, 1799), True, 'import numpy as np\n')]
|
from apex import amp
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime
import scipy.sparse as sp_sparse
import tables
from itertools import chain
from model import loss_function
from model import VAE
import numpy as np
import os
import pandas as pd
from sklearn.metrics import accuracy_score
# from train_multitask_ccle import read_tsv
import torch
opt_level = 'O1'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def read_tsv(nparpath, genes, outdir, gmtmat, normalize_vals=True):
h5outpath = os.path.join(
outdir, "cellByGeneMatrix.npz")
if "gct" in nparpath:
rnadf = pd.read_csv(
nparpath, sep="\t", index_col=0,
compression="gzip", skiprows=2)
rnadf.drop_duplicates(subset=["Description"], inplace=True)
rnadf = rnadf[rnadf["Description"].isin(genes)]
npar = np.array(rnadf.iloc[:, 1:])
ar_genes = np.array(rnadf["Description"])
barcodes = np.array(rnadf.columns[1:])
else:
rnadf = pd.read_csv(
nparpath, sep="\t", index_col=0,
compression="gzip")
npar = np.array(rnadf)
ar_genes = rnadf.index
barcodes = np.array(rnadf.columns)
# Divide by max
# arsum = np.matrix.sum(npar, axis=0)
if normalize_vals:
arsum = np.apply_along_axis(np.sum, 0, npar)
npar = (npar * 1000) / arsum
_, idx_g1, idx_g2 = np.intersect1d(genes, ar_genes, return_indices=True)
npar = npar[idx_g2, :]
gmtmat = gmtmat[idx_g1, :]
out_genes = genes[idx_g1]
npar = np.transpose(npar)
np.savez_compressed(h5outpath, arr=npar, barcodes=barcodes,
genes=ar_genes)
return npar, barcodes, gmtmat, out_genes
def make_plot_umap(mudf, metadf, outdir, numlvs=10):
metadf.index = metadf["Barcode"]
import umap
import seaborn as sns
mumat = np.array(mudf.iloc[:, :numlvs])
for n_neighbors in [10, 100]:
for min_dist in [0.45]:
adname = "UMAP_dist-{}_nNeigh-{}".format(
min_dist, n_neighbors)
print(adname)
reducer = umap.UMAP(
n_neighbors=n_neighbors,
min_dist=min_dist)
embedding = reducer.fit_transform(mumat)
umap_output = pd.DataFrame(embedding)
umap_output.columns = ["UMAP1", "UMAP2"]
umap_output["CellType"] = list(metadf.loc[mudf.index, "CellType"])
umap_output.index = mudf.index
umap_output.to_csv(
os.path.join(outdir, adname + ".tsv.gz"),
sep="\t", compression="gzip")
sns_plot = sns.relplot(
x="UMAP1", y="UMAP2", hue="CellType", data=umap_output,
height=6, aspect=1.5)
sns_plot.savefig(
os.path.join(outdir, adname + ".pdf"))
sns_plot.savefig(
os.path.join(outdir, adname + ".png"))
def make_args():
metapaths = [
"/scratch/hdd001/home/mkarimza/" +
"ciberAtac/10x/raw/scRNA-seq_10XPBMC" +
"_metadataWithCellType.tsv",
"/scratch/ssd001/home/mkarimza/" +
"data/ciberatac/models/vae202012/" +
"SW480Files/metadata_for_vae_visualization.tsv"]
nparpaths = [
"/scratch/hdd001/home/mkarimza/" +
"ciberAtac/10x/raw/pbmc_unsorted_10k" +
"_filtered_feature_bc_matrix.h5",
"/scratch/hdd001/home/mkarimza/" +
"johnny/A06/10X/outs/" +
"filtered_feature_bc_matrix.h5"]
genepath = "/scratch/ssd001/home/mkarimza/" +\
"data/ciberatac/models/vae202101/" +\
"scviVersusCustomized/customizedScvi" +\
"FullTrainScaled1000/genes.txt"
gmtpath = "../c3.tft.v7.2.symbols.gmt"
genepath = "/scratch/ssd001/home/mkarimza/" +\
"data/ciberatac/models/vae202012/" +\
"commonGenes/Genes_passing_40p.txt"
outdir = "/scratch/ssd001/home/mkarimza/" +\
"data/ciberatac/models/vae202101/" +\
"customScviAppliedOnPbmcAndSw480"
numlvs = 10
os.makedirs(outdir, exist_ok=True)
existingmodelpath = "/scratch/ssd001/home/mkarimza/" +\
"data/ciberatac/models/vae202101/" +\
"scviVersusCustomized/customized" +\
"ScviFullTrainScaled1000/VAE_10LVS.pt"
use_connections = True
loss_scalers = [1, 1, 1]
predict_celltypes = True
num_celltypes = 11
argslist = [gmtpath, nparpaths, outdir,
numlvs, genepath, metapaths,
existingmodelpath,
use_connections,
loss_scalers,
predict_celltypes,
num_celltypes]
return argslist
def get_matrix_from_h5(filename):
with tables.open_file(filename, 'r') as f:
mat_group = f.get_node(f.root, 'matrix')
barcodes = f.get_node(mat_group, 'barcodes').read()
data = getattr(mat_group, 'data').read()
indices = getattr(mat_group, 'indices').read()
indptr = getattr(mat_group, 'indptr').read()
shape = getattr(mat_group, 'shape').read()
matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)
feature_ref = {}
feature_group = f.get_node(mat_group, 'features')
feature_ids = getattr(feature_group, 'id').read()
feature_names = getattr(feature_group, 'name').read()
feature_types = getattr(feature_group, 'feature_type').read()
feature_ref['id'] = feature_ids
feature_ref['name'] = feature_names
feature_ref['feature_type'] = feature_types
tag_keys = getattr(feature_group, '_all_tag_keys').read()
for key in tag_keys:
feature_ref[key] = getattr(feature_group, key.decode()).read()
return feature_ref, barcodes, matrix
def read_npz(nparpath, genes, outdir, gmtmat):
h5outpath = os.path.join(
outdir, "cellByGeneMatrix.npz")
npobj = np.load(nparpath, allow_pickle=True)
npar = npobj["arr"]
if npar.shape[0] > npar.shape[1]:
npar = np.transpose(npar)
ar_genes = npobj["rows"]
barcodes = npobj["cols"]
_, idx_g1, idx_g2 = np.intersect1d(genes, ar_genes, return_indices=True)
# arsum = np.matrix.sum(npar, axis=0)
# arsum = np.apply_along_axis(np.sum, 0, npar)
npar = npar[:, idx_g2]
gmtmat = gmtmat[idx_g1, :]
out_genes = genes[idx_g1]
np.savez_compressed(h5outpath, arr=npar, barcodes=barcodes)
return npar, barcodes, gmtmat, out_genes
def read_h5(h5path, genes, outdir, gmtmat):
h5outpath = os.path.join(
outdir, "cellByGeneMatrix.npz")
# Must be in form of filtered feature matrix
feature_ref, barcodes, matrix = get_matrix_from_h5(h5path)
# Limit the array to gene expression
idx_gexp = np.where(
np.array(feature_ref["feature_type"] == b'Gene Expression'))[0]
npar = matrix.toarray()
npar = np.transpose(npar[idx_gexp, :])
# Normalize npar by dividing by sum of the reads then multiplying by 1000)
# arsum = np.apply_along_axis(np.sum, 0, npar)
# arsum2d = np.zeros((1, npar.shape[1]))
# arsum2d[0, :] = arsum
# npar_scaled = (npar / arsum) * 1000
# tmat = np.transpose(npar_scaled)
expar = np.zeros((len(barcodes), len(genes)), dtype=float)
gene_names = np.array(
feature_ref["name"], dtype="|U64")
_, idx_g1, idx_g2 = np.intersect1d(genes, gene_names, return_indices=True)
expar[:, idx_g1] = npar[:, idx_g2]
np.savez_compressed(h5outpath, arr=npar, barcodes=barcodes, genes=genes)
# return npar, barcodes
return expar, barcodes, gmtmat, genes
def get_genes_from_txt(genepath):
select_genes = np.loadtxt(genepath, dtype="|U64")
return select_genes
def make_gmtmat(gmtpath, outdir, genepath):
gmtoutpath = os.path.join(
outdir, "gmt_conv_matrix.npz")
if os.path.exists(gmtoutpath):
npobj = np.load(gmtoutpath)
npar = npobj["arr"]
all_tfs = npobj["tfs"]
all_genes = npobj["genes"]
return npar, all_tfs, all_genes
gmtdict = {}
with open(gmtpath, "r") as gmtlink:
for gmtline in gmtlink:
gmtlist = gmtline.rstrip().split("\t")
gmtdict[gmtlist[0]] = gmtlist[2:]
all_tfs = np.array(list(gmtdict.keys()))
all_tfs = np.sort(all_tfs)
all_genes = list(gmtdict.values())
all_genes = list(chain.from_iterable(all_genes))
all_genes = np.unique(all_genes)
if genepath != "NA" and os.path.exists(genepath):
select_genes = get_genes_from_txt(genepath)
print("Limiting to {} genes found in {}".format(
len(select_genes), genepath))
all_genes = np.intersect1d(all_genes, select_genes)
print("Found {} TFs and {} genes in {}".format(
len(all_tfs), len(all_genes),
gmtpath))
npar = np.zeros((len(all_genes), len(all_tfs)), dtype=bool)
for tf in all_tfs:
idx_tf = np.where(all_tfs == tf)[0]
genes = gmtdict[tf]
# add index and +1 for the array
for gene in genes:
idx_gene = np.where(all_genes == gene)[0]
npar[idx_gene, idx_tf] = True
if idx_tf % 100 == 0:
print("{}/{} TFs added".format(idx_tf[0], len(all_tfs)))
np.savez_compressed(
gmtoutpath, arr=npar, tfs=all_tfs, genes=all_genes)
return npar, all_tfs, all_genes
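# Sketch of the GMT layout this parser expects (tab-separated, one TF gene set per line);
# the returned boolean matrix has one row per gene and one column per TF, True where the
# gene belongs to that TF's set:
#
#   TF_A    <description>   GENE1   GENE2
#   TF_B    <description>   GENE2   GENE3
#
# For the two lines above, the matrix covers the sorted gene list [GENE1, GENE2, GENE3]
# with GENE2 marked under both TF_A and TF_B.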
def get_n_params(model):
pp = 0
for p in list(model.parameters()):
nn = 1
for s in list(p.size()):
nn = nn * s
pp += nn
return pp
def get_paths(outdir, numlvs):
try:
job_id = os.environ["SLURM_JOB_ID"]
except Exception:
job_id = "NA"
logdir = os.path.join(outdir, "logs")
os.makedirs(logdir, exist_ok=True)
modelpath = os.path.join(
outdir, "VAE_{}LVS.pt".format(numlvs))
chkdir = os.path.join(
"/checkpoint/mkarimza",
job_id)
if not os.path.exists(chkdir):
chkdir = os.path.join(
logdir, "checkpoint")
os.makedirs(chkdir, exist_ok=True)
chkpath = os.path.join(
chkdir, "VAE_{}LVS.pt".format(numlvs))
return logdir, modelpath, chkpath
def train_model(vae, optimizer, MINIBATCH, MAXEPOCH, expar, logdir,
modelpath, chkpath, one_hot_ct_encoding,
loss_scalers, predict_celltypes,
celltypes=[], batch_idxs=None):
criterion_class = torch.nn.CrossEntropyLoss()
time_str = str(datetime.now())
time_str = time_str.replace(" ", "_")
time_str = time_str.replace(":", "0")
logpath = os.path.join(
logdir,
"training.log.{}.{}".format(
os.environ["SLURM_JOB_ID"], time_str))
accpath = logpath + "_accuracy.txt"
loglink = open(logpath, "w")
# header = ["Epoch", "Training.Loss", "MiniBatch.ID", "Time.Stamp"]
header = ["Epoch", "Reconstruction.Loss", "KLD",
"CE.Loss", "Accuracy", "MiniBatch.ID",
"Time.Stamp"]
loglink.write("\t".join(header) + "\n")
loglink.close()
if predict_celltypes:
acclink = open(accpath, "w")
header_acc = ["Epoch"]
for celltype in celltypes:
header_acc.append(celltype + ".acc")
acclink.write("\t".join(header_acc) + "\n")
acclink.close()
TOTBATCHIDX = int(expar.shape[0] / MINIBATCH)
# loss_scalers = np.array([300, 1, 1])
sampled_idxs = np.random.choice(
np.arange(expar.shape[0]), expar.shape[0], replace=False)
for epoch in range(MAXEPOCH):
running_loss_reconst = 0
running_kld = 0
running_ce = 0
running_loss = 0
accval = 0
celltype_resps = np.zeros(
(expar.shape[0]))
celltype_preds = np.zeros(
(expar.shape[0]))
for idxbatch in range(TOTBATCHIDX):
idxbatch_st = idxbatch * MINIBATCH
idxbatch_end = (idxbatch + 1) * MINIBATCH
if idxbatch_end > expar.shape[0]:
idxbatch_end = expar.shape[0]
cur_sidxs = sampled_idxs[idxbatch_st:idxbatch_end]
train1 = torch.from_numpy(
expar[cur_sidxs, :]).to(device).float()
if batch_idxs is not None:
batch_idxs_tensor = torch.from_numpy(
batch_idxs[cur_sidxs]).long().to(device).reshape(
-1, 1)
local_l_mean = np.mean(
np.apply_along_axis(
np.sum, 1, expar[cur_sidxs, :]))
local_l_var = np.var(
np.apply_along_axis(
np.sum, 1, expar[cur_sidxs, :]))
if batch_idxs is None:
outdict = vae(train1)
else:
outdict = vae(train1, batch_idxs_tensor)
ct_pred = outdict["ctpred"]
loss_1, loss_2 = loss_function(
outdict['qz_m'], outdict['qz_v'], train1,
outdict['px_rate'], outdict['px_r'],
outdict['px_dropout'], outdict['ql_m'],
outdict['ql_v'], True,
local_l_mean, local_l_var)
loss_1 = torch.mean(loss_1)
loss_2 = torch.mean(loss_2)
optimizer.zero_grad()
if predict_celltypes:
one_hot_resp = torch.max(
one_hot_ct_encoding[cur_sidxs],
1)[1].to(device).long()
one_hot_pred = torch.max(
ct_pred, 1)[1]
celltype_resps[cur_sidxs] = \
one_hot_resp.detach().cpu().numpy()
celltype_preds[cur_sidxs] = \
one_hot_pred.detach().cpu().numpy()
adacc = accuracy_score(
one_hot_resp.detach().cpu().numpy(),
one_hot_pred.detach().cpu().numpy())
accval += adacc
loss_3 = criterion_class(
ct_pred, one_hot_resp)
else:
loss_3 = 0
if idxbatch == 0:
print(loss_1, loss_2, loss_3)
if idxbatch == -1 and epoch % 25 == 0:
loss_scalers = np.array(
[loss_1.detach().cpu().numpy(),
loss_2.detach().cpu().numpy(),
loss_3.detach().cpu().numpy()])
if np.min(loss_scalers) < 0:
if loss_2 < 0:
loss_2 = loss_2 * -1
else:
raise ValueError("One of the losses are negative")
print(loss_1)
print(loss_2)
print(loss_3)
loss_scalers = loss_scalers / np.min(loss_scalers)
loss = (loss_1 / torch.tensor(loss_scalers[0])) + (
loss_2 / torch.tensor(loss_scalers[1])) + (
loss_3 / torch.tensor(loss_scalers[2]))
if idxbatch == 0:
print(loss)
if torch.isnan(loss):
print("Losses: {} {} {}".format(loss_1, loss_2, loss_3))
raise ValueError("NA occured in loss")
# print(loss)
if torch.cuda.is_available():
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
running_loss_reconst += (loss_1 / loss_scalers[0])
running_kld += (loss_2 / loss_scalers[1])
running_ce += (loss_3 / loss_scalers[2])
running_loss += loss
del train1, outdict
# del one_hot_temp
if torch.cuda.is_available():
torch.cuda.empty_cache()
cur_loss = running_loss / TOTBATCHIDX
cur_loss_reconst = running_loss_reconst / TOTBATCHIDX
cur_kld = running_kld / TOTBATCHIDX
cur_ce = running_ce / TOTBATCHIDX
accval = accval / TOTBATCHIDX
adlist_cts = [str(epoch)]
for k in range(len(celltypes)):
pred_cell = celltype_preds == k
resp_cell = celltype_resps == k
cur_acc = accuracy_score(
resp_cell, pred_cell)
adlist_cts.append(str(round(cur_acc, 3)))
if predict_celltypes:
with open(accpath, "a+") as acclink:
acclink.write("\t".join(adlist_cts) + "\n")
print("Epoch {}, Loss {} at {}".format(
epoch, cur_loss.item(), datetime.now()))
with open(logpath, "a+") as loglink:
adlist = [str(epoch), str(cur_loss_reconst.item()),
str(cur_kld.item()), str(cur_ce.item()),
str(round(accval, 3)),
str(idxbatch), str(datetime.now())]
# adlist = [str(epoch), str(cur_loss.item()),
# str(idxbatch), str(datetime.now())]
loglink.write("\t".join(adlist) + "\n")
if epoch % 10 == 0:
checkpoint = {
'model': vae.state_dict(),
'optimizer': optimizer.state_dict(),
}
if torch.cuda.is_available():
checkpoint["amp"] = amp.state_dict()
for eachpath in [modelpath, chkpath]:
torch.save(checkpoint, eachpath)
return vae
def make_labels(metapath, expar, barcodes):
if "S" in str(barcodes.dtype):
barcodes = np.array(barcodes, dtype="|U64")
metadf = pd.read_csv(metapath, sep="\t", index_col=0)
if "CellType" not in metadf.columns:
if "Site_Primary" in metadf.columns:
metadf["CellType"] = metadf["Site_Primary"]
metadf["Barcode"] = metadf.index
classes = np.unique(metadf["CellType"])
classes = np.array(
[each for each in classes if "Not" not in each])
classes = np.array(
[each for each in classes if "nan" not in each])
metadf = metadf[metadf["CellType"].isin(classes)]
metadf = metadf[metadf["Barcode"].isin(barcodes)]
new_barcodes, idx_1, idx_2 = np.intersect1d(
barcodes, np.array(metadf["Barcode"]),
return_indices=True)
outar = expar[idx_1, :]
outdf = metadf.iloc[idx_2, :]
out_barcodes = np.array(barcodes, dtype="|U64")[idx_1]
one_hot_ct_encoding = pd.get_dummies(outdf["CellType"])
one_hot_tensor = torch.from_numpy(np.array(one_hot_ct_encoding))
return outar, outdf, out_barcodes, one_hot_tensor
def load_npar(nparpath, genes, outdir, gmtmat,
metapath):
if ".npz" in nparpath:
expar, barcodes, gmtmat, genes = read_npz(
nparpath, genes, outdir, gmtmat)
list_temp = make_labels(metapath, expar, barcodes)
elif ".gct" in nparpath or ".tsv" in nparpath:
expar, barcodes, gmtmat, genes = read_tsv(
nparpath, genes, outdir, gmtmat, False)
from train_multitask_ccle import make_labels as tmp_fnc
list_temp = tmp_fnc(
metapath, expar, barcodes)
elif ".h5" in nparpath:
expar, barcodes, gmtmat, genes = read_h5(
nparpath, genes, outdir, gmtmat)
list_temp = make_labels(metapath, expar, barcodes)
expar, metadf, barcodes, _ = list_temp
return expar, metadf, barcodes, genes, gmtmat
def filter_by_var(expar, genes, gmtmat, num_genes):
vars_genes = np.apply_along_axis(np.var, 0, expar)
idx_sorted = np.argsort(vars_genes)[::-1]
newexp = expar[:, idx_sorted[:num_genes]]
newgenes = genes[idx_sorted[:num_genes]]
gmtmat_new = gmtmat[idx_sorted[:num_genes], :]
return newexp, newgenes, gmtmat_new
def intersect_lists(genes_list):
genes = np.intersect1d(genes_list[0], genes_list[1])
for i in range(2, len(genes_list)):
genes = np.intersect1d(genes, genes_list[i])
return genes
def load_inputs(nparpaths, gmtmat, outdir,
genes, metapaths, filter_var=False,
num_genes=2000):
GMTMAT = gmtmat
gmtmat_genes = genes
metadf_list = []
expar_list = []
barcodes_list = []
genes_list = []
celltypes_list = []
num_barcodes = 0
for i in range(len(nparpaths)):
print("Loading {}".format(nparpaths[i]))
expar, metadf, barcodes, genes, gmtmat = load_npar(
nparpaths[i], genes, outdir, gmtmat, metapaths[i])
expar_list.append(expar)
barcodes_list.append(barcodes)
celltypes_list.append(
np.array(metadf["CellType"], dtype="|U64"))
addf = pd.DataFrame(
dict(OriginalBarcode=barcodes, CellType=celltypes_list[-1]))
addf["Dataset"] = "File.{}.".format(i + 1)
addf["Barcode"] = addf["Dataset"] + addf["OriginalBarcode"]
addf["Batch.Index"] = i
metadf_list.append(addf)
genes_list.append(genes)
num_barcodes += len(barcodes)
metadf = pd.concat(metadf_list)
metadf.index = metadf["Barcode"]
if len(genes_list) > 1:
genes = intersect_lists(genes_list)
else:
genes = genes_list[0]
# Filter gmtmat
_, idx_1, idx_2 = np.intersect1d(gmtmat_genes, genes, return_indices=True)
# gmtmat = gmtmat[idx_1, :]
gmtmat = GMTMAT[idx_1, :]
npar = np.zeros((num_barcodes, len(genes)), dtype=int)
i_st = 0
i_end = 0
for k in range(len(expar_list)):
cur_genes = genes_list[k]
expar = expar_list[k]
shared_genes, idx_1, idx_2 = np.intersect1d(
genes, cur_genes, return_indices=True)
i_end = i_st + expar.shape[0]
npar[i_st:i_end, idx_1] = expar[:, idx_2]
i_st = i_end
if filter_var:
print("Filtering by variance")
npar, genes, gmtmat = filter_by_var(
npar, genes, gmtmat, num_genes)
one_hot_ct_encoding = pd.get_dummies(metadf["CellType"])
one_hot_tensor = torch.from_numpy(np.array(one_hot_ct_encoding))
out_dict = dict(
expar=npar,
metadf=metadf,
barcodes=np.array(metadf["Barcode"]),
genes=genes,
gmtmat=gmtmat,
cellTypes=np.array(celltypes_list),
batch_idx=np.array(metadf["Batch.Index"]),
one_hot=one_hot_tensor)
return out_dict
def main(gmtpath, nparpaths, outdir, numlvs, metapaths,
dont_train=False, genepath="NA", existingmodelpath="NA",
use_connections=True, loss_scalers=[1, 1, 1],
predict_celltypes=True, num_celltypes=59, filter_var=False,
num_genes=2000, include_batches=False):
BATCHEFFECT_NUM = 0
if include_batches:
BATCHEFFECT_NUM = len(nparpaths)
MINIBATCH = 32
MAXEPOCH = 20
gmtmat, tfs, genes = make_gmtmat(gmtpath, outdir, genepath)
# expar, barcodes = read_h5(h5path, genes, outdir)
dict_inputs = load_inputs(
nparpaths, gmtmat, outdir, genes, metapaths, filter_var,
num_genes)
expar = dict_inputs["expar"]
metadf = dict_inputs["metadf"]
gmtmat = dict_inputs["gmtmat"]
one_hot_ct_encoding = dict_inputs["one_hot"]
barcodes = dict_inputs["barcodes"]
batch_idxs = dict_inputs["batch_idx"]
if not include_batches:
batch_idxs = None
# celltypes = dict_inputs["cellTypes"]
celltypes = []
if predict_celltypes:
celltypes = list(pd.unique(metadf["CellType"]))
celltypes.sort()
# save metadf
metadf.to_csv(
os.path.join(outdir, "metadata.tsv.gz"),
sep="\t", compression="gzip")
# Save genes
print("Shape of expar is : {}".format(expar.shape))
save_genes(genes, outdir)
print("Max in expar is {}".format(np.max(expar)))
if use_connections:
gmttensor = torch.from_numpy(
np.transpose(gmtmat)).to(device).long()
else:
gmttensor = torch.ones(
gmtmat.shape[1], gmtmat.shape[0]).to(device).long()
print("Shape of expar is : {}".format(expar.shape))
logdir, modelpath, chkpath = get_paths(outdir, numlvs)
if existingmodelpath == "NA":
existingmodelpath = modelpath
vae = VAE(expar.shape[1], # num genes
gmttensor,
num_celltypes,
BATCHEFFECT_NUM, # batch
0, # labels
gmtmat.shape[1], # hiddensize
numlvs)
n_params = get_n_params(vae)
print(vae)
print("VAE has {} parameters".format(n_params))
vae.to(device)
# optimizer = adabound.AdaBound(
# vae.parameters(), lr=0.001, final_lr=0.1)
optimizer = torch.optim.Adam(
vae.parameters(), lr=0.002)
if torch.cuda.is_available():
vae, optimizer = amp.initialize(
vae, optimizer, opt_level=opt_level)
vae, optimizer = load_existing_model(
existingmodelpath, chkpath, vae, optimizer)
if not dont_train:
np.random.seed(42)
# For 10 times, sample 1000 cells
for i in range(20):
# idx_rand = np.random.choice(
# np.arange(expar.shape[0]), SAMPLE_IDXS)
vae = train_model(
vae, optimizer, MINIBATCH, MAXEPOCH,
expar, logdir,
modelpath, chkpath, one_hot_ct_encoding,
loss_scalers, predict_celltypes,
celltypes, batch_idxs)
reconst, mumat, sd2mat, tf_act = apply_model(
vae, expar, numlvs, MINIBATCH, batch_idxs)
mudf = pd.DataFrame(mumat)
mudf.columns = ["LV.mu.{}".format(each)
for each in range(numlvs)]
mudf["Index"] = np.array(
barcodes, dtype="|U64")
mudf.index = mudf["Index"]
mudf.to_csv(
os.path.join(outdir, "VAE_mu-matrix.tsv.gz"),
compression="gzip", sep="\t")
make_plot_umap(mudf, metadf, outdir, numlvs)
reconst, mumat, sd2mat, tf_act = apply_model(
vae, expar, numlvs, MINIBATCH, batch_idxs)
tf_act_df = pd.DataFrame(tf_act)
tf_act_df.index = np.array(
barcodes, dtype="|U64")
tf_act_df.columns = tfs
tf_act_df["Labels"] = metadf.loc[tf_act_df.index]["CellType"]
tf_act_df.to_csv(
os.path.join(outdir, "VAE-TF-adjusted-weights_CellxTF.tsv.gz"),
sep="\t", compression="gzip")
# zmat = np_reparameterize(mumat, sd2mat)
zmat = torch_reparameterize(mumat, sd2mat)
zdf = pd.DataFrame(zmat)
zdf.columns = ["LV.Z.{}".format(each)
for each in range(numlvs)]
zdf["Index"] = np.array(
barcodes, dtype="|U64")
zdf.index = np.array(
barcodes, dtype="|U64")
zdf.to_csv(
os.path.join(outdir, "VAE_Z-matrix.tsv.gz"),
compression="gzip", sep="\t")
outdir_full = os.path.join(
outdir, "fullDatasetZPlot")
os.makedirs(outdir_full, exist_ok=True)
make_plot_umap(zdf, metadf, outdir_full, numlvs)
mudf = pd.DataFrame(mumat)
mudf.columns = ["LV.mu.{}".format(each)
for each in range(numlvs)]
mudf["Index"] = np.array(
barcodes, dtype="|U64")
mudf.index = mudf["Index"]
mudf.to_csv(
os.path.join(outdir, "VAE_mu-matrix.tsv.gz"),
compression="gzip", sep="\t")
outdir_full = os.path.join(
outdir, "fullDatasetPlot")
os.makedirs(outdir_full, exist_ok=True)
make_plot_umap(mudf, metadf, outdir_full, numlvs)
sd2df = pd.DataFrame(sd2mat)
sd2df.columns = [
"LV.logVAR.{}".format(each)
for each in range(numlvs)]
sd2df["Index"] = mudf["Index"]
sd2df.index = mudf["Index"]
sd2df.to_csv(
os.path.join(outdir, "VAE_variance-matrix.tsv.gz"),
compression="gzip", sep="\t")
def np_reparameterize(mu, logvar):
mu_tensor = torch.from_numpy(mu)
logvar_tensor = torch.from_numpy(logvar)
std_tensor = torch.exp(0.5 * logvar_tensor)
eps_tensor = torch.randn_like(std_tensor)
ztensor = mu_tensor + eps_tensor * std_tensor
zmat = ztensor.numpy()
return zmat
def load_existing_model(modelpath, chkpath, vae, optimizer):
for eachpath in [modelpath, chkpath]:
if os.path.exists(eachpath):
try:
checkpoint = torch.load(eachpath)
state_dict = checkpoint['model']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
k = k.replace('module.', '')
new_state_dict[k] = v
vae.load_state_dict(new_state_dict)
optimizer.load_state_dict(checkpoint['optimizer'])
if torch.cuda.is_available():
amp.load_state_dict(checkpoint['amp'])
print("Loaded from {}".format(eachpath))
return vae, optimizer
except Exception:
pass
print("Didn't load from any")
return vae, optimizer
def save_genes(genes, outdir):
outpath = os.path.join(outdir, "genes.txt")
outlink = open(outpath, "w")
for gene in genes:
outlink.write(gene + "\n")
outlink.close()
def torch_reparameterize(mumat, varmat):
from torch.distributions import Normal
mu = torch.from_numpy(mumat)
var = torch.from_numpy(varmat)
normtensor = Normal(mu, var.sqrt()).rsample()
zmat = normtensor.detach().numpy()
return zmat
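# Both reparameterize helpers draw z = mu + eps * sigma with eps ~ N(0, 1); note that
# np_reparameterize interprets its second argument as a log-variance (std = exp(0.5 * logvar))
# while torch_reparameterize interprets it as a variance (std = sqrt(var)), so callers must
# pass the matching quantity.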
def get_hidden_layer(vae, train1, batch_tensor=None, n_batch=0):
if n_batch > 0 and batch_tensor is not None:
batch_ar_temp = batch_tensor.reshape(-1).cpu().numpy()
ad_mat = torch.zeros((train1.shape[0], n_batch))
for j in range(n_batch):
idx_j = np.where(batch_ar_temp == j)[0]
ad_mat[idx_j, j] = 1
train1 = torch.cat((train1, ad_mat.to(train1.device)), dim=-1)
weight_mat = vae.z_encoder.encoder.fc_layers[0][0].weights
connections = vae.z_encoder.encoder.fc_layers[0][0].connections
enforced_weights = torch.mul(
weight_mat, connections)
ew_times_x = torch.mm(train1, enforced_weights.detach().t())
add_bias = vae.z_encoder.encoder.fc_layers[0][0].bias
ew_times_x = torch.add(ew_times_x, add_bias)
output = ew_times_x.cpu().detach().numpy()
return output
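# Run the trained VAE over the expression matrix in minibatches, collecting the
# reconstruction, the latent posterior mean/variance, and the masked
# hidden-layer (TF) activations.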
def apply_model(vae, expar, numlvs, MINIBATCH, batch_idxs=None):
n_batch = 0
batch_tensor = None
if batch_idxs is not None:
n_batch = len(np.unique(batch_idxs))
conn_dim = vae.z_encoder.encoder.fc_layers[0][0].connections.shape[0]
reconst = np.zeros(expar.shape)
mumat = np.zeros((expar.shape[0], numlvs))
sd2mat = np.zeros((expar.shape[0], numlvs))
tf_activation = np.zeros((expar.shape[0], conn_dim))
TOTBATCHIDX = int(expar.shape[0] / MINIBATCH) + 1
for idxbatch in range(TOTBATCHIDX):
idxbatch_st = idxbatch * MINIBATCH
if idxbatch_st >= expar.shape[0]:
break
idxbatch_end = min(
[(idxbatch + 1) * MINIBATCH, expar.shape[0]])
train1 = torch.from_numpy(
expar[idxbatch_st:idxbatch_end, :]).to(device).float()
if batch_idxs is None:
outdict = vae(train1)
else:
batch_tensor = torch.from_numpy(
batch_idxs[idxbatch_st:idxbatch_end]).to(
device).long().reshape(-1, 1)
outdict = vae(train1, batch_tensor)
reconst[idxbatch_st:idxbatch_end, :] = \
outdict["px_scale"].cpu().detach().numpy()
mumat[idxbatch_st:idxbatch_end, :] = \
outdict["qz_m"].cpu().detach().numpy()
sd2mat[idxbatch_st:idxbatch_end, :] = \
outdict["qz_v"].cpu().detach().numpy()
tf_activation[idxbatch_st:idxbatch_end, :] = \
get_hidden_layer(vae, train1, batch_tensor, n_batch)
if idxbatch % 100 == 0:
print("Applied on {}/{}".format(idxbatch, TOTBATCHIDX))
return reconst, mumat, sd2mat, tf_activation
if __name__ == "__main__":
parser = ArgumentParser(
description="Train VAE using "
"mapping of genes to TFs")
parser.add_argument(
"gmtpath",
help="Path to GMT file mapping "
"genes to TFs")
parser.add_argument(
"outdir",
help="Path to output directory for "
"saving the model and log files")
parser.add_argument(
"--nparpaths",
nargs="*",
help="Space-separated paths to scRNA-seq "
"file npz containing arr, rows, and cols")
parser.add_argument(
"--numlvs",
type=int,
default=10,
help="Number of latent variables")
parser.add_argument(
"--dont-train",
action="store_true",
help="Specify if you want to apply an existing "
"model which is stored in outdir")
parser.add_argument(
"--genepath",
default="NA",
help="Path to .txt file containing "
"one gene per line to limit the list "
"of genes we use here")
parser.add_argument(
"--modelpath",
default="NA",
help="Specify if you don't want the "
"model existing in <outdir>/VAE_<--numlvs>LVS.pt")
parser.add_argument(
"--metapaths",
nargs="*",
required=True,
help="Space-separated path to metadata tsv with "
"a column named as barcode and a "
"column named as cell type")
parser.add_argument(
"--use-connections",
action="store_true",
help="If set, will enforce weights that don't "
"correspong to TF-gene mappings to be zero")
parser.add_argument(
"--loss-scalers",
nargs="*",
default=[1, 1, 1],
type=float,
help="Specify values to divide "
"MSE, KLD, and CE losses by: example: "
"--loss-scalers 100 1 1")
parser.add_argument(
"--predict-celltypes",
action="store_true",
help="Specify --predict-celltypes to "
"optimize the cell type prediction task as well")
parser.add_argument(
"--num-celltypes",
default=59,
type=int,
help="Number of cell types to predict (must match "
"the column CellType in metadata file)")
parser.add_argument(
"--filter-var",
action="store_true",
help="If specified, will filter by top 2000 most "
"variant genes")
parser.add_argument(
"--num-genes",
default=2000,
type=int,
help="Number of genes to filter by highest variance")
parser.add_argument(
"--include-batches",
action="store_true",
help="Specify if more than one h5 file is being passed "
"and you want to allow scVI to correct the batches")
args = parser.parse_args()
print(args)
modelpath = args.modelpath
if modelpath == "NA":
modelpath = os.path.join(
args.outdir, "VAE_{}LVS.pt".format(args.numlvs))
main(args.gmtpath, args.nparpaths,
args.outdir, args.numlvs, args.metapaths,
args.dont_train, args.genepath, modelpath,
args.use_connections, args.loss_scalers,
args.predict_celltypes, args.num_celltypes,
args.filter_var, args.num_genes,
args.include_batches)
|
[
"apex.amp.state_dict",
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"numpy.argsort",
"numpy.savez_compressed",
"numpy.arange",
"seaborn.relplot",
"train_multitask_ccle.make_labels",
"os.path.join",
"torch.isnan",
"numpy.unique",
"pandas.DataFrame",
"torch.ones",
"torch.load",
"numpy.transpose",
"os.path.exists",
"numpy.apply_along_axis",
"torch.exp",
"numpy.max",
"apex.amp.scale_loss",
"numpy.loadtxt",
"model.loss_function",
"torch.zeros",
"numpy.intersect1d",
"datetime.datetime.now",
"pandas.concat",
"torch.mean",
"torch.randn_like",
"pandas.get_dummies",
"torch.mul",
"umap.UMAP",
"numpy.sort",
"numpy.min",
"torch.cuda.is_available",
"torch.max",
"tables.open_file",
"model.VAE",
"torch.from_numpy",
"apex.amp.load_state_dict",
"apex.amp.initialize",
"os.makedirs",
"torch.add",
"torch.nn.CrossEntropyLoss",
"numpy.zeros",
"pandas.unique",
"torch.save",
"scipy.sparse.csc_matrix",
"numpy.where",
"numpy.array",
"torch.cuda.empty_cache",
"collections.OrderedDict",
"itertools.chain.from_iterable",
"torch.tensor"
] |
[((578, 622), 'os.path.join', 'os.path.join', (['outdir', '"""cellByGeneMatrix.npz"""'], {}), "(outdir, 'cellByGeneMatrix.npz')\n", (590, 622), False, 'import os\n'), ((1460, 1512), 'numpy.intersect1d', 'np.intersect1d', (['genes', 'ar_genes'], {'return_indices': '(True)'}), '(genes, ar_genes, return_indices=True)\n', (1474, 1512), True, 'import numpy as np\n'), ((1612, 1630), 'numpy.transpose', 'np.transpose', (['npar'], {}), '(npar)\n', (1624, 1630), True, 'import numpy as np\n'), ((1635, 1710), 'numpy.savez_compressed', 'np.savez_compressed', (['h5outpath'], {'arr': 'npar', 'barcodes': 'barcodes', 'genes': 'ar_genes'}), '(h5outpath, arr=npar, barcodes=barcodes, genes=ar_genes)\n', (1654, 1710), True, 'import numpy as np\n'), ((1926, 1957), 'numpy.array', 'np.array', (['mudf.iloc[:, :numlvs]'], {}), '(mudf.iloc[:, :numlvs])\n', (1934, 1957), True, 'import numpy as np\n'), ((4087, 4121), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (4098, 4121), False, 'import os\n'), ((5868, 5912), 'os.path.join', 'os.path.join', (['outdir', '"""cellByGeneMatrix.npz"""'], {}), "(outdir, 'cellByGeneMatrix.npz')\n", (5880, 5912), False, 'import os\n'), ((5934, 5970), 'numpy.load', 'np.load', (['nparpath'], {'allow_pickle': '(True)'}), '(nparpath, allow_pickle=True)\n', (5941, 5970), True, 'import numpy as np\n'), ((6149, 6201), 'numpy.intersect1d', 'np.intersect1d', (['genes', 'ar_genes'], {'return_indices': '(True)'}), '(genes, ar_genes, return_indices=True)\n', (6163, 6201), True, 'import numpy as np\n'), ((6387, 6446), 'numpy.savez_compressed', 'np.savez_compressed', (['h5outpath'], {'arr': 'npar', 'barcodes': 'barcodes'}), '(h5outpath, arr=npar, barcodes=barcodes)\n', (6406, 6446), True, 'import numpy as np\n'), ((6554, 6598), 'os.path.join', 'os.path.join', (['outdir', '"""cellByGeneMatrix.npz"""'], {}), "(outdir, 'cellByGeneMatrix.npz')\n", (6566, 6598), False, 'import os\n'), ((6897, 6928), 'numpy.transpose', 'np.transpose', (['npar[idx_gexp, :]'], {}), '(npar[idx_gexp, :])\n', (6909, 6928), True, 'import numpy as np\n'), ((7293, 7336), 'numpy.array', 'np.array', (["feature_ref['name']"], {'dtype': '"""|U64"""'}), "(feature_ref['name'], dtype='|U64')\n", (7301, 7336), True, 'import numpy as np\n'), ((7370, 7424), 'numpy.intersect1d', 'np.intersect1d', (['genes', 'gene_names'], {'return_indices': '(True)'}), '(genes, gene_names, return_indices=True)\n', (7384, 7424), True, 'import numpy as np\n'), ((7468, 7540), 'numpy.savez_compressed', 'np.savez_compressed', (['h5outpath'], {'arr': 'npar', 'barcodes': 'barcodes', 'genes': 'genes'}), '(h5outpath, arr=npar, barcodes=barcodes, genes=genes)\n', (7487, 7540), True, 'import numpy as np\n'), ((7666, 7700), 'numpy.loadtxt', 'np.loadtxt', (['genepath'], {'dtype': '"""|U64"""'}), "(genepath, dtype='|U64')\n", (7676, 7700), True, 'import numpy as np\n'), ((7788, 7831), 'os.path.join', 'os.path.join', (['outdir', '"""gmt_conv_matrix.npz"""'], {}), "(outdir, 'gmt_conv_matrix.npz')\n", (7800, 7831), False, 'import os\n'), ((7848, 7874), 'os.path.exists', 'os.path.exists', (['gmtoutpath'], {}), '(gmtoutpath)\n', (7862, 7874), False, 'import os\n'), ((8291, 8307), 'numpy.sort', 'np.sort', (['all_tfs'], {}), '(all_tfs)\n', (8298, 8307), True, 'import numpy as np\n'), ((8416, 8436), 'numpy.unique', 'np.unique', (['all_genes'], {}), '(all_genes)\n', (8425, 8436), True, 'import numpy as np\n'), ((9236, 9307), 'numpy.savez_compressed', 'np.savez_compressed', (['gmtoutpath'], {'arr': 'npar', 'tfs': 'all_tfs', 'genes': 
'all_genes'}), '(gmtoutpath, arr=npar, tfs=all_tfs, genes=all_genes)\n', (9255, 9307), True, 'import numpy as np\n'), ((9676, 9704), 'os.path.join', 'os.path.join', (['outdir', '"""logs"""'], {}), "(outdir, 'logs')\n", (9688, 9704), False, 'import os\n'), ((9709, 9743), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (9720, 9743), False, 'import os\n'), ((9834, 9878), 'os.path.join', 'os.path.join', (['"""/checkpoint/mkarimza"""', 'job_id'], {}), "('/checkpoint/mkarimza', job_id)\n", (9846, 9878), False, 'import os\n'), ((10398, 10425), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (10423, 10425), False, 'import torch\n'), ((17420, 17464), 'pandas.read_csv', 'pd.read_csv', (['metapath'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(metapath, sep='\\t', index_col=0)\n", (17431, 17464), True, 'import pandas as pd\n'), ((17666, 17695), 'numpy.unique', 'np.unique', (["metadf['CellType']"], {}), "(metadf['CellType'])\n", (17675, 17695), True, 'import numpy as np\n'), ((17710, 17767), 'numpy.array', 'np.array', (["[each for each in classes if 'Not' not in each]"], {}), "([each for each in classes if 'Not' not in each])\n", (17718, 17767), True, 'import numpy as np\n'), ((17791, 17848), 'numpy.array', 'np.array', (["[each for each in classes if 'nan' not in each]"], {}), "([each for each in classes if 'nan' not in each])\n", (17799, 17848), True, 'import numpy as np\n'), ((18238, 18271), 'pandas.get_dummies', 'pd.get_dummies', (["outdf['CellType']"], {}), "(outdf['CellType'])\n", (18252, 18271), True, 'import pandas as pd\n'), ((19283, 19320), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.var', '(0)', 'expar'], {}), '(np.var, 0, expar)\n', (19302, 19320), True, 'import numpy as np\n'), ((19596, 19640), 'numpy.intersect1d', 'np.intersect1d', (['genes_list[0]', 'genes_list[1]'], {}), '(genes_list[0], genes_list[1])\n', (19610, 19640), True, 'import numpy as np\n'), ((20792, 20814), 'pandas.concat', 'pd.concat', (['metadf_list'], {}), '(metadf_list)\n', (20801, 20814), True, 'import pandas as pd\n'), ((21006, 21062), 'numpy.intersect1d', 'np.intersect1d', (['gmtmat_genes', 'genes'], {'return_indices': '(True)'}), '(gmtmat_genes, genes, return_indices=True)\n', (21020, 21062), True, 'import numpy as np\n'), ((21698, 21732), 'pandas.get_dummies', 'pd.get_dummies', (["metadf['CellType']"], {}), "(metadf['CellType'])\n", (21712, 21732), True, 'import pandas as pd\n'), ((23914, 24009), 'model.VAE', 'VAE', (['expar.shape[1]', 'gmttensor', 'num_celltypes', 'BATCHEFFECT_NUM', '(0)', 'gmtmat.shape[1]', 'numlvs'], {}), '(expar.shape[1], gmttensor, num_celltypes, BATCHEFFECT_NUM, 0, gmtmat.\n shape[1], numlvs)\n', (23917, 24009), False, 'from model import VAE\n'), ((24420, 24445), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (24443, 24445), False, 'import torch\n'), ((25799, 25819), 'pandas.DataFrame', 'pd.DataFrame', (['tf_act'], {}), '(tf_act)\n', (25811, 25819), True, 'import pandas as pd\n'), ((25842, 25874), 'numpy.array', 'np.array', (['barcodes'], {'dtype': '"""|U64"""'}), "(barcodes, dtype='|U64')\n", (25850, 25874), True, 'import numpy as np\n'), ((26213, 26231), 'pandas.DataFrame', 'pd.DataFrame', (['zmat'], {}), '(zmat)\n', (26225, 26231), True, 'import pandas as pd\n'), ((26339, 26371), 'numpy.array', 'np.array', (['barcodes'], {'dtype': '"""|U64"""'}), "(barcodes, dtype='|U64')\n", (26347, 26371), True, 'import numpy as np\n'), ((26397, 26429), 'numpy.array', 'np.array', (['barcodes'], 
{'dtype': '"""|U64"""'}), "(barcodes, dtype='|U64')\n", (26405, 26429), True, 'import numpy as np\n'), ((26564, 26604), 'os.path.join', 'os.path.join', (['outdir', '"""fullDatasetZPlot"""'], {}), "(outdir, 'fullDatasetZPlot')\n", (26576, 26604), False, 'import os\n'), ((26618, 26657), 'os.makedirs', 'os.makedirs', (['outdir_full'], {'exist_ok': '(True)'}), '(outdir_full, exist_ok=True)\n', (26629, 26657), False, 'import os\n'), ((26722, 26741), 'pandas.DataFrame', 'pd.DataFrame', (['mumat'], {}), '(mumat)\n', (26734, 26741), True, 'import pandas as pd\n'), ((26853, 26885), 'numpy.array', 'np.array', (['barcodes'], {'dtype': '"""|U64"""'}), "(barcodes, dtype='|U64')\n", (26861, 26885), True, 'import numpy as np\n'), ((27053, 27092), 'os.path.join', 'os.path.join', (['outdir', '"""fullDatasetPlot"""'], {}), "(outdir, 'fullDatasetPlot')\n", (27065, 27092), False, 'import os\n'), ((27106, 27145), 'os.makedirs', 'os.makedirs', (['outdir_full'], {'exist_ok': '(True)'}), '(outdir_full, exist_ok=True)\n', (27117, 27145), False, 'import os\n'), ((27212, 27232), 'pandas.DataFrame', 'pd.DataFrame', (['sd2mat'], {}), '(sd2mat)\n', (27224, 27232), True, 'import pandas as pd\n'), ((27562, 27582), 'torch.from_numpy', 'torch.from_numpy', (['mu'], {}), '(mu)\n', (27578, 27582), False, 'import torch\n'), ((27603, 27627), 'torch.from_numpy', 'torch.from_numpy', (['logvar'], {}), '(logvar)\n', (27619, 27627), False, 'import torch\n'), ((27645, 27675), 'torch.exp', 'torch.exp', (['(0.5 * logvar_tensor)'], {}), '(0.5 * logvar_tensor)\n', (27654, 27675), False, 'import torch\n'), ((27693, 27721), 'torch.randn_like', 'torch.randn_like', (['std_tensor'], {}), '(std_tensor)\n', (27709, 27721), False, 'import torch\n'), ((28744, 28777), 'os.path.join', 'os.path.join', (['outdir', '"""genes.txt"""'], {}), "(outdir, 'genes.txt')\n", (28756, 28777), False, 'import os\n'), ((28984, 29007), 'torch.from_numpy', 'torch.from_numpy', (['mumat'], {}), '(mumat)\n', (29000, 29007), False, 'import torch\n'), ((29018, 29042), 'torch.from_numpy', 'torch.from_numpy', (['varmat'], {}), '(varmat)\n', (29034, 29042), False, 'import torch\n'), ((29727, 29761), 'torch.mul', 'torch.mul', (['weight_mat', 'connections'], {}), '(weight_mat, connections)\n', (29736, 29761), False, 'import torch\n'), ((29911, 29942), 'torch.add', 'torch.add', (['ew_times_x', 'add_bias'], {}), '(ew_times_x, add_bias)\n', (29920, 29942), False, 'import torch\n'), ((30279, 30300), 'numpy.zeros', 'np.zeros', (['expar.shape'], {}), '(expar.shape)\n', (30287, 30300), True, 'import numpy as np\n'), ((30313, 30347), 'numpy.zeros', 'np.zeros', (['(expar.shape[0], numlvs)'], {}), '((expar.shape[0], numlvs))\n', (30321, 30347), True, 'import numpy as np\n'), ((30361, 30395), 'numpy.zeros', 'np.zeros', (['(expar.shape[0], numlvs)'], {}), '((expar.shape[0], numlvs))\n', (30369, 30395), True, 'import numpy as np\n'), ((30416, 30452), 'numpy.zeros', 'np.zeros', (['(expar.shape[0], conn_dim)'], {}), '((expar.shape[0], conn_dim))\n', (30424, 30452), True, 'import numpy as np\n'), ((31734, 31803), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Train VAE using mapping of genes to TFs"""'}), "(description='Train VAE using mapping of genes to TFs')\n", (31748, 31803), False, 'from argparse import ArgumentParser\n'), ((454, 479), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (477, 479), False, 'import torch\n'), ((674, 750), 'pandas.read_csv', 'pd.read_csv', (['nparpath'], {'sep': '"""\t"""', 'index_col': '(0)', 'compression': 
'"""gzip"""', 'skiprows': '(2)'}), "(nparpath, sep='\\t', index_col=0, compression='gzip', skiprows=2)\n", (685, 750), True, 'import pandas as pd\n'), ((915, 942), 'numpy.array', 'np.array', (['rnadf.iloc[:, 1:]'], {}), '(rnadf.iloc[:, 1:])\n', (923, 942), True, 'import numpy as np\n'), ((962, 992), 'numpy.array', 'np.array', (["rnadf['Description']"], {}), "(rnadf['Description'])\n", (970, 992), True, 'import numpy as np\n'), ((1012, 1039), 'numpy.array', 'np.array', (['rnadf.columns[1:]'], {}), '(rnadf.columns[1:])\n', (1020, 1039), True, 'import numpy as np\n'), ((1066, 1130), 'pandas.read_csv', 'pd.read_csv', (['nparpath'], {'sep': '"""\t"""', 'index_col': '(0)', 'compression': '"""gzip"""'}), "(nparpath, sep='\\t', index_col=0, compression='gzip')\n", (1077, 1130), True, 'import pandas as pd\n'), ((1171, 1186), 'numpy.array', 'np.array', (['rnadf'], {}), '(rnadf)\n', (1179, 1186), True, 'import numpy as np\n'), ((1237, 1260), 'numpy.array', 'np.array', (['rnadf.columns'], {}), '(rnadf.columns)\n', (1245, 1260), True, 'import numpy as np\n'), ((1362, 1398), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.sum', '(0)', 'npar'], {}), '(np.sum, 0, npar)\n', (1381, 1398), True, 'import numpy as np\n'), ((4746, 4777), 'tables.open_file', 'tables.open_file', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (4762, 4777), False, 'import tables\n'), ((5118, 5176), 'scipy.sparse.csc_matrix', 'sp_sparse.csc_matrix', (['(data, indices, indptr)'], {'shape': 'shape'}), '((data, indices, indptr), shape=shape)\n', (5138, 5176), True, 'import scipy.sparse as sp_sparse\n'), ((6048, 6066), 'numpy.transpose', 'np.transpose', (['npar'], {}), '(npar)\n', (6060, 6066), True, 'import numpy as np\n'), ((7892, 7911), 'numpy.load', 'np.load', (['gmtoutpath'], {}), '(gmtoutpath)\n', (7899, 7911), True, 'import numpy as np\n'), ((8368, 8398), 'itertools.chain.from_iterable', 'chain.from_iterable', (['all_genes'], {}), '(all_genes)\n', (8387, 8398), False, 'from itertools import chain\n'), ((8465, 8489), 'os.path.exists', 'os.path.exists', (['genepath'], {}), '(genepath)\n', (8479, 8489), False, 'import os\n'), ((8662, 8701), 'numpy.intersect1d', 'np.intersect1d', (['all_genes', 'select_genes'], {}), '(all_genes, select_genes)\n', (8676, 8701), True, 'import numpy as np\n'), ((9907, 9929), 'os.path.exists', 'os.path.exists', (['chkdir'], {}), '(chkdir)\n', (9921, 9929), False, 'import os\n'), ((9948, 9982), 'os.path.join', 'os.path.join', (['logdir', '"""checkpoint"""'], {}), "(logdir, 'checkpoint')\n", (9960, 9982), False, 'import os\n'), ((10004, 10038), 'os.makedirs', 'os.makedirs', (['chkdir'], {'exist_ok': '(True)'}), '(chkdir, exist_ok=True)\n', (10015, 10038), False, 'import os\n'), ((10445, 10459), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10457, 10459), False, 'from datetime import datetime\n'), ((11412, 11437), 'numpy.arange', 'np.arange', (['expar.shape[0]'], {}), '(expar.shape[0])\n', (11421, 11437), True, 'import numpy as np\n'), ((11653, 11677), 'numpy.zeros', 'np.zeros', (['expar.shape[0]'], {}), '(expar.shape[0])\n', (11661, 11677), True, 'import numpy as np\n'), ((11718, 11742), 'numpy.zeros', 'np.zeros', (['expar.shape[0]'], {}), '(expar.shape[0])\n', (11726, 11742), True, 'import numpy as np\n'), ((17374, 17406), 'numpy.array', 'np.array', (['barcodes'], {'dtype': '"""|U64"""'}), "(barcodes, dtype='|U64')\n", (17382, 17406), True, 'import numpy as np\n'), ((18033, 18060), 'numpy.array', 'np.array', (["metadf['Barcode']"], {}), "(metadf['Barcode'])\n", (18041, 18060), 
True, 'import numpy as np\n'), ((18172, 18204), 'numpy.array', 'np.array', (['barcodes'], {'dtype': '"""|U64"""'}), "(barcodes, dtype='|U64')\n", (18180, 18204), True, 'import numpy as np\n'), ((18310, 18339), 'numpy.array', 'np.array', (['one_hot_ct_encoding'], {}), '(one_hot_ct_encoding)\n', (18318, 18339), True, 'import numpy as np\n'), ((19338, 19360), 'numpy.argsort', 'np.argsort', (['vars_genes'], {}), '(vars_genes)\n', (19348, 19360), True, 'import numpy as np\n'), ((19697, 19733), 'numpy.intersect1d', 'np.intersect1d', (['genes', 'genes_list[i]'], {}), '(genes, genes_list[i])\n', (19711, 19733), True, 'import numpy as np\n'), ((21349, 21402), 'numpy.intersect1d', 'np.intersect1d', (['genes', 'cur_genes'], {'return_indices': '(True)'}), '(genes, cur_genes, return_indices=True)\n', (21363, 21402), True, 'import numpy as np\n'), ((21771, 21800), 'numpy.array', 'np.array', (['one_hot_ct_encoding'], {}), '(one_hot_ct_encoding)\n', (21779, 21800), True, 'import numpy as np\n'), ((23261, 23300), 'os.path.join', 'os.path.join', (['outdir', '"""metadata.tsv.gz"""'], {}), "(outdir, 'metadata.tsv.gz')\n", (23273, 23300), False, 'import os\n'), ((24472, 24523), 'apex.amp.initialize', 'amp.initialize', (['vae', 'optimizer'], {'opt_level': 'opt_level'}), '(vae, optimizer, opt_level=opt_level)\n', (24486, 24523), False, 'from apex import amp\n'), ((24662, 24680), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (24676, 24680), True, 'import numpy as np\n'), ((26008, 26070), 'os.path.join', 'os.path.join', (['outdir', '"""VAE-TF-adjusted-weights_CellxTF.tsv.gz"""'], {}), "(outdir, 'VAE-TF-adjusted-weights_CellxTF.tsv.gz')\n", (26020, 26070), False, 'import os\n'), ((26463, 26506), 'os.path.join', 'os.path.join', (['outdir', '"""VAE_Z-matrix.tsv.gz"""'], {}), "(outdir, 'VAE_Z-matrix.tsv.gz')\n", (26475, 26506), False, 'import os\n'), ((26951, 26995), 'os.path.join', 'os.path.join', (['outdir', '"""VAE_mu-matrix.tsv.gz"""'], {}), "(outdir, 'VAE_mu-matrix.tsv.gz')\n", (26963, 26995), False, 'import os\n'), ((27419, 27469), 'os.path.join', 'os.path.join', (['outdir', '"""VAE_variance-matrix.tsv.gz"""'], {}), "(outdir, 'VAE_variance-matrix.tsv.gz')\n", (27431, 27469), False, 'import os\n'), ((27931, 27955), 'os.path.exists', 'os.path.exists', (['eachpath'], {}), '(eachpath)\n', (27945, 27955), False, 'import os\n'), ((29344, 29383), 'torch.zeros', 'torch.zeros', (['(train1.shape[0], n_batch)'], {}), '((train1.shape[0], n_batch))\n', (29355, 29383), False, 'import torch\n'), ((2165, 2218), 'umap.UMAP', 'umap.UMAP', ([], {'n_neighbors': 'n_neighbors', 'min_dist': 'min_dist'}), '(n_neighbors=n_neighbors, min_dist=min_dist)\n', (2174, 2218), False, 'import umap\n'), ((2331, 2354), 'pandas.DataFrame', 'pd.DataFrame', (['embedding'], {}), '(embedding)\n', (2343, 2354), True, 'import pandas as pd\n'), ((2689, 2783), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""UMAP1"""', 'y': '"""UMAP2"""', 'hue': '"""CellType"""', 'data': 'umap_output', 'height': '(6)', 'aspect': '(1.5)'}), "(x='UMAP1', y='UMAP2', hue='CellType', data=umap_output, height=\n 6, aspect=1.5)\n", (2700, 2783), True, 'import seaborn as sns\n'), ((6794, 6853), 'numpy.array', 'np.array', (["(feature_ref['feature_type'] == b'Gene Expression')"], {}), "(feature_ref['feature_type'] == b'Gene Expression')\n", (6802, 6853), True, 'import numpy as np\n'), ((8914, 8937), 'numpy.where', 'np.where', (['(all_tfs == tf)'], {}), '(all_tfs == tf)\n', (8922, 8937), True, 'import numpy as np\n'), ((12814, 13005), 'model.loss_function', 
'loss_function', (["outdict['qz_m']", "outdict['qz_v']", 'train1', "outdict['px_rate']", "outdict['px_r']", "outdict['px_dropout']", "outdict['ql_m']", "outdict['ql_v']", '(True)', 'local_l_mean', 'local_l_var'], {}), "(outdict['qz_m'], outdict['qz_v'], train1, outdict['px_rate'],\n outdict['px_r'], outdict['px_dropout'], outdict['ql_m'], outdict['ql_v'\n ], True, local_l_mean, local_l_var)\n", (12827, 13005), False, 'from model import loss_function\n'), ((13099, 13117), 'torch.mean', 'torch.mean', (['loss_1'], {}), '(loss_1)\n', (13109, 13117), False, 'import torch\n'), ((13139, 13157), 'torch.mean', 'torch.mean', (['loss_2'], {}), '(loss_2)\n', (13149, 13157), False, 'import torch\n'), ((14934, 14951), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (14945, 14951), False, 'import torch\n'), ((15122, 15147), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15145, 15147), False, 'import torch\n'), ((15621, 15646), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15644, 15646), False, 'import torch\n'), ((16105, 16141), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['resp_cell', 'pred_cell'], {}), '(resp_cell, pred_cell)\n', (16119, 16141), False, 'from sklearn.metrics import accuracy_score\n'), ((17080, 17105), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17103, 17105), False, 'import torch\n'), ((18889, 18923), 'train_multitask_ccle.make_labels', 'tmp_fnc', (['metapath', 'expar', 'barcodes'], {}), '(metapath, expar, barcodes)\n', (18896, 18923), True, 'from train_multitask_ccle import make_labels as tmp_fnc\n'), ((20378, 20420), 'numpy.array', 'np.array', (["metadf['CellType']"], {'dtype': '"""|U64"""'}), "(metadf['CellType'], dtype='|U64')\n", (20386, 20420), True, 'import numpy as np\n'), ((21883, 21910), 'numpy.array', 'np.array', (["metadf['Barcode']"], {}), "(metadf['Barcode'])\n", (21891, 21910), True, 'import numpy as np\n'), ((21974, 21998), 'numpy.array', 'np.array', (['celltypes_list'], {}), '(celltypes_list)\n', (21982, 21998), True, 'import numpy as np\n'), ((22018, 22049), 'numpy.array', 'np.array', (["metadf['Batch.Index']"], {}), "(metadf['Batch.Index'])\n", (22026, 22049), True, 'import numpy as np\n'), ((23160, 23189), 'pandas.unique', 'pd.unique', (["metadf['CellType']"], {}), "(metadf['CellType'])\n", (23169, 23189), True, 'import pandas as pd\n'), ((23481, 23494), 'numpy.max', 'np.max', (['expar'], {}), '(expar)\n', (23487, 23494), True, 'import numpy as np\n'), ((25248, 25267), 'pandas.DataFrame', 'pd.DataFrame', (['mumat'], {}), '(mumat)\n', (25260, 25267), True, 'import pandas as pd\n'), ((25403, 25435), 'numpy.array', 'np.array', (['barcodes'], {'dtype': '"""|U64"""'}), "(barcodes, dtype='|U64')\n", (25411, 25435), True, 'import numpy as np\n'), ((30168, 30189), 'numpy.unique', 'np.unique', (['batch_idxs'], {}), '(batch_idxs)\n', (30177, 30189), True, 'import numpy as np\n'), ((2578, 2618), 'os.path.join', 'os.path.join', (['outdir', "(adname + '.tsv.gz')"], {}), "(outdir, adname + '.tsv.gz')\n", (2590, 2618), False, 'import os\n'), ((2858, 2895), 'os.path.join', 'os.path.join', (['outdir', "(adname + '.pdf')"], {}), "(outdir, adname + '.pdf')\n", (2870, 2895), False, 'import os\n'), ((2943, 2980), 'os.path.join', 'os.path.join', (['outdir', "(adname + '.png')"], {}), "(outdir, adname + '.png')\n", (2955, 2980), False, 'import os\n'), ((9060, 9087), 'numpy.where', 'np.where', (['(all_genes == gene)'], {}), '(all_genes == gene)\n', (9068, 9087), True, 'import numpy as 
np\n'), ((12399, 12450), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.sum', '(1)', 'expar[cur_sidxs, :]'], {}), '(np.sum, 1, expar[cur_sidxs, :])\n', (12418, 12450), True, 'import numpy as np\n'), ((12523, 12574), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.sum', '(1)', 'expar[cur_sidxs, :]'], {}), '(np.sum, 1, expar[cur_sidxs, :])\n', (12542, 12574), True, 'import numpy as np\n'), ((15664, 15688), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (15686, 15688), False, 'import torch\n'), ((16437, 16451), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16449, 16451), False, 'from datetime import datetime\n'), ((17143, 17159), 'apex.amp.state_dict', 'amp.state_dict', ([], {}), '()\n', (17157, 17159), False, 'from apex import amp\n'), ((17226, 17258), 'torch.save', 'torch.save', (['checkpoint', 'eachpath'], {}), '(checkpoint, eachpath)\n', (17236, 17258), False, 'import torch\n'), ((25533, 25577), 'os.path.join', 'os.path.join', (['outdir', '"""VAE_mu-matrix.tsv.gz"""'], {}), "(outdir, 'VAE_mu-matrix.tsv.gz')\n", (25545, 25577), False, 'import os\n'), ((28003, 28023), 'torch.load', 'torch.load', (['eachpath'], {}), '(eachpath)\n', (28013, 28023), False, 'import torch\n'), ((28106, 28119), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (28117, 28119), False, 'from collections import OrderedDict\n'), ((28397, 28422), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (28420, 28422), False, 'import torch\n'), ((29437, 29465), 'numpy.where', 'np.where', (['(batch_ar_temp == j)'], {}), '(batch_ar_temp == j)\n', (29445, 29465), True, 'import numpy as np\n'), ((13395, 13416), 'torch.max', 'torch.max', (['ct_pred', '(1)'], {}), '(ct_pred, 1)\n', (13404, 13416), False, 'import torch\n'), ((14305, 14325), 'numpy.min', 'np.min', (['loss_scalers'], {}), '(loss_scalers)\n', (14311, 14325), True, 'import numpy as np\n'), ((14660, 14680), 'numpy.min', 'np.min', (['loss_scalers'], {}), '(loss_scalers)\n', (14666, 14680), True, 'import numpy as np\n'), ((14830, 14859), 'torch.tensor', 'torch.tensor', (['loss_scalers[2]'], {}), '(loss_scalers[2])\n', (14842, 14859), False, 'import torch\n'), ((15170, 15201), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (15184, 15201), False, 'from apex import amp\n'), ((16713, 16727), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16725, 16727), False, 'from datetime import datetime\n'), ((28444, 28482), 'apex.amp.load_state_dict', 'amp.load_state_dict', (["checkpoint['amp']"], {}), "(checkpoint['amp'])\n", (28463, 28482), False, 'from apex import amp\n'), ((14710, 14739), 'torch.tensor', 'torch.tensor', (['loss_scalers[0]'], {}), '(loss_scalers[0])\n', (14722, 14739), False, 'import torch\n'), ((14770, 14799), 'torch.tensor', 'torch.tensor', (['loss_scalers[1]'], {}), '(loss_scalers[1])\n', (14782, 14799), False, 'import torch\n'), ((23641, 23685), 'torch.ones', 'torch.ones', (['gmtmat.shape[1]', 'gmtmat.shape[0]'], {}), '(gmtmat.shape[1], gmtmat.shape[0])\n', (23651, 23685), False, 'import torch\n'), ((30753, 30805), 'torch.from_numpy', 'torch.from_numpy', (['expar[idxbatch_st:idxbatch_end, :]'], {}), '(expar[idxbatch_st:idxbatch_end, :])\n', (30769, 30805), False, 'import torch\n'), ((12079, 12116), 'torch.from_numpy', 'torch.from_numpy', (['expar[cur_sidxs, :]'], {}), '(expar[cur_sidxs, :])\n', (12095, 12116), False, 'import torch\n'), ((23571, 23591), 'numpy.transpose', 'np.transpose', (['gmtmat'], {}), '(gmtmat)\n', (23583, 
23591), True, 'import numpy as np\n'), ((13257, 13301), 'torch.max', 'torch.max', (['one_hot_ct_encoding[cur_sidxs]', '(1)'], {}), '(one_hot_ct_encoding[cur_sidxs], 1)\n', (13266, 13301), False, 'import torch\n'), ((30944, 30998), 'torch.from_numpy', 'torch.from_numpy', (['batch_idxs[idxbatch_st:idxbatch_end]'], {}), '(batch_idxs[idxbatch_st:idxbatch_end])\n', (30960, 30998), False, 'import torch\n'), ((12228, 12267), 'torch.from_numpy', 'torch.from_numpy', (['batch_idxs[cur_sidxs]'], {}), '(batch_idxs[cur_sidxs])\n', (12244, 12267), False, 'import torch\n')]
|
# Numpy is imported, seed is set
import numpy as np
np.random.seed(123)
# Initialization
random_walk = [0]
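# Dice rules: 1-2 -> one step down (floored at 0), 3-5 -> one step up,
# 6 -> jump up by a fresh dice throw.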
for x in range(100):
step = random_walk[-1]
dice = np.random.randint(1,7)
if dice <= 2:
step = max(0, step - 1)
elif dice <= 5:
step = step + 1
else:
step = step + np.random.randint(1,7)
random_walk.append(step)
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Plot random_walk
plt.plot(random_walk)
# Show the plot
plt.show()
|
[
"numpy.random.randint",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] |
[((52, 71), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (66, 71), True, 'import numpy as np\n'), ((458, 479), 'matplotlib.pyplot.plot', 'plt.plot', (['random_walk'], {}), '(random_walk)\n', (466, 479), True, 'import matplotlib.pyplot as plt\n'), ((497, 507), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (505, 507), True, 'import matplotlib.pyplot as plt\n'), ((168, 191), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)'], {}), '(1, 7)\n', (185, 191), True, 'import numpy as np\n'), ((318, 341), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)'], {}), '(1, 7)\n', (335, 341), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from netCDF4 import Dataset # pylint: disable=no-name-in-module
import numpy as np
#########################################################
# Class for ROMS grd and clm files
# (For use in various post-processing scripts)
#########################################################
class getGrid(object):
'''
Read the basics of ROMS setup into class for further use in other functions
and classes.
'''
# Read grid file
def __init__(self,grdfile):
# Set grd file
self.grdfile = grdfile
self.ncgrd = Dataset(grdfile, mode='r')
# Read mask
self.mask_rho = self.ncgrd.variables['mask_rho'][:]
self.FillValue = getattr(self.ncgrd.variables['mask_rho'],'_FillValue')
# Read dimensions
self.SY = self.mask_rho.shape[0]
self.SX = self.mask_rho.shape[1]
def getAttrs(self,clmfile):
# Set clm file
self.ncclm = Dataset(clmfile, mode='r')
# Read attributes
try:
self.theta_s = getattr(self.ncclm,'theta_s')
self.theta_b = getattr(self.ncclm,'theta_b')
self.hc = getattr(self.ncclm,'hc')
except AttributeError:
self.theta_s = self.ncclm.variables['theta_s'][0]
self.theta_b = self.ncclm.variables['theta_b'][0]
self.hc = self.ncclm.variables['hc'][0]
# Vertical dimension
self.NZ = self.ncclm.dimensions['s_rho'].size
def setClmFiles(self,clmfile,clm2file):
# Set clm file
if not hasattr(self, 'ncclm'):
self.ncclm = Dataset(clmfile, mode='r')
# Set clm2 file
self.ncclm2 = Dataset(clm2file, mode='r')
def getTopo(self):
# Read topography
self.h = self.ncgrd.variables['h'][:]
self.hmin = getattr(self.ncgrd,'hmin')
self.hmax = getattr(self.ncgrd,'hmax')
def getLatLon(self):
# Read Lat/Lon
self.lon_rho = self.ncgrd.variables['lon_rho'][:]
self.lat_rho = self.ncgrd.variables['lat_rho'][:]
def getArea(self):
# Read pm/pn
self.area = 1/(self.ncgrd.variables['pm'][:]*self.ncgrd.variables['pn'][:])
def getAngle(self):
# Read angle
self.angle = self.ncgrd.variables['angle'][:]
#########################################################
# Vertical sigma level depths and spacing
#########################################################
def compute_zlev(fpin,fpin_grd,NZ,type,zeta=None,stype=3):
# Compute z levels of rho points for ZERO SSH. Input:
#
# fpin: file descriptor pointing to a NetCDF file containing theta_b,
# theta_s and Tcline or hc
# fpin_grd: file descriptor pointing to a NetCDF file containing h
# NZ: number of vertical (rho) levels
# type: 'r': rho points
# 'w': w points
# stype: specifies type of sigma levels used:
# 1: similar to Song, Haidvogel 1994
# 2: Shchepetkin 2006
# 3: Shchepetkin 2010 (or so)
import numpy as np
import sys
h = fpin_grd.variables['h'][:,:]
try:
theta_b = fpin.theta_b
theta_s = fpin.theta_s
except AttributeError:
# theta_b/s may be variables:
theta_b = fpin.variables['theta_b'][0]
theta_s = fpin.variables['theta_s'][0]
if stype == 1:
        hmin = np.min(h)
try:
Tcline = fpin.Tcline
hc = min(hmin,Tcline)
except AttributeError:
hc = fpin.hc
hc = min(hmin,hc)
elif stype == 2 or stype == 3:
try:
hc = fpin.hc
except AttributeError:
# hc may be a variable:
hc = fpin.variables['hc'][0]
else:
msg = '{}: Unknown type of sigma levels'.format(stype)
sys.exit(msg)
ds = 1./NZ # float, to prevent integer division in sc
if type == 'w':
lev = np.arange(NZ+1)
sc = (lev - NZ) * ds
nr_zlev = NZ+1 # number of vertical levels
else:
lev = np.arange(1,NZ+1)
sc = -1 + (lev-0.5)*ds
nr_zlev = NZ # number of vertical levels
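    # Ptheta/Rtheta are the sinh/tanh stretching terms combined into C(s) for
    # stype <= 2; stype == 3 switches to the cosh/exp-based stretching below.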
Ptheta = np.sinh(theta_s*sc)/np.sinh(theta_s)
Rtheta = np.tanh(theta_s*(sc+.5))/(2*np.tanh(.5*theta_s))-.5
if stype <= 2:
Cs = (1-theta_b)*Ptheta+theta_b*Rtheta
elif stype == 3:
if theta_s > 0:
csrf=(1.-np.cosh(theta_s*sc))/(np.cosh(theta_s)-1.)
else:
csrf=-sc**2
if theta_b > 0:
Cs=(np.exp(theta_b*csrf)-1.)/(1.-np.exp(-theta_b))
else:
Cs=csrf
    z0 = np.zeros((nr_zlev,h.shape[0],h.shape[1]),float)
if stype == 1:
cff = (sc-Cs)*hc
cff1 = Cs
hinv = 1.0 / h
for k in range(nr_zlev):
z0[k,:,:] = cff[k]+cff1[k]*h
            if zeta is not None:
z0[k,:,:] = z0[k,:,:]+zeta*(1.+z0[k,:,:]*hinv)
elif stype == 2 or stype == 3:
hinv = 1.0/(h+hc)
cff = hc*sc
cff1 = Cs
for k in range(nr_zlev):
tmp1 = cff[k]+cff1[k]*h
tmp2 = np.multiply(tmp1,hinv)
if zeta is None:
z0[k,:,:] = np.multiply(h,tmp2)
else:
z0[k,:,:] = zeta + np.multiply((zeta+h),tmp2)
# Return
return z0
def compute_dz(fpin,fpin_grd,NZ,zeta=None,stype=3):
# Compute dz of sigma level rho points for ZERO SSH. Input:
#
# fpin: file descriptor pointing to a NetCDF file containing theta_b,
# theta_s and Tcline or hc
# fpin_grd: file descriptor pointing to a NetCDF file containing h
# NZ: number of vertical (rho) levels
# stype: specifies type of sigma levels used:
# 1: similar to Song, Haidvogel 1994
# 2: Shchepetkin 2006
# 3: Shchepetkin 2010 (or so)
# Compute depth of w sigma levels
    depth_w = -compute_zlev(fpin,fpin_grd,NZ,type='w',zeta=zeta,stype=stype)
# Compute dz between w sigma levels (= dz of sigma layer)
dz_sigma = depth_w[:-1]-depth_w[1:]
return dz_sigma
#########################################################
# Additions from Max Simon
# Author: <NAME>
# Year: 2020
#########################################################
def get_cell_heights(z_values, depth):
"""
Structure if depth is False:
------------- // surface, top second cell
x // rho point, idx 2
------------- // top first cell, bottom second cell
x // rho point, idx 1
------------- // top zero-th cell, bottom first cell
x // rho point, idx 0
------------- // ground, bottom zero-th cell
Structure if depth is True
------------- // surface, top zero-th cell
x // depth point, idx 0
------------- // top first cell, bottom zero-th cell
x // depth point, idx 1
------------- // top second cell, bottom first cell
x // depth point, idx 2
------------- // ground, bottom second cell
Idea:
- loop from top to bottom (this means for depth = False from last index to first)
- calculate distance from current point to last_depth --> half the cell height
- last_depth is initially 0 and set to _current rho point + half the cell height_ after each iteration
- cell size is _2 x half the cell height_
    Note: if depth = False this has to be done for each grid point separately!
"""
heights = np.zeros_like(z_values)
last_height = 0.0 if depth else np.zeros((z_values.shape[1], z_values.shape[2]))
zero_edge_case = False
for srho_idx in range(z_values.shape[0]):
# go from top to bottom
srho = srho_idx if depth else (z_values.shape[0] - srho_idx - 1)
# handle edge case:
if srho == 0 and (z_values[srho] == 0).any():
assert (z_values[srho] == 0).all()
print('Zero Edge Case detected')
zero_edge_case = True
continue
# calc dist to last height
half = np.abs(z_values[srho]) - last_height
# handle edge case
if srho == 1 and zero_edge_case:
half = 0.5*half
previous_srho = 0 if depth else -1
heights[previous_srho] = half
zero_edge_case = False
print('Zero Edge Case solved')
assert np.array(half >= 0).all(), (srho_idx, srho, z_values[srho], last_height, half)
heights[srho] = 2*half
# update last_height
last_height = np.abs(z_values[srho]) + half
return heights
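# Write a helper netCDF file holding the depth (z_level) and layer thickness
# (thickness_z) of every cell, either on a regular depth axis (z-sliced input)
# or on the native s_rho grid of a raw ROMS output file.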
def create_zlevel_file(grid_path, sample_data_path, out_path):
"""
Create a netCDF file containing the zlevels
"""
sample_data = Dataset(sample_data_path)
is_zslice_file = 'depth' in sample_data.dimensions
if is_zslice_file:
print('Sample Data is z sliced')
z_levels = np.array(sample_data['depth'])
z_thickness = get_cell_heights(z_levels, True)
assert np.sum(z_thickness[:-1]) + 0.5*z_thickness[-1] == abs(z_levels[-1]), (np.sum(z_thickness[:-1]), z_thickness[-1], z_levels[-1])
with Dataset(out_path, mode='w') as new_dataset:
            # define the depth dimension of the output file
new_dataset.createDimension('depth', len(z_levels))
# save zlevels
new_dataset.createVariable('z_level', np.float32, dimensions=('depth',))
new_dataset['z_level'][:] = np.abs(z_levels)
            new_dataset.createVariable('thickness_z', np.float32, dimensions=('depth',))
new_dataset['thickness_z'][:] = np.abs(z_thickness)
else:
        sample_data.close() # just make sure that we don't interfere with other routines
print('Sample Data is raw ROMS output')
# calculate the zlevels
grid = Dataset(grid_path)
sample_data = Dataset(sample_data_path)
n_s_rho = sample_data.dimensions['s_rho'].size
n_eta_rho = sample_data.dimensions['eta_rho'].size
n_xi_rho = sample_data.dimensions['xi_rho'].size
z_levels_rho = compute_zlev(sample_data, grid, n_s_rho, 'r')
z_levels_w = compute_zlev(sample_data, grid, n_s_rho, 'w')
z_thickness_rho = get_cell_heights(z_levels_rho, False)
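        # Sanity check: the summed layer thicknesses should reproduce the
        # bathymetry h to within a few metres.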
control = np.sum(z_thickness_rho, axis=0) - np.array(grid['h'])
assert np.max(np.abs(control)) < 5, 'Height calculation differs more than 5m'
with Dataset(out_path, mode='w') as new_dataset:
            # define the grid dimensions of the output file
new_dataset.createDimension('s_rho', n_s_rho)
new_dataset.createDimension('eta_rho', n_eta_rho)
new_dataset.createDimension('xi_rho', n_xi_rho)
new_dataset.createDimension('s_w', n_s_rho + 1)
# save zlevels
new_dataset.createVariable('z_level', np.float32, dimensions=('s_rho', 'eta_rho', 'xi_rho'))
new_dataset['z_level'][:] = np.abs(z_levels_rho)
new_dataset.createVariable('z_level_w', np.float32, dimensions=('s_w', 'eta_rho', 'xi_rho'))
new_dataset['z_level_w'][:] = np.abs(z_levels_w)
new_dataset.createVariable('thickness_z', np.float32, dimensions=('s_rho', 'eta_rho', 'xi_rho'))
new_dataset['thickness_z'][:] = np.abs(z_thickness_rho)
if __name__ == "__main__":
import argparse
# create parser
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument('--input', type=str, required=True, help="Sample Input Path")
parser.add_argument('--grid', type=str, required=True, help="Grid path")
parser.add_argument('--output', type=str, help="Output path")
args = parser.parse_args()
# execute
create_zlevel_file(args.grid, args.input, args.output)
|
[
"netCDF4.Dataset",
"numpy.zeros_like",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.tanh",
"numpy.sum",
"numpy.multiply",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"numpy.exp",
"numpy.cosh",
"numpy.sinh",
"sys.exit"
] |
[((4031, 4084), 'numpy.zeros', 'np.zeros', (['(nr_zlev, h.shape[0], h.shape[1])', 'np.float'], {}), '((nr_zlev, h.shape[0], h.shape[1]), np.float)\n', (4039, 4084), True, 'import numpy as np\n'), ((6626, 6649), 'numpy.zeros_like', 'np.zeros_like', (['z_values'], {}), '(z_values)\n', (6639, 6649), True, 'import numpy as np\n'), ((7689, 7714), 'netCDF4.Dataset', 'Dataset', (['sample_data_path'], {}), '(sample_data_path)\n', (7696, 7714), False, 'from netCDF4 import Dataset\n'), ((10067, 10092), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10090, 10092), False, 'import argparse\n'), ((534, 560), 'netCDF4.Dataset', 'Dataset', (['grdfile'], {'mode': '"""r"""'}), "(grdfile, mode='r')\n", (541, 560), False, 'from netCDF4 import Dataset\n'), ((857, 883), 'netCDF4.Dataset', 'Dataset', (['clmfile'], {'mode': '"""r"""'}), "(clmfile, mode='r')\n", (864, 883), False, 'from netCDF4 import Dataset\n'), ((1488, 1515), 'netCDF4.Dataset', 'Dataset', (['clm2file'], {'mode': '"""r"""'}), "(clm2file, mode='r')\n", (1495, 1515), False, 'from netCDF4 import Dataset\n'), ((3469, 3486), 'numpy.arange', 'np.arange', (['(NZ + 1)'], {}), '(NZ + 1)\n', (3478, 3486), True, 'import numpy as np\n'), ((3568, 3588), 'numpy.arange', 'np.arange', (['(1)', '(NZ + 1)'], {}), '(1, NZ + 1)\n', (3577, 3588), True, 'import numpy as np\n'), ((3664, 3685), 'numpy.sinh', 'np.sinh', (['(theta_s * sc)'], {}), '(theta_s * sc)\n', (3671, 3685), True, 'import numpy as np\n'), ((3684, 3700), 'numpy.sinh', 'np.sinh', (['theta_s'], {}), '(theta_s)\n', (3691, 3700), True, 'import numpy as np\n'), ((6683, 6731), 'numpy.zeros', 'np.zeros', (['(z_values.shape[1], z_values.shape[2])'], {}), '((z_values.shape[1], z_values.shape[2]))\n', (6691, 6731), True, 'import numpy as np\n'), ((7836, 7866), 'numpy.array', 'np.array', (["sample_data['depth']"], {}), "(sample_data['depth'])\n", (7844, 7866), True, 'import numpy as np\n'), ((8659, 8677), 'netCDF4.Dataset', 'Dataset', (['grid_path'], {}), '(grid_path)\n', (8666, 8677), False, 'from netCDF4 import Dataset\n'), ((8694, 8719), 'netCDF4.Dataset', 'Dataset', (['sample_data_path'], {}), '(sample_data_path)\n', (8701, 8719), False, 'from netCDF4 import Dataset\n'), ((1427, 1453), 'netCDF4.Dataset', 'Dataset', (['clmfile'], {'mode': '"""r"""'}), "(clmfile, mode='r')\n", (1434, 1453), False, 'from netCDF4 import Dataset\n'), ((3374, 3387), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (3382, 3387), False, 'import sys\n'), ((3711, 3740), 'numpy.tanh', 'np.tanh', (['(theta_s * (sc + 0.5))'], {}), '(theta_s * (sc + 0.5))\n', (3718, 3740), True, 'import numpy as np\n'), ((7112, 7134), 'numpy.abs', 'np.abs', (['z_values[srho]'], {}), '(z_values[srho])\n', (7118, 7134), True, 'import numpy as np\n'), ((7508, 7530), 'numpy.abs', 'np.abs', (['z_values[srho]'], {}), '(z_values[srho])\n', (7514, 7530), True, 'import numpy as np\n'), ((7996, 8020), 'numpy.sum', 'np.sum', (['z_thickness[:-1]'], {}), '(z_thickness[:-1])\n', (8002, 8020), True, 'import numpy as np\n'), ((8062, 8089), 'netCDF4.Dataset', 'Dataset', (['out_path'], {'mode': '"""w"""'}), "(out_path, mode='w')\n", (8069, 8089), False, 'from netCDF4 import Dataset\n'), ((8341, 8357), 'numpy.abs', 'np.abs', (['z_levels'], {}), '(z_levels)\n', (8347, 8357), True, 'import numpy as np\n'), ((8472, 8491), 'numpy.abs', 'np.abs', (['z_thickness'], {}), '(z_thickness)\n', (8478, 8491), True, 'import numpy as np\n'), ((9069, 9100), 'numpy.sum', 'np.sum', (['z_thickness_rho'], {'axis': '(0)'}), '(z_thickness_rho, axis=0)\n', (9075, 
9100), True, 'import numpy as np\n'), ((9103, 9122), 'numpy.array', 'np.array', (["grid['h']"], {}), "(grid['h'])\n", (9111, 9122), True, 'import numpy as np\n'), ((9211, 9238), 'netCDF4.Dataset', 'Dataset', (['out_path'], {'mode': '"""w"""'}), "(out_path, mode='w')\n", (9218, 9238), False, 'from netCDF4 import Dataset\n'), ((9663, 9683), 'numpy.abs', 'np.abs', (['z_levels_rho'], {}), '(z_levels_rho)\n', (9669, 9683), True, 'import numpy as np\n'), ((9813, 9831), 'numpy.abs', 'np.abs', (['z_levels_w'], {}), '(z_levels_w)\n', (9819, 9831), True, 'import numpy as np\n'), ((9967, 9990), 'numpy.abs', 'np.abs', (['z_thickness_rho'], {}), '(z_thickness_rho)\n', (9973, 9990), True, 'import numpy as np\n'), ((3739, 3761), 'numpy.tanh', 'np.tanh', (['(0.5 * theta_s)'], {}), '(0.5 * theta_s)\n', (3746, 3761), True, 'import numpy as np\n'), ((4424, 4447), 'numpy.multiply', 'np.multiply', (['tmp1', 'hinv'], {}), '(tmp1, hinv)\n', (4435, 4447), True, 'import numpy as np\n'), ((7365, 7384), 'numpy.array', 'np.array', (['(half >= 0)'], {}), '(half >= 0)\n', (7373, 7384), True, 'import numpy as np\n'), ((7926, 7950), 'numpy.sum', 'np.sum', (['z_thickness[:-1]'], {}), '(z_thickness[:-1])\n', (7932, 7950), True, 'import numpy as np\n'), ((9139, 9154), 'numpy.abs', 'np.abs', (['control'], {}), '(control)\n', (9145, 9154), True, 'import numpy as np\n'), ((4483, 4503), 'numpy.multiply', 'np.multiply', (['h', 'tmp2'], {}), '(h, tmp2)\n', (4494, 4503), True, 'import numpy as np\n'), ((3868, 3889), 'numpy.cosh', 'np.cosh', (['(theta_s * sc)'], {}), '(theta_s * sc)\n', (3875, 3889), True, 'import numpy as np\n'), ((3890, 3906), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (3897, 3906), True, 'import numpy as np\n'), ((3959, 3981), 'numpy.exp', 'np.exp', (['(theta_b * csrf)'], {}), '(theta_b * csrf)\n', (3965, 3981), True, 'import numpy as np\n'), ((3988, 4004), 'numpy.exp', 'np.exp', (['(-theta_b)'], {}), '(-theta_b)\n', (3994, 4004), True, 'import numpy as np\n'), ((4535, 4562), 'numpy.multiply', 'np.multiply', (['(zeta + h)', 'tmp2'], {}), '(zeta + h, tmp2)\n', (4546, 4562), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader, ConcatDataset
from argparse import ArgumentParser
from models.psp.pspnet import PSPNet
from models.sobel_op import SobelComputer
from dataset import OnlineTransformDataset
from util.logger import BoardLogger
from util.model_saver import ModelSaver
from util.hyper_para import HyperParameters
from util.log_integrator import Integrator
from util.metrics_compute import compute_loss_and_metrics, iou_hooks_to_be_used
from util.image_saver import vis_prediction
import time
import os
import datetime
torch.backends.cudnn.benchmark = True
# Parse command line arguments
para = HyperParameters()
para.parse()
parser = ArgumentParser()
parser.add_argument('data_path', help='Image path')
args = parser.parse_args()
# Logging
if para['id'].lower() != 'null':
long_id = '%s_%s' % (para['id'],datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
else:
long_id = None
logger = BoardLogger(long_id)
logger.log_string('hyperpara', str(para))
print('CUDA Device count: ', torch.cuda.device_count())
# Construct model
model = PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50')
model = nn.DataParallel(
model.cuda(), device_ids=[0,1,2,3]
)
if para['load'] is not None:
model.load_state_dict(torch.load(para['load']))
optimizer = optim.Adam(model.parameters(), lr=para['lr'], weight_decay=para['weight_decay'])
data_dir = args.data_path
dataset = OnlineTransformDataset(data_dir, method=1, perturb=True)
print('dataset size: ', len(dataset))
#train_dataset = ConcatDataset([fss_dataset, duts_tr_dataset, duts_te_dataset, ecssd_dataset, msra_dataset])
#train_dataset = ConcatDataset([ duts_tr_dataset])
# For randomness: https://github.com/pytorch/pytorch/issues/5059
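# Give every DataLoader worker its own numpy seed so random augmentations are
# not repeated across worker processes.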
def worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
# Dataloaders, multi-process data loading
train_loader = DataLoader(dataset, para['batch_size'], shuffle=True, num_workers=8,
worker_init_fn=worker_init_fn, drop_last=True, pin_memory=True)
sobel_compute = SobelComputer()
# Learning rate decay scheduling
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, para['steps'], para['gamma'])
saver = ModelSaver(long_id)
report_interval = 50
save_im_interval = 800
total_epoch = int(para['iterations']/len(train_loader) + 0.5)
print('Actual training epoch: ', total_epoch)
train_integrator = Integrator(logger)
train_integrator.add_hook(iou_hooks_to_be_used)
total_iter = 0
last_time = 0
for e in range(total_epoch):
np.random.seed() # reset seed
epoch_start_time = time.time()
# Train loop
model = model.train()
for im, seg, gt in train_loader:
im, seg, gt = im.cuda(), seg.cuda(), gt.cuda()
total_iter += 1
if total_iter % 5000 == 0:
saver.save_model(model, total_iter)
images = model(im, seg)
images['im'] = im
images['seg'] = seg
images['gt'] = gt
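        # Compute Sobel edge maps for this batch before evaluating losses and
        # metrics on the image dict.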
sobel_compute.compute_edges(images)
loss_and_metrics = compute_loss_and_metrics(images, para)
train_integrator.add_dict(loss_and_metrics)
optimizer.zero_grad()
(loss_and_metrics['total_loss']).backward()
optimizer.step()
if total_iter % report_interval == 0:
logger.log_scalar('train/lr', scheduler.get_lr()[0], total_iter)
train_integrator.finalize('train', total_iter)
train_integrator.reset_except_hooks()
# Need to put step AFTER get_lr() for correct logging, see issue #22107 in PyTorch
scheduler.step()
if total_iter % save_im_interval == 0:
predict_vis = vis_prediction(images)
logger.log_cv2('train/predict', predict_vis, total_iter)
# Final save!
saver.save_model(model, total_iter)
|
[
"util.logger.BoardLogger",
"numpy.random.seed",
"models.psp.pspnet.PSPNet",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"numpy.random.get_state",
"util.image_saver.vis_prediction",
"torch.load",
"dataset.OnlineTransformDataset",
"torch.cuda.device_count",
"util.model_saver.ModelSaver",
"time.time",
"models.sobel_op.SobelComputer",
"util.hyper_para.HyperParameters",
"util.metrics_compute.compute_loss_and_metrics",
"datetime.datetime.now",
"util.log_integrator.Integrator",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((696, 713), 'util.hyper_para.HyperParameters', 'HyperParameters', ([], {}), '()\n', (711, 713), False, 'from util.hyper_para import HyperParameters\n'), ((737, 753), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (751, 753), False, 'from argparse import ArgumentParser\n'), ((1002, 1022), 'util.logger.BoardLogger', 'BoardLogger', (['long_id'], {}), '(long_id)\n', (1013, 1022), False, 'from util.logger import BoardLogger\n'), ((1149, 1240), 'models.psp.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(2048)', 'deep_features_size': '(1024)', 'backend': '"""resnet50"""'}), "(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet50')\n", (1155, 1240), False, 'from models.psp.pspnet import PSPNet\n'), ((1524, 1580), 'dataset.OnlineTransformDataset', 'OnlineTransformDataset', (['data_dir'], {'method': '(1)', 'perturb': '(True)'}), '(data_dir, method=1, perturb=True)\n', (1546, 1580), False, 'from dataset import OnlineTransformDataset\n'), ((1998, 2134), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', "para['batch_size']"], {'shuffle': '(True)', 'num_workers': '(8)', 'worker_init_fn': 'worker_init_fn', 'drop_last': '(True)', 'pin_memory': '(True)'}), "(dataset, para['batch_size'], shuffle=True, num_workers=8,\n worker_init_fn=worker_init_fn, drop_last=True, pin_memory=True)\n", (2008, 2134), False, 'from torch.utils.data import DataLoader, ConcatDataset\n'), ((2176, 2191), 'models.sobel_op.SobelComputer', 'SobelComputer', ([], {}), '()\n', (2189, 2191), False, 'from models.sobel_op import SobelComputer\n'), ((2238, 2309), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer', "para['steps']", "para['gamma']"], {}), "(optimizer, para['steps'], para['gamma'])\n", (2268, 2309), False, 'from torch import optim\n'), ((2319, 2338), 'util.model_saver.ModelSaver', 'ModelSaver', (['long_id'], {}), '(long_id)\n', (2329, 2338), False, 'from util.model_saver import ModelSaver\n'), ((2512, 2530), 'util.log_integrator.Integrator', 'Integrator', (['logger'], {}), '(logger)\n', (2522, 2530), False, 'from util.log_integrator import Integrator\n'), ((1095, 1120), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1118, 1120), False, 'import torch\n'), ((2641, 2657), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (2655, 2657), True, 'import numpy as np\n'), ((2694, 2705), 'time.time', 'time.time', ([], {}), '()\n', (2703, 2705), False, 'import time\n'), ((1366, 1390), 'torch.load', 'torch.load', (["para['load']"], {}), "(para['load'])\n", (1376, 1390), False, 'import torch\n'), ((3137, 3175), 'util.metrics_compute.compute_loss_and_metrics', 'compute_loss_and_metrics', (['images', 'para'], {}), '(images, para)\n', (3161, 3175), False, 'from util.metrics_compute import compute_loss_and_metrics, iou_hooks_to_be_used\n'), ((3760, 3782), 'util.image_saver.vis_prediction', 'vis_prediction', (['images'], {}), '(images)\n', (3774, 3782), False, 'from util.image_saver import vis_prediction\n'), ((913, 936), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (934, 936), False, 'import datetime\n'), ((1899, 1920), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1918, 1920), True, 'import numpy as np\n')]
|
import abc
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
import gym
import numpy as np
import pymunk as pm
from gym import spaces
import xmagical.entities as en
import xmagical.render as r
from xmagical.phys_vars import PhysicsVariablesBase, PhysVar
from xmagical.style import ARENA_ZOOM_OUT, COLORS_RGB, lighten_rgb
class PhysicsVariables(PhysicsVariablesBase):
"""Default values & randomisation ranges for key physical parameters of the environment."""
robot_pos_joint_max_force = PhysVar(5, (3.2, 5.5))
robot_rot_joint_max_force = PhysVar(1, (0.7, 1.5))
robot_finger_max_force = PhysVar(4, (2.5, 4.5))
shape_trans_joint_max_force = PhysVar(1.5, (1.0, 1.8))
shape_rot_joint_max_force = PhysVar(0.1, (0.07, 0.15))
class BaseEnv(gym.Env, abc.ABC):
# Constants for all envs.
ROBOT_RAD = 0.2
ROBOT_MASS = 1.0
SHAPE_RAD = ROBOT_RAD * 0.6
SIZE = 1.1
ARENA_BOUNDS_LRBT = [-SIZE, SIZE, -SIZE, SIZE]
ARENA_SIZE_MAX = max(ARENA_BOUNDS_LRBT)
# Minimum and maximum size of goal regions used during randomisation.
RAND_GOAL_MIN_SIZE = 0.5
RAND_GOAL_MAX_SIZE = 0.8
RAND_GOAL_SIZE_RANGE = RAND_GOAL_MAX_SIZE - RAND_GOAL_MIN_SIZE
# The following are used to standardise what "jitter" means across different
# tasks.
JITTER_PCT = 0.05
JITTER_POS_BOUND = ARENA_SIZE_MAX * JITTER_PCT / 2.0
JITTER_ROT_BOUND = JITTER_PCT * np.pi
JITTER_TARGET_BOUND = JITTER_PCT * RAND_GOAL_SIZE_RANGE / 2
def __init__(
self,
*, # Subclasses can have additional args.
robot_cls: Type[en.embodiments.NonHolonomicEmbodiment],
res_hw: Tuple[int, int] = (256, 256),
fps: float = 20.0,
phys_steps: int = 10,
phys_iter: int = 10,
max_episode_steps: Optional[int] = None,
view_mode: str = "allo",
rand_dynamics: bool = False,
) -> None:
assert view_mode in [
"allo",
"ego",
], "view_mode must be one of ['allo', 'ego']."
self.robot_cls = robot_cls
self.action_dim = robot_cls.DOF
self.phys_iter = phys_iter
self.phys_steps = phys_steps
self.fps = fps
self.res_hw = res_hw
self.max_episode_steps = max_episode_steps
self.rand_dynamics = rand_dynamics
# State/rendering (see reset()).
self._entities = None
self._space = None
self._robot = None
self._episode_steps = None
self._phys_vars = None
self._renderer_func = (
self._use_allo_cam if view_mode == "allo" else self._use_ego_cam
)
# This is for rendering and displaying.
self.renderer = None
self.viewer = None
# Set observation and action spaces.
self.observation_space = spaces.Box(
low=0, high=255, shape=(*self.res_hw, 3), dtype=np.uint8
)
self.action_space = spaces.Box(
np.array([-1] * self.action_dim, dtype=np.float32),
np.array([+1] * self.action_dim, dtype=np.float32),
dtype=np.float32,
)
self.seed()
def seed(self, seed: Optional[int] = None) -> List[int]:
"""Initialise the PRNG and return seed necessary to reproduce results.
The action space should probably be seeded in a downstream RL
application.
"""
if seed is None:
seed = np.random.randint(0, (1 << 31) - 1)
self.rng = np.random.RandomState(seed=seed)
return [seed]
def _make_robot(
self,
init_pos: Union[np.ndarray, Tuple[float, float]],
init_angle: float,
) -> en.embodiments.NonHolonomicEmbodiment:
return self.robot_cls(
radius=self.ROBOT_RAD,
mass=self.ROBOT_MASS,
init_pos=init_pos,
init_angle=init_angle,
)
def _make_shape(self, **kwargs) -> en.Shape:
return en.Shape(shape_size=self.SHAPE_RAD, mass=0.01, **kwargs)
@abc.abstractmethod
def on_reset(self) -> None:
"""Set up entities necessary for this environment, and reset any other
data needed for the env. Must create a robot in addition to any
necessary entities.
"""
pass
def add_entities(self, entities: Sequence[en.Entity]) -> None:
"""Adds a list of entities to the current entities list and sets it up.
Only intended to be used from within on_reset(). Needs to be called for
every created entity or else they will not be added to the space!
"""
for entity in entities:
if isinstance(entity, self.robot_cls):
self._robot = entity
self._entities.append(entity)
entity.setup(self.renderer, self._space, self._phys_vars)
def _use_ego_cam(self) -> None:
"""Egocentric agent view."""
self.renderer.set_cam_follow(
source_xy_world=(
self._robot.body.position.x,
self._robot.body.position.y,
),
target_xy_01=(0.5, 0.15),
viewport_hw_world=(
self._arena_h * ARENA_ZOOM_OUT,
self._arena_w * ARENA_ZOOM_OUT,
),
rotation=self._robot.body.angle,
)
def _use_allo_cam(self) -> None:
"""Allocentric 'god-mode' view."""
self.renderer.set_bounds(
left=self._arena.left * ARENA_ZOOM_OUT,
right=self._arena.right * ARENA_ZOOM_OUT,
bottom=self._arena.bottom * ARENA_ZOOM_OUT,
top=self._arena.top * ARENA_ZOOM_OUT,
)
def reset(self):
self._episode_steps = 0
# Delete old entities/space.
self._entities = []
self._space = None
self._robot = None
self._phys_vars = None
if self.renderer is None:
res_h, res_w = self.res_hw
background_color = lighten_rgb(COLORS_RGB["grey"], times=4)
self.renderer = r.Viewer(res_w, res_h, background_color)
else:
# These will get added back later.
self.renderer.reset_geoms()
self._space = pm.Space()
self._space.collision_slop = 0.01
self._space.iterations = self.phys_iter
if self.rand_dynamics:
# Randomise the physics properties of objects and the robot a
# little bit.
self._phys_vars = PhysicsVariables.sample(self.rng)
else:
self._phys_vars = PhysicsVariables.defaults()
# Set up robot and arena.
arena_l, arena_r, arena_b, arena_t = self.ARENA_BOUNDS_LRBT
self._arena = en.ArenaBoundaries(
left=arena_l, right=arena_r, bottom=arena_b, top=arena_t
)
self._arena_w = arena_r - arena_l
self._arena_h = arena_t - arena_b
self.add_entities([self._arena])
reset_rv = self.on_reset()
assert reset_rv is None, (
f"on_reset method of {type(self)} returned {reset_rv}, but "
f"should return None"
)
assert isinstance(self._robot, self.robot_cls)
assert len(self._entities) >= 1
assert np.allclose(self._arena.left + self._arena.right, 0)
assert np.allclose(self._arena.bottom + self._arena.top, 0)
self._renderer_func()
return self.render(mode="rgb_array")
def _phys_steps_on_frame(self):
spf = 1 / self.fps
dt = spf / self.phys_steps
for i in range(self.phys_steps):
for ent in self._entities:
ent.update(dt)
self._space.step(dt)
@abc.abstractmethod
def score_on_end_of_traj(self) -> float:
"""Compute the score for this trajectory.
Only called at the last step of the trajectory.
Returns:
score: number in [0, 1] indicating the worst possible
performance (0), the best possible performance (1) or something
in between. Should apply to the WHOLE trajectory.
"""
pass # pytype: disable=bad-return-type
    @abc.abstractmethod
def get_reward(self) -> float:
"""Compute the reward for the current timestep.
This is called at the end of every timestep.
"""
pass # pytype: disable=bad-return-type
def step(self, action) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
self._robot.set_action(action)
self._phys_steps_on_frame()
self._episode_steps += 1
obs = self.render(mode="rgb_array")
reward = self.get_reward()
done = False
eval_score = 0.0
info = {}
if self.max_episode_steps is not None:
if self._episode_steps >= self.max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
if done:
eval_score = self.score_on_end_of_traj()
assert (
0 <= eval_score <= 1
), f"eval score {eval_score} out of range for env {self}"
info.update(eval_score=eval_score)
return obs, reward, done, info
def render(self, mode="human") -> Optional[np.ndarray]:
for ent in self._entities:
ent.pre_draw()
self._renderer_func()
obs = self.renderer.render()
if mode == "human":
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(obs)
else:
return obs
def close(self) -> None:
if self.renderer:
self.renderer.close()
self.renderer = None
if self.viewer:
self.viewer.close()
self.viewer = None
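# ---------------------------------------------------------------------------
# Editorial sketch (hedged; not part of the original module). It shows how the
# three abstract hooks above fit together in a minimal, hypothetical subclass.
# The zero reward and constant score are placeholders: a real task would add
# goal entities in on_reset() and derive both values from them. Instantiation
# still requires a concrete embodiment class passed as `robot_cls`.
# ---------------------------------------------------------------------------
class _MinimalTaskEnv(BaseEnv):
    def on_reset(self) -> None:
        # reset() requires on_reset() to create the robot via add_entities().
        robot = self._make_robot(init_pos=(0.0, 0.0), init_angle=0.0)
        self.add_entities([robot])

    def get_reward(self) -> float:
        # Placeholder per-step reward; called once per step() above.
        return 0.0

    def score_on_end_of_traj(self) -> float:
        # Placeholder trajectory score; must lie in [0, 1] per step()'s assert.
        return 1.0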
|
[
"pymunk.Space",
"xmagical.entities.ArenaBoundaries",
"numpy.allclose",
"xmagical.phys_vars.PhysVar",
"xmagical.style.lighten_rgb",
"numpy.random.RandomState",
"numpy.random.randint",
"numpy.array",
"gym.spaces.Box",
"xmagical.render.Viewer",
"gym.envs.classic_control.rendering.SimpleImageViewer",
"xmagical.entities.Shape"
] |
[((525, 547), 'xmagical.phys_vars.PhysVar', 'PhysVar', (['(5)', '(3.2, 5.5)'], {}), '(5, (3.2, 5.5))\n', (532, 547), False, 'from xmagical.phys_vars import PhysicsVariablesBase, PhysVar\n'), ((580, 602), 'xmagical.phys_vars.PhysVar', 'PhysVar', (['(1)', '(0.7, 1.5)'], {}), '(1, (0.7, 1.5))\n', (587, 602), False, 'from xmagical.phys_vars import PhysicsVariablesBase, PhysVar\n'), ((632, 654), 'xmagical.phys_vars.PhysVar', 'PhysVar', (['(4)', '(2.5, 4.5)'], {}), '(4, (2.5, 4.5))\n', (639, 654), False, 'from xmagical.phys_vars import PhysicsVariablesBase, PhysVar\n'), ((689, 713), 'xmagical.phys_vars.PhysVar', 'PhysVar', (['(1.5)', '(1.0, 1.8)'], {}), '(1.5, (1.0, 1.8))\n', (696, 713), False, 'from xmagical.phys_vars import PhysicsVariablesBase, PhysVar\n'), ((746, 772), 'xmagical.phys_vars.PhysVar', 'PhysVar', (['(0.1)', '(0.07, 0.15)'], {}), '(0.1, (0.07, 0.15))\n', (753, 772), False, 'from xmagical.phys_vars import PhysicsVariablesBase, PhysVar\n'), ((2826, 2894), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(*self.res_hw, 3)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(*self.res_hw, 3), dtype=np.uint8)\n', (2836, 2894), False, 'from gym import spaces\n'), ((3490, 3522), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (3511, 3522), True, 'import numpy as np\n'), ((3955, 4011), 'xmagical.entities.Shape', 'en.Shape', ([], {'shape_size': 'self.SHAPE_RAD', 'mass': '(0.01)'}), '(shape_size=self.SHAPE_RAD, mass=0.01, **kwargs)\n', (3963, 4011), True, 'import xmagical.entities as en\n'), ((6184, 6194), 'pymunk.Space', 'pm.Space', ([], {}), '()\n', (6192, 6194), True, 'import pymunk as pm\n'), ((6678, 6754), 'xmagical.entities.ArenaBoundaries', 'en.ArenaBoundaries', ([], {'left': 'arena_l', 'right': 'arena_r', 'bottom': 'arena_b', 'top': 'arena_t'}), '(left=arena_l, right=arena_r, bottom=arena_b, top=arena_t)\n', (6696, 6754), True, 'import xmagical.entities as en\n'), ((7201, 7253), 'numpy.allclose', 'np.allclose', (['(self._arena.left + self._arena.right)', '(0)'], {}), '(self._arena.left + self._arena.right, 0)\n', (7212, 7253), True, 'import numpy as np\n'), ((7269, 7321), 'numpy.allclose', 'np.allclose', (['(self._arena.bottom + self._arena.top)', '(0)'], {}), '(self._arena.bottom + self._arena.top, 0)\n', (7280, 7321), True, 'import numpy as np\n'), ((2969, 3019), 'numpy.array', 'np.array', (['([-1] * self.action_dim)'], {'dtype': 'np.float32'}), '([-1] * self.action_dim, dtype=np.float32)\n', (2977, 3019), True, 'import numpy as np\n'), ((3033, 3083), 'numpy.array', 'np.array', (['([+1] * self.action_dim)'], {'dtype': 'np.float32'}), '([+1] * self.action_dim, dtype=np.float32)\n', (3041, 3083), True, 'import numpy as np\n'), ((3435, 3470), 'numpy.random.randint', 'np.random.randint', (['(0)', '((1 << 31) - 1)'], {}), '(0, (1 << 31) - 1)\n', (3452, 3470), True, 'import numpy as np\n'), ((5950, 5990), 'xmagical.style.lighten_rgb', 'lighten_rgb', (["COLORS_RGB['grey']"], {'times': '(4)'}), "(COLORS_RGB['grey'], times=4)\n", (5961, 5990), False, 'from xmagical.style import ARENA_ZOOM_OUT, COLORS_RGB, lighten_rgb\n'), ((6019, 6059), 'xmagical.render.Viewer', 'r.Viewer', (['res_w', 'res_h', 'background_color'], {}), '(res_w, res_h, background_color)\n', (6027, 6059), True, 'import xmagical.render as r\n'), ((9491, 9520), 'gym.envs.classic_control.rendering.SimpleImageViewer', 'rendering.SimpleImageViewer', ([], {}), '()\n', (9518, 9520), False, 'from gym.envs.classic_control import rendering\n')]
|
import unittest
import numpy as np
import cddm.core as core
from cddm.conf import FDTYPE, CDTYPE
from cddm.video import fromarrays
#test arrays
a = [1.,2,3,4]
b = [5,6,7,8]
t1 = [1,3,7,8]
t2 = [2,4,6,8]
# expected results of the calculations (verified by the consistency check below)
cross_a_b = np.array([ 70., 100., 62., 28.],FDTYPE)
cross_a_b_t1_t2 = np.array([32., 72., 28., 38., 24., 38., 20., 8.],FDTYPE)
auto_a = np.array([30., 20., 11., 4.], FDTYPE)
auto_a_t1 = np.array([30., 12., 2., 0., 6., 8., 3., 4.],FDTYPE)
auto_sum_a = np.array([10. , 7.5, 5. , 2.5], FDTYPE)
auto_sum_a_t1 = np.array([10. , 3.5, 1.5, 0. , 2.5, 3. , 2. , 2.5],FDTYPE)
cross_sum_a = np.array([10., 15., 10., 5.], FDTYPE)
cross_sum_a_t1_t2 = np.array([ 4., 11., 4., 6., 4., 6., 4., 1.],FDTYPE)
cross_count_10 = np.array([10, 18, 16, 14, 12, 10, 8, 6, 4, 2],FDTYPE)
cross_count_t1_t2 = np.array([1, 5, 1, 3, 1, 3, 1, 1],FDTYPE)
auto_count_10 = np.array([10, 9, 8, 7, 6, 5, 4, 3, 2, 1],FDTYPE)
auto_count_t1 = np.array([4, 1, 1, 0, 1, 1, 1, 1],FDTYPE)
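# Consistency check (editorial addition, hedged): the regularly-spaced reference
# arrays above are consistent with a one-sided correlation convention,
#   auto_a[k]    = sum_t a[t] * a[t + k]
#   cross_a_b[0] = sum_t a[t] * b[t]
#   cross_a_b[k] = sum_t (a[t] * b[t + k] + b[t] * a[t + k])   for k > 0
# which the assertions below verify with plain numpy.
_a, _b = np.asarray(a), np.asarray(b)
assert np.allclose([np.dot(_a[:len(_a) - k], _a[k:]) for k in range(4)], auto_a)
assert np.allclose(
    [np.dot(_a, _b)]
    + [np.dot(_a[:-k], _b[k:]) + np.dot(_b[:-k], _a[k:]) for k in range(1, 4)],
    cross_a_b,
)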
np.random.seed(0)
a2 = [a,a]
b2 = [b,b]
test_data1 = np.random.randn(32,19,8) + np.random.randn(32,19,8)*1j
test_data2 = np.random.randn(32,19,8) + np.random.randn(32,19,8)*1j
test_data1 = np.array(test_data1, CDTYPE)
test_data2 = np.array(test_data2, CDTYPE)
test_mask = np.ones((19,8),bool)
test_mask[0] = False
test_mask[:,0::3] = False
class TestCorrelateDifference(unittest.TestCase):
def setUp(self):
pass
def test_auto_correlate_fft(self):
out = core.auto_correlate_fft(a)
self.assertTrue(np.allclose(out,auto_a))
out = core.auto_correlate_fft(a,t1)
self.assertTrue(np.allclose(out,auto_a_t1, atol = 1e-6))
out = core.auto_correlate_fft(a,t1, aout = out)
self.assertTrue(np.allclose(out,auto_a_t1*2,atol = 1e-6))
def test_auto_correlate_fft2(self):
out = core.auto_correlate_fft(a2,axis = -1)
self.assertTrue(np.allclose(out[0],auto_a))
out = core.auto_correlate_fft(a2,t1,axis = -1)
self.assertTrue(np.allclose(out[0],auto_a_t1, atol = 1e-6))
out = core.auto_correlate_fft(a2,t1, axis = -1, aout = out)
self.assertTrue(np.allclose(out[0],auto_a_t1*2, atol = 1e-6))
def test_auto_correlate_fft_n(self):
out = core.auto_correlate_fft(a, n = 3)
self.assertTrue(np.allclose(out,auto_a[0:3]))
out = core.auto_correlate_fft(a,t1,n = 3)
self.assertTrue(np.allclose(out,auto_a_t1[0:3]))
out = core.auto_correlate_fft(a,t1,n = 3, aout = out)
self.assertTrue(np.allclose(out,auto_a_t1[0:3]*2))
def test_auto_correlate_fft_n2(self):
out = core.auto_correlate_fft(a2, axis = -1, n = 3)
self.assertTrue(np.allclose(out[0],auto_a[0:3]))
out = core.auto_correlate_fft(a2,t1,n = 3, axis = -1)
self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]))
out = core.auto_correlate_fft(a2,t1,n = 3, axis = -1, aout = out)
self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]*2))
def test_auto_correlate(self):
out = core.auto_correlate(a)
self.assertTrue(np.allclose(out,auto_a))
out = core.auto_correlate(a,t1)
self.assertTrue(np.allclose(out,auto_a_t1))
out = core.auto_correlate(a,t1, aout = out)
self.assertTrue(np.allclose(out,auto_a_t1*2))
def test_auto_correlate2(self):
out = core.auto_correlate(a2, axis = -1)
self.assertTrue(np.allclose(out[0],auto_a))
out = core.auto_correlate(a2,t1, axis = -1)
self.assertTrue(np.allclose(out[0],auto_a_t1))
out = core.auto_correlate(a2,t1, axis = -1, aout = out)
self.assertTrue(np.allclose(out[0],auto_a_t1*2))
def test_auto_correlate_n(self):
out = core.auto_correlate(a, n = 3)
self.assertTrue(np.allclose(out,auto_a[0:3]))
out = core.auto_correlate(a,t1,n = 3)
self.assertTrue(np.allclose(out,auto_a_t1[0:3]))
out = core.auto_correlate(a,t1,n = 3, aout = out)
self.assertTrue(np.allclose(out,auto_a_t1[0:3]*2))
def test_auto_correlate_n2(self):
out = core.auto_correlate(a2, n = 3,axis = -1)
self.assertTrue(np.allclose(out[0],auto_a[0:3]))
out = core.auto_correlate(a2,t1,n = 3, axis = -1)
self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]))
out = core.auto_correlate(a2,t1,n = 3, aout = out, axis = 1)
self.assertTrue(np.allclose(out[0],auto_a_t1[0:3]*2))
def test_cross_correlate_fft(self):
out = core.cross_correlate_fft(a,b)
self.assertTrue(np.allclose(out,cross_a_b))
out = core.cross_correlate_fft(a,b,t1,t2)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2))
out = core.cross_correlate_fft(a,b,t1,t2, aout = out)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2*2))
def test_cross_correlate_fft2(self):
out = core.cross_correlate_fft(a2,b2,axis = 1)
self.assertTrue(np.allclose(out[0],cross_a_b))
out = core.cross_correlate_fft(a2,b2,t1,t2,axis = 1)
self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2))
out = core.cross_correlate_fft(a2,b2,t1,t2, aout = out,axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2*2))
def test_cross_correlate_fft_n(self):
out = core.cross_correlate_fft(a,b, n = 3)
self.assertTrue(np.allclose(out,cross_a_b[:3]))
out = core.cross_correlate_fft(a,b,t1,t2, n = 3)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]))
out = core.cross_correlate_fft(a,b,t1,t2, n = 3, aout = out)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]*2))
def test_cross_correlate_fft_n2(self):
out = core.cross_correlate_fft(a2,b2, n = 3 ,axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b[:3]))
out = core.cross_correlate_fft(a2,b2,t1,t2, n = 3, axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2[:3]))
out = core.cross_correlate_fft(a2,b2,t1,t2, n = 3, aout = out, axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2[:3]*2))
def test_cross_correlate(self):
out = core.cross_correlate(a,b)
self.assertTrue(np.allclose(out,cross_a_b))
out = core.cross_correlate(a,b,t1,t2)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2))
out = core.cross_correlate(a,b,t1,t2, aout = out)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2*2))
def test_cross_correlate2(self):
out = core.cross_correlate(a2,b2,axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b))
out = core.cross_correlate(a2,b2,t1,t2,axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2))
out = core.cross_correlate(a2,b2,t1,t2, aout = out,axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2*2))
def test_cross_correlate_n(self):
out = core.cross_correlate(a,b, n = 3)
self.assertTrue(np.allclose(out,cross_a_b[:3]))
out = core.cross_correlate(a,b,t1,t2, n = 3)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]))
out = core.cross_correlate(a,b,t1,t2, n = 3, aout = out)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]*2))
def test_cross_correlate_n2(self):
out = core.cross_correlate(a2,b2, n = 3,axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b[:3]))
out = core.cross_correlate(a2,b2,t1,t2, n = 3, axis = -1)
self.assertTrue(np.allclose(out[0],cross_a_b_t1_t2[:3]))
out = core.cross_correlate(a2,b2,t1,t2, n = 3, aout = out, axis = -1)
self.assertTrue(np.allclose(out,cross_a_b_t1_t2[:3]*2))
class TestSum(unittest.TestCase):
def test_auto_sum(self):
out = core.auto_sum(a)
self.assertTrue(np.allclose(out,auto_sum_a))
out = core.auto_sum(a,t1)
self.assertTrue(np.allclose(out,auto_sum_a_t1))
out = core.auto_sum(a,t1, aout = out)
self.assertTrue(np.allclose(out,auto_sum_a_t1*2))
def test_auto_sum_n(self):
out = core.auto_sum(a, n = 3)
self.assertTrue(np.allclose(out,auto_sum_a[0:3]))
out = core.auto_sum(a,t1, n = 3)
self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]))
out = core.auto_sum(a,t1, aout = out)
self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*2))
out = core.auto_sum(a,t1, n = 3, aout = out)
self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*3))
def test_auto_sum_fft(self):
out = core.auto_sum_fft(a,t1)
self.assertTrue(np.allclose(out,auto_sum_a_t1))
out = core.auto_sum_fft(a,t1, aout = out)
self.assertTrue(np.allclose(out,auto_sum_a_t1*2))
def test_auto_sum_fft_n(self):
out = core.auto_sum_fft(a,t1, n = 3)
self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]))
out = core.auto_sum_fft(a,t1, n =3, aout = out)
self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*2))
out = core.auto_sum_fft(a,t1, aout = out)
self.assertTrue(np.allclose(out,auto_sum_a_t1[0:3]*3))
def test_cross_sum(self):
out = core.cross_sum(a)
self.assertTrue(np.allclose(out,cross_sum_a))
out = core.cross_sum(a,t1,t2)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2))
out = core.cross_sum(a,t1,t2, aout = out)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2*2))
def test_cross_sum_n(self):
out = core.cross_sum(a, n=3)
self.assertTrue(np.allclose(out,cross_sum_a[0:3]))
out = core.cross_sum(a,t1,t2, n = 3)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]))
out = core.cross_sum(a,t1,t2, aout = out)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]*2))
def test_cross_sum_fft(self):
out = core.cross_sum_fft(a,t1,t2)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2))
out = core.cross_sum_fft(a,t1,t2, aout = out)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2*2))
def test_cross_sum_fft_n(self):
out = core.cross_sum_fft(a,t1,t2, n = 3)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]))
out = core.cross_sum_fft(a,t1,t2, aout = out)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]*2))
out = core.cross_sum_fft(a,t1,t2, n =3, aout = out)
self.assertTrue(np.allclose(out,cross_sum_a_t1_t2[0:3]*3))
def test_cross_sum_equivalence_ND(self):
for axis in (0,1,2):
t1 = np.arange(test_data1.shape[axis])
t2 = np.arange(test_data1.shape[axis]) + 3
out1 = core.cross_sum(test_data1,t1,t2, axis = axis)
out2 = core.cross_sum_fft(test_data1,t1,t2, axis = axis)
self.assertTrue(np.allclose(out1,out2))
class TestCount(unittest.TestCase):
def test_cross_count(self):
out = core.cross_count(10)
self.assertTrue(np.allclose(out,cross_count_10))
out = core.cross_count(t1,t2)
self.assertTrue(np.allclose(out,cross_count_t1_t2))
out = core.cross_count(t1,t2, aout = out)
self.assertTrue(np.allclose(out,cross_count_t1_t2*2))
def test_cross_count_n(self):
out = core.cross_count(10, n = 5)
self.assertTrue(np.allclose(out,cross_count_10[0:5]))
out = core.cross_count(t1,t2,n=5)
self.assertTrue(np.allclose(out,cross_count_t1_t2[0:5]))
out = core.cross_count(t1,t2, aout = out)
self.assertTrue(np.allclose(out,2*cross_count_t1_t2[0:5]))
def test_auto_count(self):
out = core.auto_count(10)
self.assertTrue(np.allclose(out,auto_count_10))
out = core.auto_count(t1)
self.assertTrue(np.allclose(out,auto_count_t1))
out = core.auto_count(t1, aout = out)
self.assertTrue(np.allclose(out,auto_count_t1*2))
def test_auto_count_n(self):
out = core.auto_count(10, n = 5)
self.assertTrue(np.allclose(out,auto_count_10[0:5]))
out = core.auto_count(t1, n = 5)
self.assertTrue(np.allclose(out,auto_count_t1[:5]))
out = core.auto_count(t1, aout = out)
self.assertTrue(np.allclose(out,2*auto_count_t1[:5]))
class TestIcorr(unittest.TestCase):
def test_cross_equivalence(self):
for method in ("corr","diff","fft"):
bg,var = core.stats(test_data1, test_data2, axis = 0)
data = core.ccorr(test_data1, test_data2,n = 8, norm = 1, method = method)
out1 = core.normalize(data, bg, var)
vid = fromarrays((test_data1, test_data2))
data,bg,var = core.iccorr(vid, count = len(test_data1),chunk_size = 16,n = 8, norm = 1, method = method)
out2 = core.normalize(data, bg, var)
self.assertTrue(np.allclose(out1, out2))
def test_auto_equivalence_2(self):
for method in ("corr",):
bg,var = core.stats(test_data1, axis = 0)
data1 = core.ccorr(test_data1,test_data1, n = 8, norm = 2, method = method)
out1 = core.normalize(data1, bg, var, norm = 2)
data2,bg,var = core.iacorr(test_data1, n = 8, norm = 2, method = method)
out2 = core.normalize(data2, bg, var, norm = 2)
self.assertTrue(np.allclose(out1, out2))
def test_auto_equivalence_1(self):
for method in ("corr","fft","diff"):
bg,var = core.stats(test_data1, axis = 0)
data1 = core.acorr(test_data1, n = 8, norm = 1, method = method)
out1 = core.normalize(data1, bg, var, norm = 1)
data2,bg,var = core.iacorr(test_data1, n = 8, norm = 1, method = method)
out2 = core.normalize(data2, bg, var, norm = 1)
self.assertTrue(np.allclose(out1, out2))
class TestCorr(unittest.TestCase):
def setUp(self):
pass
def test_corr_regular_3(self):
for scale in (True, False):
for mode in ("corr", "diff"):
for axis in (0,1,2):
bg,var = core.stats(test_data1, test_data2, axis = axis)
data = core.ccorr(test_data1, test_data2, norm = 3, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale)
data = core.ccorr(test_data1, test_data2, norm = 3, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
data = core.ccorr(test_data1, test_data2, norm = 3, method = "diff", axis = axis)
out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
def test_ccorr_regular_3_mask(self):
for scale in (True, False):
for mode in ("corr", "diff"):
axis = 0
bg,var = core.stats(test_data1, test_data2, axis = axis)
data = core.ccorr(test_data1, test_data2, norm = 3, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
data = core.ccorr(test_data1, test_data2, norm = 3, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
self.assertTrue(np.allclose(self.out, out_other))
data = core.ccorr(test_data1, test_data2, norm = 3, method = "diff", axis = axis)
out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
self.assertTrue(np.allclose(self.out, out_other))
def test_acorr_regular_3(self):
for scale in (True, False):
for mode in ("corr", "diff"):
for axis in (0,1,2):
bg,var = core.stats(test_data1, axis = axis)
data = core.ccorr(test_data1, test_data1, norm = 3, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
data = core.acorr(test_data1,norm = 3, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
data = core.acorr(test_data1,norm = 1, method = "diff", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
def test_ccorr_regular_1(self):
for scale in (True, False):
for mode in ("corr", "diff"):
for axis in (0,1,2):
bg,var = core.stats(test_data1, test_data2, axis = axis)
data = core.ccorr(test_data1, test_data2, norm = 1, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
data = core.ccorr(test_data1, test_data2, norm = 1, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
data = core.ccorr(test_data1, test_data2, norm = 1, method = "diff", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
def test_acorr_regular_1(self):
for scale in (True, False):
for mode in ("corr", "diff"):
for axis in (0,1,2):
bg,var = core.stats(test_data1, axis = axis)
data = core.acorr(test_data1, norm = 1, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
data = core.acorr(test_data1,norm = 1, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
data = core.acorr(test_data1,norm = 1, method = "diff", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
def test_corr_regular_1_mask(self):
for scale in (True, False):
for mode in ("corr", "diff"):
axis = 0
bg,var = core.stats(test_data1, test_data2, axis = axis)
data = core.ccorr(test_data1, test_data2, norm = 1, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale,mask = test_mask)
data = core.ccorr(test_data1, test_data2, norm = 1, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale,mask = test_mask)
self.assertTrue(np.allclose(self.out, out_other))
data = core.ccorr(test_data1, test_data2, norm = 1, method = "diff", axis = axis)
out_other = core.normalize(data, bg, var, norm = 1, mode = mode, scale = scale,mask = test_mask)
self.assertTrue(np.allclose(self.out, out_other))
def test_ccorr_regular_0(self):
for scale in (True, False):
for mode in ("corr", "diff"):
for axis in (0,1,2):
bg,var = core.stats(test_data1, test_data2, axis = axis)
data = core.ccorr(test_data1, test_data2, norm = 0, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
data = core.ccorr(test_data1, test_data2, norm = 0, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
def test_acorr_regular_0(self):
for scale in (True, False):
for mode in ("corr", "diff"):
for axis in (0,1,2):
bg,var = core.stats(test_data1, axis = axis)
data = core.acorr(test_data1, norm = 0, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
data = core.acorr(test_data1,norm = 0, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
def test_corr_regular_0_mask(self):
for scale in (True, False):
for mode in ("corr", "diff"):
axis = 0
bg,var = core.stats(test_data1, test_data2, axis = axis)
data = core.ccorr(test_data1, test_data2, norm = 0, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale, mask = test_mask)
data = core.ccorr(test_data1, test_data2, norm = 0, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 0, mode = mode, scale = scale, mask = test_mask)
self.assertTrue(np.allclose(self.out, out_other))
def test_corr_regular_2(self):
for scale in (True, False):
for mode in ("corr", "diff"):
for axis in (0,1,2):
bg,var = core.stats(test_data1, test_data2, axis = axis)
data = core.ccorr(test_data1, test_data2, norm = 2, method = "fft", axis = axis)
self.out = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale)
data = core.ccorr(test_data1, test_data2, norm = 2, method = "corr", axis = axis)
out_other = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale)
self.assertTrue(np.allclose(self.out, out_other))
def test_corr_regular_2_mask(self):
for scale in (True, False):
for mode in ("corr", "diff"):
bg,var = core.stats(test_data1, test_data2)
data = core.ccorr(test_data1, test_data2, norm = 2, method = "fft")
self.out = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale, mask = test_mask)
data = core.ccorr(test_data1, test_data2, norm = 2, method = "corr")
out_other = core.normalize(data, bg, var, norm = 2, mode = mode, scale = scale,mask = test_mask)
self.assertTrue(np.allclose(self.out, out_other))
class TestRest(unittest.TestCase):
def test_abs2(self):
self.assertTrue(np.allclose(core.abs2(test_data1), np.abs(test_data1)**2))
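# Editorial sketch (hedged; not part of the original suite): the batch-versus-
# streaming equivalence that TestIcorr.test_cross_equivalence exercises, written
# once as a standalone helper. Every call below mirrors an invocation that
# already appears in the tests; no additional cddm behaviour is assumed.
def _ccorr_equivalence_demo(method="corr"):
    bg, var = core.stats(test_data1, test_data2, axis=0)
    data = core.ccorr(test_data1, test_data2, n=8, norm=1, method=method)
    out_batch = core.normalize(data, bg, var)
    vid = fromarrays((test_data1, test_data2))
    data_i, bg_i, var_i = core.iccorr(
        vid, count=len(test_data1), chunk_size=16, n=8, norm=1, method=method
    )
    out_stream = core.normalize(data_i, bg_i, var_i)
    return np.allclose(out_batch, out_stream)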
if __name__ == "__main__":
unittest.main()
|
[
"numpy.random.seed",
"numpy.abs",
"numpy.allclose",
"numpy.ones",
"cddm.core.cross_count",
"cddm.core.ccorr",
"numpy.arange",
"cddm.core.acorr",
"cddm.video.fromarrays",
"cddm.core.cross_correlate_fft",
"unittest.main",
"cddm.core.normalize",
"cddm.core.iacorr",
"cddm.core.abs2",
"numpy.random.randn",
"cddm.core.auto_count",
"cddm.core.cross_sum",
"cddm.core.auto_correlate_fft",
"cddm.core.stats",
"cddm.core.auto_correlate",
"cddm.core.auto_sum_fft",
"cddm.core.cross_sum_fft",
"cddm.core.cross_correlate",
"numpy.array",
"cddm.core.auto_sum"
] |
[((241, 284), 'numpy.array', 'np.array', (['[70.0, 100.0, 62.0, 28.0]', 'FDTYPE'], {}), '([70.0, 100.0, 62.0, 28.0], FDTYPE)\n', (249, 284), True, 'import numpy as np\n'), ((301, 366), 'numpy.array', 'np.array', (['[32.0, 72.0, 28.0, 38.0, 24.0, 38.0, 20.0, 8.0]', 'FDTYPE'], {}), '([32.0, 72.0, 28.0, 38.0, 24.0, 38.0, 20.0, 8.0], FDTYPE)\n', (309, 366), True, 'import numpy as np\n'), ((368, 409), 'numpy.array', 'np.array', (['[30.0, 20.0, 11.0, 4.0]', 'FDTYPE'], {}), '([30.0, 20.0, 11.0, 4.0], FDTYPE)\n', (376, 409), True, 'import numpy as np\n'), ((419, 479), 'numpy.array', 'np.array', (['[30.0, 12.0, 2.0, 0.0, 6.0, 8.0, 3.0, 4.0]', 'FDTYPE'], {}), '([30.0, 12.0, 2.0, 0.0, 6.0, 8.0, 3.0, 4.0], FDTYPE)\n', (427, 479), True, 'import numpy as np\n'), ((485, 524), 'numpy.array', 'np.array', (['[10.0, 7.5, 5.0, 2.5]', 'FDTYPE'], {}), '([10.0, 7.5, 5.0, 2.5], FDTYPE)\n', (493, 524), True, 'import numpy as np\n'), ((544, 603), 'numpy.array', 'np.array', (['[10.0, 3.5, 1.5, 0.0, 2.5, 3.0, 2.0, 2.5]', 'FDTYPE'], {}), '([10.0, 3.5, 1.5, 0.0, 2.5, 3.0, 2.0, 2.5], FDTYPE)\n', (552, 603), True, 'import numpy as np\n'), ((625, 666), 'numpy.array', 'np.array', (['[10.0, 15.0, 10.0, 5.0]', 'FDTYPE'], {}), '([10.0, 15.0, 10.0, 5.0], FDTYPE)\n', (633, 666), True, 'import numpy as np\n'), ((684, 743), 'numpy.array', 'np.array', (['[4.0, 11.0, 4.0, 6.0, 4.0, 6.0, 4.0, 1.0]', 'FDTYPE'], {}), '([4.0, 11.0, 4.0, 6.0, 4.0, 6.0, 4.0, 1.0], FDTYPE)\n', (692, 743), True, 'import numpy as np\n'), ((760, 814), 'numpy.array', 'np.array', (['[10, 18, 16, 14, 12, 10, 8, 6, 4, 2]', 'FDTYPE'], {}), '([10, 18, 16, 14, 12, 10, 8, 6, 4, 2], FDTYPE)\n', (768, 814), True, 'import numpy as np\n'), ((838, 880), 'numpy.array', 'np.array', (['[1, 5, 1, 3, 1, 3, 1, 1]', 'FDTYPE'], {}), '([1, 5, 1, 3, 1, 3, 1, 1], FDTYPE)\n', (846, 880), True, 'import numpy as np\n'), ((896, 945), 'numpy.array', 'np.array', (['[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]', 'FDTYPE'], {}), '([10, 9, 8, 7, 6, 5, 4, 3, 2, 1], FDTYPE)\n', (904, 945), True, 'import numpy as np\n'), ((970, 1012), 'numpy.array', 'np.array', (['[4, 1, 1, 0, 1, 1, 1, 1]', 'FDTYPE'], {}), '([4, 1, 1, 0, 1, 1, 1, 1], FDTYPE)\n', (978, 1012), True, 'import numpy as np\n'), ((1013, 1030), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1027, 1030), True, 'import numpy as np\n'), ((1204, 1232), 'numpy.array', 'np.array', (['test_data1', 'CDTYPE'], {}), '(test_data1, CDTYPE)\n', (1212, 1232), True, 'import numpy as np\n'), ((1246, 1274), 'numpy.array', 'np.array', (['test_data2', 'CDTYPE'], {}), '(test_data2, CDTYPE)\n', (1254, 1274), True, 'import numpy as np\n'), ((1288, 1310), 'numpy.ones', 'np.ones', (['(19, 8)', 'bool'], {}), '((19, 8), bool)\n', (1295, 1310), True, 'import numpy as np\n'), ((1068, 1094), 'numpy.random.randn', 'np.random.randn', (['(32)', '(19)', '(8)'], {}), '(32, 19, 8)\n', (1083, 1094), True, 'import numpy as np\n'), ((1136, 1162), 'numpy.random.randn', 'np.random.randn', (['(32)', '(19)', '(8)'], {}), '(32, 19, 8)\n', (1151, 1162), True, 'import numpy as np\n'), ((23963, 23978), 'unittest.main', 'unittest.main', ([], {}), '()\n', (23976, 23978), False, 'import unittest\n'), ((1095, 1121), 'numpy.random.randn', 'np.random.randn', (['(32)', '(19)', '(8)'], {}), '(32, 19, 8)\n', (1110, 1121), True, 'import numpy as np\n'), ((1163, 1189), 'numpy.random.randn', 'np.random.randn', (['(32)', '(19)', '(8)'], {}), '(32, 19, 8)\n', (1178, 1189), True, 'import numpy as np\n'), ((1511, 1537), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a'], {}), 
'(a)\n', (1534, 1537), True, 'import cddm.core as core\n'), ((1601, 1631), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a', 't1'], {}), '(a, t1)\n', (1624, 1631), True, 'import cddm.core as core\n'), ((1711, 1751), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a', 't1'], {'aout': 'out'}), '(a, t1, aout=out)\n', (1734, 1751), True, 'import cddm.core as core\n'), ((1887, 1923), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a2'], {'axis': '(-1)'}), '(a2, axis=-1)\n', (1910, 1923), True, 'import cddm.core as core\n'), ((1991, 2031), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a2', 't1'], {'axis': '(-1)'}), '(a2, t1, axis=-1)\n', (2014, 2031), True, 'import cddm.core as core\n'), ((2115, 2165), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a2', 't1'], {'axis': '(-1)', 'aout': 'out'}), '(a2, t1, axis=-1, aout=out)\n', (2138, 2165), True, 'import cddm.core as core\n'), ((2312, 2343), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a'], {'n': '(3)'}), '(a, n=3)\n', (2335, 2343), True, 'import cddm.core as core\n'), ((2415, 2450), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a', 't1'], {'n': '(3)'}), '(a, t1, n=3)\n', (2438, 2450), True, 'import cddm.core as core\n'), ((2523, 2568), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a', 't1'], {'n': '(3)', 'aout': 'out'}), '(a, t1, n=3, aout=out)\n', (2546, 2568), True, 'import cddm.core as core\n'), ((2688, 2729), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a2'], {'axis': '(-1)', 'n': '(3)'}), '(a2, axis=-1, n=3)\n', (2711, 2729), True, 'import cddm.core as core\n'), ((2806, 2851), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a2', 't1'], {'n': '(3)', 'axis': '(-1)'}), '(a2, t1, n=3, axis=-1)\n', (2829, 2851), True, 'import cddm.core as core\n'), ((2929, 2984), 'cddm.core.auto_correlate_fft', 'core.auto_correlate_fft', (['a2', 't1'], {'n': '(3)', 'axis': '(-1)', 'aout': 'out'}), '(a2, t1, n=3, axis=-1, aout=out)\n', (2952, 2984), True, 'import cddm.core as core\n'), ((3102, 3124), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a'], {}), '(a)\n', (3121, 3124), True, 'import cddm.core as core\n'), ((3189, 3215), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a', 't1'], {}), '(a, t1)\n', (3208, 3215), True, 'import cddm.core as core\n'), ((3282, 3318), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a', 't1'], {'aout': 'out'}), '(a, t1, aout=out)\n', (3301, 3318), True, 'import cddm.core as core\n'), ((3434, 3466), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a2'], {'axis': '(-1)'}), '(a2, axis=-1)\n', (3453, 3466), True, 'import cddm.core as core\n'), ((3536, 3572), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a2', 't1'], {'axis': '(-1)'}), '(a2, t1, axis=-1)\n', (3555, 3572), True, 'import cddm.core as core\n'), ((3644, 3690), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a2', 't1'], {'axis': '(-1)', 'aout': 'out'}), '(a2, t1, axis=-1, aout=out)\n', (3663, 3690), True, 'import cddm.core as core\n'), ((3812, 3839), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a'], {'n': '(3)'}), '(a, n=3)\n', (3831, 3839), True, 'import cddm.core as core\n'), ((3911, 3942), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a', 't1'], {'n': '(3)'}), '(a, t1, n=3)\n', (3930, 3942), True, 'import cddm.core as core\n'), ((4015, 4056), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a', 't1'], {'n': '(3)', 'aout': 'out'}), '(a, t1, n=3, aout=out)\n', 
(4034, 4056), True, 'import cddm.core as core\n'), ((4180, 4217), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a2'], {'n': '(3)', 'axis': '(-1)'}), '(a2, n=3, axis=-1)\n', (4199, 4217), True, 'import cddm.core as core\n'), ((4293, 4334), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a2', 't1'], {'n': '(3)', 'axis': '(-1)'}), '(a2, t1, n=3, axis=-1)\n', (4312, 4334), True, 'import cddm.core as core\n'), ((4412, 4462), 'cddm.core.auto_correlate', 'core.auto_correlate', (['a2', 't1'], {'n': '(3)', 'aout': 'out', 'axis': '(1)'}), '(a2, t1, n=3, aout=out, axis=1)\n', (4431, 4462), True, 'import cddm.core as core\n'), ((4593, 4623), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a', 'b'], {}), '(a, b)\n', (4617, 4623), True, 'import cddm.core as core\n'), ((4690, 4728), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a', 'b', 't1', 't2'], {}), '(a, b, t1, t2)\n', (4714, 4728), True, 'import cddm.core as core\n'), ((4799, 4847), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a', 'b', 't1', 't2'], {'aout': 'out'}), '(a, b, t1, t2, aout=out)\n', (4823, 4847), True, 'import cddm.core as core\n'), ((4973, 5013), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a2', 'b2'], {'axis': '(1)'}), '(a2, b2, axis=1)\n', (4997, 5013), True, 'import cddm.core as core\n'), ((5084, 5132), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a2', 'b2', 't1', 't2'], {'axis': '(1)'}), '(a2, b2, t1, t2, axis=1)\n', (5108, 5132), True, 'import cddm.core as core\n'), ((5207, 5266), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a2', 'b2', 't1', 't2'], {'aout': 'out', 'axis': '(-1)'}), '(a2, b2, t1, t2, aout=out, axis=-1)\n', (5231, 5266), True, 'import cddm.core as core\n'), ((5396, 5431), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a', 'b'], {'n': '(3)'}), '(a, b, n=3)\n', (5420, 5431), True, 'import cddm.core as core\n'), ((5504, 5547), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a', 'b', 't1', 't2'], {'n': '(3)'}), '(a, b, t1, t2, n=3)\n', (5528, 5547), True, 'import cddm.core as core\n'), ((5624, 5677), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a', 'b', 't1', 't2'], {'n': '(3)', 'aout': 'out'}), '(a, b, t1, t2, n=3, aout=out)\n', (5648, 5677), True, 'import cddm.core as core\n'), ((5802, 5848), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a2', 'b2'], {'n': '(3)', 'axis': '(-1)'}), '(a2, b2, n=3, axis=-1)\n', (5826, 5848), True, 'import cddm.core as core\n'), ((5926, 5980), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a2', 'b2', 't1', 't2'], {'n': '(3)', 'axis': '(-1)'}), '(a2, b2, t1, t2, n=3, axis=-1)\n', (5950, 5980), True, 'import cddm.core as core\n'), ((6062, 6126), 'cddm.core.cross_correlate_fft', 'core.cross_correlate_fft', (['a2', 'b2', 't1', 't2'], {'n': '(3)', 'aout': 'out', 'axis': '(-1)'}), '(a2, b2, t1, t2, n=3, aout=out, axis=-1)\n', (6086, 6126), True, 'import cddm.core as core\n'), ((6249, 6275), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a', 'b'], {}), '(a, b)\n', (6269, 6275), True, 'import cddm.core as core\n'), ((6342, 6376), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a', 'b', 't1', 't2'], {}), '(a, b, t1, t2)\n', (6362, 6376), True, 'import cddm.core as core\n'), ((6447, 6491), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a', 'b', 't1', 't2'], {'aout': 'out'}), '(a, b, t1, t2, aout=out)\n', (6467, 6491), True, 'import cddm.core as core\n'), ((6604, 
6641), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a2', 'b2'], {'axis': '(-1)'}), '(a2, b2, axis=-1)\n', (6624, 6641), True, 'import cddm.core as core\n'), ((6712, 6757), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a2', 'b2', 't1', 't2'], {'axis': '(-1)'}), '(a2, b2, t1, t2, axis=-1)\n', (6732, 6757), True, 'import cddm.core as core\n'), ((6832, 6887), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a2', 'b2', 't1', 't2'], {'aout': 'out', 'axis': '(-1)'}), '(a2, b2, t1, t2, aout=out, axis=-1)\n', (6852, 6887), True, 'import cddm.core as core\n'), ((7013, 7044), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a', 'b'], {'n': '(3)'}), '(a, b, n=3)\n', (7033, 7044), True, 'import cddm.core as core\n'), ((7117, 7156), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a', 'b', 't1', 't2'], {'n': '(3)'}), '(a, b, t1, t2, n=3)\n', (7137, 7156), True, 'import cddm.core as core\n'), ((7233, 7282), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a', 'b', 't1', 't2'], {'n': '(3)', 'aout': 'out'}), '(a, b, t1, t2, n=3, aout=out)\n', (7253, 7282), True, 'import cddm.core as core\n'), ((7411, 7453), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a2', 'b2'], {'n': '(3)', 'axis': '(-1)'}), '(a2, b2, n=3, axis=-1)\n', (7431, 7453), True, 'import cddm.core as core\n'), ((7530, 7580), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a2', 'b2', 't1', 't2'], {'n': '(3)', 'axis': '(-1)'}), '(a2, b2, t1, t2, n=3, axis=-1)\n', (7550, 7580), True, 'import cddm.core as core\n'), ((7662, 7722), 'cddm.core.cross_correlate', 'core.cross_correlate', (['a2', 'b2', 't1', 't2'], {'n': '(3)', 'aout': 'out', 'axis': '(-1)'}), '(a2, b2, t1, t2, n=3, aout=out, axis=-1)\n', (7682, 7722), True, 'import cddm.core as core\n'), ((7869, 7885), 'cddm.core.auto_sum', 'core.auto_sum', (['a'], {}), '(a)\n', (7882, 7885), True, 'import cddm.core as core\n'), ((7953, 7973), 'cddm.core.auto_sum', 'core.auto_sum', (['a', 't1'], {}), '(a, t1)\n', (7966, 7973), True, 'import cddm.core as core\n'), ((8044, 8074), 'cddm.core.auto_sum', 'core.auto_sum', (['a', 't1'], {'aout': 'out'}), '(a, t1, aout=out)\n', (8057, 8074), True, 'import cddm.core as core\n'), ((8181, 8202), 'cddm.core.auto_sum', 'core.auto_sum', (['a'], {'n': '(3)'}), '(a, n=3)\n', (8194, 8202), True, 'import cddm.core as core\n'), ((8277, 8302), 'cddm.core.auto_sum', 'core.auto_sum', (['a', 't1'], {'n': '(3)'}), '(a, t1, n=3)\n', (8290, 8302), True, 'import cddm.core as core\n'), ((8380, 8410), 'cddm.core.auto_sum', 'core.auto_sum', (['a', 't1'], {'aout': 'out'}), '(a, t1, aout=out)\n', (8393, 8410), True, 'import cddm.core as core\n'), ((8491, 8526), 'cddm.core.auto_sum', 'core.auto_sum', (['a', 't1'], {'n': '(3)', 'aout': 'out'}), '(a, t1, n=3, aout=out)\n', (8504, 8526), True, 'import cddm.core as core\n'), ((8650, 8674), 'cddm.core.auto_sum_fft', 'core.auto_sum_fft', (['a', 't1'], {}), '(a, t1)\n', (8667, 8674), True, 'import cddm.core as core\n'), ((8745, 8779), 'cddm.core.auto_sum_fft', 'core.auto_sum_fft', (['a', 't1'], {'aout': 'out'}), '(a, t1, aout=out)\n', (8762, 8779), True, 'import cddm.core as core\n'), ((8898, 8927), 'cddm.core.auto_sum_fft', 'core.auto_sum_fft', (['a', 't1'], {'n': '(3)'}), '(a, t1, n=3)\n', (8915, 8927), True, 'import cddm.core as core\n'), ((9005, 9044), 'cddm.core.auto_sum_fft', 'core.auto_sum_fft', (['a', 't1'], {'n': '(3)', 'aout': 'out'}), '(a, t1, n=3, aout=out)\n', (9022, 9044), True, 'import cddm.core as core\n'), ((9125, 9159), 'cddm.core.auto_sum_fft', 
'core.auto_sum_fft', (['a', 't1'], {'aout': 'out'}), '(a, t1, aout=out)\n', (9142, 9159), True, 'import cddm.core as core\n'), ((9269, 9286), 'cddm.core.cross_sum', 'core.cross_sum', (['a'], {}), '(a)\n', (9283, 9286), True, 'import cddm.core as core\n'), ((9355, 9380), 'cddm.core.cross_sum', 'core.cross_sum', (['a', 't1', 't2'], {}), '(a, t1, t2)\n', (9369, 9380), True, 'import cddm.core as core\n'), ((9454, 9489), 'cddm.core.cross_sum', 'core.cross_sum', (['a', 't1', 't2'], {'aout': 'out'}), '(a, t1, t2, aout=out)\n', (9468, 9489), True, 'import cddm.core as core\n'), ((9600, 9622), 'cddm.core.cross_sum', 'core.cross_sum', (['a'], {'n': '(3)'}), '(a, n=3)\n', (9614, 9622), True, 'import cddm.core as core\n'), ((9696, 9726), 'cddm.core.cross_sum', 'core.cross_sum', (['a', 't1', 't2'], {'n': '(3)'}), '(a, t1, t2, n=3)\n', (9710, 9726), True, 'import cddm.core as core\n'), ((9807, 9842), 'cddm.core.cross_sum', 'core.cross_sum', (['a', 't1', 't2'], {'aout': 'out'}), '(a, t1, t2, aout=out)\n', (9821, 9842), True, 'import cddm.core as core\n'), ((9960, 9989), 'cddm.core.cross_sum_fft', 'core.cross_sum_fft', (['a', 't1', 't2'], {}), '(a, t1, t2)\n', (9978, 9989), True, 'import cddm.core as core\n'), ((10063, 10102), 'cddm.core.cross_sum_fft', 'core.cross_sum_fft', (['a', 't1', 't2'], {'aout': 'out'}), '(a, t1, t2, aout=out)\n', (10081, 10102), True, 'import cddm.core as core\n'), ((10217, 10251), 'cddm.core.cross_sum_fft', 'core.cross_sum_fft', (['a', 't1', 't2'], {'n': '(3)'}), '(a, t1, t2, n=3)\n', (10235, 10251), True, 'import cddm.core as core\n'), ((10332, 10371), 'cddm.core.cross_sum_fft', 'core.cross_sum_fft', (['a', 't1', 't2'], {'aout': 'out'}), '(a, t1, t2, aout=out)\n', (10350, 10371), True, 'import cddm.core as core\n'), ((10454, 10498), 'cddm.core.cross_sum_fft', 'core.cross_sum_fft', (['a', 't1', 't2'], {'n': '(3)', 'aout': 'out'}), '(a, t1, t2, n=3, aout=out)\n', (10472, 10498), True, 'import cddm.core as core\n'), ((11029, 11049), 'cddm.core.cross_count', 'core.cross_count', (['(10)'], {}), '(10)\n', (11045, 11049), True, 'import cddm.core as core\n'), ((11121, 11145), 'cddm.core.cross_count', 'core.cross_count', (['t1', 't2'], {}), '(t1, t2)\n', (11137, 11145), True, 'import cddm.core as core\n'), ((11220, 11254), 'cddm.core.cross_count', 'core.cross_count', (['t1', 't2'], {'aout': 'out'}), '(t1, t2, aout=out)\n', (11236, 11254), True, 'import cddm.core as core\n'), ((11369, 11394), 'cddm.core.cross_count', 'core.cross_count', (['(10)'], {'n': '(5)'}), '(10, n=5)\n', (11385, 11394), True, 'import cddm.core as core\n'), ((11473, 11502), 'cddm.core.cross_count', 'core.cross_count', (['t1', 't2'], {'n': '(5)'}), '(t1, t2, n=5)\n', (11489, 11502), True, 'import cddm.core as core\n'), ((11581, 11615), 'cddm.core.cross_count', 'core.cross_count', (['t1', 't2'], {'aout': 'out'}), '(t1, t2, aout=out)\n', (11597, 11615), True, 'import cddm.core as core\n'), ((11732, 11751), 'cddm.core.auto_count', 'core.auto_count', (['(10)'], {}), '(10)\n', (11747, 11751), True, 'import cddm.core as core\n'), ((11822, 11841), 'cddm.core.auto_count', 'core.auto_count', (['t1'], {}), '(t1)\n', (11837, 11841), True, 'import cddm.core as core\n'), ((11913, 11942), 'cddm.core.auto_count', 'core.auto_count', (['t1'], {'aout': 'out'}), '(t1, aout=out)\n', (11928, 11942), True, 'import cddm.core as core\n'), ((12053, 12077), 'cddm.core.auto_count', 'core.auto_count', (['(10)'], {'n': '(5)'}), '(10, n=5)\n', (12068, 12077), True, 'import cddm.core as core\n'), ((12155, 12179), 'cddm.core.auto_count', 
'core.auto_count', (['t1'], {'n': '(5)'}), '(t1, n=5)\n', (12170, 12179), True, 'import cddm.core as core\n'), ((12257, 12286), 'cddm.core.auto_count', 'core.auto_count', (['t1'], {'aout': 'out'}), '(t1, aout=out)\n', (12272, 12286), True, 'import cddm.core as core\n'), ((1562, 1586), 'numpy.allclose', 'np.allclose', (['out', 'auto_a'], {}), '(out, auto_a)\n', (1573, 1586), True, 'import numpy as np\n'), ((1655, 1694), 'numpy.allclose', 'np.allclose', (['out', 'auto_a_t1'], {'atol': '(1e-06)'}), '(out, auto_a_t1, atol=1e-06)\n', (1666, 1694), True, 'import numpy as np\n'), ((1777, 1820), 'numpy.allclose', 'np.allclose', (['out', '(auto_a_t1 * 2)'], {'atol': '(1e-06)'}), '(out, auto_a_t1 * 2, atol=1e-06)\n', (1788, 1820), True, 'import numpy as np\n'), ((1949, 1976), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a'], {}), '(out[0], auto_a)\n', (1960, 1976), True, 'import numpy as np\n'), ((2056, 2098), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a_t1'], {'atol': '(1e-06)'}), '(out[0], auto_a_t1, atol=1e-06)\n', (2067, 2098), True, 'import numpy as np\n'), ((2193, 2239), 'numpy.allclose', 'np.allclose', (['out[0]', '(auto_a_t1 * 2)'], {'atol': '(1e-06)'}), '(out[0], auto_a_t1 * 2, atol=1e-06)\n', (2204, 2239), True, 'import numpy as np\n'), ((2370, 2399), 'numpy.allclose', 'np.allclose', (['out', 'auto_a[0:3]'], {}), '(out, auto_a[0:3])\n', (2381, 2399), True, 'import numpy as np\n'), ((2475, 2507), 'numpy.allclose', 'np.allclose', (['out', 'auto_a_t1[0:3]'], {}), '(out, auto_a_t1[0:3])\n', (2486, 2507), True, 'import numpy as np\n'), ((2595, 2631), 'numpy.allclose', 'np.allclose', (['out', '(auto_a_t1[0:3] * 2)'], {}), '(out, auto_a_t1[0:3] * 2)\n', (2606, 2631), True, 'import numpy as np\n'), ((2758, 2790), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a[0:3]'], {}), '(out[0], auto_a[0:3])\n', (2769, 2790), True, 'import numpy as np\n'), ((2878, 2913), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a_t1[0:3]'], {}), '(out[0], auto_a_t1[0:3])\n', (2889, 2913), True, 'import numpy as np\n'), ((3013, 3052), 'numpy.allclose', 'np.allclose', (['out[0]', '(auto_a_t1[0:3] * 2)'], {}), '(out[0], auto_a_t1[0:3] * 2)\n', (3024, 3052), True, 'import numpy as np\n'), ((3149, 3173), 'numpy.allclose', 'np.allclose', (['out', 'auto_a'], {}), '(out, auto_a)\n', (3160, 3173), True, 'import numpy as np\n'), ((3239, 3266), 'numpy.allclose', 'np.allclose', (['out', 'auto_a_t1'], {}), '(out, auto_a_t1)\n', (3250, 3266), True, 'import numpy as np\n'), ((3344, 3375), 'numpy.allclose', 'np.allclose', (['out', '(auto_a_t1 * 2)'], {}), '(out, auto_a_t1 * 2)\n', (3355, 3375), True, 'import numpy as np\n'), ((3493, 3520), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a'], {}), '(out[0], auto_a)\n', (3504, 3520), True, 'import numpy as np\n'), ((3598, 3628), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a_t1'], {}), '(out[0], auto_a_t1)\n', (3609, 3628), True, 'import numpy as np\n'), ((3718, 3752), 'numpy.allclose', 'np.allclose', (['out[0]', '(auto_a_t1 * 2)'], {}), '(out[0], auto_a_t1 * 2)\n', (3729, 3752), True, 'import numpy as np\n'), ((3866, 3895), 'numpy.allclose', 'np.allclose', (['out', 'auto_a[0:3]'], {}), '(out, auto_a[0:3])\n', (3877, 3895), True, 'import numpy as np\n'), ((3967, 3999), 'numpy.allclose', 'np.allclose', (['out', 'auto_a_t1[0:3]'], {}), '(out, auto_a_t1[0:3])\n', (3978, 3999), True, 'import numpy as np\n'), ((4083, 4119), 'numpy.allclose', 'np.allclose', (['out', '(auto_a_t1[0:3] * 2)'], {}), '(out, auto_a_t1[0:3] * 2)\n', (4094, 4119), True, 'import numpy as 
np\n'), ((4245, 4277), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a[0:3]'], {}), '(out[0], auto_a[0:3])\n', (4256, 4277), True, 'import numpy as np\n'), ((4361, 4396), 'numpy.allclose', 'np.allclose', (['out[0]', 'auto_a_t1[0:3]'], {}), '(out[0], auto_a_t1[0:3])\n', (4372, 4396), True, 'import numpy as np\n'), ((4491, 4530), 'numpy.allclose', 'np.allclose', (['out[0]', '(auto_a_t1[0:3] * 2)'], {}), '(out[0], auto_a_t1[0:3] * 2)\n', (4502, 4530), True, 'import numpy as np\n'), ((4647, 4674), 'numpy.allclose', 'np.allclose', (['out', 'cross_a_b'], {}), '(out, cross_a_b)\n', (4658, 4674), True, 'import numpy as np\n'), ((4750, 4783), 'numpy.allclose', 'np.allclose', (['out', 'cross_a_b_t1_t2'], {}), '(out, cross_a_b_t1_t2)\n', (4761, 4783), True, 'import numpy as np\n'), ((4871, 4908), 'numpy.allclose', 'np.allclose', (['out', '(cross_a_b_t1_t2 * 2)'], {}), '(out, cross_a_b_t1_t2 * 2)\n', (4882, 4908), True, 'import numpy as np\n'), ((5038, 5068), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b'], {}), '(out[0], cross_a_b)\n', (5049, 5068), True, 'import numpy as np\n'), ((5155, 5191), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b_t1_t2'], {}), '(out[0], cross_a_b_t1_t2)\n', (5166, 5191), True, 'import numpy as np\n'), ((5291, 5331), 'numpy.allclose', 'np.allclose', (['out[0]', '(cross_a_b_t1_t2 * 2)'], {}), '(out[0], cross_a_b_t1_t2 * 2)\n', (5302, 5331), True, 'import numpy as np\n'), ((5457, 5488), 'numpy.allclose', 'np.allclose', (['out', 'cross_a_b[:3]'], {}), '(out, cross_a_b[:3])\n', (5468, 5488), True, 'import numpy as np\n'), ((5571, 5608), 'numpy.allclose', 'np.allclose', (['out', 'cross_a_b_t1_t2[:3]'], {}), '(out, cross_a_b_t1_t2[:3])\n', (5582, 5608), True, 'import numpy as np\n'), ((5703, 5744), 'numpy.allclose', 'np.allclose', (['out', '(cross_a_b_t1_t2[:3] * 2)'], {}), '(out, cross_a_b_t1_t2[:3] * 2)\n', (5714, 5744), True, 'import numpy as np\n'), ((5876, 5910), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b[:3]'], {}), '(out[0], cross_a_b[:3])\n', (5887, 5910), True, 'import numpy as np\n'), ((6006, 6046), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b_t1_t2[:3]'], {}), '(out[0], cross_a_b_t1_t2[:3])\n', (6017, 6046), True, 'import numpy as np\n'), ((6154, 6198), 'numpy.allclose', 'np.allclose', (['out[0]', '(cross_a_b_t1_t2[:3] * 2)'], {}), '(out[0], cross_a_b_t1_t2[:3] * 2)\n', (6165, 6198), True, 'import numpy as np\n'), ((6299, 6326), 'numpy.allclose', 'np.allclose', (['out', 'cross_a_b'], {}), '(out, cross_a_b)\n', (6310, 6326), True, 'import numpy as np\n'), ((6398, 6431), 'numpy.allclose', 'np.allclose', (['out', 'cross_a_b_t1_t2'], {}), '(out, cross_a_b_t1_t2)\n', (6409, 6431), True, 'import numpy as np\n'), ((6515, 6552), 'numpy.allclose', 'np.allclose', (['out', '(cross_a_b_t1_t2 * 2)'], {}), '(out, cross_a_b_t1_t2 * 2)\n', (6526, 6552), True, 'import numpy as np\n'), ((6666, 6696), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b'], {}), '(out[0], cross_a_b)\n', (6677, 6696), True, 'import numpy as np\n'), ((6780, 6816), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b_t1_t2'], {}), '(out[0], cross_a_b_t1_t2)\n', (6791, 6816), True, 'import numpy as np\n'), ((6912, 6952), 'numpy.allclose', 'np.allclose', (['out[0]', '(cross_a_b_t1_t2 * 2)'], {}), '(out[0], cross_a_b_t1_t2 * 2)\n', (6923, 6952), True, 'import numpy as np\n'), ((7070, 7101), 'numpy.allclose', 'np.allclose', (['out', 'cross_a_b[:3]'], {}), '(out, cross_a_b[:3])\n', (7081, 7101), True, 'import numpy as np\n'), ((7180, 7217), 'numpy.allclose', 
'np.allclose', (['out', 'cross_a_b_t1_t2[:3]'], {}), '(out, cross_a_b_t1_t2[:3])\n', (7191, 7217), True, 'import numpy as np\n'), ((7308, 7349), 'numpy.allclose', 'np.allclose', (['out', '(cross_a_b_t1_t2[:3] * 2)'], {}), '(out, cross_a_b_t1_t2[:3] * 2)\n', (7319, 7349), True, 'import numpy as np\n'), ((7480, 7514), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b[:3]'], {}), '(out[0], cross_a_b[:3])\n', (7491, 7514), True, 'import numpy as np\n'), ((7606, 7646), 'numpy.allclose', 'np.allclose', (['out[0]', 'cross_a_b_t1_t2[:3]'], {}), '(out[0], cross_a_b_t1_t2[:3])\n', (7617, 7646), True, 'import numpy as np\n'), ((7750, 7791), 'numpy.allclose', 'np.allclose', (['out', '(cross_a_b_t1_t2[:3] * 2)'], {}), '(out, cross_a_b_t1_t2[:3] * 2)\n', (7761, 7791), True, 'import numpy as np\n'), ((7910, 7938), 'numpy.allclose', 'np.allclose', (['out', 'auto_sum_a'], {}), '(out, auto_sum_a)\n', (7921, 7938), True, 'import numpy as np\n'), ((7997, 8028), 'numpy.allclose', 'np.allclose', (['out', 'auto_sum_a_t1'], {}), '(out, auto_sum_a_t1)\n', (8008, 8028), True, 'import numpy as np\n'), ((8100, 8135), 'numpy.allclose', 'np.allclose', (['out', '(auto_sum_a_t1 * 2)'], {}), '(out, auto_sum_a_t1 * 2)\n', (8111, 8135), True, 'import numpy as np\n'), ((8229, 8262), 'numpy.allclose', 'np.allclose', (['out', 'auto_sum_a[0:3]'], {}), '(out, auto_sum_a[0:3])\n', (8240, 8262), True, 'import numpy as np\n'), ((8328, 8364), 'numpy.allclose', 'np.allclose', (['out', 'auto_sum_a_t1[0:3]'], {}), '(out, auto_sum_a_t1[0:3])\n', (8339, 8364), True, 'import numpy as np\n'), ((8437, 8477), 'numpy.allclose', 'np.allclose', (['out', '(auto_sum_a_t1[0:3] * 2)'], {}), '(out, auto_sum_a_t1[0:3] * 2)\n', (8448, 8477), True, 'import numpy as np\n'), ((8554, 8594), 'numpy.allclose', 'np.allclose', (['out', '(auto_sum_a_t1[0:3] * 3)'], {}), '(out, auto_sum_a_t1[0:3] * 3)\n', (8565, 8594), True, 'import numpy as np\n'), ((8698, 8729), 'numpy.allclose', 'np.allclose', (['out', 'auto_sum_a_t1'], {}), '(out, auto_sum_a_t1)\n', (8709, 8729), True, 'import numpy as np\n'), ((8805, 8840), 'numpy.allclose', 'np.allclose', (['out', '(auto_sum_a_t1 * 2)'], {}), '(out, auto_sum_a_t1 * 2)\n', (8816, 8840), True, 'import numpy as np\n'), ((8953, 8989), 'numpy.allclose', 'np.allclose', (['out', 'auto_sum_a_t1[0:3]'], {}), '(out, auto_sum_a_t1[0:3])\n', (8964, 8989), True, 'import numpy as np\n'), ((9071, 9111), 'numpy.allclose', 'np.allclose', (['out', '(auto_sum_a_t1[0:3] * 2)'], {}), '(out, auto_sum_a_t1[0:3] * 2)\n', (9082, 9111), True, 'import numpy as np\n'), ((9185, 9225), 'numpy.allclose', 'np.allclose', (['out', '(auto_sum_a_t1[0:3] * 3)'], {}), '(out, auto_sum_a_t1[0:3] * 3)\n', (9196, 9225), True, 'import numpy as np\n'), ((9311, 9340), 'numpy.allclose', 'np.allclose', (['out', 'cross_sum_a'], {}), '(out, cross_sum_a)\n', (9322, 9340), True, 'import numpy as np\n'), ((9403, 9438), 'numpy.allclose', 'np.allclose', (['out', 'cross_sum_a_t1_t2'], {}), '(out, cross_sum_a_t1_t2)\n', (9414, 9438), True, 'import numpy as np\n'), ((9514, 9553), 'numpy.allclose', 'np.allclose', (['out', '(cross_sum_a_t1_t2 * 2)'], {}), '(out, cross_sum_a_t1_t2 * 2)\n', (9525, 9553), True, 'import numpy as np\n'), ((9647, 9681), 'numpy.allclose', 'np.allclose', (['out', 'cross_sum_a[0:3]'], {}), '(out, cross_sum_a[0:3])\n', (9658, 9681), True, 'import numpy as np\n'), ((9751, 9791), 'numpy.allclose', 'np.allclose', (['out', 'cross_sum_a_t1_t2[0:3]'], {}), '(out, cross_sum_a_t1_t2[0:3])\n', (9762, 9791), True, 'import numpy as np\n'), ((9867, 9911), 
'numpy.allclose', 'np.allclose', (['out', '(cross_sum_a_t1_t2[0:3] * 2)'], {}), '(out, cross_sum_a_t1_t2[0:3] * 2)\n', (9878, 9911), True, 'import numpy as np\n'), ((10012, 10047), 'numpy.allclose', 'np.allclose', (['out', 'cross_sum_a_t1_t2'], {}), '(out, cross_sum_a_t1_t2)\n', (10023, 10047), True, 'import numpy as np\n'), ((10127, 10166), 'numpy.allclose', 'np.allclose', (['out', '(cross_sum_a_t1_t2 * 2)'], {}), '(out, cross_sum_a_t1_t2 * 2)\n', (10138, 10166), True, 'import numpy as np\n'), ((10276, 10316), 'numpy.allclose', 'np.allclose', (['out', 'cross_sum_a_t1_t2[0:3]'], {}), '(out, cross_sum_a_t1_t2[0:3])\n', (10287, 10316), True, 'import numpy as np\n'), ((10396, 10440), 'numpy.allclose', 'np.allclose', (['out', '(cross_sum_a_t1_t2[0:3] * 2)'], {}), '(out, cross_sum_a_t1_t2[0:3] * 2)\n', (10407, 10440), True, 'import numpy as np\n'), ((10524, 10568), 'numpy.allclose', 'np.allclose', (['out', '(cross_sum_a_t1_t2[0:3] * 3)'], {}), '(out, cross_sum_a_t1_t2[0:3] * 3)\n', (10535, 10568), True, 'import numpy as np\n'), ((10669, 10702), 'numpy.arange', 'np.arange', (['test_data1.shape[axis]'], {}), '(test_data1.shape[axis])\n', (10678, 10702), True, 'import numpy as np\n'), ((10778, 10823), 'cddm.core.cross_sum', 'core.cross_sum', (['test_data1', 't1', 't2'], {'axis': 'axis'}), '(test_data1, t1, t2, axis=axis)\n', (10792, 10823), True, 'import cddm.core as core\n'), ((10843, 10892), 'cddm.core.cross_sum_fft', 'core.cross_sum_fft', (['test_data1', 't1', 't2'], {'axis': 'axis'}), '(test_data1, t1, t2, axis=axis)\n', (10861, 10892), True, 'import cddm.core as core\n'), ((11074, 11106), 'numpy.allclose', 'np.allclose', (['out', 'cross_count_10'], {}), '(out, cross_count_10)\n', (11085, 11106), True, 'import numpy as np\n'), ((11169, 11204), 'numpy.allclose', 'np.allclose', (['out', 'cross_count_t1_t2'], {}), '(out, cross_count_t1_t2)\n', (11180, 11204), True, 'import numpy as np\n'), ((11280, 11319), 'numpy.allclose', 'np.allclose', (['out', '(cross_count_t1_t2 * 2)'], {}), '(out, cross_count_t1_t2 * 2)\n', (11291, 11319), True, 'import numpy as np\n'), ((11421, 11458), 'numpy.allclose', 'np.allclose', (['out', 'cross_count_10[0:5]'], {}), '(out, cross_count_10[0:5])\n', (11432, 11458), True, 'import numpy as np\n'), ((11525, 11565), 'numpy.allclose', 'np.allclose', (['out', 'cross_count_t1_t2[0:5]'], {}), '(out, cross_count_t1_t2[0:5])\n', (11536, 11565), True, 'import numpy as np\n'), ((11641, 11685), 'numpy.allclose', 'np.allclose', (['out', '(2 * cross_count_t1_t2[0:5])'], {}), '(out, 2 * cross_count_t1_t2[0:5])\n', (11652, 11685), True, 'import numpy as np\n'), ((11776, 11807), 'numpy.allclose', 'np.allclose', (['out', 'auto_count_10'], {}), '(out, auto_count_10)\n', (11787, 11807), True, 'import numpy as np\n'), ((11866, 11897), 'numpy.allclose', 'np.allclose', (['out', 'auto_count_t1'], {}), '(out, auto_count_t1)\n', (11877, 11897), True, 'import numpy as np\n'), ((11969, 12004), 'numpy.allclose', 'np.allclose', (['out', '(auto_count_t1 * 2)'], {}), '(out, auto_count_t1 * 2)\n', (11980, 12004), True, 'import numpy as np\n'), ((12104, 12140), 'numpy.allclose', 'np.allclose', (['out', 'auto_count_10[0:5]'], {}), '(out, auto_count_10[0:5])\n', (12115, 12140), True, 'import numpy as np\n'), ((12206, 12241), 'numpy.allclose', 'np.allclose', (['out', 'auto_count_t1[:5]'], {}), '(out, auto_count_t1[:5])\n', (12217, 12241), True, 'import numpy as np\n'), ((12313, 12352), 'numpy.allclose', 'np.allclose', (['out', '(2 * auto_count_t1[:5])'], {}), '(out, 2 * auto_count_t1[:5])\n', (12324, 
12352), True, 'import numpy as np\n'), ((12496, 12538), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': '(0)'}), '(test_data1, test_data2, axis=0)\n', (12506, 12538), True, 'import cddm.core as core\n'), ((12560, 12622), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'n': '(8)', 'norm': '(1)', 'method': 'method'}), '(test_data1, test_data2, n=8, norm=1, method=method)\n', (12570, 12622), True, 'import cddm.core as core\n'), ((12647, 12676), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {}), '(data, bg, var)\n', (12661, 12676), True, 'import cddm.core as core\n'), ((12695, 12731), 'cddm.video.fromarrays', 'fromarrays', (['(test_data1, test_data2)'], {}), '((test_data1, test_data2))\n', (12705, 12731), False, 'from cddm.video import fromarrays\n'), ((12868, 12897), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {}), '(data, bg, var)\n', (12882, 12897), True, 'import cddm.core as core\n'), ((13061, 13091), 'cddm.core.stats', 'core.stats', (['test_data1'], {'axis': '(0)'}), '(test_data1, axis=0)\n', (13071, 13091), True, 'import cddm.core as core\n'), ((13114, 13176), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data1'], {'n': '(8)', 'norm': '(2)', 'method': 'method'}), '(test_data1, test_data1, n=8, norm=2, method=method)\n', (13124, 13176), True, 'import cddm.core as core\n'), ((13201, 13239), 'cddm.core.normalize', 'core.normalize', (['data1', 'bg', 'var'], {'norm': '(2)'}), '(data1, bg, var, norm=2)\n', (13215, 13239), True, 'import cddm.core as core\n'), ((13269, 13320), 'cddm.core.iacorr', 'core.iacorr', (['test_data1'], {'n': '(8)', 'norm': '(2)', 'method': 'method'}), '(test_data1, n=8, norm=2, method=method)\n', (13280, 13320), True, 'import cddm.core as core\n'), ((13346, 13384), 'cddm.core.normalize', 'core.normalize', (['data2', 'bg', 'var'], {'norm': '(2)'}), '(data2, bg, var, norm=2)\n', (13360, 13384), True, 'import cddm.core as core\n'), ((13552, 13582), 'cddm.core.stats', 'core.stats', (['test_data1'], {'axis': '(0)'}), '(test_data1, axis=0)\n', (13562, 13582), True, 'import cddm.core as core\n'), ((13605, 13655), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'n': '(8)', 'norm': '(1)', 'method': 'method'}), '(test_data1, n=8, norm=1, method=method)\n', (13615, 13655), True, 'import cddm.core as core\n'), ((13681, 13719), 'cddm.core.normalize', 'core.normalize', (['data1', 'bg', 'var'], {'norm': '(1)'}), '(data1, bg, var, norm=1)\n', (13695, 13719), True, 'import cddm.core as core\n'), ((13749, 13800), 'cddm.core.iacorr', 'core.iacorr', (['test_data1'], {'n': '(8)', 'norm': '(1)', 'method': 'method'}), '(test_data1, n=8, norm=1, method=method)\n', (13760, 13800), True, 'import cddm.core as core\n'), ((13826, 13864), 'cddm.core.normalize', 'core.normalize', (['data2', 'bg', 'var'], {'norm': '(1)'}), '(data2, bg, var, norm=1)\n', (13840, 13864), True, 'import cddm.core as core\n'), ((10721, 10754), 'numpy.arange', 'np.arange', (['test_data1.shape[axis]'], {}), '(test_data1.shape[axis])\n', (10730, 10754), True, 'import numpy as np\n'), ((10921, 10944), 'numpy.allclose', 'np.allclose', (['out1', 'out2'], {}), '(out1, out2)\n', (10932, 10944), True, 'import numpy as np\n'), ((12928, 12951), 'numpy.allclose', 'np.allclose', (['out1', 'out2'], {}), '(out1, out2)\n', (12939, 12951), True, 'import numpy as np\n'), ((13417, 13440), 'numpy.allclose', 'np.allclose', (['out1', 'out2'], {}), '(out1, out2)\n', (13428, 13440), True, 'import numpy as np\n'), ((13897, 13920), 'numpy.allclose', 
'np.allclose', (['out1', 'out2'], {}), '(out1, out2)\n', (13908, 13920), True, 'import numpy as np\n'), ((15210, 15255), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': 'axis'}), '(test_data1, test_data2, axis=axis)\n', (15220, 15255), True, 'import cddm.core as core\n'), ((15281, 15348), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(3)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=3, method='fft', axis=axis)\n", (15291, 15348), True, 'import cddm.core as core\n'), ((15382, 15459), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(3)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=3, mode=mode, scale=scale, mask=test_mask)\n', (15396, 15459), True, 'import cddm.core as core\n'), ((15496, 15564), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(3)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=3, method='corr', axis=axis)\n", (15506, 15564), True, 'import cddm.core as core\n'), ((15599, 15676), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(3)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=3, mode=mode, scale=scale, mask=test_mask)\n', (15613, 15676), True, 'import cddm.core as core\n'), ((15796, 15864), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(3)', 'method': '"""diff"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=3, method='diff', axis=axis)\n", (15806, 15864), True, 'import cddm.core as core\n'), ((15899, 15976), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(3)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=3, mode=mode, scale=scale, mask=test_mask)\n', (15913, 15976), True, 'import cddm.core as core\n'), ((19290, 19335), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': 'axis'}), '(test_data1, test_data2, axis=axis)\n', (19300, 19335), True, 'import cddm.core as core\n'), ((19361, 19428), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(1)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=1, method='fft', axis=axis)\n", (19371, 19428), True, 'import cddm.core as core\n'), ((19462, 19539), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=1, mode=mode, scale=scale, mask=test_mask)\n', (19476, 19539), True, 'import cddm.core as core\n'), ((19575, 19643), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(1)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=1, method='corr', axis=axis)\n", (19585, 19643), True, 'import cddm.core as core\n'), ((19678, 19755), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=1, mode=mode, scale=scale, mask=test_mask)\n', (19692, 19755), True, 'import cddm.core as core\n'), ((19874, 19942), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(1)', 'method': '"""diff"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=1, method='diff', axis=axis)\n", (19884, 19942), True, 'import cddm.core as core\n'), ((19977, 20054), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale', 'mask': 
'test_mask'}), '(data, bg, var, norm=1, mode=mode, scale=scale, mask=test_mask)\n', (19991, 20054), True, 'import cddm.core as core\n'), ((21762, 21807), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': 'axis'}), '(test_data1, test_data2, axis=axis)\n', (21772, 21807), True, 'import cddm.core as core\n'), ((21833, 21900), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(0)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=0, method='fft', axis=axis)\n", (21843, 21900), True, 'import cddm.core as core\n'), ((21934, 22011), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(0)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=0, mode=mode, scale=scale, mask=test_mask)\n', (21948, 22011), True, 'import cddm.core as core\n'), ((22048, 22116), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(0)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=0, method='corr', axis=axis)\n", (22058, 22116), True, 'import cddm.core as core\n'), ((22151, 22228), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(0)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=0, mode=mode, scale=scale, mask=test_mask)\n', (22165, 22228), True, 'import cddm.core as core\n'), ((23234, 23268), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {}), '(test_data1, test_data2)\n', (23244, 23268), True, 'import cddm.core as core\n'), ((23292, 23348), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(2)', 'method': '"""fft"""'}), "(test_data1, test_data2, norm=2, method='fft')\n", (23302, 23348), True, 'import cddm.core as core\n'), ((23380, 23457), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(2)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=2, mode=mode, scale=scale, mask=test_mask)\n', (23394, 23457), True, 'import cddm.core as core\n'), ((23494, 23551), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(2)', 'method': '"""corr"""'}), "(test_data1, test_data2, norm=2, method='corr')\n", (23504, 23551), True, 'import cddm.core as core\n'), ((23584, 23661), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(2)', 'mode': 'mode', 'scale': 'scale', 'mask': 'test_mask'}), '(data, bg, var, norm=2, mode=mode, scale=scale, mask=test_mask)\n', (23598, 23661), True, 'import cddm.core as core\n'), ((23866, 23887), 'cddm.core.abs2', 'core.abs2', (['test_data1'], {}), '(test_data1)\n', (23875, 23887), True, 'import cddm.core as core\n'), ((14188, 14233), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': 'axis'}), '(test_data1, test_data2, axis=axis)\n', (14198, 14233), True, 'import cddm.core as core\n'), ((14263, 14330), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(3)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=3, method='fft', axis=axis)\n", (14273, 14330), True, 'import cddm.core as core\n'), ((14368, 14429), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(3)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=3, mode=mode, scale=scale)\n', (14382, 14429), True, 'import cddm.core as core\n'), ((14472, 14540), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(3)', 'method': '"""corr"""', 'axis': 'axis'}), 
"(test_data1, test_data2, norm=3, method='corr', axis=axis)\n", (14482, 14540), True, 'import cddm.core as core\n'), ((14579, 14640), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(3)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=3, mode=mode, scale=scale)\n', (14593, 14640), True, 'import cddm.core as core\n'), ((14774, 14842), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(3)', 'method': '"""diff"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=3, method='diff', axis=axis)\n", (14784, 14842), True, 'import cddm.core as core\n'), ((14881, 14942), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(3)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=3, mode=mode, scale=scale)\n', (14895, 14942), True, 'import cddm.core as core\n'), ((15734, 15766), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (15745, 15766), True, 'import numpy as np\n'), ((16034, 16066), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (16045, 16066), True, 'import numpy as np\n'), ((16265, 16298), 'cddm.core.stats', 'core.stats', (['test_data1'], {'axis': 'axis'}), '(test_data1, axis=axis)\n', (16275, 16298), True, 'import cddm.core as core\n'), ((16328, 16395), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data1'], {'norm': '(3)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data1, norm=3, method='fft', axis=axis)\n", (16338, 16395), True, 'import cddm.core as core\n'), ((16433, 16494), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (16447, 16494), True, 'import cddm.core as core\n'), ((16537, 16593), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'norm': '(3)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, norm=3, method='corr', axis=axis)\n", (16547, 16593), True, 'import cddm.core as core\n'), ((16631, 16692), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (16645, 16692), True, 'import cddm.core as core\n'), ((16826, 16882), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'norm': '(1)', 'method': '"""diff"""', 'axis': 'axis'}), "(test_data1, norm=1, method='diff', axis=axis)\n", (16836, 16882), True, 'import cddm.core as core\n'), ((16920, 16981), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (16934, 16981), True, 'import cddm.core as core\n'), ((17264, 17309), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': 'axis'}), '(test_data1, test_data2, axis=axis)\n', (17274, 17309), True, 'import cddm.core as core\n'), ((17339, 17406), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(1)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=1, method='fft', axis=axis)\n", (17349, 17406), True, 'import cddm.core as core\n'), ((17444, 17505), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (17458, 17505), True, 'import cddm.core as core\n'), ((17548, 17616), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(1)', 
'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=1, method='corr', axis=axis)\n", (17558, 17616), True, 'import cddm.core as core\n'), ((17655, 17716), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (17669, 17716), True, 'import cddm.core as core\n'), ((17850, 17918), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(1)', 'method': '"""diff"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=1, method='diff', axis=axis)\n", (17860, 17918), True, 'import cddm.core as core\n'), ((17957, 18018), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (17971, 18018), True, 'import cddm.core as core\n'), ((18297, 18330), 'cddm.core.stats', 'core.stats', (['test_data1'], {'axis': 'axis'}), '(test_data1, axis=axis)\n', (18307, 18330), True, 'import cddm.core as core\n'), ((18360, 18415), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'norm': '(1)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, norm=1, method='fft', axis=axis)\n", (18370, 18415), True, 'import cddm.core as core\n'), ((18454, 18515), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (18468, 18515), True, 'import cddm.core as core\n'), ((18558, 18614), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'norm': '(1)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, norm=1, method='corr', axis=axis)\n", (18568, 18614), True, 'import cddm.core as core\n'), ((18652, 18713), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (18666, 18713), True, 'import cddm.core as core\n'), ((18847, 18903), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'norm': '(1)', 'method': '"""diff"""', 'axis': 'axis'}), "(test_data1, norm=1, method='diff', axis=axis)\n", (18857, 18903), True, 'import cddm.core as core\n'), ((18941, 19002), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(1)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=1, mode=mode, scale=scale)\n', (18955, 19002), True, 'import cddm.core as core\n'), ((19812, 19844), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (19823, 19844), True, 'import numpy as np\n'), ((20111, 20143), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (20122, 20143), True, 'import numpy as np\n'), ((20326, 20371), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': 'axis'}), '(test_data1, test_data2, axis=axis)\n', (20336, 20371), True, 'import cddm.core as core\n'), ((20401, 20468), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(0)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=0, method='fft', axis=axis)\n", (20411, 20468), True, 'import cddm.core as core\n'), ((20506, 20567), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(0)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=0, mode=mode, scale=scale)\n', (20520, 20567), True, 'import cddm.core as core\n'), ((20610, 20678), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], 
{'norm': '(0)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=0, method='corr', axis=axis)\n", (20620, 20678), True, 'import cddm.core as core\n'), ((20717, 20778), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(0)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=0, mode=mode, scale=scale)\n', (20731, 20778), True, 'import cddm.core as core\n'), ((21057, 21090), 'cddm.core.stats', 'core.stats', (['test_data1'], {'axis': 'axis'}), '(test_data1, axis=axis)\n', (21067, 21090), True, 'import cddm.core as core\n'), ((21120, 21175), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'norm': '(0)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, norm=0, method='fft', axis=axis)\n", (21130, 21175), True, 'import cddm.core as core\n'), ((21214, 21275), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(0)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=0, mode=mode, scale=scale)\n', (21228, 21275), True, 'import cddm.core as core\n'), ((21318, 21374), 'cddm.core.acorr', 'core.acorr', (['test_data1'], {'norm': '(0)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, norm=0, method='corr', axis=axis)\n", (21328, 21374), True, 'import cddm.core as core\n'), ((21412, 21473), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(0)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=0, mode=mode, scale=scale)\n', (21426, 21473), True, 'import cddm.core as core\n'), ((22286, 22318), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (22297, 22318), True, 'import numpy as np\n'), ((22520, 22565), 'cddm.core.stats', 'core.stats', (['test_data1', 'test_data2'], {'axis': 'axis'}), '(test_data1, test_data2, axis=axis)\n', (22530, 22565), True, 'import cddm.core as core\n'), ((22595, 22662), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(2)', 'method': '"""fft"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=2, method='fft', axis=axis)\n", (22605, 22662), True, 'import cddm.core as core\n'), ((22700, 22761), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(2)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=2, mode=mode, scale=scale)\n', (22714, 22761), True, 'import cddm.core as core\n'), ((22804, 22872), 'cddm.core.ccorr', 'core.ccorr', (['test_data1', 'test_data2'], {'norm': '(2)', 'method': '"""corr"""', 'axis': 'axis'}), "(test_data1, test_data2, norm=2, method='corr', axis=axis)\n", (22814, 22872), True, 'import cddm.core as core\n'), ((22911, 22972), 'cddm.core.normalize', 'core.normalize', (['data', 'bg', 'var'], {'norm': '(2)', 'mode': 'mode', 'scale': 'scale'}), '(data, bg, var, norm=2, mode=mode, scale=scale)\n', (22925, 22972), True, 'import cddm.core as core\n'), ((23718, 23750), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (23729, 23750), True, 'import numpy as np\n'), ((23889, 23907), 'numpy.abs', 'np.abs', (['test_data1'], {}), '(test_data1)\n', (23895, 23907), True, 'import numpy as np\n'), ((14704, 14736), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (14715, 14736), True, 'import numpy as np\n'), ((15006, 15038), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (15017, 15038), True, 'import numpy as np\n'), ((16756, 16788), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, 
out_other)\n', (16767, 16788), True, 'import numpy as np\n'), ((17045, 17077), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (17056, 17077), True, 'import numpy as np\n'), ((17780, 17812), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (17791, 17812), True, 'import numpy as np\n'), ((18082, 18114), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (18093, 18114), True, 'import numpy as np\n'), ((18777, 18809), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (18788, 18809), True, 'import numpy as np\n'), ((19066, 19098), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (19077, 19098), True, 'import numpy as np\n'), ((20842, 20874), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (20853, 20874), True, 'import numpy as np\n'), ((21537, 21569), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (21548, 21569), True, 'import numpy as np\n'), ((23036, 23068), 'numpy.allclose', 'np.allclose', (['self.out', 'out_other'], {}), '(self.out, out_other)\n', (23047, 23068), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
import pytorch_lightning as pl
from torch.utils.data.dataloader import DataLoader
import utils.data.functions
class SpatioTemporalCSVDataModule(pl.LightningDataModule):
def __init__(
self,
feat_path: str,
adj_path: str,
batch_size: int = 32,
seq_len: int = 12,
pre_len: int = 3,
split_ratio: float = 0.8,
normalize: bool = True,
**kwargs
):
super(SpatioTemporalCSVDataModule, self).__init__()
self._feat_path = feat_path
self._adj_path = adj_path
self.batch_size = batch_size
self.seq_len = seq_len
self.pre_len = pre_len
self.split_ratio = split_ratio
self.normalize = normalize
self._feat = utils.data.functions.load_features(self._feat_path)
self._feat_max_val = np.max(self._feat)
self._adj = utils.data.functions.load_adjacency_matrix(self._adj_path)
self._dis = utils.data.functions.load_distance_matrix(r'data/sz_distance.csv')
self.direct = utils.data.functions.load_distance_matrix(r'data/sz_direct.csv')
@staticmethod
def add_data_specific_arguments(parent_parser):
parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--seq_len", type=int, default=32)
parser.add_argument("--pre_len", type=int, default=1)
parser.add_argument("--split_ratio", type=float, default=0.8)
parser.add_argument("--normalize", type=bool, default=True)
return parser
def setup(self, stage: str = None):
(
self.train_dataset,
self.val_dataset,
) = utils.data.functions.generate_torch_datasets(
self._feat,
self.seq_len,
self.pre_len,
split_ratio=self.split_ratio,
normalize=self.normalize,
)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=len(self.val_dataset))
@property
def feat_max_val(self):
return self._feat_max_val
@property
def adj(self):
return self._adj
@property
def dis(self):
return self._dis
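# Minimal usage sketch (not part of the original module).  The CSV paths are
# placeholders, and it assumes utils.data.functions.generate_torch_datasets
# yields (history, target) tensor pairs; note that the constructor also reads
# the hard-coded data/sz_distance.csv and data/sz_direct.csv files.
if __name__ == "__main__":
    dm = SpatioTemporalCSVDataModule(
        feat_path="data/sz_speed.csv",  # placeholder feature matrix
        adj_path="data/sz_adj.csv",     # placeholder adjacency matrix
        batch_size=32,
        seq_len=12,
        pre_len=3,
    )
    dm.setup()
    history, target = next(iter(dm.train_dataloader()))
    print(history.shape, target.shape, dm.feat_max_val)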
|
[
"torch.utils.data.dataloader.DataLoader",
"numpy.max",
"argparse.ArgumentParser"
] |
[((863, 881), 'numpy.max', 'np.max', (['self._feat'], {}), '(self._feat)\n', (869, 881), True, 'import numpy as np\n'), ((1223, 1287), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'parents': '[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (1246, 1287), False, 'import argparse\n'), ((2024, 2082), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['self.train_dataset'], {'batch_size': 'self.batch_size'}), '(self.train_dataset, batch_size=self.batch_size)\n', (2034, 2082), False, 'from torch.utils.data.dataloader import DataLoader\n')]
|
import os
import cv2
import numpy as np
import sys
caffe_root = os.path.expanduser('~') + "/CNN/ssd"
sys.path.insert(0, caffe_root+'/python')
import caffe
from tqdm import tqdm
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat','bottle', 'bus', 'car', 'cat',
'chair','cow', 'diningtable', 'dog', 'horse','motorbike', 'person',
'pottedplant','sheep', 'sofa', 'train', 'tvmonitor')
# color index please refer to https://zhuanlan.zhihu.com/p/102303256
colors = [[0,0,0], [128,0,0],[0,128,0],[128,128,0],[0,0,128],[128,0,128],
          [0,128,128],[128,128,128], [64,0,0],[192,0,0],[64,128,0],
[192,128,0], [64,0,128], [192,0,128], [64,128,128], [192,128,128],
[0,64,0], [128,64,0], [0,192,0], [128,192,0],[0,64,128]]
outputdir="output/preprocess"
def showpreprocess(blobs,i,show=False):
data = np.array(blobs['data'].data)
label = np.array(blobs['label'].data)
img = data[0].transpose(1,2,0).copy()
objs = label[0][0]
height, width,_ = img.shape
for obj in objs:
x = int(obj[3]*width)
y = int(obj[4]*height)
x2 = int(obj[5]*width)
y2 = int(obj[6]*height)
cls = int(obj[1])
cv2.rectangle(img,(x,y),(x2,y2),colors[cls])
cv2.putText(img,CLASSES[cls],(x,y),1,1,colors[cls])
if show:
cv2.imshow("img",img)
cv2.waitKey()
cv2.imwrite(outputdir+"/"+str(i)+".jpg",img)
def main(model="voc/MobileNetSSD_preprocess.prototxt",show=False):
net = caffe.Net(model, caffe.TRAIN)
for i in tqdm(range(20)):
blobs = net.forward()
        showpreprocess(blobs, i, show)
if __name__=="__main__":
if not os.path.exists(outputdir):
os.makedirs(outputdir)
main()
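    # Hedged alternative invocation (not in the original script): forwarding
    # show=True additionally pops up each annotated batch with cv2.imshow;
    # the prototxt path is only a placeholder for whatever MobileNetSSD
    # training prototxt is available locally.
    # main(model="voc/MobileNetSSD_preprocess.prototxt", show=True)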
|
[
"cv2.putText",
"os.makedirs",
"cv2.waitKey",
"cv2.imshow",
"sys.path.insert",
"os.path.exists",
"numpy.array",
"cv2.rectangle",
"caffe.Net",
"os.path.expanduser"
] |
[((101, 143), 'sys.path.insert', 'sys.path.insert', (['(0)', "(caffe_root + '/python')"], {}), "(0, caffe_root + '/python')\n", (116, 143), False, 'import sys\n'), ((64, 87), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (82, 87), False, 'import os\n'), ((827, 855), 'numpy.array', 'np.array', (["blobs['data'].data"], {}), "(blobs['data'].data)\n", (835, 855), True, 'import numpy as np\n'), ((868, 897), 'numpy.array', 'np.array', (["blobs['label'].data"], {}), "(blobs['label'].data)\n", (876, 897), True, 'import numpy as np\n'), ((1472, 1501), 'caffe.Net', 'caffe.Net', (['model', 'caffe.TRAIN'], {}), '(model, caffe.TRAIN)\n', (1481, 1501), False, 'import caffe\n'), ((1174, 1223), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x2, y2)', 'colors[cls]'], {}), '(img, (x, y), (x2, y2), colors[cls])\n', (1187, 1223), False, 'import cv2\n'), ((1227, 1284), 'cv2.putText', 'cv2.putText', (['img', 'CLASSES[cls]', '(x, y)', '(1)', '(1)', 'colors[cls]'], {}), '(img, CLASSES[cls], (x, y), 1, 1, colors[cls])\n', (1238, 1284), False, 'import cv2\n'), ((1301, 1323), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (1311, 1323), False, 'import cv2\n'), ((1331, 1344), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1342, 1344), False, 'import cv2\n'), ((1631, 1656), 'os.path.exists', 'os.path.exists', (['outputdir'], {}), '(outputdir)\n', (1645, 1656), False, 'import os\n'), ((1666, 1688), 'os.makedirs', 'os.makedirs', (['outputdir'], {}), '(outputdir)\n', (1677, 1688), False, 'import os\n')]
|
import os.path
import pickle
import random
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_numbering_dataset
import numpy as np
from PIL import Image
class AlignedDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
self.AB_paths = [
e[1] for e in sorted(make_numbering_dataset(self.dir_AB, opt.max_dataset_size), key=lambda idx: idx[0])]
assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
with open(opt.captions, 'rb') as f:
x = pickle.load(f)
train_captions, test_captions = x[0], x[1]
self.captions = train_captions if opt.phase == "train" else test_captions
self.ixtoword, self.wordtoix = x[2], x[3]
del x, train_captions, test_captions
self.n_words = len(self.ixtoword)
print('Load from: ', opt.captions)
self.captions_per_image = opt.captions_per_image
self.text_words_num = opt.text_words_num
def get_caption(self, sent_ix):
# a list of indices for a sentence
sent_caption = np.asarray(self.captions[sent_ix]).astype('int64')
if (sent_caption == 0).sum() > 0:
print('ERROR: do not need END (0) token', sent_caption)
num_words = len(sent_caption)
# pad with 0s (i.e., '<end>')
x = np.zeros(self.text_words_num, dtype='int64')
x_len = num_words
if num_words <= self.text_words_num:
x[:num_words] = sent_caption
else:
ix = list(np.arange(num_words)) # 1, 2, 3,..., maxNum
np.random.shuffle(ix)
ix = ix[:self.text_words_num]
ix = np.sort(ix)
x = sent_caption[ix]
x_len = self.text_words_num
return x, x_len
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
# read a image given a random integer index
AB_path = self.AB_paths[index]
AB = Image.open(AB_path).convert('RGB')
# split AB image into A and B
w, h = AB.size
if w > h:
w2 = int(w / 2)
A = AB.crop((0, 0, w2, h))
B = AB.crop((w2, 0, w, h))
else:
A = AB
B = AB
# apply the same transform to both A and B
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
A = A_transform(A)
B = B_transform(B)
caption_idx = self.captions_per_image * index + random.randint(0, self.captions_per_image - 1)
caption, caption_len = self.get_caption(caption_idx)
return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path,
"caption": caption, "caption_len": caption_len}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
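# Standalone illustration (not part of the dataset class): the pad-or-subsample
# rule used by get_caption above, applied to a toy caption with text_words_num
# fixed at 5.  Captions shorter than the limit are zero-padded ('<end>' tokens);
# longer ones are randomly subsampled while keeping word order.
if __name__ == "__main__":
    def pad_or_subsample(sent_caption, text_words_num=5):
        sent_caption = np.asarray(sent_caption).astype('int64')
        if len(sent_caption) <= text_words_num:
            x = np.zeros(text_words_num, dtype='int64')
            x[:len(sent_caption)] = sent_caption
            return x, len(sent_caption)
        ix = np.sort(np.random.choice(len(sent_caption), text_words_num, replace=False))
        return sent_caption[ix], text_words_num

    print(pad_or_subsample([3, 14, 15]))              # (array([ 3, 14, 15,  0,  0]), 3)
    print(pad_or_subsample([3, 14, 15, 9, 2, 6, 5]))  # 5 indices kept, order preserved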
|
[
"random.randint",
"data.base_dataset.get_params",
"data.base_dataset.BaseDataset.__init__",
"numpy.asarray",
"numpy.zeros",
"data.image_folder.make_numbering_dataset",
"PIL.Image.open",
"numpy.sort",
"pickle.load",
"numpy.arange",
"data.base_dataset.get_transform",
"numpy.random.shuffle"
] |
[((694, 725), 'data.base_dataset.BaseDataset.__init__', 'BaseDataset.__init__', (['self', 'opt'], {}), '(self, opt)\n', (714, 725), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((2143, 2187), 'numpy.zeros', 'np.zeros', (['self.text_words_num'], {'dtype': '"""int64"""'}), "(self.text_words_num, dtype='int64')\n", (2151, 2187), True, 'import numpy as np\n'), ((3524, 3552), 'data.base_dataset.get_params', 'get_params', (['self.opt', 'A.size'], {}), '(self.opt, A.size)\n', (3534, 3552), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((3575, 3646), 'data.base_dataset.get_transform', 'get_transform', (['self.opt', 'transform_params'], {'grayscale': '(self.input_nc == 1)'}), '(self.opt, transform_params, grayscale=self.input_nc == 1)\n', (3588, 3646), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((3671, 3743), 'data.base_dataset.get_transform', 'get_transform', (['self.opt', 'transform_params'], {'grayscale': '(self.output_nc == 1)'}), '(self.opt, transform_params, grayscale=self.output_nc == 1)\n', (3684, 3743), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((1333, 1347), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1344, 1347), False, 'import pickle\n'), ((2393, 2414), 'numpy.random.shuffle', 'np.random.shuffle', (['ix'], {}), '(ix)\n', (2410, 2414), True, 'import numpy as np\n'), ((2474, 2485), 'numpy.sort', 'np.sort', (['ix'], {}), '(ix)\n', (2481, 2485), True, 'import numpy as np\n'), ((3858, 3904), 'random.randint', 'random.randint', (['(0)', '(self.captions_per_image - 1)'], {}), '(0, self.captions_per_image - 1)\n', (3872, 3904), False, 'import random\n'), ((1894, 1928), 'numpy.asarray', 'np.asarray', (['self.captions[sent_ix]'], {}), '(self.captions[sent_ix])\n', (1904, 1928), True, 'import numpy as np\n'), ((2336, 2356), 'numpy.arange', 'np.arange', (['num_words'], {}), '(num_words)\n', (2345, 2356), True, 'import numpy as np\n'), ((3173, 3192), 'PIL.Image.open', 'Image.open', (['AB_path'], {}), '(AB_path)\n', (3183, 3192), False, 'from PIL import Image\n'), ((872, 929), 'data.image_folder.make_numbering_dataset', 'make_numbering_dataset', (['self.dir_AB', 'opt.max_dataset_size'], {}), '(self.dir_AB, opt.max_dataset_size)\n', (894, 929), False, 'from data.image_folder import make_numbering_dataset\n')]
|
"""
phase.py
Estimate the phase of an oscillation using a waveform-based approach
"""
import numpy as np
def extrema_interpolated_phase(x, Ps, Ts, zeroxR=None, zeroxD=None):
"""
Use peaks (phase 0) and troughs (phase pi/-pi) to estimate
instantaneous phase. Also use rise and decay zerocrossings
(phase -pi/2 and pi/2, respectively) if specified.
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
zeroxR : array-like 1d
indices at which oscillatory rising zerocrossings occur
zeroxD : array-like 1d
indices at which oscillatory decaying zerocrossings occur
Returns
-------
pha : array-like 1d
instantaneous phase
Notes
-----
Sometimes, due to noise, extrema and zerocrossing estimation
is poor, and for example, the same index may be assigned to
both a peak and a decaying zerocrossing. Because of this,
we first assign phase values by zerocrossings, and then
may overwrite them with extrema phases.
"""
# Initialize phase arrays
# 2 phase arrays: trough pi and trough -pi
L = len(x)
t = np.arange(L)
pha_tpi = np.zeros(L) * np.nan
pha_tnpi = np.zeros(L) * np.nan
# If specified, assign phases to zerocrossings
if zeroxR is not None:
pha_tpi[zeroxR] = -np.pi / 2
pha_tnpi[zeroxR] = -np.pi / 2
if zeroxD is not None:
pha_tpi[zeroxD] = np.pi / 2
pha_tnpi[zeroxD] = np.pi / 2
# Define phases
pha_tpi[Ps] = 0
pha_tpi[Ts] = np.pi
pha_tnpi[Ps] = 0
pha_tnpi[Ts] = -np.pi
# Interpolate to find all phases
pha_tpi = np.interp(t, t[~np.isnan(pha_tpi)], pha_tpi[~np.isnan(pha_tpi)])
pha_tnpi = np.interp(t, t[~np.isnan(pha_tnpi)], pha_tnpi[~np.isnan(pha_tnpi)])
# For the phase time series in which the trough is negative pi:
# Replace the decaying periods with these periods in the phase time
# series in which the trough is pi
diffs = np.diff(pha_tnpi)
diffs = np.append(diffs, 99)
pha_tnpi[diffs < 0] = pha_tpi[diffs < 0]
# Assign the periods before the first empirical phase timepoint to NaN
diffs = np.diff(pha_tnpi)
first_empirical_idx = next(i for i, xi in enumerate(diffs) if xi > 0)
pha_tnpi[:first_empirical_idx] = np.nan
# Assign the periods after the last empirical phase timepoint to NaN
diffs = np.diff(pha_tnpi)
last_empirical_idx = next(i for i, xi in enumerate(diffs[::-1]) if xi > 0)
pha_tnpi[-last_empirical_idx + 1:] = np.nan
return pha_tnpi
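# Minimal usage sketch (not part of the original module): one noiseless 1 Hz
# sine sampled at 1 kHz for 2 s, with its known peak/trough/zerocrossing
# indices passed in by hand.  A real pipeline would detect Ps, Ts, zeroxR and
# zeroxD from the data instead of hard-coding them.
if __name__ == "__main__":
    fs = 1000
    t = np.arange(0, 2, 1 / fs)
    x = np.sin(2 * np.pi * t)
    Ps = np.array([250, 1250])        # peaks (phase 0)
    Ts = np.array([750, 1750])        # troughs (phase +/- pi)
    zeroxR = np.array([0, 1000])      # rising zerocrossings (-pi/2)
    zeroxD = np.array([500, 1500])    # decaying zerocrossings (pi/2)
    pha = extrema_interpolated_phase(x, Ps, Ts, zeroxR=zeroxR, zeroxD=zeroxD)
    print(pha[250], pha[750])            # ~0.0 at the peak, ~-3.1416 at the trough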
|
[
"numpy.zeros",
"numpy.isnan",
"numpy.append",
"numpy.diff",
"numpy.arange"
] |
[((1263, 1275), 'numpy.arange', 'np.arange', (['L'], {}), '(L)\n', (1272, 1275), True, 'import numpy as np\n'), ((2105, 2122), 'numpy.diff', 'np.diff', (['pha_tnpi'], {}), '(pha_tnpi)\n', (2112, 2122), True, 'import numpy as np\n'), ((2135, 2155), 'numpy.append', 'np.append', (['diffs', '(99)'], {}), '(diffs, 99)\n', (2144, 2155), True, 'import numpy as np\n'), ((2289, 2306), 'numpy.diff', 'np.diff', (['pha_tnpi'], {}), '(pha_tnpi)\n', (2296, 2306), True, 'import numpy as np\n'), ((2511, 2528), 'numpy.diff', 'np.diff', (['pha_tnpi'], {}), '(pha_tnpi)\n', (2518, 2528), True, 'import numpy as np\n'), ((1290, 1301), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (1298, 1301), True, 'import numpy as np\n'), ((1326, 1337), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (1334, 1337), True, 'import numpy as np\n'), ((1781, 1798), 'numpy.isnan', 'np.isnan', (['pha_tpi'], {}), '(pha_tpi)\n', (1789, 1798), True, 'import numpy as np\n'), ((1810, 1827), 'numpy.isnan', 'np.isnan', (['pha_tpi'], {}), '(pha_tpi)\n', (1818, 1827), True, 'import numpy as np\n'), ((1861, 1879), 'numpy.isnan', 'np.isnan', (['pha_tnpi'], {}), '(pha_tnpi)\n', (1869, 1879), True, 'import numpy as np\n'), ((1892, 1910), 'numpy.isnan', 'np.isnan', (['pha_tnpi'], {}), '(pha_tnpi)\n', (1900, 1910), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import os
from collections import defaultdict
from copy import deepcopy
from warnings import warn
import numpy as np
import pandas as pd
from pathlib import Path
from simulator.core.DtnBundle import Bundle
from simulator.utils.DtnIO import load_traffic_file
from simulator.utils.DtnUtils import shift_traffic
from simulator.generators.DtnAbstractGenerator import DtnAbstractGenerator
# ============================================================================================================
# === DEFINE LATENCY CATEGORIES - THESE ARE CONSTANT
# ============================================================================================================
# Define latency
lat = np.array([[60, np.nan, np.nan],
[60, np.nan, np.nan],
[60, np.nan, 3600],
[60, 60, np.nan],
[60, 900, 21600],
[60, 300, 3600],
[60, 300, np.nan],
[60, 60, np.nan],
[60, 900, 21600],
[60, 900, 21600],
[60, 900, 21600],
[60, 300, np.nan]])
lat = pd.DataFrame(data=1.0*lat, columns=['seconds','minutes','hours'],
index=['voice','biomedical','caution and warning','command and teleoperation',
'file','health and status','nav type 1 products','nav type 2 message',
'pao hd video','sci hd video','science','sd video'])
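# Example of how this table is consumed by generate_bundles() below: a flow
# whose DataType is 'voice' and whose Latency field says 'seconds' resolves to
# lat.loc['voice', 'seconds'] == 60.0 seconds.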
# ============================================================================================================
# === FUNCTIONS TO CREATE TWO STATE MARKOV PROCESS AND BUNDLE GENERATION TIMES
# ============================================================================================================
def two_state_markov_process(Tmin, Tmax, DutyCycle, Ton):
# Initialize variables
Tstart = 0
Tend = Tmax - Tmin
Toff = ((1 / DutyCycle) - 1) * Ton
K = 10
ok = False
while not ok:
# Initialize variables
Ns = int(np.ceil(0.5*K*(Tend-Tstart)/(Ton + Toff)))
# Handle special case where duty cycle is 1
if DutyCycle == 1:
state, times = True, Tend
else:
state = np.random.uniform() < DutyCycle
on_dur = np.random.exponential(scale=Ton, size=Ns)
off_dur = np.random.exponential(scale=Toff, size=Ns)
times = np.zeros(2*Ns)
if state == True:
times[0::2] = on_dur
times[1::2] = off_dur
else:
times[0::2] = off_dur
times[1::2] = on_dur
# Finalize the process generated
times = np.insert(np.cumsum(times), 0, 0)
states = np.zeros_like(times, dtype=bool)
states[0::2] = state
states[1::2] = not state
# Validate the sequence
if times[-1] >= Tend: ok = True
else: K += 1
# Trim end of generated sequence to match Tend
times[times > Tend] = Tend
idx = np.argmax(times == Tend)+1
if idx != 0 and DutyCycle != 1.0 and idx != len(times):
times = times[0:idx]
states = states[0:idx]
# Shift times to Tmin, Tmax
times += Tmin
return times, states
def generate_markov_bundles(BS, Rb, Lat, Tmin, Tmax, DutyCycle, Ton):
# Generate Markov intervals
times, states = two_state_markov_process(Tmin, Tmax, DutyCycle, Ton)
# Initial processing entry. If initial state is OFF, skip it
ini = (states[0] == False)
# Initialize variables
t = []
buf = 0
state = True
# Iterate over periods
for i in range(ini, len(states)-1):
# Handle OFF state only if buffer is not empty
if state == False and buf != 0:
# t_ref indicates the time at which the last bundle was sent. If no
# bundles were ever sent, assume 0.
t_ref = 0 if len(t) == 0 else t[-1]
# If waiting for the start of the ON period will make you exceed
# the latency requirement, send a bundle with half data half padding.
while t_ref + Lat < times[i+1] and buf >= BS:
t_ref = max(t_ref, times[i]) + Lat
                t.append(t_ref)
buf -= BS
# Handle ON state
if state == True:
dv = buf + Rb * (times[i+1] - times[i])
N_bnd = int(np.floor(dv / BS))
t_bnd = times[i] + np.arange(1,N_bnd+1)*(BS / Rb)
if len(t_bnd) > 0: t_bnd -= buf/Rb
t_bnd = t_bnd[t_bnd <= times[i+1]]
t.extend(t_bnd)
buf = dv - N_bnd * BS
# Switch state
state = not state
    # Add one last bundle at the end of t to transmit all unaccounted data.
# Note that this bundle might have some padding data
if buf > 0:
t_ref = times[-1] if len(t) == 0 else t[-1]
if states[-1] == False:
t.append(t_ref + Lat)
else:
t.append(max(t_ref, times[-1])+Lat)
buf = 0
# return times at which a bundle is delivered, and the amount of data left at the end
return t, buf
def generate_bundles(traffic, id2alias, min_bundle_size=1024, max_bundle_size=8e9, lat_frac=0.5):
# Get a map from node alias to ids
alias2id = {v: k for k, v in id2alias.items()}
# Get simulation start time
t0 = min([flow['StartTime'] for _, flow in traffic.items()])
# Iterate over flows
for fid, flow in traffic.items():
# Get the numeric latency
flow['Latency'] = lat.loc[flow['DataType'].lower(), flow['Latency'].lower()]
# Compute bundle size
bundle_lat = flow['Latency']*min(lat_frac, flow['DutyCycle'])
bundle_sz = min(max(min_bundle_size, int(flow['DataRate']*bundle_lat)), max_bundle_size)
# Get start and time for this flow
Tmin = (flow['StartTime'] - t0).total_seconds()
Tmax = (flow['EndTime'] - t0).total_seconds()
# Generate bundles
t, _ = generate_markov_bundles(bundle_sz, flow['DataRate'], flow['Latency'],
Tmin, Tmax, flow['DutyCycle'], flow['Duration'])
# Store the bundle times and size
flow['Bundles'] = t
flow['BundleSize'] = bundle_sz
flow['fid'] = fid
# Transform names of flows from alias to ids
flow['Orig'] = alias2id[flow['TransElementName']]
flow['Dest'] = alias2id[flow['ReceiveElementName']]
return traffic
# ============================================================================================================
# === SIMULATION CLASS
# ============================================================================================================
class DtnMarkovBundleGenerator(DtnAbstractGenerator):
_all_flows = None
def __init__(self, env, parent, props):
super().__init__(env, parent, props)
# Initialize variables
self.traffic_file = self.config['globals'].indir / props.file
def reset(self):
# Reset static variables
super().reset()
self.__class__._all_flows = None
def initialize(self):
# Setting static variables only once
if not self.__class__._all_flows: self.load_flows()
# Get flows for this generator
self.flows = self.__class__._all_flows[self.parent.nid]
# Iterate over all flows for this generator
for _, flow in self.flows.items(): self.env.process(self.run(flow))
def load_flows(self):
# Load generators file
traffic = shift_traffic(load_traffic_file(self.traffic_file), self.epoch)
# Generate bundles
id2alias = {nid: dd.alias for nid, dd in self.config['network'].nodes.items()}
flows = generate_bundles(traffic, id2alias, min_bundle_size=int(self.props.min_bundle_size),
max_bundle_size=float(self.props.max_bundle_size),
lat_frac=float(self.props.latency_fraction))
# Log bundle generation
for fid, flow in flows.items():
if len(flow['Bundles']) == 0:
self.disp('Flow {}: No bundles generated', fid)
else:
self.disp('Flow {}: {} bundles generated between t={:.3f} and t={:.3f}', fid, len(flow['Bundles']),
min(flow['Bundles']), max(flow['Bundles']))
        # Create a dictionary of dictionaries: {Node ID: {flow id: {flow props}}}
d = defaultdict(dict)
for fid, flow in flows.items(): d[flow['Orig']][fid] = flow
# Store all the flows generated
self.__class__._all_flows = d
def run(self, flow):
# If no bundles, return
if len(flow['Bundles']) == 0: return
# Initialize variables
bnd_dt = np.insert(np.diff(flow['Bundles']), 0, flow['Bundles'][0])
# Iterate over bundle transmit times
for dt in bnd_dt:
# Wait until next time to transmit
yield self.env.timeout(dt)
# Create a new bundle and record it
new_bundle = Bundle.from_flow(self.env, flow)
# Monitor the new bundle creation
self.monitor_new_bundle(new_bundle)
# Log the new bundle creation
self.disp('{} is created at node {}', new_bundle, self.parent.nid)
            # Schedule routing of the bundle
self.parent.forward(new_bundle)
def predicted_data_vol(self):
""" Predicted data volume in [bits] """
return sum(f['DataRate']*((f['EndTime']-f['StartTime']).total_seconds())
for f in self.flows.values())
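# Quick standalone check of the module-level helpers above (not part of the
# simulator; all numbers are illustrative only): build a 60 s two-state on/off
# process with a 0.5 duty cycle and 10 s mean ON duration, then chop the
# generated data volume into 1 kbit bundles at 100 bit/s under a 5 s latency
# bound.
if __name__ == '__main__':
    times, states = two_state_markov_process(Tmin=0, Tmax=60, DutyCycle=0.5, Ton=10)
    print(times.round(1))
    print(states)
    t_bnd, leftover = generate_markov_bundles(BS=1000, Rb=100, Lat=5,
                                            Tmin=0, Tmax=60,
                                            DutyCycle=0.5, Ton=10)
    print(len(t_bnd), 'bundle release times, first few:', t_bnd[:3])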
|
[
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.zeros_like",
"numpy.ceil",
"numpy.argmax",
"numpy.floor",
"numpy.random.exponential",
"numpy.zeros",
"collections.defaultdict",
"numpy.cumsum",
"numpy.diff",
"numpy.array",
"numpy.arange",
"simulator.core.DtnBundle.Bundle.from_flow",
"simulator.utils.DtnIO.load_traffic_file"
] |
[((710, 960), 'numpy.array', 'np.array', (['[[60, np.nan, np.nan], [60, np.nan, np.nan], [60, np.nan, 3600], [60, 60,\n np.nan], [60, 900, 21600], [60, 300, 3600], [60, 300, np.nan], [60, 60,\n np.nan], [60, 900, 21600], [60, 900, 21600], [60, 900, 21600], [60, 300,\n np.nan]]'], {}), '([[60, np.nan, np.nan], [60, np.nan, np.nan], [60, np.nan, 3600], [\n 60, 60, np.nan], [60, 900, 21600], [60, 300, 3600], [60, 300, np.nan],\n [60, 60, np.nan], [60, 900, 21600], [60, 900, 21600], [60, 900, 21600],\n [60, 300, np.nan]])\n', (718, 960), True, 'import numpy as np\n'), ((1156, 1454), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '(1.0 * lat)', 'columns': "['seconds', 'minutes', 'hours']", 'index': "['voice', 'biomedical', 'caution and warning', 'command and teleoperation',\n 'file', 'health and status', 'nav type 1 products',\n 'nav type 2 message', 'pao hd video', 'sci hd video', 'science', 'sd video'\n ]"}), "(data=1.0 * lat, columns=['seconds', 'minutes', 'hours'], index\n =['voice', 'biomedical', 'caution and warning',\n 'command and teleoperation', 'file', 'health and status',\n 'nav type 1 products', 'nav type 2 message', 'pao hd video',\n 'sci hd video', 'science', 'sd video'])\n", (1168, 1454), True, 'import pandas as pd\n'), ((2770, 2802), 'numpy.zeros_like', 'np.zeros_like', (['times'], {'dtype': 'bool'}), '(times, dtype=bool)\n', (2783, 2802), True, 'import numpy as np\n'), ((3052, 3076), 'numpy.argmax', 'np.argmax', (['(times == Tend)'], {}), '(times == Tend)\n', (3061, 3076), True, 'import numpy as np\n'), ((8540, 8557), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (8551, 8557), False, 'from collections import defaultdict\n'), ((2067, 2116), 'numpy.ceil', 'np.ceil', (['(0.5 * K * (Tend - Tstart) / (Ton + Toff))'], {}), '(0.5 * K * (Tend - Tstart) / (Ton + Toff))\n', (2074, 2116), True, 'import numpy as np\n'), ((2318, 2359), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': 'Ton', 'size': 'Ns'}), '(scale=Ton, size=Ns)\n', (2339, 2359), True, 'import numpy as np\n'), ((2383, 2425), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': 'Toff', 'size': 'Ns'}), '(scale=Toff, size=Ns)\n', (2404, 2425), True, 'import numpy as np\n'), ((2447, 2463), 'numpy.zeros', 'np.zeros', (['(2 * Ns)'], {}), '(2 * Ns)\n', (2455, 2463), True, 'import numpy as np\n'), ((2729, 2745), 'numpy.cumsum', 'np.cumsum', (['times'], {}), '(times)\n', (2738, 2745), True, 'import numpy as np\n'), ((7621, 7657), 'simulator.utils.DtnIO.load_traffic_file', 'load_traffic_file', (['self.traffic_file'], {}), '(self.traffic_file)\n', (7638, 7657), False, 'from simulator.utils.DtnIO import load_traffic_file\n'), ((8883, 8907), 'numpy.diff', 'np.diff', (["flow['Bundles']"], {}), "(flow['Bundles'])\n", (8890, 8907), True, 'import numpy as np\n'), ((9164, 9196), 'simulator.core.DtnBundle.Bundle.from_flow', 'Bundle.from_flow', (['self.env', 'flow'], {}), '(self.env, flow)\n', (9180, 9196), False, 'from simulator.core.DtnBundle import Bundle\n'), ((2264, 2283), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2281, 2283), True, 'import numpy as np\n'), ((4418, 4435), 'numpy.floor', 'np.floor', (['(dv / BS)'], {}), '(dv / BS)\n', (4426, 4435), True, 'import numpy as np\n'), ((4468, 4491), 'numpy.arange', 'np.arange', (['(1)', '(N_bnd + 1)'], {}), '(1, N_bnd + 1)\n', (4477, 4491), True, 'import numpy as np\n')]
|
"""Multi-agent traffic light example (single shared policy)."""
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from flow.envs.multiagent import MyMultiTrafficLightGridPOEnv
from flow.networks import TrafficLightGridNetwork
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import InFlows, SumoCarFollowingParams, VehicleParams
from flow.controllers import SimCarFollowingController, GridRouter
from ray.tune.registry import register_env
from flow.utils.registry import make_create_env
import numpy as np
# Experiment parameters
N_ROLLOUTS = 20 # number of rollouts per training iteration
N_CPUS = 3 # number of parallel workers
# Environment parameters
HORIZON = 400 # time horizon of a single rollout
V_ENTER = 30 # enter speed for departing vehicles
INNER_LENGTH = 300 # length of inner edges in the traffic light grid network
LONG_LENGTH = 100 # length of final edge in route
SHORT_LENGTH = 300 # length of edges that vehicles start on
# number of vehicles originating in the left, right, top, and bottom edges
N_LEFT, N_RIGHT, N_TOP, N_BOTTOM = 0, 0, 0, 0
EDGE_INFLOW = 300 # inflow rate of vehicles at every edge
N_ROWS = 2 # number of row of bidirectional lanes
N_COLUMNS = 2 # number of columns of bidirectional lanes
# we place a sufficient number of vehicles to ensure they conform with the
# total number specified above. We also use a "right_of_way" speed mode to
# support traffic light compliance
vehicles = VehicleParams()
num_vehicles = (N_LEFT + N_RIGHT) * N_COLUMNS + (N_BOTTOM + N_TOP) * N_ROWS
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {}),
car_following_params=SumoCarFollowingParams(
min_gap=2.5,
max_speed=V_ENTER,
decel=7.5, # avoid collisions at emergency stops
speed_mode="right_of_way",
),
routing_controller=(GridRouter, {}),
num_vehicles=num_vehicles)
# inflows of vehicles are placed on all outer edges (listed here)
outer_edges = []
outer_edges += ["left{}_{}".format(N_ROWS, i) for i in range(N_COLUMNS)]
outer_edges += ["right0_{}".format(i) for i in range(N_COLUMNS)]
outer_edges += ["bot{}_0".format(i) for i in range(N_ROWS)]
outer_edges += ["top{}_{}".format(i, N_COLUMNS) for i in range(N_ROWS)]
# equal inflows for each edge (as dictated by the EDGE_INFLOW constant)
inflow = InFlows()
for edge in outer_edges:
inflow.add(
veh_type="human",
edge=edge,
# vehs_per_hour=EDGE_INFLOW,
# probability=0.10,
vehs_per_hour = 600,
departLane="free",
departSpeed=V_ENTER)
myNetParams = NetParams(
inflows=inflow,
additional_params={
"speed_limit": V_ENTER + 5, # inherited from grid0 benchmark
"grid_array": {
"short_length": SHORT_LENGTH,
"inner_length": INNER_LENGTH,
"long_length": LONG_LENGTH,
"row_num": N_ROWS,
"col_num": N_COLUMNS,
"cars_left": N_LEFT,
"cars_right": N_RIGHT,
"cars_top": N_TOP,
"cars_bot": N_BOTTOM,
},
"horizontal_lanes": 1,
"vertical_lanes": 1,
},
)
flow_params = dict(
# name of the experiment
exp_tag="grid_0_{}x{}_i{}_multiagent".format(N_ROWS, N_COLUMNS, EDGE_INFLOW),
# name of the flow environment the experiment is running on
env_name=MyMultiTrafficLightGridPOEnv,
# name of the network class the experiment is running on
network=TrafficLightGridNetwork,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
restart_instance=True,
sim_step=1,
render=False,
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=HORIZON,
warmup_steps=0,
sims_per_step=1,
additional_params={
"target_velocity": 50,
"switch_time": 3,
"num_observed": 2,
"discrete": False,
"tl_type": "actuated",
"num_local_edges": 4,
"num_local_lights": 4,
},
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=myNetParams,
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization
# or reset (see flow.core.params.InitialConfig)
initial=InitialConfig(
spacing='custom',
shuffle=True,
),
)
############################# Training section below #################################
def cover_actions(c_a, s_a,num):
# for i in range(len(c_a)):
# if c_a[i] == 1:
# s_a[i] = abs(s_a[i] - 1)
for i in range(num):
if i == c_a:
s_a[i] = 1
return s_a
def data_collection(env, vels, queues):
vehicles = env.k.vehicle
veh_speeds = vehicles.get_speed(vehicles.get_ids())
vels.append(np.mean(veh_speeds))
queued_vels = len([v for v in veh_speeds if v < 1])
queues.append(queued_vels)
return vels, queues
def normalize_formation(state,Agent_NUM):
_state = [[] for i in range(Agent_NUM)]
for i in range(Agent_NUM):
_state[i] = state["center"+str(i)]
return _state
def record_line(log_path, line):
with open(log_path, 'a') as fp:
fp.writelines(line)
fp.writelines("\n")
return True
if __name__ == "__main__":
myTrafficNet = TrafficLightGridNetwork(
name = 'grid',
vehicles = vehicles,
net_params = myNetParams,
)
env = MyMultiTrafficLightGridPOEnv(
env_params=flow_params['env'], sim_params=flow_params['sim'], network=myTrafficNet)
# print(env.scenario.get_edge_list())
    # Prepare agent.
from flow.core.ppo_agent import *
############################################################################
############################################################################
Agent_NUM = N_ROWS * N_COLUMNS
    Reward_num = 1  # 0 means multiple (per-agent) rewards, 1 means a single reward
NAME = '2x2_600_PPO_SOFT_try4'
Epoch = 4000
steps = 400
rnn_train_epi = 25
rnn_agent = PPO(s_dim=42*Agent_NUM,a_dim=Agent_NUM+1,name=NAME)
############################################################################
############################################################################
global_counter = 0
each_line_path = "collected_data/ppo/{}_plot_log.txt".format(NAME)
test_epoch_path = "collected_data/ppo/{}_epoch_log.txt".format(NAME)
for ep in range(Epoch):
        # RNN_PPO training steps
for i in range(rnn_train_epi):
print("当前训练次数:")
print(i)
global_counter += 1
state = env.reset()
state = normalize_formation(state,Agent_NUM)
_state = [n for a in state for n in a ]
ep_r = 0.0
for step in range(steps):
step_r = 0.0
# print(_state)
_state = np.array(_state)
_actions = rnn_agent.choose_action(_state)
# print(_actions)
actions = np.zeros((Agent_NUM,), dtype=int)
rl_actions = cover_actions(_actions, actions,Agent_NUM)
next_state, rewards, done, _ = env.step(rl_actions)
if Reward_num == 0:
for k in range(Agent_NUM):
step_r += rewards[k]/Agent_NUM
ep_r += rewards[k]/Agent_NUM
rnn_agent.experience_store(_state, _actions, step_r)
else:
ep_r += rewards
rnn_agent.experience_store(_state, _actions, rewards)
state = next_state
state = normalize_formation(state,Agent_NUM)
_state = [n for a in state for n in a ]
_state = np.array(_state)
if (step + 1) % BATCH == 0 or step == EP_LEN - 1:
rnn_agent.trajction_process(_state)
rnn_agent.update()
rnn_agent.empty_buffer()
_done = True
for i in range(Agent_NUM):
_done *= done["center"+str(i)]
# print('dome?')
# print(_done)
if _done:
break
print('steps rewards:')
print(ep_r)
rnn_agent.summarize(ep_r, global_counter, 'reward')
if ep % 10 == 0:
rnn_agent.save_params(NAME,ep)
# test phase
if ep >= 0:
            print('Test phase:')
print(ep)
record_line(each_line_path, "*** Epoch: {} ***\n".format(ep))
queue, speed, ret = [], [], []
for i in range(3):
ep_r, ep_q, ep_v = [], [], []
state = env.reset()
state = normalize_formation(state,Agent_NUM)
_state = [n for a in state for n in a ]
for step in range(steps):
step_r = 0
data_collection(env, ep_v, ep_q)
_state = np.array(_state)
_actions = rnn_agent.choose_action(_state)
actions = np.zeros((Agent_NUM,), dtype=int)
rl_actions = cover_actions(_actions, actions,Agent_NUM)
next_state, rewards, done, _ = env.step(rl_actions)
if Reward_num == 0:
for k in range(Agent_NUM):
step_r += rewards[k]/Agent_NUM
ep_r.append(step_r)
else:
                    ep_r.append(rewards)
state = next_state
state = normalize_formation(state,Agent_NUM)
_state = [n for a in state for n in a ]
_done = True
for i in range(Agent_NUM):
_done *= done["center"+str(i)]
if _done:
break
queue.append(np.array(ep_q).mean())
speed.append(np.array(ep_v).mean())
ret.append(np.array(ep_r).mean())
record_line(each_line_path, "Queue: " + str(ep_q) + "\n")
record_line(each_line_path, "Speed: " + str(ep_v) + "\n")
record_line(each_line_path, "Return: " + str(ep_r) + "\n")
# record...
print("*** Epoch: {} ***\n".format(ep))
print("| Queue: {}, std: {} |".format(np.array(queue).mean(), np.array(queue).std()))
print("| Speed: {}, std: {} |".format(np.array(speed).mean(), np.array(speed).std()))
print("| Return: {}, std: {} |".format(np.array(ret).mean(), np.array(ret).std()))
print("*****************\n")
record_line(test_epoch_path, "*** Epoch: {} ***\n".format(ep))
record_line(test_epoch_path, "| Queue: {}, std: {} |".format(np.array(queue).mean(), np.array(queue).std()))
record_line(test_epoch_path, "| Speed: {}, std: {} |".format(np.array(speed).mean(), np.array(speed).std()))
record_line(test_epoch_path, "| Return: {}, std: {} |".format(np.array(ret).mean(), np.array(ret).std()))
record_line(test_epoch_path, "*****************\n")
|
[
"flow.networks.TrafficLightGridNetwork",
"flow.core.params.EnvParams",
"flow.core.params.VehicleParams",
"flow.envs.multiagent.MyMultiTrafficLightGridPOEnv",
"flow.core.params.SumoParams",
"numpy.zeros",
"flow.core.params.SumoCarFollowingParams",
"numpy.mean",
"numpy.array",
"flow.core.params.InFlows",
"flow.core.params.InitialConfig",
"flow.core.params.NetParams"
] |
[((1495, 1510), 'flow.core.params.VehicleParams', 'VehicleParams', ([], {}), '()\n', (1508, 1510), False, 'from flow.core.params import InFlows, SumoCarFollowingParams, VehicleParams\n'), ((2385, 2394), 'flow.core.params.InFlows', 'InFlows', ([], {}), '()\n', (2392, 2394), False, 'from flow.core.params import InFlows, SumoCarFollowingParams, VehicleParams\n'), ((2645, 3008), 'flow.core.params.NetParams', 'NetParams', ([], {'inflows': 'inflow', 'additional_params': "{'speed_limit': V_ENTER + 5, 'grid_array': {'short_length': SHORT_LENGTH,\n 'inner_length': INNER_LENGTH, 'long_length': LONG_LENGTH, 'row_num':\n N_ROWS, 'col_num': N_COLUMNS, 'cars_left': N_LEFT, 'cars_right':\n N_RIGHT, 'cars_top': N_TOP, 'cars_bot': N_BOTTOM}, 'horizontal_lanes': \n 1, 'vertical_lanes': 1}"}), "(inflows=inflow, additional_params={'speed_limit': V_ENTER + 5,\n 'grid_array': {'short_length': SHORT_LENGTH, 'inner_length':\n INNER_LENGTH, 'long_length': LONG_LENGTH, 'row_num': N_ROWS, 'col_num':\n N_COLUMNS, 'cars_left': N_LEFT, 'cars_right': N_RIGHT, 'cars_top':\n N_TOP, 'cars_bot': N_BOTTOM}, 'horizontal_lanes': 1, 'vertical_lanes': 1})\n", (2654, 3008), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((5719, 5798), 'flow.networks.TrafficLightGridNetwork', 'TrafficLightGridNetwork', ([], {'name': '"""grid"""', 'vehicles': 'vehicles', 'net_params': 'myNetParams'}), "(name='grid', vehicles=vehicles, net_params=myNetParams)\n", (5742, 5798), False, 'from flow.networks import TrafficLightGridNetwork\n'), ((5847, 5964), 'flow.envs.multiagent.MyMultiTrafficLightGridPOEnv', 'MyMultiTrafficLightGridPOEnv', ([], {'env_params': "flow_params['env']", 'sim_params': "flow_params['sim']", 'network': 'myTrafficNet'}), "(env_params=flow_params['env'], sim_params=\n flow_params['sim'], network=myTrafficNet)\n", (5875, 5964), False, 'from flow.envs.multiagent import MyMultiTrafficLightGridPOEnv\n'), ((1707, 1803), 'flow.core.params.SumoCarFollowingParams', 'SumoCarFollowingParams', ([], {'min_gap': '(2.5)', 'max_speed': 'V_ENTER', 'decel': '(7.5)', 'speed_mode': '"""right_of_way"""'}), "(min_gap=2.5, max_speed=V_ENTER, decel=7.5,\n speed_mode='right_of_way')\n", (1729, 1803), False, 'from flow.core.params import InFlows, SumoCarFollowingParams, VehicleParams\n'), ((3751, 3810), 'flow.core.params.SumoParams', 'SumoParams', ([], {'restart_instance': '(True)', 'sim_step': '(1)', 'render': '(False)'}), '(restart_instance=True, sim_step=1, render=False)\n', (3761, 3810), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((3922, 4160), 'flow.core.params.EnvParams', 'EnvParams', ([], {'horizon': 'HORIZON', 'warmup_steps': '(0)', 'sims_per_step': '(1)', 'additional_params': "{'target_velocity': 50, 'switch_time': 3, 'num_observed': 2, 'discrete': \n False, 'tl_type': 'actuated', 'num_local_edges': 4, 'num_local_lights': 4}"}), "(horizon=HORIZON, warmup_steps=0, sims_per_step=1,\n additional_params={'target_velocity': 50, 'switch_time': 3,\n 'num_observed': 2, 'discrete': False, 'tl_type': 'actuated',\n 'num_local_edges': 4, 'num_local_lights': 4})\n", (3931, 4160), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((4717, 4762), 'flow.core.params.InitialConfig', 'InitialConfig', ([], {'spacing': '"""custom"""', 'shuffle': '(True)'}), "(spacing='custom', shuffle=True)\n", (4730, 4762), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((5217, 5236), 'numpy.mean', 'np.mean', 
(['veh_speeds'], {}), '(veh_speeds)\n', (5224, 5236), True, 'import numpy as np\n'), ((7262, 7278), 'numpy.array', 'np.array', (['_state'], {}), '(_state)\n', (7270, 7278), True, 'import numpy as np\n'), ((7398, 7431), 'numpy.zeros', 'np.zeros', (['(Agent_NUM,)'], {'dtype': 'int'}), '((Agent_NUM,), dtype=int)\n', (7406, 7431), True, 'import numpy as np\n'), ((8160, 8176), 'numpy.array', 'np.array', (['_state'], {}), '(_state)\n', (8168, 8176), True, 'import numpy as np\n'), ((9427, 9443), 'numpy.array', 'np.array', (['_state'], {}), '(_state)\n', (9435, 9443), True, 'import numpy as np\n'), ((9537, 9570), 'numpy.zeros', 'np.zeros', (['(Agent_NUM,)'], {'dtype': 'int'}), '((Agent_NUM,), dtype=int)\n', (9545, 9570), True, 'import numpy as np\n'), ((10440, 10454), 'numpy.array', 'np.array', (['ep_q'], {}), '(ep_q)\n', (10448, 10454), True, 'import numpy as np\n'), ((10492, 10506), 'numpy.array', 'np.array', (['ep_v'], {}), '(ep_v)\n', (10500, 10506), True, 'import numpy as np\n'), ((10542, 10556), 'numpy.array', 'np.array', (['ep_r'], {}), '(ep_r)\n', (10550, 10556), True, 'import numpy as np\n'), ((10916, 10931), 'numpy.array', 'np.array', (['queue'], {}), '(queue)\n', (10924, 10931), True, 'import numpy as np\n'), ((10940, 10955), 'numpy.array', 'np.array', (['queue'], {}), '(queue)\n', (10948, 10955), True, 'import numpy as np\n'), ((11014, 11029), 'numpy.array', 'np.array', (['speed'], {}), '(speed)\n', (11022, 11029), True, 'import numpy as np\n'), ((11038, 11053), 'numpy.array', 'np.array', (['speed'], {}), '(speed)\n', (11046, 11053), True, 'import numpy as np\n'), ((11113, 11126), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (11121, 11126), True, 'import numpy as np\n'), ((11135, 11148), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (11143, 11148), True, 'import numpy as np\n'), ((11346, 11361), 'numpy.array', 'np.array', (['queue'], {}), '(queue)\n', (11354, 11361), True, 'import numpy as np\n'), ((11370, 11385), 'numpy.array', 'np.array', (['queue'], {}), '(queue)\n', (11378, 11385), True, 'import numpy as np\n'), ((11467, 11482), 'numpy.array', 'np.array', (['speed'], {}), '(speed)\n', (11475, 11482), True, 'import numpy as np\n'), ((11491, 11506), 'numpy.array', 'np.array', (['speed'], {}), '(speed)\n', (11499, 11506), True, 'import numpy as np\n'), ((11589, 11602), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (11597, 11602), True, 'import numpy as np\n'), ((11611, 11624), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (11619, 11624), True, 'import numpy as np\n')]
|
""" Module for the KMCRateCalculatorPlugin class """
# Copyright (c) 2013 <NAME>
#
# This file is part of the KMCLib project distributed under the terms of the
# GNU General Public License version 3, see <http://www.gnu.org/licenses/>.
#
import numpy
from KMCLib.Backend import Backend
from KMCLib.Exceptions.Error import Error
class KMCRateCalculatorPlugin(Backend.RateCalculator):
"""
Class for providing an interface to easily extend and customize
the behaviour of the calculation of individual rates in the KMC simulation.
"""
def __init__(self):
"""
Base class constructor.
"""
# Call the C++ base class constructor.
Backend.RateCalculator.__init__(self)
# Call the custom setup.
self.initialize()
def backendRateCallback(self,
cpp_coords,
coords_len,
types_before,
types_after,
rate_constant,
process_number,
global_x,
global_y,
global_z):
"""
        Function called from C++ to get the rate. It receives
        the data from C++ and parses it into a Python friendly format before
        sending it forward to the custom rate function.
"""
# Call and return the custom rate.
# PERFORMME: Consider creating the numpy array in C++ if possible.
global_coordinate = (global_x, global_y, global_z)
return self.rate(numpy.array(cpp_coords).reshape(coords_len,3),
types_before,
types_after,
rate_constant,
process_number,
global_coordinate)
def initialize(self):
"""
Called as the last statement in the base class constructor
to allow for custom setup of the object.
"""
pass
def rate(self,
coords,
types_before,
types_after,
rate_constant,
process_number,
global_coordinate):
"""
Called from the base class to get the rate for a particular
local geometry. Any class inheriting from the plugin base class
must provide an implementation of this function.
:param coords: The coordinates of the configuration as a Nx3 numpy array
in fractional units of the primitive cell.
:param types_before: The types before the process, as tuple of strings.
:param types_after: The types after the process, as tuple of strings.
:param rate_constant: The rate constant associated with the process
to either update or replace.
:param process_number: The process id number.
:param global_coordinate: The global coordinate of the central index.
:returns: The custom rate of the process. Note that the returned rate must
not be negative or zero.
"""
raise Error("The rate(self,...) API function in the 'KMCRateCalculator' base class must be overloaded when using a custom rate calculator.")
def cutoff(self):
"""
        To determine the radial cutoff of the geometry around the central
        lattice site to cut out and send down to the custom rate function.
        If not implemented by derived classes the default is to use
        the cutoff of the largest process local geometry.
        :returns: The desired cutoff in primitive cell internal coordinates.
:rtype: float
"""
# Returning None results in default behaviour.
return None
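# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original KMCLib module): a minimal
# example of how a derived class might implement the rate() and cutoff() API
# described in the docstrings above. The class name, the process number and
# the boost factor are hypothetical placeholders.
class ExampleCustomRateCalculator(KMCRateCalculatorPlugin):
    """ Toy rate calculator that rescales the rate of one hypothetical process. """

    def initialize(self):
        # Custom setup hook, called at the end of the base class constructor.
        self._boost_factor = 2.0

    def rate(self,
             coords,
             types_before,
             types_after,
             rate_constant,
             process_number,
             global_coordinate):
        # Double the rate of the (hypothetical) process number 0 and leave all
        # other processes unchanged. The returned rate stays strictly positive,
        # as required by the API documentation above.
        if process_number == 0:
            return self._boost_factor * rate_constant
        return rate_constant

    def cutoff(self):
        # Use an explicit cutoff of one primitive cell instead of the default.
        return 1.0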
|
[
"KMCLib.Backend.Backend.RateCalculator.__init__",
"numpy.array",
"KMCLib.Exceptions.Error.Error"
] |
[((691, 728), 'KMCLib.Backend.Backend.RateCalculator.__init__', 'Backend.RateCalculator.__init__', (['self'], {}), '(self)\n', (722, 728), False, 'from KMCLib.Backend import Backend\n'), ((3162, 3306), 'KMCLib.Exceptions.Error.Error', 'Error', (['"""The rate(self,...) API function in the \'KMCRateCalculator\' base class must be overloaded when using a custom rate calculator."""'], {}), '(\n "The rate(self,...) API function in the \'KMCRateCalculator\' base class must be overloaded when using a custom rate calculator."\n )\n', (3167, 3306), False, 'from KMCLib.Exceptions.Error import Error\n'), ((1609, 1632), 'numpy.array', 'numpy.array', (['cpp_coords'], {}), '(cpp_coords)\n', (1620, 1632), False, 'import numpy\n')]
|
import csv
import matplotlib as matplot
import matplotlib.pyplot as plt
import numpy as np
# List the colors that will be used for tracing the track.
colors = ['black','blue','red','green', 'cyan', \
'gray', 'gold', 'lightcoral', 'turquoise','red','blue','green','pink']
patterns = ['-', '--','--','--','--','--','--','--', ':','-', '--', ':','-', '--', ':',\
'-.', '-.', '-.', ':', '--', '-']
markers = ['.',',','o','v','8','s','+','x','X','D','^','<','>','v']
sizes = [10, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 6,5,4,3,2,2]
# Paths to the csv files
dir1 = 'C:/Users/limgr/Desktop/Katrina_wind_intensity_8km.csv'
dir2 = 'C:/Users/limgr/Desktop/Maria_wind_intensity_8km.csv'
dir3 = 'C:/Users/limgr/Desktop/Irma_wind_intensity_8km.csv'
dir4 = 'C:/Users/limgr/Desktop/Dorian_wind_intensity_8km.csv'
dir7 = 'C:/Users/limgr/Desktop/Lorenzo_wind_intensity_8km.csv'
c=0
rows=[]
Times=[]
Times=[]
values=[]
with open(dir1, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times.append(list(row.keys()))
line_count += 1
#print(row)
rows.append(row)
values.append(list(row.values()))
line_count += 1
print(f'Processed {line_count} lines.')
Times0=Times[0]
print(Times0)
print(values[0])
for i in range(0,line_count-1):
if i==0:
tmp=[float(i)*0.5144444 for i in values[i]]
#tmp=[float(i) for i in values[i]]
else:
tmp=[float(i) for i in values[i]]
plt.plot( Times0[:5], tmp[:5], color = colors[c], marker='s', linestyle=patterns[c],\
markersize=sizes[c])
c+=1
plt.legend(["Real Track",
"C0.0001",\
"C0.01",\
"C1",\
"C100"],\
loc = "upper right", \
prop={'size': 7})
# plt.legend(["Oussama_NoTurb", "WRF_NoTurb", \
# "WRFSWAN_NoTurb_swdt600_cpdt600_swgr11p1_swh2",\
# "WRFSWAN_NoTurb_swdt60_cpdt600_swgr11p1_swh2",\
# "WRFSWAN_NoTurb_swdt600_cpdt60_swgr11p1_swh2",\
# "WRFSWAN_NoTurb_swdt600_cpdt600_swgr11p1_swh2",\
# "WRFSWAN_NoTurb_swdt600_cpdt600_swgr11p1_swh4",\
# 'WRFSWAN_NoTurb_swdt600_cpdt600_swgr32p0_swh2',\
# 'WRFSWAN_NoTurb_swdt600_cpdt3600_swgr11p1_swh2'],loc = "lower center", \
# prop={'size': 7})
# plt.legend(["Oussama_NoTurb", "WRF_NoTurb", \
# "WRFSWAN_NoTurb_1",\
# "WRFSWAN_NoTurb_2",\
# "WRFSWAN_NoTurb_3",\
# "WRFSWAN_NoTurb_4",\
# "WRFSWAN_NoTurb_5",\
# 'WRFSWAN_NoTurb_6',\
# 'WRFSWAN_NoTurb_7'],loc = "lower center", \
# prop={'size': 7})
plt.xlabel("Time Step [hr]", fontsize=14)
plt.ylabel("Intensity", fontsize=14)
plt.title("Katrina Intensity ", {'size': 20})
plt.savefig('C:/Users/limgr/Desktop/katrina_wind_intensity_A.png')
plt.show()
# Save the plot
#plt.savefig('Output.png')
c=0
rows=[]
Times=[]
Times=[]
values=[]
with open(dir2, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times.append(list(row.keys()))
line_count += 1
#print(row)
rows.append(row)
values.append(list(row.values()))
line_count += 1
print(f'Processed {line_count} lines.')
Times0=Times[0]
print(Times0)
print(values[0])
for i in range(0,line_count-1):
if i==0:
tmp=[float(i)*0.5144444 for i in values[i]]
#tmp=[float(i) for i in values[i]]
else:
tmp=[float(i) for i in values[i]]
plt.plot( Times0[:5], tmp[:5], color = colors[c], marker='s', linestyle=patterns[c],\
markersize=sizes[c])
c+=1
plt.legend(["Real Track",
"C0.0001",\
"C0.01",\
"C1",\
"C100"],\
loc = "upper right", \
prop={'size': 7})
plt.xlabel("Time Step [hr]", fontsize=14)
plt.ylabel("Intensity", fontsize=14)
plt.title("Maria Intensity ", {'size': 20})
plt.savefig('C:/Users/limgr/Desktop/maria_wind_intensity_A.png')
plt.show()
c=0
rows=[]
Times=[]
Times=[]
values=[]
with open(dir3, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times.append(list(row.keys()))
line_count += 1
#print(row)
rows.append(row)
values.append(list(row.values()))
line_count += 1
print(f'Processed {line_count} lines.')
Times0=Times[0]
print(Times0)
print(values[0])
for i in range(0,line_count-1):
if i==0:
tmp=[float(i)*0.5144444 for i in values[i]]
#tmp=[float(i) for i in values[i]]
else:
tmp=[float(i) for i in values[i]]
plt.plot( Times0, tmp, color = colors[c], marker='s', linestyle=patterns[c],\
markersize=sizes[c])
c+=1
plt.legend(["Real Track",
"C0.0001",\
"C0.01",\
"C1",\
"C100"],\
loc = "upper right", \
prop={'size': 7})
plt.xlabel("Time Step [hr]", fontsize=14)
plt.ylabel("Intensity", fontsize=14)
plt.title("Irma Intensity ", {'size': 20})
plt.savefig('C:/Users/limgr/Desktop/irma_wind_intensity_A.png')
plt.show()
c=0
rows=[]
Times=[]
Times=[]
values=[]
with open(dir4, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times.append(list(row.keys()))
line_count += 1
#print(row)
rows.append(row)
values.append(list(row.values()))
line_count += 1
print(f'Processed {line_count} lines.')
Times0=Times[0]
print(Times0)
print(values[0])
for i in range(0,line_count-1):
if i==0:
tmp=[float(i)*0.5144444 for i in values[i]]
#tmp=[float(i) for i in values[i]]
else:
tmp=[float(i) for i in values[i]]
plt.plot( Times0[:-2], tmp[:-2], color = colors[c], marker='s', linestyle=patterns[c],\
markersize=sizes[c])
c+=1
plt.legend(["Real Track",
"C0.0001",\
"C0.01",\
"C1",\
"C100"],\
loc = "upper right", \
prop={'size': 7})
plt.xlabel("Time Step [hr]", fontsize=14)
plt.ylabel("Intensity", fontsize=14)
plt.title("Dorian Intensity ", {'size': 20})
plt.savefig('C:/Users/limgr/Desktop/dorian_wind_intensity_A.png')
plt.show()
c=0
rows=[]
Times=[]
Times=[]
values=[]
with open(dir7, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times.append(list(row.keys()))
line_count += 1
#print(row)
rows.append(row)
values.append(list(row.values()))
line_count += 1
print(f'Processed {line_count} lines.')
Times0=Times[0]
print(Times0)
print(values[0])
for i in range(0,line_count-1):
if i==0:
tmp=[float(i)*0.5144444 for i in values[i]]
#tmp=[float(i) for i in values[i]]
else:
tmp=[float(i) for i in values[i]]
plt.plot( Times0, tmp, color = colors[c], marker='s', linestyle=patterns[c],\
markersize=sizes[c])
c+=1
plt.legend(["Real Track",
"C0.0001",\
"C0.01",\
"C1",\
"C100"],\
loc = "upper right", \
prop={'size': 7})
plt.xlabel("Time Step [hr]", fontsize=14)
plt.ylabel("Intensity", fontsize=14)
plt.title("Lorenzo Intensity ", {'size': 20})
plt.savefig('C:/Users/limgr/Desktop/lorenzo_wind_intensity_A.png')
plt.show()
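# ---------------------------------------------------------------------------
# Optional refactoring sketch (not invoked by this script): the five blocks
# above repeat the same read-and-plot logic, which a helper along these lines
# could replace. The knots-to-m/s factor and the plot styling mirror the
# blocks above; the function name and signature are hypothetical.
def plot_intensity(csv_path, title, out_png, last_col=None):
    times, series = [], []
    with open(csv_path, mode='r') as csv_file:
        for count, row in enumerate(csv.DictReader(csv_file)):
            if count == 0:
                times = list(row.keys())
            series.append(list(row.values()))
    for c, values in enumerate(series):
        # The first row holds the observed track in knots; convert to m/s.
        tmp = [float(v) * 0.5144444 if c == 0 else float(v) for v in values]
        plt.plot(times[:last_col], tmp[:last_col], color=colors[c], marker='s',
                 linestyle=patterns[c], markersize=sizes[c])
    plt.legend(["Real Track", "C0.0001", "C0.01", "C1", "C100"],
               loc="upper right", prop={'size': 7})
    plt.xlabel("Time Step [hr]", fontsize=14)
    plt.ylabel("Intensity", fontsize=14)
    plt.title(title, {'size': 20})
    plt.savefig(out_png)
    plt.show()
# Example (hypothetical) usage:
#     plot_intensity(dir1, "Katrina Intensity", "katrina_wind_intensity_A.png", last_col=5)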
rows1=[]
Times1=[]
Times1=[]
values1=[]
rows2=[]
Times2=[]
Times2=[]
values2=[]
rows3=[]
Times3=[]
Times3=[]
values3=[]
rows4=[]
Times4=[]
Times4=[]
values4=[]
rows5=[]
Times5=[]
Times5=[]
values5=[]
rows6=[]
Times6=[]
Times6=[]
values6=[]
rows7=[]
Times7=[]
Times7=[]
values7=[]
# Set the working space.
#os.chdir(Dir_Output)
# Initiate the varaibles that will contain the output files.
#Forecast_Outputs_NoTurb = ""
#Real_Output = ""
#########################################################################
# This function returns a list of all the files in the output directory.#
#########################################################################
#def list_files (Dir, Forecast_Outputs_NoTurb, Real_Output):
# for f in os.listdir(Dir):
# if (f == "Real_Output.csv"):
# Real_Output = f
# elif (f.find('NoTurb') != -1):
# Forecast_Outputs_NoTurb = f
# return (Forecast_Outputs_NoTurb, Real_Output)
# Calling the list_files function to classify files according to the turbulence model
#(Forecast_Outputs_NoTurb, Real_Output) = list_files (Dir_Output, Forecast_Outputs_NoTurb, Real_Output)
#print (Real_Output)
#print (Forecast_Outputs_Smag2D)
#print (Forecast_Outputs_NoTurb)
###################################################################
# This function returns a list of wind speed for each output file.#
###################################################################
real1_track=[]
oussama1=[]
wrf1=[]
with open(dir1, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
sim_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times1.append(list(row.keys()))
real1_track.append(list(row.values()))
line_count += 1
else:
rows1.append(row)
values1.append(list(row.values()))
line_count += 1
print('There are in total ',(line_count-1)*(len(row)),' data points')
simu1=np.array(values1, dtype=np.float32)
real1=np.array(real1_track, dtype=np.float32)
real1=real1*0.5144444
real1=real1
simu_error1=abs(simu1-real1[:,None])/real1[:,None]#/((line_count-3)*(len(row)))
print('absolute intensity error')
print(abs(simu1-real1[:,None]))
real2_track=[]
oussama2=[]
wrf2=[]
with open(dir2, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
sim_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times2.append(list(row.keys()))
real2_track.append(list(row.values()))
line_count += 1
else:
rows2.append(row)
values2.append(list(row.values()))
line_count += 1
print('There are in total ',(line_count-1)*(len(row)),' data points')
simu2=np.array(values2, dtype=np.float32)
real2=np.array(real2_track, dtype=np.float32)
real2=real2*0.5144444
real2=real2
simu_error2=abs(simu2-real2[:,None])/real2[:,None]#/((line_count-3)*(len(row)))
print('absolute intensity error')
print(abs(simu2-real2[:,None]))
real3_track=[]
oussama3=[]
wrf3=[]
with open(dir3, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
sim_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times3.append(list(row.keys()))
real3_track.append(list(row.values()))
line_count += 1
else:
rows3.append(row)
values3.append(list(row.values()))
line_count += 1
print('There are in total ',(line_count-1)*(len(row)),' data points')
simu3=np.array(values3, dtype=np.float32)
real3=np.array(real3_track, dtype=np.float32)
real3=real3*0.5144444
real3=real3
simu_error3=abs(simu3-real3[:,None])/real3[:,None]#/((line_count-3)*(len(row)))
print('absolute intensity error')
print(abs(simu3-real3[:,None]))
real4_track=[]
oussama4=[]
wrf4=[]
with open(dir4, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
sim_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times4.append(list(row.keys()))
real4_track.append(list(row.values()))
line_count += 1
else:
rows4.append(row)
values4.append(list(row.values()))
line_count += 1
print('There are in total ',(line_count-1)*(len(row)),' data points')
simu4=np.array(values4, dtype=np.float32)
real4=np.array(real4_track, dtype=np.float32)
real4=real4*0.5144444
real4=real4
simu_error4=abs(simu4-real4[:,None])/real4[:,None]#/((line_count-3)*(len(row)))
print('absolute intensity error')
print(abs(simu4-real4[:,None]))
real7_track=[]
oussama7=[]
wrf7=[]
with open(dir7, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
sim_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
Times7.append(list(row.keys()))
real7_track.append(list(row.values()))
line_count += 1
else:
rows7.append(row)
values7.append(list(row.values()))
line_count += 1
print('There are in total ',(line_count-1)*(len(row)),' data points')
simu7=np.array(values7, dtype=np.float32)
real7=np.array(real7_track, dtype=np.float32)
real7=real7*0.5144444
real7=real7
simu_error7=abs(simu7-real7[:,None])/real7[:,None]#/((line_count-3)*(len(row)))
print('absolute intensity error')
print(abs(simu7-real7[:,None]))
#ouss_all=np.append(ouss1[0][:],ouss2[0][:],ouss3[0][:],ouss4[0][:],axis=0)
#error_all=np.append(error1[0][1][:],error2[0][1][:],error3[0][1][:],error4[0][1][:], axis=0)
ouss_error=np.zeros((4, 4))
wrf_error=np.zeros((4, 4))
par1_error=np.zeros((4, 4))
par2_error=np.zeros((4, 4))
par3_error=np.zeros((4, 4))
par4_error=np.zeros((4, 4))
par5_error=np.zeros((4, 4))
# par6_error=np.zeros((4, 9))
# par7_error=np.zeros((4, 9))
# par8_error=np.zeros((4, 9))
# par9_error=np.zeros((4, 9))
# print(np.shape(values4))
# print(np.shape(error4))
# print(ouss_error)
# print(simu_error)
# par1_error[0]=simu_error1[0][0][:]
# par1_error[1]=simu_error2[0][0][:]
# par1_error[2]=simu_error3[0][0][:]
# par1_error[3]=simu_error4[0][0][:]
# par1_error[4]=simu_error5[0][0][:]
# par1_error[5]=simu_error6[0][0][:]
par1_error=np.concatenate((simu_error1[0][0][0:5],simu_error2[0][0][:],\
simu_error3[0][0][:],simu_error4[0][0][:-2],simu_error7[0][0][:]))
par1_error=par1_error.flatten()
par1_error_mean=np.mean(par1_error)
par1_error_std=np.std(par1_error)
# par2_error[0]=simu_error1[0][1][:]
# par2_error[1]=simu_error2[0][1][:]
# par2_error[2]=simu_error3[0][1][:]
# par2_error[3]=simu_error4[0][1][:]
# par2_error[4]=simu_error5[0][1][:]
# par2_error[5]=simu_error6[0][1][:]
par2_error=np.concatenate((simu_error1[0][1][0:5],simu_error2[0][1][:],\
simu_error3[0][1][:],simu_error4[0][1][:-2],simu_error7[0][1][:]))
par2_error=par2_error.flatten()
par2_error_mean=np.mean(par2_error)
par2_error_std=np.std(par2_error)
# par3_error[0]=simu_error1[0][2][:]
# par3_error[1]=simu_error2[0][2][:]
# par3_error[2]=simu_error3[0][2][:]
# par3_error[3]=simu_error4[0][2][:]
# par3_error[4]=simu_error5[0][2][:]
# par3_error[5]=simu_error6[0][2][:]
par3_error=np.concatenate((simu_error1[0][2][0:5],simu_error2[0][2][:],\
simu_error3[0][2][:],simu_error4[0][2][:-2],simu_error7[0][2][:]))
par3_error=par3_error.flatten()
par3_error_mean=np.mean(par3_error)
par3_error_std=np.std(par3_error)
# par4_error[0]=simu_error1[0][3][:]
# par4_error[1]=simu_error2[0][3][:]
# par4_error[2]=simu_error3[0][3][:]
# par4_error[3]=simu_error4[0][3][:]
# par4_error[4]=simu_error5[0][3][:]
# par4_error[5]=simu_error6[0][3][:]
par4_error=np.concatenate((simu_error1[0][3][0:5],simu_error2[0][3][:],\
simu_error3[0][3][:],simu_error4[0][3][:-2],simu_error7[0][3][:]))
par4_error=par4_error.flatten()
par4_error_mean=np.mean(par4_error)
par4_error_std=np.std(par4_error)
hurricanes = ['C0.0001', 'C0.01', 'C1', 'C100']
x_pos = np.arange(len(hurricanes))
CTEs = [par1_error_mean,par2_error_mean,\
par3_error_mean,par4_error_mean]
errors = [par1_error_std,par2_error_std,\
par3_error_std,par4_error_std]
fig, ax = plt.subplots()
ax.bar(x_pos, CTEs, yerr=errors, align='center', alpha=0.5, ecolor='black', capsize=10)
ax.set_ylabel('Intensity')
ax.set_xticks(x_pos)
ax.set_xticklabels(hurricanes)
ax.set_title('Hurricanes')
ax.yaxis.grid(True)
for i, v in enumerate(CTEs):
ax.text(i, v+0.02, str(round(v, 3)), color='red', fontweight='bold')
# Save the figure and show
fig.autofmt_xdate()
plt.tight_layout()
#plt.savefig('wind_intensity_bar_plot.png')
plt.savefig('C:/Users/limgr/Desktop/wind_intensity_bar_plot.png')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.std",
"csv.DictReader",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((1745, 1847), 'matplotlib.pyplot.legend', 'plt.legend', (["['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100']"], {'loc': '"""upper right"""', 'prop': "{'size': 7}"}), "(['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100'], loc=\n 'upper right', prop={'size': 7})\n", (1755, 1847), True, 'import matplotlib.pyplot as plt\n'), ((2779, 2820), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Step [hr]"""'], {'fontsize': '(14)'}), "('Time Step [hr]', fontsize=14)\n", (2789, 2820), True, 'import matplotlib.pyplot as plt\n'), ((2821, 2857), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {'fontsize': '(14)'}), "('Intensity', fontsize=14)\n", (2831, 2857), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2903), 'matplotlib.pyplot.title', 'plt.title', (['"""Katrina Intensity """', "{'size': 20}"], {}), "('Katrina Intensity ', {'size': 20})\n", (2867, 2903), True, 'import matplotlib.pyplot as plt\n'), ((2904, 2970), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/limgr/Desktop/katrina_wind_intensity_A.png"""'], {}), "('C:/Users/limgr/Desktop/katrina_wind_intensity_A.png')\n", (2915, 2970), True, 'import matplotlib.pyplot as plt\n'), ((2971, 2981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2979, 2981), True, 'import matplotlib.pyplot as plt\n'), ((3891, 3993), 'matplotlib.pyplot.legend', 'plt.legend', (["['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100']"], {'loc': '"""upper right"""', 'prop': "{'size': 7}"}), "(['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100'], loc=\n 'upper right', prop={'size': 7})\n", (3901, 3993), True, 'import matplotlib.pyplot as plt\n'), ((4065, 4106), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Step [hr]"""'], {'fontsize': '(14)'}), "('Time Step [hr]', fontsize=14)\n", (4075, 4106), True, 'import matplotlib.pyplot as plt\n'), ((4107, 4143), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {'fontsize': '(14)'}), "('Intensity', fontsize=14)\n", (4117, 4143), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4187), 'matplotlib.pyplot.title', 'plt.title', (['"""Maria Intensity """', "{'size': 20}"], {}), "('Maria Intensity ', {'size': 20})\n", (4153, 4187), True, 'import matplotlib.pyplot as plt\n'), ((4188, 4252), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/limgr/Desktop/maria_wind_intensity_A.png"""'], {}), "('C:/Users/limgr/Desktop/maria_wind_intensity_A.png')\n", (4199, 4252), True, 'import matplotlib.pyplot as plt\n'), ((4253, 4263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4261, 4263), True, 'import matplotlib.pyplot as plt\n'), ((5128, 5230), 'matplotlib.pyplot.legend', 'plt.legend', (["['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100']"], {'loc': '"""upper right"""', 'prop': "{'size': 7}"}), "(['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100'], loc=\n 'upper right', prop={'size': 7})\n", (5138, 5230), True, 'import matplotlib.pyplot as plt\n'), ((5302, 5343), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Step [hr]"""'], {'fontsize': '(14)'}), "('Time Step [hr]', fontsize=14)\n", (5312, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5344, 5380), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {'fontsize': '(14)'}), "('Intensity', fontsize=14)\n", (5354, 5380), True, 'import matplotlib.pyplot as plt\n'), ((5381, 5423), 'matplotlib.pyplot.title', 'plt.title', (['"""Irma Intensity """', "{'size': 20}"], {}), "('Irma Intensity ', {'size': 20})\n", (5390, 5423), True, 'import matplotlib.pyplot as plt\n'), ((5424, 5487), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""C:/Users/limgr/Desktop/irma_wind_intensity_A.png"""'], {}), "('C:/Users/limgr/Desktop/irma_wind_intensity_A.png')\n", (5435, 5487), True, 'import matplotlib.pyplot as plt\n'), ((5488, 5498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5496, 5498), True, 'import matplotlib.pyplot as plt\n'), ((6372, 6474), 'matplotlib.pyplot.legend', 'plt.legend', (["['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100']"], {'loc': '"""upper right"""', 'prop': "{'size': 7}"}), "(['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100'], loc=\n 'upper right', prop={'size': 7})\n", (6382, 6474), True, 'import matplotlib.pyplot as plt\n'), ((6547, 6588), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Step [hr]"""'], {'fontsize': '(14)'}), "('Time Step [hr]', fontsize=14)\n", (6557, 6588), True, 'import matplotlib.pyplot as plt\n'), ((6589, 6625), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {'fontsize': '(14)'}), "('Intensity', fontsize=14)\n", (6599, 6625), True, 'import matplotlib.pyplot as plt\n'), ((6626, 6670), 'matplotlib.pyplot.title', 'plt.title', (['"""Dorian Intensity """', "{'size': 20}"], {}), "('Dorian Intensity ', {'size': 20})\n", (6635, 6670), True, 'import matplotlib.pyplot as plt\n'), ((6671, 6736), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/limgr/Desktop/dorian_wind_intensity_A.png"""'], {}), "('C:/Users/limgr/Desktop/dorian_wind_intensity_A.png')\n", (6682, 6736), True, 'import matplotlib.pyplot as plt\n'), ((6737, 6747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6745, 6747), True, 'import matplotlib.pyplot as plt\n'), ((7608, 7710), 'matplotlib.pyplot.legend', 'plt.legend', (["['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100']"], {'loc': '"""upper right"""', 'prop': "{'size': 7}"}), "(['Real Track', 'C0.0001', 'C0.01', 'C1', 'C100'], loc=\n 'upper right', prop={'size': 7})\n", (7618, 7710), True, 'import matplotlib.pyplot as plt\n'), ((7783, 7824), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Step [hr]"""'], {'fontsize': '(14)'}), "('Time Step [hr]', fontsize=14)\n", (7793, 7824), True, 'import matplotlib.pyplot as plt\n'), ((7825, 7861), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {'fontsize': '(14)'}), "('Intensity', fontsize=14)\n", (7835, 7861), True, 'import matplotlib.pyplot as plt\n'), ((7862, 7907), 'matplotlib.pyplot.title', 'plt.title', (['"""Lorenzo Intensity """', "{'size': 20}"], {}), "('Lorenzo Intensity ', {'size': 20})\n", (7871, 7907), True, 'import matplotlib.pyplot as plt\n'), ((7908, 7974), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/limgr/Desktop/lorenzo_wind_intensity_A.png"""'], {}), "('C:/Users/limgr/Desktop/lorenzo_wind_intensity_A.png')\n", (7919, 7974), True, 'import matplotlib.pyplot as plt\n'), ((7975, 7985), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7983, 7985), True, 'import matplotlib.pyplot as plt\n'), ((10004, 10039), 'numpy.array', 'np.array', (['values1'], {'dtype': 'np.float32'}), '(values1, dtype=np.float32)\n', (10012, 10039), True, 'import numpy as np\n'), ((10046, 10085), 'numpy.array', 'np.array', (['real1_track'], {'dtype': 'np.float32'}), '(real1_track, dtype=np.float32)\n', (10054, 10085), True, 'import numpy as np\n'), ((10858, 10893), 'numpy.array', 'np.array', (['values2'], {'dtype': 'np.float32'}), '(values2, dtype=np.float32)\n', (10866, 10893), True, 'import numpy as np\n'), ((10900, 10939), 'numpy.array', 'np.array', (['real2_track'], {'dtype': 'np.float32'}), '(real2_track, dtype=np.float32)\n', (10908, 
10939), True, 'import numpy as np\n'), ((11722, 11757), 'numpy.array', 'np.array', (['values3'], {'dtype': 'np.float32'}), '(values3, dtype=np.float32)\n', (11730, 11757), True, 'import numpy as np\n'), ((11764, 11803), 'numpy.array', 'np.array', (['real3_track'], {'dtype': 'np.float32'}), '(real3_track, dtype=np.float32)\n', (11772, 11803), True, 'import numpy as np\n'), ((12586, 12621), 'numpy.array', 'np.array', (['values4'], {'dtype': 'np.float32'}), '(values4, dtype=np.float32)\n', (12594, 12621), True, 'import numpy as np\n'), ((12628, 12667), 'numpy.array', 'np.array', (['real4_track'], {'dtype': 'np.float32'}), '(real4_track, dtype=np.float32)\n', (12636, 12667), True, 'import numpy as np\n'), ((13442, 13477), 'numpy.array', 'np.array', (['values7'], {'dtype': 'np.float32'}), '(values7, dtype=np.float32)\n', (13450, 13477), True, 'import numpy as np\n'), ((13484, 13523), 'numpy.array', 'np.array', (['real7_track'], {'dtype': 'np.float32'}), '(real7_track, dtype=np.float32)\n', (13492, 13523), True, 'import numpy as np\n'), ((13898, 13914), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (13906, 13914), True, 'import numpy as np\n'), ((13925, 13941), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (13933, 13941), True, 'import numpy as np\n'), ((13953, 13969), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (13961, 13969), True, 'import numpy as np\n'), ((13981, 13997), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (13989, 13997), True, 'import numpy as np\n'), ((14009, 14025), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (14017, 14025), True, 'import numpy as np\n'), ((14037, 14053), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (14045, 14053), True, 'import numpy as np\n'), ((14065, 14081), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (14073, 14081), True, 'import numpy as np\n'), ((14535, 14670), 'numpy.concatenate', 'np.concatenate', (['(simu_error1[0][0][0:5], simu_error2[0][0][:], simu_error3[0][0][:],\n simu_error4[0][0][:-2], simu_error7[0][0][:])'], {}), '((simu_error1[0][0][0:5], simu_error2[0][0][:], simu_error3[0\n ][0][:], simu_error4[0][0][:-2], simu_error7[0][0][:]))\n', (14549, 14670), True, 'import numpy as np\n'), ((14739, 14758), 'numpy.mean', 'np.mean', (['par1_error'], {}), '(par1_error)\n', (14746, 14758), True, 'import numpy as np\n'), ((14774, 14792), 'numpy.std', 'np.std', (['par1_error'], {}), '(par1_error)\n', (14780, 14792), True, 'import numpy as np\n'), ((15031, 15166), 'numpy.concatenate', 'np.concatenate', (['(simu_error1[0][1][0:5], simu_error2[0][1][:], simu_error3[0][1][:],\n simu_error4[0][1][:-2], simu_error7[0][1][:])'], {}), '((simu_error1[0][1][0:5], simu_error2[0][1][:], simu_error3[0\n ][1][:], simu_error4[0][1][:-2], simu_error7[0][1][:]))\n', (15045, 15166), True, 'import numpy as np\n'), ((15235, 15254), 'numpy.mean', 'np.mean', (['par2_error'], {}), '(par2_error)\n', (15242, 15254), True, 'import numpy as np\n'), ((15270, 15288), 'numpy.std', 'np.std', (['par2_error'], {}), '(par2_error)\n', (15276, 15288), True, 'import numpy as np\n'), ((15528, 15663), 'numpy.concatenate', 'np.concatenate', (['(simu_error1[0][2][0:5], simu_error2[0][2][:], simu_error3[0][2][:],\n simu_error4[0][2][:-2], simu_error7[0][2][:])'], {}), '((simu_error1[0][2][0:5], simu_error2[0][2][:], simu_error3[0\n ][2][:], simu_error4[0][2][:-2], simu_error7[0][2][:]))\n', (15542, 15663), True, 'import numpy as np\n'), ((15732, 15751), 'numpy.mean', 'np.mean', (['par3_error'], {}), 
'(par3_error)\n', (15739, 15751), True, 'import numpy as np\n'), ((15767, 15785), 'numpy.std', 'np.std', (['par3_error'], {}), '(par3_error)\n', (15773, 15785), True, 'import numpy as np\n'), ((16025, 16160), 'numpy.concatenate', 'np.concatenate', (['(simu_error1[0][3][0:5], simu_error2[0][3][:], simu_error3[0][3][:],\n simu_error4[0][3][:-2], simu_error7[0][3][:])'], {}), '((simu_error1[0][3][0:5], simu_error2[0][3][:], simu_error3[0\n ][3][:], simu_error4[0][3][:-2], simu_error7[0][3][:]))\n', (16039, 16160), True, 'import numpy as np\n'), ((16229, 16248), 'numpy.mean', 'np.mean', (['par4_error'], {}), '(par4_error)\n', (16236, 16248), True, 'import numpy as np\n'), ((16264, 16282), 'numpy.std', 'np.std', (['par4_error'], {}), '(par4_error)\n', (16270, 16282), True, 'import numpy as np\n'), ((16555, 16569), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (16567, 16569), True, 'import matplotlib.pyplot as plt\n'), ((16933, 16951), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16949, 16951), True, 'import matplotlib.pyplot as plt\n'), ((16996, 17061), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/limgr/Desktop/wind_intensity_bar_plot.png"""'], {}), "('C:/Users/limgr/Desktop/wind_intensity_bar_plot.png')\n", (17007, 17061), True, 'import matplotlib.pyplot as plt\n'), ((17062, 17072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17070, 17072), True, 'import matplotlib.pyplot as plt\n'), ((984, 1008), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (998, 1008), False, 'import csv\n'), ((1614, 1721), 'matplotlib.pyplot.plot', 'plt.plot', (['Times0[:5]', 'tmp[:5]'], {'color': 'colors[c]', 'marker': '"""s"""', 'linestyle': 'patterns[c]', 'markersize': 'sizes[c]'}), "(Times0[:5], tmp[:5], color=colors[c], marker='s', linestyle=\n patterns[c], markersize=sizes[c])\n", (1622, 1721), True, 'import matplotlib.pyplot as plt\n'), ((3130, 3154), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (3144, 3154), False, 'import csv\n'), ((3760, 3867), 'matplotlib.pyplot.plot', 'plt.plot', (['Times0[:5]', 'tmp[:5]'], {'color': 'colors[c]', 'marker': '"""s"""', 'linestyle': 'patterns[c]', 'markersize': 'sizes[c]'}), "(Times0[:5], tmp[:5], color=colors[c], marker='s', linestyle=\n patterns[c], markersize=sizes[c])\n", (3768, 3867), True, 'import matplotlib.pyplot as plt\n'), ((4375, 4399), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (4389, 4399), False, 'import csv\n'), ((5005, 5103), 'matplotlib.pyplot.plot', 'plt.plot', (['Times0', 'tmp'], {'color': 'colors[c]', 'marker': '"""s"""', 'linestyle': 'patterns[c]', 'markersize': 'sizes[c]'}), "(Times0, tmp, color=colors[c], marker='s', linestyle=patterns[c],\n markersize=sizes[c])\n", (5013, 5103), True, 'import matplotlib.pyplot as plt\n'), ((5609, 5633), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (5623, 5633), False, 'import csv\n'), ((6239, 6348), 'matplotlib.pyplot.plot', 'plt.plot', (['Times0[:-2]', 'tmp[:-2]'], {'color': 'colors[c]', 'marker': '"""s"""', 'linestyle': 'patterns[c]', 'markersize': 'sizes[c]'}), "(Times0[:-2], tmp[:-2], color=colors[c], marker='s', linestyle=\n patterns[c], markersize=sizes[c])\n", (6247, 6348), True, 'import matplotlib.pyplot as plt\n'), ((6855, 6879), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (6869, 6879), False, 'import csv\n'), ((7485, 7583), 'matplotlib.pyplot.plot', 'plt.plot', (['Times0', 'tmp'], {'color': 'colors[c]', 
'marker': '"""s"""', 'linestyle': 'patterns[c]', 'markersize': 'sizes[c]'}), "(Times0, tmp, color=colors[c], marker='s', linestyle=patterns[c],\n markersize=sizes[c])\n", (7493, 7583), True, 'import matplotlib.pyplot as plt\n'), ((9511, 9535), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (9525, 9535), False, 'import csv\n'), ((10365, 10389), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (10379, 10389), False, 'import csv\n'), ((11229, 11253), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (11243, 11253), False, 'import csv\n'), ((12093, 12117), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (12107, 12117), False, 'import csv\n'), ((12949, 12973), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (12963, 12973), False, 'import csv\n')]
|
import unittest
import cv2
import numpy as np
from extractor.cropping import clip_to_image_region, \
crop_module, build_merged_index
quadrilaterals = {
('e3e70682-c209-4cac-a29f-6fbed82c07cd',
'frame_000000',
'mask_000000'): {
'quadrilateral': [
[424, 279],
[499, 280],
[499, 327],
[421, 323]
],
'center': (
460.95042812077514,
302.4197085774373
)
},
('f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'frame_000000',
'mask_000001'): {
'quadrilateral': [
[425, 326],
[499, 326],
[499, 377],
[425, 372]
],
'center': (
462.13331381447324,
350.2644805543356
)
},
('eb1167b3-67a9-4378-bc65-c1e582e2e662',
'frame_000000',
'mask_000002'): {
'quadrilateral': [
[164, 358],
[233, 363],
[233, 412],
[164, 408]
],
'center': (
198.48300673606857,
385.4114104919371
)
},
('f7c1bd87-4da5-4709-9471-3d60c8a70639',
'frame_000000',
'mask_000003'): {
'quadrilateral': [
[425, 234],
[497, 231],
[501, 279],
[421, 278]
],
'center': (
461.41970207121716,
255.7820630547903
)
},
('e443df78-9558-467f-9ba9-1faf7a024204',
'frame_000000',
'mask_000004'): {
'quadrilateral': [
[425, 94],
[498, 90],
[502, 136],
[425, 142]
],
'center': (
462.19730041647847,
115.55311355311355
)
}
}
class TestCropping(unittest.TestCase):
def test_clip_to_image_region_no_clip(self):
quad = np.array([
[[424, 279]],
[[499, 280]],
[[499, 327]],
[[421, 323]]
])
image_width = 640
image_height = 512
quad_clipped_gt = quad
quad_clipped = clip_to_image_region(
np.copy(quad), image_width, image_height)
self.assertTrue(
np.allclose(
quad_clipped,
quad_clipped_gt
)
)
def test_clip_to_image_region_clip_max(self):
quad = np.array([
[[424, 279]],
[[499, 280]],
[[499, 327]],
[[421, 323]]
])
image_width = 300
image_height = 200
quad_clipped_gt = np.array([
[[299, 199]],
[[299, 199]],
[[299, 199]],
[[299, 199]]
])
quad_clipped = clip_to_image_region(
np.copy(quad), image_width, image_height)
self.assertTrue(
np.allclose(
quad_clipped,
quad_clipped_gt
)
)
def test_clip_to_image_region_clip_min(self):
quad = np.array([
[[ -1, -1]],
[[100, -1]],
[[100, 100]],
[[ -1, 100]]
])
image_width = 200
image_height = 200
quad_clipped_gt = np.array([
[[ 0, 0]],
[[100, 0]],
[[100, 100]],
[[ 0, 100]]
])
quad_clipped = clip_to_image_region(
np.copy(quad), image_width, image_height)
self.assertTrue(
np.allclose(
quad_clipped,
quad_clipped_gt
)
)
def test_build_merged_index_merged_none(self):
merged_modules = None
merged_index_gt = {
'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
}
merged_index = build_merged_index(merged_modules, quadrilaterals)
self.assertEqual(merged_index, merged_index_gt)
def test_build_merged_index_merged_empty(self):
merged_modules = []
merged_index_gt = {
'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
}
merged_index = build_merged_index(merged_modules, quadrilaterals)
self.assertEqual(merged_index, merged_index_gt)
def test_build_merged_index_pair_merged(self):
merged_modules = [[
'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'f7c1bd87-4da5-4709-9471-3d60c8a70639'
]]
merged_index_gt = {
'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
}
merged_index = build_merged_index(merged_modules, quadrilaterals)
self.assertEqual(merged_index, merged_index_gt)
def test_build_merged_index_triplet_merged(self):
merged_modules = [[
'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'e3e70682-c209-4cac-a29f-6fbed82c07cd'
]]
merged_index_gt = {
'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
}
merged_index = build_merged_index(merged_modules, quadrilaterals)
self.assertEqual(merged_index, merged_index_gt)
def test_build_merged_index_two_pairs_merged(self):
merged_modules = [
['f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'f7c1bd87-4da5-4709-9471-3d60c8a70639'],
['e3e70682-c209-4cac-a29f-6fbed82c07cd',
'e443df78-9558-467f-9ba9-1faf7a024204']
]
merged_index_gt = {
'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
'e443df78-9558-467f-9ba9-1faf7a024204': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
}
merged_index = build_merged_index(merged_modules, quadrilaterals)
self.assertEqual(merged_index, merged_index_gt)
def test_build_merged_index_all_merged(self):
merged_modules = [[
'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
'e3e70682-c209-4cac-a29f-6fbed82c07cd',
'e443df78-9558-467f-9ba9-1faf7a024204',
'eb1167b3-67a9-4378-bc65-c1e582e2e662',
]]
merged_index_gt = {
'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'e443df78-9558-467f-9ba9-1faf7a024204': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f7c1bd87-4da5-4709-9471-3d60c8a70639'
}
merged_index = build_merged_index(merged_modules, quadrilaterals)
self.assertEqual(merged_index, merged_index_gt)
def test_crop_modules_real_data(self):
frame_file = "tests/unit/data/frame_000000.tiff"
frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
quad = np.array([
[[424, 279]],
[[499, 280]],
[[499, 327]],
[[421, 323]]
])
patch_file = "tests/unit/data/frame_000000_mask_000000.tiff"
patch_gt = cv2.imread(patch_file, cv2.IMREAD_ANYDEPTH)
patch, _ = crop_module(
frame,
quad,
crop_width=None,
crop_aspect=None,
rotate_mode="portrait"
)
self.assertTrue(np.allclose(patch, patch_gt))
def test_crop_modules_crop_full_frame(self):
frame_file = "tests/unit/data/frame_000000.tiff"
frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
quad = np.array([
[[0, 0]],
[[640, 0]],
[[640, 512]],
[[0, 512]]
])
patch, homography = crop_module(
frame,
quad,
crop_width=None,
crop_aspect=None,
rotate_mode="landscape"
)
self.assertTrue(np.allclose(patch, frame[0:-1, 0:-1]))
self.assertTrue(np.allclose(homography, np.eye(3)))
def test_crop_modules_portrait_vs_landscape(self):
frame_file = "tests/unit/data/frame_000000.tiff"
frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
quad = np.array([
[[424, 279]],
[[499, 280]],
[[499, 327]],
[[421, 323]]
])
patch, _ = crop_module(
frame,
quad,
crop_width=None,
crop_aspect=None,
rotate_mode="portrait"
)
self.assertEqual(patch.shape, (78, 47))
patch, _ = crop_module(
frame,
quad,
crop_width=None,
crop_aspect=None,
rotate_mode="landscape"
)
self.assertEqual(patch.shape, (47, 78))
patch, _ = crop_module(
frame,
quad,
crop_width=None,
crop_aspect=None,
rotate_mode=None
)
self.assertEqual(patch.shape, (47, 78)) # ?
def test_crop_modules_crop_width_and_aspect(self):
frame_file = "tests/unit/data/frame_000000.tiff"
frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
quad = np.array([
[[424, 279]],
[[499, 280]],
[[499, 327]],
[[421, 323]]
])
patch, _ = crop_module(
frame,
quad,
crop_width=50,
crop_aspect=0.625, # 1/1.6
rotate_mode="portrait"
)
self.assertEqual(patch.shape, (50, 31))
patch, _ = crop_module(
frame,
quad,
crop_width=50,
crop_aspect=1,
rotate_mode="portrait"
)
self.assertEqual(patch.shape, (50, 50))
patch, _ = crop_module(
frame,
quad,
crop_width=50,
crop_aspect=0.625, # 1/1.6
rotate_mode="landscape"
)
self.assertEqual(patch.shape, (31, 50))
patch, _ = crop_module(
frame,
quad,
crop_width=300,
crop_aspect=0.625, # 1/1.6
rotate_mode="portrait"
)
self.assertEqual(patch.shape, (300, 187))
|
[
"numpy.copy",
"extractor.cropping.build_merged_index",
"numpy.allclose",
"extractor.cropping.crop_module",
"cv2.imread",
"numpy.array",
"numpy.eye"
] |
[((1901, 1967), 'numpy.array', 'np.array', (['[[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]]'], {}), '([[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]])\n', (1909, 1967), True, 'import numpy as np\n'), ((2458, 2524), 'numpy.array', 'np.array', (['[[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]]'], {}), '([[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]])\n', (2466, 2524), True, 'import numpy as np\n'), ((2665, 2731), 'numpy.array', 'np.array', (['[[[299, 199]], [[299, 199]], [[299, 199]], [[299, 199]]]'], {}), '([[[299, 199]], [[299, 199]], [[299, 199]], [[299, 199]]])\n', (2673, 2731), True, 'import numpy as np\n'), ((3138, 3200), 'numpy.array', 'np.array', (['[[[-1, -1]], [[100, -1]], [[100, 100]], [[-1, 100]]]'], {}), '([[[-1, -1]], [[100, -1]], [[100, 100]], [[-1, 100]]])\n', (3146, 3200), True, 'import numpy as np\n'), ((3345, 3403), 'numpy.array', 'np.array', (['[[[0, 0]], [[100, 0]], [[100, 100]], [[0, 100]]]'], {}), '([[[0, 0]], [[100, 0]], [[100, 100]], [[0, 100]]])\n', (3353, 3403), True, 'import numpy as np\n'), ((4346, 4396), 'extractor.cropping.build_merged_index', 'build_merged_index', (['merged_modules', 'quadrilaterals'], {}), '(merged_modules, quadrilaterals)\n', (4364, 4396), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((5067, 5117), 'extractor.cropping.build_merged_index', 'build_merged_index', (['merged_modules', 'quadrilaterals'], {}), '(merged_modules, quadrilaterals)\n', (5085, 5117), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((5906, 5956), 'extractor.cropping.build_merged_index', 'build_merged_index', (['merged_modules', 'quadrilaterals'], {}), '(merged_modules, quadrilaterals)\n', (5924, 5956), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((6792, 6842), 'extractor.cropping.build_merged_index', 'build_merged_index', (['merged_modules', 'quadrilaterals'], {}), '(merged_modules, quadrilaterals)\n', (6810, 6842), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((7745, 7795), 'extractor.cropping.build_merged_index', 'build_merged_index', (['merged_modules', 'quadrilaterals'], {}), '(merged_modules, quadrilaterals)\n', (7763, 7795), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((8756, 8806), 'extractor.cropping.build_merged_index', 'build_merged_index', (['merged_modules', 'quadrilaterals'], {}), '(merged_modules, quadrilaterals)\n', (8774, 8806), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((8997, 9040), 'cv2.imread', 'cv2.imread', (['frame_file', 'cv2.IMREAD_ANYDEPTH'], {}), '(frame_file, cv2.IMREAD_ANYDEPTH)\n', (9007, 9040), False, 'import cv2\n'), ((9065, 9131), 'numpy.array', 'np.array', (['[[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]]'], {}), '([[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]])\n', (9073, 9131), True, 'import numpy as np\n'), ((9287, 9330), 'cv2.imread', 'cv2.imread', (['patch_file', 'cv2.IMREAD_ANYDEPTH'], {}), '(patch_file, cv2.IMREAD_ANYDEPTH)\n', (9297, 9330), False, 'import cv2\n'), ((9359, 9447), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': 'None', 'crop_aspect': 'None', 'rotate_mode': '"""portrait"""'}), "(frame, quad, crop_width=None, crop_aspect=None, rotate_mode=\n 'portrait')\n", (9370, 9447), False, 'from extractor.cropping import 
clip_to_image_region, crop_module, build_merged_index\n'), ((9719, 9762), 'cv2.imread', 'cv2.imread', (['frame_file', 'cv2.IMREAD_ANYDEPTH'], {}), '(frame_file, cv2.IMREAD_ANYDEPTH)\n', (9729, 9762), False, 'import cv2\n'), ((9787, 9845), 'numpy.array', 'np.array', (['[[[0, 0]], [[640, 0]], [[640, 512]], [[0, 512]]]'], {}), '([[[0, 0]], [[640, 0]], [[640, 512]], [[0, 512]]])\n', (9795, 9845), True, 'import numpy as np\n'), ((9941, 10030), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': 'None', 'crop_aspect': 'None', 'rotate_mode': '"""landscape"""'}), "(frame, quad, crop_width=None, crop_aspect=None, rotate_mode=\n 'landscape')\n", (9952, 10030), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((10377, 10420), 'cv2.imread', 'cv2.imread', (['frame_file', 'cv2.IMREAD_ANYDEPTH'], {}), '(frame_file, cv2.IMREAD_ANYDEPTH)\n', (10387, 10420), False, 'import cv2\n'), ((10445, 10511), 'numpy.array', 'np.array', (['[[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]]'], {}), '([[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]])\n', (10453, 10511), True, 'import numpy as np\n'), ((10598, 10686), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': 'None', 'crop_aspect': 'None', 'rotate_mode': '"""portrait"""'}), "(frame, quad, crop_width=None, crop_aspect=None, rotate_mode=\n 'portrait')\n", (10609, 10686), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((10840, 10929), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': 'None', 'crop_aspect': 'None', 'rotate_mode': '"""landscape"""'}), "(frame, quad, crop_width=None, crop_aspect=None, rotate_mode=\n 'landscape')\n", (10851, 10929), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((11083, 11160), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': 'None', 'crop_aspect': 'None', 'rotate_mode': 'None'}), '(frame, quad, crop_width=None, crop_aspect=None, rotate_mode=None)\n', (11094, 11160), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((11442, 11485), 'cv2.imread', 'cv2.imread', (['frame_file', 'cv2.IMREAD_ANYDEPTH'], {}), '(frame_file, cv2.IMREAD_ANYDEPTH)\n', (11452, 11485), False, 'import cv2\n'), ((11510, 11576), 'numpy.array', 'np.array', (['[[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]]'], {}), '([[[424, 279]], [[499, 280]], [[499, 327]], [[421, 323]]])\n', (11518, 11576), True, 'import numpy as np\n'), ((11663, 11750), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': '(50)', 'crop_aspect': '(0.625)', 'rotate_mode': '"""portrait"""'}), "(frame, quad, crop_width=50, crop_aspect=0.625, rotate_mode=\n 'portrait')\n", (11674, 11750), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((11913, 11991), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': '(50)', 'crop_aspect': '(1)', 'rotate_mode': '"""portrait"""'}), "(frame, quad, crop_width=50, crop_aspect=1, rotate_mode='portrait')\n", (11924, 11991), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((12150, 12238), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': '(50)', 'crop_aspect': '(0.625)', 'rotate_mode': '"""landscape"""'}), "(frame, quad, 
crop_width=50, crop_aspect=0.625, rotate_mode=\n 'landscape')\n", (12161, 12238), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((12401, 12489), 'extractor.cropping.crop_module', 'crop_module', (['frame', 'quad'], {'crop_width': '(300)', 'crop_aspect': '(0.625)', 'rotate_mode': '"""portrait"""'}), "(frame, quad, crop_width=300, crop_aspect=0.625, rotate_mode=\n 'portrait')\n", (12412, 12489), False, 'from extractor.cropping import clip_to_image_region, crop_module, build_merged_index\n'), ((2179, 2192), 'numpy.copy', 'np.copy', (['quad'], {}), '(quad)\n', (2186, 2192), True, 'import numpy as np\n'), ((2267, 2309), 'numpy.allclose', 'np.allclose', (['quad_clipped', 'quad_clipped_gt'], {}), '(quad_clipped, quad_clipped_gt)\n', (2278, 2309), True, 'import numpy as np\n'), ((2859, 2872), 'numpy.copy', 'np.copy', (['quad'], {}), '(quad)\n', (2866, 2872), True, 'import numpy as np\n'), ((2947, 2989), 'numpy.allclose', 'np.allclose', (['quad_clipped', 'quad_clipped_gt'], {}), '(quad_clipped, quad_clipped_gt)\n', (2958, 2989), True, 'import numpy as np\n'), ((3539, 3552), 'numpy.copy', 'np.copy', (['quad'], {}), '(quad)\n', (3546, 3552), True, 'import numpy as np\n'), ((3627, 3669), 'numpy.allclose', 'np.allclose', (['quad_clipped', 'quad_clipped_gt'], {}), '(quad_clipped, quad_clipped_gt)\n', (3638, 3669), True, 'import numpy as np\n'), ((9549, 9577), 'numpy.allclose', 'np.allclose', (['patch', 'patch_gt'], {}), '(patch, patch_gt)\n', (9560, 9577), True, 'import numpy as np\n'), ((10132, 10169), 'numpy.allclose', 'np.allclose', (['patch', 'frame[0:-1, 0:-1]'], {}), '(patch, frame[0:-1, 0:-1])\n', (10143, 10169), True, 'import numpy as np\n'), ((10219, 10228), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (10225, 10228), True, 'import numpy as np\n')]
|
from .resnet import resnet50
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
class fpn_module_global(nn.Module):
def __init__(self, numClass):
super(fpn_module_global, self).__init__()
self._up_kwargs = {'mode': 'bilinear'}
# Top layer
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
# Lateral layers
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
# Smooth layers
self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
# Classify layers
self.classify = nn.Conv2d(128*4, numClass, kernel_size=3, stride=1, padding=1)
# Local2Global: double #channels ####################################
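        # these *_ext layers take the global features concatenated with the merged local
        # feature maps (hence the doubled input channels); they are only used when the
        # corresponding *_ext inputs are passed to forward()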
# Top layer
self.toplayer_ext = nn.Conv2d(2048*2, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
# Lateral layers
self.latlayer1_ext = nn.Conv2d(1024*2, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2_ext = nn.Conv2d(512*2, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3_ext = nn.Conv2d(256*2, 256, kernel_size=1, stride=1, padding=0)
# Smooth layers
self.smooth1_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth2_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth3_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth4_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth1_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth2_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth3_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth4_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth = nn.Conv2d(128*4*2, 128*4, kernel_size=3, stride=1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), **self._up_kwargs)
p4 = F.interpolate(p4, size=(H, W), **self._up_kwargs)
p3 = F.interpolate(p3, size=(H, W), **self._up_kwargs)
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
        Note that in PyTorch, when the input size is odd, the feature map upsampled
        with `F.interpolate(..., scale_factor=2, mode='nearest')`
        may not match the size of the lateral feature map.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
        So we choose bilinear upsampling, which supports arbitrary output sizes.
'''
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), **self._up_kwargs) + y
def forward(self, c2, c3, c4, c5, c2_ext=None, c3_ext=None, c4_ext=None, c5_ext=None, ps0_ext=None, ps1_ext=None, ps2_ext=None):
# Top-down
if c5_ext is None:
p5 = self.toplayer(c5)
p4 = self._upsample_add(p5, self.latlayer1(c4))
p3 = self._upsample_add(p4, self.latlayer2(c3))
p2 = self._upsample_add(p3, self.latlayer3(c2))
else:
p5 = self.toplayer_ext(torch.cat((c5, c5_ext), dim=1))
p4 = self._upsample_add(p5, self.latlayer1_ext(torch.cat((c4, c4_ext), dim=1)))
p3 = self._upsample_add(p4, self.latlayer2_ext(torch.cat((c3, c3_ext), dim=1)))
p2 = self._upsample_add(p3, self.latlayer3_ext(torch.cat((c2, c2_ext), dim=1)))
ps0 = [p5, p4, p3, p2]
# Smooth
if ps0_ext is None:
p5 = self.smooth1_1(p5)
p4 = self.smooth2_1(p4)
p3 = self.smooth3_1(p3)
p2 = self.smooth4_1(p2)
else:
p5 = self.smooth1_1_ext(torch.cat((p5, ps0_ext[0]), dim=1))
p4 = self.smooth2_1_ext(torch.cat((p4, ps0_ext[1]), dim=1))
p3 = self.smooth3_1_ext(torch.cat((p3, ps0_ext[2]), dim=1))
p2 = self.smooth4_1_ext(torch.cat((p2, ps0_ext[3]), dim=1))
ps1 = [p5, p4, p3, p2]
if ps1_ext is None:
p5 = self.smooth1_2(p5)
p4 = self.smooth2_2(p4)
p3 = self.smooth3_2(p3)
p2 = self.smooth4_2(p2)
else:
p5 = self.smooth1_2_ext(torch.cat((p5, ps1_ext[0]), dim=1))
p4 = self.smooth2_2_ext(torch.cat((p4, ps1_ext[1]), dim=1))
p3 = self.smooth3_2_ext(torch.cat((p3, ps1_ext[2]), dim=1))
p2 = self.smooth4_2_ext(torch.cat((p2, ps1_ext[3]), dim=1))
ps2 = [p5, p4, p3, p2]
# Classify
if ps2_ext is None:
ps3 = self._concatenate(p5, p4, p3, p2)
output = self.classify(ps3)
else:
p = self._concatenate(
torch.cat((p5, ps2_ext[0]), dim=1),
torch.cat((p4, ps2_ext[1]), dim=1),
torch.cat((p3, ps2_ext[2]), dim=1),
torch.cat((p2, ps2_ext[3]), dim=1)
)
ps3 = self.smooth(p)
output = self.classify(ps3)
return output, ps0, ps1, ps2, ps3
class fpn_module_local(nn.Module):
def __init__(self, numClass):
super(fpn_module_local, self).__init__()
self._up_kwargs = {'mode': 'bilinear'}
# Top layer
fold = 2
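        # every lateral/smooth layer takes the local patch features concatenated with the
        # cropped global features, hence the doubled ("fold") input channels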
self.toplayer = nn.Conv2d(2048 * fold, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
# Lateral layers [C]
self.latlayer1 = nn.Conv2d(1024 * fold, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(512 * fold, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(256 * fold, 256, kernel_size=1, stride=1, padding=0)
# Smooth layers
# ps0
self.smooth1_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
self.smooth2_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
self.smooth3_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
self.smooth4_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
# ps1
self.smooth1_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
self.smooth2_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
self.smooth3_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
self.smooth4_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
# ps2 is concatenation
# Classify layers
self.smooth = nn.Conv2d(128*4*fold, 128*4, kernel_size=3, stride=1, padding=1)
self.classify = nn.Conv2d(128*4, numClass, kernel_size=3, stride=1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), **self._up_kwargs)
p4 = F.interpolate(p4, size=(H, W), **self._up_kwargs)
p3 = F.interpolate(p3, size=(H, W), **self._up_kwargs)
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
        Note that in PyTorch, when the input size is odd, the feature map upsampled
        with `F.interpolate(..., scale_factor=2, mode='nearest')`
        may not match the size of the lateral feature map.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
        So we choose bilinear upsampling, which supports arbitrary output sizes.
'''
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), **self._up_kwargs) + y
def forward(self, c2, c3, c4, c5, c2_ext, c3_ext, c4_ext, c5_ext, ps0_ext, ps1_ext, ps2_ext):
# Top-down
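        # each cropped global feature map (c*_ext) is bilinearly resized to the local
        # patch resolution and concatenated channel-wise before the top/lateral convolutions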
p5 = self.toplayer(torch.cat([c5] + [F.interpolate(c5_ext[0], size=c5.size()[2:], **self._up_kwargs)], dim=1))
p4 = self._upsample_add(p5, self.latlayer1(torch.cat([c4] + [F.interpolate(c4_ext[0], size=c4.size()[2:], **self._up_kwargs)], dim=1)))
p3 = self._upsample_add(p4, self.latlayer2(torch.cat([c3] + [F.interpolate(c3_ext[0], size=c3.size()[2:], **self._up_kwargs)], dim=1)))
p2 = self._upsample_add(p3, self.latlayer3(torch.cat([c2] + [F.interpolate(c2_ext[0], size=c2.size()[2:], **self._up_kwargs)], dim=1)))
ps0 = [p5, p4, p3, p2]
# Smooth
p5 = self.smooth1_1(torch.cat([p5] + [F.interpolate(ps0_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1))
p4 = self.smooth2_1(torch.cat([p4] + [F.interpolate(ps0_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1))
p3 = self.smooth3_1(torch.cat([p3] + [F.interpolate(ps0_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1))
p2 = self.smooth4_1(torch.cat([p2] + [F.interpolate(ps0_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1))
ps1 = [p5, p4, p3, p2]
p5 = self.smooth1_2(torch.cat([p5] + [F.interpolate(ps1_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1))
p4 = self.smooth2_2(torch.cat([p4] + [F.interpolate(ps1_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1))
p3 = self.smooth3_2(torch.cat([p3] + [F.interpolate(ps1_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1))
p2 = self.smooth4_2(torch.cat([p2] + [F.interpolate(ps1_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1))
ps2 = [p5, p4, p3, p2]
# Classify
# use ps2_ext
ps3 = self._concatenate(
torch.cat([p5] + [F.interpolate(ps2_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1),
torch.cat([p4] + [F.interpolate(ps2_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1),
torch.cat([p3] + [F.interpolate(ps2_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1),
torch.cat([p2] + [F.interpolate(ps2_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1)
)
ps3 = self.smooth(ps3)
output = self.classify(ps3)
return output, ps0, ps1, ps2, ps3
class fpn(nn.Module):
def __init__(self, numClass):
super(fpn, self).__init__()
self._up_kwargs = {'mode': 'bilinear'}
# Res net
self.resnet_global = resnet50(True)
self.resnet_local = resnet50(True)
# fpn module
self.fpn_global = fpn_module_global(numClass)
self.fpn_local = fpn_module_local(numClass)
self.c2_g = None; self.c3_g = None; self.c4_g = None; self.c5_g = None; self.output_g = None
self.ps0_g = None; self.ps1_g = None; self.ps2_g = None; self.ps3_g = None
self.c2_l = []; self.c3_l = []; self.c4_l = []; self.c5_l = [];
self.ps00_l = []; self.ps01_l = []; self.ps02_l = []; self.ps03_l = [];
self.ps10_l = []; self.ps11_l = []; self.ps12_l = []; self.ps13_l = [];
self.ps20_l = []; self.ps21_l = []; self.ps22_l = []; self.ps23_l = [];
self.ps0_l = None; self.ps1_l = None; self.ps2_l = None
self.ps3_l = []#; self.output_l = []
self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
self.ps3_b = []#; self.output_b = []
self.patch_n = 0
self.mse = nn.MSELoss()
self.ensemble_conv = nn.Conv2d(128*4 * 2, numClass, kernel_size=3, stride=1, padding=1)
nn.init.normal_(self.ensemble_conv.weight, mean=0, std=0.01)
# init fpn
for m in self.fpn_global.children():
if hasattr(m, 'weight'): nn.init.normal_(m.weight, mean=0, std=0.01)
if hasattr(m, 'bias'): nn.init.constant_(m.bias, 0)
for m in self.fpn_local.children():
if hasattr(m, 'weight'): nn.init.normal_(m.weight, mean=0, std=0.01)
if hasattr(m, 'bias'): nn.init.constant_(m.bias, 0)
def clear_cache(self):
self.c2_g = None; self.c3_g = None; self.c4_g = None; self.c5_g = None; self.output_g = None
self.ps0_g = None; self.ps1_g = None; self.ps2_g = None; self.ps3_g = None
self.c2_l = []; self.c3_l = []; self.c4_l = []; self.c5_l = [];
self.ps00_l = []; self.ps01_l = []; self.ps02_l = []; self.ps03_l = [];
self.ps10_l = []; self.ps11_l = []; self.ps12_l = []; self.ps13_l = [];
self.ps20_l = []; self.ps21_l = []; self.ps22_l = []; self.ps23_l = [];
self.ps0_l = None; self.ps1_l = None; self.ps2_l = None
self.ps3_l = []; self.output_l = []
self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
self.ps3_b = []; self.output_b = []
self.patch_n = 0
def _sample_grid(self, fm, bbox, sampleSize):
"""
:param fm: tensor(b,c,h,w) the global feature map
        :param bbox: list [b * nparray(x1, y1, x2, y2)]; (x1, y1) is the top-left corner and (x2, y2) the bottom-right corner of the bbox,
                     all in range [0, 1]. x corresponds to the width dimension and y to the height dimension
        :param sampleSize: (oH, oW) the number of points to sample in the height and width dimensions
:return: tensor(b, c, oH, oW) sampled tensor
"""
b, c, h, w = fm.shape
b_bbox = len(bbox)
bbox = [x*2 - 1 for x in bbox] # range transform
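        # F.grid_sample expects sampling coordinates normalized to [-1, 1]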
if b != b_bbox and b == 1:
fm = torch.cat([fm,]*b_bbox, dim=0)
grid = np.zeros((b_bbox,) + sampleSize + (2,), dtype=np.float32)
gridMap = np.array([[(cnt_w/(sampleSize[1]-1), cnt_h/(sampleSize[0]-1)) for cnt_w in range(sampleSize[1])] for cnt_h in range(sampleSize[0])])
for cnt_b in range(b_bbox):
grid[cnt_b, :, :, 0] = bbox[cnt_b][0] + (bbox[cnt_b][2] - bbox[cnt_b][0])*gridMap[:, :, 0]
grid[cnt_b, :, :, 1] = bbox[cnt_b][1] + (bbox[cnt_b][3] - bbox[cnt_b][1])*gridMap[:, :, 1]
grid = torch.from_numpy(grid).cuda()
return F.grid_sample(fm, grid)
def _crop_global(self, f_global, top_lefts, ratio):
'''
top_lefts: [(top, left)] * b
'''
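        # for every patch, take the sub-region of the global feature map that the patch
        # covers; top_lefts and ratio are given as fractions of the global H/W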
_, c, H, W = f_global.size()
b = len(top_lefts)
h, w = int(np.round(H * ratio[0])), int(np.round(W * ratio[1]))
# bbox = [ np.array([left, top, left + ratio, top + ratio]) for (top, left) in top_lefts ]
# crop = self._sample_grid(f_global, bbox, (H, W))
crop = []
for i in range(b):
top, left = int(np.round(top_lefts[i][0] * H)), int(np.round(top_lefts[i][1] * W))
# # global's sub-region & upsample
# f_global_patch = F.interpolate(f_global[0:1, :, top:top+h, left:left+w], size=(h, w), mode='bilinear')
f_global_patch = f_global[0:1, :, top:top+h, left:left+w]
crop.append(f_global_patch[0])
crop = torch.stack(crop, dim=0) # stack into mini-batch
return [crop] # return as a list for easy to torch.cat
def _merge_local(self, f_local, merge, f_global, top_lefts, oped, ratio, template):
'''
        merge feature maps from local patches into the whole image's feature map (on cuda)
f_local: a sub_batch_size of patch's feature map
oped: [start, end)
'''
b, _, _, _ = f_local.size()
_, c, H, W = f_global.size() # match global feature size
if merge is None:
merge = torch.zeros((1, c, H, W)).cuda()
h, w = int(np.round(H * ratio[0])), int(np.round(W * ratio[1]))
for i in range(b):
index = oped[0] + i
top, left = int(np.round(H * top_lefts[index][0])), int(np.round(W * top_lefts[index][1]))
merge[:, :, top:top+h, left:left+w] += F.interpolate(f_local[i:i+1], size=(h, w), **self._up_kwargs)
if oped[1] >= len(top_lefts):
template = F.interpolate(template, size=(H, W), **self._up_kwargs)
template = template.expand_as(merge)
# template = Variable(template).cuda()
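            # dividing by template normalizes locations that are covered by several
            # overlapping patches, so their summed contributions are averaged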
merge /= template
return merge
def ensemble(self, f_local, f_global):
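        # fuse the local ps3 features with the cropped/upsampled global ps3 features and classify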
return self.ensemble_conv(torch.cat((f_local, f_global), dim=1))
def collect_local_fm(self, image_global, patches, ratio, top_lefts, oped, batch_size, global_model=None, template=None, n_patch_all=None):
'''
patches: 1 patch
top_lefts: all top-left
oped: [start, end)
'''
with torch.no_grad():
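            # the global branch is evaluated only once per image (patch_n == 0);
            # its cached feature maps are cropped per patch below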
if self.patch_n == 0:
self.c2_g, self.c3_g, self.c4_g, self.c5_g = global_model.module.resnet_global.forward(image_global)
self.output_g, self.ps0_g, self.ps1_g, self.ps2_g, self.ps3_g = global_model.module.fpn_global.forward(self.c2_g, self.c3_g, self.c4_g, self.c5_g)
# self.output_g = F.interpolate(self.output_g, image_global.size()[2:], mode='nearest')
self.patch_n += patches.size()[0]
self.patch_n %= n_patch_all
self.resnet_local.eval()
self.fpn_local.eval()
c2, c3, c4, c5 = self.resnet_local.forward(patches)
# global's 1x patch cat
output, ps0, ps1, ps2, ps3 = self.fpn_local.forward(
c2, c3, c4, c5,
self._crop_global(self.c2_g, top_lefts[oped[0]:oped[1]], ratio),
c3_ext=self._crop_global(self.c3_g, top_lefts[oped[0]:oped[1]], ratio),
c4_ext=self._crop_global(self.c4_g, top_lefts[oped[0]:oped[1]], ratio),
c5_ext=self._crop_global(self.c5_g, top_lefts[oped[0]:oped[1]], ratio),
ps0_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps0_g ],
ps1_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps1_g ],
ps2_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps2_g ]
)
# output = F.interpolate(output, patches.size()[2:], mode='nearest')
self.c2_b = self._merge_local(c2, self.c2_b, self.c2_g, top_lefts, oped, ratio, template)
self.c3_b = self._merge_local(c3, self.c3_b, self.c3_g, top_lefts, oped, ratio, template)
self.c4_b = self._merge_local(c4, self.c4_b, self.c4_g, top_lefts, oped, ratio, template)
self.c5_b = self._merge_local(c5, self.c5_b, self.c5_g, top_lefts, oped, ratio, template)
self.ps00_b = self._merge_local(ps0[0], self.ps00_b, self.ps0_g[0], top_lefts, oped, ratio, template)
self.ps01_b = self._merge_local(ps0[1], self.ps01_b, self.ps0_g[1], top_lefts, oped, ratio, template)
self.ps02_b = self._merge_local(ps0[2], self.ps02_b, self.ps0_g[2], top_lefts, oped, ratio, template)
self.ps03_b = self._merge_local(ps0[3], self.ps03_b, self.ps0_g[3], top_lefts, oped, ratio, template)
self.ps10_b = self._merge_local(ps1[0], self.ps10_b, self.ps1_g[0], top_lefts, oped, ratio, template)
self.ps11_b = self._merge_local(ps1[1], self.ps11_b, self.ps1_g[1], top_lefts, oped, ratio, template)
self.ps12_b = self._merge_local(ps1[2], self.ps12_b, self.ps1_g[2], top_lefts, oped, ratio, template)
self.ps13_b = self._merge_local(ps1[3], self.ps13_b, self.ps1_g[3], top_lefts, oped, ratio, template)
self.ps20_b = self._merge_local(ps2[0], self.ps20_b, self.ps2_g[0], top_lefts, oped, ratio, template)
self.ps21_b = self._merge_local(ps2[1], self.ps21_b, self.ps2_g[1], top_lefts, oped, ratio, template)
self.ps22_b = self._merge_local(ps2[2], self.ps22_b, self.ps2_g[2], top_lefts, oped, ratio, template)
self.ps23_b = self._merge_local(ps2[3], self.ps23_b, self.ps2_g[3], top_lefts, oped, ratio, template)
self.ps3_b.append(ps3.cpu())
# self.output_b.append(output.cpu()) # each output is 1, 7, h, w
if self.patch_n == 0:
# merged all patches into an image
self.c2_l.append(self.c2_b); self.c3_l.append(self.c3_b); self.c4_l.append(self.c4_b); self.c5_l.append(self.c5_b);
self.ps00_l.append(self.ps00_b); self.ps01_l.append(self.ps01_b); self.ps02_l.append(self.ps02_b); self.ps03_l.append(self.ps03_b)
self.ps10_l.append(self.ps10_b); self.ps11_l.append(self.ps11_b); self.ps12_l.append(self.ps12_b); self.ps13_l.append(self.ps13_b)
self.ps20_l.append(self.ps20_b); self.ps21_l.append(self.ps21_b); self.ps22_l.append(self.ps22_b); self.ps23_l.append(self.ps23_b)
# collected all ps3 and output of patches as a (b) tensor, append into list
self.ps3_l.append(torch.cat(self.ps3_b, dim=0)); # a list of tensors
# self.output_l.append(torch.cat(self.output_b, dim=0)) # a list of 36, 7, h, w tensors
self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
self.ps3_b = []# ; self.output_b = []
if len(self.c2_l) == batch_size:
self.c2_l = torch.cat(self.c2_l, dim=0)# .cuda()
self.c3_l = torch.cat(self.c3_l, dim=0)# .cuda()
self.c4_l = torch.cat(self.c4_l, dim=0)# .cuda()
self.c5_l = torch.cat(self.c5_l, dim=0)# .cuda()
self.ps00_l = torch.cat(self.ps00_l, dim=0)# .cuda()
self.ps01_l = torch.cat(self.ps01_l, dim=0)# .cuda()
self.ps02_l = torch.cat(self.ps02_l, dim=0)# .cuda()
self.ps03_l = torch.cat(self.ps03_l, dim=0)# .cuda()
self.ps10_l = torch.cat(self.ps10_l, dim=0)# .cuda()
self.ps11_l = torch.cat(self.ps11_l, dim=0)# .cuda()
self.ps12_l = torch.cat(self.ps12_l, dim=0)# .cuda()
self.ps13_l = torch.cat(self.ps13_l, dim=0)# .cuda()
self.ps20_l = torch.cat(self.ps20_l, dim=0)# .cuda()
self.ps21_l = torch.cat(self.ps21_l, dim=0)# .cuda()
self.ps22_l = torch.cat(self.ps22_l, dim=0)# .cuda()
self.ps23_l = torch.cat(self.ps23_l, dim=0)# .cuda()
self.ps0_l = [self.ps00_l, self.ps01_l, self.ps02_l, self.ps03_l]
self.ps1_l = [self.ps10_l, self.ps11_l, self.ps12_l, self.ps13_l]
self.ps2_l = [self.ps20_l, self.ps21_l, self.ps22_l, self.ps23_l]
# self.ps3_l = torch.cat(self.ps3_l, dim=0)# .cuda()
return self.ps3_l, output# self.output_l
def forward(self, image_global, patches, top_lefts, ratio, mode=1, global_model=None, n_patch=None):
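        # mode 1: train the global branch alone
        # mode 2: train the local branch, conditioning each patch on cropped global features
        # otherwise: retrain the global branch conditioned on the cached local feature maps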
if mode == 1:
# train global model
c2_g, c3_g, c4_g, c5_g = self.resnet_global.forward(image_global)
output_g, ps0_g, ps1_g, ps2_g, ps3_g = self.fpn_global.forward(c2_g, c3_g, c4_g, c5_g)
# imsize = image_global.size()[2:]
# output_g = F.interpolate(output_g, imsize, mode='nearest')
return output_g, None
elif mode == 2:
# train global2local model
with torch.no_grad():
if self.patch_n == 0:
# calculate global images only if patches belong to a new set of global images (when self.patch_n % n_patch == 0)
self.c2_g, self.c3_g, self.c4_g, self.c5_g = self.resnet_global.forward(image_global)
self.output_g, self.ps0_g, self.ps1_g, self.ps2_g, self.ps3_g = self.fpn_global.forward(self.c2_g, self.c3_g, self.c4_g, self.c5_g)
# imsize_glb = image_global.size()[2:]
# self.output_g = F.interpolate(self.output_g, imsize_glb, mode='nearest')
self.patch_n += patches.size()[0]
self.patch_n %= n_patch
# train local model #######################################
c2_l, c3_l, c4_l, c5_l = self.resnet_local.forward(patches)
# global's 1x patch cat
output_l, ps0_l, ps1_l, ps2_l, ps3_l = self.fpn_local.forward(c2_l, c3_l, c4_l, c5_l,
self._crop_global(self.c2_g, top_lefts, ratio),
self._crop_global(self.c3_g, top_lefts, ratio),
self._crop_global(self.c4_g, top_lefts, ratio),
self._crop_global(self.c5_g, top_lefts, ratio),
[ self._crop_global(f, top_lefts, ratio) for f in self.ps0_g ],
[ self._crop_global(f, top_lefts, ratio) for f in self.ps1_g ],
[ self._crop_global(f, top_lefts, ratio) for f in self.ps2_g ]
)
# imsize = patches.size()[2:]
# output_l = F.interpolate(output_l, imsize, mode='nearest')
ps3_g2l = self._crop_global(self.ps3_g, top_lefts, ratio)[0] # only calculate loss on 1x
ps3_g2l = F.interpolate(ps3_g2l, size=ps3_l.size()[2:], **self._up_kwargs)
output = self.ensemble(ps3_l, ps3_g2l)
# output = F.interpolate(output, imsize, mode='nearest')
return output, self.output_g, output_l, self.mse(ps3_l, ps3_g2l)
else:
# train local2global model
c2_g, c3_g, c4_g, c5_g = self.resnet_global.forward(image_global)
# local patch cat into global
output_g, ps0_g, ps1_g, ps2_g, ps3_g = self.fpn_global.forward(c2_g, c3_g, c4_g, c5_g, c2_ext=self.c2_l, c3_ext=self.c3_l, c4_ext=self.c4_l, c5_ext=self.c5_l, ps0_ext=self.ps0_l, ps1_ext=self.ps1_l, ps2_ext=self.ps2_l)
# imsize = image_global.size()[2:]
# output_g = F.interpolate(output_g, imsize, mode='nearest')
self.clear_cache()
return output_g, ps3_g
|
[
"torch.nn.MSELoss",
"torch.stack",
"torch.nn.functional.grid_sample",
"torch.nn.Conv2d",
"numpy.zeros",
"torch.cat",
"torch.nn.init.normal_",
"torch.nn.init.constant_",
"torch.zeros",
"torch.nn.functional.interpolate",
"torch.no_grad",
"numpy.round",
"torch.from_numpy"
] |
[((328, 384), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(2048, 256, kernel_size=1, stride=1, padding=0)\n', (337, 384), True, 'import torch.nn as nn\n'), ((453, 509), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(1024, 256, kernel_size=1, stride=1, padding=0)\n', (462, 509), True, 'import torch.nn as nn\n'), ((535, 590), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(512, 256, kernel_size=1, stride=1, padding=0)\n', (544, 590), True, 'import torch.nn as nn\n'), ((616, 671), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(256, 256, kernel_size=1, stride=1, padding=0)\n', (625, 671), True, 'import torch.nn as nn\n'), ((721, 776), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 256, kernel_size=3, stride=1, padding=1)\n', (730, 776), True, 'import torch.nn as nn\n'), ((802, 857), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 256, kernel_size=3, stride=1, padding=1)\n', (811, 857), True, 'import torch.nn as nn\n'), ((883, 938), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 256, kernel_size=3, stride=1, padding=1)\n', (892, 938), True, 'import torch.nn as nn\n'), ((964, 1019), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 256, kernel_size=3, stride=1, padding=1)\n', (973, 1019), True, 'import torch.nn as nn\n'), ((1045, 1100), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 128, kernel_size=3, stride=1, padding=1)\n', (1054, 1100), True, 'import torch.nn as nn\n'), ((1126, 1181), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 128, kernel_size=3, stride=1, padding=1)\n', (1135, 1181), True, 'import torch.nn as nn\n'), ((1207, 1262), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 128, kernel_size=3, stride=1, padding=1)\n', (1216, 1262), True, 'import torch.nn as nn\n'), ((1288, 1343), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 128, kernel_size=3, stride=1, padding=1)\n', (1297, 1343), True, 'import torch.nn as nn\n'), ((1394, 1458), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * 4)', 'numClass'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(128 * 4, numClass, kernel_size=3, stride=1, padding=1)\n', (1403, 1458), True, 'import torch.nn as nn\n'), ((1584, 1644), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048 * 2)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(2048 * 2, 256, kernel_size=1, stride=1, padding=0)\n', (1593, 1644), True, 'import torch.nn as nn\n'), ((1715, 1775), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024 * 2)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(1024 * 2, 256, kernel_size=1, stride=1, padding=0)\n', (1724, 1775), True, 'import torch.nn as nn\n'), ((1803, 1862), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512 * 2)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': 
'(0)'}), '(512 * 2, 256, kernel_size=1, stride=1, padding=0)\n', (1812, 1862), True, 'import torch.nn as nn\n'), ((1890, 1949), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(256 * 2, 256, kernel_size=1, stride=1, padding=0)\n', (1899, 1949), True, 'import torch.nn as nn\n'), ((2001, 2060), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 256, kernel_size=3, stride=1, padding=1)\n', (2010, 2060), True, 'import torch.nn as nn\n'), ((2088, 2147), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 256, kernel_size=3, stride=1, padding=1)\n', (2097, 2147), True, 'import torch.nn as nn\n'), ((2175, 2234), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 256, kernel_size=3, stride=1, padding=1)\n', (2184, 2234), True, 'import torch.nn as nn\n'), ((2262, 2321), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 256, kernel_size=3, stride=1, padding=1)\n', (2271, 2321), True, 'import torch.nn as nn\n'), ((2349, 2408), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 128, kernel_size=3, stride=1, padding=1)\n', (2358, 2408), True, 'import torch.nn as nn\n'), ((2436, 2495), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 128, kernel_size=3, stride=1, padding=1)\n', (2445, 2495), True, 'import torch.nn as nn\n'), ((2523, 2582), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 128, kernel_size=3, stride=1, padding=1)\n', (2532, 2582), True, 'import torch.nn as nn\n'), ((2610, 2669), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * 2)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * 2, 128, kernel_size=3, stride=1, padding=1)\n', (2619, 2669), True, 'import torch.nn as nn\n'), ((2690, 2757), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * 4 * 2)', '(128 * 4)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(128 * 4 * 2, 128 * 4, kernel_size=3, stride=1, padding=1)\n', (2699, 2757), True, 'import torch.nn as nn\n'), ((2841, 2890), 'torch.nn.functional.interpolate', 'F.interpolate', (['p5'], {'size': '(H, W)'}), '(p5, size=(H, W), **self._up_kwargs)\n', (2854, 2890), True, 'import torch.nn.functional as F\n'), ((2904, 2953), 'torch.nn.functional.interpolate', 'F.interpolate', (['p4'], {'size': '(H, W)'}), '(p4, size=(H, W), **self._up_kwargs)\n', (2917, 2953), True, 'import torch.nn.functional as F\n'), ((2967, 3016), 'torch.nn.functional.interpolate', 'F.interpolate', (['p3'], {'size': '(H, W)'}), '(p3, size=(H, W), **self._up_kwargs)\n', (2980, 3016), True, 'import torch.nn.functional as F\n'), ((3032, 3066), 'torch.cat', 'torch.cat', (['[p5, p4, p3, p2]'], {'dim': '(1)'}), '([p5, p4, p3, p2], dim=1)\n', (3041, 3066), False, 'import torch\n'), ((6465, 6528), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048 * fold)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(2048 * fold, 256, kernel_size=1, stride=1, padding=0)\n', (6474, 6528), True, 'import torch.nn as nn\n'), ((6601, 6664), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024 * fold)', '(256)'], 
{'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(1024 * fold, 256, kernel_size=1, stride=1, padding=0)\n', (6610, 6664), True, 'import torch.nn as nn\n'), ((6690, 6752), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512 * fold)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(512 * fold, 256, kernel_size=1, stride=1, padding=0)\n', (6699, 6752), True, 'import torch.nn as nn\n'), ((6778, 6840), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(256 * fold, 256, kernel_size=1, stride=1, padding=0)\n', (6787, 6840), True, 'import torch.nn as nn\n'), ((6904, 6966), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 256, kernel_size=3, stride=1, padding=1)\n', (6913, 6966), True, 'import torch.nn as nn\n'), ((6992, 7054), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 256, kernel_size=3, stride=1, padding=1)\n', (7001, 7054), True, 'import torch.nn as nn\n'), ((7080, 7142), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 256, kernel_size=3, stride=1, padding=1)\n', (7089, 7142), True, 'import torch.nn as nn\n'), ((7168, 7230), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 256, kernel_size=3, stride=1, padding=1)\n', (7177, 7230), True, 'import torch.nn as nn\n'), ((7270, 7332), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 128, kernel_size=3, stride=1, padding=1)\n', (7279, 7332), True, 'import torch.nn as nn\n'), ((7358, 7420), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 128, kernel_size=3, stride=1, padding=1)\n', (7367, 7420), True, 'import torch.nn as nn\n'), ((7446, 7508), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 128, kernel_size=3, stride=1, padding=1)\n', (7455, 7508), True, 'import torch.nn as nn\n'), ((7534, 7596), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * fold)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256 * fold, 128, kernel_size=3, stride=1, padding=1)\n', (7543, 7596), True, 'import torch.nn as nn\n'), ((7676, 7746), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * 4 * fold)', '(128 * 4)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(128 * 4 * fold, 128 * 4, kernel_size=3, stride=1, padding=1)\n', (7685, 7746), True, 'import torch.nn as nn\n'), ((7765, 7829), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * 4)', 'numClass'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(128 * 4, numClass, kernel_size=3, stride=1, padding=1)\n', (7774, 7829), True, 'import torch.nn as nn\n'), ((7917, 7966), 'torch.nn.functional.interpolate', 'F.interpolate', (['p5'], {'size': '(H, W)'}), '(p5, size=(H, W), **self._up_kwargs)\n', (7930, 7966), True, 'import torch.nn.functional as F\n'), ((7980, 8029), 'torch.nn.functional.interpolate', 'F.interpolate', (['p4'], {'size': '(H, W)'}), '(p4, size=(H, W), **self._up_kwargs)\n', (7993, 8029), True, 'import torch.nn.functional as F\n'), ((8043, 8092), 'torch.nn.functional.interpolate', 'F.interpolate', 
(['p3'], {'size': '(H, W)'}), '(p3, size=(H, W), **self._up_kwargs)\n', (8056, 8092), True, 'import torch.nn.functional as F\n'), ((8108, 8142), 'torch.cat', 'torch.cat', (['[p5, p4, p3, p2]'], {'dim': '(1)'}), '([p5, p4, p3, p2], dim=1)\n', (8117, 8142), False, 'import torch\n'), ((12791, 12803), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (12801, 12803), True, 'import torch.nn as nn\n'), ((12834, 12902), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * 4 * 2)', 'numClass'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(128 * 4 * 2, numClass, kernel_size=3, stride=1, padding=1)\n', (12843, 12902), True, 'import torch.nn as nn\n'), ((12909, 12969), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.ensemble_conv.weight'], {'mean': '(0)', 'std': '(0.01)'}), '(self.ensemble_conv.weight, mean=0, std=0.01)\n', (12924, 12969), True, 'import torch.nn as nn\n'), ((15157, 15214), 'numpy.zeros', 'np.zeros', (['((b_bbox,) + sampleSize + (2,))'], {'dtype': 'np.float32'}), '((b_bbox,) + sampleSize + (2,), dtype=np.float32)\n', (15165, 15214), True, 'import numpy as np\n'), ((15668, 15691), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['fm', 'grid'], {}), '(fm, grid)\n', (15681, 15691), True, 'import torch.nn.functional as F\n'), ((16538, 16562), 'torch.stack', 'torch.stack', (['crop'], {'dim': '(0)'}), '(crop, dim=0)\n', (16549, 16562), False, 'import torch\n'), ((3806, 3854), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'size': '(H, W)'}), '(x, size=(H, W), **self._up_kwargs)\n', (3819, 3854), True, 'import torch.nn.functional as F\n'), ((8882, 8930), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'size': '(H, W)'}), '(x, size=(H, W), **self._up_kwargs)\n', (8895, 8930), True, 'import torch.nn.functional as F\n'), ((15111, 15142), 'torch.cat', 'torch.cat', (['([fm] * b_bbox)'], {'dim': '(0)'}), '([fm] * b_bbox, dim=0)\n', (15120, 15142), False, 'import torch\n'), ((17412, 17475), 'torch.nn.functional.interpolate', 'F.interpolate', (['f_local[i:i + 1]'], {'size': '(h, w)'}), '(f_local[i:i + 1], size=(h, w), **self._up_kwargs)\n', (17425, 17475), True, 'import torch.nn.functional as F\n'), ((17535, 17590), 'torch.nn.functional.interpolate', 'F.interpolate', (['template'], {'size': '(H, W)'}), '(template, size=(H, W), **self._up_kwargs)\n', (17548, 17590), True, 'import torch.nn.functional as F\n'), ((17820, 17857), 'torch.cat', 'torch.cat', (['(f_local, f_global)'], {'dim': '(1)'}), '((f_local, f_global), dim=1)\n', (17829, 17857), False, 'import torch\n'), ((18124, 18139), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18137, 18139), False, 'import torch\n'), ((4304, 4334), 'torch.cat', 'torch.cat', (['(c5, c5_ext)'], {'dim': '(1)'}), '((c5, c5_ext), dim=1)\n', (4313, 4334), False, 'import torch\n'), ((4891, 4925), 'torch.cat', 'torch.cat', (['(p5, ps0_ext[0])'], {'dim': '(1)'}), '((p5, ps0_ext[0]), dim=1)\n', (4900, 4925), False, 'import torch\n'), ((4963, 4997), 'torch.cat', 'torch.cat', (['(p4, ps0_ext[1])'], {'dim': '(1)'}), '((p4, ps0_ext[1]), dim=1)\n', (4972, 4997), False, 'import torch\n'), ((5035, 5069), 'torch.cat', 'torch.cat', (['(p3, ps0_ext[2])'], {'dim': '(1)'}), '((p3, ps0_ext[2]), dim=1)\n', (5044, 5069), False, 'import torch\n'), ((5107, 5141), 'torch.cat', 'torch.cat', (['(p2, ps0_ext[3])'], {'dim': '(1)'}), '((p2, ps0_ext[3]), dim=1)\n', (5116, 5141), False, 'import torch\n'), ((5405, 5439), 'torch.cat', 'torch.cat', (['(p5, ps1_ext[0])'], {'dim': '(1)'}), '((p5, ps1_ext[0]), dim=1)\n', (5414, 5439), False, 'import 
torch\n'), ((5477, 5511), 'torch.cat', 'torch.cat', (['(p4, ps1_ext[1])'], {'dim': '(1)'}), '((p4, ps1_ext[1]), dim=1)\n', (5486, 5511), False, 'import torch\n'), ((5549, 5583), 'torch.cat', 'torch.cat', (['(p3, ps1_ext[2])'], {'dim': '(1)'}), '((p3, ps1_ext[2]), dim=1)\n', (5558, 5583), False, 'import torch\n'), ((5621, 5655), 'torch.cat', 'torch.cat', (['(p2, ps1_ext[3])'], {'dim': '(1)'}), '((p2, ps1_ext[3]), dim=1)\n', (5630, 5655), False, 'import torch\n'), ((5897, 5931), 'torch.cat', 'torch.cat', (['(p5, ps2_ext[0])'], {'dim': '(1)'}), '((p5, ps2_ext[0]), dim=1)\n', (5906, 5931), False, 'import torch\n'), ((5954, 5988), 'torch.cat', 'torch.cat', (['(p4, ps2_ext[1])'], {'dim': '(1)'}), '((p4, ps2_ext[1]), dim=1)\n', (5963, 5988), False, 'import torch\n'), ((6011, 6045), 'torch.cat', 'torch.cat', (['(p3, ps2_ext[2])'], {'dim': '(1)'}), '((p3, ps2_ext[2]), dim=1)\n', (6020, 6045), False, 'import torch\n'), ((6068, 6102), 'torch.cat', 'torch.cat', (['(p2, ps2_ext[3])'], {'dim': '(1)'}), '((p2, ps2_ext[3]), dim=1)\n', (6077, 6102), False, 'import torch\n'), ((13072, 13115), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'mean': '(0)', 'std': '(0.01)'}), '(m.weight, mean=0, std=0.01)\n', (13087, 13115), True, 'import torch.nn as nn\n'), ((13151, 13179), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (13168, 13179), True, 'import torch.nn as nn\n'), ((13261, 13304), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'mean': '(0)', 'std': '(0.01)'}), '(m.weight, mean=0, std=0.01)\n', (13276, 13304), True, 'import torch.nn as nn\n'), ((13340, 13368), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (13357, 13368), True, 'import torch.nn as nn\n'), ((15623, 15645), 'torch.from_numpy', 'torch.from_numpy', (['grid'], {}), '(grid)\n', (15639, 15645), False, 'import torch\n'), ((15893, 15915), 'numpy.round', 'np.round', (['(H * ratio[0])'], {}), '(H * ratio[0])\n', (15901, 15915), True, 'import numpy as np\n'), ((15922, 15944), 'numpy.round', 'np.round', (['(W * ratio[1])'], {}), '(W * ratio[1])\n', (15930, 15944), True, 'import numpy as np\n'), ((17146, 17168), 'numpy.round', 'np.round', (['(H * ratio[0])'], {}), '(H * ratio[0])\n', (17154, 17168), True, 'import numpy as np\n'), ((17175, 17197), 'numpy.round', 'np.round', (['(W * ratio[1])'], {}), '(W * ratio[1])\n', (17183, 17197), True, 'import numpy as np\n'), ((23010, 23037), 'torch.cat', 'torch.cat', (['self.c2_l'], {'dim': '(0)'}), '(self.c2_l, dim=0)\n', (23019, 23037), False, 'import torch\n'), ((23075, 23102), 'torch.cat', 'torch.cat', (['self.c3_l'], {'dim': '(0)'}), '(self.c3_l, dim=0)\n', (23084, 23102), False, 'import torch\n'), ((23140, 23167), 'torch.cat', 'torch.cat', (['self.c4_l'], {'dim': '(0)'}), '(self.c4_l, dim=0)\n', (23149, 23167), False, 'import torch\n'), ((23205, 23232), 'torch.cat', 'torch.cat', (['self.c5_l'], {'dim': '(0)'}), '(self.c5_l, dim=0)\n', (23214, 23232), False, 'import torch\n'), ((23272, 23301), 'torch.cat', 'torch.cat', (['self.ps00_l'], {'dim': '(0)'}), '(self.ps00_l, dim=0)\n', (23281, 23301), False, 'import torch\n'), ((23341, 23370), 'torch.cat', 'torch.cat', (['self.ps01_l'], {'dim': '(0)'}), '(self.ps01_l, dim=0)\n', (23350, 23370), False, 'import torch\n'), ((23410, 23439), 'torch.cat', 'torch.cat', (['self.ps02_l'], {'dim': '(0)'}), '(self.ps02_l, dim=0)\n', (23419, 23439), False, 'import torch\n'), ((23479, 23508), 'torch.cat', 'torch.cat', (['self.ps03_l'], {'dim': '(0)'}), '(self.ps03_l, 
dim=0)\n', (23488, 23508), False, 'import torch\n'), ((23548, 23577), 'torch.cat', 'torch.cat', (['self.ps10_l'], {'dim': '(0)'}), '(self.ps10_l, dim=0)\n', (23557, 23577), False, 'import torch\n'), ((23617, 23646), 'torch.cat', 'torch.cat', (['self.ps11_l'], {'dim': '(0)'}), '(self.ps11_l, dim=0)\n', (23626, 23646), False, 'import torch\n'), ((23686, 23715), 'torch.cat', 'torch.cat', (['self.ps12_l'], {'dim': '(0)'}), '(self.ps12_l, dim=0)\n', (23695, 23715), False, 'import torch\n'), ((23755, 23784), 'torch.cat', 'torch.cat', (['self.ps13_l'], {'dim': '(0)'}), '(self.ps13_l, dim=0)\n', (23764, 23784), False, 'import torch\n'), ((23824, 23853), 'torch.cat', 'torch.cat', (['self.ps20_l'], {'dim': '(0)'}), '(self.ps20_l, dim=0)\n', (23833, 23853), False, 'import torch\n'), ((23893, 23922), 'torch.cat', 'torch.cat', (['self.ps21_l'], {'dim': '(0)'}), '(self.ps21_l, dim=0)\n', (23902, 23922), False, 'import torch\n'), ((23962, 23991), 'torch.cat', 'torch.cat', (['self.ps22_l'], {'dim': '(0)'}), '(self.ps22_l, dim=0)\n', (23971, 23991), False, 'import torch\n'), ((24031, 24060), 'torch.cat', 'torch.cat', (['self.ps23_l'], {'dim': '(0)'}), '(self.ps23_l, dim=0)\n', (24040, 24060), False, 'import torch\n'), ((4395, 4425), 'torch.cat', 'torch.cat', (['(c4, c4_ext)'], {'dim': '(1)'}), '((c4, c4_ext), dim=1)\n', (4404, 4425), False, 'import torch\n'), ((4487, 4517), 'torch.cat', 'torch.cat', (['(c3, c3_ext)'], {'dim': '(1)'}), '((c3, c3_ext), dim=1)\n', (4496, 4517), False, 'import torch\n'), ((4579, 4609), 'torch.cat', 'torch.cat', (['(c2, c2_ext)'], {'dim': '(1)'}), '((c2, c2_ext), dim=1)\n', (4588, 4609), False, 'import torch\n'), ((16179, 16208), 'numpy.round', 'np.round', (['(top_lefts[i][0] * H)'], {}), '(top_lefts[i][0] * H)\n', (16187, 16208), True, 'import numpy as np\n'), ((16215, 16244), 'numpy.round', 'np.round', (['(top_lefts[i][1] * W)'], {}), '(top_lefts[i][1] * W)\n', (16223, 16244), True, 'import numpy as np\n'), ((17094, 17119), 'torch.zeros', 'torch.zeros', (['(1, c, H, W)'], {}), '((1, c, H, W))\n', (17105, 17119), False, 'import torch\n'), ((17286, 17319), 'numpy.round', 'np.round', (['(H * top_lefts[index][0])'], {}), '(H * top_lefts[index][0])\n', (17294, 17319), True, 'import numpy as np\n'), ((17326, 17359), 'numpy.round', 'np.round', (['(W * top_lefts[index][1])'], {}), '(W * top_lefts[index][1])\n', (17334, 17359), True, 'import numpy as np\n'), ((22351, 22379), 'torch.cat', 'torch.cat', (['self.ps3_b'], {'dim': '(0)'}), '(self.ps3_b, dim=0)\n', (22360, 22379), False, 'import torch\n'), ((25011, 25026), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25024, 25026), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 21:24:36 2019
@author: wmy
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from keras import backend as K
from keras.losses import mean_absolute_error, mean_squared_error
from keras.models import load_model
from keras.optimizers import Adam
import random
import os
from model import wdsr_a, wdsr_b
from utils import DataLoader
model = wdsr_b(scale=4, num_res_blocks=32)
model.load_weights('./weights/wdsr-b-32-x4.h5')
data_loader = DataLoader(scale=4)
def evaluate_test(model, setpath='datasets/train', difficulty='easy', name='evaluate'):
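    # pick a random image, synthesize a degraded LR input (Gaussian blur plus an extra
    # hidden downscale whose strength grows with the difficulty setting), super-resolve
    # it, and save the LR / SR / HR triplet for visual comparison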
images = data_loader.search(setpath)
image = random.choice(images)
hr = data_loader.imread(image)
resize = (hr.size[0]//data_loader.scale, hr.size[1]//data_loader.scale)
hidden_scale = random.uniform(1, 3)
radius = random.uniform(1, 3)
if difficulty=='easy':
hidden_scale = random.uniform(1, 1.5)
radius = random.uniform(1, 1.5)
pass
elif difficulty=='normal':
hidden_scale = random.uniform(1.5, 2)
radius = random.uniform(1.5, 2)
pass
elif difficulty=='hard':
hidden_scale = random.uniform(2, 2.5)
radius = random.uniform(2, 2.5)
pass
elif difficulty=='lunatic':
hidden_scale = random.uniform(2.5, 3)
radius = random.uniform(2.5, 3)
pass
else:
raise ValueError("unknown difficulty")
hidden_resize = (int(resize[0]/hidden_scale), int(resize[1]/hidden_scale))
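    # degrade: blur, shrink below the nominal LR size, then resize back up so the
    # network still receives the expected LR resolution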
lr = data_loader.gaussianblur(hr, radius)
lr = lr.resize(hidden_resize)
lr = lr.resize(resize)
lr_resize = lr.resize(hr.size)
lr = np.asarray(lr)
sr = model.predict(np.array([lr]))[0]
sr = np.clip(sr, 0, 255)
sr = sr.astype('uint8')
lr = Image.fromarray(lr)
sr = Image.fromarray(sr)
lr_resize.save("images/" + name + "_lr.jpg")
sr.save("images/" + name + "_sr.jpg")
hr.save("images/" + name + "_hr.jpg")
pass
evaluate_test(model, difficulty='easy', name='easy')
evaluate_test(model, difficulty='normal', name='normal')
evaluate_test(model, difficulty='hard', name='hard')
evaluate_test(model, difficulty='lunatic', name='lunatic')
|
[
"random.uniform",
"utils.DataLoader",
"numpy.asarray",
"random.choice",
"numpy.clip",
"PIL.Image.fromarray",
"numpy.array",
"model.wdsr_b"
] |
[((461, 495), 'model.wdsr_b', 'wdsr_b', ([], {'scale': '(4)', 'num_res_blocks': '(32)'}), '(scale=4, num_res_blocks=32)\n', (467, 495), False, 'from model import wdsr_a, wdsr_b\n'), ((562, 581), 'utils.DataLoader', 'DataLoader', ([], {'scale': '(4)'}), '(scale=4)\n', (572, 581), False, 'from utils import DataLoader\n'), ((728, 749), 'random.choice', 'random.choice', (['images'], {}), '(images)\n', (741, 749), False, 'import random\n'), ((883, 903), 'random.uniform', 'random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (897, 903), False, 'import random\n'), ((918, 938), 'random.uniform', 'random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (932, 938), False, 'import random\n'), ((1766, 1780), 'numpy.asarray', 'np.asarray', (['lr'], {}), '(lr)\n', (1776, 1780), True, 'import numpy as np\n'), ((1834, 1853), 'numpy.clip', 'np.clip', (['sr', '(0)', '(255)'], {}), '(sr, 0, 255)\n', (1841, 1853), True, 'import numpy as np\n'), ((1893, 1912), 'PIL.Image.fromarray', 'Image.fromarray', (['lr'], {}), '(lr)\n', (1908, 1912), False, 'from PIL import Image\n'), ((1923, 1942), 'PIL.Image.fromarray', 'Image.fromarray', (['sr'], {}), '(sr)\n', (1938, 1942), False, 'from PIL import Image\n'), ((991, 1013), 'random.uniform', 'random.uniform', (['(1)', '(1.5)'], {}), '(1, 1.5)\n', (1005, 1013), False, 'import random\n'), ((1032, 1054), 'random.uniform', 'random.uniform', (['(1)', '(1.5)'], {}), '(1, 1.5)\n', (1046, 1054), False, 'import random\n'), ((1125, 1147), 'random.uniform', 'random.uniform', (['(1.5)', '(2)'], {}), '(1.5, 2)\n', (1139, 1147), False, 'import random\n'), ((1166, 1188), 'random.uniform', 'random.uniform', (['(1.5)', '(2)'], {}), '(1.5, 2)\n', (1180, 1188), False, 'import random\n'), ((1805, 1819), 'numpy.array', 'np.array', (['[lr]'], {}), '([lr])\n', (1813, 1819), True, 'import numpy as np\n'), ((1257, 1279), 'random.uniform', 'random.uniform', (['(2)', '(2.5)'], {}), '(2, 2.5)\n', (1271, 1279), False, 'import random\n'), ((1298, 1320), 'random.uniform', 'random.uniform', (['(2)', '(2.5)'], {}), '(2, 2.5)\n', (1312, 1320), False, 'import random\n'), ((1392, 1414), 'random.uniform', 'random.uniform', (['(2.5)', '(3)'], {}), '(2.5, 3)\n', (1406, 1414), False, 'import random\n'), ((1433, 1455), 'random.uniform', 'random.uniform', (['(2.5)', '(3)'], {}), '(2.5, 3)\n', (1447, 1455), False, 'import random\n')]
|
import os
import time
import numpy as np
import paddle.fluid as fluid
import config as cfg
from nets.attention_model import attention_train_net
from nets.crnn_ctc_model import ctc_train_net
from utils import data_reader
from utils.utility import get_ctc_feeder_data, get_attention_feeder_data
def main():
"""OCR training"""
if cfg.use_model == "crnn_ctc":
train_net = ctc_train_net
get_feeder_data = get_ctc_feeder_data
else:
train_net = attention_train_net
get_feeder_data = get_attention_feeder_data
# define network
sum_cost, error_evaluator, inference_program, model_average = train_net(cfg, cfg.data_shape, cfg.num_classes)
# data reader
train_reader = data_reader.train(batch_size=cfg.batch_size,
prefix_path=cfg.train_prefix,
cycle=cfg.total_step > 0,
model=cfg.use_model)
test_reader = data_reader.test(prefix_path=cfg.test_prefix, model=cfg.use_model)
# prepare environment
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
    # Load the initial model if one is specified
if cfg.init_model:
fluid.load(program=fluid.default_main_program(),
model_path=cfg.init_model,
executor=exe,
var_list=fluid.io.get_program_parameter(fluid.default_main_program()))
print("Init model from: %s." % cfg.init_model)
train_exe = exe
error_evaluator.reset(exe)
if cfg.parallel:
train_exe = fluid.ParallelExecutor(use_cuda=cfg.use_gpu, loss_name=sum_cost.name)
fetch_vars = [sum_cost] + error_evaluator.metrics
def train_one_batch(data):
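        # run one mini-batch; with ParallelExecutor the per-device results are summed,
        # otherwise the single-device fetch values are used directly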
var_names = [var.name for var in fetch_vars]
if cfg.parallel:
results = train_exe.run(var_names,
feed=get_feeder_data(data, place))
results = [np.array(r).sum() for r in results]
else:
results = exe.run(program=fluid.default_main_program(),
feed=get_feeder_data(data, place),
fetch_list=fetch_vars)
results = [r[0] for r in results]
return results
def test():
error_evaluator.reset(exe)
for data in test_reader():
exe.run(inference_program, feed=get_feeder_data(data, place))
_, test_seq_error = error_evaluator.eval(exe)
return test_seq_error[0]
def save_model():
if not os.path.exists(cfg.model_path):
os.makedirs(cfg.model_path)
fluid.save(program=fluid.default_main_program(),
model_path=os.path.join(cfg.model_path, "model"))
print("Saved model to: %s" % cfg.model_path)
iter_num = 0
stop = False
while not stop:
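        # keep drawing batches from the (cyclic) training reader until cfg.total_step iterations are reached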
total_loss = 0.0
total_seq_error = 0.0
# train a pass
for data in train_reader():
if cfg.total_step < iter_num:
stop = True
break
result = train_one_batch(data)
total_loss += result[0]
total_seq_error += result[2]
iter_num += 1
# training log
if iter_num % cfg.log_period == 0:
print("[%s] - Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f"
% (time.asctime(time.localtime(time.time())), iter_num,
total_loss / (cfg.log_period * cfg.batch_size),
total_seq_error / (cfg.log_period * cfg.batch_size)))
total_loss = 0.0
total_seq_error = 0.0
# evaluate
if iter_num % cfg.eval_period == 0:
if model_average:
with model_average.apply(exe):
test_seq_error = test()
else:
test_seq_error = test()
print("\n[%s] - Iter[%d]; Test seq error: %.3f\n" %
(time.asctime(time.localtime(time.time())), iter_num, test_seq_error))
# save model
if iter_num % cfg.save_model_period == 0:
if model_average:
with model_average.apply(exe):
save_model()
else:
save_model()
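# Hedged summary of the configuration surface (the imported `config` module is not shown
# in this file): the attributes read above are use_model ("crnn_ctc" or "attention"),
# data_shape, num_classes, batch_size, total_step, log_period, eval_period,
# save_model_period, train_prefix, test_prefix, use_gpu, parallel, init_model and
# model_path. Their concrete values are project-specific and are not assumed here.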
if __name__ == "__main__":
main()
|
[
"paddle.fluid.Executor",
"paddle.fluid.CUDAPlace",
"paddle.fluid.default_main_program",
"utils.data_reader.test",
"paddle.fluid.default_startup_program",
"os.makedirs",
"os.path.exists",
"time.time",
"utils.data_reader.train",
"numpy.array",
"paddle.fluid.CPUPlace",
"paddle.fluid.ParallelExecutor",
"os.path.join"
] |
[((722, 847), 'utils.data_reader.train', 'data_reader.train', ([], {'batch_size': 'cfg.batch_size', 'prefix_path': 'cfg.train_prefix', 'cycle': '(cfg.total_step > 0)', 'model': 'cfg.use_model'}), '(batch_size=cfg.batch_size, prefix_path=cfg.train_prefix,\n cycle=cfg.total_step > 0, model=cfg.use_model)\n', (739, 847), False, 'from utils import data_reader\n'), ((973, 1039), 'utils.data_reader.test', 'data_reader.test', ([], {'prefix_path': 'cfg.test_prefix', 'model': 'cfg.use_model'}), '(prefix_path=cfg.test_prefix, model=cfg.use_model)\n', (989, 1039), False, 'from utils import data_reader\n'), ((1145, 1166), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (1159, 1166), True, 'import paddle.fluid as fluid\n'), ((1079, 1097), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (1094, 1097), True, 'import paddle.fluid as fluid\n'), ((1118, 1134), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (1132, 1134), True, 'import paddle.fluid as fluid\n'), ((1179, 1210), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (1208, 1210), True, 'import paddle.fluid as fluid\n'), ((1624, 1693), 'paddle.fluid.ParallelExecutor', 'fluid.ParallelExecutor', ([], {'use_cuda': 'cfg.use_gpu', 'loss_name': 'sum_cost.name'}), '(use_cuda=cfg.use_gpu, loss_name=sum_cost.name)\n', (1646, 1693), True, 'import paddle.fluid as fluid\n'), ((2591, 2621), 'os.path.exists', 'os.path.exists', (['cfg.model_path'], {}), '(cfg.model_path)\n', (2605, 2621), False, 'import os\n'), ((2635, 2662), 'os.makedirs', 'os.makedirs', (['cfg.model_path'], {}), '(cfg.model_path)\n', (2646, 2662), False, 'import os\n'), ((1277, 1305), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (1303, 1305), True, 'import paddle.fluid as fluid\n'), ((2690, 2718), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (2716, 2718), True, 'import paddle.fluid as fluid\n'), ((2750, 2787), 'os.path.join', 'os.path.join', (['cfg.model_path', '"""model"""'], {}), "(cfg.model_path, 'model')\n", (2762, 2787), False, 'import os\n'), ((1445, 1473), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (1471, 1473), True, 'import paddle.fluid as fluid\n'), ((2088, 2116), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (2114, 2116), True, 'import paddle.fluid as fluid\n'), ((2000, 2011), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (2008, 2011), True, 'import numpy as np\n'), ((3452, 3463), 'time.time', 'time.time', ([], {}), '()\n', (3461, 3463), False, 'import time\n'), ((4091, 4102), 'time.time', 'time.time', ([], {}), '()\n', (4100, 4102), False, 'import time\n')]
|
import numpy
import qm3
import qm3.engines.gaussian
import io
import os
import sys
cwd = os.path.abspath( os.path.dirname( sys.argv[0] ) ) + os.sep
mol = qm3.molecule()
mol.pdb_read( open( cwd + "charmm.pdb" ) )
mol.psf_read( open( cwd + "charmm.psf" ) )
mol.guess_atomic_numbers()
print( mol.anum )
print( mol.chrg )
sqm = mol.resn == "WAT"
for a in [ "C6", "C9", "H11", "H12", "H13", "H14", "H15" ]:
sqm[mol.indx["A"][1][a]] = True
sqm = numpy.logical_not( sqm )
smm = mol.sph_sel( sqm, 12 )
sla = [ ( mol.indx["A"][1]["C10"], mol.indx["A"][1]["C6"] ) ]
f = io.StringIO( """%chk=gauss.chk
%mem=2048mb
%nproc=2
#p b3lyp/def2svp qm3_job qm3_guess charge prop=(field,read) scf=direct nosymm fchk
.
1 1
qm3_atoms
qm3_charges
qm3_field
""" )
mol.engines["qm"] = qm3.engines.gaussian.run( mol, f, sqm, smm, sla )
mol.engines["qm"].exe = ". ~/Devel/g09/pgi.imac64/g09.profile; g09 gauss.com"
mol.get_grad()
print( mol.func )
assert( numpy.fabs( mol.func - -697633.7375 ) < 0.001 ), "function error"
print( numpy.linalg.norm( mol.grad ) )
assert( numpy.fabs( numpy.linalg.norm( mol.grad ) - 575.7341 ) < 0.001 ), "gradient error"
print( numpy.linalg.norm( mol.grad[mol.indx["A"][1]["C10"]] ) )
assert( numpy.fabs( numpy.linalg.norm( mol.grad[mol.indx["A"][1]["C10"]] ) - 68.4270 ) < 0.001 ), "QM-LA gradient error"
|
[
"io.StringIO",
"qm3.engines.gaussian.run",
"os.path.dirname",
"numpy.logical_not",
"qm3.molecule",
"numpy.fabs",
"numpy.linalg.norm"
] |
[((163, 177), 'qm3.molecule', 'qm3.molecule', ([], {}), '()\n', (175, 177), False, 'import qm3\n'), ((454, 476), 'numpy.logical_not', 'numpy.logical_not', (['sqm'], {}), '(sqm)\n', (471, 476), False, 'import numpy\n'), ((575, 765), 'io.StringIO', 'io.StringIO', (['"""%chk=gauss.chk\n%mem=2048mb\n%nproc=2\n#p b3lyp/def2svp qm3_job qm3_guess charge prop=(field,read) scf=direct nosymm fchk\n\n.\n\n1 1\nqm3_atoms\n\nqm3_charges\n\nqm3_field\n"""'], {}), '(\n """%chk=gauss.chk\n%mem=2048mb\n%nproc=2\n#p b3lyp/def2svp qm3_job qm3_guess charge prop=(field,read) scf=direct nosymm fchk\n\n.\n\n1 1\nqm3_atoms\n\nqm3_charges\n\nqm3_field\n"""\n )\n', (586, 765), False, 'import io\n'), ((778, 825), 'qm3.engines.gaussian.run', 'qm3.engines.gaussian.run', (['mol', 'f', 'sqm', 'smm', 'sla'], {}), '(mol, f, sqm, smm, sla)\n', (802, 825), False, 'import qm3\n'), ((948, 983), 'numpy.fabs', 'numpy.fabs', (['(mol.func - -697633.7375)'], {}), '(mol.func - -697633.7375)\n', (958, 983), False, 'import numpy\n'), ((1021, 1048), 'numpy.linalg.norm', 'numpy.linalg.norm', (['mol.grad'], {}), '(mol.grad)\n', (1038, 1048), False, 'import numpy\n'), ((1151, 1203), 'numpy.linalg.norm', 'numpy.linalg.norm', (["mol.grad[mol.indx['A'][1]['C10']]"], {}), "(mol.grad[mol.indx['A'][1]['C10']])\n", (1168, 1203), False, 'import numpy\n'), ((113, 141), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (128, 141), False, 'import os\n'), ((1073, 1100), 'numpy.linalg.norm', 'numpy.linalg.norm', (['mol.grad'], {}), '(mol.grad)\n', (1090, 1100), False, 'import numpy\n'), ((1228, 1280), 'numpy.linalg.norm', 'numpy.linalg.norm', (["mol.grad[mol.indx['A'][1]['C10']]"], {}), "(mol.grad[mol.indx['A'][1]['C10']])\n", (1245, 1280), False, 'import numpy\n')]
|
"""
Variant of the base class with parallelized, pipelined, and vectorized operations.
The technique pertaining to convolution was reused from https://stackoverflow.com/a/36968434.
Try executing this program with (the other variants will crawl):
python3 conway_v3.py --board-size 160 --interval 20 --configuration patterns/garden-of-eden.cells 30 30
"""
import numpy as np
import dask.array as da
from scipy.ndimage import convolve
from conway_base import Cell, ConwayBase
class ConwayV3(ConwayBase):
def create_buffers(self):
self.board = da.from_array(self.board, chunks=("auto", "auto"))
self._mask = np.ones((3, 3))
self._mask[1, 1] = 0
def _process_cell(self, block, block_id=None):
rows, cols = block.shape
start_row = block_id[0] * rows
start_col = block_id[1] * cols
# We presume that this slicing will fit into memory.
board_slice = self.board[start_row:(start_row + rows), start_col:(start_col + cols)].compute()
# Apply the rules of the game.
block[np.logical_or(block < 2, block > 3)] = Cell.DEAD
block[block == 3] = Cell.LIVE
block[block == 2] = board_slice[block == 2]
return block
def prepare_next_board(self, steps):
for _ in range(steps):
num_live_neighbors = self.board.map_overlap(convolve, depth=1, boundary='none',
weights=self._mask, mode='constant', cval=0)
next_board = num_live_neighbors.map_blocks(self._process_cell, dtype=np.int).compute()
self.board = da.from_array(next_board, chunks=("auto", "auto"))
return next_board
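# A minimal, self-contained sketch of the convolution trick described in the module
# docstring, shown on a plain NumPy board without the dask chunking and pipelining.
# The helper name below is hypothetical and is not part of the original class.
def _neighbour_counts(board):
    """Count live neighbours of every cell in one vectorised convolution."""
    mask = np.ones((3, 3))
    mask[1, 1] = 0  # a cell is not its own neighbour
    return convolve(board, weights=mask, mode='constant', cval=0)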
if __name__ == '__main__':
game = ConwayV3(ConwayV3.parse_command_line_args())
game.simulate()
|
[
"dask.array.from_array",
"numpy.logical_or",
"numpy.ones"
] |
[((557, 607), 'dask.array.from_array', 'da.from_array', (['self.board'], {'chunks': "('auto', 'auto')"}), "(self.board, chunks=('auto', 'auto'))\n", (570, 607), True, 'import dask.array as da\n'), ((629, 644), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (636, 644), True, 'import numpy as np\n'), ((1055, 1090), 'numpy.logical_or', 'np.logical_or', (['(block < 2)', '(block > 3)'], {}), '(block < 2, block > 3)\n', (1068, 1090), True, 'import numpy as np\n'), ((1606, 1656), 'dask.array.from_array', 'da.from_array', (['next_board'], {'chunks': "('auto', 'auto')"}), "(next_board, chunks=('auto', 'auto'))\n", (1619, 1656), True, 'import dask.array as da\n')]
|
"""
This script utilises the ground truth label's 2D bounding box to
crop out the the points of interest and feed it into the model so that
it can predict a 3D bounding box for each of the 2D detections
The script will plot the results of the 3D bounding box onto the image
and display them alongside the groundtruth image and it's 3D bounding box.
This is to help with qualitative assesment.
Images to be evaluated should be placed in eval/image_2 folder
Eval Results for each file in the eval/image_2 folder will be saved to "eval/eval-results/"
FLAGS:
--show-single
Show 3D BoundingBox detections one at a time
--hide-imgs
Hides Display of ground truth and bounding box
"""
import os
import cv2
import errno
import argparse
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torchvision.models as models
from lib.DataUtils import *
from lib.Utils import *
from tqdm import tqdm
from lib import Model, ClassAverages
def main():
exp_no = 34
print ("Generating evaluation results for experiment No. ",exp_no)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
weights_path = os.path.abspath(os.path.dirname(__file__)) + '/weights/exp_' + str(exp_no) + '/'
weight_list = [x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')]
# Create out folder for pred-labels and pred-imgs
for x in range(len(weight_list)):
check_and_make_dir('Kitti/results/validation/labels/exp_' + str(exp_no) +"/epoch_%s/" % str(x+1))
check_and_make_dir('Kitti/results/validation/pred_imgs/exp_' + str(exp_no) )
if len(weight_list) == 0:
print('We could not find any model weights to load, please train the model first!')
exit()
for model_weight in weight_list:
epoch_no = model_weight.split(".")[0].split('_')[-1]
print ("Evaluating for Epoch: ",epoch_no)
print ('Loading model with %s'%model_weight)
my_vgg = models.vgg19_bn(pretrained=True)
model = Model.Model(features=my_vgg.features, bins=2)
if use_cuda:
checkpoint = torch.load(weights_path + model_weight)
else:
            checkpoint = torch.load(weights_path + model_weight, map_location='cpu')  # map GPU-saved weights onto CPU
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
# Load Test Images from eval folder
dataset = Dataset(os.path.abspath(os.path.dirname(__file__)) + 'Kitti/validation')
all_images = dataset.all_objects()
print ("Length of eval data",len(all_images))
averages = ClassAverages.ClassAverages()
all_images = dataset.all_objects()
print ("Model is commencing predictions.....")
for key in tqdm(sorted(all_images.keys())):
data = all_images[key]
truth_img = data['Image']
img = np.copy(truth_img)
imgGT = np.copy(truth_img)
objects = data['Objects']
cam_to_img = data['Calib']
filename = "Kitti/results/validation/labels/exp_" +str(exp_no) + '/epoch_' + str(epoch_no) + "/" +str(key)+".txt"
check_and_make_dir(filename)
file = open(filename,"w")
for object in objects:
label = object.label
theta_ray = object.theta_ray
input_img = object.img
input_tensor = torch.zeros([1,3,224,224])
input_tensor[0,:,:,:] = input_img
input_tensor.cuda()
[orient, conf, dim] = model(input_tensor)
orient = orient.cpu().data.numpy()[0, :, :]
conf = conf.cpu().data.numpy()[0, :]
dim = dim.cpu().data.numpy()[0, :]
dim += averages.get_item(label['Class'])
argmax = np.argmax(conf)
orient = orient[argmax, :]
cos = orient[0]
sin = orient[1]
alpha = np.arctan2(sin, cos)
alpha += dataset.angle_bins[argmax]
alpha -= np.pi
location = plot_regressed_3d_bbox_2(img, truth_img, cam_to_img, label['Box_2D'], dim, alpha, theta_ray)
locationGT = plot_regressed_3d_bbox_2(imgGT, truth_img, cam_to_img, label['Box_2D'], label['Dimensions'], label['Alpha'], theta_ray)
file.write( \
# Class label
str(label['Class']) + " -1 -1 " + \
# Alpha
str(round(alpha,2)) + " " + \
# 2D Bounding box coordinates
str(label['Box_2D'][0][0]) + " " + str(label['Box_2D'][0][1]) + " " + \
str(label['Box_2D'][1][0]) + " " + str(label['Box_2D'][1][1]) + " " + \
# 3D Box Dimensions
str(' '.join(str(round(e,2)) for e in dim)) + " " + \
# 3D Box Location
str(' '.join(str(round(e,2)) for e in location)) + " 0.0 " + \
# Ry
str(round(theta_ray + alpha ,2)) + " " + \
# Confidence
str( round(max(softmax(conf)),2) ) + "\n"
)
# print('Estimated pose: %s'%location)
# print('Truth pose: %s'%label['Location'])
# print('-------------')
file.close()
numpy_vertical = np.concatenate((truth_img,imgGT, img), axis=0)
image_name = 'Kitti/results/validation/pred_imgs/exp_' + str(exp_no) + '/' + str(key) + "/epoch_" + epoch_no + '_' + str(key) + '.jpg'
check_and_make_dir(image_name)
cv2.imwrite(image_name, numpy_vertical)
print ("Finished.")
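# A small standalone restatement of the MultiBin orientation decode performed above,
# kept here as a hedged reference: pick the most confident bin, recover the in-bin angle
# with arctan2, add the bin centre and subtract pi, exactly as in the loop above.
# `angle_bins` is whatever the Dataset object exposes; the helper itself is hypothetical.
def decode_alpha(orient, conf, angle_bins):
    argmax = np.argmax(conf)
    cos, sin = orient[argmax, 0], orient[argmax, 1]
    return np.arctan2(sin, cos) + angle_bins[argmax] - np.pi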
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--show-single", action="store_true",
                        help="Show 3D BoundingBox detections one at a time")
parser.add_argument("--hide-imgs", action="store_true",
help="Hide display of visual results")
FLAGS = parser.parse_args()
main()
|
[
"torchvision.models.vgg19_bn",
"argparse.ArgumentParser",
"numpy.copy",
"numpy.concatenate",
"numpy.argmax",
"cv2.imwrite",
"torch.load",
"numpy.arctan2",
"lib.ClassAverages.ClassAverages",
"os.path.dirname",
"torch.cuda.is_available",
"torch.device",
"torch.zeros",
"os.listdir",
"lib.Model.Model"
] |
[((1096, 1121), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1119, 1121), False, 'import torch\n'), ((1135, 1178), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (1147, 1178), False, 'import torch\n'), ((5836, 5861), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5859, 5861), False, 'import argparse\n'), ((2011, 2043), 'torchvision.models.vgg19_bn', 'models.vgg19_bn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2026, 2043), True, 'import torchvision.models as models\n'), ((2060, 2105), 'lib.Model.Model', 'Model.Model', ([], {'features': 'my_vgg.features', 'bins': '(2)'}), '(features=my_vgg.features, bins=2)\n', (2071, 2105), False, 'from lib import Model, ClassAverages\n'), ((2608, 2637), 'lib.ClassAverages.ClassAverages', 'ClassAverages.ClassAverages', ([], {}), '()\n', (2635, 2637), False, 'from lib import Model, ClassAverages\n'), ((2153, 2192), 'torch.load', 'torch.load', (['(weights_path + model_weight)'], {}), '(weights_path + model_weight)\n', (2163, 2192), False, 'import torch\n'), ((2233, 2272), 'torch.load', 'torch.load', (['(weights_path + model_weight)'], {}), '(weights_path + model_weight)\n', (2243, 2272), False, 'import torch\n'), ((2881, 2899), 'numpy.copy', 'np.copy', (['truth_img'], {}), '(truth_img)\n', (2888, 2899), True, 'import numpy as np\n'), ((2920, 2938), 'numpy.copy', 'np.copy', (['truth_img'], {}), '(truth_img)\n', (2927, 2938), True, 'import numpy as np\n'), ((5473, 5520), 'numpy.concatenate', 'np.concatenate', (['(truth_img, imgGT, img)'], {'axis': '(0)'}), '((truth_img, imgGT, img), axis=0)\n', (5487, 5520), True, 'import numpy as np\n'), ((5726, 5765), 'cv2.imwrite', 'cv2.imwrite', (['image_name', 'numpy_vertical'], {}), '(image_name, numpy_vertical)\n', (5737, 5765), False, 'import cv2\n'), ((1317, 1341), 'os.listdir', 'os.listdir', (['weights_path'], {}), '(weights_path)\n', (1327, 1341), False, 'import os\n'), ((3417, 3446), 'torch.zeros', 'torch.zeros', (['[1, 3, 224, 224]'], {}), '([1, 3, 224, 224])\n', (3428, 3446), False, 'import torch\n'), ((3837, 3852), 'numpy.argmax', 'np.argmax', (['conf'], {}), '(conf)\n', (3846, 3852), True, 'import numpy as np\n'), ((3984, 4004), 'numpy.arctan2', 'np.arctan2', (['sin', 'cos'], {}), '(sin, cos)\n', (3994, 4004), True, 'import numpy as np\n'), ((1215, 1240), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1230, 1240), False, 'import os\n'), ((2443, 2468), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2458, 2468), False, 'import os\n')]
|
import numpy as np
from autocnet.camera.utils import crossform
from cv2 import triangulatePoints
def compute_epipoles(f):
"""
Compute the epipole and epipolar prime
Parameters
----------
f : ndarray
(3,3) fundamental matrix or autocnet Fundamental Matrix object
Returns
-------
e : ndarray
(3,1) epipole
e1 : ndarray
(3,3) epipolar prime matrix
"""
u, _, _ = np.linalg.svd(f)
e = u[:, -1]
e1 = crossform(e)
return e, e1
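# Hedged sanity-check sketch for the function above: for an (exactly rank-2) fundamental
# matrix the returned epipole is a left null vector of f, and crossform(e) is the
# skew-symmetric cross-product matrix. The helper name is illustrative only.
def _check_epipoles(f, atol=1e-8):
    e, e1 = compute_epipoles(f)
    assert np.allclose(e.dot(f), 0, atol=atol)  # e^T F = 0
    assert np.allclose(e1, -e1.T)               # cross-product matrix is skew-symmetric
    return e, e1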
def idealized_camera():
"""
Create an idealized camera transformation matrix
Returns
-------
: ndarray
(3,4) with diagonal 1
"""
i = np.eye(3, 4)
i[:,-1] = 0
return i
def camera_from_f(F):
"""
Estimate a camera matrix using a fundamental matrix.
Parameters
----------
f : ndarray
(3,3) fundamental matrix or autocnet Fundamental Matrix object
Returns
-------
p1 : ndarray
Estimated camera matrix
"""
e, e1 = compute_epipoles(F)
p1 = np.empty((3, 4))
p1[:, :3] = -e1.dot(F)
p1[:, 3] = e
return p1
def triangulate(pt, pt1, p, p1):
"""
Given two sets of homogeneous coordinates and two camera matrices,
triangulate the 3D coordinates. The image correspondences are
assumed to be implicitly ordered.
References
----------
[Hartley2003]_
Parameters
----------
pt : ndarray
(n, 3) array of homogeneous correspondences
pt1 : ndarray
(n, 3) array of homogeneous correspondences
p : ndarray
(3, 4) camera matrix
p1 : ndarray
(3, 4) camera matrix
Returns
-------
coords : ndarray
        (4, n) array of triangulated homogeneous point coordinates
"""
pt = np.asarray(pt)
pt1 = np.asarray(pt1)
# Transpose for the openCV call if needed
if pt.shape[0] != 3:
pt = pt.T
if pt1.shape[0] != 3:
pt1 = pt1.T
X = triangulatePoints(p, p1, pt[:2], pt1[:2])
X /= X[3] # Homogenize
return X
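# Hedged usage sketch tying the pieces of this module together: triangulate image
# correspondences with the idealized camera P = [I | 0] and a camera estimated from a
# fundamental matrix. The helper name and its arguments are illustrative only.
def _points_from_f(F, pts, pts1):
    p = idealized_camera()
    p1 = camera_from_f(F)
    X = triangulate(pts, pts1, p, p1)  # (4, n) homogeneous coordinates
    return X[:3].T                    # (n, 3) inhomogeneous 3D points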
def projection_error(p1, p, pt, pt1):
"""
Based on Hartley and Zisserman p.285 this function triangulates
image correspondences and computes the reprojection error
by back-projecting the points into the image.
    This is the classic cost function (minimization problem) used in
the gold standard method for fundamental matrix estimation.
Parameters
-----------
p1 : ndarray
(3,4) camera matrix
p : ndarray
(3,4) idealized camera matrix in the form np.eye(3,4)
pt : dataframe or ndarray
of homogeneous coordinates in the form (x_{i}, y_{i}, 1)
pt1 : dataframe or ndarray
of homogeneous coordinates in the form (x_{i}, y_{i}, 1)
Returns
-------
reproj_error : ndarray
(n, 1) vector of reprojection errors
"""
# SciPy least squares solver needs a vector, so reshape back to a 3x4 c
# camera matrix at each iteration
if p1.shape != (3,4):
p1 = p1.reshape(3,4)
# Triangulate the correspondences
xhat = triangulate(pt, pt1, p, p1)
xhat1 = xhat[:3] / xhat[2]
xhat2 = p1.dot(xhat)
xhat2 /= xhat2[2]
# Compute error
cost = (pt - xhat1)**2 + (pt1 - xhat2)**2
cost = np.sqrt(np.sum(cost, axis=0))
return cost
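# Hedged sketch of the minimization the docstring refers to: projection_error already
# accepts a flattened camera matrix and returns a residual vector, so it can be handed
# straight to scipy.optimize.least_squares. scipy.optimize is an assumed extra dependency
# here, and the helper name is illustrative only.
def _refine_camera(F, pt, pt1):
    from scipy.optimize import least_squares
    p = idealized_camera()
    p1_0 = camera_from_f(F).ravel()
    result = least_squares(projection_error, p1_0, args=(p, pt, pt1))
    return result.x.reshape(3, 4)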
|
[
"numpy.sum",
"autocnet.camera.utils.crossform",
"numpy.asarray",
"numpy.empty",
"numpy.linalg.svd",
"numpy.eye",
"cv2.triangulatePoints"
] |
[((433, 449), 'numpy.linalg.svd', 'np.linalg.svd', (['f'], {}), '(f)\n', (446, 449), True, 'import numpy as np\n'), ((476, 488), 'autocnet.camera.utils.crossform', 'crossform', (['e'], {}), '(e)\n', (485, 488), False, 'from autocnet.camera.utils import crossform\n'), ((678, 690), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (684, 690), True, 'import numpy as np\n'), ((1051, 1067), 'numpy.empty', 'np.empty', (['(3, 4)'], {}), '((3, 4))\n', (1059, 1067), True, 'import numpy as np\n'), ((1766, 1780), 'numpy.asarray', 'np.asarray', (['pt'], {}), '(pt)\n', (1776, 1780), True, 'import numpy as np\n'), ((1791, 1806), 'numpy.asarray', 'np.asarray', (['pt1'], {}), '(pt1)\n', (1801, 1806), True, 'import numpy as np\n'), ((1952, 1993), 'cv2.triangulatePoints', 'triangulatePoints', (['p', 'p1', 'pt[:2]', 'pt1[:2]'], {}), '(p, p1, pt[:2], pt1[:2])\n', (1969, 1993), False, 'from cv2 import triangulatePoints\n'), ((3275, 3295), 'numpy.sum', 'np.sum', (['cost'], {'axis': '(0)'}), '(cost, axis=0)\n', (3281, 3295), True, 'import numpy as np\n')]
|
from kinematicEnv import KinematicEnv
from QL import QL
from upDDPG import DDPG as uDDPG
import tensorflow as tf
from bottomDDPG import DDPG as bDDPG
import numpy as np
env = KinematicEnv()
s_dim = env.state_dim
a_dim = env.action_dim
a_bound = env.action_bound
g1 = tf.Graph()
isess1 = tf.Session(graph=g1)
with g1.as_default():
isess1.run(tf.global_variables_initializer())
uddpg = uDDPG(a_dim, s_dim, a_bound)
uddpg.restore()
g2 = tf.Graph()
isess2 = tf.Session(graph=g2)
with g2.as_default():
isess2.run(tf.global_variables_initializer())
bddpg = bDDPG(a_dim, s_dim, a_bound)
bddpg.restore()
g3 = tf.Graph()
isess3 = tf.Session(graph=g3)
with g3.as_default():
isess3.run(tf.global_variables_initializer())
Q = QL(2, s_dim)
def initial():
tt = np.random.randint(0, 3)
if tt == 0:
s = env.initialUp()
elif tt == 1:
s = env.initialDown()
else:
s = env.initialOn()
return s
def train():
step = 0
for i_episode in range(6000):
s = initial()
j = 0
for i in range(300):
#env.render()
a0 = Q.choose_action(s)
if a0 == 0:
k = uddpg.choose_action(s)
s_, _, _ = env.stepUp(k)
else:
k = bddpg.choose_action(s)
s_, _, _ = env.stepDown(k)
#rewardReset
label1, label2, label3 = s[0], s[8], s[9] - s[1]
if -20.<label1<20. and -20.<label2<20.:
if label3 < 150.:
if a0 == 0: reward = 1
else: reward = -1
else:
if a0 == 0: reward = -1
else: reward = 1
elif -20.<label1<20. and abs(label2) >= 20.:
if a0 == 0: reward = 1
else: reward = -2
elif abs(label1) >= 20. and -20.<label2<20.:
if a0 == 0: reward = -2
                else: reward = 1
            else:
                # Both readings out of range: no branch above assigned `reward`, which
                # would raise UnboundLocalError; treat it as a failure like the other
                # out-of-range cases.
                reward = -2
Q.store_transition(s, a0, reward, s_)
if step > 300 and step % 50 == 0:
Q.learn()
step+=1
if reward == 1:
j += 1
if reward == -2 or i == 299:
print('Ep: %i | accuracy: %.2f | step: %i' % (i_episode, 1.*j/(i+1)*100, i))
break
with g3.as_default():
Q.save()
# When training with several computation graphs, how do we save each model separately?
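# A hedged sketch answering the question above: give each graph its own Saver created
# while that graph is the default, and save it through the session bound to that graph.
# The function and checkpoint file names below are placeholders, not part of this project.
def save_all_graphs(checkpoint_dir):
    with g1.as_default():
        tf.train.Saver().save(isess1, checkpoint_dir + "/up_ddpg.ckpt")
    with g2.as_default():
        tf.train.Saver().save(isess2, checkpoint_dir + "/bottom_ddpg.ckpt")
    with g3.as_default():
        tf.train.Saver().save(isess3, checkpoint_dir + "/q_learning.ckpt")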
train()
|
[
"kinematicEnv.KinematicEnv",
"QL.QL",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"upDDPG.DDPG",
"numpy.random.randint",
"tensorflow.Graph",
"bottomDDPG.DDPG"
] |
[((175, 189), 'kinematicEnv.KinematicEnv', 'KinematicEnv', ([], {}), '()\n', (187, 189), False, 'from kinematicEnv import KinematicEnv\n'), ((268, 278), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (276, 278), True, 'import tensorflow as tf\n'), ((288, 308), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g1'}), '(graph=g1)\n', (298, 308), True, 'import tensorflow as tf\n'), ((447, 457), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (455, 457), True, 'import tensorflow as tf\n'), ((467, 487), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g2'}), '(graph=g2)\n', (477, 487), True, 'import tensorflow as tf\n'), ((626, 636), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (634, 636), True, 'import tensorflow as tf\n'), ((646, 666), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g3'}), '(graph=g3)\n', (656, 666), True, 'import tensorflow as tf\n'), ((393, 421), 'upDDPG.DDPG', 'uDDPG', (['a_dim', 's_dim', 'a_bound'], {}), '(a_dim, s_dim, a_bound)\n', (398, 421), True, 'from upDDPG import DDPG as uDDPG\n'), ((572, 600), 'bottomDDPG.DDPG', 'bDDPG', (['a_dim', 's_dim', 'a_bound'], {}), '(a_dim, s_dim, a_bound)\n', (577, 600), True, 'from bottomDDPG import DDPG as bDDPG\n'), ((747, 759), 'QL.QL', 'QL', (['(2)', 's_dim'], {}), '(2, s_dim)\n', (749, 759), False, 'from QL import QL\n'), ((786, 809), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (803, 809), True, 'import numpy as np\n'), ((346, 379), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (377, 379), True, 'import tensorflow as tf\n'), ((525, 558), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (556, 558), True, 'import tensorflow as tf\n'), ((704, 737), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (735, 737), True, 'import tensorflow as tf\n')]
|
import unittest
import warnings
import numpy as np
# import safety_gym
# from safety_gym.envs.engine import Engine
import gym
import gym.spaces as spaces
from gym.envs.registration import register
from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper
from edge.agent import RandomAgent
class SpaceWrappers(unittest.TestCase):
def test_box_wrapper(self):
warnings.filterwarnings('ignore')
gb = spaces.Box(0, 1, (2,2))
eb = BoxWrapper(gb, (10,10,10,10))
eelem = eb.sample()
gelem = gb.sample()
self.assertEqual(eelem.shape, (4,))
self.assertEqual(eb.to_gym((2,3,4,5)).shape, gb.shape)
self.assertEqual(eb.from_gym(gelem).shape, (4,))
gb = spaces.Box(np.array([0,1]), np.array([2,3]))
eb = BoxWrapper(gb, (10, 10))
eelem = eb.sample()
gelem = gb.sample()
self.assertEqual(eelem.shape, (2,))
self.assertEqual(eb.to_gym((2,3)).shape, gb.shape)
self.assertEqual(eb.from_gym(gelem).shape, (2,))
gb = spaces.Box(-np.inf, np.inf, (1,))
eb = BoxWrapper(gb, (10, ), inf_ceiling=5)
for t in range(100):
eelem = eb.sample()
self.assertTrue(np.abs(eelem)[0] <= 5)
self.assertTrue(eelem in eb)
def test_discrete_wrapper(self):
gd = spaces.Discrete(10)
ed = DiscreteWrapper(gd)
g = gd.sample()
e = ed.sample()
self.assertEqual(ed.to_gym(e), int(e))
self.assertEqual(ed.from_gym(g), g)
# class SafetyGymEnvironmentWrappers(unittest.TestCase):
# def test_safety_gym_environment_creation(self):
# senv = gym.make('Safexp-PointGoal1-v0')
# env = GymEnvironmentWrapper(senv)
#
# config = {
# 'robot_base': 'xmls/car.xml',
# 'task': 'push',
# 'observe_goal_lidar': True,
# 'observe_box_lidar': True,
# 'observe_hazards': True,
# 'observe_vases': True,
# 'constrain_hazards': True,
# 'lidar_max_dist': 3,
# 'lidar_num_bins': 16,
# 'hazards_num': 4,
# 'vases_num': 4
# }
#
# senv = Engine(config)
# register(id='SafexpTestEnvironment-v0',
# entry_point='safety_gym.envs.mujoco:Engine',
# kwargs={'config': config})
# env = GymEnvironmentWrapper(senv, failure_critical=True)
#
# def test_safety_gym_random_agent(self):
# senv = gym.make('Safexp-PointGoal1-v0')
# env = GymEnvironmentWrapper(senv)
# random_agent = RandomAgent(env)
#
# ep_ret, ep_cost = 0, 0
# for t in range(1000):
# new_state, reward, failed = random_agent.step()
# ep_ret += reward
# ep_cost += env.info.get('cost', 0)
# env.gym_env.render()
# if env.done:
# print('Episode Return: %.3f \t Episode Cost: %.3f' % (ep_ret, ep_cost))
# ep_ret, ep_cost = 0, 0
# random_agent.reset()
class GymEnvironmentWrappers(unittest.TestCase):
def test_gym_environment_creation(self):
gymenv = gym.make('LunarLander-v2')
env = GymEnvironmentWrapper(gymenv)
env = GymEnvironmentWrapper(gymenv, failure_critical=True)
self.assertTrue(True)
def test_gym_random_agent(self):
gymenv = gym.make('LunarLander-v2')
env = GymEnvironmentWrapper(gymenv)
random_agent = RandomAgent(env)
ep_ret, ep_cost = 0, 0
for t in range(100):
new_state, reward, failed, _ = random_agent.step()
ep_ret += reward
ep_cost += env.info.get('cost', 0)
# env.gym_env.render()
if env.done:
print('Episode Return: %.3f \t Episode Cost: %.3f' % (
ep_ret, ep_cost))
ep_ret, ep_cost = 0, 0
random_agent.reset()
def test_gym_control_frequency(self):
gymenv = gym.make('CartPole-v1')
env = GymEnvironmentWrapper(gymenv, control_frequency=2)
random_agent = RandomAgent(env)
ep_ret, ep_cost = 0, 0
for t in range(100):
new_state, reward, failed, _ = random_agent.step()
ep_ret += reward
ep_cost += env.info.get('cost', 0)
# env.gym_env.render()
if env.done:
print('Episode Return: %.3f \t Episode Cost: %.3f' % (ep_ret, ep_cost))
ep_ret, ep_cost = 0, 0
random_agent.reset()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.abs",
"gym.make",
"warnings.filterwarnings",
"edge.gym_wrappers.DiscreteWrapper",
"gym.spaces.Discrete",
"edge.gym_wrappers.BoxWrapper",
"gym.spaces.Box",
"edge.gym_wrappers.GymEnvironmentWrapper",
"numpy.array",
"edge.agent.RandomAgent"
] |
[((4610, 4625), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4623, 4625), False, 'import unittest\n'), ((396, 429), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (419, 429), False, 'import warnings\n'), ((443, 467), 'gym.spaces.Box', 'spaces.Box', (['(0)', '(1)', '(2, 2)'], {}), '(0, 1, (2, 2))\n', (453, 467), True, 'import gym.spaces as spaces\n'), ((480, 512), 'edge.gym_wrappers.BoxWrapper', 'BoxWrapper', (['gb', '(10, 10, 10, 10)'], {}), '(gb, (10, 10, 10, 10))\n', (490, 512), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((803, 827), 'edge.gym_wrappers.BoxWrapper', 'BoxWrapper', (['gb', '(10, 10)'], {}), '(gb, (10, 10))\n', (813, 827), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((1059, 1092), 'gym.spaces.Box', 'spaces.Box', (['(-np.inf)', 'np.inf', '(1,)'], {}), '(-np.inf, np.inf, (1,))\n', (1069, 1092), True, 'import gym.spaces as spaces\n'), ((1106, 1142), 'edge.gym_wrappers.BoxWrapper', 'BoxWrapper', (['gb', '(10,)'], {'inf_ceiling': '(5)'}), '(gb, (10,), inf_ceiling=5)\n', (1116, 1142), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((1348, 1367), 'gym.spaces.Discrete', 'spaces.Discrete', (['(10)'], {}), '(10)\n', (1363, 1367), True, 'import gym.spaces as spaces\n'), ((1381, 1400), 'edge.gym_wrappers.DiscreteWrapper', 'DiscreteWrapper', (['gd'], {}), '(gd)\n', (1396, 1400), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((3188, 3214), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (3196, 3214), False, 'import gym\n'), ((3229, 3258), 'edge.gym_wrappers.GymEnvironmentWrapper', 'GymEnvironmentWrapper', (['gymenv'], {}), '(gymenv)\n', (3250, 3258), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((3274, 3326), 'edge.gym_wrappers.GymEnvironmentWrapper', 'GymEnvironmentWrapper', (['gymenv'], {'failure_critical': '(True)'}), '(gymenv, failure_critical=True)\n', (3295, 3326), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((3412, 3438), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (3420, 3438), False, 'import gym\n'), ((3453, 3482), 'edge.gym_wrappers.GymEnvironmentWrapper', 'GymEnvironmentWrapper', (['gymenv'], {}), '(gymenv)\n', (3474, 3482), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((3506, 3522), 'edge.agent.RandomAgent', 'RandomAgent', (['env'], {}), '(env)\n', (3517, 3522), False, 'from edge.agent import RandomAgent\n'), ((4024, 4047), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (4032, 4047), False, 'import gym\n'), ((4062, 4112), 'edge.gym_wrappers.GymEnvironmentWrapper', 'GymEnvironmentWrapper', (['gymenv'], {'control_frequency': '(2)'}), '(gymenv, control_frequency=2)\n', (4083, 4112), False, 'from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper\n'), ((4136, 4152), 'edge.agent.RandomAgent', 'RandomAgent', (['env'], {}), '(env)\n', (4147, 4152), False, 'from edge.agent import RandomAgent\n'), ((756, 772), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (764, 772), True, 'import numpy as np\n'), ((773, 789), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (781, 789), True, 'import numpy as np\n'), ((1233, 1246), 'numpy.abs', 'np.abs', (['eelem'], {}), 
'(eelem)\n', (1239, 1246), True, 'import numpy as np\n')]
|
"""
Implementation of the model.
Parts of the code are inherited from the official CAAE implementation (https://arxiv.org/abs/1702.08423, https://github.com/ZZUTK/Face-Aging-CAAE).
"""
import os
import sys
import time
from glob import glob
import numpy as np
import tensorflow as tf
from scipy.io import loadmat, savemat
from PK_Utils.PK_config import *
from PK_Utils.PK_image_ops import *
from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em
from PK_Utils.PK_vgg_face import face_embedding
from metrics import concordance_cc
class Model(object):
"""
Implementation of the model used.
"""
def __init__(self, session, useEmotion=False):
self.useEmotion = useEmotion
self.session = session
self.vgg_weights = loadmat(vggMat)
# -- INPUT PLACEHOLDERS -----------------------------------------------------------
# ---------------------------------------------------------------------------------
self.input_image = tf.compat.v1.placeholder(
tf.float32,
[size_batch, size_image, size_image, 3],
name='input_images'
)
self.valence = tf.compat.v1.placeholder(
tf.float32,
[size_batch, 1],
name='valence_labels'
)
self.arousal = tf.compat.v1.placeholder(
tf.float32,
[size_batch, 1],
name='arousal_labels'
)
self.z_prior = tf.compat.v1.placeholder(
tf.float32,
[size_batch, num_z_channels],
name='z_prior'
)
# -- GRAPH ------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
print ('\n\t SETTING UP THE GRAPH')
with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()):
# with tf.device('/device:GPU:0'):
with tf.device(device):
# -- NETWORKS -------------------------------------------------------------
# -------------------------------------------------------------------------
# encoder:
self.z = encoder(self.input_image)
# generator: z + arousal + valence --> generated image
self.G = generator(self.z,
valence=self.valence,
arousal=self.arousal)
# Discriminator Z
self.Dz, self.Dz_logits = d_prior(self.z) # Discriminator_Z on encoded image
self.Dz_prior, self.Dz_prior_logits = d_prior(self.z_prior,
reuse_variables=True) # Discriminator_Z on prior image
#Discriminator Image
self.Dimg_G, self.Dimg_G_logits = d_img(self.G,
valence=self.valence,
arousal=self.arousal) # discriminator on Generated
# discriminator on input image
self.Dimg_Original, self.Dimg_Original_logits = d_img(self.input_image,
valence=self.valence,
arousal=self.arousal,
reuse_variables=True) # discriminator on original image
# # discriminator on arousal/valence
# #
if self.useEmotion:
self.D_emArousal, self.D_emValence, self.D_em_arousal_logits, self.D_em_valence_logits = d_em(self.z, reuse_variables=True)
# self.D_emArousal_G, self.D_emValence_G, self.D_em_arousal_logits_G, self.D_em_valence_logits_G = d_em(self.G, reuse_variables=True)
# -- LOSSES ---------------------------------------------------------------
# -------------------------------------------------------------------------
# ---- VGG LOSS ---------------------------------------------------------
# The computation of this loss is inherited from the official ExprGan implementation (https://arxiv.org/abs/1709.03842, https://github.com/HuiDingUMD/ExprGAN).
real_conv1_2, real_conv2_2, real_conv3_2, real_conv4_2, real_conv5_2 = face_embedding(self.vgg_weights, self.input_image)
fake_conv1_2, fake_conv2_2, fake_conv3_2, fake_conv4_2, fake_conv5_2 = face_embedding(self.vgg_weights, self.G)
conv1_2_loss = tf.reduce_mean(tf.abs(real_conv1_2 - fake_conv1_2)) / 224. / 224.
conv2_2_loss = tf.reduce_mean(tf.abs(real_conv2_2 - fake_conv2_2)) / 112. / 112.
conv3_2_loss = tf.reduce_mean(tf.abs(real_conv3_2 - fake_conv3_2)) / 56. / 56.
conv4_2_loss = tf.reduce_mean(tf.abs(real_conv4_2 - fake_conv4_2)) / 28. / 28.
conv5_2_loss = tf.reduce_mean(tf.abs(real_conv5_2 - fake_conv5_2)) / 14. / 14.
# -----------------------------------------------------------------------
# loss function of discriminator on z
self.D_z_loss_z = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits= self.Dz_logits, labels=tf.ones_like(self.Dz_logits))
)
self.D_z_loss_prior = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=1 - self.Dz_prior_logits,
labels=tf.zeros_like(self.Dz_prior_logits))
)
# self.E_z_loss = tf.reduce_mean(
# tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_z_logits, labels=tf.ones_like(self.D_z_logits))
# )
# loss function of discriminator on image
self.D_img_loss_input = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.Dimg_Original_logits, labels=tf.ones_like(self.Dimg_Original_logits))
)
self.D_img_loss_G = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=1 - self.Dimg_G_logits, labels=tf.zeros_like(self.Dimg_G_logits))
)
if self.useEmotion:
# loss function of discriminator on emotion
self.D_em_arousal_loss = tf.compat.v1.losses.mean_squared_error(predictions=self.D_em_arousal_logits, labels=self.arousal)
self.D_em_valence_loss = tf.compat.v1.losses.mean_squared_error(predictions=self.D_em_valence_logits,
labels=self.valence)
# self.G_img_loss = tf.reduce_mean(
# tf.nn.sigmoid_cross_entropy_with_logits(logits=self.Dimg_G_logits, labels=tf.ones_like(self.Dimg_G_logits))
# )
# # loss function of d_em on arousal and valence values
# self.D_em_loss = tf.compat.v1.losses.mean_squared_error(predictions=self.D_em_valence_logits, labels=self.valence) + tf.compat.v1.losses.mean_squared_error(self.D_em_arousal_logits, self.arousal)
#
#
# #CCC for arousal and valence
# self.D_em_ccc_arousal = concordance_cc(self.D_em_arousal_logits, self.arousal)
# self.D_em_ccc_valence = concordance_cc(self.D_em_valence_logits, self.valence)
# ---------------------------------------------------------------------------------
# Losses
# ---------------------------------------------------------------------------------
# reconstruction loss of encoder+generator
# self.loss_rec = tf.reduce_mean(tf.abs(self.input_image - self.G)) # L1 loss
self.loss_rec = tf.reduce_mean(tf.abs(self.input_image - self.G)) # L1 loss
self.loss_Iden = conv1_2_loss + conv2_2_loss + conv3_2_loss + conv4_2_loss + conv5_2_loss
self.loss_Lz = self.D_z_loss_prior + self.D_z_loss_z
self.loss_Di = self.D_img_loss_input + self.D_img_loss_G
if self.useEmotion:
self.loss_Dem = self.D_em_arousal_loss + self.D_em_valence_loss
self.loss_Total = self.loss_rec + self.loss_Iden * 0.3 + self.loss_Lz * 0.01 + self.loss_Di * 0.01 + self.loss_Dem*0.001
else:
self.loss_Total = self.loss_rec + self.loss_Iden * 0.3 + self.loss_Lz * 0.01 + self.loss_Di * 0.01
# self.loss_EG = self.EG_loss + self.D_em_loss * 0.02 + self.vgg_loss * 0.3 + 0.01 * self.E_z_loss + 0.01 * self.G_img_loss
# -- TRAINABLE VARIABLES ----------------------------------------------------------
# ---------------------------------------------------------------------------------
        trainable_variables = tf.compat.v1.trainable_variables()
# variables of encoder
self.E_variables = [var for var in trainable_variables if 'E_' in var.name]
# variables of generator
self.G_variables = [var for var in trainable_variables if 'G_' in var.name]
# variables of discriminator on prior
self.D_z_variables = [var for var in trainable_variables if 'D_prior_' in var.name]
# variables of discriminator on realImage
self.D_img_variables = [var for var in trainable_variables if 'D_img_' in var.name]
# # variables of discriminator on emotions
# self.D_em_variables = [var for var in trainable_variables if 'D_em_' in var.name]
# -- SUMMARY ----------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# with tf.device('/device:CPU:0'):
# self.z_summary = tf.compat.v1.summary.histogram('z', self.z)
# self.z_prior_summary = tf.compat.v1.summary.histogram('z_prior', self.z_prior)
# self.EG_loss_summary = tf.summary.scalar('EG_loss', self.EG_loss)
# self.D_z_loss_z_summary = tf.summary.scalar('D_z_loss_z', self.D_z_loss_z)
# self.D_z_loss_prior_summary = tf.summary.scalar('D_z_loss_prior', self.D_z_loss_prior)
# self.E_z_loss_summary = tf.summary.scalar('E_z_loss', self.E_z_loss)
# self.D_z_logits_summary = tf.compat.v1.summary.histogram('D_z_logits', self.D_z_logits)
# self.D_z_prior_logits_summary = tf.compat.v1.summary.histogram('D_z_prior_logits', self.D_z_prior_logits)
# self.D_img_loss_input_summary = tf.summary.scalar('D_img_loss_input', self.D_img_loss_input)
# self.D_img_loss_G_summary = tf.summary.scalar('D_img_loss_G', self.D_img_loss_G)
# self.G_img_loss_summary = tf.summary.scalar('G_img_loss', self.G_img_loss)
# self.D_G_logits_summary = tf.compat.v1.summary.histogram('D_G_logits', self.D_G_logits)
# self.D_input_logits_summary = tf.compat.v1.summary.histogram('D_input_logits', self.D_input_logits)
# self.D_em_arousal_logits_summary = tf.compat.v1.summary.histogram('D_em_arousal_logits', self.D_em_arousal_logits)
# self.D_em_valence_logits_summary = tf.compat.v1.summary.histogram('D_em_valence_logits',
# self.D_em_valence_logits)
# self.D_em_loss_summary = tf.compat.v1.summary.histogram('D_em_loss', self.D_em_loss)
# self.D_em_ccc_arousal_summary = tf.compat.v1.summary.histogram('D_em_ccc_arousal', self.D_em_ccc_arousal)
# self.D_em_ccc_valence_summary = tf.compat.v1.summary.histogram('D_em_ccc_valence', self.D_em_ccc_valence)
# self.vgg_loss_summary = tf.summary.scalar('VGG_loss', self.vgg_loss)
#
# for saving the graph and variables
self.saver = tf.compat.v1.train.Saver(max_to_keep=10)
def train(self,
num_epochs=2, # number of epochs
learning_rate=0.0002, # learning rate of optimizer
beta1=0.5, # parameter for Adam optimizer
decay_rate=1.0, # learning rate decay (0, 1], 1 means no decay
use_trained_model=False, # used the saved checkpoint to initialize the model
):
enable_shuffle = True
# set learning rate decay
with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()):
with tf.device('/device:CPU:0'):
self.EG_global_step = tf.Variable(0, trainable=False, name='global_step')
# -- LOAD FILE NAMES --------------------------------------------------------------
# ---------------------------------------------------------------------------------
# ---- TRAINING DATA
file_names = [data_path + x for x in os.listdir(data_path)]
file_names = self.fill_up_equally(file_names)
size_data = len(file_names)
np.random.shuffle(file_names)
# ---- VALIDATION DATA
self.validation_files = [validation_path + v for v in os.listdir(validation_path)]
# -- OPTIMIZERS -------------------------------------------------------------------
# ---------------------------------------------------------------------------------
with tf.device(device):
# with tf.device('/device:GPU:0'):
EG_learning_rate = tf.compat.v1.train.exponential_decay(
learning_rate=learning_rate,
global_step=self.EG_global_step,
decay_steps=size_data / size_batch * 2,
decay_rate=decay_rate,
staircase=True
)
# optimizer for encoder + generator
self.EG_optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=EG_learning_rate,
beta1=beta1
).minimize(
loss=self.loss_Total,
global_step=self.EG_global_step,
var_list=self.E_variables + self.G_variables
)
# # optimizer for discriminator on z
# self.D_z_optimizer = tf.compat.v1.train.AdamOptimizer(
# learning_rate=EG_learning_rate,
# beta1=beta1
# ).minimize(
# loss=self.loss_Lz,
# var_list=self.D_z_variables
# )
#
# # optimizer for discriminator on image
# self.D_img_optimizer = tf.compat.v1.train.AdamOptimizer(
# learning_rate=EG_learning_rate,
# beta1=beta1
# ).minimize(
# loss=self.loss_Di,
# var_list=self.D_img_variables
# )
# # optimizer for emotion
# self.D_em_optimizer = tf.compat.v1.train.AdamOptimizer(
# learning_rate=EG_learning_rate,
# beta1=beta1
# ).minimize(
# loss=self.D_em_loss,
# var_list=self.D_em_variables
# )
# # -- TENSORBOARD WRITER ----------------------------------------------------------
# # ---------------------------------------------------------------------------------
# self.writer = tf.summary.create_file_writer(save_dir)
# -- TENSORBOARD SUMMARY ----------------------------------------------------------
# ---------------------------------------------------------------------------------
# with tf.device('/device:CPU:0'):
# self.EG_learning_rate_summary = tf.summary.scalar('EG_learning_rate', EG_learning_rate)
# self.summary = tf.compat.v1.summary.merge([
# self.z_summary, self.z_prior_summary,
# self.D_z_loss_z_summary, self.D_z_loss_prior_summary,
# self.D_z_logits_summary, self.D_z_prior_logits_summary,
# self.EG_loss_summary, self.E_z_loss_summary,
# self.D_img_loss_input_summary, self.D_img_loss_G_summary,
# self.G_img_loss_summary, self.EG_learning_rate_summary,
# self.D_G_logits_summary, self.D_input_logits_summary,
# self.vgg_loss_summary, self.D_em_arousal_logits_summary, self.D_em_valence_logits_summary, self.D_em_loss_summary, self.D_em_ccc_arousal_summary, self.D_em_ccc_valence_summary
# ])
# self.writer = tf.summary.FileWriter(os.path.join(save_dir, 'summary'), self.session.graph)
# ************* get some random samples as testing data to visualize the learning process *********************
sample_files = file_names[0:size_batch]
file_names[0:size_batch] = []
sample = [load_image(
image_path=sample_file,
image_size=size_image,
image_value_range=image_value_range,
is_gray=False,
) for sample_file in sample_files]
sample_images = np.array(sample).astype(np.float32)
sample_label_arousal = np.asarray([[float(x.split('__')[2])] for x in sample_files])
sample_label_valence = np.asarray([[float(x.split('__')[3][0:-4])] for x in sample_files])
# ******************************************* training *******************************************************
print('\n\tPreparing for training ...')
# initialize the graph
tf.compat.v1.global_variables_initializer().run()
# load check point
if use_trained_model:
if self.load_checkpoint():
print("\tSUCCESS ^_^")
else:
print("\tFAILED >_<!")
# epoch iteration
num_batches = len(file_names) // size_batch
for epoch in range(num_epochs):
if enable_shuffle:
np.random.shuffle(file_names)
for ind_batch in range(num_batches):
start_time = time.time()
# read batch images and labels
batch_files = file_names[ind_batch*size_batch:(ind_batch+1)*size_batch]
batch = [load_image(
image_path=batch_file,
image_size=size_image,
image_value_range=image_value_range,
is_gray=False,
) for batch_file in batch_files]
batch_images = np.array(batch).astype(np.float32)
batch_label_valence = np.asarray([[float(x.split('__')[2])] for x in batch_files])
batch_label_arousal = np.asarray([[float(x.split('__')[3][0:-4])] for x in batch_files])
# prior distribution on the prior of z
batch_z_prior = np.random.uniform(
image_value_range[0],
image_value_range[-1],
[size_batch, num_z_channels]
).astype(np.float32)
# # update
# _, _, _, EG_err, Ez_err, Dz_err, Dzp_err, Gi_err, DiG_err, Di_err, vgg, em, arousalCCC, valenceCCC = self.session.run(
# fetches = [
# self.EG_optimizer,
# self.D_z_optimizer,
# self.D_img_optimizer,
# self.EG_loss,
# self.E_z_loss,
# self.D_z_loss_z,
# self.D_z_loss_prior,
# self.G_img_loss,
# self.D_img_loss_G,
# self.D_img_loss_input,
# # self.tv_loss,
# self.vgg_loss,
# self.D_em_loss,
# self.D_em_ccc_arousal,
# self.D_em_ccc_valence
# ],
# feed_dict={
# self.input_image: batch_images,
# self.valence: batch_label_valence,
# self.arousal: batch_label_arousal,
# self.z_prior: batch_z_prior
# }
# )
# update
# _, _, _, EG_err, Ez_err, Dz_err, Dzp_err, Gi_err, DiG_err, Di_err, vgg = self.session.run(
# fetches=[
# self.EG_optimizer,
# self.D_z_optimizer,
# self.D_img_optimizer,
# self.loss_rec,
# self.E_z_loss,
# self.D_z_loss_z,
# self.D_z_loss_prior,
# self.G_img_loss,
# self.D_img_loss_G,
# self.D_img_loss_input,
# # self.tv_loss,
# self.loss_Iden
# ],
# feed_dict={
# self.input_image: batch_images,
# self.valence: batch_label_valence,
# self.arousal: batch_label_arousal,
# self.z_prior: batch_z_prior
# }
# )
# print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tEG_err=%.4f\tVGG=%.4f" %
# (epoch + 1, num_epochs, ind_batch + 1, num_batches, EG_err, vgg))
# print("\tEz=%.4f\tDz=%.4f\tDzp=%.4f" % (Ez_err, Dz_err, Dzp_err))
# print("\tGi=%.4f\tDi=%.4f\tDiG=%.4f" % (Gi_err, Di_err, DiG_err))
#
#
# update
if self.useEmotion:
_, lossTotal, lossRec, lossIden, lossLz, lossLzPrior, lossLzOriginal, lossDimg, lossDimgInput, lossDimgGenerated, lossDem, lossDemArousal, lossDemValence = self.session.run(
fetches=[
self.EG_optimizer,
self.loss_Total,
self.loss_rec,
self.loss_Iden,
self.loss_Lz,
self.D_z_loss_prior,
self.D_z_loss_z,
self.loss_Di,
self.D_img_loss_input,
self.D_img_loss_G,
self.loss_Dem,
self.D_em_arousal_loss,
self.D_em_valence_loss
],
feed_dict={
self.input_image: batch_images,
self.valence: batch_label_valence,
self.arousal: batch_label_arousal,
self.z_prior: batch_z_prior
}
)
print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tLoss_Total=%.4f" %
(epoch + 1, num_epochs, ind_batch + 1, num_batches, lossTotal), file=open(save_dir+"Log.txt", "a"))
print("\tL_rec=%.4f\tL_Iden=%.4f\tL_Z=%.4f\tL_Img=%.4f\tL_em=%.4f" % (lossRec, lossIden, lossLz, lossDimg,lossDem), file=open(save_dir+"Log.txt", "a"))
print("\tL_Z_Prior=%.4f\tL_Z_original=%.4f" % (lossLzPrior, lossLzOriginal), file=open(save_dir+"Log.txt", "a"))
print("\tL_Img_Input=%.4f\tL_Img_Generated=%.4f" % (lossDimgInput, lossDimgGenerated), file=open(save_dir+"Log.txt", "a"))
print("\tL_Dem_Arousal=%.4f\tL_Dem_Valence=%.4f" % (lossDemArousal, lossDemValence), file=open(save_dir+"Log.txt", "a"))
else:
_, lossTotal, lossRec, lossIden, lossLz, lossLzPrior, lossLzOriginal, lossDimg, lossDimgInput, lossDimgGenerated = self.session.run(
fetches=[
self.EG_optimizer,
self.loss_Total,
self.loss_rec,
self.loss_Iden,
self.loss_Lz,
self.D_z_loss_prior,
self.D_z_loss_z,
self.loss_Di,
self.D_img_loss_input,
self.D_img_loss_G,
],
feed_dict={
self.input_image: batch_images,
self.valence: batch_label_valence,
self.arousal: batch_label_arousal,
self.z_prior: batch_z_prior
}
)
print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tLoss_Total=%.4f"%
(epoch + 1, num_epochs, ind_batch + 1, num_batches, lossTotal), file=open(save_dir+"Log.txt", "a"))
print("\tL_rec=%.4f\tL_Iden=%.4f\tL_Z=%.4f\tL_Img=%.4f" % (lossRec, lossIden, lossLz,lossDimg), file=open(save_dir+"Log.txt", "a"))
print("\tL_Z_Prior=%.4f\tL_Z_original=%.4f" % (lossLzPrior, lossLzOriginal), file=open(save_dir+"Log.txt", "a"))
print("\tL_Img_Input=%.4f\tL_Img_Generated=%.4f" % (lossDimgInput, lossDimgGenerated), file=open(save_dir+"Log.txt", "a"))
# print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tEG_err=%.4f\tVGG=%.4f\tEm=%.4f" %
# (epoch+1, num_epochs, ind_batch+1, num_batches, EG_err, vgg, em))
# print("\tArousalCCC=%.4f\tValenceCCC=%.4f" % (arousalCCC, valenceCCC))
# estimate left run time
elapse = time.time() - start_time
time_left = ((num_epochs - epoch - 1) * num_batches + (num_batches - ind_batch - 1)) * elapse
print("\tTime left: %02d:%02d:%02d" %
(int(time_left / 3600), int(time_left % 3600 / 60), time_left % 60))
# # add to summary
# summary = self.summary.eval(
# feed_dict={
# self.input_image: batch_images,
# self.valence: batch_label_valence,
# self.arousal: batch_label_arousal,
# self.z_prior: batch_z_prior
# }
# )
# self.writer.add_summary(summary, self.EG_global_step.eval())
if ind_batch%500 == 0:
# save sample images for each epoch
name = '{:02d}_{:02d}'.format(epoch+1, ind_batch)
self.sample(sample_images, sample_label_valence, sample_label_arousal, name+'.png')
# TEST
test_dir = os.path.join(save_dir, 'test')
if not os.path.exists(test_dir):
os.makedirs(test_dir)
self.test(sample_images, test_dir, name+'.png')
# save checkpoint for each epoch
# VALIDATE
name = '{:02d}_model'.format(epoch+1)
self.validate(name)
self.save_checkpoint(name=name)
def save_checkpoint(self, name=''):
checkpoint_dir = os.path.join(save_dir, 'checkpoint')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(
sess=self.session,
save_path=os.path.join(checkpoint_dir, name)
)
def load_checkpoint(self):
print("\n\tLoading pre-trained model ...")
checkpoint_dir = os.path.join(save_dir, 'checkpoint')
checkpoints = tf.train.get_checkpoint_state(checkpoint_dir)
if checkpoints and checkpoints.model_checkpoint_path:
checkpoints_name = os.path.basename(checkpoints.model_checkpoint_path)
self.saver.restore(self.session, os.path.join(checkpoint_dir, checkpoints_name))
return True
else:
return False
def sample(self, images, valence, arousal, name):
sample_dir = os.path.join(save_dir, 'samples')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
z, G = self.session.run(
[self.z, self.G],
feed_dict={
self.input_image: images,
self.valence: valence,
self.arousal: arousal
}
)
size_frame = int(np.sqrt(size_batch))+1
save_batch_images(
batch_images=G,
save_path=os.path.join(sample_dir, name),
image_value_range=image_value_range,
size_frame=[size_frame, size_frame]
)
save_batch_images(
batch_images=images,
save_path=os.path.join(sample_dir, "input.png"),
image_value_range=image_value_range,
size_frame=[size_frame, size_frame]
)
def validate(self, name):
# Create Validation Directory if needed
val_dir = os.path.join(save_dir, 'validation')
if not os.path.exists(val_dir):
os.makedirs(val_dir)
# Create Name Directory if needed
name_dir = os.path.join(val_dir, name)
if not os.path.exists(name_dir):
os.makedirs(name_dir)
# validate
testFile = self.validation_files[0:10]
for image_path in testFile:
n = image_path.split("/")[-1]+".png"
self.test(np.array([load_image(image_path, image_size=96)]), name_dir, n)
def test(self, images, test_dir, name):
images = images[:1, :, :, :]
# valence
if size_batch == 25:
valence = np.arange(0.75, -0.751, -0.375)
valence = np.repeat(valence, 5).reshape((25, 1))
# valence = np.repeat(valence, 7, axis=0)
# arousal
arousal = [np.arange(0.75, -0.751, -0.375)]
arousal = np.repeat(arousal, 5).reshape((25, 1))
arousal = np.asarray([item for sublist in arousal for item in sublist]).reshape((25, 1))
query_images = np.tile(images, (25, 1, 1, 1))
size_frame = (6,7)
elif size_batch == 49:
valence = np.arange(0.75, -0.751, -0.25)
valence = np.repeat(valence, 7).reshape((49, 1))
# valence = np.repeat(valence, 7, axis=0)
# arousal
arousal = [np.arange(0.75, -0.751, -0.25)]
arousal = np.repeat(arousal, 7).reshape((49, 1))
arousal = np.asarray([item for sublist in arousal for item in sublist]).reshape((49, 1))
query_images = np.tile(images, (49, 1, 1, 1))
size_frame = (8, 9)
z, G = self.session.run(
[self.z, self.G],
feed_dict={
self.input_image: query_images,
self.valence: valence,
self.arousal: arousal
}
)
save_output(
input_image=images,
output=G,
path=os.path.join(test_dir, name),
image_value_range = image_value_range, size_frame = size_frame
)
def fill_up_equally(self, X):
# print ("Value:", X[0])
# print ("Value:", X[0].split("s"))
# input("here")
sorted_samples = [[x for x in X if int(x.split('__')[1]) == r] for r in range(8)]
amounts = [len(x) for x in sorted_samples]
differences = [max(amounts) - a for a in amounts]
for i, d in enumerate(differences):
samples = sorted_samples[i]
added = [samples[x] for x in np.random.choice(range(len(samples)), d)]
sorted_samples[i] = sorted_samples[i] + added
sorted_samples_flat = [item for sublist in sorted_samples for item in sublist]
np.random.seed = 1234567
np.random.shuffle(sorted_samples_flat)
return sorted_samples_flat
class Logger(object):
def __init__(self, output_file):
self.terminal = sys.stdout
self.log = open(output_file, "a")
def write(self, message):
self.terminal.write(message)
if not self.log.closed:
self.log.write(message)
def close(self):
self.log.close()
def flush(self):
self.close()
# needed for python 3 compatibility
pass
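# Hedged usage sketch for the Logger class above: it tees everything written to stdout
# into a log file as well, so the intended pattern is to swap it in for sys.stdout before
# training starts. The helper below is illustrative only; the log path is a placeholder.
def _install_logger(log_path):
    sys.stdout = Logger(log_path)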
|
[
"tensorflow.compat.v1.losses.mean_squared_error",
"scipy.io.loadmat",
"tensorflow.zeros_like",
"tensorflow.compat.v1.get_variable_scope",
"PK_Utils.PK_subnetworks.generator",
"tensorflow.compat.v1.train.exponential_decay",
"PK_Utils.PK_vgg_face.face_embedding",
"numpy.arange",
"numpy.tile",
"tensorflow.Variable",
"os.path.join",
"PK_Utils.PK_subnetworks.d_prior",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.abs",
"tensorflow.compat.v1.placeholder",
"os.path.exists",
"numpy.random.shuffle",
"tensorflow.train.get_checkpoint_state",
"numpy.repeat",
"os.path.basename",
"PK_Utils.PK_subnetworks.encoder",
"numpy.asarray",
"tensorflow.compat.v1.train.Saver",
"PK_Utils.PK_subnetworks.d_img",
"tensorflow.ones_like",
"tensorflow.compat.v1.trainable_variables",
"os.listdir",
"numpy.random.uniform",
"PK_Utils.PK_subnetworks.d_em",
"os.makedirs",
"tensorflow.device",
"time.time",
"tensorflow.compat.v1.train.AdamOptimizer",
"numpy.array",
"numpy.sqrt"
] |
[((779, 794), 'scipy.io.loadmat', 'loadmat', (['vggMat'], {}), '(vggMat)\n', (786, 794), False, 'from scipy.io import loadmat, savemat\n'), ((1015, 1118), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[size_batch, size_image, size_image, 3]'], {'name': '"""input_images"""'}), "(tf.float32, [size_batch, size_image, size_image, 3\n ], name='input_images')\n", (1039, 1118), True, 'import tensorflow as tf\n'), ((1184, 1260), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[size_batch, 1]'], {'name': '"""valence_labels"""'}), "(tf.float32, [size_batch, 1], name='valence_labels')\n", (1208, 1260), True, 'import tensorflow as tf\n'), ((1339, 1415), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[size_batch, 1]'], {'name': '"""arousal_labels"""'}), "(tf.float32, [size_batch, 1], name='arousal_labels')\n", (1363, 1415), True, 'import tensorflow as tf\n'), ((1486, 1573), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[size_batch, num_z_channels]'], {'name': '"""z_prior"""'}), "(tf.float32, [size_batch, num_z_channels], name=\n 'z_prior')\n", (1510, 1573), True, 'import tensorflow as tf\n'), ((13121, 13150), 'numpy.random.shuffle', 'np.random.shuffle', (['file_names'], {}), '(file_names)\n', (13138, 13150), True, 'import numpy as np\n'), ((27272, 27308), 'os.path.join', 'os.path.join', (['save_dir', '"""checkpoint"""'], {}), "(save_dir, 'checkpoint')\n", (27284, 27308), False, 'import os\n'), ((27627, 27663), 'os.path.join', 'os.path.join', (['save_dir', '"""checkpoint"""'], {}), "(save_dir, 'checkpoint')\n", (27639, 27663), False, 'import os\n'), ((27686, 27731), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (27715, 27731), True, 'import tensorflow as tf\n'), ((28109, 28142), 'os.path.join', 'os.path.join', (['save_dir', '"""samples"""'], {}), "(save_dir, 'samples')\n", (28121, 28142), False, 'import os\n'), ((29044, 29080), 'os.path.join', 'os.path.join', (['save_dir', '"""validation"""'], {}), "(save_dir, 'validation')\n", (29056, 29080), False, 'import os\n'), ((29215, 29242), 'os.path.join', 'os.path.join', (['val_dir', 'name'], {}), '(val_dir, name)\n', (29227, 29242), False, 'import os\n'), ((31849, 31887), 'numpy.random.shuffle', 'np.random.shuffle', (['sorted_samples_flat'], {}), '(sorted_samples_flat)\n', (31866, 31887), True, 'import numpy as np\n'), ((8953, 8987), 'tensorflow.compat.v1.trainable_variables', 'tf.compat.v1.trainable_variables', ([], {}), '()\n', (8985, 8987), True, 'import tensorflow as tf\n'), ((12022, 12062), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (12046, 12062), True, 'import tensorflow as tf\n'), ((13473, 13490), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (13482, 13490), True, 'import tensorflow as tf\n'), ((13583, 13768), 'tensorflow.compat.v1.train.exponential_decay', 'tf.compat.v1.train.exponential_decay', ([], {'learning_rate': 'learning_rate', 'global_step': 'self.EG_global_step', 'decay_steps': '(size_data / size_batch * 2)', 'decay_rate': 'decay_rate', 'staircase': '(True)'}), '(learning_rate=learning_rate,\n global_step=self.EG_global_step, decay_steps=size_data / size_batch * 2,\n decay_rate=decay_rate, staircase=True)\n', (13619, 13768), True, 'import tensorflow as tf\n'), ((27324, 27354), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), 
'(checkpoint_dir)\n', (27338, 27354), False, 'import os\n'), ((27368, 27395), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (27379, 27395), False, 'import os\n'), ((27825, 27876), 'os.path.basename', 'os.path.basename', (['checkpoints.model_checkpoint_path'], {}), '(checkpoints.model_checkpoint_path)\n', (27841, 27876), False, 'import os\n'), ((28158, 28184), 'os.path.exists', 'os.path.exists', (['sample_dir'], {}), '(sample_dir)\n', (28172, 28184), False, 'import os\n'), ((28198, 28221), 'os.makedirs', 'os.makedirs', (['sample_dir'], {}), '(sample_dir)\n', (28209, 28221), False, 'import os\n'), ((29096, 29119), 'os.path.exists', 'os.path.exists', (['val_dir'], {}), '(val_dir)\n', (29110, 29119), False, 'import os\n'), ((29133, 29153), 'os.makedirs', 'os.makedirs', (['val_dir'], {}), '(val_dir)\n', (29144, 29153), False, 'import os\n'), ((29258, 29282), 'os.path.exists', 'os.path.exists', (['name_dir'], {}), '(name_dir)\n', (29272, 29282), False, 'import os\n'), ((29296, 29317), 'os.makedirs', 'os.makedirs', (['name_dir'], {}), '(name_dir)\n', (29307, 29317), False, 'import os\n'), ((29708, 29739), 'numpy.arange', 'np.arange', (['(0.75)', '(-0.751)', '(-0.375)'], {}), '(0.75, -0.751, -0.375)\n', (29717, 29739), True, 'import numpy as np\n'), ((30122, 30152), 'numpy.tile', 'np.tile', (['images', '(25, 1, 1, 1)'], {}), '(images, (25, 1, 1, 1))\n', (30129, 30152), True, 'import numpy as np\n'), ((1887, 1920), 'tensorflow.compat.v1.get_variable_scope', 'tf.compat.v1.get_variable_scope', ([], {}), '()\n', (1918, 1920), True, 'import tensorflow as tf\n'), ((1987, 2004), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (1996, 2004), True, 'import tensorflow as tf\n'), ((2244, 2269), 'PK_Utils.PK_subnetworks.encoder', 'encoder', (['self.input_image'], {}), '(self.input_image)\n', (2251, 2269), False, 'from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em\n'), ((2367, 2428), 'PK_Utils.PK_subnetworks.generator', 'generator', (['self.z'], {'valence': 'self.valence', 'arousal': 'self.arousal'}), '(self.z, valence=self.valence, arousal=self.arousal)\n', (2376, 2428), False, 'from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em\n'), ((2576, 2591), 'PK_Utils.PK_subnetworks.d_prior', 'd_prior', (['self.z'], {}), '(self.z)\n', (2583, 2591), False, 'from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em\n'), ((2681, 2724), 'PK_Utils.PK_subnetworks.d_prior', 'd_prior', (['self.z_prior'], {'reuse_variables': '(True)'}), '(self.z_prior, reuse_variables=True)\n', (2688, 2724), False, 'from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em\n'), ((2908, 2965), 'PK_Utils.PK_subnetworks.d_img', 'd_img', (['self.G'], {'valence': 'self.valence', 'arousal': 'self.arousal'}), '(self.G, valence=self.valence, arousal=self.arousal)\n', (2913, 2965), False, 'from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em\n'), ((3221, 3314), 'PK_Utils.PK_subnetworks.d_img', 'd_img', (['self.input_image'], {'valence': 'self.valence', 'arousal': 'self.arousal', 'reuse_variables': '(True)'}), '(self.input_image, valence=self.valence, arousal=self.arousal,\n reuse_variables=True)\n', (3226, 3314), False, 'from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em\n'), ((4503, 4553), 'PK_Utils.PK_vgg_face.face_embedding', 'face_embedding', (['self.vgg_weights', 'self.input_image'], {}), '(self.vgg_weights, self.input_image)\n', (4517, 4553), False, 'from 
PK_Utils.PK_vgg_face import face_embedding\n'), ((4641, 4681), 'PK_Utils.PK_vgg_face.face_embedding', 'face_embedding', (['self.vgg_weights', 'self.G'], {}), '(self.vgg_weights, self.G)\n', (4655, 4681), False, 'from PK_Utils.PK_vgg_face import face_embedding\n'), ((6482, 6583), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.compat.v1.losses.mean_squared_error', ([], {'predictions': 'self.D_em_arousal_logits', 'labels': 'self.arousal'}), '(predictions=self.D_em_arousal_logits,\n labels=self.arousal)\n', (6520, 6583), True, 'import tensorflow as tf\n'), ((6623, 6724), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.compat.v1.losses.mean_squared_error', ([], {'predictions': 'self.D_em_valence_logits', 'labels': 'self.valence'}), '(predictions=self.D_em_valence_logits,\n labels=self.valence)\n', (6661, 6724), True, 'import tensorflow as tf\n'), ((7920, 7953), 'tensorflow.abs', 'tf.abs', (['(self.input_image - self.G)'], {}), '(self.input_image - self.G)\n', (7926, 7953), True, 'import tensorflow as tf\n'), ((12549, 12582), 'tensorflow.compat.v1.get_variable_scope', 'tf.compat.v1.get_variable_scope', ([], {}), '()\n', (12580, 12582), True, 'import tensorflow as tf\n'), ((12602, 12628), 'tensorflow.device', 'tf.device', (['"""/device:CPU:0"""'], {}), "('/device:CPU:0')\n", (12611, 12628), True, 'import tensorflow as tf\n'), ((12668, 12719), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (12679, 12719), True, 'import tensorflow as tf\n'), ((13000, 13021), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (13010, 13021), False, 'import os\n'), ((13245, 13272), 'os.listdir', 'os.listdir', (['validation_path'], {}), '(validation_path)\n', (13255, 13272), False, 'import os\n'), ((17117, 17133), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (17125, 17133), True, 'import numpy as np\n'), ((17555, 17598), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (17596, 17598), True, 'import tensorflow as tf\n'), ((17964, 17993), 'numpy.random.shuffle', 'np.random.shuffle', (['file_names'], {}), '(file_names)\n', (17981, 17993), True, 'import numpy as np\n'), ((18072, 18083), 'time.time', 'time.time', ([], {}), '()\n', (18081, 18083), False, 'import time\n'), ((27474, 27508), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'name'], {}), '(checkpoint_dir, name)\n', (27486, 27508), False, 'import os\n'), ((27922, 27968), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'checkpoints_name'], {}), '(checkpoint_dir, checkpoints_name)\n', (27934, 27968), False, 'import os\n'), ((28479, 28498), 'numpy.sqrt', 'np.sqrt', (['size_batch'], {}), '(size_batch)\n', (28486, 28498), True, 'import numpy as np\n'), ((28579, 28609), 'os.path.join', 'os.path.join', (['sample_dir', 'name'], {}), '(sample_dir, name)\n', (28591, 28609), False, 'import os\n'), ((28801, 28838), 'os.path.join', 'os.path.join', (['sample_dir', '"""input.png"""'], {}), "(sample_dir, 'input.png')\n", (28813, 28838), False, 'import os\n'), ((29900, 29931), 'numpy.arange', 'np.arange', (['(0.75)', '(-0.751)', '(-0.375)'], {}), '(0.75, -0.751, -0.375)\n', (29909, 29931), True, 'import numpy as np\n'), ((30237, 30267), 'numpy.arange', 'np.arange', (['(0.75)', '(-0.751)', '(-0.25)'], {}), '(0.75, -0.751, -0.25)\n', (30246, 30267), True, 'import numpy as np\n'), ((30649, 30679), 'numpy.tile', 'np.tile', (['images', '(49, 1, 1, 1)'], {}), '(images, 
(49, 1, 1, 1))\n', (30656, 30679), True, 'import numpy as np\n'), ((31041, 31069), 'os.path.join', 'os.path.join', (['test_dir', 'name'], {}), '(test_dir, name)\n', (31053, 31069), False, 'import os\n'), ((3776, 3810), 'PK_Utils.PK_subnetworks.d_em', 'd_em', (['self.z'], {'reuse_variables': '(True)'}), '(self.z, reuse_variables=True)\n', (3780, 3810), False, 'from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em\n'), ((13936, 14013), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': 'EG_learning_rate', 'beta1': 'beta1'}), '(learning_rate=EG_learning_rate, beta1=beta1)\n', (13968, 14013), True, 'import tensorflow as tf\n'), ((25690, 25701), 'time.time', 'time.time', ([], {}), '()\n', (25699, 25701), False, 'import time\n'), ((26772, 26802), 'os.path.join', 'os.path.join', (['save_dir', '"""test"""'], {}), "(save_dir, 'test')\n", (26784, 26802), False, 'import os\n'), ((29762, 29783), 'numpy.repeat', 'np.repeat', (['valence', '(5)'], {}), '(valence, 5)\n', (29771, 29783), True, 'import numpy as np\n'), ((29955, 29976), 'numpy.repeat', 'np.repeat', (['arousal', '(5)'], {}), '(arousal, 5)\n', (29964, 29976), True, 'import numpy as np\n'), ((30016, 30077), 'numpy.asarray', 'np.asarray', (['[item for sublist in arousal for item in sublist]'], {}), '([item for sublist in arousal for item in sublist])\n', (30026, 30077), True, 'import numpy as np\n'), ((30428, 30458), 'numpy.arange', 'np.arange', (['(0.75)', '(-0.751)', '(-0.25)'], {}), '(0.75, -0.751, -0.25)\n', (30437, 30458), True, 'import numpy as np\n'), ((4725, 4760), 'tensorflow.abs', 'tf.abs', (['(real_conv1_2 - fake_conv1_2)'], {}), '(real_conv1_2 - fake_conv1_2)\n', (4731, 4760), True, 'import tensorflow as tf\n'), ((4818, 4853), 'tensorflow.abs', 'tf.abs', (['(real_conv2_2 - fake_conv2_2)'], {}), '(real_conv2_2 - fake_conv2_2)\n', (4824, 4853), True, 'import tensorflow as tf\n'), ((4911, 4946), 'tensorflow.abs', 'tf.abs', (['(real_conv3_2 - fake_conv3_2)'], {}), '(real_conv3_2 - fake_conv3_2)\n', (4917, 4946), True, 'import tensorflow as tf\n'), ((5002, 5037), 'tensorflow.abs', 'tf.abs', (['(real_conv4_2 - fake_conv4_2)'], {}), '(real_conv4_2 - fake_conv4_2)\n', (5008, 5037), True, 'import tensorflow as tf\n'), ((5093, 5128), 'tensorflow.abs', 'tf.abs', (['(real_conv5_2 - fake_conv5_2)'], {}), '(real_conv5_2 - fake_conv5_2)\n', (5099, 5128), True, 'import tensorflow as tf\n'), ((5415, 5443), 'tensorflow.ones_like', 'tf.ones_like', (['self.Dz_logits'], {}), '(self.Dz_logits)\n', (5427, 5443), True, 'import tensorflow as tf\n'), ((5662, 5697), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.Dz_prior_logits'], {}), '(self.Dz_prior_logits)\n', (5675, 5697), True, 'import tensorflow as tf\n'), ((6101, 6140), 'tensorflow.ones_like', 'tf.ones_like', (['self.Dimg_Original_logits'], {}), '(self.Dimg_Original_logits)\n', (6113, 6140), True, 'import tensorflow as tf\n'), ((6298, 6331), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.Dimg_G_logits'], {}), '(self.Dimg_G_logits)\n', (6311, 6331), True, 'import tensorflow as tf\n'), ((18515, 18530), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (18523, 18530), True, 'import numpy as np\n'), ((18843, 18939), 'numpy.random.uniform', 'np.random.uniform', (['image_value_range[0]', 'image_value_range[-1]', '[size_batch, num_z_channels]'], {}), '(image_value_range[0], image_value_range[-1], [size_batch,\n num_z_channels])\n', (18860, 18939), True, 'import numpy as np\n'), ((26830, 26854), 'os.path.exists', 
'os.path.exists', (['test_dir'], {}), '(test_dir)\n', (26844, 26854), False, 'import os\n'), ((26880, 26901), 'os.makedirs', 'os.makedirs', (['test_dir'], {}), '(test_dir)\n', (26891, 26901), False, 'import os\n'), ((30290, 30311), 'numpy.repeat', 'np.repeat', (['valence', '(7)'], {}), '(valence, 7)\n', (30299, 30311), True, 'import numpy as np\n'), ((30482, 30503), 'numpy.repeat', 'np.repeat', (['arousal', '(7)'], {}), '(arousal, 7)\n', (30491, 30503), True, 'import numpy as np\n'), ((30543, 30604), 'numpy.asarray', 'np.asarray', (['[item for sublist in arousal for item in sublist]'], {}), '([item for sublist in arousal for item in sublist])\n', (30553, 30604), True, 'import numpy as np\n')]
|
"""
==================================
Plotting two simple sine functions
==================================
A simple example plotting a fit of two sine functions.
"""
import numpy
import matplotlib.pyplot as plt
from pyearth import Earth
# Create some fake data
numpy.random.seed(2)
m = 10000
n = 10
X = 80 * numpy.random.uniform(size=(m, n)) - 40
y1 = 100 * \
numpy.abs(numpy.sin((X[:, 6]) / 10) - 4.0) + \
10 * numpy.random.normal(size=m)
y2 = 100 * \
numpy.abs(numpy.sin((X[:, 6]) / 2) - 8.0) + \
5 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth(max_degree=3, minspan_alpha=.5)
y_mix = numpy.concatenate((y1[:, numpy.newaxis], y2[:, numpy.newaxis]), axis=1)
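# Note (editorial): y_mix stacks the two noisy responses column-wise, so Earth is
# fit as a two-output model and model.predict(X) returns one column per response.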
model.fit(X, y_mix)
# Print the model
print(model.trace())
print(model.summary())
# Plot the model
y_hat = model.predict(X)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.plot(X[:, 6], y_mix[:, 0], 'r.')
ax.plot(X[:, 6], model.predict(X)[:, 0], 'b.')
ax = fig.add_subplot(1, 2, 2)
ax.plot(X[:, 6], y_mix[:, 1], 'r.')
ax.plot(X[:, 6], model.predict(X)[:, 1], 'b.')
plt.show()
|
[
"numpy.random.uniform",
"numpy.random.seed",
"matplotlib.pyplot.show",
"pyearth.Earth",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.random.normal",
"numpy.concatenate"
] |
[((266, 286), 'numpy.random.seed', 'numpy.random.seed', (['(2)'], {}), '(2)\n', (283, 286), False, 'import numpy\n'), ((583, 621), 'pyearth.Earth', 'Earth', ([], {'max_degree': '(3)', 'minspan_alpha': '(0.5)'}), '(max_degree=3, minspan_alpha=0.5)\n', (588, 621), False, 'from pyearth import Earth\n'), ((629, 700), 'numpy.concatenate', 'numpy.concatenate', (['(y1[:, numpy.newaxis], y2[:, numpy.newaxis])'], {'axis': '(1)'}), '((y1[:, numpy.newaxis], y2[:, numpy.newaxis]), axis=1)\n', (646, 700), False, 'import numpy\n'), ((834, 846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (844, 846), True, 'import matplotlib.pyplot as plt\n'), ((1075, 1085), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1083, 1085), True, 'import matplotlib.pyplot as plt\n'), ((313, 346), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '(m, n)'}), '(size=(m, n))\n', (333, 346), False, 'import numpy\n'), ((425, 452), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': 'm'}), '(size=m)\n', (444, 452), False, 'import numpy\n'), ((525, 552), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': 'm'}), '(size=m)\n', (544, 552), False, 'import numpy\n'), ((379, 402), 'numpy.sin', 'numpy.sin', (['(X[:, 6] / 10)'], {}), '(X[:, 6] / 10)\n', (388, 402), False, 'import numpy\n'), ((481, 503), 'numpy.sin', 'numpy.sin', (['(X[:, 6] / 2)'], {}), '(X[:, 6] / 2)\n', (490, 503), False, 'import numpy\n')]
|
"""Defines hooks that can run during training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lasagne
import numpy as np
from sklearn import metrics
class LoggingHook(object):
"""This hook writes information to a log file."""
def __init__(self, logger):
"""Initializes a new instance of the LoggingHook class.
Args:
logger: A logger instance.
"""
self._logger = logger
def update(self, **kwargs):
"""Executes the hook.
Args:
**kwargs: Optimizer state dictionary.
"""
self._logger.log(
key="status",
message="Log at iteration %d" % kwargs["update_counter"]
)
self._logger.log(
key="update_counter",
message=kwargs["update_counter"]
)
self._logger.log(
key="update_runtime",
message=kwargs["runtime"]
)
self._logger.log(
key="losses",
message=np.asarray(kwargs["losses"])
)
class SnapshotHook(object):
"""Hook for storing snapshots of the network's weights."""
def __init__(self, filename, network, interval):
"""Initializes a new instance of the SnapshotHook class.
Args:
filename: The base filename of the model.
network: The network instance to store.
interval: The snapshot interval.
"""
self._filename = filename
self._network = network
self._interval = interval
def update(self, **kwargs):
"""Executed the hook.
Args:
**kwargs: The optimizer dictionary.
"""
# Run the hook now?
if kwargs["update_counter"] % self._interval == 0:
# Yes
np.savez(
"%s_snapshot_%d.npz" % (
self._filename, kwargs["update_counter"]),
*lasagne.layers.get_all_param_values(self._network))
class SegmentationValidationHook(object):
"""Performs a validation run for semantic segmentation."""
def __init__(self, val_fn, data_provider, logger, interval=300,
num_classes=19):
"""Initializes a new instance of the SegmentationValidationHook class.
Args:
val_fn: A function that returns the predictions for each image and
a list of losses.
data_provider: A chianti data provider.
logger: A logger instance.
            interval: The validation interval.
            num_classes: The number of classes used to build the confusion matrix.
"""
self._val_fn = val_fn
self._data_provider = data_provider
self._logger = logger
self._interval = interval
self._num_classes = num_classes
def update(self, **kwargs):
"""Runs the validation hook."""
update_now = kwargs["update_counter"] % self._interval == 0
if update_now and kwargs["update_counter"] > 0:
self._logger.log(
key="validation_checkpoint",
message=kwargs["update_counter"]
)
self._logger.log(
key="status",
message="-> Start validation run"
)
# Initialize the confusion matrix
conf_matrix = np.zeros(
(self._num_classes, self._num_classes)).astype('int64')
accumulated_loss = 0
self._data_provider.reset()
for batch_counter in range(self._data_provider.get_num_batches()):
self._logger.log(
key="status",
message="--> Validate batch %d/%d" % (
batch_counter + 1,
self._data_provider.get_num_batches()))
batch = self._data_provider.next()
images = batch[0]
targets = batch[1]
predictions, loss = self._val_fn(images, targets)
accumulated_loss += loss
# Mark the don't care predictions
# Flatten the predictions and targets
flat_predictions = predictions.flatten()
non_void_pixels = (np.max(targets, axis=1) != 0.0).flatten()
flat_targets = np.argmax(targets, axis=1).flatten()
# Select the non-don't cares
flat_targets = flat_targets[non_void_pixels]
flat_predictions = flat_predictions[non_void_pixels]
conf_matrix += metrics.confusion_matrix(
flat_targets,
flat_predictions,
labels=np.arange(self._num_classes, dtype='int64'))
accumulated_loss /= self._data_provider.get_num_batches()
self._logger.log(
key="conf_matrix",
message=conf_matrix
)
self._logger.log(
key="validation_loss",
message=accumulated_loss
)
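# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The optimizer loop
# and the names logger, network, val_fn, val_provider and training_updates are
# hypothetical; the sketch only illustrates the keyword arguments each hook's
# update() expects (see the kwargs accessed above).
#
#   hooks = [
#       LoggingHook(logger),
#       SnapshotHook("model", network, interval=1000),
#       SegmentationValidationHook(val_fn, val_provider, logger, interval=300),
#   ]
#   for update_counter, (losses, runtime) in enumerate(training_updates):
#       for hook in hooks:
#           hook.update(update_counter=update_counter,
#                       runtime=runtime,
#                       losses=losses)
# ---------------------------------------------------------------------------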
|
[
"lasagne.layers.get_all_param_values",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros",
"numpy.max",
"numpy.arange"
] |
[((1068, 1096), 'numpy.asarray', 'np.asarray', (["kwargs['losses']"], {}), "(kwargs['losses'])\n", (1078, 1096), True, 'import numpy as np\n'), ((1983, 2033), 'lasagne.layers.get_all_param_values', 'lasagne.layers.get_all_param_values', (['self._network'], {}), '(self._network)\n', (2018, 2033), False, 'import lasagne\n'), ((3309, 3357), 'numpy.zeros', 'np.zeros', (['(self._num_classes, self._num_classes)'], {}), '((self._num_classes, self._num_classes))\n', (3317, 3357), True, 'import numpy as np\n'), ((4278, 4304), 'numpy.argmax', 'np.argmax', (['targets'], {'axis': '(1)'}), '(targets, axis=1)\n', (4287, 4304), True, 'import numpy as np\n'), ((4648, 4691), 'numpy.arange', 'np.arange', (['self._num_classes'], {'dtype': '"""int64"""'}), "(self._num_classes, dtype='int64')\n", (4657, 4691), True, 'import numpy as np\n'), ((4205, 4228), 'numpy.max', 'np.max', (['targets'], {'axis': '(1)'}), '(targets, axis=1)\n', (4211, 4228), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the Masci-tools package. #
# (Material science tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/masci-tools. #
# For further information on the license, see the LICENSE.txt file. #
# For further information please visit http://judft.de/. #
# #
###############################################################################
"""
This module contains utility and functions to work with Green's functions calculated
and written to ``greensf.hdf`` files by fleur
"""
from collections import namedtuple
from itertools import groupby
import numpy as np
import h5py
from masci_tools.io.parsers.hdf5 import HDF5Reader
from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation
from masci_tools.util.constants import HTR_TO_EV
GreensfElement = namedtuple('GreensfElement',
['l', 'lp', 'atomType', 'atomTypep', 'sphavg', 'onsite', 'contour', 'nLO', 'atomDiff'])
def _get_sphavg_recipe(group_name, index, contour):
"""
Get the HDF5Reader recipe for reading in a spherically averaged Green's function element
:param group_name: str of the group containing the Green's function elements
:param index: integer index of the element to read in (indexing starts at 1)
:param contour: integer index of the energy contour to read in (indexing starts at 1)
:returns: dict with the recipe reading all the necessary information from the ``greensf.hdf`` file
"""
return {
'datasets': {
'sphavg': {
'h5path':
f'/{group_name}/element-{index}/sphavg',
'transforms': [
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
Transformation(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})
]
},
'energy_points': {
'h5path':
f'/EnergyContours/contour-{contour}/ContourPoints',
'transforms': [
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
AttribTransformation(name='shift_by_attribute',
attrib_name='fermi_energy',
args=(),
kwargs={
'negative': True,
}),
Transformation(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})
]
},
'energy_weights': {
'h5path':
f'/EnergyContours/contour-{contour}/IntegrationWeights',
'transforms': [
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
Transformation(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})
]
}
},
'attributes': {
'fermi_energy': {
'h5path':
'/general',
'description':
'fermi_energy of the system',
'transforms': [
Transformation(name='get_attribute', args=('FermiEnergy',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={})
]
},
'spins': {
'h5path':
'/general',
'description':
'number of spins',
'transforms': [
Transformation(name='get_attribute', args=('spins',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={})
]
},
'mperp': {
'h5path':
'/general',
'description':
'Switch whether spin offdiagonal elements are included',
'transforms': [
Transformation(name='get_attribute', args=('mperp',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={}),
Transformation(name='apply_lambda', args=(lambda x: x == 1,), kwargs={})
]
},
'lmax': {
'h5path':
f'/{group_name}',
'description':
'Maximum l considered (Determines size of the matrix)',
'transforms': [
Transformation(name='get_attribute', args=('maxl',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={})
]
},
}
}
def _get_radial_recipe(group_name, index, contour, nlo=None):
"""
Get the HDF5Reader recipe for reading in a radial Green's function element
:param group_name: str of the group containing the Green's function elements
:param index: integer index of the element to read in (indexing starts at 1)
    :param contour: integer index of the energy contour to read in (indexing starts at 1)
    :param nlo: optional number of local orbitals of the element (accepted for compatibility
                with the call in ``_read_gf_element``; currently unused in this recipe)
:returns: dict with the recipe reading all the necessary information from the ``greensf.hdf`` file
"""
recipe = _get_sphavg_recipe(group_name, index, contour)
recipe['datasets'].pop('sphavg')
recipe['datasets']['coefficients'] = {
'h5path':
f'/{group_name}/element-{index}',
'transforms': [
Transformation(name='get_all_child_datasets',
args=(),
kwargs={'ignore': ['scalarProducts', 'LOContribution']}),
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
Transformation(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})
],
'unpack_dict':
True
}
recipe['attributes']['scalarProducts'] = {
'h5path': f'/{group_name}/element-{index}/scalarProducts',
'transforms': [Transformation(name='get_all_child_datasets', args=(), kwargs={})]
}
recipe['attributes']['radialFunctions'] = {
'h5path': '/RadialFunctions',
'transforms': [Transformation(name='get_all_child_datasets', args=(), kwargs={})]
}
return recipe
def _get_greensf_group_name(hdffile):
"""
Return the name of the group containing the Green's function elements
:param hdffile: h5py.File of the greensf.hdf file
:returns: str of the group name containing the Green's Function elements
"""
if '/GreensFunctionElements' in hdffile:
return 'GreensFunctionElements'
elif '/Hubbard1Elements' in hdffile:
return 'Hubbard1Elements'
def _read_element_header(hdffile, index):
"""
    Read the attributes of the given green's function element
:param hdffile: h5py.File of the greensf.hdf file
:param index: integer index of the element to read in (indexing starts at 1)
:returns: :py:class:`GreensfElement` corresponding to the read in attributes
"""
group_name = _get_greensf_group_name(hdffile)
element = hdffile.get(f'/{group_name}/element-{index}')
l = element.attrs['l'][0]
lp = element.attrs['lp'][0]
atomType = element.attrs['atomType'][0]
atomTypep = element.attrs['atomTypep'][0]
sphavg = element.attrs['l_sphavg'][0] == 1
onsite = element.attrs['l_onsite'][0] == 1
contour = element.attrs['iContour'][0]
atomDiff = np.array(element.attrs['atomDiff'])
atomDiff[abs(atomDiff) < 1e-12] = 0.0
nLO = element.attrs['numLOs'][0]
return GreensfElement(l, lp, atomType, atomTypep, sphavg, onsite, contour, nLO, atomDiff)
def _read_gf_element(file, index):
"""
Read the information needed for a given Green's function element form a ``greensf.hdf``
file
:param file: filepath or handle to be read
:param index: integer index of the element to read in (indexing starts at 1)
:returns: tuple of the information containing the :py:class:`GreensfElement` for the element
and the datasets and attributes dict produced by the corresponding
:py:class:`~masci_tools.io.parsers.hdf5.HDF5Reader`
"""
with HDF5Reader(file) as h5reader:
gf_element = _read_element_header(h5reader._h5_file, index)
group_name = _get_greensf_group_name(h5reader._h5_file)
if gf_element.sphavg:
recipe = _get_sphavg_recipe(group_name, index, gf_element.contour)
else:
recipe = _get_radial_recipe(group_name, index, gf_element.contour, nlo=gf_element.nLO)
data, attributes = h5reader.read(recipe=recipe)
return gf_element, data, attributes
class GreensFunction:
"""
Class for working with Green's functions calculated by the fleur code
:param element: :py:class:`GreensfElement` namedtuple containing the information about the element
:param data: datasets dict produced by one of the hdf recipes for reading Green's functions
:param attributes: attributes dict produced by one of the hdf recipes for reading Green's functions
"""
def __init__(self, element, data, attributes):
self.element = element
self.points = data.pop('energy_points')
self.weights = data.pop('energy_weights')
self.data = data
if not self.sphavg:
self.scalar_products = attributes['scalarProducts']
self.radial_functions = attributes['radialFunctions']
raise NotImplementedError("Radial Green's functions not yet implemented")
self.spins = attributes['spins']
self.mperp = attributes['mperp']
self.lmax = attributes['lmax']
@classmethod
def fromFile(cls, file, index):
"""
Classmethod for creating a :py:class:`GreensFunction` instance directly from a hdf file
:param file: path or opened file handle to a greensf.hdf file
:param index: int index of the element to read in
"""
element, data, attributes = _read_gf_element(file, index)
return cls(element, data, attributes)
def __getattr__(self, attr):
"""
This __getattr__ method redirects lookups of field names of the stored :py:class:`GreensfElement`
to return the value from the namedtuple
:param attr: attribute to look up
:returns: value of the attribute if it is a field name of :py:class:`GreensfElement`
"""
if attr in GreensfElement._fields:
return self.element._asdict()[attr]
raise AttributeError(f'{self.__class__.__name__!r} object has no attribute {attr!r}')
@staticmethod
def to_m_index(m):
"""
        Convert a magnetic quantum number m between -3 and 3 to the
        array index m + 3 (between 0 and 6) for easier indexing
:param m: int magnetic quantum number to convert
:returns: converted magnetic quantum number
"""
if abs(m) > 3:
raise ValueError('Invalid magnetic quantum number (>3)')
return m + 3
@staticmethod
def to_spin_indices(spin):
"""
        Convert a spin index (0 to 3) to the corresponding pair of
        spin indices (0 or 1): 0 -> (0, 0), 1 -> (1, 1), 2 -> (1, 0), 3 -> (0, 1)
:param spin: int spin index to convert
:returns: tuple of spin indices
"""
if spin < 0 or spin > 3:
raise ValueError('Invalid spin index')
if spin < 2:
spin1 = spin
spin2 = spin
elif spin == 2:
spin1 = 1
spin2 = 0
else:
spin1 = 0
spin2 = 1
return spin1, spin2
@property
def nspins(self):
"""
Return the number of spins of the current element.
If mperp is True for the element it is 4 otherwise it
is determined by the spins attribute
"""
if self.mperp:
return 4
else:
return self.spins
def get_scalar_product_by_key(self, key, spin):
spin1, spin2 = self.to_spin_indices(spin)
return self.scalar_products[f'{key}n'][spin1, spin2]
def __str__(self):
"""
String representation of the :py:class:`GreensFunction`. Chosen to be the
str representation of the stored :py:class:`GreensfElement` instance.
"""
return str(self.element)
def energy_dependence(self, *, m=None, mp=None, spin, imag=True, both_contours=False):
"""
Select data with energy dependence
:param m: optional integer magnetic quantum number between -l and l
:param mp: optional integer magnetic quantum number between -lp and lp
:param spin: optional integer spin between 1 and nspins
        :param both_contours: bool, if True the data for both energy contours is returned
                              separately instead of being combined
:param imag: bool if True and both_contours is False the imaginary part 1/2i(G(z)-G(z^*)) is returned
otherwise the real part 1/2(G(z)+G(z^*))
:returns: numpy array with the selected data
"""
if spin is not None:
spin -= 1
spin_index = min(spin, 2 if self.mperp else self.nspins - 1)
else:
spin_index = slice(0, min(3, self.nspins))
if m is not None:
m_index = self.to_m_index(m)
else:
m_index = slice(self.lmax - self.l, self.lmax + self.l + 1, 1)
if mp is not None:
mp_index = self.to_m_index(mp)
else:
            mp_index = slice(self.lmax - self.lp, self.lmax + self.lp + 1, 1)
gf = self.data['sphavg'][:, spin_index, mp_index, m_index, :].T
if both_contours:
return gf
else:
if imag:
data = -1 / (2 * np.pi * 1j) * (gf[..., 0] - gf[..., 1])
else:
data = -1 / (2 * np.pi) * (gf[..., 0] + gf[..., 1])
return data.real
def trace_energy_dependence(self, spin, imag=True):
"""
Select trace of data with energy dependence
:param spin: integer spin between 1 and nspins
:param imag: bool if True the imaginary part 1/2i(G(z)-G(z^*)) is returned
otherwise the real part 1/2(G(z)+G(z^*))
:returns: numpy array with the selected and traced over data
"""
if self.l != self.lp:
raise ValueError('Trace only supported for l==lp')
data = np.zeros(self.points.shape)
for m in range(-self.l, self.l + 1):
data += self.energy_dependence(m=m, mp=m, spin=spin, imag=imag)
return data
class colors:
"""
Color strings for coloring terminal output
You may need to change color settings in iPython
"""
red = '\033[31m'
endc = '\033[m'
green = '\033[32m'
def printElements(elements, index=None, mark=None):
"""
Print the given list of :py:class:`GreensfElement` in a nice table
:param elements: list of :py:class:`GreensfElement` to be printed
:param index: optional list of indices to show instead of the default index in the list
:param mark: optional list of int with elements to emphasize with an arrow and color
"""
print('Index | l | lp | atom | atomp | sphavg | onsite | iContour | atomDiff |')
print('-----------------------------------------------------------------------------------------')
if index is None:
elem_iter = enumerate(elements)
else:
elem_iter = zip(index, elements)
for elem_index, element in elem_iter:
if mark is not None and elem_index + 1 in mark:
markStr = '<---'
color = colors.green
else:
markStr = ''
color = ''
atomdiff_str = np.array2string(element.atomDiff,
precision=2,
separator=',',
suppress_small=True,
sign=' ',
floatmode='fixed')
print(
color +
f'{elem_index+1:<7d}|{element.l:7d}|{element.lp:7d}|{element.atomType:7d}|{element.atomTypep:7d}|{str(element.sphavg):>8s}|{str(element.onsite):>8s}|{element.contour:10d}|{atomdiff_str}|{markStr}'
+ colors.endc)
def listElements(hdffile, show=False):
"""
    Find the green's function elements contained in the given ``greensf.hdf`` file
:param hdffile: filepath or file handle to a greensf.hdf file
:param show: bool if True the found elements are printed in a table
:returns: list of :py:class:`GreensfElement`
"""
with h5py.File(hdffile, 'r') as h5_file:
group_name = _get_greensf_group_name(h5_file)
num_elements = h5_file.get(group_name).attrs['NumElements'][0]
elements = []
for index in range(1, num_elements + 1):
elements.append(_read_element_header(h5_file, index))
if show:
print(f'These Elements are found in {hdffile}:')
printElements(elements)
return elements
def selectOnsite(hdffile, l, atomType, lp=None, show=True):
"""
Find the specified onsite element in the ``greensf.hdf`` file
:param hdffile: filepath or file handle to a greensf.hdf file
:param l: integer of the orbital quantum number
:param atomType: integer of the atom type
:param lp: optional integer of the second orbital quantum number (default equal to l)
:param show: bool if True the found elements are printed in a table and the selected ones are marked
:returns: list of indexes in the ``greensf.hdf`` file corresponding to the selected criteria
"""
if lp is None:
lp = l
elements = listElements(hdffile)
foundIndices = []
for index, elem in enumerate(elements):
if elem.l != l:
continue
if elem.lp != lp:
continue
if elem.atomType != atomType:
continue
if elem.atomTypep != atomType:
continue
if np.linalg.norm(elem.atomDiff) > 1e-12:
continue
foundIndices.append(index + 1)
if show:
printElements(elements, mark=foundIndices)
return foundIndices
def intersite_shells(hdffile, refAtom, return_greensf=True, show=False):
"""
Construct the green's function pairs to calculate the Jij exchange constants
for a given reference atom from a given ``greensf.hdf`` file
:param hdffile: filepath or file handle to a greensf.hdf file
    :param refAtom: integer of the atom to calculate the Jij's for (corresponds to the i)
    :param return_greensf: bool, if True an iterator yielding the green's functions
                           directly for calculations is returned instead of the indices
    :param show: if True the elements belonging to each shell are printed in a table
:returns: either list of tuples with distance and all indices of pairs in the shell
or flat iterator with distance and the two corresponding :py:class:`GreensFunction`
instances
"""
elements = listElements(hdffile)
distances = [round(np.linalg.norm(elem.atomDiff), 12) for elem in elements]
#sort the elements according to shells
index_sorted = sorted(range(len(elements)), key=lambda k: distances[k])
elements_sorted = [elements[index] for index in index_sorted]
jijPairs = []
for dist, shell in groupby(zip(index_sorted, elements_sorted), key=lambda k: distances[k[0]]):
if dist > 1e-12:
if show:
print(f'\nFound shell at distance: {dist}')
print('The following elements are present:')
shell_list = list(shell)
jijPairsShell = []
#Try to find gij gji pairs for Jij calculations
for indexij, elemij in shell_list:
for indexji, elemji in shell_list:
if elemij.contour != elemji.contour:
continue
if elemij.atomType != refAtom:
continue
if elemij.atomType != elemji.atomTypep:
continue
if elemij.atomTypep != elemji.atomType:
continue
if elemij.l != elemji.l:
continue
if elemij.lp != elemji.lp:
continue
if np.linalg.norm(elemij.atomDiff + elemji.atomDiff) > 1e-12:
continue
#here we have found a pair
#Plus 1 because the indexing starts at 1 in the hdf file
if (indexji + 1, indexij + 1) not in jijPairsShell or \
elemij.atomType == elemij.atomTypep:
jijPairsShell.append((indexij + 1, indexji + 1))
if len(jijPairsShell) > 0:
jijPairs.append((dist, jijPairsShell))
if show:
#print the elements in the shell
elem = [x[1] for x in shell_list]
index = [x[0] for x in shell_list]
printElements(elem, index=index)
def shell_iterator(shells):
for distance, pairs in shells:
for g1, g2 in pairs:
yield (distance,
GreensFunction.fromFile(hdffile, g1),\
GreensFunction.fromFile(hdffile, g2))
if return_greensf:
return shell_iterator(jijPairs)
else:
return jijPairs
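# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The file name,
# quantum numbers and atom indices below are hypothetical; the sketch only
# illustrates how the public helpers defined above fit together.
#
#   elements = listElements('greensf.hdf', show=True)
#   onsite_indices = selectOnsite('greensf.hdf', l=2, atomType=1)
#   gf = GreensFunction.fromFile('greensf.hdf', onsite_indices[0])
#   spectral = gf.trace_energy_dependence(spin=1)   # resolved on the energies in gf.points
#   for dist, g_ij, g_ji in intersite_shells('greensf.hdf', refAtom=1):
#       ...  # combine g_ij and g_ji to evaluate a Jij exchange constant
# ---------------------------------------------------------------------------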
|
[
"masci_tools.io.parsers.hdf5.reader.AttribTransformation",
"h5py.File",
"masci_tools.io.parsers.hdf5.HDF5Reader",
"masci_tools.io.parsers.hdf5.reader.Transformation",
"numpy.array2string",
"numpy.zeros",
"numpy.array",
"collections.namedtuple",
"numpy.linalg.norm"
] |
[((1342, 1462), 'collections.namedtuple', 'namedtuple', (['"""GreensfElement"""', "['l', 'lp', 'atomType', 'atomTypep', 'sphavg', 'onsite', 'contour', 'nLO',\n 'atomDiff']"], {}), "('GreensfElement', ['l', 'lp', 'atomType', 'atomTypep', 'sphavg',\n 'onsite', 'contour', 'nLO', 'atomDiff'])\n", (1352, 1462), False, 'from collections import namedtuple\n'), ((7957, 7992), 'numpy.array', 'np.array', (["element.attrs['atomDiff']"], {}), "(element.attrs['atomDiff'])\n", (7965, 7992), True, 'import numpy as np\n'), ((8704, 8720), 'masci_tools.io.parsers.hdf5.HDF5Reader', 'HDF5Reader', (['file'], {}), '(file)\n', (8714, 8720), False, 'from masci_tools.io.parsers.hdf5 import HDF5Reader\n'), ((14880, 14907), 'numpy.zeros', 'np.zeros', (['self.points.shape'], {}), '(self.points.shape)\n', (14888, 14907), True, 'import numpy as np\n'), ((16206, 16321), 'numpy.array2string', 'np.array2string', (['element.atomDiff'], {'precision': '(2)', 'separator': '""","""', 'suppress_small': '(True)', 'sign': '""" """', 'floatmode': '"""fixed"""'}), "(element.atomDiff, precision=2, separator=',',\n suppress_small=True, sign=' ', floatmode='fixed')\n", (16221, 16321), True, 'import numpy as np\n'), ((17121, 17144), 'h5py.File', 'h5py.File', (['hdffile', '"""r"""'], {}), "(hdffile, 'r')\n", (17130, 17144), False, 'import h5py\n'), ((5976, 6092), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_all_child_datasets"""', 'args': '()', 'kwargs': "{'ignore': ['scalarProducts', 'LOContribution']}"}), "(name='get_all_child_datasets', args=(), kwargs={'ignore': [\n 'scalarProducts', 'LOContribution']})\n", (5990, 6092), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6155, 6222), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (6169, 6222), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6236, 6310), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(1.0 / HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})\n", (6250, 6310), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6502, 6567), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_all_child_datasets"""', 'args': '()', 'kwargs': '{}'}), "(name='get_all_child_datasets', args=(), kwargs={})\n", (6516, 6567), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6685, 6750), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_all_child_datasets"""', 'args': '()', 'kwargs': '{}'}), "(name='get_all_child_datasets', args=(), kwargs={})\n", (6699, 6750), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((18509, 18538), 'numpy.linalg.norm', 'np.linalg.norm', (['elem.atomDiff'], {}), '(elem.atomDiff)\n', (18523, 18538), True, 'import numpy as np\n'), ((19603, 19632), 'numpy.linalg.norm', 'np.linalg.norm', (['elem.atomDiff'], {}), '(elem.atomDiff)\n', (19617, 19632), True, 'import numpy as np\n'), ((2201, 2268), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 
'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (2215, 2268), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((2290, 2364), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(1.0 / HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})\n", (2304, 2364), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((2575, 2642), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (2589, 2642), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((2664, 2779), 'masci_tools.io.parsers.hdf5.reader.AttribTransformation', 'AttribTransformation', ([], {'name': '"""shift_by_attribute"""', 'attrib_name': '"""fermi_energy"""', 'args': '()', 'kwargs': "{'negative': True}"}), "(name='shift_by_attribute', attrib_name='fermi_energy',\n args=(), kwargs={'negative': True})\n", (2684, 2779), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3009, 3077), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})\n", (3023, 3077), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3294, 3361), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (3308, 3361), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3383, 3451), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})\n", (3397, 3451), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3732, 3802), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('FermiEnergy',)", 'kwargs': '{}'}), "(name='get_attribute', args=('FermiEnergy',), kwargs={})\n", (3746, 3802), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3824, 3884), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (3838, 3884), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4113, 4177), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('spins',)", 'kwargs': '{}'}), "(name='get_attribute', args=('spins',), kwargs={})\n", (4127, 4177), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4199, 4259), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (4213, 4259), False, 'from 
masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4526, 4590), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('mperp',)", 'kwargs': '{}'}), "(name='get_attribute', args=('mperp',), kwargs={})\n", (4540, 4590), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4612, 4672), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (4626, 4672), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4694, 4766), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""apply_lambda"""', 'args': '(lambda x: x == 1,)', 'kwargs': '{}'}), "(name='apply_lambda', args=(lambda x: x == 1,), kwargs={})\n", (4708, 4766), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((5037, 5100), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('maxl',)", 'kwargs': '{}'}), "(name='get_attribute', args=('maxl',), kwargs={})\n", (5051, 5100), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((5122, 5182), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (5136, 5182), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((20898, 20947), 'numpy.linalg.norm', 'np.linalg.norm', (['(elemij.atomDiff + elemji.atomDiff)'], {}), '(elemij.atomDiff + elemji.atomDiff)\n', (20912, 20947), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
2D sweep of drive power and frequency in Lockin mode.
"""
from typing import List
import h5py
import numpy as np
from presto.hardware import AdcFSample, AdcMode, DacFSample, DacMode
from presto import lockin
from presto.utils import ProgressBar
from _base import Base
DAC_CURRENT = 32_000 # uA
CONVERTER_CONFIGURATION = {
"adc_mode": AdcMode.Mixed,
"adc_fsample": AdcFSample.G4,
"dac_mode": DacMode.Mixed42,
"dac_fsample": DacFSample.G10,
}
class SweepPower(Base):
def __init__(
self,
freq_center: float,
freq_span: float,
df: float,
num_averages: int,
amp_arr: List[float],
output_port: int,
input_port: int,
dither: bool = True,
num_skip: int = 0,
) -> None:
self.freq_center = freq_center
self.freq_span = freq_span
self.df = df # modified after tuning
self.num_averages = num_averages
self.amp_arr = np.atleast_1d(amp_arr).astype(np.float64)
self.output_port = output_port
self.input_port = input_port
self.dither = dither
self.num_skip = num_skip
self.freq_arr = None # replaced by run
self.resp_arr = None # replaced by run
def run(
self,
presto_address: str,
presto_port: int = None,
ext_ref_clk: bool = False,
) -> str:
with lockin.Lockin(
address=presto_address,
port=presto_port,
ext_ref_clk=ext_ref_clk,
**CONVERTER_CONFIGURATION,
) as lck:
assert lck.hardware is not None
lck.hardware.set_adc_attenuation(self.input_port, 0.0)
lck.hardware.set_dac_current(self.output_port, DAC_CURRENT)
lck.hardware.set_inv_sinc(self.output_port, 0)
nr_amps = len(self.amp_arr)
# tune frequencies
_, self.df = lck.tune(0.0, self.df)
f_start = self.freq_center - self.freq_span / 2
f_stop = self.freq_center + self.freq_span / 2
n_start = int(round(f_start / self.df))
n_stop = int(round(f_stop / self.df))
n_arr = np.arange(n_start, n_stop + 1)
nr_freq = len(n_arr)
self.freq_arr = self.df * n_arr
self.resp_arr = np.zeros((nr_amps, nr_freq), np.complex128)
lck.hardware.configure_mixer(
freq=self.freq_arr[0],
in_ports=self.input_port,
out_ports=self.output_port,
)
lck.set_df(self.df)
og = lck.add_output_group(self.output_port, 1)
og.set_frequencies(0.0)
og.set_amplitudes(self.amp_arr[0])
og.set_phases(0.0, 0.0)
lck.set_dither(self.dither, self.output_port)
ig = lck.add_input_group(self.input_port, 1)
ig.set_frequencies(0.0)
lck.apply_settings()
pb = ProgressBar(nr_amps * nr_freq)
pb.start()
for jj, amp in enumerate(self.amp_arr):
og.set_amplitudes(amp)
lck.apply_settings()
for ii, freq in enumerate(self.freq_arr):
lck.hardware.configure_mixer(
freq=freq,
in_ports=self.input_port,
out_ports=self.output_port,
)
lck.hardware.sleep(1e-3, False)
_d = lck.get_pixels(self.num_skip + self.num_averages, quiet=True)
data_i = _d[self.input_port][1][:, 0]
data_q = _d[self.input_port][2][:, 0]
data = data_i.real + 1j * data_q.real # using zero IF
self.resp_arr[jj, ii] = np.mean(data[-self.num_averages:])
pb.increment()
pb.done()
# Mute outputs at the end of the sweep
og.set_amplitudes(0.0)
lck.apply_settings()
return self.save()
def save(self, save_filename: str = None) -> str:
return super().save(__file__, save_filename=save_filename)
@classmethod
def load(cls, load_filename: str) -> 'SweepPower':
with h5py.File(load_filename, "r") as h5f:
freq_center = h5f.attrs["freq_center"]
freq_span = h5f.attrs["freq_span"]
df = h5f.attrs["df"]
num_averages = h5f.attrs["num_averages"]
output_port = h5f.attrs["output_port"]
input_port = h5f.attrs["input_port"]
dither = h5f.attrs["dither"]
num_skip = h5f.attrs["num_skip"]
amp_arr = h5f["amp_arr"][()]
freq_arr = h5f["freq_arr"][()]
resp_arr = h5f["resp_arr"][()]
self = cls(
freq_center=freq_center,
freq_span=freq_span,
df=df,
num_averages=num_averages,
amp_arr=amp_arr,
output_port=output_port,
input_port=input_port,
dither=dither,
num_skip=num_skip,
)
self.freq_arr = freq_arr
self.resp_arr = resp_arr
return self
def analyze(self, norm: bool = True, portrait: bool = True, blit: bool = False):
if self.freq_arr is None:
raise RuntimeError
if self.resp_arr is None:
raise RuntimeError
import matplotlib.pyplot as plt
try:
from resonator_tools import circuit
import matplotlib.widgets as mwidgets
_do_fit = True
except ImportError:
_do_fit = False
nr_amps = len(self.amp_arr)
self._AMP_IDX = nr_amps // 2
if norm:
resp_scaled = np.zeros_like(self.resp_arr)
for jj in range(nr_amps):
resp_scaled[jj] = self.resp_arr[jj] / self.amp_arr[jj]
else:
resp_scaled = self.resp_arr
resp_dB = 20. * np.log10(np.abs(resp_scaled))
amp_dBFS = 20 * np.log10(self.amp_arr / 1.0)
# choose limits for colorbar
cutoff = 1. # %
lowlim = np.percentile(resp_dB, cutoff)
highlim = np.percentile(resp_dB, 100. - cutoff)
# extent
x_min = 1e-9 * self.freq_arr[0]
x_max = 1e-9 * self.freq_arr[-1]
dx = 1e-9 * (self.freq_arr[1] - self.freq_arr[0])
y_min = amp_dBFS[0]
y_max = amp_dBFS[-1]
dy = amp_dBFS[1] - amp_dBFS[0]
if portrait:
fig1 = plt.figure(tight_layout=True, figsize=(6.4, 9.6))
ax1 = fig1.add_subplot(2, 1, 1)
# fig1 = plt.figure(tight_layout=True)
# ax1 = fig1.add_subplot(1, 1, 1)
else:
fig1 = plt.figure(tight_layout=True, figsize=(12.8, 4.8))
ax1 = fig1.add_subplot(1, 2, 1)
im = ax1.imshow(
resp_dB,
origin='lower',
aspect='auto',
interpolation='none',
extent=(x_min - dx / 2, x_max + dx / 2, y_min - dy / 2, y_max + dy / 2),
vmin=lowlim,
vmax=highlim,
)
line_sel = ax1.axhline(amp_dBFS[self._AMP_IDX], ls="--", c="k", lw=3, animated=blit)
# ax1.set_title(f"amp = {amp_arr[AMP_IDX]:.2e}")
ax1.set_xlabel("Frequency [GHz]")
ax1.set_ylabel("Drive amplitude [dBFS]")
cb = fig1.colorbar(im)
if portrait:
cb.set_label("Response amplitude [dB]")
else:
ax1.set_title("Response amplitude [dB]")
fig1.show()
# return fig1
if portrait:
ax2 = fig1.add_subplot(4, 1, 3)
ax3 = fig1.add_subplot(4, 1, 4, sharex=ax2)
else:
ax2 = fig1.add_subplot(2, 2, 2)
ax3 = fig1.add_subplot(2, 2, 4, sharex=ax2)
ax2.yaxis.set_label_position("right")
ax2.yaxis.tick_right()
ax3.yaxis.set_label_position("right")
ax3.yaxis.tick_right()
line_a, = ax2.plot(1e-9 * self.freq_arr, resp_dB[self._AMP_IDX], label="measured", animated=blit)
line_p, = ax3.plot(1e-9 * self.freq_arr, np.angle(self.resp_arr[self._AMP_IDX]), animated=blit)
if _do_fit:
line_fit_a, = ax2.plot(1e-9 * self.freq_arr,
np.full_like(self.freq_arr, np.nan),
ls="--",
label="fit",
animated=blit)
line_fit_p, = ax3.plot(1e-9 * self.freq_arr, np.full_like(self.freq_arr, np.nan), ls="--", animated=blit)
f_min = 1e-9 * self.freq_arr.min()
f_max = 1e-9 * self.freq_arr.max()
f_rng = f_max - f_min
a_min = resp_dB.min()
a_max = resp_dB.max()
a_rng = a_max - a_min
p_min = -np.pi
p_max = np.pi
p_rng = p_max - p_min
ax2.set_xlim(f_min - 0.05 * f_rng, f_max + 0.05 * f_rng)
ax2.set_ylim(a_min - 0.05 * a_rng, a_max + 0.05 * a_rng)
ax3.set_xlim(f_min - 0.05 * f_rng, f_max + 0.05 * f_rng)
ax3.set_ylim(p_min - 0.05 * p_rng, p_max + 0.05 * p_rng)
ax3.set_xlabel("Frequency [GHz]")
ax2.set_ylabel("Response amplitude [dB]")
ax3.set_ylabel("Response phase [rad]")
ax2.legend(loc="lower right")
def onbuttonpress(event):
if event.inaxes == ax1:
self._AMP_IDX = np.argmin(np.abs(amp_dBFS - event.ydata))
update()
def onkeypress(event):
if event.inaxes == ax1:
if event.key == "up":
self._AMP_IDX += 1
if self._AMP_IDX >= len(amp_dBFS):
self._AMP_IDX = len(amp_dBFS) - 1
update()
elif event.key == "down":
self._AMP_IDX -= 1
if self._AMP_IDX < 0:
self._AMP_IDX = 0
update()
def update():
line_sel.set_ydata([amp_dBFS[self._AMP_IDX], amp_dBFS[self._AMP_IDX]])
# ax1.set_title(f"amp = {amp_arr[AMP_IDX]:.2e}")
print(
f"drive amp {self._AMP_IDX:d}: {self.amp_arr[self._AMP_IDX]:.2e} FS = {amp_dBFS[self._AMP_IDX]:.1f} dBFS"
)
line_a.set_ydata(resp_dB[self._AMP_IDX])
line_p.set_ydata(np.angle(self.resp_arr[self._AMP_IDX]))
if _do_fit:
line_fit_a.set_ydata(np.full_like(self.freq_arr, np.nan))
line_fit_p.set_ydata(np.full_like(self.freq_arr, np.nan))
# ax2.set_title("")
if blit:
fig1.canvas.restore_region(self._bg)
ax1.draw_artist(line_sel)
ax2.draw_artist(line_a)
ax3.draw_artist(line_p)
fig1.canvas.blit(fig1.bbox)
fig1.canvas.flush_events()
else:
fig1.canvas.draw()
if _do_fit:
def onselect(xmin, xmax):
port = circuit.notch_port(self.freq_arr, self.resp_arr[self._AMP_IDX])
port.autofit(fcrop=(xmin * 1e9, xmax * 1e9))
if norm:
line_fit_a.set_data(1e-9 * port.f_data,
20 * np.log10(np.abs(port.z_data_sim / self.amp_arr[self._AMP_IDX])))
else:
line_fit_a.set_data(1e-9 * port.f_data, 20 * np.log10(np.abs(port.z_data_sim)))
line_fit_p.set_data(1e-9 * port.f_data, np.angle(port.z_data_sim))
# print(port.fitresults)
print("----------------")
print(f"fr = {port.fitresults['fr']}")
print(f"Qi = {port.fitresults['Qi_dia_corr']}")
print(f"Qc = {port.fitresults['Qc_dia_corr']}")
print(f"Ql = {port.fitresults['Ql']}")
print(f"kappa = {port.fitresults['fr'] / port.fitresults['Qc_dia_corr']}")
print("----------------")
# ax2.set_title(
# f"fr = {1e-6*fr:.0f} MHz, Ql = {Ql:.0f}, Qi = {Qi:.0f}, Qc = {Qc:.0f}, kappa = {1e-3*kappa:.0f} kHz")
if blit:
fig1.canvas.restore_region(self._bg)
ax1.draw_artist(line_sel)
ax2.draw_artist(line_a)
ax2.draw_artist(line_fit_a)
ax3.draw_artist(line_p)
ax3.draw_artist(line_fit_p)
fig1.canvas.blit(fig1.bbox)
fig1.canvas.flush_events()
else:
fig1.canvas.draw()
rectprops = dict(facecolor='tab:gray', alpha=0.5)
fig1._span_a = mwidgets.SpanSelector(ax2, onselect, 'horizontal', rectprops=rectprops, useblit=blit)
fig1._span_p = mwidgets.SpanSelector(ax3, onselect, 'horizontal', rectprops=rectprops, useblit=blit)
fig1.canvas.mpl_connect('button_press_event', onbuttonpress)
fig1.canvas.mpl_connect('key_press_event', onkeypress)
fig1.show()
if blit:
fig1.canvas.draw()
fig1.canvas.flush_events()
self._bg = fig1.canvas.copy_from_bbox(fig1.bbox)
ax1.draw_artist(line_sel)
ax2.draw_artist(line_a)
ax3.draw_artist(line_p)
fig1.canvas.blit(fig1.bbox)
return fig1
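# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The instrument
# address and sweep parameters are hypothetical; the sketch only illustrates
# the intended call sequence of the class above.
#
#   sweep = SweepPower(
#       freq_center=6.0e9, freq_span=10e6, df=1e4, num_averages=100,
#       amp_arr=[0.01, 0.03, 0.1], output_port=1, input_port=1,
#   )
#   save_filename = sweep.run("192.168.42.50")   # returns the path written by save()
#   sweep = SweepPower.load(save_filename)
#   sweep.analyze()
# ---------------------------------------------------------------------------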
|
[
"h5py.File",
"numpy.zeros_like",
"numpy.atleast_1d",
"presto.utils.ProgressBar",
"numpy.abs",
"numpy.full_like",
"numpy.angle",
"numpy.zeros",
"presto.lockin.Lockin",
"numpy.percentile",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.widgets.SpanSelector",
"numpy.log10",
"resonator_tools.circuit.notch_port"
] |
[((6151, 6181), 'numpy.percentile', 'np.percentile', (['resp_dB', 'cutoff'], {}), '(resp_dB, cutoff)\n', (6164, 6181), True, 'import numpy as np\n'), ((6200, 6238), 'numpy.percentile', 'np.percentile', (['resp_dB', '(100.0 - cutoff)'], {}), '(resp_dB, 100.0 - cutoff)\n', (6213, 6238), True, 'import numpy as np\n'), ((1413, 1525), 'presto.lockin.Lockin', 'lockin.Lockin', ([], {'address': 'presto_address', 'port': 'presto_port', 'ext_ref_clk': 'ext_ref_clk'}), '(address=presto_address, port=presto_port, ext_ref_clk=\n ext_ref_clk, **CONVERTER_CONFIGURATION)\n', (1426, 1525), False, 'from presto import lockin\n'), ((2209, 2239), 'numpy.arange', 'np.arange', (['n_start', '(n_stop + 1)'], {}), '(n_start, n_stop + 1)\n', (2218, 2239), True, 'import numpy as np\n'), ((2345, 2388), 'numpy.zeros', 'np.zeros', (['(nr_amps, nr_freq)', 'np.complex128'], {}), '((nr_amps, nr_freq), np.complex128)\n', (2353, 2388), True, 'import numpy as np\n'), ((2985, 3015), 'presto.utils.ProgressBar', 'ProgressBar', (['(nr_amps * nr_freq)'], {}), '(nr_amps * nr_freq)\n', (2996, 3015), False, 'from presto.utils import ProgressBar\n'), ((4261, 4290), 'h5py.File', 'h5py.File', (['load_filename', '"""r"""'], {}), "(load_filename, 'r')\n", (4270, 4290), False, 'import h5py\n'), ((5771, 5799), 'numpy.zeros_like', 'np.zeros_like', (['self.resp_arr'], {}), '(self.resp_arr)\n', (5784, 5799), True, 'import numpy as np\n'), ((6042, 6070), 'numpy.log10', 'np.log10', (['(self.amp_arr / 1.0)'], {}), '(self.amp_arr / 1.0)\n', (6050, 6070), True, 'import numpy as np\n'), ((6532, 6581), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'tight_layout': '(True)', 'figsize': '(6.4, 9.6)'}), '(tight_layout=True, figsize=(6.4, 9.6))\n', (6542, 6581), True, 'import matplotlib.pyplot as plt\n'), ((6756, 6806), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'tight_layout': '(True)', 'figsize': '(12.8, 4.8)'}), '(tight_layout=True, figsize=(12.8, 4.8))\n', (6766, 6806), True, 'import matplotlib.pyplot as plt\n'), ((8148, 8186), 'numpy.angle', 'np.angle', (['self.resp_arr[self._AMP_IDX]'], {}), '(self.resp_arr[self._AMP_IDX])\n', (8156, 8186), True, 'import numpy as np\n'), ((12745, 12834), 'matplotlib.widgets.SpanSelector', 'mwidgets.SpanSelector', (['ax2', 'onselect', '"""horizontal"""'], {'rectprops': 'rectprops', 'useblit': 'blit'}), "(ax2, onselect, 'horizontal', rectprops=rectprops,\n useblit=blit)\n", (12766, 12834), True, 'import matplotlib.widgets as mwidgets\n'), ((12858, 12947), 'matplotlib.widgets.SpanSelector', 'mwidgets.SpanSelector', (['ax3', 'onselect', '"""horizontal"""'], {'rectprops': 'rectprops', 'useblit': 'blit'}), "(ax3, onselect, 'horizontal', rectprops=rectprops,\n useblit=blit)\n", (12879, 12947), True, 'import matplotlib.widgets as mwidgets\n'), ((984, 1006), 'numpy.atleast_1d', 'np.atleast_1d', (['amp_arr'], {}), '(amp_arr)\n', (997, 1006), True, 'import numpy as np\n'), ((5997, 6016), 'numpy.abs', 'np.abs', (['resp_scaled'], {}), '(resp_scaled)\n', (6003, 6016), True, 'import numpy as np\n'), ((8315, 8350), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (8327, 8350), True, 'import numpy as np\n'), ((8551, 8586), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (8563, 8586), True, 'import numpy as np\n'), ((10388, 10426), 'numpy.angle', 'np.angle', (['self.resp_arr[self._AMP_IDX]'], {}), '(self.resp_arr[self._AMP_IDX])\n', (10396, 10426), True, 'import numpy as np\n'), ((11051, 11114), 
'resonator_tools.circuit.notch_port', 'circuit.notch_port', (['self.freq_arr', 'self.resp_arr[self._AMP_IDX]'], {}), '(self.freq_arr, self.resp_arr[self._AMP_IDX])\n', (11069, 11114), False, 'from resonator_tools import circuit\n'), ((3811, 3845), 'numpy.mean', 'np.mean', (['data[-self.num_averages:]'], {}), '(data[-self.num_averages:])\n', (3818, 3845), True, 'import numpy as np\n'), ((9446, 9476), 'numpy.abs', 'np.abs', (['(amp_dBFS - event.ydata)'], {}), '(amp_dBFS - event.ydata)\n', (9452, 9476), True, 'import numpy as np\n'), ((10489, 10524), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (10501, 10524), True, 'import numpy as np\n'), ((10563, 10598), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (10575, 10598), True, 'import numpy as np\n'), ((11549, 11574), 'numpy.angle', 'np.angle', (['port.z_data_sim'], {}), '(port.z_data_sim)\n', (11557, 11574), True, 'import numpy as np\n'), ((11315, 11368), 'numpy.abs', 'np.abs', (['(port.z_data_sim / self.amp_arr[self._AMP_IDX])'], {}), '(port.z_data_sim / self.amp_arr[self._AMP_IDX])\n', (11321, 11368), True, 'import numpy as np\n'), ((11467, 11490), 'numpy.abs', 'np.abs', (['port.z_data_sim'], {}), '(port.z_data_sim)\n', (11473, 11490), True, 'import numpy as np\n')]
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides access to the Smart Buidlings dataset for Anomaly Detection."""
from madi.datasets.base_dataset import BaseDataset
import numpy as np
import pandas as pd
import tensorflow as tf
_DATA_FILE = "madi/datasets/data/anomaly_detection_sample_1577622599.csv"
_README_FILE = "madi/datasets/data/anomaly_detection_sample_1577622599_README.md"
class SmartBuildingsDataset(BaseDataset):
"""Smart Buildings data set for Multivariate Anomaly Detection."""
def __init__(self,
datafilepath: str = _DATA_FILE,
readmefilepath: str = _README_FILE):
self._sample = self._load_data_file(datafilepath)
self._description = self._load_readme(readmefilepath)
@property
def sample(self) -> pd.DataFrame:
return self._sample
@property
def name(self) -> str:
return "smart_buildings"
@property
def description(self) -> str:
return self._description
def _load_data_file(self, datafile: str) -> pd.DataFrame:
sample = None
if not tf.io.gfile.exists(datafile):
raise AssertionError("{} does not exist".format(datafile))
with tf.io.gfile.GFile(datafile) as csv_file:
sample = pd.read_csv(csv_file, header="infer", index_col=0)
sample = sample.reindex(np.random.permutation(sample.index))
return sample
|
[
"pandas.read_csv",
"numpy.random.permutation",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile"
] |
[((1632, 1660), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['datafile'], {}), '(datafile)\n', (1650, 1660), True, 'import tensorflow as tf\n'), ((1736, 1763), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['datafile'], {}), '(datafile)\n', (1753, 1763), True, 'import tensorflow as tf\n'), ((1792, 1842), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'header': '"""infer"""', 'index_col': '(0)'}), "(csv_file, header='infer', index_col=0)\n", (1803, 1842), True, 'import pandas as pd\n'), ((1872, 1907), 'numpy.random.permutation', 'np.random.permutation', (['sample.index'], {}), '(sample.index)\n', (1893, 1907), True, 'import numpy as np\n')]
|
import torch
from torch.utils.data import Dataset
import numpy as np
from scipy.io import loadmat
class NatPatchDataset(Dataset):
def __init__(self, N:int, width:int, height:int, border:int=4, fpath:str='../../data/IMAGES.mat', test=False):
super(NatPatchDataset, self).__init__()
self.N = N
self.width = width
self.height = height
self.border = border
self.fpath = fpath
self.test = test
# holder
self.images = None
# initialize patches
self.extract_patches_()
def __len__(self):
return self.images.shape[0]
def __getitem__(self, idx):
return self.images[idx]
def extract_patches_(self):
# load mat
X = loadmat(self.fpath)
X = X['IMAGES']
img_size = X.shape[0]
n_img = X.shape[2]
self.images = torch.zeros((self.N * n_img, self.width, self.height))
# for every image
counter = 0
# Save the last image for testing
if self.test:
image_indices = [-1]
else:
image_indices = range(n_img)[:-1]
for i in image_indices:
img = X[:, :, i]
for j in range(self.N):
if self.test: # use a deterministic patch for producing figures
x = 63
y = 14
else:
x = np.random.randint(self.border, img_size - self.width - self.border)
y = np.random.randint(self.border, img_size - self.height - self.border)
crop = torch.tensor(img[x:x+self.width, y:y+self.height])
self.images[counter, :, :] = crop - crop.mean()
counter += 1
|
[
"torch.zeros",
"numpy.random.randint",
"torch.tensor",
"scipy.io.loadmat"
] |
[((746, 765), 'scipy.io.loadmat', 'loadmat', (['self.fpath'], {}), '(self.fpath)\n', (753, 765), False, 'from scipy.io import loadmat\n'), ((869, 923), 'torch.zeros', 'torch.zeros', (['(self.N * n_img, self.width, self.height)'], {}), '((self.N * n_img, self.width, self.height))\n', (880, 923), False, 'import torch\n'), ((1589, 1643), 'torch.tensor', 'torch.tensor', (['img[x:x + self.width, y:y + self.height]'], {}), '(img[x:x + self.width, y:y + self.height])\n', (1601, 1643), False, 'import torch\n'), ((1405, 1472), 'numpy.random.randint', 'np.random.randint', (['self.border', '(img_size - self.width - self.border)'], {}), '(self.border, img_size - self.width - self.border)\n', (1422, 1472), True, 'import numpy as np\n'), ((1497, 1565), 'numpy.random.randint', 'np.random.randint', (['self.border', '(img_size - self.height - self.border)'], {}), '(self.border, img_size - self.height - self.border)\n', (1514, 1565), True, 'import numpy as np\n')]
|
from RouteManager import RouteManager
from Route import Route
import numpy as np
class GeneticAlgorithmSolver:
def __init__(self, cities, population_size=50, mutation_rate=0.05, tournament_size=5, elitism=True):
self.cities = cities
self.population_size = population_size
self.mutation_rate = mutation_rate
self.tournament_size = tournament_size
self.elitism = elitism
def solve(self, rm):
rm = self.evolve(rm)
for i in range(100):
rm = self.evolve(rm)
return rm
def evolve(self, routes):
        '''This function provides the general flow for creating a new generation
        from a given population
Input:
routes: RouteManager object that will be evolved
Output:
child: new generation of RouteManager
'''
selected_routes = RouteManager(self.cities,self.population_size) #to store routes in selection state
#SELECTION STATE
for i in range(self.population_size-int(self.elitism)):
#replace existing routes with tournament winners
            #as many as tournament_size participants are chosen randomly
selected_routes.set_route(i, self.tournament(np.random.choice(routes.routes, self.tournament_size)))
##ELITISM PART
child_routes = RouteManager(self.cities,self.population_size) #to store new child routes
if self.elitism: #if elitism then best route will directly pass to next generation
temporary_route = Route(self.cities)
elite_route = routes.find_best_route()
for i in range(len(elite_route)):
temporary_route.assign_city(i,elite_route.get_city(i))
child_routes.set_route(self.population_size-1, temporary_route)
#CROSS-OVER STATE
for i in range(self.population_size-int(self.elitism)):
#replace existing child routes with actually generated ones
#first route is matched with last, second is matched with second from last and so on.
child_routes.set_route(i, self.crossover(selected_routes.get_route(i),selected_routes.get_route(self.population_size-1-i)))
#MUTATION STATE
for i in range(len(child_routes)-int(self.elitism)):
#send each routes to mutation function
self.mutate(child_routes.get_route(i))
return child_routes
def crossover(self, route_1, route_2):
'''This function creates a crossed-over child route from
two given parent routes.
Input:
route_1: first parent route
route_2: second parent route
Output:
child: generated child route
'''
#determining random start and end genes
#which will stay same as in the first parent
a = np.random.rand()
b = np.random.rand()
low_point=int(min(a,b)*len(self.cities))
up_point=int(max(a,b)*len(self.cities))
        child=route_1 #child creation (note: this aliases route_1, which is then modified in place)
gen_list=[] #this list stores the cities as in the generated child's order
for i in range(low_point,up_point):
#from randomly generated low to up point cities will stay same
gen_list.append(route_1.get_city(i))
        #subset contains the cities that have not been added to gen_list, kept in the second parent's order
subset=[item for item in route_2.route if item not in gen_list]
#add the cities in the subset
for i in range(len(self.cities)):
if i not in range(low_point,up_point):
indx=i if i<low_point else i-(up_point-low_point)
child.assign_city(i,subset[indx])
return child
def mutate(self, route):
        '''This function randomly mutates genes with
        a given probability
        Input:
            route: Route object to be mutated
Output:
None
'''
for i in range(len(route)): #each gene can be subject to mutation
            if np.random.rand()<self.mutation_rate: #mutation occurs with probability mutation_rate
                #if it occurs, the gene is swapped with another randomly chosen gene
swap_indx=int(len(route)*np.random.rand())
city1 = route.get_city(i)
city2 = route.get_city(swap_indx)
route.assign_city(i,city2)
route.assign_city(swap_indx, city1)
return
def tournament(self, routes):
'''This function returns the route with best fitness score
among a set of routes.
Input:
routes: list of routes
Output:
return_route: route that gives best fitness
'''
        best_fitness=0 #initial best fitness
        tour_winner=routes[0] #fallback so a winner is always defined
        for r in routes:
            if r.calc_fitness()>best_fitness: #update if a better route than the current best exists
                best_fitness=r.calc_fitness()
                tour_winner=r
return_route = Route(self.cities) #creating the return value
for i in range(len(return_route)):
return_route.assign_city(i,tour_winner.get_city(i))
return return_route
|
[
"numpy.random.rand",
"Route.Route",
"RouteManager.RouteManager",
"numpy.random.choice"
] |
[((866, 913), 'RouteManager.RouteManager', 'RouteManager', (['self.cities', 'self.population_size'], {}), '(self.cities, self.population_size)\n', (878, 913), False, 'from RouteManager import RouteManager\n'), ((1350, 1397), 'RouteManager.RouteManager', 'RouteManager', (['self.cities', 'self.population_size'], {}), '(self.cities, self.population_size)\n', (1362, 1397), False, 'from RouteManager import RouteManager\n'), ((2851, 2867), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2865, 2867), True, 'import numpy as np\n'), ((2880, 2896), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2894, 2896), True, 'import numpy as np\n'), ((5063, 5081), 'Route.Route', 'Route', (['self.cities'], {}), '(self.cities)\n', (5068, 5081), False, 'from Route import Route\n'), ((1545, 1563), 'Route.Route', 'Route', (['self.cities'], {}), '(self.cities)\n', (1550, 1563), False, 'from Route import Route\n'), ((4085, 4101), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4099, 4101), True, 'import numpy as np\n'), ((1238, 1291), 'numpy.random.choice', 'np.random.choice', (['routes.routes', 'self.tournament_size'], {}), '(routes.routes, self.tournament_size)\n', (1254, 1291), True, 'import numpy as np\n'), ((4305, 4321), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4319, 4321), True, 'import numpy as np\n')]
|
# Load libraries
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
import os
from keras.applications.resnet50 import ResNet50
from keras.optimizers import Adam
from keras.layers import Dense, Flatten,Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector,Concatenate
from keras.models import Sequential, Model
from keras.utils import np_utils
import random
from keras.preprocessing import image, sequence
import matplotlib.pyplot as plt
# Load data
images_dir = os.listdir("D:\\FBAi\\data\\Flickr_Data")
images_path = 'D:\\FBAi\\data\\Flickr_Data\\Images\\'
captions_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr8k.token.txt'
train_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.trainImages.txt'
val_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.devImages.txt'
test_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.testImages.txt'
captions = open(captions_path, 'r').read().split("\n")
x_train = open(train_path, 'r').read().split("\n")
x_val = open(val_path, 'r').read().split("\n")
x_test = open(test_path, 'r').read().split("\n")
# Loading captions as values and images as key in dictionary
tokens = {}
for ix in range(len(captions)-1):
temp = captions[ix].split("#")
if temp[0] in tokens:
tokens[temp[0]].append(temp[1][2:])
else:
tokens[temp[0]] = [temp[1][2:]]
# displaying an image and captions given to it
temp = captions[10].split("#")
from IPython.display import Image, display
z = Image(filename=images_path+temp[0])
display(z)
for ix in range(len(tokens[temp[0]])):
print(tokens[temp[0]][ix])
# Creating train, test and validation dataset files with header as 'image_id' and 'captions'
train_dataset = open('flickr_8k_train_dataset.txt','wb')
train_dataset.write(b"image_id\tcaptions\n")
val_dataset = open('flickr_8k_val_dataset.txt','wb')
val_dataset.write(b"image_id\tcaptions\n")
test_dataset = open('flickr_8k_test_dataset.txt','wb')
test_dataset.write(b"image_id\tcaptions\n")
# Populating the above created files for train, test and validation dataset with image ids and captions for each of these images
for img in x_train:
if img == '':
continue
for capt in tokens[img]:
caption = "<start> "+ capt + " <end>"
train_dataset.write((img+"\t"+caption+"\n").encode())
train_dataset.flush()
train_dataset.close()
for img in x_test:
if img == '':
continue
for capt in tokens[img]:
caption = "<start> "+ capt + " <end>"
test_dataset.write((img+"\t"+caption+"\n").encode())
test_dataset.flush()
test_dataset.close()
for img in x_val:
if img == '':
continue
for capt in tokens[img]:
caption = "<start> "+ capt + " <end>"
val_dataset.write((img+"\t"+caption+"\n").encode())
val_dataset.flush()
val_dataset.close()
# Loading 50 layer Residual Network Model and getting the summary of the model
from IPython.core.display import display, HTML
display(HTML("""<a href="http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006">ResNet50 Architecture</a>"""))
model = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
model.summary()
# Note: for more details on the ResNet50 architecture you can click on the hyperlink given above
# Helper function to process images
def preprocessing(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im)
im = np.expand_dims(im, axis=0)
return im
train_data = {}
ctr=0
for ix in x_train:
if ix == "":
continue
if ctr >= 3000:
break
ctr+=1
if ctr%1000==0:
print(ctr)
path = images_path + ix
img = preprocessing(path)
pred = model.predict(img).reshape(2048)
train_data[ix] = pred
train_data['2513260012_03d33305cf.jpg'].shape
# opening train_encoded_images.p file and dumping it's content
with open( "train_encoded_images.p", "wb" ) as pickle_f:
pickle.dump(train_data, pickle_f )
# Loading image and its corresponding caption into a dataframe and then storing values from dataframe into 'ds'
pd_dataset = pd.read_csv("flickr_8k_train_dataset.txt", delimiter='\t')
ds = pd_dataset.values
print(ds.shape)
pd_dataset.head()
# Storing all the captions from ds into a list
sentences = []
for ix in range(ds.shape[0]):
sentences.append(ds[ix, 1])
print(len(sentences))
# First 5 captions stored in sentences
sentences[:5]
# Splitting each captions stored in 'sentences' and storing them in 'words' as list of list
words = [i.split() for i in sentences]
# Creating a list of all unique words
unique = []
for i in words:
unique.extend(i)
unique = list(set(unique))
print(len(unique))
vocab_size = len(unique)
# Vectorization
word_2_indices = {val:index for index, val in enumerate(unique)}
indices_2_word = {index:val for index, val in enumerate(unique)}
word_2_indices['UNK'] = 0
word_2_indices['raining'] = 8253
indices_2_word[0] = 'UNK'
indices_2_word[8253] = 'raining'
print(word_2_indices['<start>'])
print(indices_2_word[4011])
print(word_2_indices['<end>'])
print(indices_2_word[8051])
vocab_size = len(word_2_indices.keys())
print(vocab_size)
max_len = 0
for i in sentences:
i = i.split()
if len(i) > max_len:
max_len = len(i)
print(max_len)
padded_sequences, subsequent_words = [], []
for ix in range(ds.shape[0]):
partial_seqs = []
next_words = []
text = ds[ix, 1].split()
text = [word_2_indices[i] for i in text]
for i in range(1, len(text)):
partial_seqs.append(text[:i])
next_words.append(text[i])
padded_partial_seqs = sequence.pad_sequences(partial_seqs, max_len, padding='post')
    next_words_1hot = np.zeros([len(next_words), vocab_size], dtype=bool)  # np.bool is deprecated; the builtin bool works across numpy versions
#Vectorization
for i,next_word in enumerate(next_words):
next_words_1hot[i, next_word] = 1
padded_sequences.append(padded_partial_seqs)
subsequent_words.append(next_words_1hot)
padded_sequences = np.asarray(padded_sequences)
subsequent_words = np.asarray(subsequent_words)
print(padded_sequences.shape)
print(subsequent_words.shape)
print(padded_sequences[0])
for ix in range(len(padded_sequences[0])):
for iy in range(max_len):
print(indices_2_word[padded_sequences[0][ix][iy]],)
print("\n")
print(len(padded_sequences[0]))
num_of_images = 2000
captions = np.zeros([0, max_len])
next_words = np.zeros([0, vocab_size])
for ix in range(num_of_images):#img_to_padded_seqs.shape[0]):
captions = np.concatenate([captions, padded_sequences[ix]])
next_words = np.concatenate([next_words, subsequent_words[ix]])
np.save("captions.npy", captions)
np.save("next_words.npy", next_words)
print(captions.shape)
print(next_words.shape)
with open('D:\\FBAi\\data\\train_encoded_images.p', 'rb') as f:
encoded_images = pickle.load(f, encoding="bytes")
imgs = []
for ix in range(ds.shape[0]):
if ds[ix, 0].encode() in encoded_images.keys():
# print(ix, encoded_images[ds[ix, 0].encode()])
imgs.append(list(encoded_images[ds[ix, 0].encode()]))
imgs = np.asarray(imgs)
print(imgs.shape)
images = []
for ix in range(num_of_images):
for iy in range(padded_sequences[ix].shape[0]):
images.append(imgs[ix])
images = np.asarray(images)
np.save("images.npy", images)
print(images.shape)
image_names = []
for ix in range(num_of_images):
for iy in range(padded_sequences[ix].shape[0]):
image_names.append(ds[ix, 0])
image_names = np.asarray(image_names)
np.save("image_names.npy", image_names)
print(len(image_names))
#Model
captions = np.load("captions.npy")
next_words = np.load("next_words.npy")
print(captions.shape)
print(next_words.shape)
images = np.load("images.npy")
print(images.shape)
imag = np.load("image_names.npy")
print(imag.shape)
embedding_size = 128
max_len = 40
image_model = Sequential()
image_model.add(Dense(embedding_size, input_shape=(2048,), activation='relu'))
image_model.add(RepeatVector(max_len))
image_model.summary()
language_model = Sequential()
language_model.add(Embedding(input_dim=vocab_size, output_dim=embedding_size, input_length=max_len))
language_model.add(LSTM(256, return_sequences=True))
language_model.add(TimeDistributed(Dense(embedding_size)))
language_model.summary()
conca = Concatenate()([image_model.output, language_model.output])
x = LSTM(128, return_sequences=True)(conca)
x = LSTM(512, return_sequences=False)(x)
x = Dense(vocab_size)(x)
out = Activation('softmax')(x)
model = Model(inputs=[image_model.input, language_model.input], outputs = out)
model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
model.summary()
hist = model.fit([images, captions], next_words, batch_size=512, epochs=200)
model.save_weights("model_weights.h5")
#Predictions
def preprocessing(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im)
im = np.expand_dims(im, axis=0)
return im
def get_encoding(model, img):
image = preprocessing(img)
pred = model.predict(image).reshape(2048)
return pred
resnet = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
img = "D:\\FBAi\\data\\Flickr_Data\\Images\\1453366750_6e8cf601bf.jpg"
test_img = get_encoding(resnet, img)
def predict_captions(image):
start_word = ["<start>"]
while True:
par_caps = [word_2_indices[i] for i in start_word]
par_caps = sequence.pad_sequences([par_caps], maxlen=max_len, padding='post')
preds = model.predict([np.array([image]), np.array(par_caps)])
word_pred = indices_2_word[np.argmax(preds[0])]
start_word.append(word_pred)
if word_pred == "<end>" or len(start_word) > max_len:
break
return ' '.join(start_word[1:-1])
Argmax_Search = predict_captions(test_img)
z = Image(filename=img)
display(z)
print(Argmax_Search)
|
[
"numpy.load",
"pickle.dump",
"numpy.argmax",
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"keras.models.Model",
"IPython.core.display.HTML",
"keras.preprocessing.image.img_to_array",
"pickle.load",
"keras.preprocessing.image.load_img",
"numpy.save",
"IPython.core.display.display",
"numpy.asarray",
"keras.layers.Concatenate",
"keras.layers.RepeatVector",
"os.listdir",
"IPython.display.Image",
"numpy.concatenate",
"keras.layers.Activation",
"keras.layers.LSTM",
"numpy.zeros",
"numpy.expand_dims",
"keras.applications.resnet50.ResNet50",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((564, 605), 'os.listdir', 'os.listdir', (['"""D:\\\\FBAi\\\\data\\\\Flickr_Data"""'], {}), "('D:\\\\FBAi\\\\data\\\\Flickr_Data')\n", (574, 605), False, 'import os\n'), ((1624, 1661), 'IPython.display.Image', 'Image', ([], {'filename': '(images_path + temp[0])'}), '(filename=images_path + temp[0])\n', (1629, 1661), False, 'from IPython.display import Image, display\n'), ((1661, 1671), 'IPython.core.display.display', 'display', (['z'], {}), '(z)\n', (1668, 1671), False, 'from IPython.core.display import display, HTML\n'), ((3323, 3416), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False, weights='imagenet', input_shape=(224, 224, 3),\n pooling='avg')\n", (3331, 3416), False, 'from keras.applications.resnet50 import ResNet50\n'), ((4386, 4444), 'pandas.read_csv', 'pd.read_csv', (['"""flickr_8k_train_dataset.txt"""'], {'delimiter': '"""\t"""'}), "('flickr_8k_train_dataset.txt', delimiter='\\t')\n", (4397, 4444), True, 'import pandas as pd\n'), ((6330, 6358), 'numpy.asarray', 'np.asarray', (['padded_sequences'], {}), '(padded_sequences)\n', (6340, 6358), True, 'import numpy as np\n'), ((6379, 6407), 'numpy.asarray', 'np.asarray', (['subsequent_words'], {}), '(subsequent_words)\n', (6389, 6407), True, 'import numpy as np\n'), ((6722, 6744), 'numpy.zeros', 'np.zeros', (['[0, max_len]'], {}), '([0, max_len])\n', (6730, 6744), True, 'import numpy as np\n'), ((6759, 6784), 'numpy.zeros', 'np.zeros', (['[0, vocab_size]'], {}), '([0, vocab_size])\n', (6767, 6784), True, 'import numpy as np\n'), ((6985, 7018), 'numpy.save', 'np.save', (['"""captions.npy"""', 'captions'], {}), "('captions.npy', captions)\n", (6992, 7018), True, 'import numpy as np\n'), ((7020, 7057), 'numpy.save', 'np.save', (['"""next_words.npy"""', 'next_words'], {}), "('next_words.npy', next_words)\n", (7027, 7057), True, 'import numpy as np\n'), ((7465, 7481), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7475, 7481), True, 'import numpy as np\n'), ((7655, 7673), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (7665, 7673), True, 'import numpy as np\n'), ((7677, 7706), 'numpy.save', 'np.save', (['"""images.npy"""', 'images'], {}), "('images.npy', images)\n", (7684, 7706), True, 'import numpy as np\n'), ((7902, 7925), 'numpy.asarray', 'np.asarray', (['image_names'], {}), '(image_names)\n', (7912, 7925), True, 'import numpy as np\n'), ((7929, 7968), 'numpy.save', 'np.save', (['"""image_names.npy"""', 'image_names'], {}), "('image_names.npy', image_names)\n", (7936, 7968), True, 'import numpy as np\n'), ((8020, 8043), 'numpy.load', 'np.load', (['"""captions.npy"""'], {}), "('captions.npy')\n", (8027, 8043), True, 'import numpy as np\n'), ((8058, 8083), 'numpy.load', 'np.load', (['"""next_words.npy"""'], {}), "('next_words.npy')\n", (8065, 8083), True, 'import numpy as np\n'), ((8144, 8165), 'numpy.load', 'np.load', (['"""images.npy"""'], {}), "('images.npy')\n", (8151, 8165), True, 'import numpy as np\n'), ((8197, 8223), 'numpy.load', 'np.load', (['"""image_names.npy"""'], {}), "('image_names.npy')\n", (8204, 8223), True, 'import numpy as np\n'), ((8304, 8316), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8314, 8316), False, 'from keras.models import Sequential, Model\n'), ((8486, 8498), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8496, 8498), False, 'from keras.models import Sequential, Model\n'), ((8967, 9035), 'keras.models.Model', 
'Model', ([], {'inputs': '[image_model.input, language_model.input]', 'outputs': 'out'}), '(inputs=[image_model.input, language_model.input], outputs=out)\n', (8972, 9035), False, 'from keras.models import Sequential, Model\n'), ((9600, 9693), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False, weights='imagenet', input_shape=(224, 224, 3),\n pooling='avg')\n", (9608, 9693), False, 'from keras.applications.resnet50 import ResNet50\n'), ((10392, 10411), 'IPython.display.Image', 'Image', ([], {'filename': 'img'}), '(filename=img)\n', (10397, 10411), False, 'from IPython.display import Image, display\n'), ((10413, 10423), 'IPython.core.display.display', 'display', (['z'], {}), '(z)\n', (10420, 10423), False, 'from IPython.core.display import display, HTML\n'), ((3202, 3318), 'IPython.core.display.HTML', 'HTML', (['"""<a href="http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006">ResNet50 Architecture</a>"""'], {}), '(\n \'<a href="http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006">ResNet50 Architecture</a>\'\n )\n', (3206, 3318), False, 'from IPython.core.display import display, HTML\n'), ((3594, 3645), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224, 3)'}), '(img_path, target_size=(224, 224, 3))\n', (3608, 3645), False, 'from keras.preprocessing import image, sequence\n'), ((3654, 3676), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['im'], {}), '(im)\n', (3672, 3676), False, 'from keras.preprocessing import image, sequence\n'), ((3687, 3713), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (3701, 3713), True, 'import numpy as np\n'), ((4216, 4249), 'pickle.dump', 'pickle.dump', (['train_data', 'pickle_f'], {}), '(train_data, pickle_f)\n', (4227, 4249), False, 'import pickle\n'), ((5940, 6001), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['partial_seqs', 'max_len'], {'padding': '"""post"""'}), "(partial_seqs, max_len, padding='post')\n", (5962, 6001), False, 'from keras.preprocessing import image, sequence\n'), ((6864, 6912), 'numpy.concatenate', 'np.concatenate', (['[captions, padded_sequences[ix]]'], {}), '([captions, padded_sequences[ix]])\n', (6878, 6912), True, 'import numpy as np\n'), ((6931, 6981), 'numpy.concatenate', 'np.concatenate', (['[next_words, subsequent_words[ix]]'], {}), '([next_words, subsequent_words[ix]])\n', (6945, 6981), True, 'import numpy as np\n'), ((7199, 7231), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (7210, 7231), False, 'import pickle\n'), ((8336, 8397), 'keras.layers.Dense', 'Dense', (['embedding_size'], {'input_shape': '(2048,)', 'activation': '"""relu"""'}), "(embedding_size, input_shape=(2048,), activation='relu')\n", (8341, 8397), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8416, 8437), 'keras.layers.RepeatVector', 'RepeatVector', (['max_len'], {}), '(max_len)\n', (8428, 8437), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8521, 8606), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'vocab_size', 'output_dim': 'embedding_size', 'input_length': 
'max_len'}), '(input_dim=vocab_size, output_dim=embedding_size, input_length=max_len\n )\n', (8530, 8606), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8623, 8655), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'return_sequences': '(True)'}), '(256, return_sequences=True)\n', (8627, 8655), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8754, 8767), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (8765, 8767), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8818, 8850), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)'}), '(128, return_sequences=True)\n', (8822, 8850), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8863, 8896), 'keras.layers.LSTM', 'LSTM', (['(512)'], {'return_sequences': '(False)'}), '(512, return_sequences=False)\n', (8867, 8896), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8905, 8922), 'keras.layers.Dense', 'Dense', (['vocab_size'], {}), '(vocab_size)\n', (8910, 8922), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8933, 8954), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (8943, 8954), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((9324, 9375), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224, 3)'}), '(img_path, target_size=(224, 224, 3))\n', (9338, 9375), False, 'from keras.preprocessing import image, sequence\n'), ((9384, 9406), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['im'], {}), '(im)\n', (9402, 9406), False, 'from keras.preprocessing import image, sequence\n'), ((9417, 9443), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (9431, 9443), True, 'import numpy as np\n'), ((8693, 8714), 'keras.layers.Dense', 'Dense', (['embedding_size'], {}), '(embedding_size)\n', (8698, 8714), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((9960, 10026), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['[par_caps]'], {'maxlen': 'max_len', 'padding': '"""post"""'}), "([par_caps], maxlen=max_len, padding='post')\n", (9982, 10026), False, 'from keras.preprocessing import image, sequence\n'), ((10135, 10154), 'numpy.argmax', 'np.argmax', (['preds[0]'], {}), '(preds[0])\n', (10144, 10154), True, 'import numpy as np\n'), ((10059, 10076), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (10067, 10076), True, 'import numpy as np\n'), ((10078, 10096), 'numpy.array', 'np.array', (['par_caps'], {}), '(par_caps)\n', (10086, 10096), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Buscador
#
# This notebook implements a simple search engine.
# The representation of each text is built with TF-IDF.
# The representation of the query (the search terms)
# is built from the vocabulary of the texts.
# Results are ranked by the cosine similarity
# between the query and the texts.
#
# There are several opportunities for improvement.
# Some of them are discussed throughout the notebook.
#
# The results, even for this naive search engine,
# are quite satisfactory.
# It is able to return laws (in this case)
# related to places or people.
# The same mechanism, however, can be used
# for any other texts, for example the Diário Oficial.
# Some example searches are:
#
# "winterianus" - returns the Municipal Law about citronella;
#
# "E<NAME>" - returns the Municipal Law granting the title of honorary citizen of Feira;
#
# "Rua Espassonavel" - returns the Municipal Law that mentions the street.
# In[ ]:
import numpy as np
import pandas as pd
from scripts.nlp import remove_portuguese_stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity
# In[ ]:
laws = pd.read_json("leis.json")
laws.drop(["documento"], inplace=True, axis=1)
print(laws.info())
print(laws.nunique())
# In[ ]:
laws
# In[ ]:
print(laws.loc[len(laws) - 1, "texto"])
# # Searching by text
#
# In the _similar_laws_ notebook we saw that TF-IDF finds laws that are quite similar to one another.
# Can we also find laws that are similar to a query?
#
# First, we build the TF-IDF representation of the laws.
# Once we have the representations,
# we clean the query text with the same cleaning method used for the laws.
# Then we build a representation of the query using the IDF of the trained model.
# Finally, we compute the similarity of this query
# to every law in the corpus and return the closest ones.
# In[ ]:
laws["texto_limpo"] = laws["texto"].apply(remove_portuguese_stopwords)
# In[ ]:
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(laws["texto_limpo"])
X
# In[ ]:
transformer = TfidfTransformer()
X_tfidf = transformer.fit_transform(X)
X_tfidf
# In[ ]:
query = ["rua espassonavel"]
query[0] = remove_portuguese_stopwords(query[0])
query = vectorizer.transform(query)
query = transformer.transform(query)
# In[ ]:
best_matches = cosine_similarity(query, X_tfidf)
best_matches_idx = np.argsort(best_matches)
for i in range(1, 5):
idx = best_matches_idx[0, -i]
print(laws.loc[idx, "texto"])
print("\n---Next Result:---\n")
# Ta-da! A simple search engine, done!
#
# There are limitations.
# Word order and composition is one of them, for example.
# It is no use searching for a person's first name + surname:
# the engine will return results in which
# either of these terms happens to be more frequent.
# There are no Google-style quotes to say
# "search for this term as a whole".
#
# For example, if I search for Elydio,
# the first result is the law granting
# citizenship to Elydio <NAME>.
# Perfect.
# But if I search for <NAME>,
# the first result does not even contain Azevedo,
# while the name Lopes appears more than once.
#
# One way around this difficulty is
# to use bigrams or larger n-grams,
# as in the sketch below.
#
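# A minimal sketch of the n-gram idea (hypothetical code, not part of the
# original notebook): the only change is `ngram_range=(1, 2)` in
# CountVectorizer, so that pairs such as first name + surname become tokens
# of their own and exact matches are rewarded.
# In[ ]:
ngram_vectorizer = CountVectorizer(ngram_range=(1, 2))
X_ngram = ngram_vectorizer.fit_transform(laws["texto_limpo"])
ngram_transformer = TfidfTransformer()
X_ngram_tfidf = ngram_transformer.fit_transform(X_ngram)

ngram_query = ngram_transformer.transform(
    ngram_vectorizer.transform([remove_portuguese_stopwords("rua espassonavel")])
)
top_idx = np.argsort(cosine_similarity(ngram_query, X_ngram_tfidf))[0, -3:]
for idx in top_idx[::-1]:
    print(laws.loc[idx, "texto"][:200])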
# ## Other options
# ### Indexing
# There are other, also simple, ways to index
# the documents and to retrieve them.
# One other way to index, for example,
# is to build a vector for each word
# by counting its neighboring words.
# The document vector would then be
# the sum of its word vectors.
# It is an interesting approach because
# it can produce interesting visualizations
# of the similarity between words.
# For example, in the Municipal Laws corpus,
# which words is EDUCAÇÃO most similar to?
# Or SAÚDE? Etc.
#
# Another way is to count n-grams - for example,
# bigrams: two words joined into a single token.
# That gives you a larger matrix
# and, to some extent, a notion of word order,
# which can be useful for names of people and neighborhoods,
# as mentioned above.
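# A rough sketch of the neighbor-count indexing idea described above
# (hypothetical code, not from the original notebook): each word gets a
# vector of co-occurrence counts within a small window, and a document
# vector is the sum of the vectors of its words.
# In[ ]:
from collections import Counter, defaultdict

window = 2
cooc = defaultdict(Counter)  # word -> counts of its neighboring words
for text in laws["texto_limpo"]:
    tokens = text.split()
    for i, tok in enumerate(tokens):
        for j in range(max(0, i - window), min(len(tokens), i + window + 1)):
            if j != i:
                cooc[tok][tokens[j]] += 1

cooc_vocab = {w: i for i, w in enumerate(cooc)}

def doc_vector(text):
    # document vector = sum of the co-occurrence vectors of its words
    vec = np.zeros(len(cooc_vocab))
    for tok in text.split():
        for neighbor, count in cooc.get(tok, Counter()).items():
            vec[cooc_vocab[neighbor]] += count
    return vec

example_vec = doc_vector(laws.loc[0, "texto_limpo"])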
#
# ### Retrieval
# Another way to retrieve is through
# _locality-sensitive hashing_ (LSH).
# It splits the space with random hyperplanes, several times over,
# and returns the results that fall in the same region as the query.
# However,
# the corpus is not large enough to require this strategy,
# which is meant for large corpora.
# The method above
# (compute the cosine similarity and return the highest values)
# is fast enough to feel instantaneous.
# Perhaps with higher demand on the server
# the need to speed up the search will arise,
# but for now that is not the case. A toy sketch follows below.
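# A toy locality-sensitive hashing sketch (hypothetical code, for illustration
# only): random hyperplanes turn each TF-IDF vector into a short bit
# signature, and only the documents sharing the query's bucket are candidates.
# In[ ]:
rng = np.random.RandomState(0)
n_planes = 8
planes = rng.randn(X_tfidf.shape[1], n_planes)

def signature(vectors):
    # one bit per hyperplane: which side of the plane the vector falls on
    return (vectors.dot(planes) > 0).astype(int)

buckets = {}
for idx, sig in enumerate(signature(X_tfidf)):
    buckets.setdefault(tuple(sig), []).append(idx)

candidates = buckets.get(tuple(signature(query)[0]), [])
print(len(candidates), "laws share the query's bucket")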
#
# There is also a [new method]
# (https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html)
# and a library for it,
# released by Google recently,
# on July 28, 2020.
#
# ### Evaluation
# With multiple ways to index and retrieve comes the dilemma:
# how do we evaluate whether one is better than another?
# Repeat the process above for every option?
# That is, show the N best results and compare them manually?
# Or label some of the laws?
# E.g.: this law is about such a topic, involving such entities.
# Evaluation approaches still need to be investigated.
# If this were in production,
# we could evaluate by _click-through rate_ (CTR), for example,
# but that is not the case; a small offline sketch is given below.
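# A hypothetical offline evaluation sketch (the relevance labels below are
# made up for illustration): precision@k of the TF-IDF ranking defined above,
# given a small hand-labeled set of queries.
# In[ ]:
labeled_queries = {"rua espassonavel": {2}}  # hypothetical query -> relevant law indices

def precision_at_k(query_text, relevant_idxs, k=5):
    q = transformer.transform(vectorizer.transform([remove_portuguese_stopwords(query_text)]))
    top_k = np.argsort(cosine_similarity(q, X_tfidf))[0, ::-1][:k]
    return len(set(top_k) & relevant_idxs) / k

for q_text, relevant in labeled_queries.items():
    print(q_text, precision_at_k(q_text, relevant))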
|
[
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.read_json",
"numpy.argsort",
"scripts.nlp.remove_portuguese_stopwords",
"sklearn.feature_extraction.text.TfidfTransformer"
] |
[((1247, 1272), 'pandas.read_json', 'pd.read_json', (['"""leis.json"""'], {}), "('leis.json')\n", (1259, 1272), True, 'import pandas as pd\n'), ((2097, 2114), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (2112, 2114), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2194, 2212), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (2210, 2212), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2314, 2351), 'scripts.nlp.remove_portuguese_stopwords', 'remove_portuguese_stopwords', (['query[0]'], {}), '(query[0])\n', (2341, 2351), False, 'from scripts.nlp import remove_portuguese_stopwords\n'), ((2453, 2486), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['query', 'X_tfidf'], {}), '(query, X_tfidf)\n', (2470, 2486), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2506, 2530), 'numpy.argsort', 'np.argsort', (['best_matches'], {}), '(best_matches)\n', (2516, 2530), True, 'import numpy as np\n')]
|