prompt | completion | api
---|---|---
import numpy as np
from scipy.signal import argrelmax, argrelmin
from colonyscopy.tools import smoothen, color_distance, expand_mask, color_sum, circle_mask
import matplotlib.pyplot as plt
from warnings import warn
class ColonyscopyFailedHeuristic(Exception):
pass
class Colony(object):
"""
Class representing a single colony.
Parameters
----------
images : NumPy array
A four-dimensional array containing the data for the actual colony. This can be a slice of a bigger array. The first dimension is time; the next two dimensions are spatial; the last dimension has length 3 and is colour.
background : NumPy array
A three-dimensional array containing the background, i.e., what to expect in the absence of a colony. The first two dimensions are spatial; the last dimension has length 3 and is colour.
"""
def __init__(self,images,background,speckle_mask,centre):
self.images = images
self.background = background
self.speckle_mask = speckle_mask
self.centre = centre
self.resolution = images.shape[1:3]
self.n_colours = images.shape[3]
self.n_times = images.shape[0]
@property
def temp_mean(self):
if not hasattr(self,"_temp_mean"):
self._temp_mean = np.average(self.images,axis=0)
return self._temp_mean
@property
def colony_intensity(self):
if not hasattr(self, "_colony_intensity"):
self.create_colony_intensity()
return self._colony_intensity
def create_colony_intensity(self):
bg_intensity = np.empty(self.n_times)
col_intensity = np.empty(self.n_times)
self._colony_intensity = np.empty(self.n_times)
# Loop over time because boolean masking flattens the spatial structure
for t in range(self.n_times):
col_intensity[t] = np.mean(color_sum(self.images[t,:,:])[self.mask])
bg_intensity[t] = np.median(color_sum(self.images[t,:,:])[self.background_mask])
intensity = (col_intensity - bg_intensity)
self._colony_intensity = intensity
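# If the colony reads darker than the background in the first frames, the background mask most
# likely still contains bright (colony or artefact) pixels; the block below masks them out and
# recomputes the intensity.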
if (np.mean(intensity[:6]) < 0):
factor = 1.5 if (np.mean(intensity[:6]) < -20) else 1.8
background_px = color_sum(self.images[0])[self.background_mask]
bright_px = np.multiply(color_sum(self.images[0]), self.background_mask) > np.mean(background_px)+factor*np.std(background_px)
bright_px = expand_mask(bright_px, width=3)
self._background_mask &= np.logical_not(bright_px)
try:
self.create_colony_intensity()
except RecursionError:
# Fall back to the current masks if the refinement keeps recursing.
bg_intensity = np.empty(self.n_times)
col_intensity = np.empty(self.n_times)
# Loop over time because boolean masking flattens the spatial structure
for t in range(self.n_times):
col_intensity[t] = np.mean(color_sum(self.images[t,:,:])[self.mask])
bg_intensity[t] = np.median(color_sum(self.images[t,:,:])[self.background_mask])
self._colony_intensity = col_intensity - bg_intensity
@property
def mask(self):
"""
Returns the mask for colony area for this segment.
"""
if not hasattr(self,"_mask"):
self.create_mask()
return self._mask
def create_mask(self, cutoff_factor = 0.5, inner_circle_width = 14, colony_mask_width = 10):
"""
Creates a mask for colony area in this segment.
"""
t = self.threshold_timepoint
inner_circle = circle_mask(self.resolution,self.centre,inner_circle_width)
if t is None:
warn("Growth threshold was not reached. Mask created from circle around segment center.")
self._mask = circle_mask(self.resolution, self.centre, colony_mask_width) & self.speckle_mask
else:
max_value = np.max(color_sum(self.images[t])[self.speckle_mask])
min_value = np.min(color_sum(self.images[t])[self.speckle_mask])
self._mask = np.multiply(color_sum(self.images[t]),self.speckle_mask) > cutoff_factor * (max_value+min_value)
if t == self.n_times-1:
warn("Segment intensity threshold was not reached. Colony area mask was created from last picture in time lapse.")
if np.sum(self._mask) < 120:
warn("Colony area mask too sparse to give reliable results.")
if np.sum(np.multiply(self._mask, np.logical_not(inner_circle)))/np.sum(self._mask) > 0.12:
warn("Significant part of colony area mask outside of inner part of the segment. Contamination is likely.")
@property
def threshold_timepoint(self):
"""
Returns the timepoint when intensity thresholding should be done to determine colony area.
TODO: explain parameter
"""
if not hasattr(self,"_threshold_timepoint"):
self.create_threshold_timepoint()
return self._threshold_timepoint
def create_threshold_timepoint(self, seg_intensity_threshold = 1000, smooth_width = 10, growth_threshold = 600):
a = self.segment_intensity()
a = smoothen(a, smooth_width)
try:
self._threshold_timepoint = np.where(a > seg_intensity_threshold)[0][0]
except IndexError:
self._threshold_timepoint = self.n_times-1
warn("Segment intensity threshold was not reached. Colony area mask will be created from last picture in time lapse.")
if not np.any(a > growth_threshold):
self._threshold_timepoint = None
#"""
@property
def background_mask(self):
"""
Returns the mask for background pixels in this segment.
TODO: explain parameter
"""
if not hasattr(self,"_background_mask"):
self.create_background_mask()
return self._background_mask
@background_mask.setter
def background_mask(self,value):
if np.shape(value) == self.resolution and value.dtype == bool: # only accept a boolean array matching the segment resolution
self._background_mask = value
def create_background_mask(self, expansion = 6):
"""
Creates a mask that only includes background pixels of this segment.
TODO: explain parameter
"""
if self.threshold_timepoint is None:
self._background_mask = np.logical_not(expand_mask(self.mask, width = int(1.6*expansion)) + np.logical_not(self.speckle_mask))
else:
self._background_mask = np.logical_not(expand_mask(self.mask, width = expansion) + np.logical_not(self.speckle_mask))
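# segment_intensity below subtracts a baseline (the mean of the seven smallest frame values),
# presumably so that the reported intensity starts near zero before visible growth.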
def segment_intensity(self):
seg_intensity = np.array([np.mean(color_sum(self.images[t])[self.speckle_mask]) for t in range(self.n_times)])
return seg_intensity - np.average(np.sort(seg_intensity)[:7])
def plot_segment_intensity(self,smooth_width = 10, seg_intensity_threshold = 1000):
plt.plot(self.segment_intensity(), label='Segment intensity')
plt.plot(smoothen(self.segment_intensity(), smooth_width), label='Smoothened segment intensity')
plt.plot(seg_intensity_threshold * np.ones(self.n_times), '--', label='Threshold')
plt.legend()
plt.show()
def generation_time(self, fit_interval_length = 0.7, min_lower_bound = 1.8, smooth_width = 7): # New intensity measure
N_t = self.n_times
time = np.linspace(0,(N_t-1)*0.25,N_t)
pl = self.colony_intensity
if np.min(pl) < 0:
pl = pl+1.05*abs(
|
np.min(pl)
|
numpy.min
|
""" Group Factor Analysis (extended model) """
#Author: <NAME> (<EMAIL>)
#Date: 22 February 2021
import numpy as np
from scipy.special import digamma, gammaln
from scipy.optimize import fmin_l_bfgs_b as lbfgsb
class GFA_DiagonalNoiseModel(object):
def __init__(self, X, params, imputation=False):
self.s = params['num_groups'] #number of groups
# ensure the data was generated with the correct number of groups
assert self.s == len(X)
#number of features in each group
self.d = []
for s in range(self.s):
self.d.append(X[s].shape[1])
self.td = np.sum(self.d) #total number of features
self.k = params['K'] #number of factors
self.N = X[0].shape[0] #number of samples
# Check scenario ('complete' for complete data; 'incomplete' for incomplete data)
if imputation:
self.scenario = 'complete'
else:
self.scenario = params['scenario']
#hyperparameters
self.a0_alpha = self.b0_alpha = self.a0_tau = self.b0_tau = 1e-14
# Initialising variational parameters
#latent variables
self.means_z = np.reshape(np.random.normal(0, 1, self.N*self.k),(self.N, self.k))
self.sigma_z = np.zeros((self.k, self.k, self.N))
self.sum_sigmaZ = self.N * np.identity(self.k)
#loading matrices
self.means_w = [[] for _ in range(self.s)]
self.sigma_w = [[] for _ in range(self.s)]
self.E_WW = [[] for _ in range(self.s)]
self.Lqw = [[] for _ in range(self.s)]
#ARD parameters
self.a_alpha = [[] for _ in range(self.s)]
self.b_alpha = [[] for _ in range(self.s)]
self.E_alpha = [[] for _ in range(self.s)]
#noise parameters
self.a_tau = [[] for _ in range(self.s)]
self.b_tau = [[] for _ in range(self.s)]
self.E_tau = [[] for _ in range(self.s)]
if self.scenario == 'incomplete':
#initialise variables to incomplete data sets
self.X_nan = [[] for _ in range(self.s)]
self.N_clean = [[] for _ in range(self.s)]
#constants for ELBO
self.logalpha = [[] for _ in range(self.s)]
self.logtau = [[] for _ in range(self.s)]
self.L_const = [[] for _ in range(self.s)]
for i in range(0, self.s):
if self.scenario == 'incomplete':
# Checking NaNs
X_new = np.zeros((1, X[i].size))
X_new[0, np.flatnonzero(np.isnan(X[i]))] = 1
self.X_nan[i] = np.reshape(X_new,(self.N, self.d[i]))
self.N_clean[i] = np.sum(~np.isnan(X[i]),axis=0)
#loading matrices
self.means_w[i] = np.zeros((self.d[i], self.k))
self.sigma_w[i] = np.zeros((self.k,self.k,self.d[i]))
#ARD parameters
self.a_alpha[i] = self.a0_alpha + self.d[i]/2.0
self.b_alpha[i] = np.ones((1, self.k))
self.E_alpha[i] = self.a_alpha[i] / self.b_alpha[i]
#noise parameters
if self.scenario == 'incomplete':
self.a_tau[i] = self.a0_tau + (self.N_clean[i])/2
else:
self.a_tau[i] = self.a0_tau + (self.N) * np.ones((1,self.d[i]))/2
self.b_tau[i] = np.zeros((1, self.d[i]))
self.E_tau[i] = 1000.0 * np.ones((1, self.d[i]))
#ELBO constant
if self.scenario == 'complete':
self.L_const[i] = -0.5 * self.N * self.d[i] * np.log(2*np.pi)
else:
self.L_const[i] = -0.5 * np.sum(self.N_clean[i]) * np.log(2*np.pi)
# Rotation parameters
self.DoRotation = True
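# A minimal usage sketch (the dictionary keys follow the constructor above; data shapes are
# hypothetical): each group X_i is an (N x d_i) array with the same number of rows N.
# params = {'num_groups': 2, 'K': 10, 'scenario': 'complete'}
# model = GFA_DiagonalNoiseModel([X1, X2], params)
# model.fit([X1, X2])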
def update_w(self, X):
"""
Update the variational parameters of the loading matrices.
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
"""
self.sum_sigmaW = [np.zeros((self.k,self.k)) for _ in range(self.s)]
for i in range(0, self.s):
self.Lqw[i] = np.zeros((1, self.d[i]))
if self.scenario == 'complete':
S1 = self.sum_sigmaZ + np.dot(self.means_z.T,self.means_z)
S2 = np.dot(X[i].T,self.means_z)
for j in range(0, self.d[i]):
# Update covariance matrices of Ws
self.sigma_w[i][:,:,j] = np.diag(self.E_alpha[i]) + \
self.E_tau[i][0,j] * S1
cho = np.linalg.cholesky(self.sigma_w[i][:,:,j])
invCho = np.linalg.inv(cho)
self.sigma_w[i][:,:,j] = np.dot(invCho.T,invCho)
self.sum_sigmaW[i] += self.sigma_w[i][:,:,j]
# Update expectations of Ws
self.means_w[i][j,:] = np.dot(S2[j,:],self.sigma_w[i][:,:,j]) * \
self.E_tau[i][0,j]
# Compute determinant for ELBO
self.Lqw[i][0,j] = -2 * np.sum(np.log(np.diag(cho)))
else:
for j in range(0, self.d[i]):
samples = np.array(np.where(self.X_nan[i][:,j] == 0))
x = np.reshape(X[i][samples[0,:], j],(1, samples.shape[1]))
Z = np.reshape(self.means_z[samples[0,:],:],(samples.shape[1],self.k))
S1 = self.sum_sigmaZ + np.dot(Z.T,Z)
S2 = np.dot(x,Z)
# Update covariance matrices of Ws
self.sigma_w[i][:,:,j] = np.diag(self.E_alpha[i]) + \
self.E_tau[i][0,j] * S1
cho = np.linalg.cholesky(self.sigma_w[i][:,:,j])
invCho = np.linalg.inv(cho)
self.sigma_w[i][:,:,j] = np.dot(invCho.T,invCho)
self.sum_sigmaW[i] += self.sigma_w[i][:,:,j]
# Update expectations of Ws
self.means_w[i][j,:] = np.dot(S2,self.sigma_w[i][:,:,j]) * \
self.E_tau[i][0,j]
# Compute determinant for ELBO
self.Lqw[i][0,j] = -2 * np.sum(np.log(np.diag(cho)))
# Calculate E[W^T W]
self.E_WW[i] = self.sum_sigmaW[i] + \
np.dot(self.means_w[i].T, self.means_w[i])
def update_z(self, X):
"""
Update the variational parameters of the latent variables.
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
"""
self.means_z = self.means_z * 0
if self.scenario == 'complete':
# Update covariance matrix of Z
self.sigma_z = np.identity(self.k)
for i in range(0, self.s):
for j in range(0, self.d[i]):
w = np.reshape(self.means_w[i][j,:], (1,self.k))
ww = self.sigma_w[i][:,:, j] + np.dot(w.T, w)
self.sigma_z += ww * self.E_tau[i][0,j]
#efficient way of computing sigmaZ
cho = np.linalg.cholesky(self.sigma_z)
invCho = np.linalg.inv(cho)
self.sigma_z = np.dot(invCho.T,invCho)
self.sum_sigmaZ = self.N * self.sigma_z
# Compute determinant for ELBO
self.Lqz = -2 * np.sum(np.log(np.diag(cho)))
# Update expectations of Z
self.means_z = self.means_z * 0
for i in range(0, self.s):
for j in range(0, self.d[i]):
x = np.reshape(X[i][:, j],(self.N,1))
w = np.reshape(self.means_w[i][j,:], (1,self.k))
self.means_z += np.dot(x, w) * self.E_tau[i][0,j]
self.means_z = np.dot(self.means_z, self.sigma_z)
else:
self.sigma_z = np.zeros((self.k,self.k,self.N))
self.sum_sigmaZ = np.zeros((self.k,self.k))
self.Lqz = np.zeros((1, self.N))
for n in range(0, self.N):
self.sigma_z[:,:,n] = np.identity(self.k)
S1 = np.zeros((1,self.k))
for i in range(0, self.s):
dim = np.array(np.where(self.X_nan[i][n,:] == 0))
for j in range(dim.shape[1]):
w = np.reshape(self.means_w[i][dim[0,j],:], (1,self.k))
ww = self.sigma_w[i][:,:, dim[0,j]] + np.dot(w.T, w)
self.sigma_z[:,:,n] += ww * self.E_tau[i][0,dim[0,j]]
x = np.reshape(X[i][n, dim[0,:]],(1, dim.size))
tau = np.reshape(self.E_tau[i][0,dim[0,:]],(1, dim.size))
S1 += np.dot(x, np.diag(tau[0])).dot(self.means_w[i][dim[0,:],:])
# Update covariance matrix of Z
cho = np.linalg.cholesky(self.sigma_z[:,:,n])
invCho = np.linalg.inv(cho)
self.sigma_z[:,:,n] = np.dot(invCho.T,invCho)
self.sum_sigmaZ += self.sigma_z[:,:,n]
# Update expectations of Z
self.means_z[n,:] = np.dot(S1, self.sigma_z[:,:,n])
# Compute determinant for ELBO
self.Lqz[0,n] = -2 * np.sum(np.log(np.diag(cho)))
# Calculate E[Z^T Z]
self.E_zz = self.sum_sigmaZ + np.dot(self.means_z.T, self.means_z)
def update_alpha(self):
"""
Update the variational parameters of the alphas.
"""
for i in range(0, self.s):
# Update b_alpha
self.b_alpha[i] = self.b0_alpha + np.diag(self.E_WW[i])/2
# Update expectation of alpha
self.E_alpha[i] = self.a_alpha[i] / self.b_alpha[i]
def update_tau(self, X):
"""
Update the variational parameters of the taus.
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
"""
for i in range(0, self.s):
for j in range(0, self.d[i]):
if self.scenario == 'complete':
w = np.reshape(self.means_w[i][j,:], (1,self.k))
ww = self.sigma_w[i][:,:, j] + np.dot(w.T, w)
x = np.reshape(X[i][:, j],(self.N,1))
z = self.means_z
ZZ = self.E_zz
else:
samples = np.array(np.where(self.X_nan[i][:,j] == 0))
w = np.reshape(self.means_w[i][j,:], (1,self.k))
ww = self.sigma_w[i][:,:,j] + np.dot(w.T,w)
x = np.reshape(X[i][samples[0,:],j],(samples.size,1))
z = np.reshape(self.means_z[samples[0,:],:],(samples.size,self.k))
sum_covZ = np.sum(self.sigma_z[:,:,samples[0,:]],axis=2)
ZZ = sum_covZ + np.dot(z.T,z)
# Update b_tau
self.b_tau[i][0,j] = self.b0_tau + 0.5 * (np.dot(x.T,x) + \
np.trace(np.dot(ww, ZZ)) - 2 * np.dot(x.T,z).dot(w.T))
# Update expectation of tau
self.E_tau[i] = self.a_tau[i]/self.b_tau[i]
def lower_bound(self, X):
"""
Calculate Evidence Lower Bound (ELBO).
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
Returns
-------
L : float
Evidence Lower Bound (ELBO).
"""
# Calculate E[ln p(X|Z,W,tau)]
L = 0
for i in range(0, self.s):
#calculate E[ln alpha] and E[ln tau]
self.logalpha[i] = digamma(self.a_alpha[i]) - np.log(self.b_alpha[i])
self.logtau[i] = digamma(self.a_tau[i]) - np.log(self.b_tau[i])
if self.scenario == 'complete':
L += self.L_const[i] + np.sum(self.N * self.logtau[i]) / 2 - \
np.sum(self.E_tau[i] * (self.b_tau[i] - self.b0_tau))
else:
L += self.L_const[i] + np.sum(self.N_clean[i] * self.logtau[i]) / 2 - \
np.sum(self.E_tau[i] * (self.b_tau[i] - self.b0_tau))
# Calculate E[ln p(Z)] - E[ln q(Z)]
self.Lpz = - 1/2 * np.sum(np.diag(self.E_zz))
if self.scenario == 'complete':
self.Lqz = - self.N * 0.5 * (self.Lqz + self.k)
else:
self.Lqz = - 0.5 * (np.sum(self.Lqz) + self.k)
L += self.Lpz - self.Lqz
# Calculate E[ln p(W|alpha)] - E[ln q(W|alpha)]
self.Lpw = 0
for i in range(0, self.s):
self.Lpw += 0.5 * self.d[i] * np.sum(self.logalpha[i]) - np.sum(
np.diag(self.E_WW[i]) * self.E_alpha[i])
self.Lqw[i] = - 0.5 * np.sum(self.Lqw[i]) - 0.5 * self.d[i] * self.k
L += self.Lpw - sum(self.Lqw)
# Calculate E[ln p(alpha) - ln q(alpha)]
self.Lpa = self.Lqa = 0
for i in range(0, self.s):
self.Lpa += self.k * (-gammaln(self.a0_alpha) + self.a0_alpha * np.log(self.b0_alpha)) \
+ (self.a0_alpha - 1) * np.sum(self.logalpha[i]) - self.b0_alpha * np.sum(self.E_alpha[i])
self.Lqa += -self.k * gammaln(self.a_alpha[i]) + self.a_alpha[i] * np.sum(np.log(
self.b_alpha[i])) + ((self.a_alpha[i] - 1) * np.sum(self.logalpha[i])) - \
np.sum(self.b_alpha[i] * self.E_alpha[i])
L += self.Lpa - self.Lqa
# Calculate E[ln p(tau) - ln q(tau)]
self.Lpt = self.Lqt = 0
for i in range(0, self.s):
self.Lpt += self.d[i] * (-gammaln(self.a0_tau) + self.a0_tau * np.log(self.b0_tau)) \
+ (self.a0_tau -1) * np.sum(self.logtau[i]) - self.b0_tau * np.sum(self.E_tau[i])
self.Lqt += -np.sum(gammaln(self.a_tau[i])) + np.sum(self.a_tau[i] * np.log(self.b_tau[i])) + \
np.sum((self.a_tau[i] - 1) * self.logtau[i]) - np.sum(self.b_tau[i] * self.E_tau[i])
L += self.Lpt - self.Lqt
return L
def fit(self, X, iterations=10000, threshold=1e-6):
"""
Fit the extended GFA model (diagonal noise).
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
iterations : int
Maximum number of iterations.
threshold : float
Convergence threshold: the model stops when the relative change
in the lower bound falls below this value.
"""
L_previous = 0
self.L = []
for i in range(iterations):
self.remove_factors()
self.update_w(X)
self.update_z(X)
if i > 0 and self.DoRotation == True:
self.update_Rot()
self.update_alpha()
self.update_tau(X)
L_new = self.lower_bound(X)
self.L.append(L_new)
diff = L_new - L_previous
if abs(diff)/abs(L_new) < threshold:
print("ELBO (last value):", L_new)
print("Number of iterations:", i+1)
self.iter = i+1
break
elif i == iterations - 1:
print("ELBO did not converge")
L_previous = L_new
if i < 1:
print("ELBO (1st value):", L_new)
def update_Rot(self):
"""
Optimization of the rotation.
"""
r = np.matrix.flatten(np.identity(self.k))
r_opt = lbfgsb(self.Er, r, self.gradEr)
if r_opt[2]['warnflag'] == 0:
# Update transformation matrix R
Rot = np.reshape(r_opt[0],(self.k,self.k))
u, s, v = np.linalg.svd(Rot)
Rotinv = np.dot(v.T * np.outer(np.ones((1,self.k)), 1/s), u.T)
det = np.sum(np.log(s))
# Update Z
self.means_z = np.dot(self.means_z, Rotinv.T)
if self.scenario == 'complete':
self.sigma_z = np.dot(Rotinv, self.sigma_z).dot(Rotinv.T)
else:
for n in range(0, self.N):
self.sigma_z[:,:,n] =
|
np.dot(Rotinv, self.sigma_z[:,:,n])
|
numpy.dot
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
hsrlca.calc: High-speed rail life cycle assessment calculations
===============================================================
hsrlca.calc provides requisite functions for stepwise construction of a high-
speed rail life cycle assessment model, as designed for research on Chinese
rail development in continental Southeast Asia. It provides for dynamic
modeling of trade arrangements and national energy mix scenarios.
The functions of this module are, at present, specific to six countries and
require inputs of the same format as included in the hsrlca package's example
documents. Should this project evolve in the future, the first aim will be to
increase generalizability to other scenarios.
Dependencies
------------
numpy
pandas
'''
import numpy as np
import pandas as pd
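# A rough end-to-end sketch of the intended pipeline (file names and column layouts below are
# hypothetical and must match the package's example documents):
# up_inputs_base = pd.read_csv('up_inputs_base.csv')
# up_countries = pd.read_csv('up_countries.csv')
# trade_distances = pd.read_csv('trade_distances.csv')
# schedule = tradeSchedule('Thailand', up_countries, up_inputs_base)
# transport = transportSchedule(schedule, trade_distances, up_inputs_base)
# up_inputs = transportUpdate(transport, up_inputs_base, rail_allocation=0.5)
# mixes = energyMixes(pd.read_csv('national_energy_supply.csv'))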
def tradeSchedule(home_country, up_countries, up_inputs_base):
'''Creates a trade schedule given the specified home country and the
countries in which various unit processes occur.'''
# Extract list of unit processes from master unit process dataset
up_list = pd.DataFrame({'unit_process': up_inputs_base['unit_process']})
# Fill home_country column for every unit process row in up_list
home_list = pd.DataFrame({'home_country': [home_country]*len(up_list)},
index = up_list['unit_process'])
# Check up_countries index and set to up_list values if not already same
up_index = pd.Index(up_list['unit_process'])
up_countries_idx = up_countries.index
if not up_countries_idx.equals(up_index):
up_countries.index = up_list['unit_process']
up_countries.drop('unit_process', axis=1, inplace=True)
# Concatenate with the list of producing countries to provide a trade schedule
trade_schedule = pd.concat([home_list, up_countries], axis=1)
return trade_schedule
def transportSchedule(trade_schedule, trade_distances, up_inputs_base):
'''Creates a pandas dataframe containing estimated average transport
distances, covering all possible routes.'''
# Extract list of unit processes from master unit process dataset
up_list = pd.DataFrame({'unit_process': up_inputs_base['unit_process']})
# Map distances to the trade schedule found via tradeSchedule(...)
transport_schedule = pd.merge(trade_schedule, trade_distances, how='left', on=['home_country', 'up_countries'])
# Set index to unit processes
transport_schedule.index = up_list['unit_process']
return transport_schedule
def transportUpdate(transport_schedule, up_inputs_base, rail_allocation=0.5):
'''Updates the given inputs dataframe with calculated transport requirements.'''
# Extract transport_distances
transport_distances = pd.DataFrame(transport_schedule['avg_export_distance'])
# Mask unit processes that do not entail transportation by setting value to 0
no_transport = ['electricity_Cambodia_kWh',
'electricity_China_kWh',
'electricity_LaoPDR_kWh',
'electricity_Myanmar_kWh',
'electricity_Thailand_kWh',
'electricity_Vietnam_kWh',
'lorry_raw_material_transport_kg-km',
'rail_raw_material_transport_kg-km',
'lorry_intermediate_component_transport_kg-km',
'rail_intermediate_component_transport_kg-km',
'lorry_final_component_transport_kg-km',
'rail_final_component_transport_kg-km',
'high_speed_rail_operation_p-km'
]
for unit_process in no_transport:
transport_distances.loc[unit_process] = 0
# Create copy of up_inputs_base for transformation
up_inputs_transport_update = up_inputs_base.copy()
# Set up_inputs_base indices
up_inputs_transport_update.set_index(['phase', 'unit_process'], inplace=True)
for up in transport_distances.index:
if up_inputs_transport_update.xs(up, level = 1, drop_level=False).index[0][0] == "raw_material_extraction":
up_inputs_transport_update.loc['raw_material_extraction', up]['lorry_raw_material_transport_kg-km'] = (1 - rail_allocation) * transport_distances.loc[up]['avg_export_distance']
up_inputs_transport_update.loc['raw_material_extraction', up]['rail_raw_material_transport_kg-km'] = rail_allocation * transport_distances.loc[up]['avg_export_distance']
elif up_inputs_transport_update.xs(up, level = 1, drop_level=False).index[0][0] == "intermediate_component_production_i":
up_inputs_transport_update.loc['intermediate_component_production_i', up]['lorry_intermediate_component_transport_kg-km'] = (1 - rail_allocation) * transport_distances.loc[up]['avg_export_distance']
up_inputs_transport_update.loc['intermediate_component_production_i', up]['rail_intermediate_component_transport_kg-km'] = rail_allocation * transport_distances.loc[up]['avg_export_distance']
elif up_inputs_transport_update.xs(up, level = 1, drop_level=False).index[0][0] == "intermediate_component_production_ii":
up_inputs_transport_update.loc['intermediate_component_production_ii', up]['lorry_intermediate_component_transport_kg-km'] = (1 - rail_allocation) * transport_distances.loc[up]['avg_export_distance']
up_inputs_transport_update.loc['intermediate_component_production_ii', up]['rail_intermediate_component_transport_kg-km'] = rail_allocation * transport_distances.loc[up]['avg_export_distance']
elif up_inputs_transport_update.xs(up, level = 1, drop_level=False).index[0][0] == "final_component_production":
up_inputs_transport_update.loc['final_component_production', up]['lorry_final_component_transport_kg-km'] = (1 - rail_allocation) * transport_distances.loc[up]['avg_export_distance']
up_inputs_transport_update.loc['final_component_production', up]['rail_final_component_transport_kg-km'] = rail_allocation * transport_distances.loc[up]['avg_export_distance']
return up_inputs_transport_update
def energyMixes(national_energy_supply):
'''Creates a dataframe containing percentage energy mixes for each country
from given raw values.'''
# Create national_energy_mixes dataframe
national_energy_mixes = national_energy_supply.copy()
# Set country column as index
national_energy_mixes.set_index('country', drop=True, inplace=True)
# Calculate country energy totals
national_energy_mixes['total_gw'] = national_energy_mixes.sum(axis=1)
# Remove the "_gw" suffix from the column names (str.replace targets the suffix, unlike rstrip, which strips a character set)
national_energy_mixes.columns = national_energy_mixes.columns.str.replace('_gw$', '', regex=True)
# Tabulate % energy mix by fuel type and assign to new "% Total" columns
for col in national_energy_mixes:
national_energy_mixes[col + "_pct_total"] = national_energy_mixes[col] / national_energy_mixes['total']
# Remove raw values
national_energy_mixes.drop(national_energy_mixes.columns[0:8], axis=1, inplace=True)
# Confirm totals add up to 100% and, if so, remove totals column
if
|
np.mean(national_energy_mixes['total_pct_total'])
|
numpy.mean
|
# pylint: disable=R0201
import operator
from abc import ABC
from copy import deepcopy
from typing import List, Type, Union
import numpy as np
import pytest
from PartSegCore.algorithm_describe_base import ROIExtractionProfile
from PartSegCore.analysis.algorithm_description import analysis_algorithm_dict
from PartSegCore.analysis.analysis_utils import SegmentationPipeline, SegmentationPipelineElement
from PartSegCore.analysis.calculate_pipeline import calculate_pipeline
from PartSegCore.convex_fill import _convex_fill, convex_fill
from PartSegCore.image_operations import RadiusType
from PartSegCore.mask_create import MaskProperty, calculate_mask
from PartSegCore.roi_info import BoundInfo, ROIInfo
from PartSegCore.segmentation import ROIExtractionAlgorithm, algorithm_base
from PartSegCore.segmentation import restartable_segmentation_algorithms as sa
from PartSegCore.segmentation.noise_filtering import noise_filtering_dict
from PartSegCore.segmentation.watershed import flow_dict
from PartSegImage import Image
def get_two_parts_array():
data = np.zeros((1, 50, 100, 100, 1), dtype=np.uint16)
data[0, 10:40, 10:40, 10:90] = 50
data[0, 10:40, 50:90, 10:90] = 50
data[0, 15:35, 15:35, 15:85] = 70
data[0, 15:35, 55:85, 15:85] = 70
data[0, 10:40, 40:50, 10:90] = 40
return data
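# The synthetic volume above contains two bright boxes (intensity 50 with 70 cores) joined by a
# dimmer bridge of intensity 40, which the threshold-based segmentations are expected to separate.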
def get_two_parts():
return Image(get_two_parts_array(), (100, 50, 50), "")
def get_two_parts_reversed():
data = get_two_parts_array()
data = 100 - data
return Image(data, (100, 50, 50), "")
def get_multiple_part_array(part_num):
data = np.zeros((1, 20, 40, 40 * part_num, 1), dtype=np.uint8)
data[0, 4:16, 8:32, 8 : 40 * part_num - 8] = 40
for i in range(part_num):
data[0, 5:15, 10:30, 40 * i + 10 : 40 * i + 30] = 50
data[0, 7:13, 15:25, 40 * i + 15 : 40 * i + 25] = 70
return data
def get_multiple_part(part_num):
return Image(get_multiple_part_array(part_num), (100, 50, 50), "")
def get_multiple_part_reversed(part_num):
data = 100 - get_multiple_part_array(part_num)
return Image(data, (100, 50, 50), "")
def get_two_parts_side():
data = get_two_parts_array()
data[0, 25, 40:45, 50] = 49
data[0, 25, 45:50, 51] = 49
return Image(data, (100, 50, 50), "")
def get_two_parts_side_reversed():
data = get_two_parts_array()
data[0, 25, 40:45, 50] = 49
data[0, 25, 45:50, 51] = 49
data = 100 - data
return Image(data, (100, 50, 50), "")
def empty(_s: str, _i: int):
"""mock function for callback"""
@pytest.mark.parametrize("algorithm_name", analysis_algorithm_dict.keys())
def test_base_parameters(algorithm_name):
algorithm_class = analysis_algorithm_dict[algorithm_name]
assert algorithm_class.get_name() == algorithm_name
algorithm_class: Type[ROIExtractionAlgorithm]
obj = algorithm_class()
values = algorithm_class.get_default_values()
obj.set_parameters(**values)
parameters = obj.get_segmentation_profile()
assert parameters.algorithm == algorithm_name
assert parameters.values == values
class BaseThreshold:
def check_result(self, result, sizes, op, parameters):
assert result.roi.max() == len(sizes)
assert np.all(op(np.bincount(result.roi.flat)[1:], np.array(sizes)))
assert result.parameters.values == parameters
assert result.parameters.algorithm == self.get_algorithm_class().get_name()
def get_parameters(self) -> dict:
if hasattr(self, "parameters") and isinstance(self.parameters, dict):
return deepcopy(self.parameters)
raise NotImplementedError
def get_shift(self):
if hasattr(self, "shift"):
return deepcopy(self.shift)
raise NotImplementedError
@staticmethod
def get_base_object():
raise NotImplementedError
@staticmethod
def get_side_object():
raise NotImplementedError
def get_algorithm_class(self) -> Type[ROIExtractionAlgorithm]:
raise NotImplementedError()
class BaseOneThreshold(BaseThreshold, ABC): # pylint: disable=W0223
def test_simple(self):
image = self.get_base_object()
alg: ROIExtractionAlgorithm = self.get_algorithm_class()()
parameters = self.get_parameters()
alg.set_image(image)
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
self.check_result(result, [96000, 72000], operator.eq, parameters)
parameters["threshold"]["values"]["threshold"] += self.get_shift()
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
self.check_result(result, [192000], operator.eq, parameters)
def test_side_connection(self):
image = self.get_side_object()
alg: ROIExtractionAlgorithm = self.get_algorithm_class()()
parameters = self.get_parameters()
parameters["side_connection"] = True
alg.set_image(image)
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
self.check_result(result, [96000 + 5, 72000 + 5], operator.eq, parameters)
parameters["side_connection"] = False
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
self.check_result(result, [96000 + 5 + 72000 + 5], operator.eq, parameters)
class TestLowerThreshold(BaseOneThreshold):
parameters = {
"channel": 0,
"minimum_size": 30000,
"threshold": {"name": "Manual", "values": {"threshold": 45}},
"noise_filtering": {"name": "None", "values": {}},
"side_connection": False,
}
shift = -6
@staticmethod
def get_base_object():
return get_two_parts()
@staticmethod
def get_side_object():
return get_two_parts_side()
def get_algorithm_class(self) -> Type[ROIExtractionAlgorithm]:
return sa.LowerThresholdAlgorithm
class TestUpperThreshold(BaseOneThreshold):
parameters = {
"channel": 0,
"minimum_size": 30000,
"threshold": {"name": "Manual", "values": {"threshold": 55}},
"noise_filtering": {"name": "None", "values": {}},
"side_connection": False,
}
shift = 6
@staticmethod
def get_base_object():
return get_two_parts_reversed()
@staticmethod
def get_side_object():
return get_two_parts_side_reversed()
def get_algorithm_class(self) -> Type[ROIExtractionAlgorithm]:
return sa.UpperThresholdAlgorithm
class TestRangeThresholdAlgorithm:
def test_simple(self):
image = get_two_parts()
alg = sa.RangeThresholdAlgorithm()
parameters = {
"lower_threshold": 45,
"upper_threshold": 60,
"channel": 0,
"minimum_size": 8000,
"noise_filtering": {"name": "None", "values": {}},
"side_connection": False,
}
alg.set_parameters(**parameters)
alg.set_image(image)
result = alg.calculation_run(empty)
assert np.max(result.roi) == 2
assert np.all(
np.bincount(result.roi.flat)[1:] == np.array([30 * 40 * 80 - 20 * 30 * 70, 30 * 30 * 80 - 20 * 20 * 70])
)
assert result.parameters.values == parameters
assert result.parameters.algorithm == alg.get_name()
parameters["lower_threshold"] -= 6
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
assert np.max(result.roi) == 1
assert np.bincount(result.roi.flat)[1] == 30 * 80 * 80 - 20 * 50 * 70
assert result.parameters.values == parameters
assert result.parameters.algorithm == alg.get_name()
def test_side_connection(self):
image = get_two_parts_side()
alg = sa.RangeThresholdAlgorithm()
parameters = {
"lower_threshold": 45,
"upper_threshold": 60,
"channel": 0,
"minimum_size": 8000,
"noise_filtering": {"name": "None", "values": {}},
"side_connection": True,
}
alg.set_parameters(**parameters)
alg.set_image(image)
result = alg.calculation_run(empty)
assert np.max(result.roi) == 2
assert np.all(
np.bincount(result.roi.flat)[1:]
== np.array([30 * 40 * 80 - 20 * 30 * 70 + 5, 30 * 30 * 80 - 20 * 20 * 70 + 5])
)
assert result.parameters.values == parameters
assert result.parameters.algorithm == alg.get_name()
parameters["side_connection"] = False
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
assert np.max(result.roi) == 1
assert np.bincount(result.roi.flat)[1] == 30 * 70 * 80 - 20 * 50 * 70 + 10
assert result.parameters.values == parameters
assert result.parameters.algorithm == alg.get_name()
class BaseFlowThreshold(BaseThreshold, ABC): # pylint: disable=W0223
@pytest.mark.parametrize("sprawl_algorithm_name", flow_dict.keys())
@pytest.mark.parametrize("compare_op", [operator.eq, operator.ge])
@pytest.mark.parametrize("components", [2] + list(range(3, 15, 2)))
def test_multiple(self, sprawl_algorithm_name, compare_op, components):
alg = self.get_algorithm_class()()
parameters = self.get_parameters()
image = self.get_multiple_part(components)
alg.set_image(image)
sprawl_algorithm = flow_dict[sprawl_algorithm_name]
parameters["sprawl_type"] = {"name": sprawl_algorithm_name, "values": sprawl_algorithm.get_default_values()}
if compare_op(1, 0):
parameters["threshold"]["values"]["base_threshold"]["values"]["threshold"] += self.get_shift()
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
self.check_result(result, [4000] * components, compare_op, parameters)
@pytest.mark.parametrize("algorithm_name", flow_dict.keys())
def test_side_connection(self, algorithm_name):
image = self.get_side_object()
alg = self.get_algorithm_class()()
parameters = self.get_parameters()
parameters["side_connection"] = True
alg.set_image(image)
val = flow_dict[algorithm_name]
parameters["sprawl_type"] = {"name": algorithm_name, "values": val.get_default_values()}
alg.set_parameters(**parameters)
result = alg.calculation_run(empty)
self.check_result(result, [96000 + 5, 72000 + 5], operator.eq, parameters)
def get_multiple_part(self, parts_num):
raise NotImplementedError
class TestLowerThresholdFlow(BaseFlowThreshold):
parameters = {
"channel": 0,
"minimum_size": 30,
"threshold": {
"name": "Base/Core",
"values": {
"core_threshold": {"name": "Manual", "values": {"threshold": 55}},
"base_threshold": {"name": "Manual", "values": {"threshold": 45}},
},
},
"noise_filtering": {"name": "None", "values": {}},
"side_connection": False,
"sprawl_type": {"name": "Euclidean sprawl", "values": {}},
}
shift = -6
get_base_object = staticmethod(get_two_parts)
get_side_object = staticmethod(get_two_parts_side)
get_multiple_part = staticmethod(get_multiple_part)
def get_algorithm_class(self) -> Type[ROIExtractionAlgorithm]:
return sa.LowerThresholdFlowAlgorithm
class TestUpperThresholdFlow(BaseFlowThreshold):
parameters = {
"channel": 0,
"minimum_size": 30,
"threshold": {
"name": "Base/Core",
"values": {
"core_threshold": {"name": "Manual", "values": {"threshold": 45}},
"base_threshold": {"name": "Manual", "values": {"threshold": 55}},
},
},
"noise_filtering": {"name": "None", "values": {}},
"side_connection": False,
"sprawl_type": {"name": "Euclidean sprawl", "values": {}},
}
shift = 6
get_base_object = staticmethod(get_two_parts_reversed)
get_side_object = staticmethod(get_two_parts_side_reversed)
get_multiple_part = staticmethod(get_multiple_part_reversed)
def get_algorithm_class(self) -> Type[ROIExtractionAlgorithm]:
return sa.UpperThresholdFlowAlgorithm
class TestMaskCreate:
def test_simple_mask(self):
mask_array = np.zeros((10, 20, 20), dtype=np.uint8)
mask_array[3:7, 6:14, 6:14] = 1
prop = MaskProperty(
dilate=RadiusType.NO,
dilate_radius=0,
fill_holes=RadiusType.NO,
max_holes_size=0,
save_components=False,
clip_to_mask=False,
)
new_mask = calculate_mask(prop, mask_array, None, (1, 1, 1))
assert np.all(new_mask == mask_array)
mask_array2 = np.copy(mask_array)
mask_array2[4:6, 8:12, 8:12] = 2
new_mask = calculate_mask(prop, mask_array2, None, (1, 1, 1))
assert np.all(new_mask == mask_array)
prop2 = MaskProperty(
dilate=RadiusType.NO,
dilate_radius=0,
fill_holes=RadiusType.NO,
max_holes_size=0,
save_components=True,
clip_to_mask=False,
)
new_mask = calculate_mask(prop2, mask_array2, None, (1, 1, 1))
assert np.all(new_mask == mask_array2)
def test_fill_holes(self):
mask_base_array = np.zeros((1, 20, 30, 30), dtype=np.uint8)
mask_base_array[:, 4:16, 8:22, 8:22] = 1
mask1_array = np.copy(mask_base_array)
mask1_array[:, 4:16, 10:15, 10:15] = 0
prop = MaskProperty(
dilate=RadiusType.NO,
dilate_radius=0,
fill_holes=RadiusType.R2D,
max_holes_size=0,
save_components=False,
clip_to_mask=False,
)
new_mask = calculate_mask(prop, mask1_array, None, (1, 1, 1))
assert np.all(mask_base_array == new_mask)
prop = MaskProperty(
dilate=RadiusType.NO,
dilate_radius=0,
fill_holes=RadiusType.R3D,
max_holes_size=0,
save_components=False,
clip_to_mask=False,
)
new_mask = calculate_mask(prop, mask1_array, None, (1, 1, 1))
assert np.all(mask1_array == new_mask)
mask2_array = np.copy(mask1_array)
mask2_array[:, 5:15, 10:15, 17:20] = 0
new_mask = calculate_mask(prop, mask2_array, None, (1, 1, 1))
assert
|
np.all(mask1_array == new_mask)
|
numpy.all
|
import os
os.chdir("git_repository/BetaVAEImputation")
import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from autoencodersbetaVAE import VariationalAutoencoder
import pandas as pd
import random
import tensorflow as tf
import sys
import pickle
from sklearn.decomposition import KernelPCA
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='config.json', help='configuration json file')
tf.reset_default_graph()
if __name__ == '__main__':
#args = parser.parse_args()
#with open(args.config) as f:
# config = json.load(f)
with open("5foldCV_config_VAE.json") as f:
config = json.load(f)
training_epochs=config["training_epochs"] #250
batch_size=config["batch_size"] #250
learning_rate=config["learning_rate"] #0.0005
latent_size = config["latent_size"] #200
hidden_size_1=config["hidden_size_1"]
hidden_size_2=config["hidden_size_2"]
beta=config["beta"]
data_path = config["data_path"]
corrupt_data_path = config["corrupt_data_path"]
restore_root = config["save_rootpath"]
trial_ind = config["trial_ind"]
rp=restore_root+"ep"+str(training_epochs)+"_bs"+str(batch_size)+"_lr"+str(learning_rate)+"_bn"+str(latent_size)+"_opADAM"+"_beta"+str(beta)+"_betaVAE"+".ckpt"
print("restore path: ", rp)
imp_out=restore_root+"imputed_datasets/"
conv_out=restore_root+"convergence_plots/"
na_out=restore_root+"NA_indices/"
# LOAD DATA
data= pd.read_csv(data_path).values
data_missing = pd.read_csv(corrupt_data_path).values
n_row = data_missing.shape[1] # number of features (columns), i.e. the dimensionality of the data space
non_missing_row_ind= np.where(np.isfinite(np.sum(data_missing,axis=1)))
na_ind = np.where(np.isnan(data_missing))
na_count= len(na_ind[0])
sc = StandardScaler()
data_missing_complete = np.copy(data_missing[non_missing_row_ind[0],:])
sc.fit(data_missing_complete)
data_missing[na_ind] = 0
# Scale the test data with the mean and variance of the model's training data
data_missing = sc.transform(data_missing)
data_missing[na_ind] = np.nan
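# The scaler is fitted only on fully observed rows; missing entries are temporarily set to 0 so
# that transform() can run, then restored to NaN so the imputation loop can locate them again.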
del data_missing_complete
data_missing2 = np.copy(data_missing)
# VAE network size:
Decoder_hidden1 = hidden_size_1 #6000
Decoder_hidden2 = hidden_size_2 #2000
Encoder_hidden1 = hidden_size_2 #2000
Encoder_hidden2 = hidden_size_1 #6000
# specify number of imputation iterations:
ImputeIter = 10 # looks like both strategies converge around 4 iterations
# define dict for network structure:
network_architecture = \
dict(n_hidden_recog_1=Encoder_hidden1, # 1st layer encoder neurons
n_hidden_recog_2=Encoder_hidden2, # 2nd layer encoder neurons
n_hidden_gener_1=Decoder_hidden1, # 1st layer decoder neurons
n_hidden_gener_2=Decoder_hidden2, # 2nd layer decoder neurons
n_input=n_row, # data input size
n_z=latent_size) # dimensionality of latent space
## Now we go into autoencodersbetaVAE.py and try and deconstruct the impute function
max_iter = ImputeIter
# Generate m plausible datasets via impute_multiple() function
print("Imputing CV dataset", corrupt_data_path)
print("Saving imputed results in", imp_out)
# Let's do the same with multiple imputation
vae_mult = VariationalAutoencoder(network_architecture,
learning_rate=learning_rate,
batch_size=batch_size,istrain=False,restore_path=rp,beta=beta)
# Let's run a for loop where we copy data_missing2 at the beginning and feed that into impute_multiple()
m = 10
mult_imp_datasets = []
mult_convs = []
mult_convs_lik = []
mult_largest_imp_vals = []
mult_avg_imp_vals = []
for i in range(m):
print("Generating plausible dataset", i+1)
data_missing_mult = np.copy(data_missing2)
mult_imputed_data, mult_conv, mult_conv_lik, mult_largest_imp_val, mult_avg_imp_val = \
vae_mult.impute_multiple(data_corrupt = data_missing_mult, max_iter = max_iter)
# Add to list
mult_imp_datasets.append(np.copy(mult_imputed_data))
mult_convs.append(np.copy(mult_conv))
mult_convs_lik.append(np.copy(mult_conv_lik))
mult_largest_imp_vals.append(
|
np.copy(mult_largest_imp_val)
|
numpy.copy
|
#!/usr/local/bin/python
# -*- coding: UTF-8 -*-
'''
Description: Trajectory generator (3D Hopper)
Email: <EMAIL>
Author: <NAME>
Update time: 2021-10-21
'''
import numpy as np
from scipy.special import comb
import pinocchio as pin
####################################
####################################
###### Bezier traj for leg #######
####################################
####################################
class BezierTrajGenerator(object):
def __init__(self):
pass
# private method
def __bernstein_poly(self, k, n, s):
'''
Description: The Bernstein polynomial of n, k as a function of the time scaling s
Input: k, n -> int
s -> time scaling [0 ~ 1], float
Output: Bernstein polynomial
Note: https://pages.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-der.html
'''
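# Bernstein basis: B_{k,n}(s) = C(n, k) * s^k * (1 - s)^(n - k); the n + 1 basis functions
# sum to 1 for any s in [0, 1].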
B = comb(n, k) * (s ** k) * (1 - s) ** (n - k)
return B
def __bezier_traj(self, control_points, T, s):
'''
Description: The bezier trajectory as a function of control_points
Input: control points should be an array:
[[x1,y1,z1],
[x2,y2,z2],
..
[Xn, Yn, Zn]] -> (n,3)
T_sw -> trajectory period, float
s -> time scaling [0 ~ 1], float
Output: p_d, v_d, a_d -> w.r.t hip frame, 1d array (3,)
'''
n = control_points.shape[0] - 1
B_vec = np.zeros((1, n + 1)) # a vector of each bernstein polynomial
d_B_vec = np.zeros((1, n)) # B_n-1,i_vec
dd_B_vec = np.zeros((1,n-1)) # B_n-2,i_vec
# position
for k in range(n + 1):
B_vec[0, k] = self.__bernstein_poly(k, n, s)
p_d = (B_vec @ control_points).ravel() # (3,)
# velocity
for k in range(n):
d_B_vec[0, k] = self.__bernstein_poly(k, n-1, s)
d_control_points = n * (control_points[1:] - control_points[:-1])
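# The derivative of a degree-n Bezier curve is a degree (n - 1) Bezier curve whose control
# points are n * (P_{k+1} - P_k); the same construction is applied once more for a_d below.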
v_d = 1/T * (d_B_vec @ d_control_points).ravel()
# acceleration
for k in range(n-1):
dd_B_vec[0, k] = self.__bernstein_poly(k, n-2, s)
dd_control_points = (n-1) * (d_control_points[1:] - d_control_points[:-1])
a_d = 1/T/T * (dd_B_vec @ dd_control_points).ravel()
return p_d, v_d, a_d
# Parameters: trajectory neutral point, half step length
def bezier_sw_traj(self, p_hf0, p_f, h, T_sw, s):
'''
Description: The bezier curve for swing phase w.r.t hip frame
Input: p_hf0 -> vector from hip frame to foot frame, 1d array (3,)
p_f -> foot placement w.r.t foot frame, 1d array (3,)
h -> ground clearance w.r.t foot frame, float
T_sw -> trajectory period, float
s -> time scaling [0 ~ 1], float
Output: p_d, v_d, a_d -> w.r.t hip frame, 1d array (3,)
Note: control points should be an array:
[[x1,y1,z1],
[x2,y2,z2],
..
[Xn, Yn, Zn]]
'''
# xy_scalar = np.array([-1., -1, -1, -1.1, -1.2, -1.2, 0., 0., 1.2, 1.2, 1.1, 1., 1., 1.]) # (14,)
# z_scalar = np.zeros(xy_scalar.shape[0]) # default 0, (14,)
# scalar = np.array([xy_scalar, xy_scalar, z_scalar]).T # (14,3)
# h_scalar = np.array([0., 0., 0., 0., 0.9, 0.9, 0.9, 1.2, 1.2, 1.2, 0., 0., 0., 0.]) # (14,)
# h_term = h * np.array([np.zeros(h_scalar.shape[0]), np.zeros(h_scalar.shape[0]), h_scalar]).T
# control_points = scalar * np.array([p_f] * scalar.shape[0]) # (14,3) * (14,3) = (14,3)
# control_points = control_points + h_term
# control_points = control_points + np.array([p_hf0] * scalar.shape[0]) # (14,3) + (14,3) = (14,3)
# like cycloid
xy_scalar = np.array([-1., -1, -1, -1, 1., 1., 1., 1.]) # (8,)
z_scalar = np.zeros(xy_scalar.shape[0]) # default 0, (8,)
scalar = np.array([xy_scalar, xy_scalar, z_scalar]).T # (8,3)
h_scalar = np.array([0., 0., 0., 2., 2., 0., 0., 0.]) # (8,)
h_term = h * np.array([np.zeros(h_scalar.shape[0]),
|
np.zeros(h_scalar.shape[0])
|
numpy.zeros
|
################################################################################
# Copyright (c) 2017-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Base class for accessing a store of chunks (i.e. N-dimensional arrays)."""
from __future__ import print_function, division, absolute_import
from builtins import next, zip, range, object
from future.utils import raise_from
import contextlib
import functools
import uuid
import io
import numpy as np
import dask
import dask.array as da
import dask.highlevelgraph
class ChunkStoreError(Exception):
""""Base class for all standard ChunkStore errors."""
class StoreUnavailable(OSError, ChunkStoreError):
"""Could not access underlying storage medium (offline, auth failed, etc)."""
class ChunkNotFound(KeyError, ChunkStoreError):
"""The store was accessible but a chunk with the given name was not found."""
def __str__(self):
"""Avoid the implicit repr() of KeyError since we'll have explanatory text."""
return ChunkStoreError.__str__(self)
class BadChunk(ValueError, ChunkStoreError):
"""The chunk is malformed, e.g. bad dtype or slices, wrong buffer size."""
def _floor_power_of_two(x):
"""The largest power of two smaller than or equal to `x`."""
return 2 ** int(np.floor(np.log2(x)))
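# For example, _floor_power_of_two(10) == 8 and _floor_power_of_two(16) == 16.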
def generate_chunks(shape, dtype, max_chunk_size, dims_to_split=None,
power_of_two=False, max_dim_elements=None):
"""Generate dask chunk specification from ndarray parameters.
Parameters
----------
shape : sequence of int
Array shape
dtype : :class:`numpy.dtype` object or equivalent
Array data type
max_chunk_size : float or int
Upper limit on chunk size (if allowed by `dims_to_split`), in bytes
dims_to_split : sequence of int, optional
Indices of dimensions that may be split into chunks (default all dims)
power_of_two : bool, optional
True if chunk size should be rounded down to a power of two
(the last chunk size along each dimension will potentially be smaller)
max_dim_elements : dict, optional
Maximum number of elements on each dimension (each key is a dimension
index). Dimensions that are not in `dims_to_split` are ignored.
Returns
-------
chunks : tuple of tuple of int
Dask chunk specification, indicating chunk sizes along each dimension
"""
if dims_to_split is None:
dims_to_split = range(len(shape))
if max_dim_elements is None:
max_dim_elements = {}
dim_elements = list(shape)
for i in dims_to_split:
if i in max_dim_elements and max_dim_elements[i] < shape[i]:
if power_of_two:
dim_elements[i] = _floor_power_of_two(max_dim_elements[i])
else:
dim_elements[i] = max_dim_elements[i]
# The ideal number of elements per chunk to achieve requested chunk size
# (can be float).
max_elements = max_chunk_size / np.dtype(dtype).itemsize
# Split the array greedily along each dimension, in order of `dims_to_split`
for dim in dims_to_split:
cur_elements = int(np.prod(dim_elements))
if cur_elements <= max_elements:
break # We have already split enough to meet the budget
# Compute number of elements per chunk in this dimension to exactly
# reach budget.
trg_elements_real = dim_elements[dim] * max_elements / cur_elements
if trg_elements_real < 1:
trg_elements = 1
elif power_of_two:
trg_elements = _floor_power_of_two(trg_elements_real)
else:
# Try to split into a number of equal-as-possible sized pieces
pieces = int(np.ceil(shape[dim] / trg_elements_real))
# Note: np.ceil rather than np.floor here means the max_chunk_size
# could be breached. It's done like this for backwards
# compatibility.
trg_elements = int(np.floor(shape[dim] / pieces))
dim_elements[dim] = trg_elements
return da.core.blockdims_from_blockshape(shape, dim_elements)
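# A rough worked example (not from the original source): for shape (100, 200), dtype float64
# (8 bytes per element) and max_chunk_size 80000, max_elements is 10000, so the first dimension
# is split into two pieces of 50 and the result is ((50, 50), (200,)), i.e. 50 x 200 chunks of
# exactly 80000 bytes each.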
def _add_offset_to_slices(func, offset):
"""Modify chunk get/put/has to add an offset to its `slices` parameter."""
def func_with_offset(array_name, slices, *args, **kwargs):
"""Shift `slices` to start at `offset`."""
offset_slices = tuple(slice(s.start + i, s.stop + i)
for (s, i) in zip(slices, offset))
return func(array_name, offset_slices, *args, **kwargs)
return func_with_offset
def _scalar_to_chunk(func):
"""Modify chunk get/put/has to turn a scalar return value into a chunk.
This modifies the given function so that it returns its result as an
ndarray with the same number of (singleton) dimensions as the corresponding
chunk to enable assembly into a dask array.
"""
def func_returning_chunk(array_name, slices, *args, **kwargs):
"""Turn scalar return value into chunk of appropriate dimension."""
value = func(array_name, slices, *args, **kwargs)
singleton_shape = len(slices) * (1,)
return np.full(singleton_shape, value)
return func_returning_chunk
def npy_header_and_body(chunk):
"""Prepare a chunk for low-level writing.
Returns the `.npy` header and a view of the chunk corresponding to that
header. The two should be concatenated (as buffer objects) to form a
valid `.npy` file.
This is useful for high-performance code, as it allows a chunk to be
encoded as a .npy file more efficiently than saving to a
:class:`io.BytesIO`.
"""
# Note: don't use ascontiguousarray as it turns 0D into 1D.
# See https://github.com/numpy/numpy/issues/5300
chunk =
|
np.asarray(chunk, order='C')
|
numpy.asarray
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import MITgcmutils as mit
from scipy import interpolate
import sys
plt.ion()
# generate new grid in mygendata first!!
if len(sys.argv) > 1:
iexp = int(sys.argv[1])
else:
print("usage: python restart.py iexp")
print("with iexp the number of the experiment")
sys.exit(1)
binprec = '>f4'
dir0 = '../run/mnc_test_' + str(iexp).zfill(4) + '/'
file1 = 'state.*'
#f1 = netcdf.netcdf_file(dir0 + file1,'r')
f1 = mit.mnc_files(dir0 + file1)
T = f1.variables['T'][:]
nt = len(T)-1
xp1 = f1.variables['Xp1'][:]
yp1 = f1.variables['Yp1'][:]
z = -f1.variables['Z'][:]
zl = -f1.variables['Zl'][:]
Lx = xp1[-1]
Ly = yp1[-1]
Lz = 2*z[-1]-zl[-1]
si_x = len(xp1) - 1
si_y = len(yp1) - 1
si_z = len(z)
uvel = f1.variables['U' ][nt,:,:,:]
vvel = f1.variables['V' ][nt,:,:,:]
theta = f1.variables['Temp'][nt,:,:,:]
eta = f1.variables['Eta' ][nt,:,:]
# new grid
dxn = np.fromfile("dx.box",dtype=binprec, count=-1, sep='')
dyn = np.fromfile("dy.box",dtype=binprec, count=-1, sep='')
dzn = np.fromfile("dz.box",dtype=binprec, count=-1, sep='')
si_xn = len(dxn)
si_yn = len(dyn)
si_zn = len(dzn)
zc = np.cumsum(dzn)-0.5*dzn
zind1 = np.zeros(si_zn,dtype=int)
zind2 = np.zeros(si_zn,dtype=int)
wei1 = np.zeros(si_zn)
wei2 = np.zeros(si_zn)
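# zind1/zind2 bracket each new cell centre zc[iz] on the old grid and wei1 + wei2 == 1, so a
# field F is presumably interpolated later as wei1*F[zind1] + wei2*F[zind2]; centres outside
# the old range are clamped to the first or last level.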
for iz in range(0,si_zn):
if zc[iz]<z[0]:
zind1[iz] = 0
zind2[iz] = 0
wei1[iz] = 1
wei2[iz] = 0
elif zc[iz]>z[-1]:
zind1[iz] = si_z-1
zind2[iz] = si_z-1
wei1[iz] = 0
wei2[iz] = 1
else:
pos = np.where(z-zc[iz]<0,100000,z-zc[iz])
zind2[iz] = np.argmin(np.abs(pos))
zind1[iz] = zind2[iz] - 1
wei1[iz] = -(z[zind2[iz]]-zc[iz])/(z[zind1[iz]]-z[zind2[iz]])
wei2[iz] = (z[zind1[iz]]-zc[iz])/(z[zind1[iz]]-z[zind2[iz]])
uvel = uvel[:si_z,:si_y,:si_x]
vvel = vvel[:si_z,:si_y,:si_x]
if si_xn != si_x:
# old grid
xx = np.linspace(0,1,si_x)
yy = np.linspace(0,1,si_y)
xog,yog = np.meshgrid(xx,yy)
#new grid
xn = np.linspace(0,1,si_xn)
yn = np.linspace(0,1,si_yn)
xng,yng = np.meshgrid(xn,yn)
# fix theta on walls
theta2 = 1.0*theta
theta2[:,:,0] = theta2[:,:,1]
theta2[:,-1,:] = theta2[:,-2,:]
for nz in range(0,si_z):
tz = np.mean(theta2[nz,:,:])
theta2[nz,:,:] = np.where(theta2[nz,:,:] == 0,tz,theta2[nz,:,:])
uvel_t = np.zeros((si_z,si_yn,si_xn))
vvel_t = np.zeros((si_z,si_yn,si_xn))
theta_t = np.zeros((si_z,si_yn,si_xn))
eta_n = np.zeros((si_yn,si_xn))
for nz in range(0,si_z):
fint = interpolate.interp2d(xx, yy,uvel[nz,:,:], kind='linear')
uvel_t[nz,:,:] = fint(xn,yn)
fint = interpolate.interp2d(xx, yy,vvel[nz,:,:], kind='linear')
vvel_t[nz,:,:] = fint(xn,yn)
fint = interpolate.interp2d(xx, yy,theta2[nz,:,:], kind='linear')
theta_t[nz,:,:] = fint(xn,yn)
fint = interpolate.interp2d(xx, yy,eta, kind='linear')
eta_n = fint(xn,yn)
uvel_n =
|
np.zeros((si_zn,si_yn,si_xn))
|
numpy.zeros
|
import numpy as np
import cv2
import os
from feature_detection import *
from anms import *
def extract_descriptor(features, img, kernel, stride):
img_padded = img.copy()
img_padded = cv2.copyMakeBorder(img, int(kernel*stride/2), int(kernel*stride/2), int(kernel*stride/2), int(kernel*stride/2), cv2.BORDER_CONSTANT)
feature_vector_list = []
for i in range(features.shape[0]):
poi = features[i,:].astype(np.int32) + int(kernel*stride/2) # shift into padded-image coordinates
patch_size = kernel*stride
patch = img_padded[poi[0]-int(patch_size/2):poi[0]+int(patch_size/2), poi[1]-int(patch_size/2):poi[1]+int(patch_size/2)]
patch = cv2.GaussianBlur(patch, (stride,stride), 0)
feature_matrix = cv2.resize(patch, (kernel, kernel), interpolation=cv2.INTER_NEAREST)
feature_vector = feature_matrix.flatten().astype(np.float32)
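# Standardise the descriptor (zero mean and, after the division below, unit variance) so that
# matching is insensitive to affine changes in patch brightness and contrast.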
feature_vector -= np.mean(feature_vector)
feature_vector /=
|
np.std(feature_vector)
|
numpy.std
|
# ___________ ____ ___ __ _ _ _
### MAIN
# ___________ ____ ___ __ _ _ _
# ______________________________________________________________________________
# %% imports
import glob
import os.path
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
from osgeo import ogr
from metadata import MetaData
# keras tensorflow etc are missing
from src.test_transform import simple_angle_converter
from src.transform import transform
then = time.time() # TIC
Image.MAX_IMAGE_PIXELS = None # disarm PIL Decompression Bomb Error
# ___________________________________________________________________________________________________
# %% user_input
# MAX IS 5 by 5 degree bbox!
UL_LON = 40
UL_LAT = 40
LR_LON = 45
LR_LAT = 35
# ___________________________________________________________________________________________________
# %% clean_up previous_run_remnants
remove_tiles_path = glob.glob('./output/*.csv')
for i1 in remove_tiles_path:
os.remove(i1)
# ___________________________________________________________________________________________________
# %% 0) get ptif and meta data from NASA
# use api to get data for bbox CHALLENGE #1
# ___________________________________________________________________________________________________
# %% 1) image selection algorithm
# use algorithm to select best NAC images for the CNN CHALLENGE #1
# ___________________________________________________________________________________________________
# %% 2) image ingestion algorithm
# use algorithm to download all NAC images + metadata from NASA and put everything in 'input' CHALLENGE #2
# USE FAKE LIST TO START
# ___________________________________________________________________________________________________
# %% 3) build_lists
# list of all NAC images in input folder
image_list = glob.glob('./input/*.tif')
# list of all metadata files in input folder
# [NAC resolution, upper right LAT, upper right LON, lower right LAT, lower right LON, lower left LAT, lower left LON, upper left LAT, upper left LON] --> order used by LROC page
meta_list = glob.glob('./input/*.txt')
def create_meta_obj(file_name):
return MetaData(np.loadtxt(file_name))
def process_image(parent, meta_data):
global remove_tiles_path
# ______________________________________________________________________________
# %% 4) nac_tiling
print("Tiling NAC. . .")
# NACs are tiled here - NOT INCLUDED HERE
print("NAC Tiling COMPLETE!")
# ______________________________________________________________________________
# %% 5) cnn_loop
print("Running RetinaNet. . .")
# run CNN to predict on tiles - NOT INCLUDED HERE
# USE FAKE .npy TO START
print("RetinaNet COMPLETE!")
# ______________________________________________________________________________
# %% 6) coordinate_transform & final_output
print("Running Coordinate Transformation. . .")
# ___________________________________________________________________________________________________
# %% 6a) image_metadata
# CHALLENGE #2
# USE FAKE .npy TO START
# [NAC resolution, upper right LAT, upper right LON, lower right LAT, lower right LON, lower left LAT, lower left LON, upper left LAT, upper left LON] --> order used by LROC page
image_id = str(image_list[0]) # NAC image ID
image_id = image_id[8:]
image_id = image_id[:-4]
# ___________________________________________________________________________________________________
# %% 6b) load_image_coordinates
image_coord = np.load(
'./output/{}/output_img_coord_02.npy'.format(
image_id[:-4])) # load results of RetinaNet, always the identical name
img_coord_split = np.hsplit(image_coord, 6) # split loaded results for next steps
upper_left_x = img_coord_split[0]
upper_left_y = img_coord_split[1]
lower_right_x = img_coord_split[2]
lower_right_y = img_coord_split[3]
confidence = img_coord_split[4]
class_type = img_coord_split[5]
# ___________________________________________________________________________________________________
# %% 6g) Calculations
dimensions = parent.shape
x_len = dimensions[1]
y_len = dimensions[0]
x_deg_len = meta_data.corner_ur_lon - meta_data.corner_ul_lon
y_deg_len = meta_data.corner_ul_lat - meta_data.corner_ll_lat
deg_per_pix_xdir = x_deg_len / x_len
deg_per_pix_ydir = y_deg_len / y_len
# LON NAC rotation correction - appears not to help accuracy, subject to change!
# lon_alpha_rad = math.atan(x_deg_len/y_deg_len)
# x_len_corr = x_len * math.cos(lon_alpha_rad)
# LAT NAC rotation correction - appears not to help accuracy, subject to change!
# lat_alpha_rad = math.atan(y_deg_len/x_deg_len)
# y_len_corr = y_len * math.sin(lat_alpha_rad)
# deg_per_pix_xdir = x_deg_len/x_len_corr
# deg_per_pix_ydir = y_deg_len/y_len_corr
# ___________________________________________________________________________________________________
# %% 6h) Detected rectangle location correction according to Subject & Real world coordinate determination
# -------> X
# |
# |
# |
# v
# Y
lon_array = []
lat_array = []
try:
for rec_ul_x, rec_ul_y in zip(upper_left_x, upper_left_y):
# print(rec_ul_x, rec_ul_y)
if meta_data.subject == 1: # NO CHANGE
rec_ul_x_corr = rec_ul_x
rec_ul_y_corr = rec_ul_y
if meta_data.subject == 2: # X FLIP
rec_ul_x_corr = x_len - rec_ul_x
rec_ul_y_corr = rec_ul_y
if meta_data.subject == 3: # Y FLIP
rec_ul_x_corr = rec_ul_x
rec_ul_y_corr = y_len - rec_ul_y
if meta_data.subject == 4: # XY FLIP
rec_ul_x_corr = x_len - rec_ul_x
rec_ul_y_corr = y_len - rec_ul_y
bolder_point = np.array([x_len - rec_ul_x_corr[0], rec_ul_y_corr[0]])
ur = np.array([meta_data.corner_ur_lon, meta_data.corner_ur_lat])
ul = np.array([meta_data.corner_ul_lon, meta_data.corner_ul_lat])
ll = np.array([meta_data.corner_ll_lon, meta_data.corner_ll_lat])
size = np.array([x_len, y_len])
result = transform(bolder_point,
ur,
ul,
ll,
size)
rec_ul_lon = (rec_ul_x_corr * deg_per_pix_xdir) + meta_data.corner_ul_lon
rec_ul_lat = -1 * (
rec_ul_y_corr * deg_per_pix_ydir) + meta_data.corner_ul_lat - 90 # 90 degree correction lifted
rec_ul_lon2 = result[1]
rec_ul_lat2 = result[0] - 90 # 90 degree correction lifted
(rec_ul_lon3, rec_ul_lat3) = simple_angle_converter(bolder_point,
ur,
ul,
ll,
np.array([meta_data.corner_lr_lon, meta_data.corner_lr_lat]),
size)
print("diff: {}, {}".format(rec_ul_lon - rec_ul_lon2, rec_ul_lat - rec_ul_lat2))
x_length_array = abs(lower_right_x - upper_left_x) # bbox x dimension
y_length_array = abs(lower_right_y - upper_left_y) # bbox y dimension
lon_array = np.append(lon_array, rec_ul_lon)
import pandas as pd
import numpy as np
import multiprocessing as mp
from tqdm import tqdm
import h5py
import os
def check_hh_pdb():
# make sure the hhsuite seq and the seq from ATOM records match.
# make sure the group num is the correct index of hhsuite seq.
pdb_list = pd.read_csv('hhsuite_beads/hhsuite_pdb_beads_list.txt')['pdb'].values
amino_acids = pd.read_csv('amino_acids.csv')
vocab = {x.upper(): y for x, y in zip(amino_acids.AA3C, amino_acids.AA)}
match_pdb_list = []
bad_pdb_list = []
for pdb in tqdm(pdb_list):
df_pdb = pd.read_csv(f'hhsuite_beads/hhsuite/{pdb}_bead.csv')
df_hh = pd.read_csv(f'~/bio/hhsuite/chimeric/{pdb}.chemeric')
seq_hh = df_hh['seq'].values[0]
group_num = df_pdb['group_num'].values
# in some cases, the chain IDs do not match.
if len(seq_hh) <= group_num.max():
bad_pdb_list.append(pdb)
print(len(seq_hh), group_num.max())
# break
continue
# use group num as index to extract residues from hh seq
seq_hh_pdb = ''.join(np.array(list(seq_hh))[group_num])
seq_pdb = df_pdb['group_name'].apply(lambda x: vocab[x])
seq_pdb = ''.join(seq_pdb.values)
if seq_pdb == seq_hh_pdb:
match_pdb_list.append(pdb)
# df_pdb.to_csv(f'hhsuite_beads/hhsuite/{pdb}_bead.csv', index=False)
else:
bad_pdb_list.append(pdb)
# print(seq_pdb)
# print(seq_hh_pdb)
# break
df_match = pd.DataFrame({'pdb': match_pdb_list})
df_match.to_csv('hhsuite_beads/hhsuite_pdb_beads_list_match.txt', index=False)
###########################################
def _rotation_matrix(c1, c2):
z = np.cross(c1, c2)
x = c1
y = np.cross(z, x)
x = x / np.sqrt(np.sum(x ** 2))
y = y / np.sqrt(np.sum(y ** 2))
z = z / np.sqrt(np.sum(z ** 2))
R = np.vstack([x, y, z])
# Rinv = np.linalg.inv(R.T)
return R
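# Small illustrative check (not called anywhere in the pipeline): for non-parallel c1 and c2,
# the rows of R returned by _rotation_matrix form an orthonormal basis, so R @ R.T should
# equal the 3x3 identity up to floating point error.
def _rotation_matrix_is_orthonormal(c1, c2):
    R = _rotation_matrix(c1, c2)
    return np.allclose(R @ R.T, np.eye(3))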
def extract_one_topk(pdb_id, df_beads, local_rot_dir, k=10, mode='CA'):
if df_beads.shape[0] < 20:
return
group_num = df_beads['group_num'].values
group_name = df_beads['group_name'].values
if mode == 'CA':
group_coords = df_beads[['xca', 'yca', 'zca']].values
elif mode == 'CB':
group_coords = df_beads[['xcb', 'ycb', 'zcb']].values
elif mode == 'CAS':
group_coords = (df_beads[['xca', 'yca', 'zca']].values + df_beads[['xs', 'ys', 'zs']].values) / 2
else:
raise ValueError('mode should be CA / CB / CAS.')
df_list = []
count_res = []
for i, gc in enumerate(group_num):
if (gc-1 not in group_num) | (gc+1 not in group_num) | (gc-2 not in group_num) | (gc+2 not in group_num):
continue
# coords of the previous 2 and next 2 groups in local peptide segment
cen_i = (group_num == gc)
pre_i = (group_num == gc-1)
next_i = (group_num == gc+1)
pre2_i = (group_num == gc-2)
next2_i = (group_num == gc+2)
# get central segment
ind = (cen_i | pre_i | next_i | pre2_i | next2_i)
gnum_seg = group_num[ind]
gname_seg = group_name[ind]
if len(gnum_seg) != 5:
continue
coords = group_coords - group_coords[cen_i] # center
c1 = coords[pre_i]
c2 = coords[next_i]
if np.sum(c1**2) == 0:
break
if np.sum(c2**2) == 0:
break
rotate_mat = _rotation_matrix(c1, c2)
coords_seg = coords[ind]
coords_seg = np.squeeze(np.matmul(rotate_mat[None, :, :], coords_seg[:, :, None]))
# get nearest k residues from other residues
gnum_others = group_num[~ind]
gname_others = group_name[~ind]
coords_others = coords[~ind]
dist_i = np.sqrt((coords_others**2).sum(axis=1))
dist_i_arg = np.argsort(dist_i)
topk_arg = dist_i_arg[:k]
# topk_arg = (dist_i < 8)
# count_6a = dist_i[dist_i < 6].shape[0]
count_8a = dist_i[dist_i < 8].shape[0]
count_10a = dist_i[dist_i < 10].shape[0]
count_12a = dist_i[dist_i < 12].shape[0]
# count_res.append(np.array([count_6a, count_8a, count_10a, count_12a]))
gnum_topk = gnum_others[topk_arg]
gname_topk = gname_others[topk_arg]
coords_topk = coords_others[topk_arg]
coords_topk = np.squeeze(np.matmul(rotate_mat[None, :, :], coords_topk[:, :, None]))
#!/usr/bin/env python
# coding: utf-8
# # ICME rate update plots
#
# adapted from
# https://github.com/helioforecast/Papers/tree/master/Moestl2020_PSP_rate
# makes a prediction of the ICME rate in solar cycle 25
#
# Main author: <NAME>, IWF Graz, Austria; twitter @chrisoutofspace; https://github.com/cmoestl
#
# ssn is automatically loaded from http://www.sidc.be/silso/DATA/SN_d_tot_V2.0.csv
#
# plots are saved to '/nas/helio/data/insitu_python/icme_rate_cycle_update'
#
# Convert this notebook to a script with:
#
# import os
#
# os.system('jupyter nbconvert --to script icme_rate.ipynb')
#
#
#
#
# ---
#
# **MIT LICENSE**
#
# Copyright 2020, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# In[62]:
#real time updates: icme_rate.py
import matplotlib
#for server runs
matplotlib.use('Agg')
#for notebook use
#%matplotlib inline
#set directory for daily update
#outputdirectory='results/icme_rate_cycle_update'
outputdirectory='/nas/helio/data/insitu_python/icme_rate_cycle_update'
#Convert this notebook to a script with:
#import os
#os.system('jupyter nbconvert --to script icme_rate.ipynb')
# In[2]:
from scipy import stats
import scipy.io
import scipy.optimize
from matplotlib import cm
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import datetime
from datetime import timedelta
import astropy.constants as const
from sunpy.time import parse_time
import sunpy.time
import time
import pickle
import seaborn as sns
import os
import urllib
import json
import warnings
import importlib
import heliopy.spice as spice
import heliopy.data.spice as spicedata
import astropy
import copy
#our own package
from heliocats import stats as hs
from heliocats import data as hd
#where the 6 in situ data files are located is read from input.py
#as data_path=....
from config import data_path
#reload again while debugging
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#%matplotlib inline
#matplotlib.use('Qt5Agg')
#matplotlib.use('Agg')
#warnings.filterwarnings('ignore') # some numpy mean-of-empty-slice runtime warnings
########### make directories first time
resdir='results'
if os.path.isdir(resdir) == False: os.mkdir(resdir)
datadir='data'
if os.path.isdir(datadir) == False: os.mkdir(datadir)
if os.path.isdir(outputdirectory) == False: os.mkdir(outputdirectory)
#animdirectory='results/plots_rate/anim'
#if os.path.isdir(animdirectory) == False: os.mkdir(animdirectory)
#animdirectory2='results/plots_rate/anim2'
#if os.path.isdir(animdirectory2) == False: os.mkdir(animdirectory2)
plt.rcParams["figure.figsize"] = (15,8)
print('done')
# ## 1 Settings and load data
# In[3]:
plt.close('all')
print('icme_rate main program.')
print('<NAME> et al., IWF Graz, Austria')
#constants:
#solar radius
Rs_in_AU=float(const.R_sun/const.au)
#define AU in km
AU_in_km=const.au.value/1e3
#set for loading
load_data=1
get_new_sunspots=1
get_new_sunspots_ms=1
get_new_sunspots_mean=1
if load_data > 0:
print('load data (takes a minute or so)')
print('')
#####################
print('get RC ICME list')
#download richardson and cane list
rc_url='http://www.srl.caltech.edu/ACE/ASC/DATA/level3/icmetable2.htm'
try: urllib.request.urlretrieve(rc_url,data_path+'rc_list.htm')
except urllib.error.URLError as e:
print('Failed downloading ', rc_url,' ',e)
#read RC list into pandas dataframe
rc_dfull=pd.read_html(data_path+'rc_list.htm')
rc_df=rc_dfull[0]
##################
print('get sunspot number from SIDC')
#get daily sunspot number from SIDC
#http://www.sidc.be/silso/datafiles
#parameters
#http://www.sidc.be/silso/infosndtot
#daily sunspot number
if get_new_sunspots==1:
ssn=pd.read_csv('http://www.sidc.be/silso/DATA/SN_d_tot_V2.0.csv',sep=';',names=['year','month','day','year2','spot','stand','obs','prov'])
ssn_time=np.zeros(len(ssn))
for k in np.arange(len(ssn)):
ssn_time[k]=parse_time(str(ssn.year[k])+'-'+str(ssn.month[k])+'-'+str(ssn.day[k])).plot_date
print('time convert done')
ssn.insert(0,'time',ssn_time)
ssn.spot.loc[np.where(ssn.spot< 0)[0]]=np.nan
ssn.stand.loc[np.where(ssn.stand< 0)[0]]=np.nan
fileout='ssn.p'
pickle.dump(ssn, open(data_path+fileout, "wb"))
#also get preliminary data for current month for plotting
#does not work
#ssn_prelim_raw=pd.csv('http://www.sidc.be/silso/DATA/EISN/EISN_current.csv',sep=',',names=['year','month','day','year2','spot','stand','stat calc','stat avail'])
#download manually
ssn_prelim_url='http://www.sidc.be/silso/DATA/EISN/EISN_current.csv'
try: urllib.request.urlretrieve(ssn_prelim_url,'data/EISN_current.csv')
except urllib.error.URLError as e:
print('Failed downloading ', ssn_prelim_url,' ',e)
ssn_prelim_raw = np.loadtxt('data/EISN_current.csv', delimiter=',',usecols=(0,1,2,4,5))
ssn_p_int=ssn_prelim_raw.astype(int)
ssn_p=pd.DataFrame(ssn_p_int,columns=['year','month','day','spot','stand'])
ssn_p_time=np.zeros(len(ssn_p))
for k in np.arange(len(ssn_p)):
ssn_p_time[k]=parse_time(str(ssn_p.year[k])+'-'+str(ssn_p.month[k])+'-'+str(ssn_p.day[k])).plot_date
ssn_p.insert(0,'time',ssn_p_time)
ssn_p.spot.loc[np.where(ssn_p.spot< 0)[0]]=np.nan
ssn_p.stand.loc[np.where(ssn_p.stand< 0)[0]]=np.nan
fileout='ssn_prelim.p'
pickle.dump(ssn_p, open(data_path+fileout, "wb"))
if get_new_sunspots_ms==1:
#ssn_ms=pd.read_csv('data/SN_ms_tot_V2.0.csv',sep=';')
ssn_ms=pd.read_csv('http://www.sidc.be/silso/DATA/SN_ms_tot_V2.0.csv',sep=';',names=['year','month','year2','spot','stand','obs','check'])
ssn_ms_time=np.zeros(len(ssn_ms))
for k in np.arange(len(ssn_ms)):
ssn_ms_time[k]=parse_time(str(ssn_ms.year[k])+'-'+str(ssn_ms.month[k])+'-01').plot_date
#print(mdates.num2date(ssn_ms_time[k]))
#print(ssn_ms.spot[k])
print('time convert done')
ssn_ms.insert(0,'time',ssn_ms_time)
ssn_ms.spot.loc[np.where(ssn_ms.spot< 0)[0]]=np.nan
fileout='ssn_13ms.p'
pickle.dump(ssn_ms, open(data_path+fileout, "wb"))
if get_new_sunspots_mean==1:
ssn_m=pd.read_csv('http://www.sidc.be/silso/DATA/SN_m_tot_V2.0.csv',sep=';',names=['year','month','year2','spot','stand','obs','check'])
ssn_m_time=np.zeros(len(ssn_m))
for k in np.arange(len(ssn_m)):
ssn_m_time[k]=parse_time(str(ssn_m.year[k])+'-'+str(ssn_m.month[k])+'-01').plot_date
print('time convert done')
ssn_m.insert(0,'time',ssn_m_time)
ssn_m.spot.loc[np.where(ssn_m.spot< 0)[0]]=np.nan
fileout='ssn_m.p'
pickle.dump(ssn_m, open(data_path+fileout, "wb"))
file='ssn.p'
ssn=pickle.load(open(data_path+file, "rb"))
#make 13 month running mean
runmean_months=13.0
ssn_mean_13=hs.running_mean(ssn.spot,int(np.rint(30.42*runmean_months+1)))
ssn_std_13=hs.running_mean(ssn.stand,int(np.rint(30.42*runmean_months+1)))
ssn.insert(1,'spot_mean_13',ssn_mean_13)
ssn.insert(2,'spot_std_13',ssn_std_13)
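# hs.running_mean comes from the heliocats package and is not shown in this file.
# As a hedged sketch only (edge handling is an assumption and may differ from the
# actual heliocats.stats.running_mean), a centered moving average over an odd window
# of daily values could look like:
def _running_mean_sketch(values, window):
    out = np.full(len(values), np.nan)
    half = window // 2
    for i in range(half, len(values) - half):
        out[i] = np.nanmean(values[i - half:i + half + 1])
    return out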
print('SIDC sunspots done')
################## Spacecraft
#completed missions
filevex='vex_2007_2014_sceq_removed.p'
[vex,hvex]=pickle.load(open(data_path+filevex, 'rb' ) )
filevex='vex_2007_2014_sceq.p'
[vexnon,hvexnon]=pickle.load(open(data_path+filevex, 'rb' ) )
filemes='messenger_2007_2015_sceq_removed.p'
[mes,hmes]=pickle.load(open(data_path+filemes, 'rb' ) )
filemes='messenger_2007_2015_sceq.p'
[mesnon,hmesnon]=pickle.load(open(data_path+filemes, 'rb' ) )
filestb='stereob_2007_2014_sceq.p'
[stb,hstb]=pickle.load(open(data_path+filestb, "rb" ) )
fileuly='ulysses_1990_2009_rtn.p'
[uly,huly]=pickle.load(open(data_path+fileuly, "rb" ) )
##active mission
filemav='maven_2014_2018_removed_smoothed.p'
[mav,hmav]=pickle.load(open(data_path+filemav, 'rb' ) )
print('load and merge Wind data HEEQ')
#from HELCATS HEEQ until 2018 1 1 + new self-processed data with heliosat and hd.save_wind_data
filewin="wind_2007_2018_heeq_helcats.p"
[win1,hwin1]=pickle.load(open(data_path+filewin, "rb" ) )
#or use: filewin2="wind_2018_now_heeq.p"
filewin2="wind_2018_now_heeq.p"
[win2,hwin2]=pickle.load(open(data_path+filewin2, "rb" ) )
#merge Wind old and new data
#cut off HELCATS data at end of 2017, win2 begins exactly after this
win1=win1[np.where(win1.time < parse_time('2018-Jan-01 00:00').datetime)[0]]
#make array
win=np.zeros(np.size(win1.time)+np.size(win2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)])
#convert to recarray
win = win.view(np.recarray)
win.time=np.hstack((win1.time,win2.time))
win.bx=np.hstack((win1.bx,win2.bx))
win.by=np.hstack((win1.by,win2.by))
win.bz=np.hstack((win1.bz,win2.bz))
win.bt=np.hstack((win1.bt,win2.bt))
win.vt=np.hstack((win1.vt,win2.vt))
win.np=np.hstack((win1.np,win2.np))
win.tp=np.hstack((win1.tp,win2.tp))
win.x=np.hstack((win1.x,win2.x))
win.y=np.hstack((win1.y,win2.y))
win.z=np.hstack((win1.z,win2.z))
win.r=np.hstack((win1.r,win2.r))
win.lon=np.hstack((win1.lon,win2.lon))
win.lat=np.hstack((win1.lat,win2.lat))
print('Wind merging done')
########### STA
print('load and merge STEREO-A data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ
filesta1='stereoa_2007_2020_sceq.p'
sta1=pickle.load(open(data_path+filesta1, "rb" ) )
#beacon data
#filesta2="stereoa_2019_2020_sceq_beacon.p"
#filesta2='stereoa_2019_2020_sept_sceq_beacon.p'
#filesta2='stereoa_2019_now_sceq_beacon.p'
#filesta2="stereoa_2020_august_november_sceq_beacon.p"
filesta2='stereoa_2020_now_sceq_beacon.p'
[sta2,hsta2]=pickle.load(open(data_path+filesta2, "rb" ) )
#cutoff with end of science data
sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]
#make array
sta=np.zeros(np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=np.hstack((sta1.time,sta2.time))
sta.bx=np.hstack((sta1.bx,sta2.bx))
sta.by=np.hstack((sta1.by,sta2.by))
sta.bz=np.hstack((sta1.bz,sta2.bz))
sta.bt=np.hstack((sta1.bt,sta2.bt))
sta.vt=np.hstack((sta1.vt,sta2.vt))
sta.np=np.hstack((sta1.np,sta2.np))
sta.tp=np.hstack((sta1.tp,sta2.tp))
sta.x=np.hstack((sta1.x,sta2.x))
sta.y=np.hstack((sta1.y,sta2.y))
sta.z=np.hstack((sta1.z,sta2.z))
sta.r=np.hstack((sta1.r,sta2.r))
sta.lon=np.hstack((sta1.lon,sta2.lon))
sta.lat=np.hstack((sta1.lat,sta2.lat))
print('STA Merging done')
print('load PSP data SCEQ') #from heliosat, converted to SCEQ similar to STEREO-A/B
filepsp='psp_2018_2021_sceq.p'
[psp,hpsp]=pickle.load(open(data_path+filepsp, "rb" ) )
##############################################
print('load Bepi Colombo SCEQ')
filebepi='bepi_2019_2021_sceq.p'
bepi=pickle.load(open(data_path+filebepi, "rb" ) )
##############################################
print('load Solar Orbiter SCEQ')
filesolo='solo_2020_april_december_sceq.p'
solo=pickle.load(open(data_path+filesolo, "rb" ) )
#set all plasma data to NaN
solo.vt=np.nan
solo.np=np.nan
solo.tp=np.nan
fileomni='omni_1963_2020.p'
[omni,homni]=pickle.load(open(data_path+fileomni, "rb" ) )
print('load all data done')
# In[4]:
########### load ICMECAT v2.0, made with icmecat.py or ipynb
file='icmecat/HELCATS_ICMECAT_v20_pandas.p'
print()
print('loaded ', file)
print()
print('Keys (parameters) in this pandas data frame are:')
[ic,h,p]=pickle.load(open(file, "rb" ) )
print(ic.keys())
print()
################### get indices of events for each spacecraft
mercury_orbit_insertion_time= parse_time('2011-03-18').datetime
#spacecraft near the 4 terrestrial planets
#get indices for Mercury after orbit insertion in March 2011
merci=np.where(np.logical_and(ic.sc_insitu =='MESSENGER', ic.icme_start_time > mercury_orbit_insertion_time))[0]
vexi=np.where(ic.sc_insitu == 'VEX')[:][0]
wini=np.where(ic.sc_insitu == 'Wind')[:][0]
mavi=np.where(ic.sc_insitu == 'MAVEN')[:][0]
#other spacecraft
#all MESSENGER events including cruise phase
mesi=np.where(ic.sc_insitu == 'MESSENGER')[:][0]
stbi=np.where(ic.sc_insitu == 'STEREO-B')[:][0]
ulyi=np.where(ic.sc_insitu == 'ULYSSES')[:][0]
pspi=np.where(ic.sc_insitu == 'PSP')[:][0]
soli=np.where(ic.sc_insitu == 'SolarOrbiter')[:][0]
beci=np.where(ic.sc_insitu == 'BepiColombo')[:][0]
stai=np.where(ic.sc_insitu == 'STEREO-A')[:][0]
############### set limits of solar minimum, rising/declining phase and solar maximum
# minimum and maximum times as given by
#http://www.sidc.be/silso/cyclesmm
#24 2008 12 2.2 2014 04 116.4
solarmin=parse_time('2008-12-01').datetime
minstart=solarmin-datetime.timedelta(days=366*1.5)
minend=solarmin+datetime.timedelta(days=365)
minstart_num=parse_time(minstart).plot_date
minend_num=parse_time(minend).plot_date
solarmax=parse_time('2014-04-01').datetime
maxstart=solarmax-datetime.timedelta(days=365*3)
maxend=solarmax+datetime.timedelta(days=365/2)
maxstart_num=parse_time(maxstart).plot_date
maxend_num=parse_time(maxend).plot_date
#rising phase not used
# risestart=parse_time('2010-01-01').datetime
# riseend=parse_time('2011-06-30').datetime
# risestart_num=parse_time('2010-01-01').plot_date
# riseend_num=parse_time('2011-06-30').plot_date
# declstart=parse_time('2015-01-01').datetime
# declend=parse_time('2018-12-31').datetime
# declstart_num=parse_time('2015-01-01').plot_date
# declend_num=parse_time('2018-12-31').plot_date
############### extract events by limits of solar minimum and maximum
iall_min=np.where(np.logical_and(ic.icme_start_time > minstart,ic.icme_start_time < minend))[0]
#iall_rise=np.where(np.logical_and(ic.icme_start_time > risestart,ic.icme_start_time < riseend))[0]
iall_max=np.where(np.logical_and(ic.icme_start_time > maxstart,ic.icme_start_time < maxend))[0]
wini_min=iall_min[np.where(ic.sc_insitu[iall_min]=='Wind')]
#wini_rise=iall_rise[np.where(ic.sc_insitu[iall_rise]=='Wind')]
wini_max=iall_max[np.where(ic.sc_insitu[iall_max]=='Wind')]
pspi_min=iall_min[np.where(ic.sc_insitu[iall_min]=='PSP')]
#wini_rise=iall_rise[np.where(ic.sc_insitu[iall_rise]=='Wind')]
pspi_max=iall_max[np.where(ic.sc_insitu[iall_max]=='PSP')]
vexi_min=iall_min[np.where(ic.sc_insitu[iall_min]=='VEX')]
#vexi_rise=iall_rise[np.where(ic.sc_insitu[iall_rise]=='VEX')]
vexi_max=iall_max[np.where(ic.sc_insitu[iall_max]=='VEX')]
mesi_min=iall_min[np.where(ic.sc_insitu[iall_min]=='MESSENGER')]
#mesi_rise=iall_rise[np.where(ic.sc_insitu[iall_rise]=='MESSENGER')]
mesi_max=iall_max[np.where(ic.sc_insitu[iall_max]=='MESSENGER')]
stai_min=iall_min[np.where(ic.sc_insitu[iall_min]=='STEREO-A')]
#stai_rise=iall_rise[np.where(ic.sc_insitu[iall_rise]=='STEREO-A')]
stai_max=iall_max[np.where(ic.sc_insitu[iall_max]=='STEREO-A')]
stbi_min=iall_min[np.where(ic.sc_insitu[iall_min]=='STEREO-B')]
#stbi_rise=iall_rise[np.where(ic.sc_insitu[iall_rise]=='STEREO-B')]
stbi_max=iall_max[np.where(ic.sc_insitu[iall_max]=='STEREO-B')]
# select the events at Mercury extra after orbit insertion, note that no events available for solar minimum
merci_min=iall_min[np.where(np.logical_and(ic.sc_insitu[iall_min] =='MESSENGER',ic.icme_start_time[iall_min] > parse_time('2011-03-18').datetime))]
#merci_rise=iall_rise[np.where(np.logical_and(ic.sc_insitu[iall_rise] =='MESSENGER',ic.icme_start_time[iall_rise] > parse_time('2011-03-18').datetime))]
merci_max=iall_max[np.where(np.logical_and(ic.sc_insitu[iall_max] =='MESSENGER',ic.icme_start_time[iall_max] > parse_time('2011-03-18').datetime))]
print(len(ic))
print('done')
# In[5]:
ic
# ## 2 ICME rate for solar cycles 23/24 from the Heliophysics System Observatory (ICMECAT and Richardson and Cane)
# ### Check data days available each year for each planet or spacecraft
# In[6]:
######################## make bin for each year for yearly histograms
#define dates of January 1 from 2007 to end year
last_year=2022 #2022 means last date is 2021 Dec 31
years_jan_1_str=[str(i)+'-01-01' for i in np.arange(2007,last_year) ]
yearly_start_times=parse_time(years_jan_1_str).datetime
yearly_start_times_num=parse_time(years_jan_1_str).plot_date
#same for July 1 as middle of the year
years_jul_1_str=[str(i)+'-07-01' for i in np.arange(2007,last_year) ]
yearly_mid_times=parse_time(years_jul_1_str).datetime
yearly_mid_times_num=parse_time(years_jul_1_str).plot_date
#same for december 31
years_dec_31_str=[str(i)+'-12-31' for i in np.arange(2007,last_year) ]
yearly_end_times=parse_time(years_dec_31_str).datetime
yearly_end_times_num=parse_time(years_dec_31_str).plot_date
########### define arrays for total data days and fill with nan
total_data_days_yearly_win=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_win.fill(np.nan)
total_data_days_yearly_psp=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_psp.fill(np.nan)
total_data_days_yearly_solo=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_solo.fill(np.nan)
total_data_days_yearly_bepi=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_bepi.fill(np.nan)
total_data_days_yearly_sta=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_sta.fill(np.nan)
total_data_days_yearly_stb=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_stb.fill(np.nan)
total_data_days_yearly_mes=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_mes.fill(np.nan)
total_data_days_yearly_vex=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_vex.fill(np.nan)
total_data_days_yearly_mav=np.zeros(np.size(yearly_mid_times))
total_data_days_yearly_mav.fill(np.nan)
######################## go through each year and search for available data
#time is available for all dates, so there are no NaNs in time; thus we need to search for all non-NaN values in the Btotal variable
for i in range(np.size(yearly_mid_times)):
print(yearly_start_times[i])
#get indices of Wind time for the current year
thisyear=np.where(np.logical_and((win.time > yearly_start_times[i]),(win.time < yearly_end_times[i])))[0]
#get np.size of available data for each year
datas=np.size(np.where(np.isnan(win.bt[thisyear])==False))
#wind is in 1 minute resolution until 31 Dec 2017, from 1 Jan 2018 it is 2 minutes
min_in_days=1/(60*24)
if i > 10: min_in_days=2/(60*24) #2 minute resolution from 2018 onwards
#calculate available days from number of datapoints (each 1 minute)
#divided by number of minutes in 1 days
#this should only be the case if data is available this year, otherwise set to NaN
if datas > 0: total_data_days_yearly_win[i]=datas*min_in_days
#manual override because Wind data for 2018 and 2019 are heavily despiked
total_data_days_yearly_win[-4]=360
total_data_days_yearly_win[-3]=360
total_data_days_yearly_win[-2]=360
total_data_days_yearly_win[-1]=180
#all other data is in 1 min resolution
min_in_days=1/(60*24)
#for PSP
thisyear=np.where(np.logical_and((psp.time > yearly_start_times[i]),(psp.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(psp.bt[thisyear])==False))
if datas >0: total_data_days_yearly_psp[i]=datas*min_in_days
#for Bepi
thisyear=np.where(np.logical_and((bepi.time > yearly_start_times[i]),(bepi.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(bepi.bt[thisyear])==False))
if datas >0: total_data_days_yearly_bepi[i]=datas*min_in_days
#for solo
thisyear=np.where(np.logical_and((solo.time > yearly_start_times[i]),(solo.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(solo.bt[thisyear])==False))
if datas >0: total_data_days_yearly_solo[i]=datas*min_in_days
#same for STEREO-A
thisyear=np.where(np.logical_and((sta.time > yearly_start_times[i]),(sta.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(sta.bt[thisyear])==False))
if datas >0: total_data_days_yearly_sta[i]=datas*min_in_days
#same for STEREO-B
thisyear=np.where(np.logical_and((stb.time > yearly_start_times[i]),(stb.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(stb.bt[thisyear])==False))
if datas >0: total_data_days_yearly_stb[i]=datas*min_in_days
#same for MESSENGER
thisyear=np.where(np.logical_and((mesnon.time > yearly_start_times[i]),(mesnon.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(mesnon.bt[thisyear])==False))
if datas >0: total_data_days_yearly_mes[i]=datas*min_in_days
#same for Mercury alone with non-removed dataset
#start with 2011
# if i == 4:
# thisyear=np.where(np.logical_and((mesnon.time > mercury_orbit_insertion_time),(mesnon.time < yearly_end_times[i])))[0]
# datas=np.size(np.where(np.isnan(mesnon.bt[thisyear])==False))
# if datas >0: total_data_days_yearly_merc[i]=datas*min_in_days
# #2012 onwards
# if i > 4:
# thisyear=np.where(np.logical_and((mesnon.time > yearly_start_times[i]),(mesnon.time < yearly_end_times[i])))
# datas=np.size(np.where(np.isnan(mesnon.bt[thisyear])==False))
# if datas >0: total_data_days_yearly_merc[i]=datas*min_in_days
#same for VEX
thisyear=np.where(np.logical_and((vexnon.time > yearly_start_times[i]),(vexnon.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(vexnon.bt[thisyear])==False))
if datas >0: total_data_days_yearly_vex[i]=datas*min_in_days
#for MAVEN different time resolution
thisyear=np.where(np.logical_and((mav.time > yearly_start_times[i]),(mav.time < yearly_end_times[i])))[0]
datas=np.size(np.where(np.isnan(mav.bt[thisyear])==False))
datas_ind=np.where(np.isnan(mav.bt[thisyear])==False)
#sum all time intervals for existing data points, but avoid counting gaps where diff is > 1 orbit (0.25 days)
alldiff=np.diff(parse_time(mav.time[datas_ind]).plot_date)
smalldiff_ind=np.where(alldiff <0.25)
if datas >0: total_data_days_yearly_mav[i]=np.sum(alldiff[smalldiff_ind])
print('Data days each year:')
print()
print('MESSENGER')
print(np.round(total_data_days_yearly_mes,1))
print()
print('VEX at Venus')
print(np.round(total_data_days_yearly_vex,1))
print()
print('STB')
print(np.round(total_data_days_yearly_stb,1))
print()
print('MAVEN')
print(np.round(total_data_days_yearly_mav,1))
print()
print()
print('Wind')
print(np.round(total_data_days_yearly_win,1))
print('STA')
print(np.round(total_data_days_yearly_sta,1))
print('PSP')
print(np.round(total_data_days_yearly_psp,1))
print('Bepi')
print(np.round(total_data_days_yearly_bepi,1))
print('Solar Orbiter')
print(np.round(total_data_days_yearly_solo,1))
print()
print('done')
# ### get yearly ICME rates at each spacecraft
# In[7]:
#define dates of January 1 from 2007 to 2022
years_jan_1_str_plus1=[str(i)+'-01-01' for i in np.arange(2007,last_year+1) ]
yearly_bin_edges=parse_time(years_jan_1_str_plus1).plot_date
#bin width in days
binweite=365/8
(histmes1, bin_edgesmes) = np.histogram(parse_time(ic.icme_start_time[mesi]).plot_date, yearly_bin_edges)
(histvex1, bin_edgesvex) = np.histogram(parse_time(ic.icme_start_time[vexi]).plot_date, yearly_bin_edges)
(histmav1, bin_edgesmav) = np.histogram(parse_time(ic.icme_start_time[mavi]).plot_date, yearly_bin_edges)
(histstb1, bin_edgesstb) = np.histogram(parse_time(ic.icme_start_time[stbi]).plot_date, yearly_bin_edges)
(histwin1, bin_edgeswin) = np.histogram(parse_time(ic.icme_start_time[wini]).plot_date, yearly_bin_edges)
(histsta1, bin_edgessta) = np.histogram(parse_time(ic.icme_start_time[stai]).plot_date, yearly_bin_edges)
(histpsp1, bin_edgespsp) = np.histogram(parse_time(ic.icme_start_time[pspi]).plot_date, yearly_bin_edges)
(histsolo1, bin_edgessolo) = np.histogram(parse_time(ic.icme_start_time[soli]).plot_date, yearly_bin_edges)
(histbepi1, bin_edgesbepi) = np.histogram(parse_time(ic.icme_start_time[beci]).plot_date, yearly_bin_edges)
binedges=bin_edgeswin
#normalize each dataset for data gaps, so correcting ICME rate for actual data availability
#note that for VEX and MESSENGER this was done with the non-removed datasets (vexnon, mesnon)
histvex=np.round(histvex1/total_data_days_yearly_vex*365.24,1)
histmes=np.round(histmes1/total_data_days_yearly_mes*365.24,1)
#ok for these spacecraft as they are continuously in the solar wind and the MAVEN data set is made without orbit gaps
histsta=np.round(histsta1/total_data_days_yearly_sta*365.24,1)
#STA beacon data used in 2019 - set manually
histsta[-3]=13
#STA beacon data used in 2020 - set manually
histsta[-2]=10*4/3
histmav=np.round(histmav1/total_data_days_yearly_mav*365.24,1)
histmav[7]=np.nan #not enough data for 2014
histstb=np.round(histstb1/total_data_days_yearly_stb*365.24,1)
histwin=np.round(histwin1/total_data_days_yearly_win*365.24,1)
histpsp=np.round(histpsp1/total_data_days_yearly_psp*365.24,1)
histsolo=np.round(histsolo1/total_data_days_yearly_solo*365.24,1)
histbepi=np.round(histbepi1/total_data_days_yearly_bepi*365.24,1)
print('corrected ICME rates for years')
print(yearly_mid_times)
print('MESSENGER',histmes)
print('VEX',histvex)
print('Wind',histwin)
print('PSP',histpsp)
print('STA',histsta)
print('STB',histstb)
print('MAVEN',histmav)
sns.set_context("talk")
sns.set_style('darkgrid')
plt.figure(11,figsize=(12,6),dpi=60)
plt.ylabel('ICMEs per year, ICMECAT')
plt.plot(yearly_mid_times,histmes,'-',label='MESSENGER')
plt.plot(yearly_mid_times,histvex,'-',label='VEX')
plt.plot(yearly_mid_times,histwin,'-',label='Wind')
plt.plot(yearly_mid_times,histsta,'-',label='STA')
plt.plot(yearly_mid_times,histstb,'-',label='STB')
plt.plot(yearly_mid_times,histmav,'-',label='MAVEN')
plt.plot(yearly_mid_times,histpsp,'-',label='PSP')
plt.plot(yearly_mid_times,histsolo,'-',label='SolarOrbiter')
plt.plot(yearly_mid_times,histbepi,'-',label='BepiColombo')
plt.legend(loc=1,fontsize=10)
################### calculate general parameters
print()
print()
print('calculate ICME rate matrix std, mean, max, min')
#arrange icmecat rate data so each row contains std, mean, max, min
icrate=pd.DataFrame(np.zeros([len(yearly_mid_times),6]), columns=['year','std1', 'median1','mean1', 'max1', 'min1'] )
for i in np.arange(0,len(yearly_mid_times)):
icrate.at[i,'year']=yearly_mid_times_num[i]
icrate.at[i,'median1']=np.round(np.nanmedian([histwin[i],histvex[i],histsta[i],histstb[i],histmes[i],histmav[i],histpsp[i]]),1)
icrate.at[i,'mean1']=np.round(np.nanmean([histwin[i],histvex[i],histsta[i],histstb[i],histmes[i],histmav[i],histpsp[i]]),1)
icrate.at[i,'std1']=np.round(np.nanstd([histwin[i],histvex[i],histsta[i],histstb[i],histmes[i],histmav[i],histpsp[i]]),1)
icrate.at[i,'max1']=np.nanmax([histwin[i],histvex[i],histsta[i],histstb[i],histmes[i],histmav[i],histpsp[i]])
icrate.at[i,'min1']=np.nanmin([histwin[i],histvex[i],histsta[i],histstb[i],histmes[i],histmav[i],histpsp[i]])
#change matplotlib time before plotting
icrate_year2=icrate.year + mdates.date2num(np.datetime64('0000-12-31'))
plt.plot([icrate_year2,icrate_year2],[icrate.mean1-icrate.std1,icrate.mean1+icrate.std1],'-k',lw=0.5)
plt.plot(icrate_year2,icrate.mean1,'ok',markerfacecolor='white')
# icrate=pd.DataFrame(np.zeros([len(histvex)*6,3]), columns=['year','rate','sc'] )
# #write all icme rates into this array
# icrate.at[0:12,'rate']=histmes
# icrate.at[0:12,'year']=yearly_start_times_num
# icrate.at[0:12,'sc']='MESSENGER'
# icrate.at[13:25,'rate']=histvex
# icrate.at[13:25,'year']=yearly_start_times_num
# icrate.at[13:25,'sc']='VEX'
# icrate.at[26:38,'rate']=histwin
# icrate.at[26:38,'year']=yearly_start_times_num
# icrate.at[26:38,'sc']='Wind'
# icrate.at[39:51,'rate']=histvex
# icrate.at[39:51,'year']=yearly_start_times_num
# icrate.at[39:51,'sc']='STA'
# icrate.at[52:64,'rate']=histvex
# icrate.at[52:64,'year']=yearly_start_times_num
# icrate.at[52:64,'sc']='STB'
# icrate.at[65:77,'rate']=histvex
# icrate.at[65:77,'year']=yearly_start_times_num
# icrate.at[65:77,'sc']='MAVEN'
# sns.boxplot(x='year',y='rate',data=icrate)
icrate
# ### get Richardson and Cane ICME rate for comparison
# In[8]:
#convert times in dataframe from richardson and cane list to numpy array
r1=np.array(rc_df['Disturbance Y/M/D (UT) (a)'])
#to get ICME rate, go through all rows
rc_year=np.zeros(len(r1))
#extract string and check whether its a viable float and non nan:
for p in np.arange(0,len(r1)):
rc_yearstr=str(r1[p,0])
if hs.is_float(rc_yearstr[0:4]):
if np.isfinite(float(rc_yearstr[0:4])):
rc_year[p]=float(rc_yearstr[0:4]) #rc_year contains all ICME
rc_year.sort()
rc_icme_per_year=np.trim_zeros(rc_year)
#print(rc_year)
#plot check whats in this array
sns.set_style('darkgrid')
fig=plt.figure(12,figsize=(12,5),dpi=80)
ax11=sns.distplot(rc_icme_per_year,bins=24,kde=None)
plt.ylabel('ICMEs per year, RC list')
#count all full years from 1996-2021
bins_years=2022-1996
#get yearly ICME rate (use range to get correct numbers)
rc_rate_values=np.histogram(rc_icme_per_year,bins=bins_years,range=(1996,2022))[0]
rc_rate_time=np.histogram(rc_icme_per_year,bins=bins_years,range=(1996,2022))[1][0:-1]
print(rc_rate_values)
print(rc_rate_time)
years_jul_1_str_rc=[str(i)+'-07-01' for i in np.arange(1996,2021) ]
yearly_mid_times_rc=parse_time(years_jul_1_str_rc).datetime
yearly_mid_times_num_rc=parse_time(years_jul_1_str_rc).plot_date
print(yearly_mid_times_rc)
#plt.figure(2)
#plt.plot(yearly_mid_times_rc,rc_rate_values)
# ### **Figure 1** plot ICME frequency cycle 24
# In[9]:
sns.set_context("talk")
#sns.set_style('whitegrid',{'grid.linestyle': '--'})
sns.set_style("ticks",{'grid.linestyle': '--'})
fsize=15
fig=plt.figure(1,figsize=(12,10),dpi=80)
######################## Fig 1a - sc positions during ICMEs vs time
ax1 = plt.subplot(211)
msize=5
plt.plot_date(ic.icme_start_time[mesi],ic.mo_sc_heliodistance[mesi],fmt='o',color='coral',markersize=msize,label='MESSENGER')
plt.plot_date(ic.icme_start_time[vexi],ic.mo_sc_heliodistance[vexi],fmt='o',color='orange',markersize=msize,label='VEX')
plt.plot_date(ic.icme_start_time[wini],ic.mo_sc_heliodistance[wini],fmt='o',color='mediumseagreen',markersize=msize,label='Wind')
plt.plot_date(ic.icme_start_time[stai],ic.mo_sc_heliodistance[stai],fmt='o',color='red',markersize=msize,label='STEREO-A')
plt.plot_date(ic.icme_start_time[stbi],ic.mo_sc_heliodistance[stbi],fmt='o',color='royalblue',markersize=msize,label='STEREO-B')
plt.plot_date(ic.icme_start_time[mavi],ic.mo_sc_heliodistance[mavi],fmt='o',color='steelblue',markersize=msize,label='MAVEN')
plt.plot_date(ic.icme_start_time[pspi],ic.mo_sc_heliodistance[pspi],fmt='o',color='black',markersize=msize,label='PSP')
plt.plot_date(ic.icme_start_time[soli],ic.mo_sc_heliodistance[soli],'o',c='black',markerfacecolor='white', markersize=msize,label='Solar Orbiter')
plt.plot_date(ic.icme_start_time[beci],ic.mo_sc_heliodistance[beci],'s',c='darkblue',markerfacecolor='lightgrey', markersize=msize, label='BepiColombo')
plt.legend(loc='upper left',fontsize=10)
plt.ylabel('Heliocentric distance R [AU]',fontsize=fsize)
plt.xticks(yearly_start_times,fontsize=fsize)
#change matplotlib time before plotting
yearly_bin_edges2=yearly_bin_edges + mdates.date2num(np.datetime64('0000-12-31'))
plt.xlim(yearly_bin_edges2[0],yearly_bin_edges2[-1])
plt.ylim([0,1.7])
plt.yticks(np.arange(0,1.9,0.2),fontsize=fsize)
ax1.xaxis_date()
myformat = mdates.DateFormatter('%Y')
ax1.xaxis.set_major_formatter(myformat)
#grid for icme rate
for i in np.arange(0,2.0,0.2):
ax1.plot([datetime.datetime(2007,1,1),datetime.datetime(2024,1,1)],np.zeros(2)+i,linestyle='--',color='grey',alpha=0.5,lw=0.8,zorder=0)
for i in np.arange(0,14):
ax1.plot([yearly_start_times[i],yearly_start_times[i]],[0,2],linestyle='--',color='grey',alpha=0.5,lw=0.8,zorder=0)
#################### Fig 1b
sns.set_style("ticks",{'grid.linestyle': '--'})
ax2 = plt.subplot(212)
ax3=ax2.twinx()
#change matplotlib time before plotting
#ssn_time2=ssn.time + mdates.date2num(np.datetime64('0000-12-31'))
ax3.plot(ssn_ms.time,ssn_ms.spot,'-k',alpha=0.5,linewidth=1.5,label='monthly smoothed sunspot number',zorder=0)
ax3.set_ylabel('Sunspot number SIDC')
ax3.set_ylim(0,155)
ax3.legend(loc=1,fontsize=10)
#grid for icme rate
for i in np.arange(0,50,10):
ax2.plot([datetime.datetime(2007,1,1),datetime.datetime(2023,1,1)],np.zeros(2)+i,linestyle='--',color='k',alpha=0.4,lw=0.8,zorder=0)
binweite=int(np.round(360/8))
bin_edges=bin_edgeswin[:-1]
#change matplotlib time before plotting
bin_edges2=bin_edges + mdates.date2num(np.datetime64('0000-12-31'))
alp=0.8
ax2.bar(bin_edges2+5+binweite,histmes, width=binweite,color='coral', alpha=alp,label='MESSENGER')
ax2.bar(bin_edges2+5+binweite*2,histvex, width=binweite,color='orange', alpha=alp,label='VEX')
ax2.bar(bin_edges2+5+binweite*3,histstb, width=binweite,color='royalblue', alpha=alp,label='STEREO-B')
ax2.bar(bin_edges2+5+binweite*2,histbepi, width=binweite,color='lightgrey', alpha=alp,label='BepiColombo')
ax2.bar(bin_edges2+5+binweite*3,histsolo, width=binweite,color='white', edgecolor='black', alpha=alp,label='Solar Orbiter')
ax2.bar(bin_edges2+5+binweite*4,histwin, width=binweite,color='mediumseagreen', alpha=alp,label='Wind')
ax2.bar(bin_edges2+5+binweite*5,histsta, width=binweite,color='red', alpha=alp,label='STEREO-A')
ax2.bar(bin_edges2+5+binweite*6,histpsp, width=binweite,color='black', alpha=alp,label='PSP')
ax2.bar(bin_edges2+5+binweite*7,histmav, width=binweite,color='steelblue', alpha=alp,label='MAVEN')
#ax2.bar(bin_edgeswin[:-1]+5+binweite*3,rc_rate_values[-14:-1], width=binweite,color='darkgreen', alpha=0.8,label='Wind')
#ax2.boxplot(histmes)
#RC values
#ax2.plot(bin_edgeswin[:-1]+5+binweite*3,rc_rate_values[-14:-1],'ok',markerfacecolor='white',marker='o',markersize=5,label='Earth RC list')
#mean and standard deviation and min max
ax2.plot([icrate_year2,icrate_year2],[icrate.mean1-icrate.std1,icrate.mean1+icrate.std1],'-k',lw=1.1)
#ax2.plot([icrate.year,icrate.year],[icrate.min1,icrate.max1],'--k',lw=1.1)
ax2.plot(icrate_year2,icrate.mean1,'ok',markerfacecolor='white',label='yearly ICME rate mean',zorder=3)
ax2.plot([icrate_year2[1],icrate_year2[1]],[icrate.mean1[1]-icrate.std1[1],icrate.mean1[1]+icrate.std1[1]],'--k',lw=1.1,label='yearly ICME rate std')
ax2.set_ylim(0,48)
ax2.set_xlim(yearly_bin_edges2[0],yearly_bin_edges2[-1])
ax2.legend(loc=2,fontsize=10)
fsize=15
ax2.set_ylabel('normalized ICME rate per year',fontsize=fsize)
#ax2.set_yticks(fontsize=fsize)
ax2.xaxis_date()
myformat = mdates.DateFormatter('%Y')
ax2.xaxis.set_major_formatter(myformat)
plt.xticks(yearly_start_times, fontsize=fsize)
plt.xlabel('Year',fontsize=fsize)
plt.tight_layout()
#plt.annotate('(a)',[0.01,0.96],xycoords='figure fraction')
#plt.annotate('(b)',[0.01,0.47],xycoords='figure fraction')
#plt.savefig('results/cycle25_icme_rate.pdf', dpi=100)
plt.savefig(outputdirectory+'/icmecat_icme_rate.png', dpi=100)
#
#
#
#
#
# # 3 get solar cycle results on ICME rates and sunspot numbers
# ## solar cycle 23
# In[10]:
print('cycle 23\n')
############################# times
print('times:')
#these years cover solar cycle 23
years23=np.arange(1996,2009)
print(years23)
last_year=years23[-1]
years_jan_1_str_23=[str(i)+'-01-01' for i in np.arange(1996,last_year+1) ]
yearly_start_times_23=parse_time(years_jan_1_str_23).datetime
yearly_start_times_num_23=parse_time(years_jan_1_str_23).plot_date
#same for July 1 as middle of the year
years_jul_1_str_23=[str(i)+'-07-01' for i in np.arange(1996,last_year+1) ]
yearly_mid_times_23=parse_time(years_jul_1_str_23).datetime
yearly_mid_times_num_23=parse_time(years_jul_1_str_23).plot_date
print(yearly_mid_times_23)
#same for december 31
years_dec_31_str_23=[str(i)+'-12-31' for i in np.arange(1996,last_year+1) ]
yearly_end_times_23=parse_time(years_dec_31_str_23).datetime
yearly_end_times_num_23=parse_time(years_dec_31_str_23).plot_date
# minimum and maximum times as given by
#http://www.sidc.be/silso/cyclesmm
#1996 08 11.2 2001 11 180.3 12 04
solarmin23=parse_time('1996-08-01').datetime
# minstart_23=solarmin_23-datetime.timedelta(days=366*1.5)
# minend=solarmin+datetime.timedelta(days=365)
# minstart_num=parse_time(minstart).plot_date
# minend_num=parse_time(minend).plot_date
solarmax23=parse_time('2001-11-01').datetime
# maxstart=solarmax-datetime.timedelta(days=365*3)
# maxend=solarmax+datetime.timedelta(days=365/2)
# maxstart_num=parse_time(maxstart).plot_date
# maxend_num=parse_time(maxend).plot_date
print('min/max',solarmin23,solarmax23)
print()
#################### spots
#get yearly mean sunspot number
spots23=np.zeros(len(years23))
counter=0
for q in years23:
spots23[counter]=np.mean(ssn.spot[np.where(ssn.year==q)[0] ] )
counter=counter+1
print('spots:')
print('spots yearly mean', np.rint(spots23))
print()
#################### ICME rate
#number of MFR events in Wind ICME catalog,
#for years as in yearly_mid_times_23 but start aug 1996 and end with
#nov 2008 #note : halloween events at ACE! Wind not?
wind_mfr_number_23=[2,8,29,11,30,21,20,6,12,16,13,7,3]
#wind_mfr_number_23_err=[2,8,29,11,30,21,20,6,12,16,13,7,3]
rc_rate23=rc_rate_values[0:13]
print('icme rate:')
print('icmes RC',rc_rate23)
print()
# ## solar cycle 24
# In[11]:
print('cycle 24\n')
#################### times
print('times:')
#these years cover solar cycle 24
years24=np.arange(2009,2020)
print(years24)
#same for July 1 as middle of the year
last_year=2020
years_jul_1_str_24=[str(i)+'-07-01' for i in np.arange(2009,last_year) ]
yearly_mid_times_24=parse_time(years_jul_1_str_24).datetime
yearly_mid_times_num_24=parse_time(years_jul_1_str_24).plot_date
print(yearly_mid_times_24)
print()
#################### spots
print('spots:')
#get yearly mean sunspot number
spots24=np.zeros(len(years24))
counter=0
for q in years24:
spots24[counter]=np.mean(ssn.spot[np.where(ssn.year==q)[0] ] )
counter=counter+1
print('spots yearly mean:', np.rint(spots24))
print()
print('----------')
################# ICME rates
print('ICME rate:')
print()
#2009 to 2019 are indices 13:24 of rc_rate_values (index 0 corresponds to 1996)
rc_rate24=rc_rate_values[13:24]
print(years24)
print('icmes RC',rc_rate24)
print()
#here also 2009 to 2019 (icrate starts in 2007, so indices 2:13)
ic_rate24=icrate[2:13].mean1.to_numpy()
ic_rate24_std=icrate[2:13].std1.to_numpy()
print('icmes ICMECAT mean',ic_rate24)
print('icmes ICMECAT std',ic_rate24_std)
icrate_years_24=parse_time(mdates.num2date(icrate_year2[2:13])).iso
print(icrate_years_24)
print()
print('ratio RC to ICMECAT:')
print(np.round(np.mean(rc_rate24/ic_rate24),2))
# ## solar cycle 25
# In[12]:
print('cycle 25\n')
#################### times from 2020 onwards
print('times:')
#these years cover solar cycle 24
years25=np.arange(2020,2022)
print(years25)
#same for July 1 as middle of the year
last_year=2022
years_jul_1_str_25=[str(i)+'-07-01' for i in np.arange(2020,last_year) ]
yearly_mid_times_25=parse_time(years_jul_1_str_25).datetime
yearly_mid_times_num_25=parse_time(years_jul_1_str_25).plot_date
print(yearly_mid_times_25)
print()
#################### spots
print('spots:')
#get yearly mean sunspot number
spots25=np.zeros(len(years25))
counter=0
for q in years25:
spots25[counter]=np.mean(ssn.spot[np.where(ssn.year==q)[0] ] )
counter=counter+1
print('spots yearly mean:', np.rint(spots25))
print()
print('----------')
################# ICME rates
print('ICME rate:')
print()
#2020 and 2021 are indices 24 and 25
rc_rate25=rc_rate_values[24:26]
print(years25)
print('icmes RC',rc_rate25)
print()
ic_rate25=icrate[13:len(icrate)].mean1.to_numpy()
ic_rate25_std=icrate[13:len(icrate)].std1.to_numpy()
print('icmes ICMECAT mean',ic_rate25)
print('icmes ICMECAT std',ic_rate25_std)
icrate_years_25=parse_time(mdates.num2date(icrate_year2[13:len(icrate)])).iso
print(icrate_years_25)
print()
#print('ratio RC to ICMECAT:')
#print(np.round(np.mean(rc_rate24/ic_rate24),2))
# ## **Figure 2** correlation SSN with ICME rate and fit
# plot SSN vs ICME rate, linear fit with confidence interval
# In[13]:
#add spots23/24 and rc_rate23/24 into 1 array for correlation
spots_corr=np.hstack([spots23,spots24])
rc_rate_corr=np.hstack([rc_rate23,rc_rate24])
#quick check with seaborn for correlation
#seaborn uses this :import statsmodels
#kind{ “scatter” | “reg” | “resid” | “kde” | “hex” }, optional
#sns.jointplot(spots_corr,rc_rate_corr,kind='reg',xlim=[0,np.max(spots_corr)+20],ylim=[0,np.max(rc_rate_corr+10)], \
# marginal_kws=dict(bins=5, rug=True),x_ci=95).set_axis_labels("yearly mean SSN (12 month running mean)", "ICME rate (Richardson & Cane)")
############################## make linear fit
#https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html
print('linear fit SSN vs. ICME rate')
linfit=scipy.stats.linregress(spots_corr,rc_rate_corr)
print(linfit)
print('icme_rate[per year] = (',np.round(linfit.slope,3),'+/-',np.round(linfit.stderr,3),') * sunspot number [yearly average] + ',np.round(linfit.intercept,3))
print('inverse slope:',np.round(1/linfit.slope,2))
print('Pearson correlation coefficient:',np.round(linfit.rvalue,2))
######################### Function for conversion SSN to ICMERATE, with errors on fit confidence 1 time std
#with these results from the linear fit, make a conversion function from ssn to icme_rate
#old
#def ssn_to_rate(ssn,fitresult):
# rate=linfit.slope*ssn+linfit.intercept
# rate_low=(linfit.slope-1*linfit.stderr)*ssn+linfit.intercept
# rate_up=(linfit.slope+1*linfit.stderr)*ssn+linfit.intercept
# return rate, rate_low, rate_up
#with these results from the linear fit, make a conversion function from ssn to icme_rate
def ssn_to_rate(ssn,fitresult):
rate=linfit.slope*ssn+linfit.intercept
rate_low=(linfit.slope-1*linfit.stderr)*ssn+linfit.intercept
rate_up=(linfit.slope+1*linfit.stderr)*ssn+linfit.intercept
return rate, rate_low, rate_up
print('mean difference icme rate SC23 to predicted rate with linear fit, over full cycle:', np.round(np.mean(ssn_to_rate(spots23,linfit)[0]-rc_rate23),2),'+/-', np.round(np.std(ssn_to_rate(spots23,linfit)[0]-rc_rate23),1))#
print('mean difference icme rate SC24 to predicted rate with linear fit, over full cycle:', np.round(np.mean(ssn_to_rate(spots24,linfit)[0]-rc_rate24),2),'+/-', np.round(np.std(ssn_to_rate(spots24,linfit)[0]-rc_rate24),1))#
print()
mean_stddev=np.mean([np.std(ssn_to_rate(spots23,linfit)[0]-rc_rate23),np.std(ssn_to_rate(spots24,linfit)[0]-rc_rate24)])
print('mean stddev for both cycles gives a 1 sigma spread in the linear fit',linfit.intercept,'+/-', mean_stddev)
print()
#with these results from the linear fit, make a conversion function from ssn to icme_rate
def ssn_to_rate(ssn,fitresult):
rate=linfit.slope*ssn+linfit.intercept
rate_low=(linfit.slope-1*linfit.stderr)*ssn+linfit.intercept - mean_stddev
rate_up=(linfit.slope+1*linfit.stderr)*ssn+linfit.intercept + mean_stddev
return rate, rate_low, rate_up
############################ plot figure
sns.set_context("talk")
sns.set_style('darkgrid')
fsize=15
fig=plt.figure(2,figsize=(10,7),dpi=80)
plt.plot(spots23,rc_rate23,color='black',marker='o',linestyle='',markersize=10,label='solar cycle 23')
plt.plot(spots24,rc_rate24,color='black',markerfacecolor='white',marker='o',linestyle='',markersize=10,label='solar cycle 24')
plt.plot(spots25,rc_rate25,color='black',markerfacecolor='white',marker='s',linestyle='',markersize=10,label='solar cycle 25 (not fitted)')
plt.xlim(0,320)
plt.ylim(0,np.max(rc_rate_corr)+30)
plt.xlabel("yearly mean SSN (from 13 month running mean)")
plt.ylabel("ICME rate per year (Richardson & Cane)")
#errors
#ylinfit_1=(linfit.slope-linfit.stderr)*xlinfit+linfit.intercept
#ylinfit_2=(linfit.slope+linfit.stderr)*xlinfit+linfit.intercept
#plt.plot(xlinfit,ylinfit_1,'--k')
#plt.plot(xlinfit,ylinfit_2,'--k')
#https://seaborn.pydata.org/generated/seaborn.regplot.html
#sns.regplot(spots_corr,rc_rate_corr, x_ci='ci',ci=95,label=r'fit confidence 2$\mathrm{\sigma}$',truncate=False)
#sns.regplot(spots_corr,rc_rate_corr, x_ci='ci',ci=68,label=r'fit confidence 1$\mathrm{\sigma}$',truncate=False)
xlinfit=np.arange(0,350)
#1 sigma interval by using mean difference as +/- to linear fit
ylinfit_1=(linfit.slope+linfit.stderr)*xlinfit+linfit.intercept+mean_stddev
ylinfit_2=(linfit.slope-linfit.stderr)*xlinfit+linfit.intercept-mean_stddev
plt.fill_between(xlinfit,ylinfit_1,ylinfit_2,alpha=0.2,color='coral',label='fit confidence 1$\mathrm{\sigma}$')
#plt.plot(xlinfit,ylinfit_1,'--k')
#plt.plot(xlinfit,ylinfit_2,'--k')
ylinfit=linfit.slope*xlinfit+linfit.intercept
plt.plot(xlinfit,ylinfit,'-k',label='linear fit')
plt.plot(xlinfit,np.zeros(len(xlinfit))+52,'--k',alpha=0.5)
plt.plot(xlinfit,np.zeros(len(xlinfit))+26,'--k',alpha=0.5)
plt.legend(loc=2)
plt.tight_layout()
#plt.savefig(outputdirectory+'/fig2_rate_ssn.pdf', dpi=300)
plt.savefig(outputdirectory+'/fig2_rate_ssn.png', dpi=300)
# ## predictions for solar cycle 25: SSN and ICME rate
# ### 1. Mean cycle model
# In[14]:
# from heliocats import stats as hs
# importlib.reload(hs) #reload again while debugging
print('---------------------------------')
print('cycle 25')
print()
print('calculate several Hathaway function models for SSN and predict the ICME rate')
print('')
print('---------------------------------')
############# set yearly times
last_year=2033
years_jan_1_str_25=[str(i)+'-01-01' for i in np.arange(2020,last_year) ]
yearly_start_times_25=parse_time(years_jan_1_str_25,format='iso').datetime
yearly_start_times_num_25=parse_time(years_jan_1_str_25,format='iso').plot_date
#same for July 1 as middle of the year
years_jul_1_str_25=[str(i)+'-07-01' for i in np.arange(2020,last_year) ]
yearly_mid_times_25=parse_time(years_jul_1_str_25,format='iso').datetime
yearly_mid_times_num_25=parse_time(years_jul_1_str_25,format='iso').plot_date
#### for smooth plotting daily list of times
#t0 of solar cycle 25 is unclear at time of writing! assumed 2020 june 1
start_25=parse_time('2020-06-01',format='iso').datetime
#not end time of next solar cycle, but end time of plotting
end_25=parse_time('2033-01-01',format='iso').datetime
#create an array with 1 day resolution between t start and end
times_25_daily = [ start_25 + datetime.timedelta(days=n) for n in range(int ((end_25 - start_25).days))]
times_25_daily_mat=mdates.date2num(times_25_daily)
######################################################## 1. Mean cycle model
#hathaway 2015: t0 minus 4 months is good match
#t0 is start date of cycle
shift_t0=timedelta(days=4*30+1)
print()
print('1. mean cycle')
#mean of all cycles function: Hathaway 1994
#parameters taken from Hathaway 2015 review section 4.5 DOI 10.1007/lrsp-2015-4
#https://link.springer.com/article/10.1007/lrsp-2015-4
#but this is based on different SSN definitions?
# am=195
# amerr=50
# bm=56
# cm=0.8
print('load min max for all cycles from SILSO')
#http://sidc.oma.be/silso/DATA/Cycles/TableCyclesMiMa.txt
#Table of minima, maxima and cycle durations based on
#13-month smoothed monthly mean sunspot numbers (Version 2.0).
mima_num=np.loadtxt('data/TableCyclesMiMa.txt', skiprows=2)
mima = pd.DataFrame(mima_num)
mima.columns = ['cycle','min_year','min_month','min_sn','max_year','max_month','max_sn','dur_year','dur_months']
print()
print('Average maximum sunspot number (SILSO) of all cycles, 13 months smoothed:',np.round(mima.max_sn.mean(),1),' +/- ',np.round(mima.max_sn.std(),1))
print()
#use all parameters as in Hathaway but adjust amplitude to average for the SILSO numbers:
am=342
bm=56
cm=0.8
print('average cycle a,b,c:', am,bm,cm)
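# hs.hathaway is provided by the heliocats package and is not shown in this file.
# As a hedged sketch only, the Hathaway et al. (1994) cycle shape referenced in the
# review cited above is f(x) = a * x**3 / (exp(x**2) - c) with x = (t - t0)/b; the time
# handling below (b interpreted in months, a month approximated as 30.44 days) is an
# assumption and the real implementation may differ.
def _hathaway_sketch(t_list, t0, a, b, c):
    x = np.array([(t - t0).days / 30.44 for t in t_list]) / b
    return a * x**3 / (np.exp(x**2) - c)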
#mean of all cycles yearly ssn numbers
spots_predict_25m=hs.hathaway(yearly_mid_times_25, start_25-shift_t0,am,bm,cm)
#and same for 1 day resolution
spots_predict_25m_daily=hs.hathaway(times_25_daily, start_25-shift_t0,am,bm,cm)
print('t0 in SC25 mean cycle model is:',start_25-shift_t0)
#time of maximum and ssn
print('max ssn',np.rint(np.max(spots_predict_25m_daily)))
print('at time',str(times_25_daily[np.argmax(spots_predict_25m_daily)])[0:11])
print()
print(yearly_mid_times_25)
print('spots yearly: ',np.rint(spots_predict_25m))
#yearly icme numbers: convert with function
icmes_predict_25m=ssn_to_rate(spots_predict_25m,linfit)[0]
#same daily resolution
icmes_predict_25m_daily=ssn_to_rate(spots_predict_25m_daily,linfit)[0]
print('icmes yearly: ',np.rint(icmes_predict_25m))
print()
print()
print('Merge error from 1. ssn prediction from fit with 2. spread in icme rate observed with ICMECAT (not used in plots later as PP19 is quite similar)')
#1. error from SSN fit yearly, low and high
icmes_predict_25m_low=ssn_to_rate(spots_predict_25m,linfit)[1]
icmes_predict_25m_high=ssn_to_rate(spots_predict_25m,linfit)[2]
#this is the range in the icme rate arising from the fit, symmetric for high and low values
ic_rate25_m_std_fit=np.round(icmes_predict_25m-icmes_predict_25m_low,1)
print('error from SSN to ICME fit',ic_rate25_m_std_fit)
#2. error from ICME rate
#assumption icrate_std=2 for last 3 years
ic_rate25_std=np.hstack([ic_rate24_std[0:-1],np.array([2.0,2.0,2.0])])
print('spread in ICME rate from SC24, assuming 2009=> 2020',ic_rate25_std)
#add both errors as sigma_new=sqrt(sigma1^2+sigma2^2)
ic_rate_25_m_std=np.round(np.sqrt(ic_rate25_m_std_fit**2+ic_rate25_std**2),1)
print('Std in ICME rate from fit and ICMECAT range for each year:')
print(ic_rate_25_m_std)
# In[15]:
########################################################### 2. SC25 panel prediction (SC25PP)
print('---------------------------- 2. SC25 PP 2019')
print()
print('get PP25 prediction from JSON file from NOAA (2020 May 27) ')
#download PP25 prediction from NOAA with timestamp
#sc25pp_url='https://services.swpc.noaa.gov/json/solar-cycle/predicted-solar-cycle.json'
#try: urllib.request.urlretrieve(sc25pp_url,data_path+'predicted-solar-cycle_2020_may_27.json')
#except urllib.error.URLError as e:
# print('Failed downloading ', sc25pp_url,' ',e)
pp25_df=pd.read_json('data/predicted-solar-cycle_2020_may_27.json')
#kill first few rows and start with May 2020
pp25_df=pp25_df.drop([0,1,2,3,4,5],axis=0)
pp25_df['time-tag']
pp25_df_times=parse_time(pp25_df['time-tag']).datetime
pp25_df_times_num=parse_time(pp25_df['time-tag']).plot_date
pp25_df_ssn=pp25_df['predicted_ssn']
pp25_df_ssn_high=pp25_df['high_ssn']
pp25_df_ssn_low=pp25_df['low_ssn']
#make hathaway fit
hw_param_pp25 = scipy.optimize.curve_fit(hs.hathaway_fit, pp25_df_times_num-pp25_df_times_num[0],pp25_df_ssn, p0=[-300,200,60,1])
print('Hathaway function fit parameters x0,a,b,c:',np.round(hw_param_pp25[0][0:3],1),np.round(hw_param_pp25[0][3],2))
#get t0 date for Hathaway function from fit x0
x0fit_in_days=int(np.rint(hw_param_pp25[0][0]))
start_25_fit=mdates.num2date(pp25_df_times_num[0]+ mdates.date2num(np.datetime64('0000-12-31')))+timedelta(days=x0fit_in_days)
print('t0 in SC25 PP prediction is:',start_25_fit)
# same for low and high
hw_param_pp25_low = scipy.optimize.curve_fit(hs.hathaway_fit, pp25_df_times_num-pp25_df_times_num[0],pp25_df_ssn_low, p0=[-300,200,60,1])
x0fit_in_days_low=int(np.rint(hw_param_pp25_low[0][0]))
start_25_fit_low=mdates.num2date(pp25_df_times_num[0]+mdates.date2num(np.datetime64('0000-12-31')))+timedelta(days=x0fit_in_days_low)
print('lower error: Hathaway function fit parameters x0,a,b,c:',np.round(hw_param_pp25_low[0][0:3],1),np.round(hw_param_pp25_low[0][3],2))
hw_param_pp25_high = scipy.optimize.curve_fit(hs.hathaway_fit, pp25_df_times_num-pp25_df_times_num[0],pp25_df_ssn_high, p0=[-300,200,60,1])
x0fit_in_days_high=int(np.rint(hw_param_pp25_high[0][0]))
start_25_fit_high=mdates.num2date(pp25_df_times_num[0]+mdates.date2num(np.datetime64('0000-12-31')))+timedelta(days=x0fit_in_days_high)
print('higher error: Hathaway function fit parameters x0,a,b,c:',np.round(hw_param_pp25_high[0][0:3],1),np.round(hw_param_pp25_high[0][3],2))
print('note that high and low error ranges are calculated here with a Hathaway function, small error to PP forecast')
spots_predict_25pp=hs.hathaway(yearly_mid_times_25,start_25_fit,hw_param_pp25[0][1],hw_param_pp25[0][2],hw_param_pp25[0][3])
spots_predict_25pp_daily=hs.hathaway(times_25_daily,start_25_fit,hw_param_pp25[0][1],hw_param_pp25[0][2],hw_param_pp25[0][3])
#fit parameters for PP19
#start_25_fit
app=hw_param_pp25[0][1]
bpp=hw_param_pp25[0][2]
cpp=hw_param_pp25[0][3]
spots_predict_25pp_low=hs.hathaway(yearly_mid_times_25,start_25_fit_low,hw_param_pp25_low[0][1],hw_param_pp25_low[0][2],hw_param_pp25_low[0][3])
spots_predict_25pp_daily_low=hs.hathaway(times_25_daily,start_25_fit_low,hw_param_pp25_low[0][1],hw_param_pp25_low[0][2],hw_param_pp25_low[0][3])
spots_predict_25pp_high=hs.hathaway(yearly_mid_times_25,start_25_fit_high,hw_param_pp25_high[0][1],hw_param_pp25_high[0][2],hw_param_pp25_high[0][3])
spots_predict_25pp_daily_high=hs.hathaway(times_25_daily,start_25_fit_high,hw_param_pp25_high[0][1],hw_param_pp25_high[0][2],hw_param_pp25_high[0][3])
plt.figure(12,figsize=(10,6),dpi=60)
plt.plot(pp25_df_times,pp25_df_ssn,'-k',markerfacecolor='white')
plt.plot(pp25_df_times,pp25_df['high_ssn'],'-k',markerfacecolor='white')
plt.plot(pp25_df_times,pp25_df['low_ssn'],'-k',markerfacecolor='white')
plt.plot_date(yearly_mid_times_25,spots_predict_25pp,'ok')
plt.plot_date(times_25_daily,spots_predict_25pp_daily,'-y')
plt.plot_date(yearly_mid_times_25,spots_predict_25pp_low,'ok')
plt.plot_date(yearly_mid_times_25,spots_predict_25pp_high,'ok')
plt.plot_date(times_25_daily,spots_predict_25pp_daily_low,'-g')
plt.plot_date(times_25_daily,spots_predict_25pp_daily_high,'-b')
plt.plot_date(times_25_daily,spots_predict_25m_daily,'-r')
plt.ylabel('SSN, PP 25 prediction')
#time of maximum and ssn
print('max ssn',np.rint(np.max(spots_predict_25pp_daily)))
print('at time',str(times_25_daily[np.argmax(spots_predict_25pp_daily)])[0:11])
print()
print('spots yearly: ',np.rint(spots_predict_25pp))
#yearly spots numbers
icmes_predict_25pp=ssn_to_rate(spots_predict_25pp,linfit)[0]
icmes_predict_25pp_low=ssn_to_rate(spots_predict_25pp_low,linfit)[0]
icmes_predict_25pp_high=ssn_to_rate(spots_predict_25pp_high,linfit)[0]
#same daily
icmes_predict_25pp_daily=ssn_to_rate(spots_predict_25pp_daily,linfit)[0]
icmes_predict_25pp_daily_low=ssn_to_rate(spots_predict_25pp_daily_low,linfit)[0]
icmes_predict_25pp_dailyhigh=ssn_to_rate(spots_predict_25pp_daily_high,linfit)[0]
print()
print('Merge errors from (1) the SSN prediction, (2) the SSN-to-ICME-rate fit and (3) the spread in the ICME rate observed with ICMECAT')
#1. error from SSN prediction
ic_rate25_std_pp_ssnpred=np.round(((icmes_predict_25pp_high-icmes_predict_25pp)+abs(icmes_predict_25pp_low-icmes_predict_25pp))/2,2)
print('ICME rate error from SSN prediction',ic_rate25_std_pp_ssnpred)
#2. error from fit SSN to ICME rate
icmes_predict_25_pp=ssn_to_rate(spots_predict_25pp,linfit)[0]
icmes_predict_25_pp_low=ssn_to_rate(spots_predict_25pp,linfit)[1]
icmes_predict_25_pp_high=ssn_to_rate(spots_predict_25pp,linfit)[2]
#this is the range in the icme rate arising from the fit, symmetric for high and low values
ic_rate25_std_pp_ssnfit=np.round(icmes_predict_25_pp-icmes_predict_25_pp_low,1)
print('error from SSN to ICME fit',ic_rate25_std_pp_ssnfit)
#3. error from ICME rate spread
print('spread in ICME rate', ic_rate25_std)
print()
#add all 3 errors as sigma_new=sqrt(sigma1^2+sigma2^2)
ic_rate_25_pp_std=np.round(np.sqrt(ic_rate25_std_pp_ssnpred**2+ic_rate25_std_pp_ssnfit**2+ic_rate25_std**2),1)
print('final Std in ICME rate from SSN prediction, SSN to ICME fit and ICMECAT range for each year:')
print(ic_rate_25_pp_std)
# In[16]:
################################### SC25MC
#SC25MC prediction or MC20 (see paper)
#https://arxiv.org/abs/2006.15263
print('-------------------------- 3. SC25 MC 2020')
a=444
aerr68=48 #MC20 68% range for the SSN maximum is 204-254 (i.e. +/-25), expressed here as an amplitude range
aerr95=147 #MC20 95% range for the SSN maximum is 153-305 (i.e. +/-76), expressed here as an amplitude range
b=60
c=0.8
print('a,b,c:', a,b,c)
print('range for a:',a-aerr68,a+aerr68)
print('start of sc25 here in MC20',start_25-shift_t0)
#yearly_numbers
spots_predict_25=hs.hathaway(yearly_mid_times_25, start_25-shift_t0,a,b,c)
#error ranges
spots_predict_25_lower68=hs.hathaway(yearly_mid_times_25, start_25-shift_t0,a-aerr68,b,c)
spots_predict_25_upper68=hs.hathaway(yearly_mid_times_25, start_25-shift_t0,a+aerr68,b,c)
#spots_predict_25_lower95=hs.hathaway(yearly_mid_times_25, start_25-shift_t0,a-aerr95,b,c)
#spots_predict_25_upper95=hs.hathaway(yearly_mid_times_25, start_25-shift_t0,a+aerr95,b,c)
#daily numbers
spots_predict_25_daily=hs.hathaway(times_25_daily, start_25-shift_t0,a,b,c)
#error ranges
spots_predict_25_daily_lower68=hs.hathaway(times_25_daily, start_25-shift_t0,a-aerr68,b,c)
spots_predict_25_daily_upper68=hs.hathaway(times_25_daily, start_25-shift_t0,a+aerr68,b,c)
#spots_predict_25_daily_lower95=hs.hathaway(times_25_daily, start_25-shift_t0,a-aerr95,b,c)
#spots_predict_25_daily_upper95=hs.hathaway(times_25_daily, start_25-shift_t0,a+aerr95,b,c)
#time of maximum and ssn
print('max ssn',np.rint(np.max(spots_predict_25_daily)))
print('at time',str(times_25_daily[
|
np.argmax(spots_predict_25_daily)
|
numpy.argmax
|
import os
import torch
import shutil
import VGGNet
import argparse
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from models_jt import VAE
from pathlib import Path
from models_jt import Camera
import torch.utils.data as data
from PIL import Image, ImageFile
from torchvision import transforms
import torch.backends.cudnn as cudnn
from tensorboardX import SummaryWriter
from plyfile import PlyElement, PlyData
from Style_function import calc_mean_std
# from pytorch3d.structures import Pointclouds
# from pytorch3d.renderer import compositing
# from pytorch3d.renderer.points import rasterize_points
# cudnn.benchmark = True
# Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError
# # Disable OSError: image file is truncated
# ImageFile.LOAD_TRUNCATED_IMAGES = True
def InfiniteSampler(n):
# i = 0
i = n - 1
order = np.random.permutation(n)
while True:
yield order[i]
i += 1
if i >= n:
np.random.seed()
order = np.random.permutation(n)
i = 0
class InfiniteSamplerWrapper(data.sampler.Sampler):
def __init__(self, data_source):
# super(InfiniteSamplerWrapper, self).__init__()
self.num_samples = len(data_source)
def __iter__(self):
return iter(InfiniteSampler(self.num_samples))
def __len__(self):
return 2 ** 31
def train_transform():
transform_list = [
transforms.Resize(size=(512, 512)),
transforms.RandomCrop(256),
transforms.ToTensor()
]
return transforms.Compose(transform_list)
def train_transform2():
transform_list = [
transforms.Resize(size=(512, 512)),
transforms.ToTensor()
]
return transforms.Compose(transform_list)
def default_transform():
transform_list = [
transforms.ToTensor()
]
return transforms.Compose(transform_list)
class FlatFolderDataset(data.Dataset):
def __init__(self, root, transform=None):
super(FlatFolderDataset, self).__init__()
self.root = root
self.paths = list(Path(self.root).glob('*'))
transform = default_transform() if transform is None else transform
self.transform = transform
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(str(path)).convert('RGB')
img = self.transform(img)
return img
def __len__(self):
return len(self.paths)
def name(self):
return 'FlatFolderDataset'
class CoorImageDataset(data.Dataset):
def __init__(self, root):
super(CoorImageDataset, self).__init__()
self.root = root
self.image_paths = sorted(list(Path(self.root).glob('rgb_*.png')))
self.geo_paths = sorted(list(Path(self.root).glob('geometry_*.npz')))
data = np.load(str(self.geo_paths[0]))
self.hwf = data['hwf']
# self.near, self.far = data['near'], data['far']
self.near, self.far = 0., 1.
self.transform = default_transform()
def __getitem__(self, index):
image_path, geo_path = self.image_paths[index], self.geo_paths[index]
img = Image.open(str(image_path)).convert('RGB')
img = self.transform(img)
geo = np.load(str(geo_path))
coor_map, cps = geo['coor_map'], geo['cps']
return img, coor_map, cps
def __len__(self):
return len(self.image_paths)
def name(self):
return 'FlatFolderDataset'
class CoorImageDataset_pl(data.Dataset):
def __init__(self, root, factor=0.01):
super(CoorImageDataset_pl, self).__init__()
self.root = root
self.image_paths = sorted(list(Path(self.root).glob('rgb_*.png')))
self.geo_paths = sorted(list(Path(self.root).glob('geometry_*.npz')))
data = np.load(str(self.geo_paths[0]))
self.hwf = data['hwf']
# self.near, self.far = data['near'], data['far']
self.near, self.far = 0., 1.
self.factor = factor
self.transform = default_transform()
ts = np.zeros([len(self.geo_paths), 3], dtype=np.float32)
for i in range(len(self.geo_paths)):
ts[i] = np.load(str(self.geo_paths[i]))['cps'][:3, 3]
dist = ts[np.newaxis] - ts[:, np.newaxis]
dist = dist ** 2
dist = dist.sum(-1) ** 0.5
self.dist = dist
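        # Note: self.dist[i, j] is the Euclidean distance between the camera centres
        # (translation parts of the 'cps' poses) of frames i and j; get_batch() below uses
        # it to sample training views that are spatially close to a reference view.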
def get_batch(self, batch_size, index=None):
if index is None:
index = np.random.randint(0, len(self.image_paths))
dists = self.dist[index]
inds = np.argsort(dists)
prange = max(int(self.factor*len(self.image_paths)), batch_size)
inds = inds[:prange]
inds = np.random.choice(inds, [batch_size], replace=(prange <= batch_size))
imgs, coor_maps, cps = [], [], []
for i in range(batch_size):
img, coor_map, cp = self.__getitem__(inds[i])
imgs.append(img)
coor_maps.append(coor_map)
cps.append(cp)
imgs = torch.stack(imgs).float()
coor_maps = torch.from_numpy(np.stack(coor_maps)).float()
cps = torch.from_numpy(np.stack(cps)).float()
return imgs, coor_maps, cps
def __getitem__(self, index):
image_path, geo_path = self.image_paths[index], self.geo_paths[index]
img = Image.open(str(image_path)).convert('RGB')
img = self.transform(img)
geo = np.load(str(geo_path))
coor_map, cps = geo['coor_map'], geo['cps']
return img, coor_map, cps
def __len__(self):
return len(self.image_paths)
def name(self):
return 'FlatFolderDataset'
def adjust_learning_rate(optimizer, iteration_count):
"""Imitating the original implementation"""
lr = args.lr / (1.0 + args.lr_decay * iteration_count)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def finetune_decoder(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
log_dir = Path(args.log_dir)
log_dir.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(log_dir=str(log_dir))
decoder = VGGNet.decoder
vgg = VGGNet.vgg
decoder.load_state_dict(torch.load('./models/decoder.pth'))
vgg.load_state_dict(torch.load('./models/vgg_normalised.pth'))
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:31])
network = VGGNet.Net(vgg, decoder)
network.train()
network.to(device)
content_tf = train_transform()
style_tf = train_transform()
content_dataset = FlatFolderDataset(args.content_dir, content_tf)
style_dataset = FlatFolderDataset(args.style_dir, style_tf)
content_iter = iter(data.DataLoader(
content_dataset, batch_size=args.batch_size,
sampler=InfiniteSamplerWrapper(content_dataset),
num_workers=args.n_threads))
style_iter = iter(data.DataLoader(
style_dataset, batch_size=args.batch_size,
sampler=InfiniteSamplerWrapper(style_dataset),
num_workers=args.n_threads))
optimizer = torch.optim.Adam(network.decoder.parameters(), lr=args.lr)
for i in tqdm(range(args.max_iter)):
adjust_learning_rate(optimizer, iteration_count=i)
content_images = next(content_iter).to(device)
style_images = next(style_iter).to(device)
loss_c, loss_s = network(content_images, style_images)
loss_c = args.content_weight * loss_c
loss_s = args.style_weight * loss_s
loss = loss_c + loss_s
optimizer.zero_grad()
loss.backward()
optimizer.step()
writer.add_scalar('loss_content', loss_c.item(), i + 1)
writer.add_scalar('loss_style', loss_s.item(), i + 1)
if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter:
state_dict = network.decoder.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].to(torch.device('cpu'))
torch.save(state_dict, save_dir /
'decoder_iter_{:d}.pth.tar'.format(i + 1))
writer.close()
def train_vae(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
log_dir = Path(args.log_dir)
log_dir.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(log_dir=str(log_dir))
vgg = VGGNet.vgg
vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth'))
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:31])
vgg.eval()
vgg.to(device)
style_tf = train_transform()
style_dataset = FlatFolderDataset(args.style_dir, style_tf)
style_iter = iter(data.DataLoader(
style_dataset, batch_size=args.batch_size,
sampler=InfiniteSamplerWrapper(style_dataset),
num_workers=args.n_threads))
vae = VAE(data_dim=1024, latent_dim=args.vae_latent, W=args.vae_w, D=args.vae_d, kl_lambda=args.vae_kl_lambda)
vae.train()
vae.to(device)
vae_ckpt = './pretrained/vae.pth'
if os.path.exists(vae_ckpt):
vae.load_state_dict(torch.load(vae_ckpt))
optimizer = torch.optim.Adam(vae.parameters(), lr=args.lr)
for i in tqdm(range(args.max_iter)):
adjust_learning_rate(optimizer, iteration_count=i)
style_images = next(style_iter).to(device)
style_features = vgg(style_images)
style_mean, style_std = calc_mean_std(style_features)
style_features = torch.cat([style_mean.squeeze(), style_std.squeeze()], dim=-1)
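        # The style code fed to the VAE is the channel-wise mean and std of the VGG feature map
        # concatenated together; with the relu4_1 cut above (512 channels) this is presumably
        # 512 + 512 = 1024 values, matching data_dim=1024 in the VAE constructor.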
recon, _, mu, logvar = vae(style_features)
loss, recon_loss, kl_loss = vae.loss(style_features, recon, mu, logvar, return_losses=True)
optimizer.zero_grad()
loss.backward()
optimizer.step()
writer.add_scalar('Reconstruction Loss', recon_loss.item(), i + 1)
writer.add_scalar('KL Loss', kl_loss.item(), i + 1)
if (i + 1) % 100 == 0:
print("Loss: %.3f | Recon Loss: %.3f| KL Loss: %.3f" % (loss.item(), recon_loss.item(), kl_loss.item()))
if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter:
state_dict = vae.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].to(torch.device('cpu'))
torch.save(state_dict, vae_ckpt)
writer.close()
def train_temporal_invoke(save_dir, sv_name, log_dir, is_ndc, nerf_content_dir, style_dir, batch_size, n_threads=8, lr=1e-3, max_iter=1000):
if is_ndc:
print("Using NDC Coordinate System! Check Nerf and dataset to be LLFF !!!!!!!")
temporal_weight, content_weight, style_weight = 50., 1.0, 1.
else:
temporal_weight, content_weight, style_weight = 50., 1.0, 1.
print_interval = 20
save_model_interval = 200
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
save_dir = Path(save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
log_dir = Path(log_dir)
log_dir.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(log_dir=str(log_dir))
save_dir, log_dir = str(save_dir), str(log_dir)
decoder = VGGNet.decoder
vgg = VGGNet.vgg
ckpts = [os.path.join(save_dir, f) for f in sorted(os.listdir(save_dir)) if sv_name in f]
if len(ckpts) > 0:
ld_dict = torch.load(ckpts[-1])
decoder.load_state_dict(ld_dict['decoder'])
step = ld_dict['step']
else:
print('From original pth file')
decoder.load_state_dict(torch.load('./pretrained/decoder.pth'))
shutil.copy('./pretrained/decoder.pth', save_dir + '/' + sv_name)
step = 0
vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth'))
vgg = nn.Sequential(*list(vgg.children())[:31])
network = VGGNet.Net(vgg, decoder)
network.train()
network.to(device)
style_tf = train_transform2()
content_dataset = CoorImageDataset(nerf_content_dir)
style_dataset = FlatFolderDataset(style_dir, style_tf)
# Camera for Rendering
h, w, focal = content_dataset.hwf
h, w = int(h), int(w)
cx, cy = w/2, h/2
near_prj, far_prj = 1e-3, 1e5
projectionMatrix = np.array([[-2*focal/w, 0, 1-2*cx/w, 0],
[0, 2*focal/h, 2*cy/h-1, 0],
[0, 0, -(far_prj+near_prj)/(far_prj-near_prj), -2*far_prj*near_prj/(far_prj-near_prj)],
[0, 0, -1, 0]])
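    # This appears to be an OpenGL-style perspective projection assembled from the pinhole
    # intrinsics (focal length, principal point at the image centre); near_prj/far_prj only
    # control how depth is mapped to normalised device coordinates.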
camera = Camera(projectionMatrix=projectionMatrix)
camera.to(device)
content_iter = iter(data.DataLoader(
content_dataset, batch_size=batch_size,
sampler=InfiniteSamplerWrapper(content_dataset),
num_workers=n_threads))
style_iter = iter(data.DataLoader(
style_dataset, batch_size=1,
sampler=InfiniteSamplerWrapper(style_dataset),
num_workers=n_threads))
optimizer = torch.optim.Adam(network.decoder.parameters(), lr=lr)
space_dist_threshold = 5e-2
def adjust_learning_rate_local(optimizer, iteration_count):
"""Imitating the original implementation"""
lr = 1e-4 / (1.0 + 5e-5 * iteration_count)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
for i in tqdm(range(step, max_iter)):
# Sampling Patch
patch_size = 512
if patch_size > 0:
patch_h_min, patch_w_min =
|
np.random.randint(0, h - patch_size)
|
numpy.random.randint
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This script validates graphically the homography.py functions
# Dependencies :
# - opencv 2.4.11
# - numpy, matplotlib
# - utm
# - argparse
# <EMAIL>
# 20150917
def project(points, homography):
"""
Expected format for n points : 2Xn
"""
if points.shape[0] != 2:
raise Exception('array points of dimension {0} {1}'. \
format(points.shape[0], points.shape[1]))
if (homography is not None) and homography.size > 0:
import numpy as np
augmentedPoints = np.append(points,[[1]*points.shape[1]], 0)
proj_pts = np.dot(homography, augmentedPoints)
proj_cart = proj_pts[0:2]/proj_pts[2]
return proj_cart
return points
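# Minimal usage sketch (hypothetical values): projecting two image points with a 3x3 homography.
#   _H = np.array([[1., 0., 10.], [0., 1., 5.], [0., 0., 1.]])   # pure translation
#   _pts = np.array([[0., 100.], [0., 50.]])                     # 2 x n points
#   project(_pts, _H)  ->  array([[ 10., 110.], [  5.,  55.]])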
def homography_error(pc_filename):
from homography import create_point_correspondences, homography_matrix
world, image = create_point_correspondences(pc_filename)
homography = homography_matrix(pc_filename)
import numpy as np
inv_homography = np.linalg.inv(homography)
img_pts_proj = project(image.T, homography)
img_pts_reprojected = project(img_pts_proj, inv_homography)
error = 0
for i in range(img_pts_proj.shape[1]):
pt = image[i]
reproj = img_pts_reprojected.T[i]
error += np.linalg.norm(reproj-pt)
return error
def projection_error_matrix(pc_filename, frame_filename):
from homography import homography_matrix
homography = homography_matrix(pc_filename)
import numpy as np
inv_homography = np.linalg.inv(homography)
import cv2
video_image = cv2.imread(frame_filename)
rows,columns,_ = video_image.shape
error_m = np.zeros((rows,columns))
for row in range(rows):
for col in range(columns):
pt = np.array(([row], [col]))
proj = project(pt, homography)
reproj = project(proj, inv_homography)
error =
|
np.linalg.norm(reproj-pt)
|
numpy.linalg.norm
|
import pandas as pd
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
import math
import operator
import os.path
import re
import string
from itertools import islice
from stop_words import get_stop_words
def remove_stop_words(corpus):
stop_words = list(get_stop_words("en"))
stop_words.extend(list(np.loadtxt("files/common_words", dtype="str", ndmin=1)))
results = []
regex = re.compile('[%s]' % re.escape(string.punctuation))
for text in corpus:
text = text.lower()
text = regex.sub('', text)
tmp = text.split(' ')
        # filter stop words in a single pass (the original nested remove-while-iterating
        # loops could skip occurrences and were needlessly quadratic)
        tmp = [word for word in tmp if word not in stop_words]
results.append(" ".join(tmp))
return results
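# Example (hedged: the exact output also depends on the extra words in files/common_words):
#   remove_stop_words(["This is a Test sentence!"]) lower-cases, strips punctuation and drops
#   English stop words, giving roughly ["test sentence"].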
def prepare_training_set(corpus):
words = []
for text in corpus:
for word in text.split(' '):
words.append(word)
words = set(words)
word2int = {}
for i, word in enumerate(words):
word2int[word] = i
sentences = []
for sentence in corpus:
sentences.append(sentence.split())
WINDOW_SIZE = 2
data = []
for sentence in sentences:
for idx, word in enumerate(sentence):
for neighbor in sentence[max(idx - WINDOW_SIZE, 0): min(idx + WINDOW_SIZE, len(sentence)) + 1]:
if neighbor != word:
data.append([word, neighbor])
return words, data, word2int
# function to convert numbers to one hot vectors
def to_one_hot_encoding(data_point_index, one_hot_dim):
one_hot_encoding = np.zeros(one_hot_dim)
one_hot_encoding[data_point_index] = 1
return one_hot_encoding
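# e.g. to_one_hot_encoding(2, 5) -> array([0., 0., 1., 0., 0.])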
def train(data, words, word2int):
df = pd.DataFrame(data, columns=['input', 'label'])
X = [] # input word
Y = [] # target word
one_hot_dim = len(words)
for x, y in zip(df['input'], df['label']):
X.append(to_one_hot_encoding(word2int[x], one_hot_dim))
Y.append(to_one_hot_encoding(word2int[y], one_hot_dim))
# convert them to numpy arrays
X_train = np.asarray(X)
Y_train = np.asarray(Y)
# making placeholders for X_train and Y_train
x = tf.placeholder(tf.float32, shape=(None, one_hot_dim))
y_label = tf.placeholder(tf.float32, shape=(None, one_hot_dim))
# word embedding will be 2 dimension for 2d visualization
EMBEDDING_DIM = 2
# hidden layer: which represents word vector eventually
W1 = tf.Variable(tf.random_normal([one_hot_dim, EMBEDDING_DIM]))
b1 = tf.Variable(tf.random_normal([1])) # bias
hidden_layer = tf.add(tf.matmul(x, W1), b1)
# output layer
W2 = tf.Variable(tf.random_normal([EMBEDDING_DIM, one_hot_dim]))
b2 = tf.Variable(tf.random_normal([1]))
prediction = tf.nn.softmax(tf.add(tf.matmul(hidden_layer, W2), b2))
# loss function: cross entropy
loss = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(prediction), axis=[1]))
# training operation
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
iteration = 30000
for i in range(iteration):
# input is X_train which is one hot encoded word
# label is Y_train which is one hot encoded neighbor word
sess.run(train_op, feed_dict={x: X_train, y_label: Y_train})
if i % 3000 == 0:
print('iteration ' + str(i) + ' loss is : ', sess.run(loss, feed_dict={x: X_train, y_label: Y_train}))
# Now the hidden layer (W1 + b1) is actually the word look up table
return sess.run(W1 + b1)
# function to save both the coordinates of words and the single words
def save_results(result_filename, vectors, words):
np.savetxt(result_filename, vectors)
with open(result_filename + "_words", 'w') as file:
for i in words:
file.write(i + "\n")
# function to load saved results
def load_results(result_filename, result_words_filename):
results = np.loadtxt(result_filename)
words = np.loadtxt(result_words_filename, dtype="str")
return results, words
# function to plot all the points in a graph
# if with_nearest_words is True both target_word and nearest_words are mandatory
def plot(vectors, words, with_nearest_words=False, target_word="", nearest_words=None):
w2v_df = pd.DataFrame(vectors, columns=['x1', 'x2'])
w2v_df['word'] = words
w2v_df = w2v_df[['word', 'x1', 'x2']]
_, ax = plt.subplots()
for word, x1, x2 in zip(w2v_df['word'], w2v_df['x1'], w2v_df['x2']):
if with_nearest_words:
if target_word == word:
ax.scatter(x1, x2, c='g')
elif word in nearest_words:
ax.scatter(x1, x2, c='r')
else:
ax.scatter(x1, x2, c='b')
else:
ax.scatter(x1, x2, c='b')
ax.annotate(word, (x1, x2))
PADDING = 1.0
x_axis_min = np.amin(vectors, axis=0)[0] - PADDING
y_axis_min = np.amin(vectors, axis=0)[1] - PADDING
x_axis_max = np.amax(vectors, axis=0)[0] + PADDING
y_axis_max =
|
np.amax(vectors, axis=0)
|
numpy.amax
|
import math,numpy
import scipy.misc
from PIL import Image
# gamma correction
started = Image.open('../images/lena512.bmp')
b =
|
numpy.asarray(started)
|
numpy.asarray
|
"""
Tester to figure which of two spiking methods is faster
<NAME>
August 23rd, 2021
The word today is 'Squatter's Rights'
"""
import pickle
import numpy as np
import time
"""
########################################################################################################################
SPIKING METHOD 1
Check if each neuron is a 'spiker' or not (nonspiking and spiking neurons are implemented differently)
"""
def spike1(spikeMask,net,numSteps,theta,thetaLast,spikes):
"""
Spiking and NonSpiking are handled differently
:param spikeMask: binary vector, 1==spiking
:param net: vector of neural states
:param numSteps: number of simulation steps
:param theta: vector of neural thresholds
:param thetaLast: vector of neural thresholds at the previous timestep
:param spikes: vector of spikes at the current timestep
:return:
"""
for step in range(numSteps): # run for the specified number of steps
for i in range(len(net)): # access each neuron
if spikeMask[i] > 0: # if spikeMask[i] is 1, then the neuron spikes
theta[i] = thetaLast[i] + 1.0*(-thetaLast[i]+1.0+1.0*net[i]) # filler for threshold dynamics
if spikeMask[i] > 0: # filler for checking if the membrane is above threshold
spikes[i] = 1 # write a spike
net[i] = 0 # reset the membrane
def testSpike1(netSize,numSteps,percentSpiking):
"""
Run spiking method 1 following certain constraints
:param netSize: number of neurons in the network
:param numSteps: number of simulation steps
:param percentSpiking: percent of network which is spiking (between 0 and 1)
:return: the average execution time for a single timestep
"""
spikeMask = np.zeros(netSize) # create a vector which stores if a neuron is spiking
i = 0 # initialize counter
while i < (netSize*percentSpiking): # set the first chunk of the mask as spiking
spikeMask[i] = 1 # spiking means the value must be 1
i += 1 # increment the counter
# create vectors to represent the different variables
net = np.zeros(netSize)
theta = np.zeros(netSize)
thetaLast = np.zeros(netSize)
spikes = np.zeros(netSize)
start = time.time() # start a timer
spike1(spikeMask,net,numSteps,theta,thetaLast,spikes) # run the method
end = time.time() # stop the timer
tDiff = end-start # get the elapsed time
return tDiff/numSteps
"""
########################################################################################################################
SPIKING METHOD 2
Assume all neurons are spiking (NonSpiking neurons just have an impossibly high threshold)
"""
def spike2(spikeMask, net, numSteps, theta, thetaLast, spikes):
"""
Spiking and NonSpiking are handled the same
:param spikeMask: binary vector, 1==spiking
:param net: vector of neural states
:param numSteps: number of simulation steps
:param theta: vector of neural thresholds
:param thetaLast: vector of neural thresholds at the previous timestep
:param spikes: vector of spikes at the current timestep
:return:
"""
for step in range(numSteps): # run for the specified number of steps
theta = thetaLast + 1.0 * (-thetaLast + 1.0 + 1.0 * net) # filler for threshold dynamics
for i in range(len(net)):
if spikeMask[i] > 0: # filler for checking if the membrane is above threshold
spikes[i] = 1 # write a spike
net[i] = 0 # reset the membrane
def testSpike2(netSize, numSteps, percentSpiking):
"""
Run spiking method 2 following certain constraints
:param netSize: number of neurons in the network
:param numSteps: number of simulation steps
:param percentSpiking: percent of network which is spiking (between 0 and 1)
:return: the average execution time for a single timestep
"""
spikeMask = np.zeros(netSize) # create a vector which stores if a neuron is spiking
i = 0 # initialize counter
while i < (netSize * percentSpiking): # set the first chunk of the mask as spiking
spikeMask[i] = 1 # spiking means the value must be 1
i += 1 # increment the counter
# create vectors to represent the different variables
net = np.zeros(netSize)
theta = np.zeros(netSize)
thetaLast = np.zeros(netSize)
spikes = np.zeros(netSize)
start = time.time() # start a timer
spike2(spikeMask, net, numSteps, theta, thetaLast, spikes) # run the method
end = time.time() # stop the timer
tDiff = end-start # get the elapsed time
return tDiff / numSteps
"""
########################################################################################################################
EXECUTION COMPARISONS
Benchmark the two methods, and see which way is faster (and how much faster)
"""
numSamples = 100 # number of timing samples
# Vary network size
netSize = np.logspace(0,5,num=numSamples)
numSteps = 10000
percentSpiking = 0.5
times1 = np.zeros(len(netSize))
times2 = np.zeros(len(netSize))
for i in range(len(netSize)):
print('Vary Size: %d/%d'%(i+1,numSamples))
times1[i] = testSpike1(int(netSize[i]),numSteps,percentSpiking)
times2[i] = testSpike2(int(netSize[i]), numSteps, percentSpiking)
min1SizeTime = np.min(times1)
max1SizeTime = np.max(times1)
min2SizeTime = np.min(times2)
max2SizeTime = np.max(times2)
pickleData = {'numSamples': numSamples,
'numSteps': numSteps,
'sizes': np.copy(netSize),
'sizePercentSplit': percentSpiking,
'sizeTimes1': np.copy(times1),
'sizeTimes2': np.copy(times2)
}
# Vary percent spiking
netSize = 1000
numSteps = 10000
percentSpiking = np.linspace(0.0,1.0,num=numSamples)
times1 = np.zeros(len(percentSpiking))
times2 = np.zeros(len(percentSpiking))
for i in range(len(percentSpiking)):
print('Vary Percent: %d/%d'%(i+1,numSamples))
times1[i] = testSpike1(netSize,numSteps,percentSpiking[i])
times2[i] = testSpike2(netSize, numSteps, percentSpiking[i])
min1PercentTime = np.min(times1)
max1PercentTime = np.max(times1)
min2PercentTime = np.min(times2)
max2PercentTime =
|
np.max(times2)
|
numpy.max
|
from typing import List
from tkinter import *
import numpy as np
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
import sympy as sp
from io import BytesIO
from PIL import Image, ImageTk
from pylatex import Document, Section, Subsection, Math, Matrix, VectorName, Package
Vector = List[float]
class Point:
def __init__(self, vector:Vector):
self.vector = vector
class MathSection:
def __init__(self, parent, d:dict):
self.parent = parent
self.d = d
self.canvasFigure = None
def on_latex(self):
expr = "$\displaystyle " + 'test' + "$"
        #This creates a BytesIO stream and saves the output of sympy.preview into it
f = BytesIO()
the_color = "{" + self.parent.window.cget('bg')[1:].upper()+"}"
sp.preview(expr, euler = False, preamble = self.strvar.get(),
viewer = "BytesIO", output = "ps", outputbuffer=f)
f.seek(0)
#Open the image as if it were a file. This works only for .ps!
img = Image.open(f)
#See note at the bottom
img.load(scale = 2)
#img = img.resize((int(img.size[0]/2),int(img.size[1]/2)),Image.BILINEAR)
photo = ImageTk.PhotoImage(img)
self.label.config(image = photo)
self.label.image = photo
f.close()
def addCanvas(self):
#self.canvas = self.parent.window #remove this and fix
self.frame1=Frame(self.parent.window,bg='#FFFFFF',width=200,height=200)
self.frame1.pack(expand=True, fill=BOTH, pady=50, side=TOP )
self.strvar = StringVar()
self.label = Label(self.parent.window)
self.label.pack(side=TOP)
#self.entry = Entry(self.parent.window, textvariable = self.strvar, width=200)
#self.entry.pack(side=TOP)
#self.button = Button(self.parent.window, text = "LaTeX!", command = self.on_latex)
#self.button.pack(side=TOP)
self.frame2=Frame(self.parent.window,bg='#FFFFFF',width=200,height=200)
self.frame2.pack(expand=True, fill=BOTH, side=TOP)
def addUI(self):
self.transformSliderLabel = Label(self.frame2,bg='#FFFFFF',text='Transform')
self.transformSliderLabel.config(font=('Arial', 14))
self.transformSliderLabel.grid( row=0,column=1 )
self.transformSlider = Scale(self.frame2, from_=0, to=self.parent.frames,
tickinterval=1, command = self.update, bg='#FFFFFF')
self.transformSlider.grid( row=1,column=1 )
self.elevationSliderLabel = Label(self.frame2, bg='#FFFFFF',text='Elevation')
self.elevationSliderLabel.config(font=('Arial', 14))
self.elevationSliderLabel.grid( row=0,column=2 )
self.elevationSlider = Scale(self.frame2, from_=-180, to=180,
tickinterval=1, command = self.updateElevation, bg='#FFFFFF')
self.elevationSlider.grid( row=1,column=2 )
self.elevationSlider.set(0)
self.angleSliderLabel = Label(self.frame2, bg='#FFFFFF',text='Angle')
self.angleSliderLabel.config(font=('Arial', 14))
self.angleSliderLabel.grid( row=0,column=3 )
self.angleSlider = Scale(self.frame2, from_=180, to=-180,
tickinterval=1, command = self.updateAngle, bg='#FFFFFF', orient=HORIZONTAL)
self.angleSlider.grid( row=1,column=3 )
self.angleSlider.set(0)
self.distanceSliderLabel = Label(self.frame2, bg='#FFFFFF',text='Distance')
self.distanceSliderLabel.config(font=('Arial', 14))
self.distanceSliderLabel.grid( row=0,column=4 )
self.distanceSlider = Scale(self.frame2, from_=1, to=20,
tickinterval=1, command = self.updateDistance, bg='#FFFFFF')
self.distanceSlider.grid( row=1,column=4 )
self.distanceSlider.set(5)
self.canvasFigure = FigureCanvasTkAgg(self.parent.figure,
master = self.frame2)
self.canvasFigure.get_tk_widget().grid( row=2,column=1,columnspan=3 )
#self.toolbar = NavigationToolbar2Tk(self.parent.window,
#self.parent.window)
#self.toolbar.update()
# placing the toolbar on the Tkinter window
#self.parent.window.get_tk_widget().pack()
self.canvasFigure.draw()
self.is_hidden = False
self.update(0)
def updateDistance(self, i):
i = int(i)
self.parent.dist = i
self.parent.ax.dist = self.parent.dist
self.canvasFigure.draw()
def updateElevation(self, i):
i = int(i)
self.parent.elevation = i
self.parent.ax.view_init(self.parent.elevation, self.parent.angle)
self.canvasFigure.draw()
def updateAngle(self, i):
i = int(i)
self.parent.angle = i
self.parent.ax.view_init(self.parent.elevation, self.parent.angle)
self.canvasFigure.draw()
def update(self, i):
i = int(i)
self.parent.matrix = np.array((self.parent.i_vector, self.parent.j_vector, self.parent.k_vector)).T
self.parent.plus_matrix = np.array((self.parent.i_plus_vector, self.parent.j_plus_vector, self.parent.k_plus_vector)).T
if self.canvasFigure != None:
try:
self.parent.quiver.remove()
self.parent.plus_quiver.remove()
            except Exception:
                pass  # quivers have not been drawn yet, nothing to remove
self.parent.matrix = (1 - self.parent.sigmoid(i)) * np.array((self.parent.i_origin, self.parent.j_origin, self.parent.k_origin)) + self.parent.sigmoid(i) * self.parent.matrix
self.parent.plus_matrix = (1 - self.parent.sigmoid(i)) * np.array((self.parent.i_origin, self.parent.j_origin, self.parent.k_origin)) + self.parent.sigmoid(i) * self.parent.plus_matrix
else:
self.parent.matrix = np.array((self.parent.i_origin, self.parent.j_origin, self.parent.k_origin)) + self.parent.matrix
self.parent.plus_matrix = np.array((self.parent.i_origin, self.parent.j_origin, self.parent.k_origin)) + self.parent.plus_matrix
# Compute a matrix that is in the middle between the full transformation matrix and the identity
self.parent.vector_location = np.array((self.parent.matrix.dot(self.parent.i_origin), self.parent.matrix.dot(self.parent.j_origin), self.parent.matrix.dot(self.parent.k_origin))).T
self.parent.quiver = self.parent.ax.quiver(0, 0, 0, self.parent.vector_location[0], self.parent.vector_location[1], self.parent.vector_location[2], length=1, color='r', arrow_length_ratio=0.1)
self.parent.plus_vector_location = np.array((self.parent.plus_matrix.dot(self.parent.i_origin), self.parent.plus_matrix.dot(self.parent.j_origin), self.parent.plus_matrix.dot(self.parent.k_origin))).T
self.parent.plus_quiver = self.parent.ax.quiver(0, 0, 0, self.parent.plus_vector_location[0], self.parent.plus_vector_location[1], self.parent.plus_vector_location[2], length=1, color='y', arrow_length_ratio=0.1)
# Set vector location - must transpose since we need U and V representing x and y components
        # of each vector respectively (without transposing, each column represents each unit vector)
self.parent.transform = np.array([self.parent.matrix.dot(k) for k in self.parent.x])
#ax.view_init((1 - self.parent.sigmoid(i)) * elevation, (1 - self.parent.sigmoid(i)) * angle)
if self.canvasFigure != None:
self.parent.scat._offsets3d = [self.parent.transform[:, 0], self.parent.transform[:, 1], self.parent.transform[:, 2]]
self.canvasFigure.draw()
a = self.parent.plus_vector_location
#with pylatex.config.active.change(indent=False):
doc = Document()
doc.packages.append(Package('geometry', options = ['paperwidth=6in','paperheight=2.5in']))
section = Section('Linear Combination')
subsection = Subsection('Using the dot product')
V1 = np.transpose(np.array([[1,1]]))
M1 = np.array((self.parent.i_vector, self.parent.j_vector)).T
math = Math(data=[Matrix(M1), 'dot', Matrix(V1),'=', Matrix(
|
np.dot(M1, V1)
|
numpy.dot
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import sys
from unittest import TestCase, skipUnless
import numpy as np
import pandas as pd
from prophet import Prophet
from prophet.serialize import model_to_json, model_from_json, PD_SERIES, PD_DATAFRAME
DATA = pd.read_csv(
os.path.join(os.path.dirname(__file__), 'data.csv'),
parse_dates=['ds'],
)
class TestSerialize(TestCase):
def test_simple_serialize(self):
m = Prophet()
days = 30
N = DATA.shape[0]
df = DATA.head(N - days)
m.fit(df)
future = m.make_future_dataframe(2, include_history=False)
fcst = m.predict(future)
model_str = model_to_json(m)
# Make sure json doesn't get too large in the future
self.assertTrue(len(model_str) < 200000)
z = json.loads(model_str)
self.assertEqual(z['__prophet_version'], '1.0')
m2 = model_from_json(model_str)
# Check that m and m2 are equal
self.assertEqual(m.__dict__.keys(), m2.__dict__.keys())
for k, v in m.__dict__.items():
if k in ['stan_fit', 'stan_backend']:
continue
if k == 'params':
self.assertEqual(v.keys(), m2.params.keys())
for kk, vv in v.items():
self.assertTrue(np.array_equal(vv, m2.params[kk]))
elif k in PD_SERIES and v is not None:
self.assertTrue(v.equals(m2.__dict__[k]))
elif k in PD_DATAFRAME and v is not None:
pd.testing.assert_frame_equal(v, m2.__dict__[k])
elif k == 'changepoints_t':
                self.assertTrue(np.array_equal(v, m2.__dict__[k]))
else:
self.assertEqual(v, m2.__dict__[k])
self.assertTrue(m2.stan_fit is None)
self.assertTrue(m2.stan_backend is None)
# Check that m2 makes the same forecast
future2 = m2.make_future_dataframe(2, include_history=False)
fcst2 = m2.predict(future2)
self.assertTrue(np.array_equal(fcst['yhat'].values, fcst2['yhat'].values))
def test_full_serialize(self):
# Construct a model with all attributes
holidays = pd.DataFrame({
'ds': pd.to_datetime(['2012-06-06', '2013-06-06']),
'holiday': ['seans-bday'] * 2,
'lower_window': [0] * 2,
'upper_window': [1] * 2,
})
# Test with holidays and country_holidays
m = Prophet(
holidays=holidays,
seasonality_mode='multiplicative',
changepoints=['2012-07-01', '2012-10-01', '2013-01-01'],
)
m.add_country_holidays(country_name='US')
m.add_seasonality(name='conditional_weekly', period=7, fourier_order=3,
prior_scale=2., condition_name='is_conditional_week')
m.add_seasonality(name='normal_monthly', period=30.5, fourier_order=5,
prior_scale=2.)
df = DATA.copy()
df['is_conditional_week'] = [0] * 255 + [1] * 255
m.add_regressor('binary_feature', prior_scale=0.2)
m.add_regressor('numeric_feature', prior_scale=0.5)
m.add_regressor(
'numeric_feature2', prior_scale=0.5, mode='multiplicative'
)
m.add_regressor('binary_feature2', standardize=True)
df['binary_feature'] = ['0'] * 255 + ['1'] * 255
df['numeric_feature'] = range(510)
df['numeric_feature2'] = range(510)
df['binary_feature2'] = [1] * 100 + [0] * 410
train = df.head(400)
test = df.tail(100)
m.fit(train)
future = m.make_future_dataframe(periods=100, include_history=False)
fcst = m.predict(test)
# Serialize!
m2 = model_from_json(model_to_json(m))
# Check that m and m2 are equal
self.assertEqual(m.__dict__.keys(), m2.__dict__.keys())
for k, v in m.__dict__.items():
if k in ['stan_fit', 'stan_backend']:
continue
if k == 'params':
self.assertEqual(v.keys(), m2.params.keys())
for kk, vv in v.items():
self.assertTrue(
|
np.array_equal(vv, m2.params[kk])
|
numpy.array_equal
|
#!/usr/bin/python
#-----------------------------------------------------------------------------------------------------------------------------------------
# Script Description:
# Script to take unconstrained moment tensor inversion result (e.g. from MTFIT) and mseed data and calculate moment magnitude from specified stations.
# The code currently relies on a moment tensor inversion of the event (for the radiation pattern) based on the output of MTFIT (see: https://djpugh.github.io/MTfit/)
# Script returns moment magnitude as calculated at each station, as well as combined value with uncertainty estimate.
# Input variables:
# Output variables:
# Created by <NAME>, 10th January 2018
#-----------------------------------------------------------------------------------------------------------------------------------------
# Import neccessary modules:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import obspy
import sys, os
import scipy.io as sio # For importing .mat MT solution data
from obspy.imaging.scripts.mopad import MomentTensor, BeachBall # For getting nodal planes for unconstrained moment tensors
from obspy.core.event.source import farfield # For calculating MT radiation patterns
import glob
# from scipy import fft
from scipy.signal import periodogram
from obspy.geodetics.base import gps2dist_azimuth # Function to calc distance and azimuth between two coordinates (see help(gps2DistAzimuth) for more info)
from obspy import UTCDateTime as UTCDateTime
import subprocess
import gc
from NonLinLocPy import read_nonlinloc # For reading NonLinLoc data (can install via pip)
from scipy.optimize import curve_fit
from mtspec import mtspec # For multi-taper spectral analysis
# ------------------- Define generally useful functions -------------------
def load_MT_dict_from_file(matlab_data_filename):
data=sio.loadmat(matlab_data_filename)
i=0
while True:
try:
# Load data UID from matlab file:
if data['Events'][0].dtype.descr[i][0] == 'UID':
uid=data['Events'][0][0][i][0]
if data['Events'][0].dtype.descr[i][0] == 'Probability':
MTp=data['Events'][0][0][i][0] # stored as a n length vector, the probability
if data['Events'][0].dtype.descr[i][0] == 'MTSpace':
MTs=data['Events'][0][0][i] # stored as a 6 by n array (6 as 6 moment tensor components)
i+=1
except IndexError:
break
try:
stations = data['Stations']
except KeyError:
stations = []
return uid, MTp, MTs, stations
def load_mseed(mseed_filename, filt_freqs=[]):
"""Function to load mseed from file. Returns stream, detrended.
filt_freqs - The high pass and low pass filter values to use (if specified)."""
st = obspy.read(mseed_filename)
st.detrend("demean")
if len(filt_freqs)>0:
st.filter('bandpass', freqmin=filt_freqs[0], freqmax=filt_freqs[1], corners=4)
return st
def force_stream_sample_alignment(st):
"""Function to force alignment if samples are out by less than a sample."""
for i in range(1, len(st)):
if np.abs(st[i].stats.starttime - st[0].stats.starttime) <= 1./st[i].stats.sampling_rate:
            st[i].stats.starttime = st[0].stats.starttime # Shift the start time so that all traces are aligned.
# if st[i].stats.sampling_rate == st[0].stats.sampling_rate:
# st[i].stats.sampling_rate = st[0].stats.sampling_rate
return st
def check_st_n_samp(st):
"""Function to check number of samples."""
min_n_samp = len(st[0].data)
for i in range(len(st)):
if len(st[i].data) < min_n_samp:
min_n_samp = len(st[i].data)
for i in range(len(st)):
if len(st[i].data) > min_n_samp:
st[i].data = st[i].data[0:min_n_samp]
return st
def relabel_one_and_two_channels_with_N_E(st):
"""Function to relabel channel ??1 and ??2 labels as ??N and ??E.
Note: This is not a formal correction, just a relabelling procedure."""
for i in range(len(st)):
if st[i].stats.channel[-1] == '1':
st[i].stats.channel = st[i].stats.channel[0:-1] + 'N'
elif st[i].stats.channel[-1] == '2':
st[i].stats.channel = st[i].stats.channel[0:-1] + 'E'
return st
def get_inst_resp_info_from_network_dataless(inventory_fname):
"""Function to get instrument response dictionary from network .dataless file.
Arguments:
    inventory_fname - Inventory filename (.dataless) containing all the network information. (str)
Returns:
inst_resp_dict - A dictionary containing all the instrument response information for each instrument
channel in the network. (dict)
"""
inv = obspy.read_inventory(inventory_fname)
inst_resp_dict = {}
for a in range(len(inv)):
network_code = inv.networks[a].code
inst_resp_dict[network_code] = {}
for b in range(len(inv.networks[a])):
station = inv.networks[a].stations[b].code
inst_resp_dict[network_code][station] = {}
for c in range(len(inv.networks[a].stations[b])):
channel = inv.networks[a].stations[b].channels[c].code
inst_resp_dict[network_code][station][channel] = {}
paz = inv.networks[a].stations[b].channels[c].response.get_paz()
inst_resp_dict[network_code][station][channel]['poles'] = paz.poles
inst_resp_dict[network_code][station][channel]['zeros'] = paz.zeros
inst_resp_dict[network_code][station][channel]['sensitivity'] = inv.networks[a].stations[b].channels[c].response.instrument_sensitivity.value
inst_resp_dict[network_code][station][channel]['gain'] = paz.normalization_factor #1.0
return inst_resp_dict
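# Usage sketch (hypothetical network/station/channel codes and filename):
#   inst_resp_dict = get_inst_resp_info_from_network_dataless("network.dataless")
#   poles = inst_resp_dict["XX"]["STA01"]["HHZ"]["poles"]   # poles for one channel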
def get_full_MT_array(mt):
full_MT = np.array( ([[mt[0],mt[3]/np.sqrt(2.),mt[4]/np.sqrt(2.)],
[mt[3]/np.sqrt(2.),mt[1],mt[5]/np.sqrt(2.)],
[mt[4]/
|
np.sqrt(2.)
|
numpy.sqrt
|
import pickle
import numpy as np
import scipy.io as sio
import CompoundLists
import tensorflow as tf
from PredictEncoding import leave_one_out_evaluation
from SmallParser import SmallDatasetParser, get_doubling_xy, get_x, read_data, read_nested_orths
from Processing import correct_slopes, decorrect_slopes, normalize_features, normalize_with_norms, \
denormalize_with_norms, normalize_total, split_train_test
'''
Warning: this is VERY CPU/disk intensive and may slow down / crash your computer.
The final output of the program will contain accuracy info, and separate files will be created for the random gene sets (x and y)
and the LOO prediction results. The genes will be printed before starting the LOO procedure.
A single execution will likely take several hours.
'''
def main():
tf.logging.set_verbosity(tf.logging.ERROR)
compound_list = CompoundLists.UNGENERAL_45 # Dan: I changed this line
# Change these lines to change prediction direction.
# NOTE: currently only supports rat vitro -> vitro and rat vitro -> rat vivo
# i.e. x_type should always be rat_vitro
x_type = "rat_vitro"
y_type = "human_vitro" # Dan: change here if necessary!
x_timepoints = 3
y_timepoints = 3
if y_type == 'rat_vivo':
y_timepoints = 4
x_satisfied = False
y_satisfied = False
# Only use these lines if you already have the data files. Selection process will then be disabled.
#x_satisfied = True
#og_X, data_compounds, gene_list_x, X_gene_variance = pickle.load(open('Data/RatInVitro/20/data_X20_1.p', 'rb'))
#y_satisfied = True
#og_Y, data_compounds, gene_list_y, Y_gene_variance = pickle.load(open('Data/HumanInVitro/20/data_20_1_human.p', 'rb'))
# Variances from ST gene list (steatosis, 50 genes)
target_x_var = 0.001386 # rat in vitro
target_y_var = 0.001325 # rat in vivo
if y_type == 'human_vitro':
target_y_var = 0.000986
deviation = 0.03 # variance should be at most 3% more than the target and at least 3% less than the target
x_var_low = target_x_var - target_x_var * deviation
x_var_up = target_x_var + target_x_var * deviation
y_var_low = target_y_var - target_y_var * deviation
y_var_up = target_y_var + target_y_var * deviation
print("X var range: {} - {}".format(x_var_low, x_var_up))
print("Y var range: {} - {}".format(y_var_low, y_var_up))
nested = True #if true, input file needs to be selected below (scroll down)
orthologs = False #change here as desired
# names of output files need to be manually adjusted (as desired)
# to create the 'core' of a nest (e.g. 20), nested must be set to False!
if nested == False:
for k in range(80,81): # desired number of genes
numb_genes = k
for j in range(99,100): # desired number (i.e. names) of sets
x_satisfied = False # set to True if you want to select only one domain (not recommended)
y_satisfied = False
file1 = "data_X%d"%(numb_genes)+"_%d"%(j) + ".p"
file2 = "data_%d"%(numb_genes)+"_%d"%(j) + "_human.p" # change to desired domain here!
while not x_satisfied or not y_satisfied:
og_X, og_Y, data_compounds, genes_x, genes_y = read_data(compound_list.copy(), x_type=x_type, y_type=y_type, \
gene_list='random', dataset="big", numb_genes=numb_genes, domain="both", orthologs=orthologs)
if not x_satisfied:
X, _ = normalize_total(og_X)
X_gene_means = np.zeros(numb_genes)
X_gene_variance = np.zeros(numb_genes)
for i in range(numb_genes):
X_gene_means[i] = np.mean(X[:, i * x_timepoints : i * x_timepoints + x_timepoints])
X_gene_variance[i] = np.var(X[:, i * x_timepoints : i * x_timepoints + x_timepoints])
if X_gene_variance.mean() >= x_var_low and X_gene_variance.mean() <= x_var_up:
print("X satisfied!")
x_satisfied = True
gene_list_x = genes_x
if not orthologs:
with open(file1, 'wb') as f:
pickle.dump([og_X, data_compounds, gene_list_x, X_gene_variance], f)
print("Dumped file ", file1)
print("X genes: ", gene_list_x)
elif orthologs: # occurs only if we select unnested orthologs (e.g. 20)
continue
if not y_satisfied:
Y, _ = normalize_total(og_Y)
Y_gene_means = np.zeros(numb_genes)
Y_gene_variance = np.zeros(numb_genes)
for i in range(numb_genes):
Y_gene_means[i] = np.mean(Y[:, i * y_timepoints : i * y_timepoints + y_timepoints])
Y_gene_variance[i] = np.var(Y[:, i * y_timepoints : i * y_timepoints + y_timepoints])
print("Y Mean:", Y_gene_means.mean())
print("Y Variance:", Y_gene_variance.mean())
if Y_gene_variance.mean() >= y_var_low and Y_gene_variance.mean() <= y_var_up:
print("Y satisfied!")
y_satisfied = True
gene_list_y = genes_y
if not orthologs:
with open(file2, 'wb') as f:
pickle.dump([og_Y, data_compounds, gene_list_y, Y_gene_variance], f)
print("Dumped file ", file1)
print("Y genes: ", gene_list_y)
elif orthologs:
x_satisfied = False # starting all over again
if x_satisfied and y_satisfied and orthologs:
with open(file1, 'wb') as f:
pickle.dump([og_X, data_compounds, gene_list_x, X_gene_variance], f)
with open(file2, 'wb') as f:
pickle.dump([og_Y, data_compounds, gene_list_y, Y_gene_variance], f)
else: # nested case
numb_genes = 15 # number of genes to add
numb_genes_core = 35 # size of set to build upon
numb_genes_out = numb_genes + numb_genes_core
for j in range(1,2): # desired number (and names) of output sets
y_satisfied = False # set True if only one domain is desired (not recommended)
x_satisfied = False
file_out1 = "data_X%d"%(numb_genes_out) + "_%d"%(j) + "_nest.p"
file_out2 = "data_%d"%(numb_genes_out) + "_%d"%(j) + "_human_nest.p" # Dan: adjust here if necessary
#file_in1 = "data_X%d"%(numb_genes_core) + "_%d"%(j) + "_nest.p"
#file_in2 = "data_%d"%(numb_genes_core) + "_%d"%(j) + "_human_nest.p"
file_in1 = "Data/RatInVitro/%d"%(numb_genes_core) + "/Nested/Random%d"%(numb_genes_core) + \
"/data_X%d"%(numb_genes_core) + "_%d"%(j) + "_nest.p"
file_in2 = "Data/HumanInVitro/%d"%(numb_genes_core) + "/Nested/Random%d"%(numb_genes_core) + \
"/data_%d"%(numb_genes_core) + "_%d"%(j) + "_human_nest.p" # change name here as desired
X_core, _, gene_list_x_core, variance_x_core = pickle.load(open(file_in1, "rb"))
Y_core, _, gene_list_y_core, variance_y_core = pickle.load(open(file_in2, "rb"))
while not x_satisfied or not y_satisfied:
og_X, og_Y, data_compounds, gene_list_x, gene_list_y = read_data(compound_list.copy(), y_type=y_type, \
gene_list='random', domain="both", numb_genes=numb_genes, orthologs=orthologs, genes_provided=gene_list_x_core)
if not x_satisfied:
X_temp1, _ = normalize_total(og_X)
X_temp2, _ = normalize_total(X_core)
X =
|
np.concatenate((X_temp1, X_temp2), axis=1)
|
numpy.concatenate
|
""" Testing group-level finite difference. """
import unittest
import numpy as np
from openmdao.core import Group, Problem
from openmdao.components import ParamComp, ExecComp
from openmdao.test.simple_comps import SimpleComp, SimpleArrayComp
from openmdao.test.util import assert_rel_error
class TestIndices(unittest.TestCase):
def test_indices(self):
size = 10
root = Group()
root.add('P1', ParamComp('x', np.zeros(size)))
root.add('C1', ExecComp('y = x * 2.', y=np.zeros(size//2), x=np.zeros(size//2)))
root.add('C2', ExecComp('y = x * 3.', y=np.zeros(size//2), x=np.zeros(size//2)))
root.connect('P1.x', "C1.x", src_indices=list(range(size//2)))
root.connect('P1.x', "C2.x", src_indices=list(range(size//2, size)))
prob = Problem(root)
prob.setup(check=False)
root.P1.unknowns['x'][0:size//2] += 1.0
root.P1.unknowns['x'][size//2:size] -= 1.0
prob.run()
assert_rel_error(self, root.C1.params['x'], np.ones(size//2), 0.0001)
assert_rel_error(self, root.C2.params['x'], -
|
np.ones(size//2)
|
numpy.ones
|
import tensorflow as tf
from time import time
import numpy as np
from random import shuffle
import os
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.inception_v3 import InceptionV3
from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from keras_efficientnets import EfficientNetB0
from DatasetProvider import DatasetProvider
from keras.optimizers import *
from sklearn.cluster import KMeans
from shutil import copy
from matplotlib import pyplot as plt
from matplotlib import ticker, cm
#from keras_adamw import AdamW, get_weight_decays, fill_dict_in_order
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
import sys
sys.path.append('../keras-auto-augment/')
from PIL import Image
from dataset import Cifar10ImageDataGenerator, SVHNImageDataGenerator, ImageNetImageDataGenerator
from keras import backend as K
import keras
import argparse
import glob
import pickle
import random
random.seed(10)
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Input, Conv2DTranspose, Flatten, Add, Activation, UpSampling2D, \
Dropout, BatchNormalization, GlobalAveragePooling2D, Layer, Lambda
from keras.models import Model
from utils import *
import sys
# Model and dataset directory:
batch_size = 256
cluster_idx = 6
figure_size = (15, 15)
checkpoint_idx = -1
#distance_range = 2
dataset = 'Oxford_IIIT_Pet'
dataset_dir = '../../datasets'
results_dir = './visualization'
number_of_similar_samples = 15
number_of_effective_clusters = 5
number_of_samples_per_cluster = 30
model_folder = 'backbone_EfficientNet_rbf_dims_64_centers_50_learning_rate_7.487e-05_weights_imagenet_augmentation_autogment_'
model_dir = os.path.join('models', dataset, model_folder)
# Import the model setup function and read the hyperparameters:
sys.path.append(model_dir)
from run_experiment import ModelSetup
def compute_embeddings_activations(tf_sess, img_input, tensors, x_train):
length = tensors[0].shape.as_list()[-1]
length1 = tensors[1].shape.as_list()[-1]
length2 = tensors[2].shape.as_list()[-1]
start, samples = 0, x_train.shape[0]
np_embeddings = -np.ones([samples, length])#, dtype=np.uint8)
np_activations = -np.ones([samples, length1])
np_predictions = -np.ones([samples, length2])
while start < samples:
embeds, acts, preds = tf_sess.run(tensors, feed_dict={img_input: x_train[start:start+batch_size, :, :, :]})
np_embeddings[start:start+batch_size, :] = embeds
np_activations[start:start+batch_size, :] = acts
np_predictions[start:start+batch_size, :] = preds
start += batch_size
print('Extract embeddings and predictions: {:05d}/{:05d}'.format(start, samples))
print('Sum of the unfilled elements in embeddings: {:d}'.format(np.sum(np_embeddings==-1)))
print('Sum of the unfilled elements in activations: {:d}'.format(np.sum(np_activations==-1)))
print('Sum of the unfilled elements in predictions: {:d}'.format(np.sum(np_predictions==-1)))
return np_embeddings, np_activations, np_predictions
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def augmentation_prepration():
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.depth = int(28)
args.width = int(10)
args.epochs = int(1)
args.cutout = False
args.auto_augment = True
return args
def plot_ground(w, distance_range=2):
fig = plt.figure(figsize=figure_size)
#ax = fig.add_subplot(111, aspect='equal')
left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
ax = fig.add_axes([left, bottom, width, height])
xlist = np.linspace(-distance_range, distance_range, 100)
ylist = np.linspace(-distance_range, distance_range, 100)
X, Y = np.meshgrid(xlist, ylist)
Z = np.sqrt(X ** 2 + Y ** 2)
cp = plt.contour(X, Y, Z, colors=6*['red'], linewidths=1.0)
ax.clabel(cp, inline=True, fontsize=16)
cp = plt.contourf(X, Y, 1-Z/w, cmap=cm.PuBu_r)
plt.axis('equal')
plt.axis('tight')
#plt.colorbar()
tick_values = 0.8*distance_range
xy_labels = np.around(np.abs(np.linspace(-tick_values, tick_values, 5)), decimals=1)
xy_ticks = np.linspace(-tick_values, tick_values, 5)
plt.xticks(xy_ticks, xy_labels)
plt.yticks(xy_ticks, xy_labels)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(24)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(24)
#ax.set_title('Cluster')
ax.set_xlabel('Distance to the cluster center', fontsize=24)
ax.set_ylabel('Distance to the cluster center', fontsize=24)
#plt.show()
#plt.show()
return ax, plt
def compute_metric_distance(a, b, r=None):
# Compute the distances between a sample a and samples in matrix b based on the trained metric r
a, b = np.squeeze(a), np.squeeze(b)
if r is None:
print('Define the distance based on dot product!')
def compute_dist(_a, _b, _r):
# Compute distance between two samples
distance = np.dot(_a, _b)
return distance
else:
print('Define the distance based on the trained metric!')
r = np.squeeze(r)
def compute_dist(_a, _b, _r):
# Compute distance between two samples
diff = np.expand_dims(_a - _b, 0)
distance = np.matmul(np.matmul(diff, np.diag(_r)), np.transpose(diff))
return distance
if len(a.shape) == 1 and len(b.shape) == 1:
distances = compute_dist(a, b, r)
elif len(a.shape) == 1 and len(b.shape) != 1:
distances = np.zeros(b.shape[0])
for i in range(b.shape[0]):
distances[i] = compute_dist(a, b[i, :], r)
elif len(a.shape) != 1 and len(b.shape) == 1:
distances = np.zeros(a.shape[0])
for i in range(a.shape[0]):
distances[i] = compute_dist(a[i, :], b, r)
else:
distances = np.zeros([a.shape[0], b.shape[0]])
for i in range(a.shape[0]):
for j in range(b.shape[0]):
distances[i, j] = compute_dist(a[i, :], b[j, :], r)
return np.squeeze(distances)
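# Hedged usage sketch (not part of the original script): compute_metric_distance falls back to
# plain dot products when r is None and otherwise applies the diagonal metric r, so it can be
# sanity-checked with small random arrays. The helper below is illustrative only and is never
# called anywhere in this file.
def _demo_compute_metric_distance():
    a = np.random.rand(64)
    b = np.random.rand(5, 64)
    r = np.ones(64)  # identity diagonal metric, i.e. squared Euclidean distance
    print(compute_metric_distance(a, b))        # dot-product similarities, shape (5,)
    print(compute_metric_distance(a, b, r=r))   # metric distances, shape (5,)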
def find_samples(np_embeddings, width, distance_range=2):
number_of_bins = 15
samples = [1, 0, 0, 3, 0, 5, 0, 9, 0, 12, 0, 0, 16, 0, 0, 0]
distances = np.sqrt(width*(1-np_embeddings))
#if distance_range < np.min(distances):
# distance_range = (np.min(distances)+np.max(distances))/2
bin_edges = np.linspace(0, distance_range, number_of_bins)
#samples_per_bin, = int(number_of_samples/number_of_bins), []
indecies, theta = [], []
for i in range(len(bin_edges)-1):
samples_per_bin = samples[i]
if samples_per_bin > 0:
found_indecies = list(np.where(np.bitwise_and(distances>bin_edges[i], distances<bin_edges[i+1]))[0])
shuffle(found_indecies)
found_indecies = found_indecies[:samples_per_bin]
indecies += list(found_indecies)
N = len(found_indecies)
theta += list(np.linspace(0, 2*np.pi*(1-1/np.max([1, N])), N)+(np.pi/18)*(np.random.rand(N)-0.5))
samples_per_bin = samples[i]
return np.array(indecies), np.array(theta)
def plot_images(ax, images, embeddings, w, theta, image_size=[64, 64]):
distances = np.sqrt(w*(1-embeddings))
for i in range(images.shape[0]):
x, y = distances[i]*np.cos(theta[i]), distances[i]*np.sin(theta[i])
im = np.array(Image.fromarray(images[i, :, :, :]).resize(image_size))
        imagebox = offsetbox.AnnotationBbox(offsetbox.OffsetImage(im), np.array([x, y]))
        ax.add_artist(imagebox)
import argparse
import cv2
import glob
import numpy as np
import os
import torch
from tqdm import tqdm
from archs.srcnn_style_arch import srcnn_style_net
from basicsr.utils.img_util import img2tensor
def ig(baseline_img, target_img, target_state_dict, total_step, conv_index):
""" Calculate Integrated Gradients of a single image
    Args:
        baseline_img (tensor): with the shape (1, 3, H, W)
        target_img (tensor): with the shape (1, 3, H, W)
        target_state_dict: state_dict of target_net
        total_step (int): number of interpolation steps between baseline and target
        conv_index (list): indices of the conv layers whose kernels are attributed
    Returns:
        ig_img (np.ndarray): one aggregated integrated-gradient score per kernel in the
            conv layers listed in conv_index
    """
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
total_gradient = 0
target_net = srcnn_style_net(scale=2)
target_net.eval()
target_net = target_net.to(device)
target_net.load_state_dict(target_state_dict)
for step in range(0, total_step + 1):
alpha = step / total_step
interpolated_img = baseline_img + alpha * (target_img - baseline_img)
target_net.zero_grad()
interpolated_output = target_net(interpolated_img)
loss = interpolated_output.sum()
loss.backward()
grad_list = []
# calculate the gradient of conv contained in conv_index
for idx in conv_index:
grad = target_net.features[idx].weight.grad
grad = grad.reshape(-1, 3, 3)
grad_list.append(grad)
grad_list = torch.cat(grad_list, dim=0)
total_gradient += grad_list
ig_img = torch.sum(torch.sum(abs(total_gradient), dim=1), dim=1)
    # Note that we do not multiply by (final_img - base_img)
return ig_img.cpu().numpy()
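# Hedged usage sketch (not in the original file): ig() takes 4-D float tensors of shape
# (1, 3, H, W) on the same device that the model will be moved to, the target state_dict, the
# number of interpolation steps and the conv indices to attribute. The random tensors and the
# freshly initialised state_dict below are placeholders, so only the output shape is meaningful.
def _demo_ig():
    dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dummy_baseline = torch.rand(1, 3, 32, 32, device=dev)
    dummy_target = torch.rand(1, 3, 32, 32, device=dev)
    dummy_state_dict = srcnn_style_net(scale=2).state_dict()
    scores = ig(dummy_baseline, dummy_target, dummy_state_dict, total_step=10, conv_index=[0, 2, 4])
    print(scores.shape)  # one aggregated score per 3x3 kernel in the selected conv layers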
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--target_model_path',
type=str,
default='experiments/srcnn_style/target_model.pth',
help='path of target model')
parser.add_argument(
'--bic_folder', type=str, default='datasets/Set14/Linear_LRbicx2', help='folder that contains bicubic image')
parser.add_argument(
'--blur_folder', type=str, default='datasets/Set14/Blur2_LRbicx2', help='folder that contains blurry image')
parser.add_argument(
'--noise_folder', type=str, default='datasets/Set14/LRbicx2_noise0.1', help='folder that contains noisy image')
parser.add_argument('--total_step', type=int, default=100)
parser.add_argument('--conv_index', type=list, default=[0, 2, 4, 6, 8, 10, 12, 15, 17], help='index of conv layer')
parser.add_argument(
'--record_filters_folder',
type=str,
default='results/Interpret/neuron-search/srcnn_style/Set14/ig',
help='folder that saves the sorted location index of discovered filters')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# configuration
target_model_path = args.target_model_path
bic_folder = args.bic_folder
blur_folder = args.blur_folder
noise_folder = args.noise_folder
total_step = args.total_step
conv_index = args.conv_index
record_filters_folder = args.record_filters_folder
os.makedirs(record_filters_folder, exist_ok=True)
    # load the target (finetuned) net state_dict
target_net_state_dict = torch.load(target_model_path)['params_ema']
bic_img_list = sorted(glob.glob(os.path.join(bic_folder, '*')))
blur_img_list = sorted(glob.glob(os.path.join(blur_folder, '*')))
noise_img_list = sorted(glob.glob(os.path.join(noise_folder, '*')))
# deal noisy imgs
ig_average_noisy = 0.0
pbar = tqdm(total=len(noise_img_list), desc='')
for img_idx, path in enumerate(noise_img_list):
# read image
imgname = os.path.basename(path)
basename, _ = os.path.splitext(imgname)
pbar.set_description(f'Read {basename}')
pbar.update(1)
noisy_img = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
noisy_img = img2tensor(noisy_img).unsqueeze(0).to(device)
bic_img = cv2.imread(bic_img_list[img_idx], cv2.IMREAD_COLOR).astype(np.float32) / 255.
bic_img = torch.from_numpy(np.transpose(bic_img[:, :, [2, 1, 0]], (2, 0, 1))).float()
bic_img = bic_img.unsqueeze(0).to(device)
# use ig for a single image
ig_noisy = ig(bic_img, noisy_img, target_net_state_dict, total_step, conv_index)
ig_average_noisy += np.array(ig_noisy)
sorted_noisy_location = np.argsort(ig_average_noisy)[::-1]
save_noisy_filter_txt = os.path.join(record_filters_folder, 'noise_index.txt')
np.savetxt(save_noisy_filter_txt, sorted_noisy_location, delimiter=',', fmt='%d')
pbar.close()
# deal blurry imgs
ig_average_blurry = 0.0
print('Now we sort the filters for blur!')
pbar = tqdm(total=len(blur_img_list), desc='')
for img_idx, path in enumerate(blur_img_list):
# read image
imgname = os.path.basename(path)
basename, _ = os.path.splitext(imgname)
pbar.set_description(f'Read {basename}')
pbar.update(1)
blurry_img = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
        blurry_img = torch.from_numpy(np.transpose(blurry_img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# -*- coding: utf-8 -*-
"""Visualize log-frequency shift."""
# https://dsp.stackexchange.com/q/78622/50076 ################################
import numpy as np
from kymatio.numpy import Scattering1D
from kymatio.visuals import imshow, make_gif
from kymatio.toolkit import fdts
import matplotlib.pyplot as plt
#%% Make CWT object ##########################################################
N = 2048
cwt = Scattering1D(shape=N, J=7, Q=8, average=False, out_type='list',
r_psi=.85, oversampling=999, max_order=1)
#%% Make signals & take CWT ##################################################
x0 = fdts(N, n_partials=4, seg_len=N//4, f0=N/12)[0]
x1 = fdts(N, n_partials=4, seg_len=N//4, f0=N/20)[0]
Wx0 = np.array([c['coef'].squeeze() for c in cwt(x0)[1:]])
Wx1 = np.array([c['coef'].squeeze() for c in cwt(x1)[1:]])
#%% Make GIF #################################################################
fig0, ax0 = plt.subplots(1, 2, figsize=(12, 5))
fig1, ax1 = plt.subplots(1, 2, figsize=(12, 5))
# imshows
kw = dict(abs=1, ticks=0, show=0)
imshow(Wx0, ax=ax0[1], fig=fig0, **kw)
imshow(Wx1, ax=ax1[1], fig=fig1, **kw)
# plots
s, e = N//3, -N//3 # zoom
ax0[0].plot(x0[s:e])
ax1[0].plot(x1[s:e])
# ticks & ylims
mx = max(np.abs(x0))
from __future__ import division, print_function
import numpy as np
from bct.utils import BCTParamError, binarize
motiflib = 'motif34lib.mat'
# FIXME there may be some subtle bugs here
def find_motif34(m, n=None):
'''
This function returns all motif isomorphs for a given motif id and
class (3 or 4). The function also returns the motif id for a given
motif matrix
1. Input: Motif_id, e.g. 1 to 13, if class is 3
Motif_class, number of nodes, 3 or 4.
Output: Motif_matrices, all isomorphs for the given motif
2. Input: Motif_matrix e.g. [0 1 0; 0 0 1; 1 0 0]
Output Motif_id e.g. 1 to 13, if class is 3
Parameters
----------
m : int | matrix
In use case 1, a motif_id which is an integer.
In use case 2, the entire matrix of the motif
(e.g. [0 1 0; 0 0 1; 1 0 0])
n : int | None
In use case 1, the motif class, which is the number of nodes. This is
either 3 or 4.
In use case 2, None.
Returns
-------
M : np.ndarray | int
In use case 1, returns all isomorphs for the given motif
In use case 2, returns the motif_id for the specified motif matrix
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
z = (0,)
if n == 3:
mot = io.loadmat(fname)
m3 = mot['m3']
id3 = mot['id3'].squeeze()
ix, = np.where(id3 == m)
M = np.zeros((3, 3, len(ix)))
for i, ind in enumerate(ix):
M[:, :, i] = np.reshape(np.concatenate(
(z, m3[ind, 0:3], z, m3[ind, 3:6], z)), (3, 3))
elif n == 4:
mot = io.loadmat(fname)
m4 = mot['m4']
id4 = mot['id4'].squeeze()
ix, = np.where(id4 == m)
M = np.zeros((4, 4, len(ix)))
for i, ind in enumerate(ix):
M[:, :, i] = np.reshape(np.concatenate(
(z, m4[ind, 0:4], z, m4[ind, 4:8], z, m4[ind, 8:12], z)), (4, 4))
elif n is None:
try:
m = np.array(m)
except TypeError:
raise BCTParamError('motif matrix must be an array-like')
if m.shape[0] == 3:
M, = np.where(motif3struct_bin(m))
elif m.shape[0] == 4:
M, = np.where(motif4struct_bin(m))
else:
raise BCTParamError('motif matrix must be 3x3 or 4x4')
else:
raise BCTParamError('Invalid motif class, must be 3, 4, or None')
return M
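# Hedged usage sketch (not part of bct): find_motif34 is driven in two ways. Use case 1 maps a
# motif id and class to all of its isomorph matrices and needs motif34lib.mat to sit next to
# this module; use case 2 (passing a motif matrix) is shown as a comment only, since the FIXME
# above warns about subtle bugs on that path. The helper is illustrative and never called.
def _demo_find_motif34():
    isomorphs = find_motif34(1, 3)  # use case 1: all 3-node isomorphs of motif id 1
    print(isomorphs.shape)          # (3, 3, number_of_isomorphs)
    # use case 2 would look like: find_motif34(np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))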
def make_motif34lib():
'''
This function generates the motif34lib.mat library required for all
other motif computations. Not to be called externally.
'''
from scipy import io
import os
def motif3generate():
n = 0
M = np.zeros((54, 6), dtype=bool) # isomorphs
        # canonical labels (predecessors of IDs)
CL = np.zeros((54, 6), dtype=np.uint8)
cl = np.zeros((6,), dtype=np.uint8)
for i in range(2**6): # loop through all subgraphs
m = '{0:b}'.format(i)
m = str().zfill(6 - len(m)) + m
G = np.array(((0, m[2], m[4]), (m[0], 0, m[5]),
(m[1], m[3], 0)), dtype=int)
ko = np.sum(G, axis=1)
ki = np.sum(G, axis=0)
if np.all(ko + ki): # if subgraph weakly connected
u = np.array((ko, ki)).T
cl.flat = u[np.lexsort((ki, ko))]
CL[n, :] = cl # assign motif label to isomorph
M[n, :] = np.array((G.T.flat[1:4], G.T.flat[5:8])).flat
n += 1
# convert CLs into motif IDs
_, ID = np.unique(
CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
ID += 1
# convert IDs into sporns & kotter classification
id_mika = (1, 3, 4, 6, 7, 8, 11)
id_olaf = (-3, -6, -1, -11, -4, -7, -8)
for mika, olaf in zip(id_mika, id_olaf):
ID[ID == mika] = olaf
ID = np.abs(ID)
ix = np.argsort(ID)
ID = ID[ix] # sort IDs
M = M[ix, :] # sort isomorphs
N = np.squeeze(np.sum(M, axis=1)) # number of edges
Mn = np.array(np.sum(np.tile(np.power(10, np.arange(5, -1, -1)),
(M.shape[0], 1)) * M, axis=1), dtype=np.uint32)
return M, Mn, ID, N
def motif4generate():
n = 0
M = np.zeros((3834, 12), dtype=bool) # isomorphs
CL = np.zeros((3834, 16), dtype=np.uint8) # canonical labels
cl = np.zeros((16,), dtype=np.uint8)
for i in range(2**12): # loop through all subgraphs
m = '{0:b}'.format(i)
m = str().zfill(12 - len(m)) + m
G = np.array(((0, m[3], m[6], m[9]), (m[0], 0, m[7], m[10]),
(m[1], m[4], 0, m[11]), (m[2], m[5], m[8], 0)), dtype=int)
Gs = G + G.T
v = Gs[0, :]
for j in range(2):
v = np.any(Gs[v != 0, :], axis=0) + v
if np.all(v): # if subgraph weakly connected
G2 = np.dot(G, G) != 0
ko = np.sum(G, axis=1)
ki = np.sum(G, axis=0)
ko2 = np.sum(G2, axis=1)
ki2 = np.sum(G2, axis=0)
u = np.array((ki, ko, ki2, ko2)).T
cl.flat = u[np.lexsort((ko2, ki2, ko, ki))]
CL[n, :] = cl # assign motif label to isomorph
M[n, :] = np.array((G.T.flat[1:5], G.T.flat[6:10],
G.T.flat[11:15])).flat
n += 1
# convert CLs into motif IDs
_, ID = np.unique(
CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
ID += 1
ix = np.argsort(ID)
ID = ID[ix] # sort IDs
M = M[ix, :] # sort isomorphs
N = np.sum(M, axis=1) # number of edges
Mn = np.array(np.sum(np.tile(np.power(10, np.arange(11, -1, -1)),
(M.shape[0], 1)) * M, axis=1), dtype=np.uint64)
return M, Mn, ID, N
dir = os.path.dirname(__file__)
fname = os.path.join(dir, motiflib)
if os.path.exists(fname):
print("motif34lib already exists")
return
m3, m3n, id3, n3 = motif3generate()
m4, m4n, id4, n4 = motif4generate()
io.savemat(fname, mdict={'m3': m3, 'm3n': m3n, 'id3': id3, 'n3': n3,
'm4': m4, 'm4n': m4n, 'id4': id4, 'n4': n4})
def motif3funct_bin(A):
'''
Functional motifs are subsets of connection patterns embedded within
anatomical motifs. Motif frequency is the frequency of occurrence of
motifs around a node.
Parameters
----------
A : NxN np.ndarray
binary directed connection matrix
Returns
-------
F : 13xN np.ndarray
motif frequency matrix
f : 13x1 np.ndarray
motif frequency vector (averaged over all nodes)
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3 = mot['m3']
id3 = mot['id3'].squeeze()
n3 = mot['n3'].squeeze()
n = len(A) # number of vertices in A
f = np.zeros((13,)) # motif count for whole graph
F = np.zeros((13, n)) # motif frequency
A = binarize(A, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 2):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
# v2: neighbors of v1 (>u)
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
a = np.array((A[v1, u], A[v2, u], A[u, v1],
                              A[v2, v1], A[u, v2], A[v1, v2]))
# find all contained isomorphs
ix = (np.dot(m3, a) == n3)
id = id3[ix] - 1
# unique motif occurrences
idu, jx = np.unique(id, return_index=True)
jx = np.append((0,), jx + 1)
mu = len(idu) # number of unique motifs
f2 = np.zeros((mu,))
for h in range(mu): # for each unique motif
f2[h] = jx[h + 1] - jx[h] # and frequencies
# then add to a cumulative count
f[idu] += f2
# numpy indexing is teh sucks :(
F[idu, u] += f2
F[idu, v1] += f2
F[idu, v2] += f2
return f, F
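# Hedged usage sketch (not part of bct): motif frequencies of a small random binary directed
# graph. The size and density below are arbitrary, and motif34lib.mat must already exist.
def _demo_motif3funct_bin():
    rng = np.random.RandomState(0)
    A = (rng.rand(10, 10) < 0.3).astype(int)
    np.fill_diagonal(A, 0)
    f, F = motif3funct_bin(A)
    print(f.shape, F.shape)  # (13,) graph-wide motif counts, (13, 10) per-node frequencies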
def motif3funct_wei(W):
'''
Functional motifs are subsets of connection patterns embedded within
anatomical motifs. Motif frequency is the frequency of occurrence of
motifs around a node. Motif intensity and coherence are weighted
generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weights between 0 and 1)
Returns
-------
I : 13xN np.ndarray
motif intensity matrix
Q : 13xN np.ndarray
motif coherence matrix
F : 13xN np.ndarray
motif frequency matrix
Notes
-----
Average intensity and coherence are given by I./F and Q./F.
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3 = mot['m3']
id3 = mot['id3'].squeeze()
n3 = mot['n3'].squeeze()
n = len(W)
I = np.zeros((13, n)) # intensity
Q = np.zeros((13, n)) # coherence
F = np.zeros((13, n)) # frequency
A = binarize(W, copy=True) # create binary adjmat
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 2):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
# v2: neighbors of v1 (>u)
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
a = np.array((A[v1, u], A[v2, u], A[u, v1],
A[v2, v1], A[u, v2], A[v1, v2]))
ix = (np.dot(m3, a) == n3)
m = np.sum(ix)
w = np.array((W[v1, u], W[v2, u], W[u, v1],
W[v2, v1], W[u, v2], W[v1, v2]))
M = m3[ix, :] * np.tile(w, (m, 1))
id = id3[ix] - 1
l = n3[ix]
x = np.sum(M, axis=1) / l # arithmetic mean
M[M == 0] = 1 # enable geometric mean
i = np.prod(M, axis=1)**(1 / l) # intensity
q = i / x # coherence
# unique motif occurrences
idu, jx = np.unique(id, return_index=True)
jx = np.append((0,), jx + 1)
mu = len(idu) # number of unique motifs
i2, q2, f2 = np.zeros((3, mu))
for h in range(mu):
i2[h] = np.sum(i[jx[h] + 1:jx[h + 1] + 1])
q2[h] = np.sum(q[jx[h] + 1:jx[h + 1] + 1])
f2[h] = jx[h + 1] - jx[h]
# then add to cumulative count
I[idu, u] += i2
I[idu, v1] += i2
I[idu, v2] += i2
Q[idu, u] += q2
Q[idu, v1] += q2
Q[idu, v2] += q2
F[idu, u] += f2
F[idu, v1] += f2
F[idu, v2] += f2
return I, Q, F
def motif3struct_bin(A):
'''
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node.
Parameters
----------
A : NxN np.ndarray
binary directed connection matrix
Returns
-------
F : 13xN np.ndarray
motif frequency matrix
f : 13x1 np.ndarray
motif frequency vector (averaged over all nodes)
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3n = mot['m3n']
id3 = mot['id3'].squeeze()
n = len(A) # number of vertices in A
f = np.zeros((13,)) # motif count for whole graph
F = np.zeros((13, n)) # motif frequency
A = binarize(A, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 2):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
# v2: neighbors of v1 (>u)
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
a = np.array((A[v1, u], A[v2, u], A[u, v1],
A[v2, v1], A[u, v2], A[v1, v2]))
s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
ix = id3[np.squeeze(s == m3n)] - 1
F[ix, u] += 1
F[ix, v1] += 1
F[ix, v2] += 1
f[ix] += 1
return f, F
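# Hedged worked example (not part of bct): motif3struct_bin identifies each 3-node subgraph by
# packing its six possible edges into a base-10 code and looking that code up in m3n. For the
# edge vector a = (1, 0, 1, 0, 0, 1) the code is 1*10**5 + 1*10**2 + 1*10**0 = 100101.
def _demo_motif3_code():
    a = np.array((1, 0, 1, 0, 0, 1))
    code = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
    print(code)  # 100101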
def motif3struct_wei(W):
'''
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node. Motif intensity
and coherence are weighted generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weights between 0 and 1)
Returns
-------
I : 13xN np.ndarray
motif intensity matrix
Q : 13xN np.ndarray
motif coherence matrix
F : 13xN np.ndarray
motif frequency matrix
Notes
-----
Average intensity and coherence are given by I./F and Q./F.
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3 = mot['m3']
m3n = mot['m3n']
id3 = mot['id3'].squeeze()
n3 = mot['n3'].squeeze()
n = len(W) # number of vertices in W
I = np.zeros((13, n)) # intensity
Q = np.zeros((13, n)) # coherence
F = np.zeros((13, n)) # frequency
A = binarize(W, copy=True) # create binary adjmat
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 2):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
# v2: neighbors of v1 (>u)
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
a = np.array((A[v1, u], A[v2, u], A[u, v1],
                              A[v2, v1], A[u, v2], A[v1, v2]))
s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
ix = np.squeeze(s == m3n)
w = np.array((W[v1, u], W[v2, u], W[u, v1],
W[v2, v1], W[u, v2], W[v1, v2]))
M = w * m3[ix, :]
id = id3[ix] - 1
l = n3[ix]
x = np.sum(M, axis=1) / l # arithmetic mean
M[M == 0] = 1 # enable geometric mean
i = np.prod(M, axis=1)**(1 / l) # intensity
q = i / x # coherence
# add to cumulative counts
I[id, u] += i
I[id, v1] += i
I[id, v2] += i
Q[id, u] += q
Q[id, v1] += q
Q[id, v2] += q
F[id, u] += 1
F[id, v1] += 1
                F[id, v2] += 1
return I, Q, F
def motif4funct_bin(A):
'''
Functional motifs are subsets of connection patterns embedded within
anatomical motifs. Motif frequency is the frequency of occurrence of
motifs around a node.
Parameters
----------
A : NxN np.ndarray
binary directed connection matrix
Returns
-------
F : 199xN np.ndarray
motif frequency matrix
f : 199x1 np.ndarray
motif frequency vector (averaged over all nodes)
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m4 = mot['m4']
id4 = mot['id4'].squeeze()
n4 = mot['n4'].squeeze()
n = len(A)
f = np.zeros((199,))
F = np.zeros((199, n)) # frequency
A = binarize(A, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 3):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
vz = np.max((v1, v2)) # vz: largest rank node
# v3: all neighbors of v2 (>u)
V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1])
V3[V2] = 0 # not already in V1 and V2
# and all neighbors of v1 (>v2)
V3 = np.logical_or(
np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3)
V3[V1] = 0 # not already in V1
# and all neighbors of u (>vz)
V3 = np.logical_or(
np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3)
for v3 in np.where(V3)[0]:
a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1], A[v2, v1],
A[v3, v1], A[u, v2], A[v1, v2], A[
v3, v2], A[u, v3], A[v1, v3],
A[v2, v3]))
ix = (np.dot(m4, a) == n4) # find all contained isomorphs
id = id4[ix] - 1
# unique motif occurrences
idu, jx = np.unique(id, return_index=True)
                    jx = np.append((0,), jx + 1)
mu = len(idu) # number of unique motifs
f2 = np.zeros((mu,))
for h in range(mu):
f2[h] = jx[h + 1] - jx[h]
# add to cumulative count
f[idu] += f2
F[idu, u] += f2
F[idu, v1] += f2
F[idu, v2] += f2
F[idu, v3] += f2
return f, F
def motif4funct_wei(W):
'''
Functional motifs are subsets of connection patterns embedded within
anatomical motifs. Motif frequency is the frequency of occurrence of
motifs around a node. Motif intensity and coherence are weighted
generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weights between 0 and 1)
Returns
-------
I : 199xN np.ndarray
motif intensity matrix
Q : 199xN np.ndarray
motif coherence matrix
F : 199xN np.ndarray
motif frequency matrix
Notes
-----
Average intensity and coherence are given by I./F and Q./F.
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m4 = mot['m4']
id4 = mot['id4'].squeeze()
n4 = mot['n4'].squeeze()
n = len(W)
I = np.zeros((199, n)) # intensity
Q = np.zeros((199, n)) # coherence
F = np.zeros((199, n)) # frequency
A = binarize(W, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 3):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
vz = np.max((v1, v2)) # vz: largest rank node
# v3: all neighbors of v2 (>u)
V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1])
V3[V2] = 0 # not already in V1 and V2
# and all neighbors of v1 (>v2)
V3 = np.logical_or(
np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3)
V3[V1] = 0 # not already in V1
# and all neighbors of u (>vz)
V3 = np.logical_or(
np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3)
for v3 in np.where(V3)[0]:
a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1], A[v2, v1],
A[v3, v1], A[u, v2], A[v1, v2], A[
v3, v2], A[u, v3], A[v1, v3],
A[v2, v3]))
ix = (np.dot(m4, a) == n4) # find all contained isomorphs
w = np.array((W[v1, u], W[v2, u], W[v3, u], W[u, v1], W[v2, v1],
W[v3, v1], W[u, v2], W[v1, v2], W[
v3, v2], W[u, v3], W[v1, v3],
W[v2, v3]))
m = np.sum(ix)
M = m4[ix, :] * np.tile(w, (m, 1))
id = id4[ix] - 1
l = n4[ix]
x = np.sum(M, axis=1) / l # arithmetic mean
M[M == 0] = 1 # enable geometric mean
i = np.prod(M, axis=1)**(1 / l) # intensity
q = i / x # coherence
# unique motif occurrences
idu, jx = np.unique(id, return_index=True)
jx = np.append((0,), jx + 1)
mu = len(idu) # number of unique motifs
i2, q2, f2 = np.zeros((3, mu))
for h in range(mu):
i2[h] = np.sum(i[jx[h] + 1:jx[h + 1] + 1])
q2[h] = np.sum(q[jx[h] + 1:jx[h + 1] + 1])
f2[h] = jx[h + 1] - jx[h]
# then add to cumulative count
I[idu, u] += i2
I[idu, v1] += i2
I[idu, v2] += i2
I[idu, v3] += i2
Q[idu, u] += q2
Q[idu, v1] += q2
Q[idu, v2] += q2
Q[idu, v3] += q2
F[idu, u] += f2
F[idu, v1] += f2
F[idu, v2] += f2
F[idu, v3] += f2
return I, Q, F
def motif4struct_bin(A):
'''
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node.
Parameters
----------
A : NxN np.ndarray
binary directed connection matrix
Returns
-------
F : 199xN np.ndarray
motif frequency matrix
f : 199x1 np.ndarray
motif frequency vector (averaged over all nodes)
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m4n = mot['m4n']
id4 = mot['id4'].squeeze()
n = len(A)
f = np.zeros((199,))
F = np.zeros((199, n)) # frequency
A = binarize(A, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 3):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
            V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 11:24:29 2018
@author: mayank
"""
import numpy as np
#import pandas as pd
#from time import time
from sklearn.model_selection import StratifiedKFold
#import os
#from sklearn.cluster import KMeans
from sklearn.utils import resample
from scipy.stats import mode
#from sklearn.metrics import f1_score
from sklearn.neighbors import NearestNeighbors
from numpy.matlib import repmat
from sklearn.metrics.pairwise import linear_kernel,rbf_kernel,manhattan_distances,polynomial_kernel,sigmoid_kernel,cosine_similarity,laplacian_kernel,paired_euclidean_distances,pairwise_distances
from sklearn.cluster import KMeans,MiniBatchKMeans
from sklearn.decomposition import IncrementalPCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from numpy.linalg import eigh
#%%
#from scipy.io import loadmat
#from sklearn.decomposition import IncrementalPCA
#from sklearn import mixture
class MCM:
def __init__(self, C1 = 1.0, C2 = 1e-05, C3 =1.0, C4 =1.0, problem_type ='classification', algo_type ='MCM' ,kernel_type = 'rbf', gamma = 1e-05, epsilon = 0.1,
feature_ratio = 1.0, sample_ratio = 1.0, feature_sel = 'random', n_ensembles = 1,
batch_sz = 128, iterMax1 = 1000, iterMax2 = 1, eta = 0.01, tol = 1e-08, update_type = 'adam',
reg_type = 'l1', combine_type = 'concat', class_weighting = 'balanced', upsample1 = False,
PV_scheme = 'kmeans', n_components = 100, do_pca_in_selection = False ):
self.C1 = C1 #hyperparameter 1 #loss function parameter
self.C2 = C2 #hyperparameter 2 #when using L1 or L2 or ISTA penalty
        self.C3 = C3 #hyperparameter 3 #elastic net mixing parameter (should be between 0 and 1) or margin penalty weight (need not be between 0 and 1)
self.C4 = C4 #hyperparameter for final regressor or classifier used to ensemble when concatenating
# the outputs of previos layer of classifier or regressors
self.problem_type = problem_type #{0:'classification', 1:'regression'}
self.algo_type = algo_type #{0:MCM,1:'LSMCM'}
self.kernel_type = kernel_type #{0:'linear', 1:'rbf', 2:'sin', 3:'tanh', 4:'TL1', 5:'linear_primal', 6:'rff_primal', 7:'nystrom_primal'}
self.gamma = gamma #hyperparameter3 (kernel parameter for non-linear classification or regression)
self.epsilon = epsilon #hyperparameter4 ( It specifies the epsilon-tube within which
#no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value.)
self.n_ensembles = n_ensembles #number of ensembles to be learnt, if setting n_ensembles > 1 then keep the sample ratio to be around 0.7
self.feature_ratio = feature_ratio #percentage of features to select for each PLM
self.sample_ratio = sample_ratio #percentage of data to be selected for each PLM
self.batch_sz = batch_sz #batch_size
self.iterMax1 = iterMax1 #max number of iterations for inner SGD loop
self.iterMax2 = iterMax2 #max number of iterations for outer SGD loop
self.eta = eta #initial learning rate
self.tol = tol #tolerance to cut off SGD
self.update_type = update_type #{0:'sgd',1:'momentum',3:'nesterov',4:'rmsprop',5:'adagrad',6:'adam'}
self.reg_type = reg_type #{0:'l1', 1:'l2', 2:'en', 4:'ISTA', 5:'M'}#ISTA: iterative soft thresholding (proximal gradient), M: margin + l1
self.feature_sel = feature_sel #{0:'sliding', 1:'random'}
self.class_weighting = class_weighting #{0:'average', 1:'balanced'}
self.combine_type = combine_type #{0:'concat',1:'average',2:'mode'}
self.upsample1 = upsample1 #{0:False, 1:True}
self.PV_scheme = PV_scheme # {0:'kmeans',1:'renyi'}
self.n_components = n_components #number of components to choose as Prototype Vector set, or the number of features to form for kernel_approximation as in RFF and Nystroem
self.do_pca_in_selection = do_pca_in_selection #{0:False, 1:True}
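    # Hedged usage sketch (kept as a comment so the class body is unchanged); the hyperparameter
    # values below are illustrative only:
    #   clf = MCM(C1=1.0, C2=1e-05, problem_type='classification', algo_type='MCM',
    #             kernel_type='rbf', gamma=1e-03, reg_type='l1', n_ensembles=1)
    #   W_all, sample_idx, feature_idx, me_all, std_all, subset_all = clf.fit(xTrain, yTrain)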
def add_bias(self,xTrain):
N = xTrain.shape[0]
if(xTrain.size!=0):
xTrain=np.hstack((xTrain,np.ones((N,1))))
return xTrain
def standardize(self,xTrain):
me=np.mean(xTrain,axis=0)
std_dev=np.std(xTrain,axis=0)
#remove columns with zero std
idx=(std_dev!=0.0)
# print(idx.shape)
xTrain[:,idx]=(xTrain[:,idx]-me[idx])/std_dev[idx]
return xTrain,me,std_dev
def generate_samples(self,X_orig,old_imbalance_ratio,new_imbalance_ratio):
N=X_orig.shape[0]
M=X_orig.shape[1]
neighbors_thresh=10
new_samples=int(new_imbalance_ratio/old_imbalance_ratio*N - N)
#each point must generate these many samples
new_samples_per_point_orig=new_imbalance_ratio/old_imbalance_ratio - 1
new_samples_per_point=int(new_imbalance_ratio/old_imbalance_ratio - 1)
#check if the number of samples each point has to generate is > 1
X1=np.zeros((0,M))
if(new_samples_per_point_orig>0 and new_samples_per_point_orig<=1):
idx_samples=resample(np.arange(0,N), n_samples=int(N*new_samples_per_point_orig), random_state=1,replace=False)
X=X_orig[idx_samples,]
new_samples_per_point=1
N=X.shape[0]
else:
X=X_orig
if(N==1):
X1=repmat(X,new_samples,1)
elif(N>1):
if(N<=neighbors_thresh):
n_neighbors=int(N/2)
else:
n_neighbors=neighbors_thresh
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
for i in range(N):
#for each point find its n_neighbors nearest neighbors
inds=nbrs.kneighbors(X[i,:].reshape(1,-1), n_neighbors, return_distance=False)
temp_data=X[inds[0],:]
std=np.std(temp_data,axis=0)
me=np.mean(temp_data,axis=0)
np.random.seed(i)
x_temp=me + std*np.random.randn(new_samples_per_point,M)
X1=np.append(X1,x_temp,axis=0)
return X_orig, X1
def upsample(self,X,Y,new_imbalance_ratio,upsample_type):
#xTrain: samples X features
#yTrain : samples,
#for classification only
numClasses=np.unique(Y).size
class_samples=np.zeros((numClasses,))
X3=np.zeros((0,X.shape[1]))
Y3=np.zeros((0,))
#first find the samples per class per class
for i in range(numClasses):
idx1=(Y==i)
class_samples[i]=np.sum(idx1)
max_samples=np.max(class_samples)
# new_imbalance_ratio=0.5
if(upsample_type==1):
old_imbalance_ratio_thresh=0.5
else:
old_imbalance_ratio_thresh=1
for i in range(numClasses):
idx1=(Y==i)
old_imbalance_ratio=class_samples[i]/max_samples
X1=X[idx1,:]
Y1=Y[idx1,]
if(idx1.size==1):
X1=np.reshape(X1,(1,X.shape[1]))
if(old_imbalance_ratio<=old_imbalance_ratio_thresh and class_samples[i]!=0):
X1,X2=self.generate_samples(X1,old_imbalance_ratio,new_imbalance_ratio)
new_samples=X2.shape[0]
Y2=np.ones((new_samples,))
Y2=Y2*Y1[0,]
#append original and generated samples
X3=np.append(X3,X1,axis=0)
X3=np.append(X3,X2,axis=0)
Y3=np.append(Y3,Y1,axis=0)
Y3=np.append(Y3,Y2,axis=0)
else:
#append original samples only
X3=np.append(X3,X1,axis=0)
Y3=np.append(Y3,Y1,axis=0)
Y3=np.array(Y3,dtype=np.int32)
return X3,Y3
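    # Hedged sketch (comment only, illustrative values): upsample() synthesises extra points for
    # minority classes from the mean and std of each point's nearest neighbours, e.g.
    #   X_bal, Y_bal = self.upsample(X, Y, new_imbalance_ratio=0.5, upsample_type=1)
    # classes whose size already exceeds half of the largest class are left untouched.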
def kmeans_select(self,X,represent_points):
"""
Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
The prototype vectors are selected based on the farthest distance from the kmeans centers
Parameters
----------
X: np.ndarray
shape = n_samples, n_features
represent_points: int
number of prototype vectors to return
do_pca: boolean
whether to perform incremental pca for dimensionality reduction before selecting prototype vectors
Returns
-------
sv: list
list of the prototype vector indices from the data array given by X
"""
do_pca = self.do_pca_in_selection
N = X.shape[0]
if(do_pca == True):
if(X.shape[1]>50):
n_components = 50
ipca = IncrementalPCA(n_components=n_components, batch_size=np.min([128,X.shape[0]]))
X = ipca.fit_transform(X)
kmeans = MiniBatchKMeans(n_clusters=represent_points, batch_size=np.min([128,X.shape[0]]),random_state=0).fit(X)
centers = kmeans.cluster_centers_
labels = kmeans.labels_
sv= []
unique_labels = np.unique(labels).size
all_ind = np.arange(N)
for j in range(unique_labels):
X1 = X[labels == j,:]
all_ind_temp = all_ind[labels==j]
tempK = pairwise_distances(X1,np.reshape(centers[j,:],(1,X1.shape[1])))**2
inds = np.argmax(tempK,axis=0)
sv.append(all_ind_temp[inds[0]])
return sv
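    # Hedged sketch (comment only): kmeans_select returns indices into X, one prototype per
    # MiniBatchKMeans cluster, chosen as the point farthest from its cluster centre, e.g.
    #   sv = self.kmeans_select(X, represent_points=100)
    #   prototypes = X[sv, :]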
def renyi_select(self,X,represent_points):
"""
Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
The prototype vectors are selected based on maximization of quadratic renyi entropy, which can be
written in terms of log sum exp which is a tightly bounded by max operator. Now for rbf kernel,
the max_{ij}(-\|x_i-x_j\|^2) is equivalent to min_{ij}(\|x_i-x_j\|^2).
Parameters
----------
X: np.ndarray
shape = n_samples, n_features
represent_points: int
number of prototype vectors to return
do_pca: boolean
whether to perform incremental pca for dimensionality reduction before selecting prototype vectors
Returns
-------
sv: list
list of the prototype vector indices from the data array given by X
"""
do_pca = self.do_pca_in_selection
N= X.shape[0]
capacity=represent_points
selectionset=set([])
set_full=set(list(range(N)))
np.random.seed(1)
if(len(selectionset)==0):
selectionset = np.random.permutation(N)
sv = list(selectionset)[0:capacity]
else:
extrainputs = represent_points - len(selectionset)
leftindices =list(set_full.difference(selectionset))
info = np.random.permutation(len(leftindices))
info = info[1:extrainputs]
sv = selectionset.append(leftindices[info])
if(do_pca == True):
if(X.shape[1]>50): #takes more time
n_components = 50
ipca = IncrementalPCA(n_components=n_components, batch_size=np.min([128,X.shape[0]]))
X = ipca.fit_transform(X)
svX = X[sv,:]
min_info = np.zeros((capacity,2))
KsV = pairwise_distances(svX,svX)**2 #this is fast
KsV[KsV==0] = np.inf
min_info[:,1] = np.min(KsV,axis=1)
min_info[:,0] = np.arange(capacity)
minimum = np.min(min_info[:,1])
counter = 0
for i in range(N):
# find for which data the value is minimum
replace = np.argmin(min_info[:,1])
ids = int(min_info[min_info[:,0]==replace,0])
#Subtract from totalcrit once for row
tempminimum = minimum - min_info[ids,1]
#Try to evaluate kernel function
tempsvX = np.zeros(svX.shape)
tempsvX[:] = svX[:]
inputX = X[i,:]
tempsvX[replace,:] = inputX
tempK = pairwise_distances(tempsvX,np.reshape(inputX,(1,X.shape[1])))**2 #this is fast
tempK[tempK==0] = np.inf
distance_eval = np.min(tempK)
tempminimum = tempminimum + distance_eval
if (minimum < tempminimum):
minimum = tempminimum
min_info[ids,1] = distance_eval
svX[:] = tempsvX[:]
sv[ids] = i
counter +=1
return sv
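    # Hedged sketch (comment only): renyi_select starts from a random subset and greedily swaps
    # points in whenever the swap increases the minimum pairwise squared distance, e.g.
    #   sv = self.renyi_select(X, represent_points=100)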
def subset_selection(self,X,Y):
n_components = self.n_components
PV_scheme = self.PV_scheme
problem_type = self.problem_type
N = X.shape[0]
# M = X.shape[1]
numClasses = np.unique(Y).size
use_global_sig = False
use_global_sig1 = False
if(use_global_sig ==True or problem_type == 'regression'):
if(PV_scheme == 'renyi'):
# sig_global = np.power((np.std(X)*(np.power(N,(-1/(M+4))))),2)
subset = self.renyi_select(X,n_components)
elif(PV_scheme == 'kmeans'):
subset = self.kmeans_select(X,n_components)
else:
print('No PV_scheme provided... using all the samples!')
subset = list(np.arange(N))
else:
all_samples = np.arange(N)
subset=[]
subset_per_class = np.zeros((numClasses,))
class_dist = np.zeros((numClasses,))
for i in range(numClasses):
class_dist[i] = np.sum(Y == i)
subset_per_class[i] = int(np.ceil((class_dist[i]/N)*n_components))
for i in range(numClasses):
xTrain = X[Y == i,]
samples_in_class = all_samples[Y == i]
N1 = xTrain.shape[0]
# sig = np.power((np.std(xTrain)*(np.power(N1,(-1/(M+4))))),2)
if(PV_scheme == 'renyi'):
if(use_global_sig1 == False):
subset1 = self.renyi_select(xTrain,int(subset_per_class[i]))
else:
# sig_global = np.power((np.std(X)*(np.power(N,(-1/(M+4))))),2)
subset1 = self.renyi_select(xTrain,int(subset_per_class[i]))
elif(PV_scheme == 'kmeans'):
subset1 = self.kmeans_select(xTrain,int(subset_per_class[i]))
else:
print('No PV_scheme provided... using all the samples!')
subset1 = list(np.arange(N1))
temp=list(samples_in_class[subset1])
subset.extend(temp)
return subset
def divide_into_batches_stratified(self,yTrain):
batch_sz=self.batch_sz
#data should be of the form samples X features
N=yTrain.shape[0]
num_batches=int(np.ceil(N/batch_sz))
sample_weights=list()
numClasses=np.unique(yTrain).size
idx_batches=list()
skf=StratifiedKFold(n_splits=num_batches, random_state=1, shuffle=True)
j=0
for train_index, test_index in skf.split(np.zeros(N), yTrain):
idx_batches.append(test_index)
class_weights=np.zeros((numClasses,))
sample_weights1=np.zeros((test_index.shape[0],))
temp=yTrain[test_index,]
for i in range(numClasses):
idx1=(temp==i)
class_weights[i]=1.0/(np.sum(idx1)+1e-09)#/idx.shape[0]
sample_weights1[idx1]=class_weights[i]
sample_weights.append(sample_weights1)
j+=1
return idx_batches,sample_weights,num_batches
def kernel_transform(self, X1, X2 = None, kernel_type = 'linear_primal', n_components = 100, gamma = 1.0):
"""
X1: n_samples1 X M
X2: n_samples2 X M
X: n_samples1 X n_samples2 : if kernel_type is non primal
X: n_samples1 X n_components : if kernel_type is primal
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X2)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X2,1/(2*gamma))
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X2,-gamma)
elif(kernel_type == 'sin'):
X = np.sin(gamma*manhattan_distances(X1,X2))
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X2))
elif(kernel_type == 'rff_primal'):
rbf_feature = RBFSampler(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'nystrom_primal'):
#cannot have n_components more than n_samples1
if(n_components > X1.shape[0]):
n_components = X1.shape[0]
self.n_components = n_components
rbf_feature = Nystroem(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'linear_primal'):
X = X1
else:
print('No kernel_type passed: using linear primal solver')
X = X1
return X
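    # Hedged sketch (comment only, illustrative shapes): for the non-primal kernels X2 holds the
    # prototype vectors, so the transformed data gets one column per prototype, e.g.
    #   K = self.kernel_transform(X1=xTrain, X2=xTrain[subset, :], kernel_type='rbf',
    #                             gamma=self.gamma)            # (n_samples1, n_prototypes)
    #   Z = self.kernel_transform(X1=xTrain, kernel_type='rff_primal', n_components=100,
    #                             gamma=self.gamma)            # (n_samples1, 100)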
def margin_kernel(self, X1, kernel_type = 'linear', gamma =1.0):
"""
X1: n_samples1 X M
X: n_samples1 X n_samples1 : if kernel_type is non primal
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X1)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X1,1/(2*gamma))
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X1,-gamma)
elif(kernel_type == 'sin'):
X = np.sin(gamma*manhattan_distances(X1,X1))
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X1))
else:
print('no kernel_type, returning None')
return None
return X
def matrix_decomposition(self, X):
"""
Finds the matrices consisting of positive and negative parts of kernel matrix X
Parameters:
----------
X: n_samples X n_samples
Returns:
--------
K_plus: kernel corresponding to +ve part
K_minus: kernel corresponding to -ve part
"""
[D,U]=eigh(X)
U_plus = U[:,D>0.0]
U_minus = U[:,D<=0.0]
D_plus = np.diag(D[D>0.0])
D_minus = np.diag(D[D<=0.0])
K_plus = np.dot(np.dot(U_plus,D_plus),U_plus.T)
K_minus = -np.dot(np.dot(U_minus,D_minus),U_minus.T)
return K_plus, K_minus
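    # Hedged sketch (comment only): the eigendecomposition splits a possibly indefinite kernel
    # into positive and negative semidefinite parts, so K_plus - K_minus reconstructs K, e.g.
    #   K = self.margin_kernel(X1, kernel_type='TL1', gamma=1.0)
    #   K_plus, K_minus = self.matrix_decomposition(K)
    #   assert np.allclose(K, K_plus - K_minus)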
def inner_opt(self, X, Y, data1, level):
gamma = self.gamma
kernel_type = self.kernel_type
iterMax2 = self.iterMax2
iterMax1 = self.iterMax1
tol = self.tol
algo_type = self.algo_type
#if data1 = None implies there is no kernel computation, i.e., there is only primal solvers applicable
if(data1 is not None):
if(self.reg_type == 'M'):
K = self.margin_kernel( X1 = data1, kernel_type = kernel_type, gamma = gamma)
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
K_plus, K_minus = self.matrix_decomposition(K)
if(algo_type == 'MCM'):
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W_prev,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
if(kernel_type == 'linear' or kernel_type == 'rbf'):
#for mercer kernels no need to train for outer loop
print('Returning for mercer kernels')
return W_prev,f,iters,fvals
else:
print('Solving for non - mercer kernels')
#for non mercer kernels, train for outer loop with initial point as W_prev
W_best = np.zeros(W_prev.shape)
W_best[:] = W_prev[:]
f_best = np.inf
iter_best = 0
fvals = np.zeros((iterMax1+1,))
iters = 0
fvals[iters] = f
rel_error = 1.0
print('iters =%d, f_outer = %0.9f'%(iters,f))
while(iters < iterMax2 and rel_error > tol):
iters = iters + 1
if(algo_type == 'MCM'):
W,f,iters1,fvals1 = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
elif(algo_type == 'LSMCM'):
W,f,iters1,fvals1 = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters1,fvals1 = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
rel_error = np.abs((np.linalg.norm(W,'fro')-np.linalg.norm(W_prev,'fro'))/(np.linalg.norm(W_prev,'fro') + 1e-08))
W_prev[:] = W[:]
print('iters =%d, f_outer = %0.9f'%(iters,f))
if(f < f_best):
W_best[:] = W[:]
f_best = f
iter_best = iters
else:
break
fvals[iters] = -1
return W_best,f_best,iter_best,fvals
else:
print('Please choose a kernel_type from linear, rbf, sin, tanh or TL1 for reg_type = M to work ')
print('Using a linear kernel')
                    self.kernel_type = 'linear'
                    K = self.margin_kernel(X1 = data1, kernel_type = 'linear', gamma = gamma)
                    K_plus, K_minus = self.matrix_decomposition(K)
if(algo_type == 'MCM'):
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W_prev,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
return W_prev,f,iters,fvals
else:
#i.e., reg_type is not M, then train accordingly using either l1, l2, ISTA or elastic net penalty
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X, Y, level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X, Y, level, K_plus = None, K_minus = None, W = None)
return W, f, iters, fvals
else:
#i.e., data1 is None -> we are using primal solvers with either l1, l2, ISTA or elastic net penalty
if(self.reg_type == 'M'):
print('Please choose a kernel_type from linear, rbf, sin, tanh or TL1 for reg_type = M to work')
print('doing linear classifier with l1 norm on weights')
self.reg_type = 'l1'
self.C3 = 0.0
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X,Y,level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
return W,f,iters,fvals
else:
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X,Y,level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
return W,f,iters,fvals
return W,f,iters,fvals
def select_(self, xTest, xTrain, kernel_type, subset, idx_features, idx_samples):
#xTest corresponds to X1
#xTrain corresponds to X2
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
X2 = xTrain[idx_samples,:]
X2 = X2[:,idx_features]
X2 = X2[subset,]
X1 = xTest[:,idx_features]
else:
X1 = xTest[:,idx_features]
X2 = None
return X1, X2
def normalize_(self,xTrain, me, std):
idx = (std!=0.0)
xTrain[:,idx] = (xTrain[:,idx]-me[idx])/std[idx]
return xTrain
def fit(self,xTrain,yTrain):
#xTrain: samples Xfeatures
#yTrain: samples
#for classification: entries of yTrain should be between {0 to numClasses-1}
#for regresison : entries of yTrain should be real values
N = xTrain.shape[0]
M = xTrain.shape[1]
if(self.problem_type =='classification'):
numClasses=np.unique(yTrain).size
if(self.problem_type =='regression'):
if(yTrain.size == yTrain.shape[0]):
yTrain = np.reshape(yTrain,(yTrain.shape[0],1))
numClasses = yTrain.shape[1] #for multi target SVM, assuming all targets are independent to each other
feature_indices=np.zeros((self.n_ensembles,int(M*self.feature_ratio)),dtype=np.int32)
sample_indices=np.zeros((self.n_ensembles,int(N*self.sample_ratio)),dtype=np.int32)
W_all={}
me_all= {}
std_all = {}
subset_all = {}
if(self.combine_type=='concat'):
P_all=np.zeros((N,self.n_ensembles*numClasses)) #to concatenate the classes
level=0
gamma = self.gamma
kernel_type = self.kernel_type
n_components = self.n_components
for i in range(self.n_ensembles):
print('training PLM %d'%i)
if(self.sample_ratio!=1.0):
idx_samples=resample(np.arange(0,N), n_samples=int(N*self.sample_ratio), random_state=i,replace=False)
else:
idx_samples = np.arange(N)
if(self.feature_ratio!=1.0):
idx_features=resample(np.arange(0,M), n_samples=int(M*self.feature_ratio), random_state=i,replace=False)
else:
idx_features = np.arange(0,M)
feature_indices[i,:] = idx_features
sample_indices[i,:] = idx_samples
xTrain_temp = xTrain[idx_samples,:]
xTrain_temp = xTrain_temp[:,idx_features]
yTrain1 = yTrain[idx_samples,]
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
subset = self.subset_selection(xTrain_temp,yTrain1)
data1 = xTrain_temp[subset,]
subset_all[i] = subset
else:
subset_all[i] = []
data1 = None
xTrain1 = self.kernel_transform( X1 = xTrain_temp, X2 = data1, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
#standardize the dataset
xTrain1, me, std = self.standardize(xTrain1)
me_all[i] = me
std_all[i] = std
if(self.problem_type == 'regression'):
epsilon = self.epsilon
N1 = yTrain1.shape[0]
W = np.zeros((xTrain1.shape[1]+2,numClasses*2)) #2 is added to incorporate the yTrain2 and bias term appended to xTrain1
for j in range(numClasses):
yTrain3 = np.append(np.ones((N1,)), np.zeros((N1,)))
yTrain2 = np.append(yTrain1[:,j] + epsilon, yTrain1[:,j] - epsilon, axis = 0)
xTrain2 = np.append(xTrain1, xTrain1, axis = 0)
xTrain2 = np.append(xTrain2, np.reshape(yTrain2,(2*N1,1)), axis =1)
# Wa,f,iters,fvals=self.train(xTrain2,yTrain3,level)
Wa,f,iters,fvals = self.inner_opt(xTrain2, yTrain3, data1, level)
W[:,j:j+2] = Wa
W_all[i]=W # W will be of the shape (M+2,), here numClasses = 1
if(self.problem_type == 'classification'):
# W,f,iters,fvals=self.train(xTrain1,yTrain1,level)
W,f,iters,fvals = self.inner_opt(xTrain1, yTrain1, data1, level)
W_all[i]=W # W will be of the shape (M+2,numClasses)
if(self.n_ensembles == 1 or self.combine_type != 'concat'):
return W_all, sample_indices, feature_indices, me_all, std_all, subset_all
else:
if(self.combine_type=='concat'):
level=1
for i in range(self.n_ensembles):
X1, X2 = self.select_(xTrain, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
xTrain1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
xTrain1 = self.normalize_(xTrain1,me_all[i],std_all[i])
M = xTrain1.shape[1]
xTrain1=self.add_bias(xTrain1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((xTrain1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = xTrain1[:,0:M].dot(W1[0:M,]) + np.dot(xTrain1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
if(self.problem_type == 'classification'):
scores = xTrain1.dot(W)
P_all[:,i*numClasses:numClasses+i*numClasses] = scores
#train another regressor or classifier on top
if(self.problem_type == 'regression'):
epsilon = self.epsilon
P_all_1 = np.zeros((P_all.shape[0],self.n_ensembles))
W1 = np.zeros((P_all_1.shape[1]+2,numClasses*2))
for j in range(numClasses):
for k in range(self.n_ensembles):
P_all_1[:,k] = P_all[:,numClasses*k+j]
yTrain3 = np.append(np.ones((N,)), np.zeros((N,)))
yTrain2 = np.append(yTrain[:,j] + epsilon, yTrain[:,j] - epsilon, axis = 0)
P_all_2 = np.append(P_all_1, P_all_1, axis = 0)
P_all_2 = np.append(P_all_2, np.reshape(yTrain2,(2*N,1)), axis =1)
# Wa,f,iters,fvals = self.train(P_all_2,yTrain3,level)
Wa,f,iters,fvals = self.inner_opt(P_all_2, yTrain3, None, level)
W1[:,j:j+2] = Wa
if(self.problem_type == 'classification'):
# W1,f1,iters1,fvals1 = self.train(P_all,yTrain,level)
W1,f,iters,fvals = self.inner_opt(P_all, yTrain, None, level)
W_all[self.n_ensembles] = W1
return W_all, sample_indices, feature_indices, me_all, std_all, subset_all
def train(self, xTrain, yTrain, level, K_plus = None, K_minus = None, W = None):
#min D(E|w|_1 + (1-E)*0.5*|W|_2^2) + C*\sum_i\sum_(j)|f_j(i)| + \sum_i\sum_(j_\neq y_i)max(0,(1-f_y_i(i) + f_j(i)))
#setting C = 0 gives us SVM
# or when using margin term i.e., reg_type = 'M'
#min D(E|w|_1) + (E)*0.5*\sum_j=1 to numClasses (w_j^T(K+ - K-)w_j) + C*\sum_i\sum_(j)|f_j(i)| + \sum_i\sum_(j_\neq y_i)max(0,(1-f_y_i(i) + f_j(i)))
#setting C = 0 gives us SVM with margin term
if(self.upsample1==True):
xTrain,yTrain=self.upsample(xTrain,yTrain,new_imbalance_ratio=0.5,upsample_type=1)
xTrain=self.add_bias(xTrain)
M=xTrain.shape[1]
N=xTrain.shape[0]
numClasses=np.unique(yTrain).size
verbose = False
if(level==0):
C = self.C1 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty or margin term
else:
C = self.C4 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty since in combining the classifiers we use a linear primal classifier
iterMax1 = self.iterMax1
eta_zero = self.eta
class_weighting = self.class_weighting
reg_type = self.reg_type
update_type = self.update_type
tol = self.tol
np.random.seed(1)
if(W is None):
W=0.001*np.random.randn(M,numClasses)
W=W/np.max(np.abs(W))
else:
W_orig = np.zeros(W.shape)
W_orig[:] = W[:]
class_weights=np.zeros((numClasses,))
sample_weights=np.zeros((N,))
#divide the data into K clusters
for i in range(numClasses):
idx=(yTrain==i)
class_weights[i]=1.0/np.sum(idx)
sample_weights[idx]=class_weights[i]
G_clip_threshold = 100
W_clip_threshold = 500
eta=eta_zero
scores = xTrain.dot(W) #samples X numClasses
N = scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
thresh1 = np.zeros(mat.shape)
thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*np.sum(np.abs(scores)) + np.sum(thresh1)
f += (1.0/N)*f1
else:
f1 = C*np.sum(np.abs(scores)*sample_weights[:,None]) + np.sum(thresh1*sample_weights[:,None])
f+= (1.0/numClasses)*f1
if(K_minus is not None):
temp_mat = np.dot(K_minus,W_orig[0:(M-1),])
for i in range(numClasses):
#add the term (E/2*numclasses)*lambda^T*K_plus*lambda for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f+= ((0.5*E)/(numClasses))*f2
#the second term in the objective function
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f+= -((0.5*E)/(numClasses))*f3
iter1=0
print('iter1=%d, f=%0.3f'%(iter1,f))
f_best=f
fvals=np.zeros((iterMax1+1,))
fvals[iter1]=f_best
W_best=np.zeros(W.shape)
iter_best=iter1
f_prev=f_best
rel_error=1.0
# f_prev_10iter=f
if(reg_type=='l1' or reg_type =='en' or reg_type == 'M'):
# from paper: Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty
if(update_type == 'adam' or update_type == 'adagrad' or update_type == 'rmsprop'):
u = np.zeros(W.shape)
else:
u = 0.0
q=np.zeros(W.shape)
z=np.zeros(W.shape)
all_zeros=np.zeros(W.shape)
eta1=eta_zero
v=np.zeros(W.shape)
v_prev=np.zeros(W.shape)
vt=np.zeros(W.shape)
m=np.zeros(W.shape)
vt=np.zeros(W.shape)
cache=np.zeros(W.shape)
eps=1e-08
decay_rate=0.99
mu1=0.9
mu=mu1
beta1 = 0.9
beta2 = 0.999
iter_eval=10 #evaluate after every 10 iterations
idx_batches, sample_weights_batch, num_batches = self.divide_into_batches_stratified(yTrain)
while(iter1<iterMax1 and rel_error>tol):
iter1=iter1+1
for batch_num in range(0,num_batches):
# batch_size=batch_sizes[j]
test_idx=idx_batches[batch_num]
data=xTrain[test_idx,]
labels=yTrain[test_idx,]
N=labels.shape[0]
scores=data.dot(W)
correct_scores=scores[range(N),np.array(labels,dtype='int32')]#label_batches[j] for this line should be in the range [0,numClasses-1]
mat=(scores.transpose()-correct_scores.transpose()).transpose()
mat=mat+1.0
mat[range(N),np.array(labels,dtype='int32')]=0.0
thresh1=np.zeros(mat.shape)
thresh1[mat>0.0]=mat[mat>0.0]
binary1 = np.zeros(thresh1.shape)
binary1[thresh1>0.0] = 1.0
row_sum=np.sum(binary1,axis=1)
binary1[range(N),np.array(labels,dtype='int32')]=-row_sum
if(C !=0.0):
binary2 = np.zeros(scores.shape)
binary2[scores>0.0] = 1.0
binary2[scores<0.0] = -1.0
else:
binary2 = 0
dscores1 = binary1
dscores2 = binary2
if(class_weighting=='average'):
gradW = np.dot((dscores1 + C*dscores2).transpose(),data)
gradW=gradW.transpose()
gradW = (1.0/N)*gradW
# gradW += gradW1 - gradW2
else:
sample_weights_b=sample_weights_batch[batch_num]
gradW=np.dot((dscores1 + C*dscores2).transpose(),data*sample_weights_b[:,None])
gradW=gradW.transpose()
gradW=(1.0/numClasses)*gradW
# gradW += gradW1 - gradW2
if(np.sum(gradW**2)>G_clip_threshold):#gradient clipping
gradW = G_clip_threshold*gradW/np.sum(gradW**2)
if(update_type=='sgd'):
W = W - eta*gradW
elif(update_type=='momentum'):
v = mu * v - eta * gradW # integrate velocity
W += v # integrate position
elif(update_type=='nesterov'):
v_prev[:] = v[:] # back this up
v = mu * v - eta * gradW # velocity update stays the same
W += -mu * v_prev + (1 + mu) * v # position update changes form
elif(update_type=='adagrad'):
cache += gradW**2
W += - eta1* gradW / (np.sqrt(cache) + eps)
elif(update_type=='rmsprop'):
cache = decay_rate * cache + (1 - decay_rate) * gradW**2
W += - eta1 * gradW / (np.sqrt(cache) + eps)
elif(update_type=='adam'):
m = beta1*m + (1-beta1)*gradW
mt = m / (1-beta1**(iter1+1))
v = beta2*v + (1-beta2)*(gradW**2)
vt = v / (1-beta2**(iter1+1))
W += - eta1 * mt / (np.sqrt(vt) + eps)
else:
W = W - eta*gradW
if(reg_type == 'M'):
gradW1= np.zeros(W.shape)
gradW2= np.zeros(W.shape)
for i in range(numClasses):
w=W[0:(M-1),i]
if(K_plus is not None):
gradW1[0:(M-1),i]=((E*0.5)/(numClasses))*2*np.dot(K_plus,w)
if(K_minus is not None):
gradW2[0:(M-1),i]=((E*0.5)/(numClasses))*temp_mat[:,i]
if(update_type == 'adam'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(cache) + eps))
else:
W += -(gradW1-gradW2)*(eta)
if(reg_type == 'ISTA'):
if(update_type == 'adam'):
idx_plus = W > D*(eta1/(np.sqrt(vt) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(vt) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(vt) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(vt[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(vt[idx_minus]) + eps))
W[idx_zero] = 0.0
elif(update_type == 'adagrad' or update_type =='rmsprop'):
idx_plus = W > D*(eta1/(np.sqrt(cache) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(cache) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(cache) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(cache[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(cache[idx_minus]) + eps))
W[idx_zero] = 0.0
else:
idx_plus = W > D*(eta)
idx_minus = W < -D*(eta)
idx_zero = np.abs(W) < D*(eta)
W[idx_plus] = W[idx_plus] - D*(eta)
W[idx_minus] = W[idx_minus] + D*(eta)
W[idx_zero] = 0.0
if(reg_type=='l2'):
if(update_type == 'adam'):
W += -D*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*W*(eta1/(np.sqrt(cache) + eps))
else:
W += -D*W*(eta)
if(reg_type=='en'):
if(update_type == 'adam'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(cache) + eps))
else:
W += -D*(1.0-E)*W*(eta)
if(reg_type=='l1' or reg_type == 'M'):
if(update_type=='adam'):
u = u + D*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(reg_type=='en'):
if(update_type=='adam'):
u = u + D*E*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*E*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*E*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(np.sum(W**2)>W_clip_threshold):#weight-norm clipping
W = W_clip_threshold*W/np.sum(W**2)
if(iter1%iter_eval==0):
#once the W are calculated for each epoch we calculate the scores
scores=xTrain.dot(W)
# scores=scores-np.max(scores)
N=scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
thresh1 = np.zeros(mat.shape)
thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*np.sum(np.abs(scores)) + np.sum(thresh1)
f += (1.0/N)*f1
else:
f1 = C*np.sum(np.abs(scores)*sample_weights[:,None]) + np.sum(thresh1*sample_weights[:,None])
f+= (1.0/numClasses)*f1
for i in range(numClasses):
#first term in objective function for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f += ((0.5*E)/(numClasses))*f2
#the second term in the objective function for margin
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f += -((0.5*E)/(numClasses))*f3
if(verbose == True):
print('iter1=%d, f=%0.3f'%(iter1,f))
fvals[iter1]=f
rel_error=np.abs(f_prev-f)/np.abs(f_prev)
max_W = np.max(np.abs(W))
W[np.abs(W)<1e-03*max_W]=0.0
if(f<f_best):
f_best=f
W_best[:]=W[:]
max_W = np.max(np.abs(W))
W_best[np.abs(W_best)<1e-03*max_W]=0.0
iter_best=iter1
else:
break
f_prev=f
eta=eta_zero/np.power((iter1+1),1)
fvals[iter1]=-1
return W_best,f_best,iter_best,fvals
def predict(self,data, xTrain, W_all, sample_indices, feature_indices, me_all, std_all, subset_all):
#combine_type=='mode'    -> majority vote (mode) over the per-ensemble labels
#combine_type=='average' -> average the per-ensemble scores
#combine_type=='concat'  -> concatenate the per-ensemble scores and feed them to the combining classifier
types = self.combine_type
kernel_type = self.kernel_type
gamma = self.gamma
n_components = self.n_components
n_ensembles = feature_indices.shape[0]
N = data.shape[0]
M = data.shape[1]
if(self.problem_type == 'classification'):
numClasses = W_all[0].shape[1]
label = np.zeros((N,))
if(self.problem_type == 'regression'):
numClasses = int(W_all[0].shape[1]/2)
print('numClasses=%d'%numClasses)
label = np.zeros((N,numClasses))
# print('numClasses =%d'%numClasses)
if(types=='mode'):
label_all_1 = np.zeros((N,n_ensembles))
label_all_2 = np.zeros((N,n_ensembles*numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform(X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label_all_2[:,i*numClasses:i*numClasses+numClasses] = scores
if(self.problem_type == 'classification'):
scores = data1.dot(W)
label_all_1[:,i] = np.argmax(scores,axis=1)
if(self.problem_type == 'classification'):
label = mode(label_all_1,axis=1)[0]
label = np.int32(np.reshape(label,(N,)))
return label
if(self.problem_type == 'regression'):
label = np.zeros((N,numClasses))
for j in range(numClasses):
label_temp = np.zeros((N,n_ensembles))
for k in range(n_ensembles):
label_temp[:,k] = label_all_2[:,k*numClasses+j]
label[:,j] = np.reshape(mode(label_temp,axis=1)[0],(label.shape[0],))
return label
elif(types=='average'):
label_all_2=np.zeros((N,numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
# W1 = (W[:,0]-W[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label += scores/n_ensembles
if(self.problem_type == 'classification'):
scores = data1.dot(W)
label_all_2 += scores
if(self.problem_type == 'classification'):
label=np.argmax(label_all_2,axis=1)
return label
if(self.problem_type == 'regression'):
return label
elif(types =='concat'):
# if(self.problem_type == 'regression'):
# P_all=np.zeros((N,n_ensembles))
# if(self.problem_type == 'classification'):
N = data.shape[0]
P_all=np.zeros((N,n_ensembles*numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
# if(self.problem_type == 'regression'):
# W1 = (W[:,0]-W[:,1])/2
# scores=data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
# scores = -1.0/(W1[M,] + 1e-08)*scores
# P_all[:,i] = scores
if(self.problem_type == 'classification'):
scores = data1.dot(W)
P_all[:,i*numClasses:numClasses+i*numClasses] = scores
if(n_ensembles == 1):
if(self.problem_type == 'regression'):
if(numClasses == 1):
label = np.reshape(P_all,(P_all.shape[0],))
else:
label = P_all
if(self.problem_type == 'classification'):
label=np.argmax(P_all,axis=1)
return label
W = W_all[n_ensembles]
M = P_all.shape[1]
# P_all = self.add_bias(P_all)
if(self.problem_type == 'regression'):
scores = np.zeros((P_all.shape[0],numClasses))
P_all_1 = np.zeros((P_all.shape[0],n_ensembles))
# W = np.zeros((P_all_1.shape[1]+2,numClasses*2))
for j in range(numClasses):
P_all_1 = np.zeros((P_all.shape[0],n_ensembles))
for k in range(n_ensembles):
P_all_1[:,k] = P_all[:,numClasses*k+j]
M = P_all_1.shape[1]
P_all_1 = self.add_bias(P_all_1)
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = P_all_1[:,0:M].dot(W1[0:M,]) + np.dot(P_all_1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label = scores
return label
# W1 = (W[:,0]-W[:,1])/2
# scores=P_all[:,0:M].dot(W1[0:M,]) + np.dot(P_all[:,M], W1[M+1,])
# scores = -1.0/(W1[M,] + 1e-08)*scores
# label = scores
if(self.problem_type == 'classification'):
P_all = self.add_bias(P_all)
scores = P_all.dot(W)
label = np.argmax(scores,axis=1)
return label
def accuracy_classifier(self,actual_label,found_labels):
acc=np.divide(np.sum(actual_label==found_labels)*100.0 , actual_label.shape[0],dtype='float64')
return acc
def accuracy_regressor(self,actual_label,found_labels):
acc=np.divide(np.linalg.norm(actual_label - found_labels)**2 , actual_label.shape[0],dtype='float64')
return acc
def train_LSMCM(self, xTrain, yTrain, level, K_plus = None, K_minus = None, W = None):
#min D*(E*|w|_1 + (1-E)*0.5*|W|_2^2) + (C/2)*\sum_i\sum_j f_j(i)^2 + (1/2)*\sum_i (1 - f_{y_i}(i) + max_{j != y_i} f_j(i))^2
#setting C = 0 leaves only the least-squares SVM-style loss
# or, when using the margin term, i.e., reg_type = 'M':
#min D*E*|w|_1 + (E/2)*\sum_{j=1}^{numClasses} w_j^T (K_plus - K_minus) w_j + (C/2)*\sum_i\sum_j f_j(i)^2 + (1/2)*\sum_i (1 - f_{y_i}(i) + max_{j != y_i} f_j(i))^2
#setting C = 0 leaves the loss with the margin term alone
# print('LSMCM Training')
# print('reg_type=%s, algo_type=%s, problem_type=%s,kernel_type=%s'%(self.reg_type,self.algo_type,self.problem_type,self.kernel_type))
# print('C1=%0.4f, C2=%0.4f, C3=%0.4f'%(self.C1,self.C2,self.C3))
if(self.upsample1==True):
xTrain,yTrain=self.upsample(xTrain,yTrain,new_imbalance_ratio=0.5,upsample_type=1)
xTrain=self.add_bias(xTrain)
M=xTrain.shape[1]
N=xTrain.shape[0]
numClasses=np.unique(yTrain).size
verbose = False
if(level==0):
C = self.C1 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty or margin term
else:
C = self.C4 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty since in combining the classifiers we use a linear primal classifier
iterMax1 = self.iterMax1
eta_zero = self.eta
class_weighting = self.class_weighting
reg_type = self.reg_type
update_type = self.update_type
tol = self.tol
np.random.seed(1)
if(W is None):
W=0.001*np.random.randn(M,numClasses)
W=W/np.max(np.abs(W))
else:
W_orig = np.zeros(W.shape)
W_orig[:] = W[:]
class_weights=np.zeros((numClasses,))
sample_weights=np.zeros((N,))
#compute per-class weights (inverse class frequency) for class-balanced sample weighting
for i in range(numClasses):
idx=(yTrain==i)
class_weights[i]=1.0/np.sum(idx)
sample_weights[idx]=class_weights[i]
G_clip_threshold = 100
W_clip_threshold = 500
eta=eta_zero
scores = xTrain.dot(W) #samples X numClasses
N = scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
scores1 = np.zeros(scores.shape)
scores1[:] = scores[:]
scores1[range(N),np.array(yTrain,dtype='int32')] = -np.inf
max_scores = np.max(scores1,axis =1)
mat1 = 1 - correct_scores + max_scores
# thresh1 = np.zeros(mat.shape)
# thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
#(1- f_yi + max_j neq yi f_j)^2
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*0.5*np.sum(scores**2) + 0.5*np.sum((mat1)**2)
f += (1.0/N)*f1
else:
f1 = C*0.5*np.sum((scores**2)*sample_weights[:,None]) + 0.5*np.sum((mat1**2)*sample_weights[:,None])
f+= (1.0/numClasses)*f1
if(K_minus is not None):
temp_mat = np.dot(K_minus,W_orig[0:(M-1),])
for i in range(numClasses):
#add the term (E/2*numclasses)*lambda^T*K_plus*lambda for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f+= ((0.5*E)/(numClasses))*f2
#the second term in the objective function
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f+= -((0.5*E)/(numClasses))*f3
iter1=0
print('iter1=%d, f=%0.3f'%(iter1,f))
f_best=f
fvals=np.zeros((iterMax1+1,))
fvals[iter1]=f_best
W_best=np.zeros(W.shape)
iter_best=iter1
f_prev=f_best
rel_error=1.0
# f_prev_10iter=f
if(reg_type=='l1' or reg_type =='en' or reg_type == 'M'):
# from paper: Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty
if(update_type == 'adam' or update_type == 'adagrad' or update_type == 'rmsprop'):
u =
|
np.zeros(W.shape)
|
numpy.zeros
|
import tensorflow as tf
import numpy as np
import random
from scipy.stats import pearsonr
from analyze_predictions import *
from scipy.spatial import distance
import seaborn as sns
import matplotlib.pyplot as plt
def Euclidean_dist(A, B):
C = A - B
return sum(map(sum, C * C)) ** 0.5
def MAE(A, B): ## note: despite the name, this computes the mean squared error (sum of squared differences / number of entries)
C = A - B
return sum(map(sum, C * C)) / (C.shape[0] * C.shape[1])
def random_split_train_test(X0, training_dictionary_fraction, seed, dictionary_size=0.5, biased_training=0.):
training_dictionary_size = max(int(training_dictionary_fraction * X0.shape[1]), 5)
if dictionary_size < 1:
dictionary_size = dictionary_size * training_dictionary_size
dictionary_size = int(dictionary_size)
xi = np.zeros(X0.shape[1], dtype=np.bool)
if biased_training > 0:
np.random.seed(seed)
i = np.random.randint(len(xi))
dist = distance.cdist([X0[:, i]], X0.T, 'correlation')[0]
didx = np.argsort(dist)[1:int(biased_training * training_dictionary_size) + 1]
else:
didx = []
xi[didx] = True
if biased_training < 1:
remaining_idx = np.setdiff1d(range(len(xi)), didx)
np.random.seed(seed)
xi[np.random.choice(remaining_idx, training_dictionary_size - xi.sum(), replace=False)] = True
xa = X0[:, xi]
xb = X0[:, np.invert(xi)]
return xa, xb
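# Illustrative use (hypothetical fraction and seed): split the columns of an expression
# matrix X0 into a "training dictionary" and a held-out set, with at least 5 samples in
# the dictionary:
#   xa, xb = random_split_train_test(X0, training_dictionary_fraction=0.05, seed=0)
# xa holds the dictionary columns, xb the remaining columns.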
def compare_results(A, B):
    # wrap the scalar metrics in lists (calling list() on a float raises TypeError)
    results = [1 - distance.correlation(A.flatten(), B.flatten())]
    results += [Euclidean_dist(A, B)]
    results += [MAE(A, B)]
    return results
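# Minimal sanity check (hypothetical data, not part of the original pipeline):
#   A = np.random.rand(50, 20)
#   B = A + 0.01 * np.random.randn(50, 20)
#   corr_score, euclid, mse = compare_results(A, B)
# compare_results returns [Pearson correlation of the flattened arrays (1 minus the
# correlation distance), Euclidean distance, mean squared error].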
tf.set_random_seed(1)
# Hyper Parameters
LR = 0.0001 # learning rate
Dropout_rate = 0.5
# GSE Data
data_path = "./Data/mass_cytomatry.txt"
X = np.loadtxt(data_path)
X =
|
np.delete(X, (0, 1, 2), axis=1)
|
numpy.delete
|
import unittest
import numpy as np
import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.UHFQuantumController as UHF
class Test_UHFQC(unittest.TestCase):
@classmethod
def setup_class(cls):
cls.uhf = UHF.UHFQC(name='MOCK_UHF', server='emulator',
device='dev2109', interface='1GbE')
cls.uhf.reset_waveforms_zeros()
@classmethod
def teardown_class(cls):
cls.uhf.close()
def test_instantiation(self):
self.assertEqual(Test_UHFQC.uhf.devname, 'dev2109')
def test_DIO_program(self):
self.uhf.awg_sequence_acquisition_and_DIO_triggered_pulse(cases=[
0, 2, 14])
self.uhf.start()
uploaded_program = self.uhf._awgModule._sourcestring
p = uploaded_program.split('\n')
assert len(p) == 52 # program is 52 lines
# Test that the codeword preamble is identical
assert p[:8] == \
['// Start of automatically generated codeword table',
'wave wave_ch1_cw000 = "dev2109_wave_ch1_cw000";',
'wave wave_ch2_cw000 = "dev2109_wave_ch2_cw000";',
'wave wave_ch1_cw002 = "dev2109_wave_ch1_cw002";',
'wave wave_ch2_cw002 = "dev2109_wave_ch2_cw002";',
'wave wave_ch1_cw014 = "dev2109_wave_ch1_cw014";',
'wave wave_ch2_cw014 = "dev2109_wave_ch2_cw014";',
'// End of automatically generated codeword table']
assert p[39:45] == [
' switch (cw) {',
' case 0x00000000: playWave(wave_ch1_cw000, wave_ch2_cw000);',
' case 0x00040000: playWave(wave_ch1_cw002, wave_ch2_cw002);',
' case 0x001c0000: playWave(wave_ch1_cw014, wave_ch2_cw014);',
' default: playWave(ones(32), ones(32)); err_cnt += 1;',
' }']
def test_waveform_table_generation(self):
self.uhf.awg_sequence_acquisition_and_DIO_triggered_pulse(
cases=[0, 2, 14])
assert self.uhf.cases() == [0, 2, 14]
wf_table = self.uhf._get_waveform_table(0)
assert wf_table == [('wave_ch1_cw000', 'wave_ch2_cw000'),
('wave_ch1_cw002', 'wave_ch2_cw002'),
('wave_ch1_cw014', 'wave_ch2_cw014')]
@unittest.skip('FIXME: disabled, see PR #643 and PR #635 (marked as important)')
def test_dynamic_waveform_upload(self):
Test_UHFQC.uhf.wave_ch1_cw000(np.ones(48))
# resetting the compilation count to ensure test is self contained
Test_UHFQC.uhf._awgModule._compilation_count[0] = 0
Test_UHFQC.uhf.awg_sequence_acquisition_and_pulse()
Test_UHFQC.uhf.start()
Test_UHFQC.uhf.stop()
# The program must be compiled exactly once at this point
self.assertEqual(Test_UHFQC.uhf._awgModule.get_compilation_count(0), 1)
# Modify a waveform
Test_UHFQC.uhf.wave_ch1_cw000(0.5*Test_UHFQC.uhf.wave_ch1_cw000())
# Start again
Test_UHFQC.uhf.start()
Test_UHFQC.uhf.stop()
# No further compilation allowed
self.assertEqual(Test_UHFQC.uhf._awgModule.get_compilation_count(0), 1)
# Change the length of a waveform
w0 = np.concatenate(
(Test_UHFQC.uhf.wave_ch1_cw000(), Test_UHFQC.uhf.wave_ch1_cw000()))
Test_UHFQC.uhf.wave_ch1_cw000(w0)
# Start again
Test_UHFQC.uhf.start()
Test_UHFQC.uhf.stop()
# Now the compilation must have been executed again
self.assertEqual(Test_UHFQC.uhf._awgModule.get_compilation_count(0), 2)
def test_reset_waveforms_zeros(self):
self.uhf.wave_ch1_cw003(np.ones(80))
assert np.allclose(self.uhf.wave_ch1_cw003(),
|
np.ones(80)
|
numpy.ones
|
import cv2
import numpy as np
from xml.etree import ElementTree as ET
import os
import shutil
"""get voc annosList
给定VOC中的一个任务, 然后从总的annos中抽出含有该任务的图片
用这个替代 dataList 中的 annoNames
"""
def vocAnnoPathes(path):
with open(path, 'r') as f:
lines = f.readlines()
annoNames = [line.strip().split(" ")[0] + ".xml" for line in lines]
return annoNames
def vertify(xmin, ymin,xmax,ymax,width,height):
assert 0 <= xmin < width, "xmin must in [0, {}), given {}".format(width, xmin)
assert 0 <= ymin < height, "ymin must in [0, {}), given {}".format(height, ymin)
assert xmin < xmax <= width, "xmax must in (xmin, {}], given {}".format(width, xmax)
assert ymin < ymax <= height, "ymax must in (ymin, {}], given {}".format(height, ymax)
"""
在一个xml中解析出来的所有的box
annoPath: xml的路径
choiceCls:对那个类别进行检测
"""
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
def parseVoc(annoPath, choiceCls = ['boat']):
LAYOUT = ("hand", "head", "")
indexMap = dict(zip(CLASSES, range(len(CLASSES))))
tree = ET.parse(annoPath)
root = tree.getroot()
size=root.find('size')
width = float(size.find('width').text)
height = float(size.find('height').text)
label = []
for obj in root.iter("object"):
difficult = int(obj.find("difficult").text)
if difficult==1:
continue
clsName = obj.find("name").text.strip().lower()
if clsName not in choiceCls:
continue
clsId = indexMap[clsName]
xmlbox = obj.find("bndbox")
xmin = (float(xmlbox.find('xmin').text))
ymin = (float(xmlbox.find('ymin').text) )
xmax = (float(xmlbox.find('xmax').text) )
ymax = (float(xmlbox.find('ymax').text) )
try:
vertify(xmin, ymin,xmax,ymax,width,height)
except AssertionError as e:
raise RuntimeError("Invalid label at {}, {}".format(annoPath, e))
if len(choiceCls) == 1:
clsId =0
label.append([xmin, ymin, xmax-xmin, ymax-ymin, clsId])
if len(label) == 0:
label = np.array([[-1, -1, -1, -1, -1]])
label = np.array(label)
return label
if __name__ == '__main__':
""" config"""
root = "/media/q/data/datasets/VOC/"
dataName = "VOC2007_test"
trainvaltest = "test.txt"#2007test:test; 2012:trainval
# names = [CLASSES[14]] # only chocie one cls to label, this label is 0
# names = CLASSES # i want train 20 models, so every class has a dir,
names = ["all"]# chocie all 20 class to label
bg = 10# if chocie one class , bg% background will choice
allNum = 16500 # 2012:16500; 2007test :5000
diff1 = 1 #if chocie difficult1,1:chocie diffi0 and diff 1; 0:only diffi0
"""end"""
imgRoot = root + dataName + "/JPEGImages/"
annoRoot = root + dataName +"/Annotations/"
for j in range( len(names)):
name = names[j]
if name == "all":
dirname = "trainval_diff" + str(int(diff1))
annotxtPath = root + dataName + "/" + "ImageSets/Main/"+trainvaltest
else:
dirname = "trainval_diff" + str(int(diff1)) + "_" + str(bg)+"bg"
annotxtPath = root + dataName + "/" + "ImageSets/Main/"+name+"_" + trainvaltest
annSaveDir = root + dataName + "/format_me/Main/" + name + "/" + dirname + "/"
if os.path.exists(annSaveDir):
shutil.rmtree(annSaveDir)
if not os.path.exists(annSaveDir):
os.makedirs(annSaveDir)
anns = vocAnnoPathes(annotxtPath)
print(j, "-"*10, len(anns))
annNames = []
idx = 0
for i in range(len(anns)):
imgPath = imgRoot + anns[i].strip().split(".")[0] + ".jpg"
annoPath = annoRoot + anns[i]
#img = cv2.imread(imgPath)
if name == "all":
label = parseVoc(annoPath, choiceCls=list(CLASSES))
else:
label = parseVoc(annoPath, choiceCls=[name])
# for j in range(label.shape[0]):
# img = cv2.rectangle(img, (int(label[j][0]), int(label[j][1])),
# (int(label[j][0] + label[j][2]), int(label[j][1] + label[j][3])), (0,0,255), thickness=1, lineType=None, shift=None)
# img = cv2.putText(img, CLASSES[int(label[j][4])], (int(label[j][0]), int(label[j][1]) + 10),
# 0, 1, (0,0,255), thickness=2, lineType=None, bottomLeftOrigin=None)
# if label.ndim > 1:
if name!= "all" and bg > 0:
dir0name = "trainval_diff" + str(int(diff1)) + "_" + str(0) + "bg"
bg0dir = root + dataName + "/format_me/Main/" + name + "/" + dir0name + "/"
assert os.path.exists(bg0dir), "0% background txt dir cannot be found ,so cannot add bg% background"
objnum = len(os.listdir(bg0dir))
if name=="all" or bg ==0:
flag = False
else:
flag = np.random.random() < 0.1*objnum/(allNum-objnum)
if not (label == np.array([[-1, -1, -1, -1, -1]])).all() or flag:# not bk :#or(not flag1 and flag2):
annSavePath = annSaveDir + anns[i].split(".")[0] + ".txt"
|
np.savetxt(annSavePath, label)
|
numpy.savetxt
|
import openmoc
import numpy as np
from universes import universes, cells, surfaces
from surfaces import gap
###############################################################################
######################### Set Simulation Param ############################
###############################################################################
reflector_refines = 3
###############################################################################
########################### Creating Lattices #############################
###############################################################################
lattices = {}
# Instantiate Lattices
lattices['Refined Reflector Mesh'] = openmoc.Lattice()
lattices['Reflector Unrodded Assembly'] = openmoc.Lattice()
lattices['Reflector Rodded Assembly'] = openmoc.Lattice()
lattices['Reflector Right Assembly'] = openmoc.Lattice()
lattices['Reflector Bottom Assembly'] = openmoc.Lattice()
lattices['Reflector Corner Assembly'] = openmoc.Lattice()
lattices['Reflector Assembly'] = openmoc.Lattice()
lattices['UO2 Unrodded Assembly'] = openmoc.Lattice()
lattices['UO2 Rodded Assembly'] = openmoc.Lattice()
lattices['MOX Unrodded Assembly'] = openmoc.Lattice()
lattices['MOX Rodded Assembly'] = openmoc.Lattice()
lattices['Root'] = openmoc.Lattice()
lattices['Gap Reflector Rodded Assembly'] = openmoc.Lattice()
lattices['Gap Reflector Right Assembly'] = openmoc.Lattice()
lattices['Gap Reflector Bottom Assembly'] = openmoc.Lattice()
lattices['Gap Reflector Corner Assembly'] = openmoc.Lattice()
lattices['Gap UO2 Unrodded Assembly'] = openmoc.Lattice()
lattices['Gap UO2 Rodded Assembly'] = openmoc.Lattice()
lattices['Gap MOX Unrodded Assembly'] = openmoc.Lattice()
lattices['Gap MOX Rodded Assembly'] = openmoc.Lattice()
# Abbreviate universes that will fill lattices
u = universes['UO2']
m = universes['MOX 4.3%']
o = universes['MOX 7.0%']
x = universes['MOX 8.7%']
g = universes['Guide Tube']
f = universes['Fission Chamber']
c = universes['Control Rod']
p = universes['Moderator Pin']
r = universes['Reflector']
a = universes['Refined Reflector Mesh']
# Sliced up water cells - semi finely spaced
width_xy = 1.26 / reflector_refines
lattices['Refined Reflector Mesh'].setWidth\
(width_x=width_xy, width_y=width_xy, width_z=100.)
template = [[[r] * reflector_refines] * reflector_refines]
lattices['Refined Reflector Mesh'].setUniverses(template)
# UO2 unrodded 17 x 17 assemblies
lattices['UO2 Unrodded Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, f, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u]]]
lattices['UO2 Unrodded Assembly'].setUniverses(template)
# UO2 rodded 17 x 17 assemblies
lattices['UO2 Rodded Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u],
[u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u],
[u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u]]]
lattices['UO2 Rodded Assembly'].setUniverses(template)
# MOX unrodded 17 x 17 assemblies
lattices['MOX Unrodded Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, o, o, o, o, g, o, o, g, o, o, g, o, o, o, o, m],
[m, o, o, g, o, x, x, x, x, x, x, x, o, g, o, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, g, x, x, g, x, x, g, x, x, g, x, x, g, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, g, x, x, g, x, x, f, x, x, g, x, x, g, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, g, x, x, g, x, x, g, x, x, g, x, x, g, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, o, g, o, x, x, x, x, x, x, x, o, g, o, o, m],
[m, o, o, o, o, g, o, o, g, o, o, g, o, o, o, o, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m]]]
lattices['MOX Unrodded Assembly'].setUniverses(template)
# MOX rodded 17 x 17 assemblies
lattices['MOX Rodded Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, o, o, o, o, c, o, o, c, o, o, c, o, o, o, o, m],
[m, o, o, c, o, x, x, x, x, x, x, x, o, c, o, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, c, x, x, c, x, x, c, x, x, c, x, x, c, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, c, x, x, c, x, x, f, x, x, c, x, x, c, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, c, x, x, c, x, x, c, x, x, c, x, x, c, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, o, c, o, x, x, x, x, x, x, x, o, c, o, o, m],
[m, o, o, o, o, c, o, o, c, o, o, c, o, o, o, o, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m]]]
lattices['MOX Rodded Assembly'].setUniverses(template)
# Reflector unrodded 17 x 17 assemblies
lattices['Reflector Unrodded Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, g, p, p, g, p, p, g, p, p, p, p, p],
[p, p, p, g, p, p, p, p, p, p, p, p, p, g, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, g, p, p, g, p, p, g, p, p, g, p, p, g, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, g, p, p, g, p, p, f, p, p, g, p, p, g, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, g, p, p, g, p, p, g, p, p, g, p, p, g, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, g, p, p, p, p, p, p, p, p, p, g, p, p, p],
[p, p, p, p, p, g, p, p, g, p, p, g, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p]]]
lattices['Reflector Unrodded Assembly'].setUniverses(template)
# Reflector rodded 17 x 17 assemblies
lattices['Reflector Rodded Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, c, p, p, c, p, p, c, p, p, p, p, p],
[p, p, p, c, p, p, p, p, p, p, p, p, p, c, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, c, p, p, c, p, p, c, p, p, c, p, p, c, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, c, p, p, c, p, p, f, p, p, c, p, p, c, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, c, p, p, c, p, p, c, p, p, c, p, p, c, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, c, p, p, p, p, p, p, p, p, p, c, p, p, p],
[p, p, p, p, p, c, p, p, c, p, p, c, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p]]]
lattices['Reflector Rodded Assembly'].setUniverses(template)
# Reflector right 17 x 17 assemblies
lattices['Reflector Right Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[a] * 11 + [r] * 6] * 17]
lattices['Reflector Right Assembly'].setUniverses(template)
# Reflector bottom 17 x 17 assemblies
lattices['Reflector Bottom Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[a] * 17] * 11
template += [[r] * 17] * 6
template = [template]
lattices['Reflector Bottom Assembly'].setUniverses(template)
# Reflector corner 17 x 17 assemblies
lattices['Reflector Corner Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[a] * 11 + [r] * 6] * 11
template += [[r] * 17] * 6
template = [template]
lattices['Reflector Corner Assembly'].setUniverses(template)
# Reflector right 17 x 17 assemblies
lattices['Reflector Assembly'].setWidth(width_x=1.26, width_y=1.26, width_z=100.)
template = [[[a] * 17] * 17]
lattices['Reflector Assembly'].setUniverses(template)
row = np.array([[r]*17])
col = np.array([[r]*19]).reshape(-1, 1)
width = [
[gap] + [1.26]*17 + [gap],
[gap] + [1.26]*17 + [gap],
[100]
]
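# The "Gap ..." assemblies below all follow the same pattern: the 17 x 17 pin template
# is padded with one ring of reflector universes (row on top/bottom, col on left/right),
# giving a 19 x 19 template whose outer cells take the narrow `gap` width defined above,
# so each assembly is modelled with a thin water gap around it.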
# Gap UO2 unrodded 17 x 17 assemblies
lattices['Gap UO2 Unrodded Assembly'].setWidths(width[0], width[1], width[2])
template = [[[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, f, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u]]]
template = np.array(template)
template = np.concatenate((row,template[0],row),axis=0)
template = np.concatenate((col,template,col),axis=1)
template = template[np.newaxis,:]
template = template.tolist()
lattices['Gap UO2 Unrodded Assembly'].setUniverses(template)
# Gap UO2 rodded 17 x 17 assemblies
lattices['Gap UO2 Rodded Assembly'].setWidths(width[0], width[1], width[2])
template = [[[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u],
[u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u],
[u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u]]]
template = np.array(template)
template = np.concatenate((row,template[0],row),axis=0)
template = np.concatenate((col,template,col),axis=1)
template = template[np.newaxis,:]
template = template.tolist()
lattices['Gap UO2 Rodded Assembly'].setUniverses(template)
# Gap MOX unrodded 17 x 17 assemblies
lattices['Gap MOX Unrodded Assembly'].setWidths(width[0], width[1], width[2])
template = [[[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, o, o, o, o, g, o, o, g, o, o, g, o, o, o, o, m],
[m, o, o, g, o, x, x, x, x, x, x, x, o, g, o, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, g, x, x, g, x, x, g, x, x, g, x, x, g, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, g, x, x, g, x, x, f, x, x, g, x, x, g, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, g, x, x, g, x, x, g, x, x, g, x, x, g, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, o, g, o, x, x, x, x, x, x, x, o, g, o, o, m],
[m, o, o, o, o, g, o, o, g, o, o, g, o, o, o, o, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m]]]
template = np.array(template)
template = np.concatenate((row,template[0],row),axis=0)
template = np.concatenate((col,template,col),axis=1)
template = template[np.newaxis,:]
template = template.tolist()
lattices['Gap MOX Unrodded Assembly'].setUniverses(template)
# Gap MOX rodded 17 x 17 assemblies
lattices['Gap MOX Rodded Assembly'].setWidths(width[0], width[1], width[2])
template = [[[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, o, o, o, o, c, o, o, c, o, o, c, o, o, o, o, m],
[m, o, o, c, o, x, x, x, x, x, x, x, o, c, o, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, c, x, x, c, x, x, c, x, x, c, x, x, c, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, c, x, x, c, x, x, f, x, x, c, x, x, c, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, o, x, x, x, x, x, x, x, x, x, x, x, o, o, m],
[m, o, c, x, x, c, x, x, c, x, x, c, x, x, c, o, m],
[m, o, o, o, x, x, x, x, x, x, x, x, x, o, o, o, m],
[m, o, o, c, o, x, x, x, x, x, x, x, o, c, o, o, m],
[m, o, o, o, o, c, o, o, c, o, o, c, o, o, o, o, m],
[m, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, m],
[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m]]]
template = np.array(template)
template = np.concatenate((row,template[0],row),axis=0)
template = np.concatenate((col,template,col),axis=1)
template = template[np.newaxis,:]
template = template.tolist()
lattices['Gap MOX Rodded Assembly'].setUniverses(template)
# Gap Reflector rodded 17 x 17 assemblies
lattices['Gap Reflector Rodded Assembly'].setWidths(width[0], width[1], width[2])
template = [[[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, c, p, p, c, p, p, c, p, p, p, p, p],
[p, p, p, c, p, p, p, p, p, p, p, p, p, c, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, c, p, p, c, p, p, c, p, p, c, p, p, c, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, c, p, p, c, p, p, f, p, p, c, p, p, c, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, c, p, p, c, p, p, c, p, p, c, p, p, c, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, c, p, p, p, p, p, p, p, p, p, c, p, p, p],
[p, p, p, p, p, c, p, p, c, p, p, c, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p],
[p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p]]]
template = np.array(template)
template = np.concatenate((row,template[0],row),axis=0)
template = np.concatenate((col,template,col),axis=1)
template = template[np.newaxis,:]
template = template.tolist()
lattices['Gap Reflector Rodded Assembly'].setUniverses(template)
# Gap Reflector right 17 x 17 assemblies
lattices['Gap Reflector Right Assembly'].setWidths(width[0], width[1], width[2])
template = [[[a] * 11 + [r] * 6] * 17]
template = np.array(template)
template = np.concatenate((row,template[0],row),axis=0)
template = np.concatenate((col,template,col),axis=1)
template = template[np.newaxis,:]
template = template.tolist()
lattices['Gap Reflector Right Assembly'].setUniverses(template)
# Gap Reflector bottom 17 x 17 assemblies
lattices['Gap Reflector Bottom Assembly'].setWidths(width[0], width[1], width[2])
template = [[a] * 17] * 11
template += [[r] * 17] * 6
template = [template]
template = np.array(template)
template = np.concatenate((row,template[0],row),axis=0)
template =
|
np.concatenate((col,template,col),axis=1)
|
numpy.concatenate
|
import numpy as np
import matplotlib.pyplot as plt
def create_random_adjacency(size=500, blocks=100, sets=[0, 200, 320, 390], missing=.9999, set_density=.6):
# create matrix
x =
|
np.random.rand(size,size)
|
numpy.random.rand
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 12:30:55 2017
@author: sjjoo
"""
import sys
import mne
import matplotlib.pyplot as plt
import imageio
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import scipy.io as sio
import time
from functools import partial
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne import set_config
import matplotlib.font_manager as font_manager
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
fs_dir = '/mnt/diskArray/projects/avg_fsurfer'
this_env['SUBJECTS_DIR'] = fs_dir
raw_dir = '/mnt/scratch/NLR_MEG4'
#raw_dir = '/mnt/scratch/NLR_MEG_EOG2'
os.chdir(raw_dir)
subs = ['NLR_102_RS','NLR_110_HH','NLR_145_AC','NLR_150_MG',
'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_162_EF','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_201_GS',
'NLR_204_AM','NLR_205_AC','NLR_207_AH','NLR_210_SB','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_GB267','NLR_JB420', 'NLR_HB275','NLR_GB355']
session2 = ['102_rs160815','110_hh160809',
'145_ac160823','150_mg160825',
'152_tc160623','160_ek160915','161_ak160916','162_ef160829','163_lf160920',
'164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
'180_zd160826','201_gs150925',
'204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828',
'nlr_hb275170828','nlr_gb355170907']
n_subjects = len(subs)
#%%
""" CHANGE the file name here !!! """
fname_data = op.join(raw_dir, 'session2_data_loose_depth8_normal.npy')
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
conditions1 = ['word_c254_p20_dot', 'word_c254_p50_dot', 'word_c137_p20_dot',
'word_c254_p80_dot', 'word_c137_p80_dot', 'bigram_c254_p20_dot',
'bigram_c254_p50_dot', 'bigram_c137_p20_dot',
'word_c254_p20_word', 'word_c254_p50_word', 'word_c137_p20_word',
'word_c254_p80_word', 'word_c137_p80_word', 'bigram_c254_p20_word',
'bigram_c254_p50_word', 'bigram_c137_p20_word'
]
conditions2 = [0, 1, 2, 3, 4, 8, 9, 10, 11, 12]
#X13 = np.empty((20484, 481, n_subjects, len(conditions2)))
X13 = np.empty((20484, 601, n_subjects, len(conditions2)))
fs_vertices = [np.arange(10242)] * 2
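# fsaverage at ico-5 resolution has 10242 vertices per hemisphere, so the morphed source
# estimates below have 2 * 10242 = 20484 vertices, matching the first dimension of X13.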
n_epochs = np.empty((n_subjects,len(conditions2)))
for n, ss in enumerate(session2):
os.chdir(os.path.join(raw_dir,session2[n]))
os.chdir('inverse')
fn = 'Conditions_40-sss_eq_'+session2[n]+'-ave.fif'
fn_inv = session2[n] + '-depth8-inv.fif'
# fn_inv = session1[n] + '-40-sss-meg-inv.fif'
inv = mne.minimum_norm.read_inverse_operator(fn_inv, verbose=None)
for iCond, s in enumerate(conditions2):
evoked = mne.read_evokeds(fn, condition=conditions1[s], baseline=(None,0), kind='average', proj=True)
# mne.viz.plot_snr_estimate(evoked, inv)
# os.chdir(os.path.join(raw_dir,session1[n]))
# os.chdir('covariance')
# fn_cov = session1[n] + '-40-sss-cov.fif'
# cov = mne.read_cov(fn_cov)
# evoked.plot()
# evoked.plot_topomap(times=np.linspace(0.05, 0.15, 11), ch_type='mag')
# evoked.plot_white(cov)
# os.chdir(os.path.join(raw_dir,session1[n]))
# os.chdir('inverse')
n_epochs[n][iCond] = evoked.nave
stc = mne.minimum_norm.apply_inverse(evoked,inv,lambda2, method=method, pick_ori='normal') #None
# plt.figure()
# plt.plot(1e3 * stc.times, stc.data[::100, :].T)
# plt.xlabel('time (ms)')
# plt.ylabel('%s value' % method)
# plt.show()
stc.crop(-0.1, 0.9)
tstep = stc.tstep
times = stc.times
# Average brain
"""
One should check that the morph map is current and correct. Otherwise, it will spit out an error.
Check SUBJECTS_DIR/morph-maps
"""
morph_mat = mne.compute_morph_matrix(subs[n], 'fsaverage', stc.vertices,
fs_vertices, smooth=20,
subjects_dir=fs_dir)
stc_fsaverage = stc.morph_precomputed('fsaverage', fs_vertices, morph_mat, subs[n])
# tmin, tmax = 0.080, 0.120
# stc_mean = stc_fsaverage.copy().crop(tmin, tmax).mean()
#
# labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white', subjects_dir=fs_dir)
# V1_label_lh = [label for label in labels if label.name == 'L_V1_ROI-lh'][0]
# V1_label_rh = [label for label in labels if label.name == 'R_V1_ROI-rh'][0]
#
# stc_mean_label = stc_mean.in_label(V1_label_lh)
# data = np.abs(stc_mean_label.data)
# stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
#
# func_labels, _ = mne.stc_to_label(stc_mean_label, src='fsaverage', subjects_dir=fs_dir, smooth=False)
#
# stc_anat_label = stc_fsaverage.in_label(V1_label_lh)
# pca_anat = stc_fsaverage.extract_label_time_course(V1_label_lh, src='fsaverage', mode='pca_flip')[0]
#
# stc_func_label = stc.in_label(func_label)
# pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
#
# # flip the pca so that the max power between tmin and tmax is positive
# pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
# pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
# stc_morph = mne.morph_data(subs[n], 'fsaverage', stc, n_jobs=18,
# grade=fs_vertices, subjects_dir=fs_dir)
# stc_morph.save('%s_loose_morph' % conditions1[iCond])
#
# tt = np.arange(0.05, 0.15, 0.01)
# # plot magnetometer data as topomaps
# evoked.plot()
# evoked.plot_topomap(tt, ch_type='mag')
#
# # compute a 50 ms bin to stabilize topographies
## evoked.plot_topomap(tt, ch_type='mag', average=0.05)
#
# # plot gradiometer data (plots the RMS for each pair of gradiometers)
# evoked.plot_topomap(tt, ch_type='grad')
#
# # plot magnetometer data as an animation
# evoked.animate_topomap(ch_type='mag', times=times, frame_rate=10)
#
# # plot magnetometer data as topomap at 1 time point : 100 ms
# # and add channel labels and title
# evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
# size=6, res=128, title='Auditory response')
# plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
#
X13[:,:,n,iCond] = stc_fsaverage.data
os.chdir(raw_dir)
np.save(fname_data, X13)
np.save('session2_times.npy',times)
|
np.save('session2_tstep.npy',tstep)
|
numpy.save
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base CCDData class.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy import units as u
class HRSOrder(object):
"""A class describing a single order for a High Resolutoin Spectrograph
observation.
Parameters
-----------
order: integer
Order of the HRS observations
region: list, tuple, or `~numpy.ndarray`
region is an object that contains coordinates for pixels in
the image which are part of this order. It should be a list
containing two arrays with the coordinates listed in each array.
flux: `~numpy.ndarray`
Fluxes corresponding to each pixel coordinate in region.
wavelength: `~numpy.ndarray`
Wavelengths corresponding to each pixel coordinate in region.
order_type: str
Type of order for the Order of the HRS observations
flux_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the flux.
wavelength_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the wavelength
"""
def __init__(self, order, region=None, flux=None, wavelength=None,
flux_unit=None, wavelength_unit=None, order_type=None):
self.order = order
self.region = region
self.flux = flux
self.wavelength = wavelength
self.flux_unit = flux_unit
self.wavelength_unit = wavelength_unit
self.order_type = order_type
@property
def order(self):
return self._order
@order.setter
def order(self, value):
if not isinstance(value, int):
raise TypeError('order is not an integer')
self._order = value
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, value):
if value not in ['sky', 'object', None]:
raise TypeError("order_type is not None, 'sky', or 'object'")
self._order_type = value
@property
def region(self):
return self._region
@region.setter
def region(self, value):
if value is None:
self._region = None
return
if len(value) != 2:
raise TypeError("region is not of length 2")
if len(value[0]) != len(value[1]):
raise TypeError(
"coordinate lists in region are not of equal length")
self.npixels = len(value[0])
self._region = value
@property
def flux(self):
return self._flux
@flux.setter
def flux(self, value):
if value is None:
self._flux = None
return
if self.region is None:
raise ValueError('No region is set yet')
if len(value) != self.npixels:
raise TypeError("flux is not the same length as region")
self._flux = value
@property
def wavelength(self):
return self._wavelength
@wavelength.setter
def wavelength(self, value):
if value is None:
self._wavelength = None
return
if self.region is None:
raise ValueError('No region is set yet')
if len(value) != self.npixels:
raise TypeError("wavelength is not the same length as region")
self._wavelength = value
@property
def flux_unit(self):
return self._flux_unit
@flux_unit.setter
def flux_unit(self, value):
if value is None:
self._flux_unit = None
else:
self._flux_unit = u.Unit(value)
@property
def wavelength_unit(self):
return self._wavelength_unit
@wavelength_unit.setter
def wavelength_unit(self, value):
if value is None:
self._wavelength_unit = None
else:
self._wavelength_unit = u.Unit(value)
def set_order_from_array(self, data):
"""Given an array of data which has an order specified at each pixel,
set the region at the given order for HRSOrder
Parameters
----------
data: `~numpy.ndarray`
data is an 2D array with an order value specified at each pixel. If
no order is available for a given pixel, the pixel should have a
value of zero.
"""
if not isinstance(data, np.ndarray):
raise TypeError('data is not an numpy.ndarray')
if data.ndim != 2:
raise TypeError('data is not a 2D numpy.ndarray')
self.region = np.where(data == self.order)
def set_flux_from_array(self, data, flux_unit=None):
"""Given an array of data of fluxes, set the fluxes for
the region at the given order for HRSOrder
Parameters
----------
data: `~numpy.ndarray`
data is an 2D array with a flux value specified at each pixel.
flux_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the flux.
"""
if not isinstance(data, np.ndarray):
raise TypeError('data is not an numpy.ndarray')
if data.ndim != 2:
raise TypeError('data is not a 2D numpy.ndarray')
self.flux = data[self.region]
self.flux_unit = flux_unit
def set_wavelength_from_array(self, data, wavelength_unit):
"""Given an array of wavelengths, set the wavelength for
each pixel coordinate in `~HRSOrder.region`.
Parameters
----------
data: `~numpy.ndarray`
data is an 2D array with a wavelength value specified at each pixel
wavelength_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the wavelength
"""
if not isinstance(data, np.ndarray):
raise TypeError('data is not an numpy.ndarray')
if data.ndim != 2:
raise TypeError('data is not a 2D numpy.ndarray')
self.wavelength = data[self.region]
self.wavelength_unit = wavelength_unit
def set_wavelength_from_model(
self, model, params, wavelength_unit, **kwargs):
"""Given an array of wavelengths, set the wavelength for
each pixel coordinate in `~HRSOrder.region`.
Parameters
----------
model: function
model is a callable function that will create a corresponding
wavelength for each pixel in `~HRSOrder.region`. The function
can either be 1D or 2D. If it is 2D, the x-coordinate should
be the first argument.
params: `~numpy.ndarray`
Either a 1D or 2D list of parameters with the number of elements
corresponding to the number of pixels. Typically, if model
is a 1D function, this would be the x-coordinates from
`~HRSOrder.region`. Otherwise, this would be expected to be
`~HRSOrder.region`.
wavelength_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the wavelength
**kwargs:
All additional keywords to be passed to model
"""
if not hasattr(model, '__call__'):
raise TypeError('model is not a function')
self.wavelength_unit = wavelength_unit
if len(params) == self.npixels:
self.wavelength = model(params, **kwargs)
elif len(params) == 2:
self.wavelength = model(params[1], params[0], **kwargs)
else:
raise TypeError('params is not the correct size or shape')
def create_box(self, flux, interp=False):
"""Convert order into a square representation with integer shifts
beteween each pixel
Parameters
----------
flux: ~numpy.ndarray
Array of values to convert into a rectangular representation
Returns
-------
box: ~numpy.ndarray
Rectangular representation of flux
"""
xmax = self.region[1].max()
xmin = 0
ymax = self.region[0].max()
ymin = self.region[0].min()
xs = xmax-xmin
coef = np.polyfit(self.region[1], self.region[0], 3)
xarr = np.arange(xs+1)
yarr =
|
np.polyval(coef, xarr)
|
numpy.polyval
|
from scipy.optimize import linear_sum_assignment
import numpy as np
import copy
import math
import random
class RuleFoundation():
def __init__(self, n_agent, n_thread, space, mcv):
self.n_thread = n_thread
self.n_agent = n_agent
self.handler = [None for _ in range(self.n_thread)]
assert n_agent == 10
def interact_with_env(self, team_intel):
info = team_intel['Latest-Team-Info']
done = team_intel['Env-Suffered-Reset']
step_cnt = team_intel['Current-Obs-Step']
action_list = []
for thread in range(self.n_thread):
act_dict = {'detector_act':None, 'fighter_act':None}
if done[thread]:
self.handler[thread] = RuleAgent()
self.handler[thread].set_map_info(1000, 1000, 0, 10)
act_dict['detector_act'], act_dict['fighter_act'] = self.handler[thread].get_action(obs_dict=info[thread], step_cnt=step_cnt[thread])
action_list.append(act_dict)
pass
return action_list, None
class RuleAgent():
def __init__(self): # initialization interface
self.obs_ind = 'raw' # format of the observation information
self.tar = 0
self.N = 0
self.angle=0
self.color_flag=True
self.formation_flag=4
self.star_back=True
self.missile_long = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
self.missile_short = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
        # missile_long[i][0] records the number of long-range missiles on enemy unit i+1
        # missile_long[i][k] records the timer of the k-th long-range missile on enemy unit i+1
self.beyond_flag = False
self.leader_id = 4
self.tar_pos = np.full((4,2,2), 0)
# red
self.tar_pos[0][0][0] = 36
self.tar_pos[0][1][0] = 400
self.tar_pos[1][0][0] = 100
self.tar_pos[1][1][0] = 400
self.tar_pos[2][0][0]= 700
self.tar_pos[2][1][0]= 500
self.tar_pos[3][0][0] = 500
self.tar_pos[3][1][0] = 700
# blue
self.tar_pos[0][0][1] = 964
self.tar_pos[0][1][1] = 400
self.tar_pos[1][0][1] = 900
self.tar_pos[1][1][1] = 400
self.tar_pos[2][0][1]= 300
self.tar_pos[2][1][1]= 500
self.tar_pos[3][0][1] = 500
self.tar_pos[3][1][1] = 700
        # type_data = (attack range, offset added to the enemy index when attacking, index of the missile type in self_info[j, ?])
self.long_data = (120, 1, 1)
self.short_data = (50, 11, 2)
def init_param(self):
self.tar = 0
self.N = 0
self.angle=0
self.color_flag=True
self.formation_flag=4
self.star_back=True
self.missile_long = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
self.missile_short = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
        # missile_long[i][0] records the number of long-range missiles on enemy unit i+1
        # missile_long[i][k] records the timer of the k-th long-range missile on enemy unit i+1
self.beyond_flag = False
self.leader_id = 4
def dist(self, obs_dict, i, j):
adv_obs = obs_dict['fighter'][0]['adv_obs']
        # compute the distance
x = adv_obs[i][2 * j]
y = adv_obs[i][2 * j + 1]
distance = x ** 2 + y ** 2
distance = math.sqrt(distance)
return distance
    def set_map_info(self, size_x, size_y, detector_num, fighter_num):  # read map information
self.size_x = size_x
self.size_y = size_y
self.detector_num = detector_num
        self.fighter_num = fighter_num  # choose the implementation form as needed
self.leader_id = 4
def _bipartite_min_dists(self, dists):
ri, ci = linear_sum_assignment(dists)
return ri, ci
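    # Illustrative note (kept as a comment, assumed intent): linear_sum_assignment
    # returns the assignment that minimizes the total cost. For example,
    #     ri, ci = linear_sum_assignment(np.array([[4., 1.], [2., 3.]]))
    # gives ri = [0, 1], ci = [1, 0], i.e. follower 0 takes target slot 1 and
    # follower 1 takes slot 0, for a minimal total cost of 1 + 2 = 3.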
def tar_judge(self,adv_obs):
tar_exist = False
for i in range(self.fighter_num):
for j in range(self.fighter_num):
if adv_obs[i][j*2]!=-1000 and adv_obs[i][j*2+1]!=-1000:
tar_exist = True
                    break
            if tar_exist:
                break
return tar_exist
def sum_alive(self,alive_status):
alive_num = 0
for i in range(self.fighter_num):
if alive_status[i]:
alive_num+=1
return alive_num
def tar_assign(self,alive_status,adv_obs):
fighter_action = np.full((self.fighter_num,4),0)
for i in range(self.fighter_num):
            # check whether this attacking unit is alive
if not alive_status[i]:
continue
            min_dis = 1000 ** 2 + 1000 ** 2
            min_id = 0  # fallback so min_id is always defined even if no closer enemy is found
            for j in range(self.fighter_num):  # record the id and position of the enemy unit closest to this unit
x = adv_obs[i][2 * j]
y = adv_obs[i][2 * j + 1]
dis = x ** 2 + y ** 2
if dis < min_dis:
min_dis = dis
min_id = j
theta_start = np.arctan2(adv_obs[i][2*min_id+1], adv_obs[i][2*min_id])
if theta_start < 0:
theta_start += 2 * np.pi
course = (int)((theta_start / (2 * np.pi)) * 360)
fighter_action[i][0] = course
return fighter_action
    def formation(self,alive_status,self_pos,self_info,step_cnt,formation_flag):  # formation flying
fighter_action = np.full((self.fighter_num, 4), 0)
if self.color == 'red':
if step_cnt % 8 == 0 or step_cnt % 9 == 0:
for i in range(self.fighter_num):
fighter_action[i][0] = (self_info[i][0] + 120) % 360
return fighter_action
else:
if step_cnt % 8 == 0 or step_cnt % 9 == 0:
for i in range(self.fighter_num):
if self_info[i][0]-120<0:
fighter_action[i][0] = self_info[i][0] -120 + 360
else:
fighter_action[i][0] = self_info[i][0] - 120
return fighter_action
        # select the leader
        if not alive_status[self.leader_id]:  # the leader has died
for i in range(self.fighter_num):
                # pick a surviving unit as the new leader
if alive_status[i]:
self.leader_id = i
break
        # set the default course
if self.color == 'red':
default_course = 0
else:
default_course = 180
        start_offset = 100  # semicircle size
        # determine the leader's trajectory
for y in range(self.fighter_num):
if not alive_status[y]:
continue
if y == self.leader_id:
if self.star_back:
if self.color == 'red':
if self_pos[self.leader_id][0] > 50 :
fighter_action[self.leader_id][0] = default_course+180
else :
fighter_action[self.leader_id][0] = default_course
self.star_back=False
else :
if self_pos[self.leader_id][0] < 950:
fighter_action[self.leader_id][0] = default_course - 180
else:
self.star_back = False
fighter_action[self.leader_id][0] = default_course
else :
if self.color=='red' :
                        # the leader has reached the target position
if self_pos[self.leader_id][0] == self.tar_pos[self.tar][0][0] and self_pos[self.leader_id][1] == self.tar_pos[self.tar][1][0]:
self.tar = self.tar + 1
else:
theta_leader = np.arctan2(self.tar_pos[self.tar][1][0] - self_pos[self.leader_id][1],self.tar_pos[self.tar][0][0] - self_pos[self.leader_id][0])
if theta_leader < 0:
theta_leader += 2 * np.pi
course = (theta_leader / (2 * np.pi)) * 360
if 90 < course < 180 or 270 < course < 360:
course = math.floor(course)
else:
course = math.ceil(course)
fighter_action[self.leader_id][0] = course
if self.tar == 4:
self.tar = 0
else :
if self_pos[self.leader_id][0] == self.tar_pos[self.tar][0][1] and self_pos[self.leader_id][1] == self.tar_pos[self.tar][1][1]:
self.tar = self.tar + 1
else:
theta_leader = np.arctan2(self.tar_pos[self.tar][1][1] - self_pos[self.leader_id][1],self.tar_pos[self.tar][0][1] - self_pos[self.leader_id][0])
if theta_leader < 0:
theta_leader += 2 * np.pi
course = (theta_leader / (2 * np.pi)) * 360
if 90 < course < 180 or 270 < course < 360:
course = math.floor(course)
else:
course = math.ceil(course)
fighter_action[self.leader_id][0] = course
if self.tar == 4 :
self.tar = 0
#print(course)
            # determine the followers' trajectories
else:
                if formation_flag == 1:  ## circular formation
fighter_live_num_list = []
for fighter_live_num in range(self.fighter_num):
if alive_status[fighter_live_num]:
fighter_live_num_list.append(fighter_live_num)
angle = (int)(360 / (len(fighter_live_num_list) - 1))
expected_poses_patrol = []
                    leader_position_patrol = np.array([self_pos[self.leader_id][0], self_pos[self.leader_id][1]])  # the leader's position
for i in range(len(fighter_live_num_list)):
if fighter_live_num_list[i] != self.leader_id:
if fighter_live_num_list[i] > self.leader_id:
expected_poses_patrol.append(np.array([leader_position_patrol + start_offset * np.array([np.cos(angle * (i - 1) * np.pi / 180),np.sin(angle * (i - 1) * np.pi / 180)])]))
else:
expected_poses_patrol.append([leader_position_patrol + start_offset * np.array([np.cos(angle * i * np.pi / 180), np.sin(angle * i * np.pi / 180)])])
dists_patrol = np.array([[np.linalg.norm(np.array([self_pos[i][0], self_pos[i][1]]) - pos) for pos in expected_poses_patrol] for i in range(len(fighter_live_num_list)) if fighter_live_num_list[i] != self.leader_id])
ri, ci = self._bipartite_min_dists(dists_patrol)
for i in range(len(fighter_live_num_list)):
if y == fighter_live_num_list[i]:
if y > self.leader_id:
expected_poses_for_it = expected_poses_patrol[ci[i - 1]]
else:
expected_poses_for_it = expected_poses_patrol[ci[i]]
break
relative_value_patrol = expected_poses_for_it - np.array([self_pos[y][0], self_pos[y][1]])
theta_patrol = np.arctan2(relative_value_patrol[0][1], relative_value_patrol[0][0])
if theta_patrol < 0:
theta_patrol += 2 * np.pi
course = (int)((theta_patrol / (2 * np.pi)) * 360)
fighter_action[y][0] = course
                elif formation_flag == 2:  ## semicircular formation
y_width = 60.0
y_offset = 120
if self.color == 'red':
x_offset = -120.0
else:
x_offset = 120.0
                    ## determine the expected positions -- this is the key step
expected_poses = []
leader_position = np.array([self_pos[self.leader_id][0],self_pos[self.leader_id][1]])
for i in range(self.fighter_num - 1):
if i == 0:
temp_position = [leader_position + np.array([0.0, y_width])]
expected_poses.append(temp_position)
elif i == 1:
temp_position = [leader_position + np.array([0.0, 2 * y_width])]
expected_poses.append(temp_position)
elif i == 2:
temp_position = [leader_position + np.array([0.0, -y_width])]
expected_poses.append(temp_position)
elif i == 3:
temp_position = [leader_position + np.array([0.0, -2 * y_width])]
expected_poses.append(temp_position)
elif i == 4:
temp_position = [leader_position + np.array(
[x_offset * np.cos(60 * np.pi / 180), -y_offset * np.sin(60 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 5:
temp_position = [leader_position + np.array(
[x_offset * np.cos(30 * np.pi / 180), -y_offset * np.sin(30 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 6:
temp_position = [leader_position + np.array(
[x_offset * np.cos(0 * np.pi / 180), y_offset * np.sin(0 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 7:
temp_position = [leader_position + np.array(
[x_offset * np.cos(30 * np.pi / 180), y_offset * np.sin(30 * np.pi / 180)])]
expected_poses.append(temp_position)
elif i == 8:
temp_position = [leader_position + np.array(
[x_offset * np.cos(60 * np.pi / 180), y_offset * np.sin(60 * np.pi / 180)])]
expected_poses.append(temp_position)
dists = np.array([[np.linalg.norm(np.array([self_pos[i][0],self_pos[i][1]]) - pos) for pos in expected_poses] for i in range(self.fighter_num) if i != self.leader_id])
ri, ci = self._bipartite_min_dists(dists)
if y <= self.leader_id:
ci_v1 = y
else:
ci_v1 = y - 1
relative_value = expected_poses[ci[ci_v1]] - np.array([self_pos[y][0],self_pos[y][1]])
theta_start = np.arctan2(relative_value[0][1], relative_value[0][0])
if theta_start < 0:
theta_start += 2 * np.pi
course = (int)((theta_start / (2 * np.pi)) * 360)
fighter_action[y][0] = course
                elif formation_flag == 3:  ## triangular formation
y_width = 45.0
if self.color == 'red':
x_width = 25.0
else:
x_width = -25.0
                    ## determine the expected positions -- this is the key step
expected_poses = []
leader_position = np.array([self_pos[self.leader_id][0],self_pos[self.leader_id][1]])
for i in range(self.fighter_num - 1):
if i == 0:
temp_position = [leader_position + np.array([-x_width, 0])]
expected_poses.append(temp_position)
elif i == 1:
temp_position = [leader_position + np.array([-x_width, -y_width])]
expected_poses.append(temp_position)
elif i == 2:
temp_position = [leader_position + np.array([-x_width, -2*y_width])]
expected_poses.append(temp_position)
elif i == 3:
temp_position = [leader_position + np.array([-x_width, 2 * y_width])]
expected_poses.append(temp_position)
elif i == 4:
temp_position = [leader_position + np.array([-x_width, 3*y_width ])]
expected_poses.append(temp_position)
elif i == 5:
temp_position = [leader_position + np.array([0 , y_width * 3 / 2])]
expected_poses.append(temp_position)
elif i == 6:
temp_position = [leader_position + np.array([0, -y_width *3/ 2])]
expected_poses.append(temp_position)
elif i == 7:
temp_position = [leader_position + np.array([x_width, -y_width * 0.5])]
expected_poses.append(temp_position)
elif i == 8:
temp_position = [leader_position + np.array([x_width, -y_width * 0.5])]
expected_poses.append(temp_position)
dists = np.array([[np.linalg.norm(np.array([self_pos[i][0],self_pos[i][1]]) - pos) for pos in expected_poses] for i in range(self.fighter_num) if i != self.leader_id])
ri, ci = self._bipartite_min_dists(dists)
if y <= self.leader_id:
ci_v1 = y
else:
ci_v1 = y - 1
relative_value = expected_poses[ci[ci_v1]] - np.array([self_pos[y][0],self_pos[y][1]])
theta_start = np.arctan2(relative_value[0][1], relative_value[0][0])
if theta_start < 0:
theta_start += 2 * np.pi
course = (int)((theta_start / (2 * np.pi)) * 360)
fighter_action[y][0] = course
                elif formation_flag == 4:  ## net-mouth (funnel) formation
y_width = 45.0
if self.color == 'red':
x_width = 45.0
else:
x_width = -45.0
                    ## determine the expected positions -- this is the key step
expected_poses = []
leader_position = np.array([self_pos[self.leader_id][0],self_pos[self.leader_id][1]])
for i in range(self.fighter_num - 1):
if i == 0:
temp_position = [leader_position + np.array([0, -y_width])]
expected_poses.append(temp_position)
elif i == 1:
temp_position = [leader_position + np.array([0.5 * x_width, -2 * y_width])]
expected_poses.append(temp_position)
elif i == 2:
temp_position = [leader_position + np.array([1 * x_width, -3 * y_width])]
expected_poses.append(temp_position)
elif i == 3:
temp_position = [leader_position +
|
np.array([2 * x_width, -4 * y_width])
|
numpy.array
|
import numpy as np
import unittest
from .context import stltovoxel # noqa: F401
from stltovoxel import slice
class TestSlice(unittest.TestCase):
def test_where_line_crosses_z(self):
p1 = np.array([2, 4, 1])
p2 = np.array([1, 2, 5])
self.assertTrue((slice.where_line_crosses_z(p1, p2, 1) == p1).all())
self.assertTrue((slice.where_line_crosses_z(p1, p2, 5) == p2).all())
self.assertTrue((slice.where_line_crosses_z(p1, p2, 3) == np.array([1.5, 3, 3])).all())
self.assertTrue((slice.where_line_crosses_z(np.array([0, 0, 0]), np.array([0, 1, 1]), 0.5) ==
np.array([0.0, 0.5, 0.5])).all())
def test_linear_interpolation(self):
p1 = np.array([2, 4, 1])
p2 = np.array([1, 2, 5])
self.assertTrue((slice.linear_interpolation(p1, p2, 0) == p1).all())
self.assertTrue((slice.linear_interpolation(p1, p2, 1) == p2).all())
self.assertTrue((slice.linear_interpolation(p1, p2, .5) == np.array([1.5, 3, 3])).all())
def test_triangle_to_intersecting_lines(self):
pixels = np.zeros((100, 100), dtype=bool)
lines = []
tri = np.array([
[2, 4, 1],
[1, 2, 5],
[3, 2, 3]
])
slice.triangle_to_intersecting_lines(tri, 4, pixels, lines)
expected = np.array([
((1.25, 2.5, 4.0), (2.0, 2.0, 4.0)),
])
self.assertTrue((expected == lines).all())
def test_triangle_to_intersecting_lines_one_point_same(self):
pixels = np.zeros((100, 100), dtype=bool)
lines = []
tri = np.array([
(2, 4, 1),
(1, 2, 5),
(3, 2, 3)
])
slice.triangle_to_intersecting_lines(tri, 3, pixels, lines)
expected = np.array([
((1.5, 3, 3), (3, 2, 3)),
])
self.assertTrue((expected == lines).all())
def test_triangle_to_intersecting_lines_two_point_same(self):
pixels = np.zeros((100, 100), dtype=bool)
lines = []
tri = np.array([
[2, 4, 3],
[3, 2, 3],
[1, 2, 5],
])
slice.triangle_to_intersecting_lines(tri, 3, pixels, lines)
expected = np.array([
(tri[0], tri[1]),
])
self.assertTrue((expected == lines).all())
def test_triangle_to_intersecting_lines_three_point_same(self):
pixels =
|
np.zeros((100, 100), dtype=bool)
|
numpy.zeros
|
import pytest
import numpy as np
import timeit
import imp
import os
import sys
import warnings
# Needed when running mpiexec. Be sure to run from tests directory.
if 'PYTHONPATH' not in os.environ:
base_path = os.path.abspath('..')
sys.path.insert(0, base_path)
from MLMCPy.mlmc import MLMCSimulator
from MLMCPy.model import ModelFromData
from MLMCPy.input import RandomInput
from MLMCPy.input import InputFromData
from tests.testing_scripts.spring_mass import SpringMassModel
# Create list of paths for each data file.
# Used to parametrize tests.
my_path = os.path.dirname(os.path.abspath(__file__))
data_path = my_path + "/../testing_data"
@pytest.fixture
def random_input():
"""
Creates a RandomInput object that produces samples from a
uniform distribution.
"""
return RandomInput()
@pytest.fixture
def data_input():
"""
Creates an InputFromData object that produces samples from a file
containing spring mass input data.
"""
return InputFromData(os.path.join(data_path, "spring_mass_1D_inputs.txt"),
shuffle_data=False)
@pytest.fixture
def data_input_2d():
"""
Creates an InputFromData object that produces samples from a file
containing two dimensional data.
"""
return InputFromData(os.path.join(data_path, "2D_test_data.csv"),
shuffle_data=False)
@pytest.fixture
def beta_distribution_input():
"""
Creates a RandomInput object that produces samples from a
beta distribution.
"""
np.random.seed(1)
def beta_distribution(shift, scale, alpha, beta, size):
return shift + scale * np.random.beta(alpha, beta, size)
return RandomInput(distribution_function=beta_distribution,
shift=1.0, scale=2.5, alpha=3., beta=2.)
@pytest.fixture
def spring_models():
"""
Creates a list of three SpringMassModel objects of increasing fidelity.
"""
model_level1 = SpringMassModel(mass=1.5, time_step=1.0, cost=1.0)
model_level2 = SpringMassModel(mass=1.5, time_step=0.1, cost=10.0)
model_level3 = SpringMassModel(mass=1.5, time_step=0.01, cost=100.0)
return [model_level1, model_level2, model_level3]
@pytest.fixture
def models_from_data():
"""
Creates a list of three ModelFromData objects of increasing fidelity.
:return:
"""
input_filepath = os.path.join(data_path, "spring_mass_1D_inputs.txt")
output1_filepath = os.path.join(data_path, "spring_mass_1D_outputs_1.0.txt")
output2_filepath = os.path.join(data_path, "spring_mass_1D_outputs_0.1.txt")
output3_filepath = os.path.join(data_path,
"spring_mass_1D_outputs_0.01.txt")
model1 = ModelFromData(input_filepath, output1_filepath, 1.)
model2 = ModelFromData(input_filepath, output2_filepath, 4.)
model3 = ModelFromData(input_filepath, output3_filepath, 16.)
return [model1, model2, model3]
@pytest.fixture
def models_from_2d_data():
"""
Creates a list of three ModelFromData objects with a small amount of
two dimensional data.
"""
input_filepath = os.path.join(data_path, "2D_test_data.csv")
output1_filepath = os.path.join(data_path, "2D_test_data_output.csv")
output2_filepath = os.path.join(data_path, "2D_test_data_output.csv")
output3_filepath = os.path.join(data_path, "2D_test_data_output.csv")
model1 = ModelFromData(input_filepath, output1_filepath, 1.)
model2 = ModelFromData(input_filepath, output2_filepath, 4.)
model3 = ModelFromData(input_filepath, output3_filepath, 16.)
return [model1, model2, model3]
@pytest.fixture
def filename_2d_5_column_data():
"""
Creates a string containing the path to a file with a large number of rows
of data with five columns.
"""
return os.path.join(data_path, "2D_test_data_long.csv")
@pytest.fixture
def filename_2d_3_column_data():
"""
Creates a string containing the path to a file with a large number of rows
of data with three columns.
"""
return os.path.join(data_path, "2D_test_data_output_3_col.csv")
@pytest.fixture
def comm():
"""
Creates a MPI.COMM_WORLD object for working with multi-process information.
"""
try:
imp.find_module('mpi4py')
from mpi4py import MPI
return MPI.COMM_WORLD
except ImportError:
class FakeCOMM:
def __init__(self):
self.size = 1
self.rank = 0
@staticmethod
def allgather(thing):
return np.array([thing])
return FakeCOMM()
def test_model_from_data(data_input, models_from_data):
"""
Executes simulate() with models and inputs created from files
to ensure there are no exceptions while performing basic functionality.
"""
sim = MLMCSimulator(models=models_from_data, data=data_input)
sim.simulate(1., initial_sample_sizes=20)
def test_model_with_random_input(beta_distribution_input, spring_models):
"""
Executes simulate() with models and inputs created from random
distributions to ensure there are no exceptions while performing basic
functionality.
"""
sim = MLMCSimulator(models=spring_models, data=beta_distribution_input)
sim.simulate(1., initial_sample_sizes=20)
def test_for_verbose_exceptions(data_input, models_from_data):
"""
Executes simulate() with verbose enabled to ensure that there are
no exceptions while in verbose mode.
"""
# Redirect the verbose out to null.
stdout = sys.stdout
with open(os.devnull, 'w') as f:
sys.stdout = f
sim = MLMCSimulator(models=models_from_data, data=data_input)
sim.simulate(1., initial_sample_sizes=20, verbose=True)
# Put stdout back in place.
sys.stdout = stdout
def test_simulate_exception_for_invalid_parameters(data_input,
models_from_data):
"""
Ensures that expected exceptions occur when running simulate() with invalid
parameters.
"""
test_mlmc = MLMCSimulator(models=models_from_data, data=data_input)
with pytest.raises(ValueError):
test_mlmc.simulate(epsilon=-.1, initial_sample_sizes=20)
with pytest.raises(TypeError):
test_mlmc.simulate(epsilon='one', initial_sample_sizes=20)
with pytest.raises(TypeError):
test_mlmc.simulate(epsilon=.1, initial_sample_sizes='five')
with pytest.raises(TypeError):
test_mlmc.simulate(epsilon=.1, initial_sample_sizes=5, target_cost='3')
with pytest.raises(ValueError):
test_mlmc.simulate(epsilon=.1, initial_sample_sizes=5, target_cost=-1)
with pytest.raises(ValueError):
test_mlmc.simulate(epsilon=.1, initial_sample_sizes=1)
with pytest.raises(ValueError):
test_mlmc.simulate(epsilon=.1, initial_sample_sizes=[5, 4, 3, 2])
def test_simulate_expected_output_types(data_input, models_from_data):
"""
Tests the data types returned by simulate().
"""
test_mlmc = MLMCSimulator(models=models_from_data, data=data_input)
result, sample_count, variances = \
test_mlmc.simulate(epsilon=1., initial_sample_sizes=20)
assert isinstance(result, np.ndarray)
assert isinstance(sample_count, np.ndarray)
assert isinstance(variances, np.ndarray)
@pytest.mark.parametrize("num_qoi, variances, epsilons",
[[1, [[4.], [1.]], [.1]],
[2, [[4., 4.], [1, 1.]], [.1, .01]],
[3, [[4., 4., 4.], [1, 1., 1.]], [.1, 1., .01]]])
def test_optimal_sample_sizes_expected_outputs(num_qoi, variances, epsilons,
data_input, models_from_data):
"""
    Tests sample sizes produced by simulator's compute_optimal_sample_sizes()
against expected computed sample sizes for various sets of parameters.
"""
test_mlmc = MLMCSimulator(models=models_from_data[:2], data=data_input)
data_input._data = np.broadcast_to(data_input._data,
(data_input._data.shape[0], num_qoi))
test_mlmc._epsilons = epsilons
costs = np.array([1., 4.])
test_mlmc._compute_optimal_sample_sizes(costs, np.array(variances))
# Check results.
sample_sizes = test_mlmc._sample_sizes
if num_qoi == 1:
expected_sample_size = [800, 200]
else:
expected_sample_size = [80000, 20000]
assert np.all(np.isclose(sample_sizes, expected_sample_size, atol=1))
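# Worked check of the expected values above (hedged: this assumes MLMCPy uses the
# standard MLMC sample-size formula N_l = eps**-2 * sqrt(V_l / C_l) * sum_k sqrt(V_k * C_k)):
# with variances [4, 1], costs [1, 4] and epsilon = 0.1, sum_k sqrt(V_k * C_k) = 2 + 2 = 4,
# so N_0 = 100 * 2 * 4 = 800 and N_1 = 100 * 0.5 * 4 = 200. In the multi-QoI cases the
# tightest epsilon (0.01) dominates, scaling both sizes by 100 to [80000, 20000].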
def test_costs_and_initial_variances_spring_models(beta_distribution_input,
spring_models):
"""
Tests costs and variances computed by simulator's
compute_costs_and_variances() against expected values based on a
beta distribution.
"""
sim = MLMCSimulator(models=spring_models, data=beta_distribution_input)
np.random.seed(1)
sim._initial_sample_sizes = np.array([100,100,100])
costs, variances = sim._compute_costs_and_variances()
true_variances = np.array([[8.245224951411819],
[0.0857219498864355],
[7.916295509470576e-06]])
true_costs = np.array([1., 11., 110.])
assert np.all(np.isclose(true_costs, costs))
assert np.all(np.isclose(true_variances, variances, rtol=.1))
def test_costs_and_initial_variances_models_from_data(data_input,
models_from_data):
"""
Tests costs and variances computed by simulator's
compute_costs_and_variances() against expected values based on data loaded
from files.
"""
np.random.seed(1)
sim = MLMCSimulator(models=models_from_data, data=data_input)
sim._initial_sample_sizes = np.array([100,100,100])
costs, variances = sim._compute_costs_and_variances()
true_variances = np.array([[9.262628271266264],
[0.07939834631411287],
[5.437083709623372e-06]])
true_costs = np.array([1.0, 5.0, 20.0])
assert np.all(np.isclose(true_costs, costs))
assert np.all(np.isclose(true_variances, variances, rtol=.1))
def test_calculate_estimate_for_springmass_random_input(beta_distribution_input,
spring_models):
"""
Tests simulator estimate against expected value for beta distribution.
"""
np.random.seed(1)
# Result from 20,000 sample monte carlo spring mass simulation.
mc_20000_output_sample_mean = 12.3186216602
sim = MLMCSimulator(models=spring_models,
data=beta_distribution_input)
estimate, sample_sizes, variances = sim.simulate(0.1, 100)
assert np.isclose(estimate[0], mc_20000_output_sample_mean, atol=.25)
def test_monte_carlo_estimate_value(data_input, models_from_data):
"""
Tests simulator estimate against expected value for spring mass file data.
"""
np.random.seed(1)
# Result from 20,000 sample monte carlo spring mass simulation.
mc_20000_output_sample_mean = 12.3186216602
# Passing in one model into MLMCSimulator should make it run in monte
# carlo simulation mode.
models = [models_from_data[0]]
sim = MLMCSimulator(models=models, data=data_input)
estimate, sample_sizes, variances = sim.simulate(.05, 50)
assert np.isclose(estimate, mc_20000_output_sample_mean, atol=.25)
def test_hard_coded_springmass_random_input(beta_distribution_input,
spring_models, comm):
"""
Tests simulator estimate and variances against precomputed values.
"""
np.random.seed(1)
mlmc_hard_coded_mean = [12.274674424393805]
mlmc_hard_coded_variance = [0.01078008]
sim = MLMCSimulator(models=spring_models,
data=beta_distribution_input)
all_sample_sizes = np.array([1113, 34, 0])
get_cpu_samples = np.vectorize(sim._determine_num_cpu_samples)
sim._cpu_sample_sizes = get_cpu_samples(all_sample_sizes)
sim._determine_input_output_size()
sim._caching_enabled = False
sim._sample_sizes = all_sample_sizes
np.random.seed(1)
estimate, cpu_sample_sizes, variances = sim._run_simulation()
assert np.all(np.isclose(estimate, mlmc_hard_coded_mean))
assert np.all(np.isclose(variances, mlmc_hard_coded_variance))
def test_estimate_and_variance_improved_by_lower_epsilon(data_input,
models_from_data):
"""
Runs simulate with decreasing epsilons and ensures that the resulting
estimates are increasingly accurate and that the variances decrease.
"""
np.random.seed(1)
# Result from 20,000 sample monte carlo spring mass simulation.
mc_20000_output_sample_mean = 12.3186216602
sim = MLMCSimulator(models=models_from_data,
data=data_input)
estimates = np.zeros(3)
variances = np.zeros_like(estimates)
for i, epsilon in enumerate([1., .5, .1]):
estimates[i], sample_sizes, variances[i] = \
sim.simulate(epsilon=epsilon, initial_sample_sizes=50)
error = np.abs(estimates - mc_20000_output_sample_mean)
assert error[0] > error[1] > error[2]
assert variances[0] > variances[1] > variances[2]
def test_always_at_least_one_sample_taken(data_input, models_from_data):
sim = MLMCSimulator(models=models_from_data, data=data_input)
estimates, sample_sizes, variances = sim.simulate(epsilon=5.,
initial_sample_sizes=100)
assert np.sum(sample_sizes) > 0
def test_estimate_and_variance_improved_by_higher_target_cost(data_input,
models_from_data):
"""
Runs simulator with increasing target costs and ensures that the resulting
estimates are increasingly accurate and variances decrease.
"""
np.random.seed(1)
# Result from 20,000 sample monte carlo spring mass simulation.
mc_20000_output_sample_mean = 12.3186216602
sim = MLMCSimulator(models=models_from_data, data=data_input)
estimates = np.zeros(3)
variances = np.zeros_like(estimates)
sample_sizes = np.zeros((3, 3))
for i, target_cost in enumerate([5, 25, 500]):
estimates[i], sample_sizes[i], variances[i] = \
sim.simulate(epsilon=.5,
initial_sample_sizes=100,
target_cost=target_cost)
error = np.abs(estimates - mc_20000_output_sample_mean)
assert error[0] > error[1] > error[2]
assert np.sum(sample_sizes[0]) < np.sum(sample_sizes[1])
assert np.sum(sample_sizes[1]) < np.sum(sample_sizes[2])
assert variances[0] > variances[1] > variances[2]
@pytest.mark.parametrize("epsilon", [1., .5, .1, .05])
def test_final_variances_less_than_epsilon_goal(data_input,
models_from_data,
epsilon):
"""
    Ensures that the square roots of the variances produced by the simulator
    are lower than the specified epsilon parameter.
"""
sim = MLMCSimulator(models=models_from_data, data=data_input)
estimate, sample_sizes, variances = \
sim.simulate(epsilon=epsilon,
initial_sample_sizes=50)
assert np.sqrt(variances[0]) < epsilon
assert not np.isclose(variances[0], 0.)
@pytest.mark.parametrize('cpu_sample_sizes', [[1, 0, 0], [1, 0, 1], [1, 1, 0],
[1, 1, 1], [1, 2, 1], [10, 5, 2]])
def test_outputs_for_small_sample_sizes(data_input, models_from_data,
cpu_sample_sizes, comm):
"""
Test various combinations of small sample sizes to ensure stability of
simulator under these conditions as well as accuracy of estimate and
variances.
"""
output1_filepath = os.path.join(data_path, "spring_mass_1D_outputs_1.0.txt")
output2_filepath = os.path.join(data_path, "spring_mass_1D_outputs_0.1.txt")
output3_filepath = os.path.join(data_path,
"spring_mass_1D_outputs_0.01.txt")
outputs = list()
outputs.append(np.genfromtxt(output1_filepath)[comm.rank::comm.size])
outputs.append(np.genfromtxt(output2_filepath)[comm.rank::comm.size])
outputs.append(np.genfromtxt(output3_filepath)[comm.rank::comm.size])
all_sample_sizes = np.array(cpu_sample_sizes) * comm.size
sim = MLMCSimulator(models=models_from_data, data=data_input)
sim._caching_enabled = False
sim._cpu_sample_sizes = np.array(cpu_sample_sizes)
sim._sample_sizes = np.copy(all_sample_sizes)
sim._determine_input_output_size()
sim_estimate, ss, sim_variance = sim._run_simulation()
# Acquire samples in same sequence simulator would.
samples = []
sample_index = 0
for i, s in enumerate(cpu_sample_sizes):
output = outputs[i][sample_index:sample_index + s]
if i > 0:
lower_output = outputs[i-1][sample_index:sample_index + s]
else:
lower_output = np.zeros_like(output)
diff = output - lower_output
all_diff = np.concatenate(comm.allgather(diff))
samples.append(all_diff)
sample_index += s
# Compute mean and variances.
sample_mean = 0.
sample_variance = 0.
for i, sample in enumerate(samples):
if all_sample_sizes[i] > 0:
sample_mean += np.sum(sample, axis=0) / all_sample_sizes[i]
sample_variance += np.var(sample, axis=0) / all_sample_sizes[i]
# Test sample computations vs simulator.
assert np.isclose(sim_estimate, sample_mean, atol=10e-15)
assert np.isclose(sim_variance, sample_variance, atol=10e-15)
@pytest.mark.parametrize("cache_size", [10, 7, 200])
def test_output_caching(data_input, models_from_data, cache_size):
"""
Runs simulator's _evaluate_sample() with and without caching enabled
to ensure consistency of outputs. Also tests the estimate and variances
with and without caching.
"""
sim = MLMCSimulator(models=models_from_data, data=data_input)
# Run simulation to generate cache.
estimate1, sample_sizes, variances1 = sim.simulate(1., cache_size)
# Collect output from _evaluate_sample with caching enabled.
num_levels = len(models_from_data)
max_samples = np.max(sim._sample_sizes)
outputs_with_caching = np.zeros((num_levels, max_samples, 1))
outputs_without_caching = np.zeros_like(outputs_with_caching)
data_input.reset_sampling()
for level in range(num_levels):
num_samples = sim._sample_sizes[level]
if num_samples == 0:
continue
samples = sim._draw_samples(num_samples)
for i, sample in enumerate(samples):
outputs_with_caching[level, i] = \
sim._evaluate_sample(sample, level)
# Collect same data with caching disabled.
sim._caching_enabled = False
sim._data.reset_sampling()
for level in range(num_levels):
num_samples = sim._sample_sizes[level]
if num_samples == 0:
continue
samples = sim._draw_samples(num_samples)
for i, sample in enumerate(samples):
outputs_without_caching[level, i] = \
sim._evaluate_sample(sample, level)
assert np.all(np.isclose(outputs_without_caching, outputs_with_caching))
estimate2, sample_sizes, variances2 = sim._run_simulation()
# Now compare estimator and output variances.
# If caching is working properly, they should match.
assert np.array_equal(estimate1, estimate2)
assert np.array_equal(variances1, variances2)
def test_input_output_with_differing_column_count(filename_2d_5_column_data,
filename_2d_3_column_data):
"""
Ensures that simulator handles input and output data with differing numbers
of columns.
"""
model1 = ModelFromData(filename_2d_5_column_data,
filename_2d_3_column_data,
1.)
model2 = ModelFromData(filename_2d_5_column_data,
filename_2d_3_column_data,
4.)
data_input = InputFromData(filename_2d_5_column_data)
sim = MLMCSimulator(models=[model1, model2], data=data_input)
sim.simulate(100., 10)
def test_fail_if_model_outputs_do_not_match_shapes(filename_2d_5_column_data,
filename_2d_3_column_data):
"""
Ensures simulator throws an exception if inputs and outputs with differing
numbers of samples are provided.
"""
model1 = ModelFromData(filename_2d_5_column_data,
filename_2d_5_column_data,
1.)
model2 = ModelFromData(filename_2d_5_column_data,
filename_2d_3_column_data,
4.)
data_input = InputFromData(filename_2d_5_column_data)
with pytest.raises(ValueError):
MLMCSimulator(models=[model1, model2], data=data_input)
def test_hard_coded_test_2_level(data_input, models_from_data):
"""
Test simulator cost, initial variance, and sample size computations against
precomputed values with two models.
"""
# Get simulation results.
np.random.seed(1)
models = models_from_data[:2]
sim = MLMCSimulator(models=models, data=data_input)
sim_estimate, sim_sample_sizes, output_variances = \
sim.simulate(epsilon=1., initial_sample_sizes=200)
sim_costs, sim_variances = sim._compute_costs_and_variances()
# Results from hard coded testing with same parameters.
hard_coded_variances = np.array([[7.659619446414387],
[0.07288894751770203]])
hard_coded_sample_sizes = np.array([9, 0])
hard_coded_estimate = np.array([11.639166038233583])
assert np.all(np.isclose(sim_variances, hard_coded_variances))
assert np.all(np.isclose(sim_estimate, hard_coded_estimate))
assert np.all(np.isclose(sim._sample_sizes, hard_coded_sample_sizes))
def test_hard_coded_test_3_level(data_input, models_from_data):
"""
Test simulator cost, initial variance, and sample size computations against
precomputed values with three models.
"""
# Get simulation results.
sim = MLMCSimulator(models=models_from_data, data=data_input)
sim_estimate, sim_sample_sizes, output_variances = \
sim.simulate(epsilon=1., initial_sample_sizes=200)
sim_costs, sim_variances = sim._compute_costs_and_variances()
# Results from hard coded testing with same parameters.
hard_coded_variances = np.array([[7.659619446414387],
[0.07288894751770203],
[7.363159154583542e-06]])
hard_coded_sample_sizes = np.array([9, 0, 0])
hard_coded_estimate = np.array([11.639166038233583])
assert np.all(np.isclose(sim_variances, hard_coded_variances))
assert np.all(np.isclose(sim_estimate, hard_coded_estimate))
assert np.all(np.isclose(sim._sample_sizes, hard_coded_sample_sizes))
def test_graceful_handling_of_insufficient_samples(data_input_2d, comm,
models_from_2d_data):
"""
Ensure that the simulator does not throw an exception when insufficient
samples are provided.
"""
# Warnings will be triggered; avoid displaying them during testing.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# We only have five rows of data, so ignore cpus of rank > 4.
# An intentional exception would be thrown by the simulator.
if comm.rank > 4:
return
# Test when sampling with too large initial sample size.
sim = MLMCSimulator(models=models_from_2d_data, data=data_input_2d)
sim.simulate(epsilon=1., initial_sample_sizes=10)
# Test when sampling with too large computed sample sizes.
sim = MLMCSimulator(models=models_from_2d_data, data=data_input_2d)
sim.simulate(epsilon=.01, initial_sample_sizes=5)
def test_multiple_run_consistency(data_input, models_from_data):
"""
Ensure that simulator can be run multiple times without exceptions and
returns consistent results.
"""
sim = MLMCSimulator(models=models_from_data, data=data_input)
estimate1, sample_sizes1, variances1 = \
sim.simulate(epsilon=1., initial_sample_sizes=100)
sim = MLMCSimulator(models=models_from_data, data=data_input)
estimate2, sample_sizes2, variances2 = \
sim.simulate(epsilon=1., initial_sample_sizes=100)
estimate3, sample_sizes3, variances3 = \
sim.simulate(epsilon=1., initial_sample_sizes=100)
assert np.all(np.isclose(estimate1, estimate2))
assert np.all(np.isclose(estimate2, estimate3))
assert np.all(np.isclose(sample_sizes1, sample_sizes2))
assert np.all(np.isclose(sample_sizes2, sample_sizes3))
assert np.all(
|
np.isclose(variances1, variances2)
|
numpy.isclose
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# python3
# Make this standard template for testing and training
import networkx as nx
# from networkx.algorithms.approximation import independent_set
import numpy as np
import pandas as pd
import scipy.io as sio
import time
from collections import deque
from copy import deepcopy
from scipy.io import savemat
from scipy.spatial import distance_matrix
import dwave_networkx as dnx
import sys
import os
from copy import copy, deepcopy
from itertools import chain, combinations
from heuristics import greedy_search, dist_greedy_search, local_greedy_search, mlp_gurobi
# visualization
from graph_util import *
from runtime_config import flags
flags.DEFINE_string('output', 'wireless', 'output folder')
flags.DEFINE_string('test_datapath', './data/ER_Graph_Uniform_NP20_test', 'test dataset')
flags.DEFINE_string('wt_sel', 'qr', 'qr: queue length * rate, q/r: q/r, q: queue length only, otherwise: random')
flags.DEFINE_float('load_min', 0.01, 'traffic load min')
flags.DEFINE_float('load_max', 0.15, 'traffic load max')
flags.DEFINE_float('load_step', 0.01, 'traffic load step')
flags.DEFINE_integer('instances', 10, 'number of layers.')
flags.DEFINE_integer('num_channels', 1, 'number of channels')
flags.DEFINE_integer('opt', 0, 'test algorithm')
flags.DEFINE_string('graph', 'poisson', 'type of graphs')
from agent_dqn_util import A2CAgent
from directory import find_model_folder
model_origin = find_model_folder(flags.FLAGS, 'exp')
flags1 = deepcopy(flags.FLAGS)
agent = A2CAgent(flags1, 64000)
try:
agent.load(model_origin)
except:
print("unable to load {}".format(model_origin))
n_instances = flags.FLAGS.instances
def emv(samples, pemv, n=3):
assert samples.size == pemv.size
k = float(2/(n+1))
return samples * k + pemv * (1-k)
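# Hedged note: emv() is an exponential moving average with smoothing factor
# k = 2 / (n + 1); with the default n=3, k = 0.5, so for example
#     emv(np.array([4.0]), np.array([2.0]))  # -> array([3.0])
# weights the new sample and the previous average equally.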
def channel_collision(adj, nflows, link_rates_ts, schedule_mv):
"""Return non-collision set of a schedule"""
schedule = schedule_mv % nflows
    wts = np.zeros(shape=(nflows,), dtype=bool)
if schedule.size > 0:
wts[schedule] = 1
non_collision = wts.copy()
for s in schedule:
_, nb_set = np.nonzero(adj[s])
if np.sum(wts[nb_set]) > 0:
non_collision[s] = 0
capacity = np.zeros(shape=(nflows,))
capacity[non_collision] = link_rates_ts[non_collision]
return capacity
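# Illustrative example (hypothetical 3-link conflict path 0-1-2): scheduling
# links 0 and 2 keeps both, since neither sees the other in its neighbor set,
# so capacity equals their link rates and is zero elsewhere; scheduling links
# 0 and 1 zeroes both, because each has the other as a scheduled neighbor.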
gtype = flags.FLAGS.graph
train = True
n_networks = 500
# n_instances = 10
timeslots = 64
lp = 5
algoname = 'DGCN-LGS'
if train:
# algolist = ['DGCN-LGS']
# algolist = ['Greedy', algoname]
algolist = ['Greedy', 'shadow', algoname]
else:
# algolist = ['Greedy', 'DGCN-LGS']
# algolist = ['Greedy', algoname, 'Benchmark']
algolist = ['Greedy', 'shadow', algoname]
if flags.FLAGS.opt == 0:
algoname = 'DGCN-LGS'
elif flags.FLAGS.opt == 1:
algoname = 'DGCN-LGS-it'
algolist = [algoname]
elif flags.FLAGS.opt == 2 or flags.FLAGS.opt == 4:
algoname = 'DGCN-RS'
algolist = [algoname]
elif flags.FLAGS.opt == 3:
algoname = 'CGCN-RS'
algolist = [algoname]
else:
sys.exit("Unsupported opt {}".format(flags.FLAGS.opt))
algoref = algolist[0]
sim_area = 250
sim_node = 100
sim_rc = 1
sim_ri = 4
n_ch = 1
p_overlap = 0.8
# link rate high and low bound (number of packets per time slot)
sim_rate_hi = 100
sim_rate_lo = 0
# Testing load range (upper limit = 1/(average degree of conflict graphs))
# 10.78 for 10 graphs, 10.56 for 20 graphs
load_min = flags.FLAGS.load_min
load_max = flags.FLAGS.load_max
load_step = flags.FLAGS.load_step
wt_sel = flags.FLAGS.wt_sel
output_dir = flags.FLAGS.output
output_csv = os.path.join(output_dir,
'metric_vs_load_summary_{}-channel_utility-{}_opt-{}_load-{:.1f}-{:.1f}_train.csv'
.format(n_ch, wt_sel, flags.FLAGS.opt, load_min, load_max)
)
res_list = []
res_df = pd.DataFrame(columns=['graph',
'seed',
'load',
'name',
'avg_queue_len',
'50p_queue_len',
'95p_queue_len',
'5p_queue_len',
'avg_utility',
'avg_degree'])
if os.path.isfile(output_csv):
res_df = pd.read_csv(output_csv, index_col=0)
d_array = np.zeros((n_networks,), dtype=float)
if train:
datapath = flags.FLAGS.datapath
epochs = flags.FLAGS.epochs
else:
datapath = flags.FLAGS.test_datapath
epochs = 1
val_mat_names = sorted(os.listdir(datapath))
cnt = 0
print("Average degree of all conflict graphs: {}".format(np.mean(d_array)))
np.random.seed(1)
if train:
loss = 1.0
else:
loss = np.nan
wts_sample_file = os.path.join(output_dir, 'samples.txt')
load_array = np.round(np.arange(load_min, load_max+load_step, load_step), 2)
# load = load_array[np.random.randint(0, len(load_array) - 1)]
buffer = deque(maxlen=20)
# rewardfun = lambda a, b: a*.8+b*0.2
rewardfun = lambda a, b: a*0+b*1.0
pemv = np.array([2.0])
pemv_best = np.array([1.05])
gtypes = ['ba2', 'star30']
gtypep = np.array([0.2, 0.8])
for i in range(100*flags.FLAGS.epochs):
idx = np.random.randint(1, len(val_mat_names))
gtypei = gtypes[np.random.choice(2, p=gtypep)]
if gtypei == 'poisson':
mat_contents = sio.loadmat(os.path.join(datapath, val_mat_names[idx]))
gdict = mat_contents['gdict'][0, 0]
seed = mat_contents['random_seed'][0, 0]
graph_c, graph_i = poisson_graphs_from_dict(gdict)
adj_gK = nx.adjacency_matrix(graph_i)
flows = [e for e in graph_c.edges]
nflows = len(flows)
elif gtypei == 'star30':
graph_i = nx.star_graph(30)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'star20':
graph_i = nx.star_graph(20)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'star10':
graph_i = nx.star_graph(10)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'ba1':
graph_i = nx.barabasi_albert_graph(70, 1)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'ba2':
graph_i = nx.barabasi_albert_graph(100, 2)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'er':
graph_i = nx.erdos_renyi_graph(50, 0.1)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'er1':
graph_i = nx.erdos_renyi_graph(100, 0.1)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'tree':
try:
graph_i = nx.random_powerlaw_tree(50, gamma=3.0, seed=i, tries=2000)
except:
graph_i = nx.random_powerlaw_tree(50, gamma=3.0, tries=1000)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
elif gtypei == 'tree-line':
try:
graph_c = nx.random_powerlaw_tree(50, gamma=3.0, seed=i, tries=2000)
except:
graph_c = nx.random_powerlaw_tree(50, gamma=3.0, tries=1000)
graph_i = nx.line_graph(graph_c)
adj_gK = nx.adjacency_matrix(graph_i)
nflows = adj_gK.shape[0]
seed = i
else:
mat_contents = sio.loadmat(os.path.join(datapath, val_mat_names[idx]))
adj_gK = mat_contents['adj']
nflows = adj_gK.shape[0]
seed = i
graph_i = nx.from_scipy_sparse_matrix(adj_gK)
netcfg = "{}: s {}, n {}, f {}, t {}".format(gtypei, seed, sim_node, nflows, timeslots)
np.random.seed(idx)
d_list = []
for v in graph_i:
d_list.append(graph_i.degree[v])
avg_degree = np.nanmean(d_list)
max_degree = np.amax(d_list)
load = load_array[np.random.randint(0, len(load_array) - 1)]
treeseed = int(1000 * time.time()) % 10000000
np.random.seed(idx)
# np.random.seed(treeseed)
arrival_rate = 0.5 * (sim_rate_lo + sim_rate_hi) * load
interarrivals = np.random.exponential(1.0/arrival_rate, (nflows, int(2*timeslots*arrival_rate)))
arrival_time = np.cumsum(interarrivals, axis=1)
acc_pkts = np.zeros(shape=(nflows, timeslots))
for t in range(0, timeslots):
acc_pkts[:, t] = np.count_nonzero(arrival_time < t, axis=1)
arrival_pkts = np.diff(acc_pkts, prepend=0)
arrival_pkts = arrival_pkts.transpose()
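    # Note on the block above: exponential inter-arrival times with rate
    # `arrival_rate` give a Poisson arrival process per flow; acc_pkts[:, t]
    # counts arrivals before slot t, and np.diff converts the cumulative counts
    # into per-slot packet arrivals (shape timeslots x nflows after the transpose).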
link_rates = np.random.normal(0.5 * (sim_rate_lo + sim_rate_hi), 0.25 * (sim_rate_hi - sim_rate_lo),
size=[timeslots, nflows, n_ch])
link_rates = link_rates.astype(int)
link_rates[link_rates < sim_rate_lo] = sim_rate_lo
link_rates[link_rates > sim_rate_hi] = sim_rate_hi
to_print = []
time_start = time.time()
weight_samples = []
queue_mtx_dict = {}
dep_pkts_dict = {}
util_mtx_dict = {}
schedule_dict = {}
wts_dict = {}
queue_algo = np.zeros(shape=(lp, nflows))
dep_pkts_algo = np.zeros(shape=(lp, nflows))
queue_shadow = np.zeros(shape=(lp, nflows))
dep_pkts_shadow = np.zeros(shape=(lp, nflows))
wts_shadow = np.zeros(shape=(lp, nflows))
for algo in algolist:
queue_mtx_dict[algo] = np.zeros(shape=(timeslots, nflows))
dep_pkts_dict[algo] = np.zeros(shape=(timeslots, nflows))
util_mtx_dict[algo] = np.zeros(timeslots)
schedule_dict[algo] = np.zeros(shape=(timeslots, nflows))
util_mtx_dict[algo][0] = 1
wts_dict[algo] = np.zeros(shape=(nflows, n_ch))
state_buff = deque(maxlen=timeslots)
mask_vec = np.arange(0, nflows)
last_emb_vec = np.zeros(shape=(nflows*n_ch, ))
last_sol_vec = np.zeros(shape=(nflows*n_ch, ))
for t in range(1, timeslots):
for algo in algolist:
queue_mtx_dict[algo][t, :] = queue_mtx_dict[algo][t-1, :] + arrival_pkts[t, :]
queue_mtx_algo = np.multiply(np.expand_dims(queue_mtx_dict[algo][t, :], axis=1), np.ones(shape=(nflows, n_ch)))
if wt_sel == 'qr':
wts0 = queue_mtx_algo * link_rates[t, :, :]
elif wt_sel == 'q':
wts0 = queue_mtx_algo
elif wt_sel == 'qor':
wts0 = queue_mtx_algo / link_rates[t, :, :]
elif wt_sel == 'qrm':
wts0 = np.minimum(queue_mtx_algo, link_rates[t, :, :])
else:
np.random.seed(i*1000+t)
wts0 = np.random.uniform(0, 1, (nflows, n_ch))
wts1 = np.reshape(wts0, nflows * n_ch, order='F')
raw_wts = np.concatenate((queue_mtx_algo, link_rates[t, :, :]), axis=1)
if algo == "Greedy":
wts_dict[algo] = wts1
mwis, total_wt = local_greedy_search(adj_gK, wts_dict[algo])
mwis0, total_wt0 = greedy_search(adj_gK, wts_dict[algo])
util_mtx_dict[algo][t] = total_wt/total_wt0
elif algo == "Greedy-Th":
wts_dict[algo] = wts1
mwis, total_wt = dist_greedy_search(adj_gK, wts_dict[algo], 0.1)
mwis0, total_wt0 = greedy_search(adj_gK, wts_dict[algo])
util_mtx_dict[algo][t] = total_wt/total_wt0
elif algo == 'Benchmark':
wts_dict[algo] = wts1
mwis, total_wt, _ = mlp_gurobi(adj_gK, wts_dict[algo])
util_mtx_dict[algo][t] = 1.0
elif algo == 'DGCN-LGS':
wts_dict[algo] = wts1
mwis0, total_wt0 = greedy_search(adj_gK, wts_dict[algo])
act_vals, state = agent.utility(adj_gK, wts1, train=train)
mwis, _ = local_greedy_search(adj_gK, act_vals)
total_wt = np.sum(wts_dict[algo][list(mwis)])
util_mtx_dict[algo][t] = total_wt / total_wt0
state_buff.append((state, act_vals, list(mwis), t))
elif algo == 'shadow':
for ip in range(0, lp):
if ip == 0:
queue_shadow[0, :] = queue_mtx_dict[algoname][t-1, :] + arrival_pkts[t, :]
else:
if t + ip < timeslots:
queue_shadow[ip, :] = queue_shadow[ip-1, :] + arrival_pkts[t+ip, :]
else:
queue_shadow[ip, :] = queue_shadow[ip - 1, :]
queue_mtx_tmp = np.multiply(np.expand_dims(queue_shadow[ip, :], axis=1), np.ones(shape=(nflows, n_ch)))
if t + ip < timeslots:
wts_i = queue_mtx_tmp * link_rates[t+ip, :, :]
mwis, total_wt = local_greedy_search(adj_gK, wts_i)
schedule_mv = np.array(list(mwis))
link_rates_ts = np.reshape(link_rates[t+ip, :, :], nflows * n_ch, order='F')
capacity = channel_collision(adj_gK, nflows, link_rates_ts, schedule_mv)
dep_pkts_shadow[ip, :] = np.minimum(queue_shadow[ip, :], capacity)
queue_shadow[ip, :] = queue_shadow[ip, :] - dep_pkts_shadow[ip, :]
else:
dep_pkts_shadow[ip, :] = dep_pkts_shadow[ip-1, :]
queue_shadow[ip, :] = queue_shadow[ip-1, :]
util_mtx_dict[algo][t] = 1
elif algo == 'scheduler':
wts_dict[algo] = wts1
mwis0, total_wt0 = greedy_search(adj_gK, wts_dict[algo])
mwis, actions, state = agent.scheduler(adj_gK, raw_wts, train=train)
mwis, total_wt = local_greedy_search(adj_gK, wts_dict[algo]*actions)
equal_wt = channel_collision(adj_gK, nflows, wts_dict[algo], np.array(list(mwis)))
total_wt = np.sum(equal_wt)
util_mtx_dict[algo][t] = total_wt / total_wt0
state_buff.append((state, actions, mask_vec, t))
else:
                sys.exit("Unsupported algorithm {}".format(algo))
schedule_mv = np.array(list(mwis))
link_rates_ts = np.reshape(link_rates[t, :, :], nflows*n_ch, order='F')
schedule_dict[algo][t, schedule_mv] = 1
capacity = channel_collision(adj_gK, nflows, link_rates_ts, schedule_mv)
if algo == 'shadow':
dep_pkts_dict[algo][t, :] =
|
np.mean(dep_pkts_shadow[:, :], axis=0)
|
numpy.mean
|
# -*- coding: utf-8 -*-
# _logsmooth.py
# Module providing the logsmooth function
# Copyright 2013 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the logsmooth() function
"""
from __future__ import division
import numpy as np
from scipy.linalg import norm
from ._dbp import dbp
def logsmooth(X, inBin, nbin=8, n=3):
"""Smooth the fft, and convert it to dB.
**Parameters:**
X : (N,) ndarray
The FFT data.
inBin : int
The bin index of the input sine wave (if any).
nbin : int, optional
The number of bins on which the averaging will be performed,
used *before* 3*inBin
n : int, optional
Around the location of the input signal and its harmonics (up to the
third harmonic), don't average for n bins.
The logsmooth algorithm uses nbin bins from 0 to 3*inBin,
    thereafter the bin size is increased by a factor of 1.1,
staying less than 2^10.
For the :math:`n` sets of bins:
:math:`inBin + i, 2*inBin + i ... n*inBin+i`, where :math:`i \\in [0,2]`
don't do averaging. This way, the noise BW
and the scaling of the tone and its harmonics are unchanged.
.. note::
Unfortunately, harmonics above the nth appear smaller than they
really are because their energy is averaged over many bins.
**Returns:**
f, p : tuple of 1d- ndarrays
The bins and smoothed FFT, expressed in dB.
.. seealso::
* :func:`plotSpectrum`, convenience function to first call
:func:`logsmooth` and then plot on a logarithmic x-axis its return
value.
* :func:`circ_smooth`, smoothing algorithm suitable for linear
x-axis plotting.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from deltasigma import dbv, ds_hann, figureMagic, logsmooth
T = 2 #s
Fs = 231e3 #Hz
N = int(np.round(T*Fs, 0)) # FFT points
freq = .1e3
t = np.arange(N)/Fs
u0 = np.sin(2*np.pi*t*freq)
u0 = u0 + .01*u0**2+.001*u0**3+.005*u0**4
U = np.fft.fft(u0 * ds_hann(N))/(N/4)
f = np.linspace(0, Fs, N + 1)
        f = f[:N//2 + 1]
plt.subplot(211)
        plt.semilogx(f, dbv(U[:N//2 + 1]))
plt.hold(True)
inBin = np.round(freq/Fs*N)
fS, US = logsmooth(U, inBin)
plt.semilogx(fS*Fs, US, 'r', linewidth=2.5)
plt.xlim([f[0]*Fs, Fs/2])
plt.ylabel('U(f) [dB]')
figureMagic(xRange=[100, 1e4], yRange=[-400, 0], name='Spectrum')
plt.subplot(212)
plt.loglog(fS[1:]*Fs, np.diff(fS*Fs))
plt.xlabel('f [Hz]')
plt.ylabel('Averaging interval [Hz]')
figureMagic(xRange=[100, 1e4])
plt.show()
"""
# preliminary sanitization of the input
if not np.prod(X.shape) == max(X.shape):
raise ValueError('Expected a (N,) or (N, 1)-shaped array.')
if len(X.shape) > 1:
X = np.squeeze(X)
inBin = int(inBin)
N = X.shape[0]
N2 = int(np.floor(N/2))
f1 = int(inBin % nbin)
startbin = np.concatenate((np.arange(f1, inBin, nbin),
np.arange(inBin, inBin + 3)
))
i = 1 # my fix
while i < n: # n can be big and xrange is not in Python3
startbin = np.concatenate((startbin,
np.arange(startbin[-1] + 1,
(inBin + 1)*(i + 1) - 1, nbin),
(i + 1)*(inBin + 1) - 1 + np.arange(0, 3)
))
i = i + 1
# the following is my fix - REP?
startbin = np.concatenate((startbin, np.array((startbin[-1] + 1,))))
m = startbin[-1] + nbin
while m < N2 - 1:
startbin = np.concatenate((startbin, np.array((m,))))
nbin =
|
np.min((nbin*1.1, 2**10))
|
numpy.min
|
#!/usr/bin/python3
import numpy as np
import nibabel as nib
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument('-indir', nargs=1, type=str)
parser.add_argument('-inpath', nargs=1, type=str)
parser.add_argument('-outdir', nargs=1, type=str)
parser.add_argument('-filename', nargs=1, type=str)
args = parser.parse_args()
# indir = args.indir[0]
inpath = args.inpath[0]
outdir = args.outdir[0]
filename = args.filename[0]
# proxy_img = nib.load(indir+ "/" + filename)
proxy_img = nib.load(inpath)
img = np.asarray(proxy_img.dataobj)
affine = proxy_img.affine
proxy_img.uncache()
img = np.squeeze(img)
# https://codereview.stackexchange.com/questions/132914/crop-black-border-of-image-using-numpy
# Mask of non-black pixels (assuming image has a single channel).
mask = img > 0
# Coordinates of non-black pixels.
coords =
|
np.argwhere(mask)
|
numpy.argwhere
|
import abc
from itertools import chain
import logging
import os
import pickle
import random
from typing import List, Union
import GPUtil
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
from scipy.stats import multivariate_normal, lognorm, norm, chi
from tensorflow.python.client import device_lib
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import trange
import math
class Algorithm(metaclass=abc.ABCMeta):
def __init__(self, module_name, name, seed, details=False, out_dir=None):
self.logger = logging.getLogger(module_name)
self.name = name
self.seed = seed
self.details = details
self.prediction_details = {}
self.out_dir = out_dir
self.torch_save = False
if self.seed is not None:
random.seed(seed)
np.random.seed(seed)
def __str__(self):
return self.name
@abc.abstractmethod
def fit(self, X):
"""
Train the algorithm on the given dataset
"""
@abc.abstractmethod
def predict(self, X):
"""
:return anomaly score
"""
def set_output_dir(self, out_dir):
self.out_dir = out_dir
def get_val_err(self):
"""
:return: reconstruction error_tc for validation set,
dimensions of num_val_time_points x num_channels
Call after training
"""
return None
def get_val_loss(self):
"""
:return: scalar loss after training
"""
return None
class PyTorchUtils(metaclass=abc.ABCMeta):
def __init__(self, seed, gpu):
self.gpu = gpu
self.seed = seed
if self.seed is not None:
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
self.framework = 0
self.torch_save = True
@property
def device(self):
return torch.device(f'cuda:{self.gpu}' if torch.cuda.is_available() and self.gpu is not None else 'cpu')
def to_var(self, t, **kwargs):
# ToDo: check whether cuda Variable.
t = t.to(self.device)
return Variable(t, **kwargs)
def to_device(self, model):
model.to(self.device)
class TensorflowUtils(metaclass=abc.ABCMeta):
def __init__(self, seed, gpu):
self.gpu = gpu
self.seed = seed
if self.seed is not None:
tf.set_random_seed(seed)
self.framework = 1
@property
def device(self):
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
return tf.device(gpus[self.gpu] if gpus and self.gpu is not None else '/cpu:0')
# class AedUtils(metaclass=abc.ABCMeta):
# def __init__(self):
# pass
#
# @staticmethod
def get_sub_seqs(x_arr, seq_len, stride=1, start_discont=np.array([])):
"""
:param start_discont: the start points of each sub-part in case the x_arr is just multiple parts joined together
:param x_arr: dim 0 is time, dim 1 is channels
:param seq_len: size of window used to create subsequences from the data
:param stride: number of time points the window will move between two subsequences
:return:
"""
excluded_starts = []
    for start in start_discont:
        if start > seq_len:
            excluded_starts.extend(range(start - seq_len + 1, start))
seq_starts = np.delete(np.arange(0, x_arr.shape[0] - seq_len + 1, stride), excluded_starts)
x_seqs = np.array([x_arr[i:i + seq_len] for i in seq_starts])
return x_seqs
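# Small illustrative example (hypothetical shapes): for x_arr of shape (10, 3),
# seq_len=4 and stride=2, seq_starts = [0, 2, 4, 6] and the returned x_seqs has
# shape (4, 4, 3); each entry is a length-4 window over the time axis.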
def get_train_data_loaders(x_seqs: np.ndarray, batch_size: int, splits: List, seed: int, shuffle: bool = False,
usetorch = True):
"""
Splits the train data between train, val, etc. Creates and returns pytorch data loaders
:param shuffle: boolean that determines whether samples are shuffled before splitting the data
:param seed: seed used for the random shuffling (if shuffling there is)
:param x_seqs: input data where each row is a sample (a sequence) and each column is a channel
:param batch_size: number of samples per batch
:param splits: list of split fractions, should sum up to 1.
:param usetorch: if True returns dataloaders, otherwise return datasets
    :return: a tuple of data loaders with the same length as splits; if len(splits) == 1, only one data loader is returned
"""
if np.sum(splits) != 1:
scale_factor = np.sum(splits)
splits = [fraction/scale_factor for fraction in splits]
if shuffle:
np.random.seed(seed)
x_seqs = x_seqs[np.random.permutation(len(x_seqs))]
np.random.seed()
split_points = [0]
for i in range(len(splits)-1):
split_points.append(split_points[-1] + int(splits[i]*len(x_seqs)))
split_points.append(len(x_seqs))
if usetorch:
loaders = tuple([DataLoader(dataset=x_seqs[split_points[i]:split_points[i+1]], batch_size=batch_size,
drop_last=False, pin_memory=True, shuffle=False) for i in range(len(splits))])
return loaders
else:
# datasets = tuple([x_seqs[split_points[i]:
# (split_points[i] + (split_points[i+1]-split_points[i])//batch_size*batch_size)]
# for i in range(len(splits))])
datasets = tuple([x_seqs[split_points[i]:split_points[i+1]]
for i in range(len(splits))])
return datasets
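# A hedged usage sketch: with x_seqs of shape (1000, seq_len, n_channels),
#     train_loader, val_loader = get_train_data_loaders(
#         x_seqs, batch_size=64, splits=[0.8, 0.2], seed=0, shuffle=True)
# yields two DataLoaders over an 80/20 split; passing usetorch=False returns
# the raw numpy splits instead of loaders.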
def fit_with_early_stopping(train_loader, val_loader, pytorch_module, patience, num_epochs, lr, verbose=True,
last_t_only=True, ret_best_val_loss=False):
"""
:param train_loader: the pytorch data loader for the training set
:param val_loader: the pytorch data loader for the validation set
:param pytorch_module:
:param patience:
:param num_epochs: the maximum number of epochs for the training
:param lr: the learning rate parameter used for optimization
:return: trained module, avg train and val loss per epoch, final loss on train + val data per channel
"""
pytorch_module.to(pytorch_module.device) # .double()
optimizer = torch.optim.Adam(pytorch_module.parameters(), lr=lr)
epoch_wo_improv = 0
pytorch_module.train()
train_loss_by_epoch = []
val_loss_by_epoch = []
best_val_loss = None
best_params = pytorch_module.state_dict()
# assuming first batch is complete
for epoch in trange(num_epochs):
if epoch_wo_improv < patience:
logging.debug(f'Epoch {epoch + 1}/{num_epochs}.')
if verbose:
GPUtil.showUtilization()
pytorch_module.train()
train_loss = []
for ts_batch in train_loader:
ts_batch = ts_batch.float().to(pytorch_module.device)
output = pytorch_module(ts_batch)
if last_t_only:
loss = nn.MSELoss(reduction="mean")(output[:, -1], ts_batch[:, -1])
else:
loss = nn.MSELoss(reduction="mean")(output, ts_batch)
pytorch_module.zero_grad()
loss.backward()
optimizer.step()
                # multiplying by the batch length to correctly account for incomplete batches
train_loss.append(loss.item()*len(ts_batch))
train_loss = np.mean(train_loss)/train_loader.batch_size
train_loss_by_epoch.append(train_loss)
# Get Validation loss
pytorch_module.eval()
val_loss = []
with torch.no_grad():
for ts_batch in val_loader:
ts_batch = ts_batch.float().to(pytorch_module.device)
output = pytorch_module(ts_batch)
if last_t_only:
loss = nn.MSELoss(reduction="mean")(output[:, -1], ts_batch[:, -1])
else:
loss = nn.MSELoss(reduction="mean")(output, ts_batch)
val_loss.append(loss.item()*len(ts_batch))
val_loss = np.mean(val_loss)/val_loader.batch_size
val_loss_by_epoch.append(val_loss)
best_val_loss_epoch = np.argmin(val_loss_by_epoch)
if best_val_loss_epoch == epoch:
# any time a new best is encountered, the best_params will get replaced
best_params = pytorch_module.state_dict()
best_val_loss = val_loss
# Check for early stopping by counting the number of epochs since val loss improved
if epoch > 0 and val_loss >= val_loss_by_epoch[-2]:
epoch_wo_improv += 1
else:
epoch_wo_improv = 0
else:
# early stopping is applied
pytorch_module.load_state_dict(best_params)
break
pytorch_module.eval()
val_reconstr_errors = []
with torch.no_grad():
for ts_batch in val_loader:
ts_batch = ts_batch.float().to(pytorch_module.device)
output = pytorch_module(ts_batch)[:, -1]
error = nn.L1Loss(reduction='none')(output, ts_batch[:, -1])
val_reconstr_errors.append(error.cpu().numpy())
if len(val_reconstr_errors) > 0:
val_reconstr_errors = np.concatenate(val_reconstr_errors)
if ret_best_val_loss:
return pytorch_module, train_loss_by_epoch, val_loss_by_epoch, val_reconstr_errors, best_val_loss
return pytorch_module, train_loss_by_epoch, val_loss_by_epoch, val_reconstr_errors
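# Minimal usage sketch for fit_with_early_stopping. The tiny LSTM model below is a
# hypothetical stand-in (not a model from this code base); it only needs to map a
# (batch, seq_len, channels) tensor back to the same shape and expose a .device
# attribute, which is what fit_with_early_stopping assumes.
class _DemoSeqModel(nn.Module):
    def __init__(self, n_channels=2, hidden=8):
        super().__init__()
        self.device = torch.device('cpu')
        self.rnn = nn.LSTM(n_channels, hidden, batch_first=True)
        self.out = nn.Linear(hidden, n_channels)

    def forward(self, x):
        h, _ = self.rnn(x)   # h: (batch, seq_len, hidden)
        return self.out(h)   # reconstruct all channels at every time step

def _demo_fit_with_early_stopping():
    train_loader, val_loader = _demo_get_train_data_loaders()
    model = _DemoSeqModel()
    model, train_hist, val_hist, val_errors = fit_with_early_stopping(
        train_loader, val_loader, model, patience=3, num_epochs=5, lr=1e-3, verbose=False)
    return model, train_hist, val_hist, val_errors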
def predict_univar_outputs_encodings(trained_univar_model, ts_batch):
univar_outputs = []
univar_encodings = []
for channel_num, univar_model in enumerate(trained_univar_model):
univar_output, univar_encoding = univar_model(ts_batch[:, :, channel_num].unsqueeze(2), return_latent=True)
if len(univar_encoding.shape) == 2:
univar_encoding = univar_encoding.unsqueeze(1)
univar_outputs.append(univar_output[:, -1]) # output shape is [batch_size, 1], encoded shape is [batch_size, 5]
univar_encodings.append(univar_encoding)
univar_outputs = torch.cat(univar_outputs, dim=1)
univar_errors = ts_batch[:, -1, :] - univar_outputs
univar_encodings = torch.stack(univar_encodings).permute(1, 0, 2, 3)
return univar_errors, univar_outputs, univar_encodings
def fit_with_early_stopping_residual_joint(train_loader, val_loader,
untrained_univar_model: List[nn.Module],
pytorch_module, patience, num_epochs, lr, verbose=True):
"""
:param train_loader: the pytorch data loader for the training set
:param val_loader: the pytorch data loader for the validation set
    :param untrained_univar_model: list of untrained univariate pytorch models, one per channel; they may already have been set to eval mode
    :param pytorch_module: the residual model operating on the stacked univariate encodings
    :param patience: number of consecutive epochs without validation-loss improvement before training stops
:param num_epochs: the maximum number of epochs for the training
:param lr: the learning rate parameter used for optimization
:return: trained module, avg train and val loss per epoch, final loss on train + val data per channel
"""
pytorch_module.to(pytorch_module.device) # .double()
[model.to(pytorch_module.device) for model in untrained_univar_model]
all_params = [model.parameters() for model in untrained_univar_model] + [pytorch_module.parameters()]
optimizer = torch.optim.Adam(chain(*all_params), lr=lr)
epoch_wo_improv = 0
train_loss_by_epoch = []
val_loss_by_epoch = []
for epoch in trange(num_epochs):
if epoch_wo_improv < patience:
logging.debug(f'Epoch {epoch + 1}/{num_epochs}.')
if verbose:
GPUtil.showUtilization()
pytorch_module.train()
[model.train() for model in untrained_univar_model]
train_loss = []
for ts_batch in train_loader:
ts_batch = ts_batch.float().to(pytorch_module.device)
univar_errors, univar_outputs, univar_encodings = predict_univar_outputs_encodings(untrained_univar_model, ts_batch)
output = pytorch_module(univar_encodings)
loss = nn.MSELoss(reduction="mean")(output, univar_errors) + nn.MSELoss(reduction="mean")(univar_outputs, ts_batch[:, -1])
pytorch_module.zero_grad()
[model.zero_grad() for model in untrained_univar_model]
loss.backward()
optimizer.step()
train_loss.append(loss.item())
train_loss = np.mean(train_loss)
train_loss_by_epoch.append(train_loss)
# Get Validation loss
pytorch_module.eval()
[model.eval() for model in untrained_univar_model]
val_loss = []
with torch.no_grad():
for ts_batch in val_loader:
ts_batch = ts_batch.float().to(pytorch_module.device)
with torch.no_grad():
                        univar_errors, univar_outputs, univar_encodings = predict_univar_outputs_encodings(untrained_univar_model, ts_batch)
output = pytorch_module(univar_encodings)
loss = nn.MSELoss(reduction="mean")(output, univar_errors) + nn.MSELoss(reduction="mean")(univar_outputs, ts_batch[:, -1])
val_loss.append(loss.item())
val_loss = np.mean(val_loss)
val_loss_by_epoch.append(val_loss)
# Check for early stopping by counting the number of epochs since val loss improved
if epoch > 1:
if val_loss_by_epoch[-1] >= val_loss_by_epoch[-2]:
epoch_wo_improv += 1
if epoch_wo_improv == 1:
before_overfit_par = pytorch_module.state_dict()
else:
epoch_wo_improv = 0
else:
pytorch_module.load_state_dict(before_overfit_par)
break
pytorch_module.eval()
[model.eval() for model in untrained_univar_model]
val_reconstr_errors = []
with torch.no_grad():
for ts_batch in val_loader:
ts_batch = ts_batch.float().to(pytorch_module.device)
with torch.no_grad():
_, univar_outputs, univar_encodings = predict_univar_outputs_encodings(untrained_univar_model,
ts_batch)
output = univar_outputs + pytorch_module(univar_encodings)
error = nn.L1Loss(reduction='none')(output, ts_batch[:, -1])
val_reconstr_errors.append(torch.squeeze(error).cpu().numpy())
    val_reconstr_errors = np.concatenate(val_reconstr_errors)
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
import sys
sys.path.insert(0,'../../')
sys.path.insert(0,'..')
import numpy as np
#import mayavi.mlab as mlab
#from scipy.stats import norm
#import matplotlib as plt
from mpl_toolkits.mplot3d import Axes3D
from bayes_opt import BayesOpt
from bayes_opt.batchBO.batch_pvrs import BatchPVRS
#from bayes_opt import PradaBayOptBatch
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.metrics.pairwise import euclidean_distances
from bayes_opt.acquisition_maximization import acq_max
from scipy.stats import norm as norm_dist
import random
from bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows
import os
from pylab import *
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.7),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 0.5, 1.0))}
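# Sketch of how cdict could be turned into a colormap, as the commented-out line below
# intended (the function name and resolution are arbitrary choices, not part of the
# original code); kept behind a function so module import behaviour is unchanged.
def _build_custom_cmap(n_levels=256):
    from matplotlib.colors import LinearSegmentedColormap
    return LinearSegmentedColormap('my_colormap', cdict, n_levels)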
#my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
#my_cmap = plt.get_cmap('cubehelix')
my_cmap = plt.get_cmap('Blues')
counter = 0
#class Visualization(object):
#def __init__(self,bo):
#self.plot_gp=0
#self.posterior=0
#self.myBo=bo
def plot_bo(bo):
if bo.dim==1:
plot_bo_1d(bo)
if bo.dim==2:
plot_bo_2d(bo)
def plot_acq_bo_1d(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(10, 10))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(8, 1, height_ratios=[3, 1,1,1,1,1,1,1])
axis = plt.subplot(gs[0])
acq_UCB = plt.subplot(gs[1])
acq_EI = plt.subplot(gs[2])
acq_POI = plt.subplot(gs[3])
#acq_TS2 = plt.subplot(gs[5])
acq_ES = plt.subplot(gs[4])
acq_PES = plt.subplot(gs[5])
acq_MRS = plt.subplot(gs[6])
acq_Consensus = plt.subplot(gs[7])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
    sigma_original=sigma*np.std(bo.Y_original)
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x_original, mu_original, '--', color='k', label='GP mean')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original2=temp_yaxis*np.std(bo.Y_original)+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.6, fc='c', ec='None', label='95% CI')
axis.set_xlim((np.min(x_original), np.max(x_original)))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_UCB.plot(x_original, utility, label='Utility Function', color='purple')
acq_UCB.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
        nSelectedPoints=int(bo.NumPoints[-1])
except:
nSelectedPoints=1
max_point=np.max(utility)
#acq_UCB.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_UCB.set_xlim((np.min(x_original), np.max(x_original)))
acq_UCB.set_ylabel('UCB', fontdict={'size':16})
acq_UCB.set_xlabel('x', fontdict={'size':16})
# EI
acq_func={}
acq_func['name']='ei'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_EI.plot(x_original, utility, label='Utility Function', color='purple')
acq_EI.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_EI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_EI.set_xlim((np.min(x_original), np.max(x_original)))
acq_EI.set_ylabel('EI', fontdict={'size':16})
acq_EI.set_xlabel('x', fontdict={'size':16})
# POI
acq_func={}
acq_func['name']='poi'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_POI.plot(x_original, utility, label='Utility Function', color='purple')
acq_POI.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_POI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_POI.set_xlim((np.min(x_original), np.max(x_original)))
acq_POI.set_ylabel('POI', fontdict={'size':16})
acq_POI.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_EI.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
# MRS
acq_func={}
acq_func['name']='mrs'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_MRS.plot(x_original, utility, label='Utility Function', color='purple')
acq_MRS.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_MRS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_MRS.set_xlim((np.min(x_original), np.max(x_original)))
acq_MRS.set_ylabel('MRS', fontdict={'size':16})
acq_MRS.set_xlabel('x', fontdict={'size':16})
# PES
acq_func={}
acq_func['name']='pes'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_PES.plot(x_original, utility, label='Utility Function', color='purple')
acq_PES.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_PES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_PES.set_xlim((np.min(x_original), np.max(x_original)))
acq_PES.set_ylabel('PES', fontdict={'size':16})
acq_PES.set_xlabel('x', fontdict={'size':16})
# TS1
acq_func={}
acq_func['name']='consensus'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_Consensus.plot(x_original, utility, label='Utility Function', color='purple')
temp=np.asarray(myacq.object.xstars)
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Consensus.plot(xt_suggestion_original, [np.max(utility)]*xt_suggestion_original.shape[0], 's', markersize=15,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_Consensus.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
#acq_TS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_Consensus.set_xlim((np.min(x_original), np.max(x_original)))
#acq_TS.set_ylim((np.min(utility)*0.9, np.max(utility)*1.1))
acq_Consensus.set_ylabel('Consensus', fontdict={'size':16})
acq_Consensus.set_xlabel('x', fontdict={'size':16})
# ES
acq_func={}
acq_func['name']='es'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_ES.plot(x_original, utility, label='Utility Function', color='purple')
acq_ES.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_ES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_ES.set_xlim((np.min(x_original), np.max(x_original)))
acq_ES.set_ylabel('ES', fontdict={'size':16})
acq_ES.set_xlabel('x', fontdict={'size':16})
strFileName="{:d}_GP_acquisition_functions.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_acq_bo_1d_vrs(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(10, 11))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(8, 1, height_ratios=[2, 1,1,1,1,1,1,1])
axis = plt.subplot(gs[0])
acq_UCB = plt.subplot(gs[1])
acq_EI = plt.subplot(gs[2])
#acq_POI = plt.subplot(gs[3])
#acq_TS2 = plt.subplot(gs[5])
acq_MES = plt.subplot(gs[3])
acq_ES = plt.subplot(gs[4])
acq_MRS = plt.subplot(gs[5])
acq_PES = plt.subplot(gs[6])
acq_Consensus = plt.subplot(gs[7])
mu, sigma = bo.posterior(x)
# get maximum of mu function
mu_max=mu.max()
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
    sigma_original=sigma*np.std(bo.Y_original)
axis.plot(x_original, y_original, linewidth=3, label='f(x)')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'o', markersize=8, label=u'Data X', color='g')
axis.plot(x_original, mu_original, '--', color='k', label='$\mu(x)$')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original2=temp_yaxis*np.std(bo.Y_original)+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.3, fc='c', ec='None', label='$\sigma(x)$')
axis.get_xaxis().set_visible(False)
axis.set_yticklabels([])
axis.set_xticklabels([])
axis.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
axis.legend(loc='center left', bbox_to_anchor=(0.01, 1.15),prop={'size':16},ncol=6)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_UCB.plot(x_original, utility, label='Utility Function', color='purple')
acq_UCB.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
        nSelectedPoints=int(bo.NumPoints[-1])
except:
nSelectedPoints=1
max_point=np.max(utility)
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.set_yticklabels([])
acq_UCB.set_xticklabels([])
#acq_UCB.get_yaxis().set_visible(False)
#acq_UCB.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_UCB.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_UCB.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_UCB.set_ylabel('UCB', fontdict={'size':16})
acq_UCB.set_xlabel('x', fontdict={'size':16})
# EI
acq_func={}
acq_func['name']='ei'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_EI.plot(x_original, utility, label='Utility Function', color='purple')
acq_EI.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_EI.get_xaxis().set_visible(False)
#acq_EI.get_yaxis().set_visible(False)
#acq_EI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_EI.set_yticklabels([])
acq_EI.set_xticklabels([])
acq_EI.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_EI.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_EI.set_ylabel('EI', fontdict={'size':16})
#acq_EI.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_EI.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
xstars=[]
ystars=[]
# TS1
# finding the xt of Thompson Sampling
numXtar=100
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
xstars.append(xt_TS)
yt_TS=acq_mu.acq_kind(xt_TS,bo.gp,y_max=np.max(bo.Y))
if yt_TS>mu_max:
ystars.append(yt_TS)
if not ystars:
ystars.append([mu_max])
temp=np.asarray(xstars)
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
# MRS
acq_func={}
acq_func['name']='mrs'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_MRS.plot(x_original, utility, label='Utility Function', color='purple')
acq_MRS.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
acq_MRS.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#acq_MRS.plot(xt_suggestion_original, [np.max(utility)]*xt_suggestion_original.shape[0], 's', markersize=15,
#label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_MRS.get_xaxis().set_visible(False)
acq_MRS.set_yticklabels([])
acq_MRS.set_xticklabels([])
#acq_MRS.get_yaxis().set_visible(False)
#acq_MRS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_MRS.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_MRS.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_MRS.set_ylabel('MRS', fontdict={'size':16})
#acq_MRS.set_xlabel('x', fontdict={'size':16})
# MES
acq_func={}
acq_func['name']='mes'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
acq_func['ystars']=ystars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_MES.plot(x_original, utility, label='Utility Function', color='purple')
acq_MES.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#acq_MRS.plot(xt_suggestion_original, [np.max(utility)]*xt_suggestion_original.shape[0], 's', markersize=15,
#label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_MES.get_xaxis().set_visible(False)
acq_MES.set_yticklabels([])
acq_MES.set_xticklabels([])
#acq_MES.get_yaxis().set_visible(False)
#acq_MRS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_MES.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_MES.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_MES.set_ylabel('MES', fontdict={'size':16})
#acq_MES.set_xlabel('x', fontdict={'size':16})
# PES
acq_func={}
acq_func['name']='pes'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_PES.plot(x_original, utility, label='Utility Function', color='purple')
acq_PES.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
acq_PES.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Selected point $x_t$', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_PES.get_xaxis().set_visible(False)
acq_PES.set_yticklabels([])
acq_PES.set_xticklabels([])
#acq_PES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_PES.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_PES.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_PES.set_ylabel('PES', fontdict={'size':16})
acq_PES.set_xlabel('x', fontdict={'size':16})
#acq_PES.get_yaxis().set_visible(False)
### VRS
acq_func={}
acq_func['name']='vrs_of_ts'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#mytest=np.vstack((x.reshape(-1,1),bo.gp.X))
#utility_existing_X = myacq.acq_kind(mytest, bo.gp, np.max(bo.Y))
#utility=0-utility
temp=np.asarray(myacq.object.xstars)
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Consensus.plot(x_original, utility, label=r'$\alpha(x)$', color='purple')
#acq_Consensus.plot(x_original, [np.asscalar(myacq.object.average_predvar)]*len(x_original), label=r'threshold', color='black')
#print np.asscalar(myacq.object.average_predvar)
#print np.min(utility)
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_Consensus.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label='$x^*$ samples', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_Consensus.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Selected point $x_t$', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#acq_Consensus.get_yaxis().set_visible(False)
#acq_TS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_Consensus.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_Consensus.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_Consensus.set_yticklabels([])
#acq_TS.set_ylim((np.min(utility)*0.9, np.max(utility)*1.1))
acq_Consensus.set_ylabel('PVRS', fontdict={'size':16})
acq_Consensus.set_xlabel('x', fontdict={'size':16})
acq_Consensus.legend(loc='center left', bbox_to_anchor=(0.01, -1.1),prop={'size':16},ncol=3)
#acq_ES.get_xaxis().set_visible(False)
# ES
acq_func={}
acq_func['name']='es'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_ES.plot(x_original, utility, label='Utility Function', color='purple')
acq_ES.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
acq_ES.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#max_point=np.max(utility)
#acq_ES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
#acq_ES.get_yaxis().set_visible(False)
acq_ES.get_xaxis().set_visible(False)
acq_ES.set_yticklabels([])
acq_ES.set_xticklabels([])
acq_ES.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_ES.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_ES.set_ylabel('ES', fontdict={'size':16})
#acq_ES.set_xlabel('x', fontdict={'size':16})
strFileName="{:d}_GP_acquisition_functions_vrs.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_1d(bo):
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(8, 5))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
    sigma_original=sigma*np.std(bo.Y_original)
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x_original, mu_original, '--', color='k', label='GP mean')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original2=temp_yaxis*np.std(bo.Y_original)+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.6, fc='c', ec='None', label='95% CI')
axis.set_xlim((np.min(x_original), np.max(x_original)))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq.plot(x_original, utility, label='Utility Function', color='purple')
acq.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
        nSelectedPoints=int(bo.NumPoints[-1])
except:
nSelectedPoints=1
max_point=np.max(utility)
acq.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((np.min(x_original), np.max(x_original)))
#acq.set_ylim((0, np.max(utility) + 0.5))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
acq.set_ylabel('Acq', fontdict={'size':16})
acq.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_1d_variance(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#fig=plt.figure(figsize=(8, 5))
fig, ax1 = plt.subplots(figsize=(8.5, 4))
mu, sigma = bo.posterior(x)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
def distance_function(x,X):
Euc_dist=euclidean_distances(x,X)
dist=Euc_dist.min(axis=1)
return dist
utility_distance=distance_function(x.reshape((-1, 1)),bo.X)
idxMaxVar=np.argmax(utility)
#idxMaxVar=[idx for idx,val in enumerate(utility) if val>=0.995]
ax1.plot(x_original, utility, label='GP $\sigma(x)$', color='purple')
ax1.scatter(x_original[idxMaxVar], utility[idxMaxVar], marker='s',label='x=argmax $\sigma(x)$', color='blue',linewidth=2)
#ax1.scatter(x_original[idxMaxVar], utility[idxMaxVar], label='$||x-[x]||$', color='blue',linewidth=2)
ax1.plot(bo.X_original.flatten(), [0]*len(bo.X_original.flatten()), 'D', markersize=10, label=u'Observations', color='r')
idxMaxDE=np.argmax(utility_distance)
ax2 = ax1.twinx()
ax2.plot(x_original, utility_distance, label='$d(x)=||x-[x]||^2$', color='black')
ax2.plot(x_original[idxMaxDE], utility_distance[idxMaxDE], 'o',label='x=argmax d(x)', color='black',markersize=10)
ax2.set_ylim((0, 0.45))
ax1.set_xlim((np.min(x_original)-0.01, 0.01+np.max(x_original)))
ax1.set_ylim((-0.02, np.max(utility) + 0.05))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
ax1.set_ylabel(r'$\sigma(x)$', fontdict={'size':18})
ax2.set_ylabel('d(x)', fontdict={'size':18})
ax1.set_xlabel('x', fontdict={'size':18})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#ax1.legend(loc=2, bbox_to_anchor=(1.1, 1), borderaxespad=0.,fontsize=14)
#ax2.legend(loc=2, bbox_to_anchor=(1.1, 0.3), borderaxespad=0.,fontsize=14)
plt.title('Exploration by GP variance vs distance',fontsize=22)
ax1.legend(loc=3, bbox_to_anchor=(0.05,-0.32,1, -0.32), borderaxespad=0.,fontsize=14,ncol=4)
ax2.legend(loc=3, bbox_to_anchor=(0.05,-0.46,1, -0.46), borderaxespad=0.,fontsize=14,ncol=2)
#plt.legend(fontsize=14)
strFolder="P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\demo_geometric"
strFileName="{:d}_var_DE.eps".format(counter)
strPath=os.path.join(strFolder,strFileName)
fig.savefig(strPath, bbox_inches='tight')
def plot_acq_bo_2d_vrs(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 50)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 50)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 50)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 50)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig=plt.figure(figsize=(14, 20))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
nRows=6
axis_mean2d = fig.add_subplot(nRows, 2, 1)
axis_variance2d = fig.add_subplot(nRows, 2, 2)
acq_UCB = fig.add_subplot(nRows, 2, 3)
#acq_EI =fig.add_subplot(nRows, 2,4)
#acq_POI = plt.subplot(gs[3])
acq_ES = fig.add_subplot(nRows, 2, 4)
acq_PES = fig.add_subplot(nRows, 2, 5)
acq_MRS = fig.add_subplot(nRows, 2, 6)
#acq_ydist = fig.add_subplot(nRows, 2, 8)
acq_VRS = fig.add_subplot(nRows, 2, 7)
acq_Batch_VRS_B_2 = fig.add_subplot(nRows, 2, 8)
acq_Batch_VRS_B_3 = fig.add_subplot(nRows, 2, 9)
acq_Batch_VRS_B_4 = fig.add_subplot(nRows, 2, 10)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# get maximum of mu function
mu_max=mu.max()
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean $\mu(x)$',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_mean2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
axis_mean2d.get_xaxis().set_visible(False)
axis_mean2d.get_yaxis().set_visible(False)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance $\sigma(x)$',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
axis_variance2d.get_xaxis().set_visible(False)
axis_variance2d.get_yaxis().set_visible(False)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_UCB.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.get_yaxis().set_visible(False)
"""
# EI
acq_func={}
acq_func['name']='ei'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_EI.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_EI.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_EI.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_EI.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_EI=X[idxBest,:]
acq_EI.set_title('EI',fontsize=16)
acq_EI.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_EI.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_EI, shrink=0.9)
"""
# ==================================================================================
    # finding the xt of Thompson Sampling, then used for PES, ES and VRS
y_max=np.max(bo.Y)
xstars=[]
y_stars=[]
xstars_VRS=[]
numXtar=25*bo.dim
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
y_xt_TS=acq_mu.acq_kind(xt_TS,bo.gp)
#if y_xt_TS>mu_max:
y_stars.append(y_xt_TS)
xstars.append(xt_TS)
#if y_xt_TS>=y_max:
xstars_VRS.append(xt_TS)
# MRS
acq_func={}
acq_func['name']='mes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['ystars']=y_stars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_MRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_MRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_MRS.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_suggestion_original=xstars*bo.max_min_gap+bo.bounds[:,0]
acq_MRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_MRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_MRS.set_title('MES',fontsize=16)
acq_MRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_MRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_MRS, shrink=0.9)
acq_MRS.get_xaxis().set_visible(False)
acq_MRS.get_yaxis().set_visible(False)
"""
# plot distribution of y_star
mu_ydist, std_ydist = norm_dist.fit(y_stars)
# Plot the histogram.
acq_ydist.hist(y_stars,bins=20,normed=True,alpha=.6,color='g',label=ur'Histogram of $y^*$')
# Plot the PDF.
x = np.linspace(np.min(y_stars), np.max(y_stars), 100)
p = norm_dist.pdf(x, mu_ydist, std_ydist)
acq_ydist.plot(x,p,'k', linewidth=2,label='Gaussian curve')
acq_ydist.legend()
acq_ydist.set_title(ur'Distribution of $y^*$',fontsize=16)
"""
# PES
acq_func={}
acq_func['name']='pes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
#acq_func['xstars']=xstars
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_PES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_PES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_PES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
temp=temp.reshape(-1,2)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_PES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_PES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
xt_PES=X[idxBest,:]
acq_PES.set_title('PES',fontsize=16)
acq_PES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_PES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_PES, shrink=0.9)
acq_PES.get_xaxis().set_visible(False)
acq_PES.get_yaxis().set_visible(False)
# ES
acq_func={}
acq_func['name']='es'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_ES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_ES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
#temp=temp.reshape(-1,2)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_ES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_ES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#xt_ES=X[idxBest,:]
acq_ES.set_title('ES',fontsize=16)
acq_ES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_ES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_ES, shrink=0.9)
acq_ES.get_xaxis().set_visible(False)
acq_ES.get_yaxis().set_visible(False)
#xstars.append(xt_UCB)
#xstars.append(xt_EI)
#xstars.append(xt_ES)
#xstars.append(xt_PES)
# Variance Reduction Search
acq_func={}
acq_func['name']='pvrs'
acq_func['kappa']=2
acq_func['n_xstars_x_dim']=50
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_VRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_VRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_VRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_VRS.set_title('PVRS',fontsize=16)
acq_VRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_VRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_VRS, shrink=0.9)
acq_VRS.get_xaxis().set_visible(False)
acq_VRS.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=2
acq_Batch_VRS_B_2.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=2)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_2.set_title('Batch PVRS B=2',fontsize=16)
acq_Batch_VRS_B_2.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_Batch_VRS_B_2.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_2, shrink=0.9)
acq_Batch_VRS_B_2.get_xaxis().set_visible(False)
acq_Batch_VRS_B_2.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=3
acq_Batch_VRS_B_3.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_3.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$ samples')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=3)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_3.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_3.set_title('Batch PVRS B=3',fontsize=16)
acq_Batch_VRS_B_3.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_Batch_VRS_B_3.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_3, shrink=0.9)
acq_Batch_VRS_B_3.get_xaxis().set_visible(False)
acq_Batch_VRS_B_3.get_yaxis().set_visible(False)
acq_Batch_VRS_B_3.legend(loc='center left', bbox_to_anchor=(0.01, -0.2),prop={'size':20},ncol=3)
# Batch Variance Reduction Search B=4
acq_Batch_VRS_B_4.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_4.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=4)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_4.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_4.set_title('Batch PVRS B=4',fontsize=16)
acq_Batch_VRS_B_4.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_4.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_4, shrink=0.9)
acq_Batch_VRS_B_4.get_xaxis().set_visible(False)
acq_Batch_VRS_B_4.get_yaxis().set_visible(False)
strFileName="{:d}_GP2d_acquisition_functions_vrs.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_acq_bo_2d_vrs_3x2(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 50)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 50)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 50)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 50)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig=plt.figure(figsize=(14, 16))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
nRows=4
axis_mean2d = fig.add_subplot(nRows, 2, 1)
axis_variance2d = fig.add_subplot(nRows, 2, 2)
acq_UCB = fig.add_subplot(nRows, 2, 3)
acq_ES = fig.add_subplot(nRows, 2, 4)
acq_PES = fig.add_subplot(nRows, 2, 5)
acq_VRS = fig.add_subplot(nRows, 2, 6)
acq_Batch_VRS_B_2 = fig.add_subplot(nRows, 2, 7)
acq_Batch_VRS_B_3 = fig.add_subplot(nRows, 2, 8)
#acq_Batch_VRS_B_4 = fig.add_subplot(nRows, 2, 10)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# get maximum of mu function
mu_max=mu.max()
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean $\mu(x)$',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
axis_mean2d.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
axis_mean2d.get_xaxis().set_visible(False)
axis_mean2d.get_yaxis().set_visible(False)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance $\sigma(x)$',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
axis_variance2d.get_xaxis().set_visible(False)
axis_variance2d.get_yaxis().set_visible(False)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_UCB.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.get_yaxis().set_visible(False)
# ==================================================================================
    # finding the xt of Thompson Sampling, then used for PES, ES and VRS
y_max=np.max(bo.Y)
xstars=[]
y_stars=[]
xstars_VRS=[]
numXtar=25*bo.dim
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
y_xt_TS=acq_mu.acq_kind(xt_TS,bo.gp)
#if y_xt_TS>mu_max:
y_stars.append(y_xt_TS)
xstars.append(xt_TS)
#if y_xt_TS>=y_max:
xstars_VRS.append(xt_TS)
# ES
acq_func={}
acq_func['name']='es'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_ES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_ES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
#temp=temp.reshape(-1,2)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_ES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_ES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#xt_ES=X[idxBest,:]
acq_ES.set_title('ES',fontsize=16)
acq_ES.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_ES.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_ES, shrink=0.9)
acq_ES.get_xaxis().set_visible(False)
acq_ES.get_yaxis().set_visible(False)
#xstars.append(xt_UCB)
#xstars.append(xt_EI)
#xstars.append(xt_ES)
#xstars.append(xt_PES)
# PES
acq_func={}
acq_func['name']='pes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
#acq_func['xstars']=xstars
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_PES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_PES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_PES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
temp=temp.reshape(-1,2)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_PES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_PES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
xt_PES=X[idxBest,:]
acq_PES.set_title('PES',fontsize=16)
acq_PES.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_PES.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_PES, shrink=0.9)
acq_PES.get_xaxis().set_visible(False)
acq_PES.get_yaxis().set_visible(False)
"""
# plot distribution of y_star
mu_ydist, std_ydist = norm_dist.fit(y_stars)
# Plot the histogram.
acq_ydist.hist(y_stars,bins=20,normed=True,alpha=.6,color='g',label=r'Histogram of $y^*$')
# Plot the PDF.
x = np.linspace(np.min(y_stars), np.max(y_stars), 100)
p = norm_dist.pdf(x, mu_ydist, std_ydist)
acq_ydist.plot(x,p,'k', linewidth=2,label='Gaussian curve')
acq_ydist.legend()
acq_ydist.set_title(r'Distribution of $y^*$',fontsize=16)
"""
# Variance Reduction Search
acq_func={}
acq_func['name']='vrs'
acq_func['kappa']=2
acq_func['n_xstars_x_dim']=50
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_VRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_VRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_VRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_VRS.set_title('PVRS',fontsize=16)
acq_VRS.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_VRS.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_VRS, shrink=0.9)
acq_VRS.get_xaxis().set_visible(False)
acq_VRS.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=2
acq_Batch_VRS_B_2.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$ samples')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=2)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_2.set_title('B-PVRS B=2',fontsize=16)
acq_Batch_VRS_B_2.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_2.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_2, shrink=0.9)
acq_Batch_VRS_B_2.get_xaxis().set_visible(False)
acq_Batch_VRS_B_2.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=3
acq_Batch_VRS_B_3.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_3.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$ samples')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=3)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_3.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_3.set_title('B-PVRS B=3',fontsize=16)
acq_Batch_VRS_B_3.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_3.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_3, shrink=0.9)
acq_Batch_VRS_B_3.get_xaxis().set_visible(False)
acq_Batch_VRS_B_3.get_yaxis().set_visible(False)
acq_Batch_VRS_B_2.legend(loc='center left', bbox_to_anchor=(0.1, -0.2),prop={'size':20},ncol=3)
# Batch Variance Reduction Search B=4
"""
acq_Batch_VRS_B_4.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_4.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=4)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_4.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_4.set_title('Batch PVRS B=4',fontsize=16)
acq_Batch_VRS_B_4.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_4.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_4, shrink=0.9)
acq_Batch_VRS_B_4.get_xaxis().set_visible(False)
acq_Batch_VRS_B_4.get_yaxis().set_visible(False)
"""
strFileName="{:d}_GP2d_acquisition_functions_vrs.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
def plot_acq_bo_2d_vrs_backup(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 80)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 80)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 80)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 80)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig=plt.figure(figsize=(14, 20))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
nRows=5
axis_mean2d = fig.add_subplot(nRows, 2, 1)
axis_variance2d = fig.add_subplot(nRows, 2, 2)
acq_UCB = fig.add_subplot(nRows, 2, 3)
acq_EI =fig.add_subplot(nRows, 2,4)
#acq_POI = plt.subplot(gs[3])
acq_ES = fig.add_subplot(nRows, 2, 5)
acq_PES = fig.add_subplot(nRows, 2, 6)
acq_MRS = fig.add_subplot(nRows, 2, 7)
acq_ydist = fig.add_subplot(nRows, 2, 8)
acq_VRS = fig.add_subplot(nRows, 2, 9)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# get maximum of mu function
mu_max=mu.max()
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_mean2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_UCB.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
# EI
acq_func={}
acq_func['name']='ei'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_EI.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_EI.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_EI.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_EI.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_EI=X[idxBest,:]
acq_EI.set_title('EI',fontsize=16)
acq_EI.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_EI.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_EI, shrink=0.9)
# ==================================================================================
# finding the xt of Thompson Sampling then use for PES, ES and VRS
y_max=np.max(bo.Y)
xstars=[]
y_stars=[]
xstars_VRS=[]
numXtar=50*bo.dim
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
y_xt_TS=acq_mu.acq_kind(xt_TS,bo.gp)
if y_xt_TS>mu_max:
y_stars.append(y_xt_TS)
xstars.append(xt_TS)
if y_xt_TS>=y_max:
xstars_VRS.append(xt_TS)
# MRS
acq_func={}
acq_func['name']='mes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['ystar_suggestions']=y_stars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_MRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_MRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_MRS.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_suggestion_original=xstars*bo.max_min_gap+bo.bounds[:,0]
acq_MRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='s',color='y',s=40,label='xstars')
acq_MRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_MRS.set_title('MES',fontsize=16)
acq_MRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_MRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_MRS, shrink=0.9)
# plot distribution of y_star
mu_ydist, std_ydist = norm_dist.fit(y_stars)
# Plot the histogram.
acq_ydist.hist(y_stars,bins=20,normed=True,alpha=.6,color='g',label=r'Histogram of $y^*$')
# Plot the PDF.
x = np.linspace(np.min(y_stars), np.max(y_stars), 100)
p = norm_dist.pdf(x, mu_ydist, std_ydist)
acq_ydist.plot(x,p,'k', linewidth=2,label='Gaussian curve')
acq_ydist.legend()
acq_ydist.set_title(r'Distribution of $y^*$',fontsize=16)
# PES
acq_func={}
acq_func['name']='pes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
#acq_func['xstars']=xstars
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_PES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_PES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_PES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
temp=temp.reshape(-1,2)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_PES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='s',color='y',s=40,label='xstars')
acq_PES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
xt_PES=X[idxBest,:]
acq_PES.set_title('PES',fontsize=16)
acq_PES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_PES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_PES, shrink=0.9)
# ES
acq_func={}
acq_func['name']='es'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_ES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_ES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
#temp=temp.reshape(-1,2)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_ES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='s',color='y',s=40,label='xstars')
acq_ES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
xt_ES=X[idxBest,:]
acq_ES.set_title('ES',fontsize=16)
acq_ES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_ES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_ES, shrink=0.9)
xstars.append(xt_UCB)
xstars.append(xt_EI)
xstars.append(xt_ES)
#xstars.append(xt_PES)
# Variance Reduction Search
acq_func={}
acq_func['name']='vrs'
acq_func['kappa']=2
acq_func['n_xstars_x_dim']=50
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_VRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_VRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_VRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='s',color='y',s=40,label='xstars')
acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_VRS.set_title('VRS',fontsize=16)
acq_VRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_VRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_VRS, shrink=0.9)
strFileName="{:d}_GP2d_acquisition_functions_vrs.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_acq_bo_2d(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 80)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 80)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 80)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 80)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(14, 20))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
axis_mean2d = fig.add_subplot(4, 2, 1)
axis_variance2d = fig.add_subplot(4, 2, 2)
acq_UCB = fig.add_subplot(4, 2, 3)
acq_EI =fig.add_subplot(4, 2,4)
#acq_POI = plt.subplot(gs[3])
acq_ES = fig.add_subplot(4, 2, 5)
acq_PES = fig.add_subplot(4, 2, 6)
acq_MRS = fig.add_subplot(4, 2, 7)
acq_Consensus = fig.add_subplot(4, 2, 8)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_mean2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_UCB.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
# EI
acq_func={}
acq_func['name']='ei'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_EI.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_EI.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_EI.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_EI.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_EI=X[idxBest,:]
acq_EI.set_title('EI',fontsize=16)
acq_EI.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_EI.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_EI, shrink=0.9)
# MRS
acq_func={}
acq_func['name']='mrs'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_MRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_MRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_MRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_MRS.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_MRS.set_title('MRS',fontsize=16)
acq_MRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_MRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_MRS, shrink=0.9)
# PES
acq_func={}
acq_func['name']='pes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_PES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_PES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_PES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_PES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_PES=X[idxBest,:]
acq_PES.set_title('PES',fontsize=16)
acq_PES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_PES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_PES, shrink=0.9)
# ES
acq_func={}
acq_func['name']='es'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_ES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_ES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_ES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_ES=X[idxBest,:]
acq_ES.set_title('ES',fontsize=16)
acq_ES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_ES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_ES, shrink=0.9)
xstars=[]
xstars.append(xt_UCB)
xstars.append(xt_EI)
xstars.append(xt_ES)
xstars.append(xt_PES)
# Consensus
acq_func={}
acq_func['name']='consensus'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_Consensus.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_Consensus.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Consensus.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='s',color='y',s=100,label='xstars')
acq_Consensus.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Consensus.set_title('Consensus',fontsize=16)
acq_Consensus.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_Consensus.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_Consensus, shrink=0.9)
strFileName="{:d}_GP2d_acquisition_functions.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d_pvrs(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 80)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 80)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 80)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 80)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(12, 13))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
axis_mean2d = fig.add_subplot(3, 2, 1)
axis_variance2d = fig.add_subplot(3, 2, 2)
acq_UCB = fig.add_subplot(3, 2, 3)
acq_EI =fig.add_subplot(3, 2,4)
#acq_POI = plt.subplot(gs[3])
acq_VRS = fig.add_subplot(3, 2, 5)
acq_Batch_VRS_B_2 = fig.add_subplot(3, 2, 6)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_mean2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
axis_mean2d.get_xaxis().set_visible(False)
axis_mean2d.get_yaxis().set_visible(False)
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
axis_variance2d.get_xaxis().set_visible(False)
axis_variance2d.get_yaxis().set_visible(False)
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
# ==================================================================================
# finding the xt of Thompson Sampling then use for PES, ES and VRS
y_max=np.max(bo.Y)
xstars=[]
y_stars=[]
xstars_VRS=[]
numXtar=25*bo.dim
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
y_xt_TS=acq_mu.acq_kind(xt_TS,bo.gp)
#if y_xt_TS>mu_max:
y_stars.append(y_xt_TS)
xstars.append(xt_TS)
#if y_xt_TS>=y_max:
xstars_VRS.append(xt_TS)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_UCB.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.get_yaxis().set_visible(False)
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
# EI
acq_func={}
acq_func['name']='ei'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_EI.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_EI.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_EI.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_EI.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_EI=X[idxBest,:]
acq_EI.set_title('EI',fontsize=16)
acq_EI.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_EI.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_EI, shrink=0.9)
acq_EI.get_xaxis().set_visible(False)
acq_EI.get_yaxis().set_visible(False)
# Predictive Variance Reduction Search
acq_func={}
acq_func['name']='pvrs'
acq_func['kappa']=2
acq_func['n_xstars_x_dim']=50
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_VRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_VRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_VRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_VRS.set_title('PVRS',fontsize=16)
acq_VRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_VRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_VRS, shrink=0.9)
acq_VRS.get_xaxis().set_visible(False)
acq_VRS.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=2
acq_Batch_VRS_B_2.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$ samples')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
func_params['function']=bo.function
gp_params = {'lengthscale':0.2*2,'noise_delta':1e-8}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X=bo2.maximize_batch_greedy_PVRS(B=2)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_2.set_title('B-PVRS B=2',fontsize=16)
acq_Batch_VRS_B_2.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_2.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_2, shrink=0.9)
acq_Batch_VRS_B_2.get_xaxis().set_visible(False)
acq_Batch_VRS_B_2.get_yaxis().set_visible(False)
acq_VRS.legend(loc='center left', bbox_to_anchor=(0.1, -0.2),prop={'size':20},ncol=3)
strFileName="{:d}_GP2d_acquisition_functions.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d_pvrs_short(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 80)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 80)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 80)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 80)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(13, 7))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
axis_mean2d = fig.add_subplot(2, 2, 1)
axis_variance2d = fig.add_subplot(2, 2, 2)
#acq_UCB = fig.add_subplot(2, 2, 3)
#acq_EI =fig.add_subplot(3, 2,4)
#acq_POI = plt.subplot(gs[3])
acq_VRS = fig.add_subplot(2, 2, 3)
acq_Batch_VRS_B_2 = fig.add_subplot(2, 2, 4)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_mean2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
axis_mean2d.get_xaxis().set_visible(False)
axis_mean2d.get_yaxis().set_visible(False)
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
axis_variance2d.get_xaxis().set_visible(False)
axis_variance2d.get_yaxis().set_visible(False)
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
# ==================================================================================
# finding the xt of Thompson Sampling then use for PES, ES and VRS
y_max=np.max(bo.Y)
xstars=[]
y_stars=[]
xstars_VRS=[]
numXtar=25*bo.dim
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
y_xt_TS=acq_mu.acq_kind(xt_TS,bo.gp)
#if y_xt_TS>mu_max:
y_stars.append(y_xt_TS)
xstars.append(xt_TS)
#if y_xt_TS>=y_max:
xstars_VRS.append(xt_TS)
# UCB
"""
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_UCB.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.get_yaxis().set_visible(False)
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
# EI
acq_func={}
acq_func['name']='ei'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_EI.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_EI.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_EI.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_EI.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_EI=X[idxBest,:]
acq_EI.set_title('EI',fontsize=16)
acq_EI.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_EI.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_EI, shrink=0.9)
acq_EI.get_xaxis().set_visible(False)
acq_EI.get_yaxis().set_visible(False)
"""
# Predictive Variance Reduction Search
acq_func={}
acq_func['name']='pvrs'
acq_func['kappa']=2
acq_func['n_xstars_x_dim']=50
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_VRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_VRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data X')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_VRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'Sampled $x^*$')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Selected $x_t$')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_VRS.set_title('PVRS',fontsize=16)
acq_VRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_VRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_VRS, shrink=0.9)
acq_VRS.get_xaxis().set_visible(False)
acq_VRS.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=2
acq_Batch_VRS_B_2.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data X')
temp=np.asarray(myacq.object.xstars)
# convert from scaled data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$ samples')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
func_params['function']=bo.function
gp_params = {'lengthscale':0.2*2,'noise_delta':1e-8}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X=bo2.maximize_batch_greedy_PVRS(B=2)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_2.set_title('B-PVRS B=2',fontsize=16)
acq_Batch_VRS_B_2.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_2.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_2, shrink=0.9)
acq_Batch_VRS_B_2.get_xaxis().set_visible(False)
acq_Batch_VRS_B_2.get_yaxis().set_visible(False)
#acq_VRS.legend(loc='center left', bbox_to_anchor=(0.1, -0.2),prop={'size':20},ncol=1)
acq_VRS.legend( bbox_to_anchor=(3.45, 1.4),prop={'size':17},ncol=1)
strFileName="{:d}_GP2d_acquisition_functions.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d(bo):
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig = plt.figure()
#axis2d = fig.add_subplot(1, 2, 1)
acq2d = fig.add_subplot(1, 1, 1)
#mu, sigma = bo.posterior(X)
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp)
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=30,label='Peak')
acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq2d.set_title('Acquisition Function',fontsize=16)
acq2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq2d.legend(loc='center left',ncol=3,bbox_to_anchor=(0, -0.2))
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
#acq.set_xlim((np.min(x), np.max(x)))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
#acq.set_ylabel('Acq', fontdict={'size':16})
#acq.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d_withGPmeans(bo):
x1 =
|
np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
|
numpy.linspace
|
from __future__ import annotations
from typing import Any, Optional
import numpy as np
import pytest
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import check_is_fitted
from mapie.utils import (
check_alpha,
check_alpha_and_n_samples,
check_n_features_in,
check_n_jobs,
check_null_weight,
check_verbose,
fit_estimator,
)
X_toy = np.array([0, 1, 2, 3, 4, 5]).reshape(-1, 1)
y_toy = np.array([5, 7, 9, 11, 13, 15])
n_features = 10
X, y = make_regression(
n_samples=500, n_features=n_features, noise=1.0, random_state=1
)
class DumbEstimator:
def fit(
self, X: np.ndarray, y: Optional[np.ndarray] = None
) -> DumbEstimator:
self.fitted_ = True
return self
def test_check_null_weight_with_none() -> None:
"""Test that the function has no effect if sample weight is None."""
sw_out, X_out, y_out = check_null_weight(None, X_toy, y_toy)
assert sw_out is None
np.testing.assert_almost_equal(X_out, X_toy)
np.testing.assert_almost_equal(y_out, y_toy)
def test_check_null_weight_with_nonzeros() -> None:
"""Test that the function has no effect if sample weight is never zero."""
sample_weight = np.ones_like(y_toy)
sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)
np.testing.assert_almost_equal(sw_out, sample_weight)
np.testing.assert_almost_equal(X_out, X_toy)
np.testing.assert_almost_equal(y_out, y_toy)
def test_check_null_weight_with_zeros() -> None:
"""Test that the function reduces the shape if there are zeros."""
sample_weight = np.ones_like(y_toy)
sample_weight[:1] = 0.0
sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)
np.testing.assert_almost_equal(sw_out, np.array([1, 1, 1, 1, 1]))
np.testing.assert_almost_equal(X_out, np.array([[1], [2], [3], [4], [5]]))
np.testing.assert_almost_equal(y_out, np.array([7, 9, 11, 13, 15]))
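# Added illustrative helper (a sketch inferred from the tests above, not part of the
# original suite): check_null_weight drops the rows whose sample_weight is exactly zero
# before the estimator is fitted.
def _demo_check_null_weight() -> None:
    sample_weight = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)
    assert X_out.shape == (5, 1)
    assert y_out.shape == (5,)
    assert sw_out.shape == (5,)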
@pytest.mark.parametrize("estimator", [LinearRegression(), DumbEstimator()])
@pytest.mark.parametrize("sample_weight", [None,
|
np.ones_like(y_toy)
|
numpy.ones_like
|
"""
Utilities for Gaussian process (GP) inference.
"""
import numpy as np
from scipy.linalg import solve_triangular
from scipy.spatial.distance import cdist
def kern_exp_quad(xmat1, xmat2, ls, alpha):
"""
Exponentiated quadratic kernel function (aka squared exponential kernel aka
RBF kernel).
"""
return alpha ** 2 * kern_exp_quad_noscale(xmat1, xmat2, ls)
def kern_exp_quad_noscale(xmat1, xmat2, ls):
"""
Exponentiated quadratic kernel function (aka squared exponential kernel aka
RBF kernel), without scale parameter.
"""
sq_norm = (-1 / (2 * ls ** 2)) * cdist(xmat1, xmat2, 'sqeuclidean')
return np.exp(sq_norm)
def squared_euc_distmat(xmat1, xmat2, coef=1.0):
"""
Distance matrix of squared euclidean distance (multiplied by coef) between
points in xmat1 and xmat2.
"""
return coef * cdist(xmat1, xmat2, 'sqeuclidean')
def kern_distmat(xmat1, xmat2, ls, alpha, distfn):
"""
Kernel for a given distmat, via passed in distfn (which is assumed to be fn
of xmat1 and xmat2 only).
"""
distmat = distfn(xmat1, xmat2)
sq_norm = -distmat / ls ** 2
return alpha ** 2 * np.exp(sq_norm)
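# Added sanity-check sketch (not part of the original module): with a squared-Euclidean
# distance scaled by coef=0.5, kern_distmat reproduces kern_exp_quad, because
# alpha**2 * exp(-0.5*d2/ls**2) == alpha**2 * exp(-d2/(2*ls**2)).
def _check_kernel_equivalence(ls=0.7, alpha=1.3, seed=0):
    """Return True if kern_distmat with a 0.5-scaled squared-Euclidean distfn matches kern_exp_quad."""
    rng = np.random.RandomState(seed)
    xmat1, xmat2 = rng.rand(4, 2), rng.rand(3, 2)
    k_direct = kern_exp_quad(xmat1, xmat2, ls, alpha)
    k_via_distmat = kern_distmat(
        xmat1, xmat2, ls, alpha,
        distfn=lambda m1, m2: squared_euc_distmat(m1, m2, coef=0.5),
    )
    return np.allclose(k_direct, k_via_distmat)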
def get_cholesky_decomp(k11_nonoise, sigma, psd_str):
"""Return cholesky decomposition."""
if psd_str == 'try_first':
k11 = k11_nonoise + sigma ** 2 * np.eye(k11_nonoise.shape[0])
try:
return stable_cholesky(k11, False)
except np.linalg.LinAlgError:
return get_cholesky_decomp(k11_nonoise, sigma, 'project_first')
elif psd_str == 'project_first':
k11_nonoise = project_symmetric_to_psd_cone(k11_nonoise)
return get_cholesky_decomp(k11_nonoise, sigma, 'is_psd')
elif psd_str == 'is_psd':
k11 = k11_nonoise + sigma ** 2 * np.eye(k11_nonoise.shape[0])
return stable_cholesky(k11)
def stable_cholesky(mmat, make_psd=True):
"""Return a 'stable' cholesky decomposition of mmat."""
if mmat.size == 0:
return mmat
try:
lmat = np.linalg.cholesky(mmat)
except np.linalg.LinAlgError as e:
if not make_psd:
raise e
diag_noise_power = -11
max_mmat = np.diag(mmat).max()
diag_noise = np.diag(mmat).max() * 1e-11
break_loop = False
while not break_loop:
try:
lmat = np.linalg.cholesky(
mmat + ((10 ** diag_noise_power) * max_mmat) * np.eye(mmat.shape[0])
)
break_loop = True
except np.linalg.LinAlgError:
if diag_noise_power > -9:
print(
'\tstable_cholesky failed with '
'diag_noise_power=%d.' % (diag_noise_power)
)
diag_noise_power += 1
if diag_noise_power >= 5:
print(
'\t***** stable_cholesky failed: added diag noise '
'= %e' % (diag_noise)
)
return lmat
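# Added illustrative check (a sketch, not part of the original module): with the
# 'try_first' strategy the returned factor lmat satisfies lmat.dot(lmat.T) == K + sigma**2 * I,
# i.e. the observation-noise jitter is folded into the Cholesky factor.
def _demo_cholesky_jitter(sigma=1e-3):
    xmat = np.array([[0.0], [0.5], [1.0]])
    k11_nonoise = kern_exp_quad(xmat, xmat, ls=1.0, alpha=1.0)
    lmat = get_cholesky_decomp(k11_nonoise, sigma, 'try_first')
    return np.allclose(lmat.dot(lmat.T), k11_nonoise + sigma ** 2 * np.eye(3))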
def project_symmetric_to_psd_cone(mmat, is_symmetric=True, epsilon=0):
"""Project symmetric matrix mmat to the PSD cone."""
if is_symmetric:
try:
eigvals, eigvecs = np.linalg.eigh(mmat)
except np.linalg.LinAlgError:
print('\tLinAlgError encountered with np.eigh. Defaulting to eig.')
eigvals, eigvecs = np.linalg.eig(mmat)
eigvals = np.real(eigvals)
eigvecs = np.real(eigvecs)
else:
eigvals, eigvecs = np.linalg.eig(mmat)
clipped_eigvals = np.clip(eigvals, epsilon, np.inf)
return (eigvecs * clipped_eigvals).dot(eigvecs.T)
def solve_lower_triangular(amat, b):
"""Solves amat*x=b when amat is lower triangular."""
return solve_triangular_base(amat, b, lower=True)
def solve_upper_triangular(amat, b):
"""Solves amat*x=b when amat is upper triangular."""
return solve_triangular_base(amat, b, lower=False)
def solve_triangular_base(amat, b, lower):
"""Solves amat*x=b when amat is a triangular matrix."""
if amat.size == 0 and b.shape[0] == 0:
return np.zeros((b.shape))
else:
return solve_triangular(amat, b, lower=lower)
def sample_mvn(mu, covmat, nsamp):
"""
Sample from multivariate normal distribution with mean mu and covariance
matrix covmat.
"""
mu = mu.reshape(-1,)
ndim = len(mu)
lmat = stable_cholesky(covmat)
umat = np.random.normal(size=(ndim, nsamp))
return lmat.dot(umat).T + mu
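# Minimal added sketch (assumes only the helpers above): draw samples from a zero-mean GP
# prior on a 1-D grid using the exponentiated quadratic kernel and sample_mvn.
def _sample_gp_prior(n_grid=50, n_samp=3, ls=0.3, alpha=1.0):
    """Return (x_grid, samples) with samples of shape (n_samp, n_grid)."""
    x_grid = np.linspace(0.0, 1.0, n_grid).reshape(-1, 1)
    covmat = kern_exp_quad(x_grid, x_grid, ls, alpha)
    mu = np.zeros(n_grid)
    return x_grid, sample_mvn(mu, covmat, n_samp)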
def gp_post(x_train, y_train, x_pred, ls, alpha, sigma, kernel, full_cov=True):
"""Compute parameters of GP posterior"""
k11_nonoise = kernel(x_train, x_train, ls, alpha)
lmat = get_cholesky_decomp(k11_nonoise, sigma, 'try_first')
smat = solve_upper_triangular(lmat.T, solve_lower_triangular(lmat, y_train))
k21 = kernel(x_pred, x_train, ls, alpha)
mu2 = k21.dot(smat)
k22 = kernel(x_pred, x_pred, ls, alpha)
vmat = solve_lower_triangular(lmat, k21.T)
k2 = k22 - vmat.T.dot(vmat)
if full_cov is False:
k2 = np.sqrt(
|
np.diag(k2)
|
numpy.diag
|
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import sys
import time
from keras.preprocessing import sequence
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
data_path = sys.argv[1]
test_output_file = sys.argv[2]
peer_review_output_file = sys.argv[3]
from os import listdir
class Video_Caption_Generator():
def __init__(self, dim_image, n_words, dim_hidden, batch_size, n_lstm_steps, n_video_lstm_step,
             n_caption_lstm_step, schedule_p, bias_init_vector=None):
self.dim_image = dim_image
self.n_words = n_words
self.dim_hidden = dim_hidden
self.batch_size = batch_size
self.n_lstm_steps = n_lstm_steps
self.n_video_lstm_step=n_video_lstm_step
self.n_caption_lstm_step=n_caption_lstm_step
self.schedule_p = schedule_p
with tf.device("/cpu:0"):
self.Wemb = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1), name='Wemb') # (token_unique, 1000)
self.lstm1 = tf.nn.rnn_cell.BasicLSTMCell(dim_hidden, state_is_tuple=False) # c_state, m_state are concatenated along the column axis
self.lstm2 = tf.nn.rnn_cell.BasicLSTMCell(dim_hidden, state_is_tuple=False)
self.encode_image_W = tf.Variable( tf.random_uniform([dim_image, dim_hidden], -0.1, 0.1), name='encode_image_W') # (4096, 1000)
self.encode_image_b = tf.Variable( tf.zeros([dim_hidden]), name='encode_image_b')
# variable for attention (hWz)
self.attention_z = tf.Variable(tf.random_uniform([self.batch_size,self.lstm2.state_size],-0.1,0.1), name="attention_z")
self.attention_W = tf.Variable(tf.random_uniform([self.lstm1.state_size,self.lstm2.state_size],-0.1,0.1),name="attention_W")
self.embed_word_W = tf.Variable(tf.random_uniform([dim_hidden, n_words], -0.1,0.1), name='embed_word_W') # (1000, n_words)
if bias_init_vector is not None:
self.embed_word_b = tf.Variable(bias_init_vector.astype(np.float32), name='embed_word_b')
else:
self.embed_word_b = tf.Variable(tf.zeros([n_words]), name='embed_word_b')
def build_model(self):
video = tf.placeholder(tf.float32, [self.batch_size, self.n_video_lstm_step, self.dim_image]) # (batch, 80, 4096)
video_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_video_lstm_step])
caption = tf.placeholder(tf.int32, [self.batch_size, self.n_caption_lstm_step+1]) # includes <BOS>; stores word IDs; (batch_size, max_length+1)
caption_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_caption_lstm_step+1]) # (batch_size, max_length+1)
video_flat = tf.reshape(video, [-1, self.dim_image])
image_emb = tf.nn.xw_plus_b( video_flat, self.encode_image_W, self.encode_image_b ) # (batch_size*n_lstm_steps, dim_hidden)
image_emb = tf.reshape(image_emb, [self.batch_size, self.n_lstm_steps, self.dim_hidden])
print("lstm1 sate size,",self.lstm1.state_size)
print("lstm2 sate size,",self.lstm2.state_size) # 2*hidden size
state1 = tf.zeros([self.batch_size, self.lstm1.state_size]) # initial state
state2 = tf.zeros([self.batch_size, self.lstm2.state_size]) # initial state
padding = tf.zeros([self.batch_size, self.dim_hidden]) # (batch, 1000)
probs = []
loss = 0.0
############################## Encoding Stage ##################################
context_padding = tf.zeros([self.batch_size, self.lstm2.state_size]) #(batch_size, 2000)
h_list = []
for i in range(0, self.n_video_lstm_step): # n_vedio_lstm_step = 80
with tf.variable_scope("LSTM1", reuse= (i!=0)):
output1, state1 = self.lstm1(image_emb[:,i,:], state1)
h_list.append(state1)
with tf.variable_scope("LSTM2", reuse=(i!=0)):
output2, state2 = self.lstm2(tf.concat( [padding, output1, context_padding] ,1), state2)
print(np.shape(h_list))
h_list = tf.stack(h_list,axis=1)
print(np.shape(h_list)) # (64, 80, 2000)
############################# Decoding Stage ######################################
for i in range(0, self.n_caption_lstm_step): ## Phase 2 => only generate captions
if i==0:
with tf.device("/cpu:0"):
current_embed = tf.nn.embedding_lookup(self.Wemb, caption[:, i])
else: # schedule sampling
print(self.schedule_p)
if(np.random.binomial(1,self.schedule_p)==1): # schedule_p is the probability that this draw comes out 1 (feed the ground-truth word)
with tf.device("/cpu:0"):
current_embed = tf.nn.embedding_lookup(self.Wemb, caption[:, i])
else:
max_prob_index = tf.argmax(logit_words, 1)[0]
with tf.device("/cpu:0"):
current_embed = tf.nn.embedding_lookup(self.Wemb, max_prob_index)
with tf.variable_scope("LSTM1",reuse= True):
output1, state1 = self.lstm1(padding, state1)
##### attention ####
context = []
if i == 0:
new_z = self.attention_z
# h_list_flat = tf.reshape(h_list,[-1,self.lstm1.state_size])
# print("h_list_flat shape, ", h_list_flat.shape) # 5120,2000
# for sample in range(0, self.batch_size):
# alpha_list = [] # a list to store alpha"s" in each training sample
# for step_ in range(0,self.n_video_lstm_step):
# alpha =1 - tf.losses.cosine_distance(h_list[sample,step_,:], new_z[sample,:], dim=0)
# alpha_list.append(alpha)
# alpha_list = tf.expand_dims(alpha_list,1)
# ci = tf.reduce_sum(tf.multiply(alpha_list, h_list[sample,:,:]),axis = 0)
# context.append(ci)
# context = tf.stack(context)
# print("context shape", content.shape)
h_list_flat = tf.reshape(h_list,[-1,self.lstm1.state_size])
htmp = tf.matmul(h_list_flat,self.attention_W) # for matmul operation (5120,2000)
hW = tf.reshape(htmp,[self.batch_size, self.n_video_lstm_step,self.lstm2.state_size])
for x in range(0,self.batch_size):
x_alpha = tf.reduce_sum(tf.multiply(hW[x,:,:],new_z[x,:]),axis=1)
x_alpha = tf.nn.softmax(x_alpha)
x_alpha = tf.expand_dims(x_alpha,1)
x_new_z = tf.reduce_sum(tf.multiply(x_alpha,h_list[x,:,:]),axis=0)
context.append(x_new_z)
context = tf.stack(context)
print("context shape", context.shape)
with tf.variable_scope("LSTM2", reuse= True):
print(output1.shape) # (64,1000)
output2, state2 = self.lstm2(tf.concat([current_embed, output1, context], 1), state2)
new_z = state2
labels = tf.expand_dims(caption[:, i+1], 1) # (batch_size, 1)
indices = tf.expand_dims(tf.range(0, self.batch_size, 1), 1) # (batch_size, 1)
concated = tf.concat([indices, labels], 1)
onehot_labels = tf.sparse_to_dense(concated, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
logit_words = tf.nn.xw_plus_b(output2, self.embed_word_W, self.embed_word_b) #probability of each word
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit_words, labels= onehot_labels)
cross_entropy = cross_entropy * caption_mask[:,i]
probs.append(logit_words)
current_loss = tf.reduce_sum(cross_entropy)/self.batch_size
loss = loss + current_loss
return loss, video, video_mask, caption, caption_mask, probs
def build_generator(self):
# batch_size = 1
context_padding = tf.zeros([1, self.lstm2.state_size])
h_list = []
video = tf.placeholder(tf.float32, [1, self.n_video_lstm_step, self.dim_image]) # (80, 4096)
video_mask = tf.placeholder(tf.float32, [1, self.n_video_lstm_step])
video_flat = tf.reshape(video, [-1, self.dim_image])
image_emb = tf.nn.xw_plus_b(video_flat, self.encode_image_W, self.encode_image_b)
image_emb = tf.reshape(image_emb, [1, self.n_video_lstm_step, self.dim_hidden])
state1 = tf.zeros([1, self.lstm1.state_size])
state2 = tf.zeros([1, self.lstm2.state_size])
padding = tf.zeros([1, self.dim_hidden])
generated_words = []
probs = []
embeds = []
for i in range(0, self.n_video_lstm_step):
with tf.variable_scope("LSTM1", reuse=(i!=0)):
output1, state1 = self.lstm1(image_emb[:, i, :], state1)
h_list.append(state1)
with tf.variable_scope("LSTM2", reuse=(i!=0)):
output2, state2 = self.lstm2(tf.concat([padding, output1, context_padding], 1), state2)
h_list = tf.stack(h_list,axis=1)
for i in range(0, self.n_caption_lstm_step):
if i == 0:
with tf.device('/cpu:0'):
current_embed = tf.nn.embedding_lookup(self.Wemb, tf.ones([1], dtype=tf.int64))
with tf.variable_scope("LSTM1", reuse=True):
output1, state1 = self.lstm1(padding, state1)
with tf.variable_scope("LSTM2", reuse=True):
context = []
if i == 0:
new_z = self.attention_z
h_list_flat = tf.reshape(h_list,[-1,self.lstm1.state_size])
htmp = tf.matmul(h_list_flat,self.attention_W)
hW = tf.reshape(htmp, [1, self.n_video_lstm_step,self.lstm1.state_size])
for x in range(0,1): # only one sample
x_alpha = tf.reduce_sum(tf.multiply(hW[x,:,:],new_z[x,:]),axis=1)
x_alpha = tf.nn.softmax(x_alpha)
x_alpha = tf.expand_dims(x_alpha,1)
x_new_z = tf.reduce_sum(tf.multiply(x_alpha,h_list[x,:,:]),axis=0)
context.append(x_new_z)
context = tf.stack(context)
output2, state2 = self.lstm2(tf.concat([current_embed, output1,context],1), state2)
new_z = state2
logit_words = tf.nn.xw_plus_b( output2, self.embed_word_W, self.embed_word_b)
max_prob_index = tf.argmax(logit_words, 1)[0]
generated_words.append(max_prob_index)
probs.append(logit_words)
with tf.device("/cpu:0"):
current_embed = tf.nn.embedding_lookup(self.Wemb, max_prob_index)
current_embed = tf.expand_dims(current_embed, 0)
embeds.append(current_embed)
return video, video_mask, generated_words, probs, embeds
dim_image = 4096
dim_hidden= 256
n_video_lstm_step = 80
n_caption_lstm_step = 15
n_frame_step = 80
n_epochs = 1000
batch_size = 32
learning_rate = 0.0001
ixtoword = pd.Series(
|
np.load('./ixtoword.npy')
|
numpy.load
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/8 15:50
# @Author : DaiPuWei
# E-Mail : <EMAIL>
# blog : https://blog.csdn.net/qq_30091945
# @Site : Lab 506, North Teaching Building 25, Civil Aviation University of China
# @File : LinearRegression.py
# @Software: PyCharm
import numpy as np
class LinearRegression(object):
def __init__(self,input_data,realresult,theta = None):
"""
:param input_data: input data
:param realresult: ground-truth results
:param theta: linear regression parameters; defaults to None, i.e. they may be omitted
"""
# Build the input data array
self.InputData = []
# Prepend the constant term 1 to each group of input data
for data in input_data:
Data = [1.0]
# Extend Data with input_data, i.e. append every component of input_data to Data
Data.extend(list(data))
self.InputData.append(Data)
self.InputData = np.array(self.InputData)
# Store the results corresponding to the input data
self.Result = realresult
if type(self.Result) != np.ndarray:
self.Result = np.array(self.Result)
# When the theta parameter is not None, use theta as the model parameters
if theta is not None:
self.Theta = theta
else:
# Otherwise draw parameters at random from a standard normal distribution
self.Theta = np.random.randn((
|
np.shape(input_data)
|
numpy.shape
|
import numpy as np
import scipy.linalg as la
from scipy.linalg import eig, eigh
from scipy.integrate import simps, quadrature
from scipy import special as ss
from coulomb_funcs import mycoulfg_mix_rescaled_ufunc, coulombw_ufunc, drho_coulombw_ufunc, \
coulombc_ufunc, coulomb_heta_ufunc, coulombf_ufunc, mycoulfg_mix_ufunc
from two_body_comp_pot import two_body_pot
def nonlocal_matrix_element_gauss(wf1, wf2, r_nodes, r_weights, op=None):
"""
Calculate the matrix element of NONLOCAL operator op, using quadratures
from the two_body_pot objects.
When op is None, it defaults to delta(r - rp) on the mesh, which is delta_{r,rp}/weight.
"""
if op is None:
op = np.diag(1/r_weights)
return (wf1 * r_weights) @ op @ (wf2 * r_weights)
def local_matrix_element_gauss(wf1, wf2, r_nodes, r_weights, op=None):
"""
Calculate the matrix element of op (which is taken to be
the identity if omitted) using Gaussian quadratures from the two_body_pot
objects.
"""
if op is None:
op = np.ones(len(wf1))
return (wf1 * op * wf2) @ r_weights
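# A small added sketch (illustration only; the uniform r mesh below is a
# made-up stand-in for the two_body_pot quadrature nodes/weights): with
# op=None the nonlocal matrix element collapses to the local overlap.
def _demo_matrix_elements(n=50):
    r_nodes = np.linspace(0.1, 5.0, n)
    r_weights = np.full(n, r_nodes[1] - r_nodes[0])
    wf1 = np.exp(-r_nodes)
    wf2 = r_nodes * np.exp(-r_nodes)
    nonlocal_val = nonlocal_matrix_element_gauss(wf1, wf2, r_nodes, r_weights)
    local_val = local_matrix_element_gauss(wf1, wf2, r_nodes, r_weights)
    assert np.isclose(nonlocal_val, local_val)
    return local_val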
def ere_from_tau(angL=0, kc=0, k=10, tau=0.1, test_not_coulomb=True):
"""
Based on tau values, compute the good ERE:
    w(angL) * (c_{eta,angL})^2 * k^(2*angL+1) * cot(delta(k)) + 2*eta*v*k^(2*angL+1)*Re[Heta],
with w(angL) = (Gamma[2*angL+2] / Gamma[angL+1] / 2^angL)^2;
v = prod_{j=1}^{angL} (1 + eta^2/j^2) (note v = 1 for angL = 0);
c_{eta,angL} is defined in coulombc_ufunc in coulomb_funcs.py.
This ERE definition smoothly transitions to the ERE function of the eta = 0 case (neutral-particle scattering).
"""
L=angL
tandelta=tau*k
if test_not_coulomb:
goodere=k**(2*L+1)/tandelta
else :
eta=kc/k
cetaLsq=(np.array(coulombc_ufunc(L,eta)).astype(float))**2
w=(ss.factorial(2*L+1)/ss.factorial(L)/2**L)**2
if L==0:
v=1.
else:
v=np.prod(np.array([1+eta**2/j**2 for j in range(1,L+1) ]), axis=0)
heta=np.array(coulomb_heta_ufunc(eta)).astype(complex)
fullere= k**(2*L+1) * w * cetaLsq / tandelta
goodere=fullere+k**(2*L+1)*2*eta* v *
|
np.real(heta)
|
numpy.real
|
import sys
import numpy as np
import pyBigWig
import scipy.stats
def mse(x1,x2):
return ((x1-x2) ** 2.).mean()
def masked_mse(x1,x2,mask): #binary mask for e.g. gene regions
return ((x1-x2) ** 2.).dot(mask)/mask.sum()
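# A minimal added sketch (illustration only, not part of the original script):
# with a binary mask, masked_mse equals the plain mse restricted to the
# masked bins.
def _demo_masked_mse():
    x1 = np.array([1.0, 2.0, 3.0, 4.0])
    x2 = np.array([1.0, 0.0, 3.0, 0.0])
    mask = np.array([0.0, 1.0, 0.0, 1.0])
    assert np.isclose(masked_mse(x1, x2, mask), mse(x1[mask == 1], x2[mask == 1]))
    return masked_mse(x1, x2, mask)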
chr_all=['chr' + str(i) for i in range(1,23)] + ['chrX']
num_bp=np.array([248956422,242193529,198295559,190214555,181538259,170805979,159345973,145138636,138394717,133797422,135086622,133275309,114364328,107043718,101991189,90338345,83257441,80373285,58617616,64444167,46709983,50818468,156040895])
num_25bp=np.ceil(num_bp/25.0).astype('int')
chr_len=dict(zip(chr_all,num_bp.tolist()))
chr_len25=dict(zip(chr_all,num_25bp.tolist()))
weight=num_25bp/float(np.sum(num_25bp))
# masked mse for promoter, gene, and enhancer regions
dict_mask1={}
dict_mask2={}
dict_mask3={}
for the_chr in chr_all[20:21]:
bw=pyBigWig.open('../data_challenge/anno/prom.bigwig')
dict_mask1[the_chr]=np.array(bw.values(the_chr,0,chr_len25[the_chr]))
bw.close()
bw=pyBigWig.open('../data_challenge/anno/gene.bigwig')
dict_mask2[the_chr]=np.array(bw.values(the_chr,0,chr_len25[the_chr]))
bw.close()
bw=pyBigWig.open('../data_challenge/anno/enh.bigwig')
dict_mask3[the_chr]=np.array(bw.values(the_chr,0,chr_len25[the_chr]))
bw.close()
the_cell='51'
the_assay='M29'
the_id = 'C' + the_cell + the_assay
the_chr='chr21'
dict_var={}
dict_var[the_chr]=np.load('example/var_' + the_assay + '_' + the_chr + '.npy')
## load data
mat=np.zeros((2, chr_len25[the_chr]))
# 0.gt
mat[0,:]=np.load('example/C51M29_chr21.npy')
# 1.ocelot prediction
mat[1,:]=np.load('example/pred25bp_C51M29_chr21.npy')
## order for extra metrics
mat_argsort=np.zeros((2, chr_len25[the_chr]),dtype=int)
mat_rank=
|
np.zeros((2, chr_len25[the_chr]))
|
numpy.zeros
|
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
import matplotlib.pyplot as plt
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
mean = 10
std = 1
sample_size = 1000
samples = np.random.normal(mean, std, sample_size)
u_gaussian = UnivariateGaussian()
u_gaussian.fit(samples)
print("(" + str(u_gaussian.mu_) + ", " + str(u_gaussian.var_) + ")")
# Question 2 - Empirically showing sample mean is consistent
sample_sizes = np.arange(10, 1010, 10)
absolute_distances = np.empty(sample_sizes.shape[0])
for i in range(sample_sizes.shape[0]):
new_samples =
|
np.random.normal(mean, std, sample_sizes[i])
|
numpy.random.normal
|
import os
import logging
import numpy as np
from numpy.random import RandomState
import astropy.units as u
from astropy.units import Quantity
import warnings
from configparser import ConfigParser
import regions
import appdirs
# Configuration
soxs_cfg_defaults = {"soxs_data_dir": "/does/not/exist",
"abund_table": "angr",
"apec_vers": "3.0.9"}
CONFIG_DIR = os.environ.get('XDG_CONFIG_HOME',
os.path.join(os.path.expanduser('~'),
'.config', 'soxs'))
if not os.path.exists(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
except OSError:
warnings.warn("unable to create soxs config directory")
CURRENT_CONFIG_FILE = os.path.join(CONFIG_DIR, 'soxs.cfg')
if not os.path.exists(CURRENT_CONFIG_FILE):
cp = ConfigParser()
cp.add_section("soxs")
try:
with open(CURRENT_CONFIG_FILE, 'w') as new_cfg:
cp.write(new_cfg)
except IOError:
warnings.warn("unable to write new config file")
soxs_cfg = ConfigParser(soxs_cfg_defaults)
soxs_cfg.read([CURRENT_CONFIG_FILE, 'soxs.cfg'])
if not soxs_cfg.has_section("soxs"):
soxs_cfg.add_section("soxs")
# Logging
soxsLogger = logging.getLogger("soxs")
ufstring = "%(name)-3s : [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s : [%(levelname)-18s] %(asctime)s %(message)s"
soxs_sh = logging.StreamHandler()
# create formatter and add it to the handlers
formatter = logging.Formatter(ufstring)
soxs_sh.setFormatter(formatter)
# add the handler to the logger
soxsLogger.addHandler(soxs_sh)
soxsLogger.setLevel('INFO')
soxsLogger.propagate = False
mylog = soxsLogger
mylog.setLevel('INFO')
if soxs_cfg.has_option("soxs", "response_path"):
mylog.warning("The 'response_path' option in the SOXS configuration "
"is deprecated and has been replaced with 'soxs_data_dir'. "
"Please update your configuration accordingly.")
soxs_cfg.set("soxs", "soxs_data_dir", soxs_cfg.get("soxs", "response_path"))
if soxs_cfg.get("soxs", "soxs_data_dir") == "/does/not/exist":
soxs_data_dir = appdirs.user_cache_dir("soxs")
mylog.warning(f"Setting 'soxs_data_dir' to {soxs_data_dir} for this session. "
f"Please update your configuration if you want it somewhere else.")
soxs_cfg.set("soxs", "soxs_data_dir", appdirs.user_cache_dir("soxs"))
def issue_deprecation_warning(msg):
import warnings
from numpy import VisibleDeprecationWarning
warnings.warn(msg, VisibleDeprecationWarning, stacklevel=3)
soxs_path = os.path.abspath(os.path.dirname(__file__))
soxs_files_path = os.path.join(soxs_path, "files")
def parse_prng(prng):
if isinstance(prng, RandomState):
return prng
else:
return RandomState(prng)
def iterable(obj):
"""
Grabbed from Python Cookbook / matplotlib.cbook.
Returns true/false for *obj* iterable.
"""
try:
len(obj)
except:
return False
return True
def ensure_list(obj):
"""
This function ensures that *obj* is a list. Typically used to convert a
string to a list, for instance ensuring the *fields* as an argument is a
list.
"""
if obj is None:
return [obj]
if not isinstance(obj, list):
return [obj]
return obj
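# A minimal added sketch (illustration only): how the small helpers above are
# typically used.
def _demo_helpers():
    rs = RandomState(42)
    assert parse_prng(rs) is rs                      # a RandomState passes through
    assert isinstance(parse_prng(25), RandomState)   # an int seeds a new one
    assert iterable([1, 2, 3]) and not iterable(5)
    assert ensure_list(None) == [None]
    assert ensure_list("fields") == ["fields"]
    assert ensure_list(["a", "b"]) == ["a", "b"]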
def ensure_numpy_array(obj):
"""
This function ensures that *obj* is a numpy array.
Typically used to convert scalar, list or tuple
argument passed to functions using Cython.
"""
if isinstance(obj, np.ndarray):
if obj.shape == ():
return np.array([obj])
# We cast to ndarray to catch ndarray subclasses
return
|
np.array(obj)
|
numpy.array
|
"""
This file contains the core algorithms for
* the forward mode (univariate Taylor polynomial arithmetic)
* the reverse mode
The functions are operating solely on numpy datastructures.
Rationale
---------
If speed is an issue, one can rather easily replace
the function implementations by C or Fortran functions.
"""
import math
import functools
import numpy
from numpy.lib.stride_tricks import as_strided, broadcast_arrays
try:
import scipy.linalg
import scipy.special
except ImportError:
pass
try:
import pytpcore
except ImportError:
pytpcore = None
from algopy import nthderiv
def _plus_const(x_data, c, out=None):
"""
Constants are only added to the d=0 slice of the data array.
A function like this is not so useful for multiplication by a constant,
because UTPM multiplication by a constant scales the entire data array
rather than acting on only the d=0 slice.
"""
if out is None:
y_data = numpy.copy(x_data)
else:
y_data = out
y_data[0] += c
return y_data
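# A minimal added sketch (illustration, not part of the original module):
# only the d=0 slice of the UTP data array is shifted by the constant.
def _demo_plus_const():
    x_data = numpy.zeros((3, 1))   # D=3 coefficients, P=1 direction
    y_data = _plus_const(x_data, 5.0)
    assert y_data[0, 0] == 5.0 and numpy.all(y_data[1:] == 0.0)
    return y_data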
def _eval_slow_generic(f, x_data, out=None):
"""
This is related to summations associated with the name '<NAME>.'
@param f: f(X, out=None, n=0) computes the nth derivative of f at X
@param x_data: UTP data array of shape (D, P, ...) holding the truncated Taylor coefficients of x
@param out: optional output array with the same shape as x_data
@return: UTP data array of the truncated Taylor coefficients of f(x)
"""
#FIXME: Improve or replace this function.
# It is intended to help with naive implementations
# of truncated taylor expansions
# of functions of a low degree polynomial,
# when the nth derivatives of the function of interest
# can be computed more or less directly.
y_data = nthderiv.np_filled_like(x_data, 0, out=out)
D, P = x_data.shape[:2]
# base point: d = 0
y_data[0] = f(x_data[0])
# higher order coefficients: d > 0
for d in range(1, D):
# Accumulate coefficients of truncated expansions of powers
# of the polynomial.
if d == 1:
accum = x_data[1:].copy()
else:
for i in range(D-2, 0, -1):
accum[i] = numpy.sum(accum[:i] * x_data[i:0:-1], axis=0)
accum[0] = 0.
# Add the contribution of this summation term.
y_data[1:] += f(x_data[0], n=d) * accum / float(math.factorial(d))
return y_data
def _black_f_white_fprime(f, fprime_data, x_data, out=None):
"""
The function evaluation is a black box, but the derivative is compound.
@param f: computes the scalar function directly
@param fprime_data: UTP data array of the evaluated derivative f'(x)
@param x_data: UTP data array of shape (D, P, ...) holding the truncated Taylor coefficients of x
@param out: optional output array with the same shape as x_data
@return: UTP data array of the truncated Taylor coefficients of f(x)
"""
y_data = nthderiv.np_filled_like(x_data, 0, out=out)
D, P = x_data.shape[:2]
# Do the direct computation efficiently (e.g. using a C implementation of erf).
y_data[0] = f(x_data[0])
# Compute the truncated series coefficients using discrete convolution.
#FIXME: one of these two loops can be vectorized
for d in range(1, D):
for c in range(d):
y_data[d] += fprime_data[d-1-c] * x_data[c+1] * (c+1)
y_data[d] /= d
return y_data
def _taylor_polynomials_of_ode_solutions(
a_data, b_data, c_data,
u_data, v_data,
):
"""
This is a general O(D^2) algorithm for functions that are ODE solutions.
It is an attempt to implement Proposition 13.1
of "Evaluating Derivatives" by Griewank and Walther (2008).
The function must satisfy the identity
b(u) f'(u) - a(u) f(u) = c(u)
where a, b and c are already represented by their Taylor expansions.
Also u is represented as a Taylor expansion, and so is v.
But we are only given the first term of v, which is the recursion base.
In this function we use the notation from the book mentioned above.
"""
# define the number of terms allowed in the truncated series
D = u_data.shape[0]
d = D-1
# these arrays have elements that are scaled slightly differently
u_tilde_data = u_data.copy()
v_tilde_data = v_data.copy()
for j in range(1, D):
u_tilde_data[j] *= j
v_tilde_data[j] *= j
# this is just convenient temporary storage which is not so important
s = numpy.zeros_like(u_data)
# on the other hand the e_data is very important for recursion
e_data = numpy.zeros_like(u_data)
# do the dynamic programming to fill the v_data array
for k in range(D):
if k > 0:
for j in range(1, k+1):
s[k] += (c_data[k-j] + e_data[k-j]) * u_tilde_data[j]
for j in range(1, k):
s[k] -= b_data[k-j] * v_tilde_data[j]
v_tilde_data[k] = s[k] / b_data[0]
v_data[k] = v_tilde_data[k] / k
if k < d:
for j in range(k+1):
e_data[k] += a_data[j] * v_data[k-j]
return v_data
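# A small worked example (added illustration, not part of the original module):
# f = exp satisfies f'(u) - f(u) = 0, i.e. a(u) = 1, b(u) = 1, c(u) = 0 in the
# identity above.  With u(t) = t the recursion reproduces the Taylor
# coefficients of exp(t), namely 1/d!.
def _demo_ode_taylor_exp(D=5):
    u_data = numpy.zeros(D)
    u_data[1] = 1.0                 # u(t) = t
    a_data = numpy.zeros(D)
    a_data[0] = 1.0                 # a(u) = 1
    b_data = numpy.zeros(D)
    b_data[0] = 1.0                 # b(u) = 1
    c_data = numpy.zeros(D)         # c(u) = 0
    v_data = numpy.zeros(D)
    v_data[0] = 1.0                 # recursion base: exp(u(0)) = 1
    v_data = _taylor_polynomials_of_ode_solutions(a_data, b_data, c_data, u_data, v_data)
    expected = numpy.array([1.0 / math.factorial(d) for d in range(D)])
    assert numpy.allclose(v_data, expected)
    return v_data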
def vdot(x,y, z = None):
"""
vectorized dot
z = vdot(x,y)
Rationale:
given two iterable containers (list, array, ...) x and y
this function computes::
z[i] = numpy.dot(x[i],y[i])
if z is None, this function allocates the necessary memory
Warning: the naming is inconsistent with numpy.vdot
Warning: this is a preliminary version that is likely to be changed
"""
x_shp = numpy.shape(x)
y_shp = numpy.shape(y)
if x_shp[-1] != y_shp[-2]:
raise ValueError('got x.shape = %s and y.shape = %s'%(str(x_shp),str(y_shp)))
if numpy.ndim(x) == 3:
P,N,M = x_shp
P,M,K = y_shp
retval = numpy.zeros((P,N,K))
for p in range(P):
retval[p,:,:] = numpy.dot(x[p,:,:], y[p,:,:])
return retval
elif numpy.ndim(x) == 4:
D,P,N,M = x_shp
D,P,M,K = y_shp
retval = numpy.zeros((D,P,N,K))
for d in range(D):
for p in range(P):
retval[d,p,:,:] = numpy.dot(x[d,p,:,:], y[d,p,:,:])
return retval
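# A minimal added sketch (illustration only): for 3-D inputs of shape (P,N,M)
# and (P,M,K), vdot stacks the P individual matrix products.
def _demo_vdot(P=2, N=3, M=4, K=5):
    x = numpy.random.rand(P, N, M)
    y = numpy.random.rand(P, M, K)
    z = vdot(x, y)
    assert z.shape == (P, N, K)
    assert numpy.allclose(z[0], numpy.dot(x[0], y[0]))
    return z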
def truncated_triple_dot(X,Y,Z, D):
"""
computes d^D/dt^D ( [X]_D [Y]_D [Z]_D) with t set to zero after differentiation
X,Y,Z are (DT,P,N,M) arrays s.t. the dimensions match to compute dot(X[d,p,:,:], dot(Y[d,p,:,:], Z[d,p,:,:]))
"""
import algopy.exact_interpolation
noP = False
if len(X.shape) == 3:
noP = True
DT,NX,MX = X.shape
X = X.reshape((DT,1,NX,MX))
if len(Y.shape) == 3:
noP = True
DT,NY,MY = Y.shape
Y = Y.reshape((DT,1,NY,MY))
if len(Z.shape) == 3:
noP = True
DT,NZ,MZ = Z.shape
Z = Z.reshape((DT,1,NZ,MZ))
DT,P,NX,MX = X.shape
DT,P,NZ,MZ = Z.shape
multi_indices = algopy.exact_interpolation.generate_multi_indices(3,D)
retval = numpy.zeros((P,NX,MZ))
for mi in multi_indices:
for p in range(P):
if mi[0] == D or mi[1] == D or mi[2] == D:
continue
retval[p] += numpy.dot(X[mi[0],p,:,:], numpy.dot(Y[mi[1],p,:,:], Z[mi[2],p,:,:]))
if noP == False:
return retval
else:
return retval[0]
def broadcast_arrays_shape(x_shp,y_shp):
if len(x_shp) < len(y_shp):
tmp = x_shp
x_shp = y_shp
y_shp = tmp
z_shp = numpy.array(x_shp,dtype=int)
for l in range(1,len(y_shp)-1):
if z_shp[-l] == 1: z_shp[-l] = y_shp[-l]
elif z_shp[-l] != 1 and y_shp[-l] != 1 and z_shp[-l] != y_shp[-l]:
raise ValueError('cannot broadcast arrays')
return z_shp
class RawAlgorithmsMixIn:
@classmethod
def _broadcast_arrays(cls, x_data, y_data):
""" UTPM equivalent of numpy.broadcast_arrays """
# transpose arrays s.t. numpy.broadcast can be used
Lx = len(x_data.shape)
Ly = len(y_data.shape)
x_data = x_data.transpose( tuple(range(2,Lx)) + (0,1))
y_data = y_data.transpose( tuple(range(2,Ly)) + (0,1))
# broadcast arrays
x_data, y_data = broadcast_arrays(x_data, y_data)
# transpose into the original format
Lx = len(x_data.shape)
Ly = len(y_data.shape)
x_data = x_data.transpose( (Lx-2, Lx-1) + tuple(range(Lx-2)) )
y_data = y_data.transpose( (Ly-2, Ly-1) + tuple(range(Lx-2)) )
return x_data, y_data
@classmethod
def _mul(cls, x_data, y_data, out=None):
"""
z = x*y
"""
if numpy.shape(x_data) != numpy.shape(y_data):
raise NotImplementedError
D, P = x_data.shape[:2]
#FIXME: there is a memoryview and buffer contiguity checking error
# which may or may not be caused by a bug in numpy or cython.
if pytpcore and all(s > 1 for s in x_data.shape):
# tp_mul is not careful about aliasing
z_data = numpy.empty_like(x_data)
x_data_reshaped = x_data.reshape((D, -1))
y_data_reshaped = y_data.reshape((D, -1))
z_data_reshaped = z_data.reshape((D, -1))
pytpcore.tp_mul(x_data_reshaped, y_data_reshaped, z_data_reshaped)
if out is not None:
out[...] = z_data_reshaped.reshape((z_data.shape))
return out
else:
return z_data
else:
# numpy.sum is careful about aliasing so we can use out=z_data
if out is None:
z_data = numpy.empty_like(x_data)
else:
z_data = out
for d in range(D)[::-1]:
numpy.sum(
x_data[:d+1,:,...] * y_data[d::-1,:,...],
axis=0,
out = z_data[d,:,...])
return z_data
@classmethod
def _minimum(cls, x_data, y_data, out=None):
if x_data.shape != y_data.shape:
raise NotImplementedError(
'algopy broadcasting is not implemented for this function')
D = x_data.shape[0]
xmask = numpy.less_equal(x_data[0], y_data[0])
ymask = 1 - xmask
z_data = numpy.empty_like(x_data)
for d in range(D):
numpy.add(xmask * x_data[d], ymask * y_data[d], out=z_data[d])
if out is not None:
out[...] = z_data[...]
return out
else:
return z_data
@classmethod
def _maximum(cls, x_data, y_data, out=None):
if x_data.shape != y_data.shape:
raise NotImplementedError(
'algopy broadcasting is not implemented for this function')
D = x_data.shape[0]
xmask = numpy.greater_equal(x_data[0], y_data[0])
ymask = 1 - xmask
z_data = numpy.empty_like(x_data)
for d in range(D):
numpy.add(xmask * x_data[d], ymask * y_data[d], out=z_data[d])
if out is not None:
out[...] = z_data[...]
return out
else:
return z_data
@classmethod
def _amul(cls, x_data, y_data, out = None):
"""
z += x*y
"""
z_data = out
if out is None:
raise NotImplementedError
(D,P) = z_data.shape[:2]
for d in range(D):
z_data[d,:,...] += numpy.sum(x_data[:d+1,:,...] * y_data[d::-1,:,...], axis=0)
@classmethod
def _itruediv(cls, z_data, x_data):
(D,P) = z_data.shape[:2]
tmp_data = z_data.copy()
for d in range(D):
tmp_data[d,:,...] = 1./ x_data[0,:,...] * ( z_data[d,:,...] - numpy.sum(tmp_data[:d,:,...] * x_data[d:0:-1,:,...], axis=0))
z_data[...] = tmp_data[...]
@classmethod
def _truediv(cls, x_data, y_data, out = None):
"""
z = x/y
"""
if out is None:
raise NotImplementedError
z_data = numpy.empty_like(out)
(D,P) = z_data.shape[:2]
for d in range(D):
z_data[d,:,...] = 1./ y_data[0,:,...] * ( x_data[d,:,...] - numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...], axis=0))
out[...] = z_data[...]
return out
@classmethod
def _reciprocal(cls, y_data, out=None):
"""
z = 1/y
"""
#FIXME: this function could use some attention;
# it was copypasted from div
z_data = numpy.empty_like(y_data)
D = y_data.shape[0]
if pytpcore:
y_data_reshaped = y_data.reshape((D, -1))
z_data_reshaped = z_data.reshape((D, -1))
pytpcore.tp_reciprocal(y_data_reshaped, z_data_reshaped)
else:
for d in range(D):
if d == 0:
z_data[d,:,...] = 1./ y_data[0,:,...] * ( 1 - numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...], axis=0))
else:
z_data[d,:,...] = 1./ y_data[0,:,...] * ( 0 - numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...], axis=0))
if out is not None:
out[...] = z_data[...]
return out
else:
return z_data
@classmethod
def _pb_reciprocal(cls, ybar_data, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
#FIXME: this is probably dumb
tmp = -cls._reciprocal(cls._square(x_data))
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _floordiv(cls, x_data, y_data, out = None):
"""
z = x // y
use L'Hospital's rule when leading coefficients of y_data are zero
"""
z_data = out
if out is None:
raise NotImplementedError
(D,P) = z_data.shape[:2]
x_data = x_data.copy()
y_data = y_data.copy()
#print x_data
#print y_data
# left shifting x_data and y_data if necessary
mask = Ellipsis
while True:
mask = numpy.where( abs(y_data[0, mask]) <= 1e-8)
if len(mask[0]) == 0:
break
elif len(mask) == 1:
mask = mask[0]
x_data[:D-1, mask] = x_data[1:, mask]
x_data[D-1, mask] = 0.
y_data[:D-1, mask] = y_data[1:, mask]
y_data[D-1, mask] = 0.
for d in range(D):
z_data[d,:,...] = 1./ y_data[0,:,...] * \
( x_data[d,:,...]
- numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...],
axis=0)
)
@classmethod
def _pow_real(cls, x_data, r, out = None):
""" y = x**r, where r is scalar """
y_data = out
if out is None:
raise NotImplementedError
(D,P) = y_data.shape[:2]
if type(r) == int and r >= 0:
if r == 0:
y_data[...] = 0.
y_data[0, ...] = 1.
return y_data
elif r == 1:
y_data[...] = x_data[...]
return y_data
elif r == 2:
return cls._square(x_data, out=y_data)
elif r >= 3:
y_data[...] = x_data[...]
for nr in range(r-1):
cls._mul(x_data, y_data, y_data)
return y_data
else:
raise NotImplementedError("power to %d is not implemented" % r)
y_data[0] = x_data[0]**r
for d in range(1,D):
y_data[d] = r * numpy.sum([y_data[d-k] * k * x_data[k] for k in range(1,d+1)], axis = 0) - \
numpy.sum([ x_data[d-k] * k * y_data[k] for k in range(1,d)], axis = 0)
y_data[d] /= x_data[0]
y_data[d] /= d
@classmethod
def _pb_pow_real(cls, ybar_data, x_data, r, y_data, out = None):
""" pullback function of y = pow(x,r) """
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
(D,P) = y_data.shape[:2]
# if r == 0:
# raise NotImplementedError('x**0 is special and has not been implemented')
# if type(r) == int:
# if r == 2:
# print 'r=',r
# print 'x_data=',x_data
# print 'y_data=',y_data
# print 'xbar_data=',xbar_data
# print 'ybar_data=',ybar_data
if type(r) == int:
if r > 0:
tmp = numpy.zeros_like(xbar_data)
cls._pow_real(x_data, r - 1, out = tmp)
tmp *= r
cls._mul(ybar_data, tmp, tmp)
xbar_data += tmp
else:
tmp = numpy.zeros_like(xbar_data)
cls._truediv(y_data, x_data, tmp)
tmp[...] = numpy.nan_to_num(tmp)
cls._mul(ybar_data, tmp, tmp)
tmp *= r
xbar_data += tmp
# print 'xbar_data=',xbar_data
@classmethod
def _max(cls, x_data, axis = None, out = None):
if out is None:
raise NotImplementedError('should implement that')
x_shp = x_data.shape
D,P = x_shp[:2]
shp = x_shp[2:]
if len(shp) > 1:
raise NotImplementedError('should implement that')
for p in range(P):
out[:,p] = x_data[:,p,numpy.argmax(x_data[0,p])]
@classmethod
def _argmax(cls, a_data, axis = None):
if axis is not None:
raise NotImplementedError('should implement that')
a_shp = a_data.shape
D,P = a_shp[:2]
return numpy.argmax(a_data[0].reshape((P,numpy.prod(a_shp[2:]))), axis = 1)
@classmethod
def _absolute(cls, x_data, out=None):
"""
z = |x|
"""
if out is None:
z_data = numpy.empty_like(x_data)
else:
z_data = out
D = x_data.shape[0]
if D > 1:
x_data_sign = numpy.sign(x_data[0])
for d in range(D):
if d == 0:
numpy.absolute(x_data[d], out=z_data[d])
else:
numpy.multiply(x_data[d], x_data_sign, out=z_data[d])
return z_data
@classmethod
def _pb_absolute(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = numpy.empty_like(x_data)
D = x_data.shape[0]
for d in range(D):
if d == 0:
numpy.sign(x_data[d], out=fprime_data[d])
else:
fprime_data[d].fill(0)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _negative(cls, x_data, out=None):
"""
z = -x
"""
return numpy.multiply(x_data, -1, out=out)
@classmethod
def _pb_negative(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = numpy.empty_like(x_data)
fprime_data[0].fill(-1)
fprime_data[1:].fill(0)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _square(cls, x_data, out=None):
"""
z = x*x
This can theoretically be twice as efficient as mul(x, x).
"""
if out is None:
z_data = numpy.empty_like(x_data)
else:
z_data = out
tmp = numpy.zeros_like(x_data)
D, P = x_data.shape[:2]
for d in range(D):
d_half = (d+1) // 2
if d:
AB = x_data[:d_half, :, ...] * x_data[d:d-d_half:-1, :, ...]
numpy.sum(AB * 2, axis=0, out=tmp[d, :, ...])
if (d+1) % 2 == 1:
tmp[d, :, ...] += numpy.square(x_data[d_half, :, ...])
z_data[...] = tmp[...]
return z_data
@classmethod
def _pb_square(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
cls._amul(ybar_data, x_data*2, out=out)
@classmethod
def _sqrt(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data = numpy.zeros_like(x_data)
D,P = x_data.shape[:2]
y_data[0] = numpy.sqrt(x_data[0])
for k in range(1,D):
y_data[k] = 1./(2.*y_data[0]) * ( x_data[k] - numpy.sum( y_data[1:k] * y_data[k-1:0:-1], axis=0))
out[...] = y_data[...]
return out
@classmethod
def _pb_sqrt(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
tmp = xbar_data.copy()
cls._truediv(ybar_data, y_data, tmp)
tmp /= 2.
xbar_data += tmp
return xbar_data
@classmethod
def _exp(cls, x_data, out=None):
if out is None:
y_data = numpy.empty_like(x_data)
else:
y_data = out
D,P = x_data.shape[:2]
if pytpcore:
x_data_reshaped = x_data.reshape((D, -1))
y_data_reshaped = y_data.reshape((D, -1))
tmp = numpy.empty_like(x_data_reshaped)
pytpcore.tp_exp(x_data_reshaped, tmp, y_data_reshaped)
else:
y_data[0] = numpy.exp(x_data[0])
xtctilde = x_data[1:].copy()
for d in range(1,D):
xtctilde[d-1] *= d
for d in range(1, D):
y_data[d] = numpy.sum(y_data[:d][::-1]*xtctilde[:d], axis=0)/d
return y_data
@classmethod
def _pb_exp(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
cls._amul(ybar_data, y_data, xbar_data)
@classmethod
def _expm1(cls, x_data, out=None):
fprime_data = cls._exp(x_data)
return _black_f_white_fprime(
nthderiv.expm1, fprime_data, x_data, out=out)
@classmethod
def _pb_expm1(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = cls._exp(x_data)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _logit(cls, x_data, out=None):
fprime_data = cls._reciprocal(x_data - cls._square(x_data))
return _black_f_white_fprime(
scipy.special.logit, fprime_data, x_data, out=out)
@classmethod
def _pb_logit(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = cls._reciprocal(x_data - cls._square(x_data))
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _expit(cls, x_data, out=None):
b_data = cls._reciprocal(_plus_const(cls._exp(x_data), 1))
fprime_data = b_data - cls._square(b_data)
return _black_f_white_fprime(
scipy.special.expit, fprime_data, x_data, out=out)
@classmethod
def _pb_expit(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
b_data = cls._reciprocal(_plus_const(cls._exp(x_data), 1))
fprime_data = b_data - cls._square(b_data)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _sign(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data = out
D, P = x_data.shape[:2]
y_data[0] =
|
numpy.sign(x_data[0])
|
numpy.sign
|
import numpy as np
from statsmodels.genmod.bayes_mixed_glm import (BinomialBayesMixedGLM,
PoissonBayesMixedGLM)
import pandas as pd
from scipy import sparse
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
def gen_simple_logit(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[1, -1]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(nc, dtype=np.int)
return y, exog_fe, exog_vc, ident
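# A small added shape check (an illustration, not one of the original tests):
# gen_simple_logit(nc, cs, s) yields nc*cs observations, two fixed-effect
# columns and nc variance-component columns.
def _check_gen_simple_logit_shapes():
    y, exog_fe, exog_vc, ident = gen_simple_logit(5, 4, 1.0)
    assert_equal(y.shape, (20,))
    assert_equal(exog_fe.shape, (20, 2))
    assert_equal(exog_vc.shape, (20, 5))
    assert_equal(ident.shape, (5,))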
def gen_simple_poisson(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[0.1, -0.1]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(nc, dtype=np.int)
return y, exog_fe, exog_vc, ident
def gen_crossed_logit(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=np.int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_poisson(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(2 * nc, dtype=np.int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_logit_pandas(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.arange(nc), np.ones(cs))
b = np.kron(np.ones(cs), np.arange(nc))
fe = np.ones(nc * cs)
vc = np.zeros(nc * cs)
for i in np.unique(a):
ii = np.flatnonzero(a == i)
vc[ii] += s1 * np.random.normal()
for i in np.unique(b):
ii = np.flatnonzero(b == i)
vc[ii] += s2 * np.random.normal()
lp = -0.5 * fe + vc
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=np.int)
ident[nc:] = 1
df = pd.DataFrame({"fe": fe, "a": a, "b": b, "y": y})
return df
def test_simple_logit_map():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-3)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt.predict(linear=linear, exog=exog)
pr2 = glmm.predict(rslt.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr1.max() <= 1, True)
def test_simple_poisson_map():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 0.2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
assert_allclose(
glmm1.logposterior_grad(rslt1.params),
np.zeros_like(rslt1.params),
atol=1e-3)
# This should give the same answer as above
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_map()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt1.predict(linear=linear, exog=exog)
pr2 = rslt2.predict(linear=linear, exog=exog)
pr3 = glmm1.predict(rslt1.params, linear=linear, exog=exog)
pr4 = glmm2.predict(rslt2.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2, rtol=1e-5)
assert_allclose(pr2, pr3, rtol=1e-5)
assert_allclose(pr3, pr4, rtol=1e-5)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr2.min() >= 0, True)
assert_equal(pr3.min() >= 0, True)
# Check dimensions and PSD status of cov_params
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_logit_map():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_poisson_map():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_logit_map_crossed_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 0.5)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
rslt.summary()
r = rslt.random_effects("a")
assert_allclose(
r.iloc[0, :].values, np.r_[-0.02004904, 0.094014], atol=1e-4)
# Check dimensions and PSD status of cov_params
cm = rslt.cov_params()
p = rslt.params.shape[0]
assert_equal(list(cm.shape), [p, p])
np.linalg.cholesky(cm)
def test_elbo_grad():
for f in range(2):
for j in range(2):
if f == 0:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
else:
y, exog_fe, exog_vc, ident = gen_crossed_logit(
10, 10, 1, 2)
elif f == 1:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_poisson(
10, 10, 0.5)
else:
y, exog_fe, exog_vc, ident = gen_crossed_poisson(
10, 10, 1, 0.5)
exog_vc = sparse.csr_matrix(exog_vc)
if f == 0:
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
else:
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
for k in range(3):
if k == 0:
vb_mean = rslt1.params
vb_sd = np.ones_like(vb_mean)
elif k == 1:
vb_mean = np.zeros(len(vb_mean))
vb_sd = np.ones_like(vb_mean)
else:
vb_mean = np.random.normal(size=len(vb_mean))
vb_sd = np.random.uniform(1, 2, size=len(vb_mean))
mean_grad, sd_grad = glmm1.vb_elbo_grad(vb_mean, vb_sd)
def elbo(vec):
n = len(vec) // 2
return glmm1.vb_elbo(vec[:n], vec[n:])
x = np.concatenate((vb_mean, vb_sd))
g1 = approx_fprime(x, elbo, 1e-5)
n = len(x) // 2
mean_grad_n = g1[:n]
sd_grad_n = g1[n:]
assert_allclose(mean_grad, mean_grad_n, atol=1e-2, rtol=1e-2)
assert_allclose(sd_grad, sd_grad_n, atol=1e-2, rtol=1e-2)
def test_simple_logit_vb():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 0)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[0.75330405, -0.71643228, -2.49091288, -0.00959806, 0.00450254],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[0.79338836, -0.7599833, -0.64149356, -0.24772884, 0.10775366],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_simple_poisson_vb():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-0.07233493, -0.06706505, -0.47159649, 1.12575122, -1.02442201],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt1.cov_params().flat[0:5],
np.r_[0.00790914, 0.00080666, -0.00050719, 0.00022648, 0.00046235],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.07088814, -0.06373107, -0.22770786, 1.12923746, -1.26161339],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.cov_params()[0:5],
np.r_[0.00747782, 0.0092554, 0.04508904, 0.02934488, 0.20312746],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_logit_vb():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(mean=rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-5.43073978e-01, -2.46197518e+00, -2.36582801e+00,
-9.64030461e-03, 2.32701078e-03],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt1.cov_params().flat[0:5],
np.r_[4.12927123e-02, -2.04448923e-04, 4.64829219e-05, 1.20377543e-04,
-1.45003234e-04],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.70834417, -0.3571011, 0.19126823, -0.36074489, 0.058976],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.cov_params()[0:5],
np.r_[0.05212492, 0.04729656, 0.03916944, 0.25921842, 0.25782576],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_logit_vb_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 2)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm1 = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt1 = glmm1.fit_vb()
glmm2 = BinomialBayesMixedGLM(
glmm1.endog, glmm1.exog, glmm1.exog_vc, glmm1.ident, vcp_p=0.5)
rslt2 = glmm2.fit_vb()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
rslt1.summary()
rslt2.summary()
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_poisson_vb():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 0.5)
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(mean=rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-0.54855281, 0.10458834, -0.68777741, -0.01699925, 0.77200546],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.54691502, 0.22297158, -0.52673802, -0.06218684, 0.74385237],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_poisson_formula():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 0.5)
for vb in False, True:
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident)
if vb:
rslt1 = glmm1.fit_vb()
else:
rslt1 = glmm1.fit_map()
# Build categorical variables that match exog_vc
df = pd.DataFrame({"y": y, "x1": exog_fe[:, 0]})
z1 = np.zeros(len(y))
for j,k in enumerate(np.flatnonzero(ident == 0)):
z1[exog_vc[:, k] == 1] = j
df["z1"] = z1
z2 = np.zeros(len(y))
for j,k in enumerate(
|
np.flatnonzero(ident == 1)
|
numpy.flatnonzero
|
#!/usr/local/bin/python2
from PIL import Image
import numpy
# from scipy import *
import os
import subprocess
# im = Image.open('pic.jpg').convert('L')
im = Image.open('pic.jpg')
# im.save('gray.png','png')
# im = Image.open('gray.png').convert('RGB')
# # im.save('backrgb.jpg','JPEG')
#
# imnew = Image.new('RGB',im.size)
# imnew.paste(im)
# imnew.save('test_backRGB.png','png')
# pic = im.load()
# print(pic[(3,4)])
print(os.pathsep)
print(os.sep)
# sp = subprocess.call('ls -l',shell=True)
# sp = subprocess.call(['ls','-l'])
import time
# import scipy
# sp = subprocess.Popen(['./test_run'])
#
# for i in range(5):
# time.sleep(1)
# print(sp.poll())
# print(sp.returncode)
# sp.kill()
# for i in range(5):
# time.sleep(1)
# print(sp.poll())
# print(sp.returncode)
# while True:
# pass
from PIL import Image
import numpy as np
a = [(1,2,3),(4,5,6),(7,8,9)]
c = []
for el in a:
c.extend(list(el))
print(a)
b = list(a)
print(b)
print(np.ndarray.tolist(np.array(c)))
b = np.array(a)
print(b.size)
print(b)
print(b[1][2])
print(b.tostring())
print('before image')
from scipy import misc
# im = misc.imread('pic.jpg')
im = misc.imread('test_misc_toRGB.jpg')
print(im[4][5])
print(type(im))
print('shape')
print(im.shape)
print(im.dtype)
print('size')
print(im.size)
print(im.ctypes)
print(im.strides)
print(im.__array_interface__['data'])
print(im.tobytes().__len__())
if im.shape[0]*im.shape[1]*im.shape[2] == im.size:
print('size is right')
num = 23
print(str(hex(num)))
arr = [2,3,4,5,6]
print(type((str(arr)[2])))
arr = np.array([[1,2,3,4],[1,2,3,4]],dtype = np.uint8)
# print(arr.tobytes())
arr_new = np.array([1,2,3,4,5,6,7])
print(arr_new)
arr = '101010'.encode('utf8')
print(arr)
print(arr.decode())
print(np.fromstring(arr, dtype = np.uint8))
print(list(arr.decode()))
print(np.array(arr))
print(type(arr))
print('numpy to bytes')
arr = np.array([1,2,3,4,5], dtype = np.uint8)
print(arr)
byarr = arr.tobytes()
print(byarr)
arrbak = np.fromstring(byarr, dtype = np.uint8)
print(arrbak)
im = np.array([1,0,0,1,0,1])
print(im)
img = Image.frombytes('1',(2,3),im.tobytes())
print('abcd')
arr =
|
np.zeros((512,512,3),dtype = np.uint8)
|
numpy.zeros
|
#copyright <NAME> 2018(email_ID:-<EMAIL>,<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
#=========================================================================================================
'''
A simple test script for a handwritten Devanagari numerals image classifier. Its purpose is to take a sample test input and classify it as one of the Devanagari numerals (0-9). The requirements are:
(1) The saved checkpoint that contains our trained session ('./image_train2.ckpt'), in the same location as 'image_recog_train2.py' and 'image_recog_test2.py'.
(2) A handwritten image of 32*32 pixels: (i) it can come from the test set of 'DevanagariHandwrittenCharacterDataset', or (ii) it can be drawn in MS Word with (a) the thickest white pencil stroke on a black background, or (b) the thickest black pencil stroke on a white background.
'''
#import modules
import sys
from PIL import Image,ImageFilter
import numpy as np
import tensorflow as tf
'''
This function takes in the image location and recognizes the image as one of the (0-9) Devanagari numerals:
'''
def img_recog(image_location):
imag=Image.open(image_location)
image=imag.convert('L')
image_data=np.array(image,dtype=np.float32)
image_data =
|
np.multiply(image_data, 1.0/255.0)
|
numpy.multiply
|
import numpy as np
import time
import torch
import torch.nn as nn
from torch.nn import init
class SememeSumLstm(nn.Module):
def __init__(self, sememe_dim, mem_dim):
super(SememeSumLstm, self).__init__()
self.in_dim = sememe_dim
self.mem_dim = mem_dim
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.reset_parameters()
def node_forward(self, inputs):
iou = self.ioux(inputs)# three Wx+b
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
c = torch.mul(i, u)
h = torch.mul(o, torch.tanh(c))
return c, h
def forward(self, inputs):
max_time, batch_size, _ = inputs.size()
c = []
h = []
for time in range(max_time):
new_c, new_h = self.node_forward(inputs[time])
c.append(new_c)
h.append(new_h)
return torch.stack(c, 0), torch.stack(h, 0)
def reset_parameters(self):
layers = [self.ioux]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
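# A minimal usage sketch (added illustration, not part of the original model
# code): run SememeSumLstm on random sememe embeddings and check the shapes.
def _demo_sememe_sum_lstm(max_time=4, batch_size=2, sememe_dim=8, mem_dim=16):
    cell = SememeSumLstm(sememe_dim, mem_dim)
    inputs = torch.randn(max_time, batch_size, sememe_dim)
    c, h = cell(inputs)
    assert c.shape == (max_time, batch_size, mem_dim)
    assert h.shape == (max_time, batch_size, mem_dim)
    return c, h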
class SememeSumGRU(nn.Module):
def __init__(self, sememe_dim, mem_dim):
super(SememeSumGRU, self).__init__()
self.in_dim = sememe_dim
self.mem_dim = mem_dim
self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
self.reset_parameters()
def node_forward(self, inputs):
iou = self.ioux(inputs) # two Wx+b
i, o = torch.split(iou, iou.size(1) // 2, dim=1)
i, o = torch.sigmoid(i), torch.tanh(o)
h = torch.mul(i,o)
return h
def forward(self, inputs):
max_time, batch_size, _ = inputs.size()
h = []
for time in range(max_time):
new_h = self.node_forward(inputs[time])
h.append(new_h)
return torch.stack(h, 0)
def reset_parameters(self):
layers = [self.ioux]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
class LSTM_baseline(nn.Module):
def __init__(self, config, sememe):
super(LSTM_baseline, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.emb_sememe = nn.Embedding(2186, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# The factor of 3 corresponds to the three gate matrices; split separates them later
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
# ious specifically handles the c and h passed from the sememe side; both c and h have dimension mem_dim
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
# fs specifically handles the c and h passed from the sememe side
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fx_s, self.fh, self.fs]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, inputs, length, sememe_data):
# hx: (child_c, child_h)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], hx)
output.append(next_hx[1])
hx = next_hx
return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] =
|
np.fromstring(vec, sep=' ')
|
numpy.fromstring
|
from IPython.core.display import Image, display
from PIL import Image as Img
from PIL import ImageDraw as ImgDraw
from PIL import ImageFont
from random import uniform
import xml.etree.ElementTree as ET
import numpy as np
import os
import json
from json import JSONEncoder
def read_labels(path):
with open(path) as f:
labels = f.readlines()
labels = [l.strip() for l in labels]
labels_count = len(labels)
return labels, labels_count
class LabelEncoder():
def __init__(self, labels):
self.__dict__ = dict()
index = 0
for label in labels:
self.__dict__[label] = index
self.__dict__[index] = label
index += 1
def supports(self, label):
return label in self.__dict__
def encode(self, label):
return self.__dict__[label]
def decode(self, index):
return self.__dict__[index]
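# A minimal usage sketch (added illustration): the encoder maps labels to
# indices and back through the same internal dictionary.
def _demo_label_encoder():
    encoder = LabelEncoder(["cat", "dog"])
    assert encoder.supports("cat")
    assert encoder.encode("dog") == 1
    assert encoder.decode(0) == "cat"
    return encoder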
class Object(object):
def __init__(self, xmin = None, ymin = None, xmax = None, ymax = None, conf = 0, name = 'unnamed'):
self.name = name
self.conf = conf
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def __str__(self):
return f'{self.name} ({self.conf}) ({self.xmin}, {self.ymin}) ({self.xmax}, {self.ymax})'
class MyJSONEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
class Annotation(object):
def __init__(self):
self.objects = []
self.imagewidth = None
self.imageheight = None
self.filename = None
def parse_annotation(filepath):
objects = []
#TODO: rework - find objects instead of iterating
et = ET.parse(filepath)
for obj in et.findall('object'):
curr = Object()
skip = 0
for child in obj.iter():
if skip > 0:
skip-=1
continue
if child.tag == 'part':
skip = 6
if child.tag != 'bndbox':
if(child.text.isdigit()):
setattr(curr, child.tag, int(child.text))
else:
setattr(curr, child.tag, child.text)
if(curr.difficult == 0):
objects.append(curr)
filename = et.find('filename').text
width = et.find('size/width').text
height = et.find('size/height').text
annotation = Annotation()
annotation.objects = objects
annotation.imagewidth = int(width)
annotation.imageheight = int(height)
annotation.filename = filename
return annotation
font = ImageFont.load_default()
#font = ImageFont.truetype("./fonts/comic-sans-ms/COMIC.TTF", 14)
def _get_color():
return (0, 255, 0)
return (int(uniform(0, 255)), int(uniform(0, 255)), int(uniform(0, 255)))
def draw_image(imagepath, objects=[], draw_grid=False, grid_size=(0, 0), save=False, displ = True):
im = Img.open(imagepath)
draw = ImgDraw.Draw(im)
if draw_grid:
width_factor = im.width / grid_size[0]
height_factor = im.height / grid_size[1]
for i in range(max(grid_size[0], grid_size[1])):
draw.line((i * width_factor, 0) + (i * width_factor, im.height), fill=0, width=1)
draw.line((0, i * height_factor) + (im.width, i * height_factor), fill=0, width=1)
for obj in objects:
# print(obj)
color = _get_color()
draw.text((obj.xmin, obj.ymin - 20), f"{obj.name}: {round(obj.conf, 2)}", font=font, fill=color)
draw.line((obj.xmin, obj.ymin) + (obj.xmax, obj.ymin), fill=color, width=2)
draw.line((obj.xmin, obj.ymax) + (obj.xmax, obj.ymax), fill=color, width=2)
draw.line((obj.xmin, obj.ymin) + (obj.xmin, obj.ymax), fill=color, width=2)
draw.line((obj.xmax, obj.ymin) + (obj.xmax, obj.ymax), fill=color, width=2)
# xmid = (obj.xmax + obj.xmin) / 2
# ymid = (obj.ymax + obj.ymin) / 2
# draw.line((xmid, ymid) + (xmid, ymid), fill = color, width = 2)
if save:
path = imagepath.replace('\\', r'/')
filename = path.split('/')[-1]
im.save(f'./out/{filename}')
if displ:
display(im)
def save_objects_to_json(imagepath, objects):
filename = imagepath.split('/')[-1]
text_file = open(f'./out/{filename}.json', "w")
n = text_file.write(json.dumps(objects, cls=MyJSONEncoder))
text_file.close()
def image_to_vgg_input(imagepath, inputshape):
im = Img.open(imagepath).resize(inputshape)
im = np.array(im, np.float32)
im -= 255 / 2
return im
def image_to_yolo_input(imagepath, inputshape):
im = Img.open(imagepath).resize(inputshape)
im = np.array(im, np.float32)
im /= 255
return im
def image_to_mobilenet_input(imagepath, inputshape):
im = Img.open(imagepath).resize(inputshape)
im = np.array(im, np.float32)
# scaling assumption: standard MobileNet-style preprocessing to [-1, 1]; not taken verbatim from the source
im /= 127.5
im -= 1.0
return im
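# Illustrative usage of the helpers above (hypothetical path, typical YOLO input shape):
# x = image_to_yolo_input('./data/example.jpg', (416, 416))   # float32 array scaled to [0, 1]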
from opentamp.core.internal_repr.predicate import Predicate
from opentamp.core.internal_repr.plan import Plan
from opentamp.core.util_classes.common_predicates import ExprPredicate
from opentamp.core.util_classes.namo_predicates import NEAR_TOL
from opentamp.core.util_classes.openrave_body import OpenRAVEBody
from opentamp.core.util_classes.torch_funcs import GaussianBump, ThetaDir
from opentamp.errors_exceptions import PredicateException
from sco_py.expr import Expr, AffExpr, EqExpr, LEqExpr
from collections import OrderedDict
import numpy as np
import os
import pybullet as P
import sys
import time
import traceback
"""
This file implements the predicates for the 2D NAMO domain.
"""
dsafe = 1e-3
# dmove = 1.1e0 # 5e-1
dmove = 1.8e0 # 5e-1
contact_dist = 2e-1 # dsafe
gripdist = 0.61 # 75
RS_SCALE = 0.5
N_DIGS = 5
GRIP_VAL = 0.1
COL_TS = 5 # 3
N_COLS = 8
RETREAT_DIST = 1.2
ATTRMAP = {
"Robot": (
("pose", np.array(list(range(2)), dtype=np.int)),
("gripper", np.array(list(range(1)), dtype=np.int)),
("theta", np.array(list(range(1)), dtype=np.int)),
("vel", np.array(list(range(1)), dtype=np.int)),
("acc", np.array(list(range(1)), dtype=np.int)),
),
"Can": (("pose", np.array(list(range(2)), dtype=np.int)),),
"Target": (("value", np.array(list(range(2)), dtype=np.int)),),
"RobotPose": (
("value", np.array(list(range(2)), dtype=np.int)),
("theta", np.array(list(range(1)), dtype=np.int)),
("gripper", np.array(list(range(1)), dtype=np.int)),
("vel", np.array(list(range(1)), dtype=np.int)),
("acc", np.array(list(range(1)), dtype=np.int)),
),
"Obstacle": (("pose", np.array(list(range(2)), dtype=np.int)),),
"Grasp": (("value", np.array(list(range(2)), dtype=np.int)),),
"Rotation": (("value", np.array(list(range(1)), dtype=np.int)),),
}
HANDLE_OFFSET = 0.8
def add_to_attr_inds_and_res(t, attr_inds, res, param, attr_name_val_tuples):
if param.is_symbol():
t = 0
for attr_name, val in attr_name_val_tuples:
inds = np.where(param._free_attrs[attr_name][:, t])[0]
getattr(param, attr_name)[inds, t] = val[inds]
if param in attr_inds:
res[param].extend(val[inds].flatten().tolist())
attr_inds[param].append((attr_name, inds, t))
else:
res[param] = val[inds].flatten().tolist()
attr_inds[param] = [(attr_name, inds, t)]
def process_traj(raw_traj, timesteps):
"""
Process raw_traj so that its length equals the desired number of timesteps.
when len(raw_traj) > timesteps:
subsample the trajectory by accumulated distance to reduce its size
when len(raw_traj) < timesteps:
append the last timestep pose until the size fits
Note: result_traj includes init_dof and end_dof
"""
result_traj = []
if len(raw_traj) == timesteps:
result_traj = raw_traj.copy()
else:
traj_arr = [0]
result_traj.append(raw_traj[0])
# calculate accumulative distance
for i in range(len(raw_traj) - 1):
traj_arr.append(
traj_arr[-1] + np.linalg.norm(raw_traj[i + 1] - raw_traj[i])
)
step_dist = traj_arr[-1] / (timesteps - 1)
process_dist, i = 0, 1
while i < len(traj_arr) - 1:
if traj_arr[i] == process_dist + step_dist:
result_traj.append(raw_traj[i])
process_dist += step_dist
elif traj_arr[i] < process_dist + step_dist < traj_arr[i + 1]:
dist = process_dist + step_dist - traj_arr[i]
displacement = (
(raw_traj[i + 1] - raw_traj[i])
/ (traj_arr[i + 1] - traj_arr[i])
* dist
)
result_traj.append(raw_traj[i] + displacement)
process_dist += step_dist
else:
i += 1
result_traj.append(raw_traj[-1])
return np.array(result_traj).T
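# Illustrative usage sketch (hypothetical data): resample a 100-point 2D path to 20 timesteps;
# the result is transposed, i.e. one column per timestep.
# raw = np.cumsum(np.random.rand(100, 2), axis=0)
# traj = process_traj(raw, 20)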
def get_rrt_traj(env, robot, active_dof, init_dof, end_dof):
# assert body in env.GetRobot()
active_dofs = robot.GetActiveDOFIndices()
robot.SetActiveDOFs(active_dof)
robot.SetActiveDOFValues(init_dof)
params = Planner.PlannerParameters()
params.SetRobotActiveJoints(robot)
params.SetGoalConfig(end_dof) # set goal to all ones
# # forces parabolic planning with 40 iterations
# import ipdb; ipdb.set_trace()
params.SetExtraParameters(
"""<_postprocessing planner="parabolicsmoother">
<_nmaxiterations>20</_nmaxiterations>
</_postprocessing>"""
)
planner = RaveCreatePlanner(env, "birrt")
planner.InitPlan(robot, params)
traj = RaveCreateTrajectory(env, "")
result = planner.PlanPath(traj)
if result == False:
robot.SetActiveDOFs(active_dofs)
return None
traj_list = []
for i in range(traj.GetNumWaypoints()):
# get the waypoint values; these hold velocities, time stamps, etc.
data = traj.GetWaypoint(i)
# extract the robot joint values only
dofvalues = traj.GetConfigurationSpecification().ExtractJointValues(
data, robot, robot.GetActiveDOFIndices()
)
# raveLogInfo('waypint %d is %s'%(i,np.round(dofvalues, 3)))
traj_list.append(np.round(dofvalues, 3))
robot.SetActiveDOFs(active_dofs)
return np.array(traj_list)
def get_ompl_rrtconnect_traj(env, robot, active_dof, init_dof, end_dof):
# assert body in env.GetRobot()
dof_inds = robot.GetActiveDOFIndices()
robot.SetActiveDOFs(active_dof)
robot.SetActiveDOFValues(init_dof)
params = Planner.PlannerParameters()
params.SetRobotActiveJoints(robot)
params.SetGoalConfig(end_dof) # set goal to all ones
# forces parabolic planning with 40 iterations
planner = RaveCreatePlanner(env, "OMPL_RRTConnect")
planner.InitPlan(robot, params)
traj = RaveCreateTrajectory(env, "")
planner.PlanPath(traj)
traj_list = []
for i in range(traj.GetNumWaypoints()):
# get the waypoint values; these hold velocities, time stamps, etc.
data = traj.GetWaypoint(i)
# extract the robot joint values only
dofvalues = traj.GetConfigurationSpecification().ExtractJointValues(
data, robot, robot.GetActiveDOFIndices()
)
# raveLogInfo('waypint %d is %s'%(i,np.round(dofvalues, 3)))
traj_list.append(np.round(dofvalues, 3))
robot.SetActiveDOFs(dof_inds)
return traj_list
def opposite_angle(theta):
return ((theta + 2 * np.pi) % (2 * np.pi)) - np.pi
def angle_diff(theta1, theta2):
diff1 = theta1 - theta2
diff2 = opposite_angle(theta1) - opposite_angle(theta2)
if np.abs(diff1) < np.abs(diff2):
return diff1
return diff2
def add_angle(theta, delta):
return ((theta + np.pi + delta) % (2 * np.pi)) - np.pi
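# Worked example (illustrative): angle_diff(3.0, -3.0)
#   direct difference  diff1 = 6.0
#   wrapped difference diff2 = opposite_angle(3.0) - opposite_angle(-3.0) ~ -0.142 - 0.142 = -0.283
#   |diff2| < |diff1|, so the short way around (~ -0.283 rad) is returned.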
def twostep_f(xs, dist, dim, pts=COL_TS, grad=False, isrobot=False):
if grad:
res = []
jac = np.zeros((0, 2 * dim))
for t in range(pts):
coeff = float((pts - 1) - t) / (pts - 1)
if len(xs) == 2:
next_pos = coeff * xs[0] + (1 - coeff) * xs[1]
if isrobot:
next_pos[2] = -GRIP_VAL # min(xs[0][2], xs[1][2])
# next_pos[3] = np.arctan2(next_pos[0], next_pos[1])
else:
next_pos = xs[0]
cur_jac = dist(next_pos)[1]
filldim = dim - cur_jac.shape[1]
# cur_jac = np.c_[cur_jac[:,:2], np.zeros((N_COLS, filldim)), cur_jac[:,2:]]
# res.append(dist(next_pos)[1])
if filldim > 0:
cur_jac = np.c_[cur_jac, np.zeros((len(cur_jac), filldim))]
jac = np.r_[jac, np.c_[coeff * cur_jac, (1 - coeff) * cur_jac]]
# jac = np.r_[jac, np.c_[cur_jac, cur_jac]]
return jac
else:
res = []
for t in range(pts):
coeff = float((pts - 1) - t) / (pts - 1)
if len(xs) == 2:
next_pos = coeff * xs[0] + (1 - coeff) * xs[1]
if isrobot:
next_pos[2] = -GRIP_VAL # min(xs[0][2], xs[1][2])
# next_pos[3] = np.arctan2(next_pos[0], next_pos[1])
else:
next_pos = xs[0]
res.append(dist(next_pos)[0])
return np.concatenate(res, axis=0)
class CollisionPredicate(ExprPredicate):
def __init__(
self,
name,
e,
attr_inds,
params,
expected_param_types,
dsafe=dsafe,
debug=False,
ind0=0,
ind1=1,
active_range=(0, 1),
priority=3,
):
# NOTE: Below line is for debugging purposes only, should be commented out
# and line below should be commented in
self._debug = True
# self._debug = debug
# if self._debug:
# self._env.SetViewer("qtcoin")
self.dsafe = dsafe
self.ind0 = ind0
self.ind1 = ind1
self._cache = {}
self.n_cols = N_COLS
self.check_aabb = False
super(CollisionPredicate, self).__init__(name, e, attr_inds, params, expected_param_types, active_range=active_range, priority=priority)
self._init_include = False
def test(self, time, negated=False, tol=1e-4):
# This test is overwritten so that collisions can be calculated correctly
if not self.is_concrete():
return False
if time < 0:
traceback.print_exception(*sys.exc_info())
raise PredicateException("Out of range time for predicate '%s'." % self)
try:
result = self.neg_expr.eval(
self.get_param_vector(time), tol=tol, negated=(not negated)
)
return result
except IndexError:
traceback.print_exception(*sys.exc_info())
## this happens with an invalid time
raise PredicateException("Out of range time for predicate '%s'." % self)
def plot_cols(self, env, t):
_debug = self._debug
self._env = env
self._debug = True
self.distance_from_obj(self.get_param_vector(t))
self._debug = _debug
# @profile
def _set_robot_pos(self, x):
flattened = tuple(x.round(N_DIGS).flatten())
p0 = self.params[self.ind0]
p1 = self.params[self.ind1]
b0 = self._param_to_body[p0]
b1 = self._param_to_body[p1]
if b0.isrobot():
robot = b0
obj = b1
# elif b1.isrobot():
# robot = b1
# obj = b0
else:
raise Exception("Should not call this without the robot!")
pose0 = x[0:2]
pose1 = x[4:6]
b0.set_dof(
{
"left_grip": x[2],
"right_grip": x[2],
"robot_theta": x[3],
"ypos": 0.0,
"xpos": 0.0,
}
)
b0.set_pose(pose0)
b1.set_pose(pose1)
if "door" in b1._geom.get_types():
b1.set_dof({"door_hinge": x[6]})
return pose0, pose1
def set_pos(self, x):
return self._set_pos(x)
def _set_pos(self, x):
flattened = tuple(x.round(N_DIGS).flatten())
# if flattened in self._cache and self._debug is False:
# return self._cache[flattened]
p0 = self.params[self.ind0]
p1 = self.params[self.ind1]
b0 = self._param_to_body[p0]
b1 = self._param_to_body[p1]
if b0.isrobot() or b1.isrobot():
return self._set_robot_pos(x)
pose0 = x[0:2]
pose1 = x[2:4]
b0.set_pose(pose0)
b1.set_pose(pose1)
return pose0, pose1
def _check_robot_aabb(self, b0, b1):
vals = np.zeros((self.n_cols, 1))
jacs = np.zeros((self.n_cols, 4))
(x1, y1, z1), (x2, y2, z2) = P.getAABB(b0.body_id, 5)
(x3, y3, z3), (x4, y4, z4) = P.getAABB(b0.body_id, 7)
(x5, y5, z5), (x6, y6, z6) = P.getAABB(b0.body_id, 3)
grip_aabb = [
(min(x1, x3, x5), min(y1, y3, y5), min(z1, z3, z5)),
(max(x4, x2, x6), max(y4, y2, y6), max(z4, z2, z6)),
]
minpt, maxpt = grip_aabb
overlaps = P.getOverlappingObjects(grip_aabb[0], grip_aabb[1])
if overlaps is not None and len(overlaps):
ind = 0
for body_id, link in overlaps:
if body_id != b1.body_id:
continue
cur_minpt, cur_maxpt = P.getAABB(body_id, link)
d1, d2 = cur_minpt[0] - maxpt[0], minpt[0] - cur_maxpt[0]
d3, d4 = cur_minpt[1] - maxpt[1], minpt[1] - cur_maxpt[1]
if (
d1 <= self.dsafe
and d2 <= self.dsafe
and d3 <= self.dsafe
and d4 <= self.dsafe
):
xd = max(d1, d2)
yd = max(d3, d4)
if xd > yd:
vals[ind] = self.dsafe - xd
if d1 < d2:
jacs[ind, 0] = -1
jacs[ind, 2] = 1
else:
jacs[ind, 0] = 1
jacs[ind, 2] = -1
else:
vals[ind] = self.dsafe - yd
if d3 < d4:
jacs[ind, 1] = -1
jacs[ind, 3] = 1
else:
jacs[ind, 1] = 1
jacs[ind, 3] = -1
ind += 1
return vals, jacs
def distance_from_obj(self, x, n_steps=0):
pose0, pose1 = self.set_pos(x)
p0 = self.params[self.ind0]
p1 = self.params[self.ind1]
b0 = self._param_to_body[p0]
b1 = self._param_to_body[p1]
vals = np.zeros((self.n_cols, 1))
jacs = np.zeros((self.n_cols, 4))
# if self.check_aabb:
# vals, jacs = self._check_robot_aabb(b0, b1)
collisions = P.getClosestPoints(b0.body_id, b1.body_id, contact_dist)
col_val, jac01 = self._calc_grad_and_val(
p0.name, p1.name, pose0, pose1, collisions
)
final_val = col_val
final_jac = jac01
for i in range(len(final_val)):
if final_val[i] < vals[i]:
final_val[i] = vals[i]
final_jac[i] = jacs[i]
# self._cache[flattened] = (val.copy(), jac.copy())
if b0.isrobot():
if len(collisions):
pose0, pose1 = np.r_[pose0, [[0]]], np.r_[pose1, [[0]]]
colvec = np.array([c[5] for c in collisions])
axisvec = np.array([[0, 0, 1] for _ in collisions])
pos0vec = np.array([pose0.flatten() for _ in collisions])
crosstorque = np.cross(colvec - pos0vec, [0, 0, 1])
rotjac = np.dot(crosstorque, pose1 - pose0)
rotjac = (
0 * np.r_[rotjac, np.zeros((len(final_jac) - len(collisions), 1))]
)
else:
rotjac = np.zeros((final_jac.shape[0], 1))
final_jac = np.c_[
final_jac[:, :2], np.zeros_like(rotjac), rotjac, final_jac[:, 2:]
]
return final_val, final_jac
def _calc_rot_grad(self, rpose, objpose, colpos):
jntaxis = np.array([0, 0, 1])
return np.dot(objpose - rpose, np.cross(colpos - rpose, jntaxis))
# @profile
def _calc_grad_and_val(self, name0, name1, pose0, pose1, collisions):
vals = np.zeros((self.n_cols, 1))
jacs = np.zeros((self.n_cols, 4))
val = -1 * float("inf")
results = []
n_cols = len(collisions)
assert n_cols <= self.n_cols
jac = np.zeros((1, 4))
p0 = next(filter(lambda p: p.name == name0, list(self._param_to_body.keys())))
p1 = next(filter(lambda p: p.name == name1, list(self._param_to_body.keys())))
b0 = self._param_to_body[p0]
b1 = self._param_to_body[p1]
for i, c in enumerate(collisions):
linkA, linkB = c[3], c[4]
linkAParent, linkBParent = c[1], c[2]
sign = 0
if linkAParent == b0.body_id and linkBParent == b1.body_id:
pt0, pt1 = c[5], c[6]
linkRobot, linkObj = linkA, linkB
sign = -1
elif linkBParent == b0.body_id and linkAParent == b1.body_id:
pt1, pt0 = c[5], c[6]
linkRobot, linkObj = linkB, linkA
sign = 1
else:
continue
distance = c[8] # c.contactDistance
normal = np.array(c[7]) # c.contactNormalOnB # Pointing towards A
results.append((pt0, pt1, distance))
if self._debug:
# self._plot_collision(pt0, pt1, distance)
# print("pt0 = ", pt0)
# print("pt1 = ", pt1)
# print("distance = ", distance)
# print("normal = ", normal)
self._plot_collision_normal(pt0, pt1, distance, normal)
vals[i, 0] = self.dsafe - distance
jacs[i, :2] = -1 * normal[:2]
jacs[i, 2:] = normal[:2]
return np.array(vals).reshape((self.n_cols, 1)), np.array(jacs).reshape((self.n_cols, 4))
def _plot_collision(self, ptA, ptB, distance):
if not np.allclose(ptA, ptB, atol=1e-3):
if distance < 0:
# Red because collision
rgb = (1, 0, 0)
else:
# Green because no collision
rgb = (0, 1, 0)
P.addUserDebugLine(ptA, ptB, rgb, 0.05)
def _plot_collision_normal(self, ptA, ptB, distance, normal):
if not np.allclose(ptA, ptB, atol=1e-3):
if distance < 0:
# Plot red arrow because collision
P.addUserDebugLine(ptA, ptA + normal, (1, 0, 0), 0.01, 0.5)
class HLPoseUsed(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None, sess=None, debug=False):
## At Can Target
self.pose = params[0]
if self.pose.is_symbol():
k = "value"
else:
k = "pose"
attr_inds = OrderedDict([(self.pose, [(k, np.array([0, 1], dtype=np.int))])])
A = np.zeros((2, 2))
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(HLPoseUsed, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
self.hl_info = True
def test(self, time, negated=False, tol=1e-4):
if negated:
return True
return super(HLPoseUsed, self).test(time, tol=tol)
class HLGraspFailed(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None, debug=False):
self.pose = params[0]
if self.pose.is_symbol():
k = "value"
else:
k = "pose"
attr_inds = OrderedDict([(self.pose, [(k, np.array([0, 1], dtype=np.int))])])
A = np.zeros((2, 2))
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(HLGraspFailed, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
self.hl_info = True
def test(self, time, negated=False, tol=1e-4):
return True
class HLTransferFailed(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None, debug=False):
self.pose = params[0]
if self.pose.is_symbol():
k = "value"
else:
k = "pose"
attr_inds = OrderedDict([(self.pose, [(k, np.array([0, 1], dtype=np.int))])])
A = np.zeros((2, 2))
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(HLTransferFailed, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
self.hl_info = True
def test(self, time, negated=False, tol=1e-4):
return True
class HLPlaceFailed(HLTransferFailed):
pass
class HLPoseAtGrasp(HLPoseUsed):
# RobotAt Robot Can Grasp
def __init__(self, name, params, expected_param_types, env=None, sess=None, debug=False):
## At Robot RobotPose
self.r, self.c, self.g = params
k = "pose" if not self.r.is_symbol() else "value"
attr_inds = OrderedDict(
[
(self.r, [(k, np.array([0, 1], dtype=np.int))]),
(self.c, [("pose", np.array([0, 1], dtype=np.int))]),
(self.g, [("value", np.array([0, 1], dtype=np.int))]),
]
)
A = np.c_[
np.r_[np.eye(2), -np.eye(2)],
np.r_[-np.eye(2), np.eye(2)],
np.r_[-np.eye(2), np.eye(2)],
]
b = np.zeros((4, 1))
val = NEAR_TOL * np.ones((4, 1))
aff_e = AffExpr(A, b)
e = LEqExpr(aff_e, val)
super(HLPoseUsed, self).__init__(
name, e, attr_inds, params, expected_param_types
)
self.hl_info = True
class HLAtGrasp(HLPoseUsed):
pass
class HLPoseAtGrasp(HLPoseUsed):
pass
class At(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None, sess=None, debug=False):
## At Can Target
self.can, self.targ = params
attr_inds = OrderedDict(
[
(self.can, [("pose", np.array([0, 1], dtype=np.int))]),
(self.targ, [("value", np.array([0, 1], dtype=np.int))]),
]
)
self.coeff = 1e-2
A = self.coeff * np.c_[np.eye(2), -np.eye(2)]
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(At, self).__init__(name, e, attr_inds, params, expected_param_types, priority=-2)
self._init_include = False
class AtNEq(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None, sess=None, debug=False):
## At Can Target
self.can, self.eq, self.targ = params
attr_inds = OrderedDict(
[
(self.can, [("pose", np.array([0, 1], dtype=np.int))]),
(self.targ, [("value", np.array([0, 1], dtype=np.int))]),
]
)
if self.can is not self.eq:
A = np.c_[np.eye(2), -np.eye(2)]
b = np.zeros((2, 1))
val = np.zeros((2, 1))
else:
A = np.zeros((2, 4))
b = np.ones((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(AtNEq, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
class AtInit(At):
def test(self, time, negated=False, tol=1e-4):
return True
def hl_test(self, time, negated=False, tol=1e-4):
return True
class RobotAt(At):
# RobotAt Robot RobotPose
def __init__(self, name, params, expected_param_types, env=None, sess=None, debug=False):
## At Robot RobotPose
self.r, self.rp = params
attr_inds = OrderedDict(
[
(self.r, [("pose", np.array([0, 1], dtype=np.int))]),
(self.rp, [("value", np.array([0, 1], dtype=np.int))]),
]
)
A = np.c_[np.eye(2), -np.eye(2)]
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(At, self).__init__(name, e, attr_inds, params, expected_param_types)
class RobotAtRot(At):
def __init__(self, name, params, expected_param_types, env=None, sess=None, debug=False):
self.r, self.rot = params
attr_inds = OrderedDict(
[
(self.r, [("theta", np.array([0], dtype=np.int))]),
(self.rot, [("value", np.array([0], dtype=np.int))]),
]
)
A = np.c_[np.eye(1), -np.eye(1)]
b = np.zeros((1, 1))
val = np.zeros((1, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(At, self).__init__(name, e, attr_inds, params, expected_param_types)
class BoxAt(At):
pass
class Near(At):
def __init__(self, name, params, expected_param_types, env=None, sess=None, debug=False):
self.r, self.c = params
attr_inds = OrderedDict(
[
(self.r, [("pose", np.array([0, 1], dtype=np.int))]),
(self.c, [("value", np.array([0, 1], dtype=np.int))]),
]
)
# hedged reconstruction: the remainder of Near.__init__ below mirrors the HLPoseAtGrasp construction above
A = np.c_[np.r_[np.eye(2), -np.eye(2)], np.r_[-np.eye(2), np.eye(2)]]
b = np.zeros((4, 1))
val = NEAR_TOL * np.ones((4, 1))
aff_e = AffExpr(A, b)
e = LEqExpr(aff_e, val)
super(At, self).__init__(name, e, attr_inds, params, expected_param_types)
from generic import is_prime, is_prime_fermat, is_prime_miller
from time import time as t
import numpy as np
'''
The file test_prime.py validates the primality-test methods prime (brute force), fermat
and miller, guaranteeing the fidelity of the results by comparing them against a
previously generated list of prime numbers.
'''
#https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n
def getPrimeList(n):
""" Returns a list of primes < n """
sieve = [True] * n
for i in range(3,int(n**0.5)+1,2):
if sieve[i]:
sieve[i*i::2*i]=[False]*((n-i*i-1)//(2*i)+1)
return [2] + [i for i in range(3,n,2) if sieve[i]]
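# e.g. getPrimeList(20) -> [2, 3, 5, 7, 11, 13, 17, 19]  (primes strictly below n)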
def print_diff(a1,a2):
""" Print not similar elements """
diff = []
for i in a1:
if a2.count(i) == 0:
diff.append(i)
for i in a2:
if a1.count(i) == 0:
if diff.count(i) == 0:
diff.append(i)
return diff
amount_test = 100
p0 = getPrimeList(amount_test)
p1, p2, p3, = [], [], []
start = t()
for i in range(2,amount_test):
p1.append(i) if is_prime(i) else None
print(" # # Teste do método 'is_prime' para %d números" % amount_test)
print(" Tempo decorrido:", t()-start)
print(" Resultado -> OK") if np.array_equal(p0,p1) else print(" Resultado -> PROBLEMA",print_diff(p0,p1))
start = t()
for i in range(2,amount_test):
p2.append(i) if is_prime_fermat(i) else None
print(" # # Teste do método 'is_prime_fermat' para %d números" % amount_test)
print(" Tempo decorrido:", t()-start)
print(" Resultado -> OK") if np.array_equal(p0,p2) else print(" Resultado -> PROBLEMA",print_diff(p0,p2))
start = t()
for i in range(2,amount_test):
p3.append(i) if is_prime_miller(i) else None
print(" # # Teste do método 'is_prime_miller' para %d números" % amount_test)
print(" Tempo decorrido:", t()-start)
print(" Resultado -> OK") if np.array_equal(p0,p3) else print(" Resultado -> PROBLEMA",print_diff(p0,p3))
"""
A bidirectional LSTM with optional CRF and character-based representation for NLP sequence tagging.
Author: <NAME>
License: CC BY-SA 3.0
"""
from __future__ import print_function
import keras
from keras.models import Model
from keras.models import Sequential
from keras.layers.core import *
from keras.layers import *
from keras.optimizers import *
from keras.preprocessing.sequence import pad_sequences
import os
import sys
import random
import time
import math
import numpy as np
import logging
from util.F1Validation import compute_f1_token_basis
if (sys.version_info > (3, 0)):
import pickle as pkl
else: #Python 2.7 imports
import cPickle as pkl
class BiLSTM:
learning_rate_updates = {'sgd': {1: 0.1, 3:0.05, 5:0.01} }
verboseBuild = True
model = None
epoch = 0
skipOneTokenSentences=True
dataset = None
embeddings = None
labelKey = None
writeOutput = True
resultsOut = None
modelSavePath = None
maxCharLen = None
pad_sequences = False
params = {'miniBatchSize': 32,
'dropout': [0.5, 0.5],
'LSTM-Size': [100],
'optimizer': 'adam',
'earlyStopping': -1,
'clipvalue': 0.5,
'clipnorm': 1,
'attentionActivation': "sigmoid",
'noAttention': False,
'experimentDate': 0,
'attType': "word",
'padSequences': True} #Default params
def __init__(self, devEqualTest=False, params=None):
if params != None:
self.params.update(params)
self.devEqualTest = devEqualTest
logging.info("BiLSTM model initialized with parameters: %s" % str(self.params))
def setMappings(self, embeddings, mappings):
self.mappings = mappings
self.embeddings = embeddings
self.idx2Word = {v: k for k, v in self.mappings['tokens'].items()}
def setTrainDataset(self, dataset, labelKey):
self.dataset = dataset
self.labelKey = labelKey
self.label2Idx = self.mappings[labelKey]
self.idx2Label = {v: k for k, v in self.label2Idx.items()}
self.mappings['label'] = self.mappings[labelKey]
self.max_train_score = 0
self.max_test_score = 0
self.max_dev_score = 0
self.max_scores = {'train': self.max_train_score,
'test': self.max_test_score,
'dev': self.max_dev_score}
self.last_scores = {'train': {'O':"",'Claim':"",'MajorClaim':"",'Premise':""},
'test': {'O':"",'Claim':"",'MajorClaim':"",'Premise':""},
'dev': {'O':"",'Claim':"",'MajorClaim':"",'Premise':""}}
self.best_scores = {'train': {},
'test': {},
'dev': {}}
def calculateLargestSentence(self, sentences):
largest = 0
for idx in range(len(sentences)):
sentenceLength = len(sentences[idx]['tokens'])
largest = sentenceLength if sentenceLength > largest else largest
return largest
def trainModel(self):
#if self.pad_sequences:
#_,_ = self.getPaddedSentences(dataset, labelKey)
# pad here and obtain the sizes that will become the batch size and the input size (longest sequence)
if self.model == None:
largestSentence = self.calculateLargestSentence(self.dataset['trainMatrix'])
self.buildModel(largestSentence) # pass the batch size and the longest sequence as parameters
trainMatrix = self.dataset['trainMatrix']
self.epoch += 1
if self.params['optimizer'] in self.learning_rate_updates and self.epoch in self.learning_rate_updates[self.params['optimizer']]:
K.set_value(self.model.optimizer.lr, self.learning_rate_updates[self.params['optimizer']][self.epoch])
logging.info("Update Learning Rate to %f" % (K.get_value(self.model.optimizer.lr)))
iterator = self.online_iterate_dataset(trainMatrix, self.labelKey) if self.params['miniBatchSize'] == 1 else self.batch_iterate_padded_dataset(trainMatrix, self.labelKey)
for batch in iterator:
labels = batch[0]
nnInput = batch[1:]
self.model.train_on_batch(nnInput, labels)
def predictLabels(self, sentences):
if self.model == None:
largestSentence = self.calculateLargestSentence(self.dataset['trainMatrix'])
self.buildModel(largestSentence)
predLabels = [None]*len(sentences)
sentenceLengths = self.getSentenceLengths(sentences)
for senLength, indices in sentenceLengths.items():
if self.skipOneTokenSentences and senLength == 1:
if 'O' in self.label2Idx:
dummyLabel = self.label2Idx['O']
else:
dummyLabel = 0
predictions = [[dummyLabel]] * len(indices) #Tag with dummy label
else:
features = ['tokens']
inputData = {name: [] for name in features}
for idx in indices:
for name in features:
inputData[name].append(sentences[idx][name])
for name in features:
inputData[name] = np.asarray(inputData[name])
predictions = self.model.predict([inputData[name] for name in features], verbose=False)
predictions = predictions.argmax(axis=-1) #Predict classes
predIdx = 0
for idx in indices:
predLabels[idx] = predictions[predIdx]
sentences[idx]['label'] = predictions[predIdx]
predIdx += 1
return predLabels
def predictPaddedLabels(self, sentences):
if self.model == None:
largestSentence = self.calculateLargestSentence(self.dataset['trainMatrix'])
self.buildModel(largestSentence)
sentencesNumber= len(sentences)
att_scores = [None] * len(sentences)
sentencesPaddedTokens, sentenceLabels = self.getPaddedSentences(sentences, self.labelKey)
features = ['tokens']
inputData = {name: [] for name in features}
for idx in range(sentencesNumber):
for name in features:
inputData[name].append(sentencesPaddedTokens[idx])
for name in features:
inputData[name] = np.asarray(inputData[name])
import gym
from gym import spaces
import numpy as np
# from os import path
import snakeoil3 as snakeoil3
import numpy as np
import copy
import collections as col
import signal
import subprocess
import time
import os
from PIL import Image
import random
class TorcsEnv:
terminal_judge_start = 100 # If there is still no progress after 100 timesteps, terminate
termination_limit_progress = 5 # [km/h], episode terminates if car is running slower than this limit
default_speed = 50
initial_reset = True
def __init__(self, env_number, lock, vision=False, throttle=False, gear_change=False):
self.vision = vision
self.throttle = throttle
self.gear_change = gear_change
self.initial_run = True
self.cur_pic_index = 0
##print("launch torcs")
# os.system('pkill torcs')
self.port_number = 3101 + env_number
self.env_number = env_number
self.lock = lock
self.pro = None
self.cur_ep = 0
if(self.pro != None):
os.killpg(os.getpgid(self.pro.pid), signal.SIGTERM)
time.sleep(0.2)
# if self.vision is True:
# os.system('torcs -nofuel -nodamage -nolaptime -vision &')
# else:
# os.system('torcs -nofuel -nolaptime &')
self.restart_window()
# cmd = 'torcs -nofuel -nolaptime -p {} &'.format(self.port_number)
# self.pro = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
# time.sleep(0.5)
# xdo_cmd = "xdotool windowmove $(xdotool getactivewindow) 300 300"
# os.system(xdo_cmd)
# os.system('sh autostart.sh')
# time.sleep(0.5)
"""
# Modify here if you use multiple tracks in the environment
self.client = snakeoil3.Client(p=3101, vision=self.vision) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
"""
# if throttle is False:
# self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))
# else:
# self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(3,))
if vision is False:
high = np.array([1., np.inf, np.inf, np.inf, 1., np.inf, 1., np.inf])
low = np.array([0., -np.inf, -np.inf, -np.inf, 0., -np.inf, 0., -np.inf])
self.observation_space = spaces.Box(low=low, high=high)
else:
high = np.array([1., np.inf, np.inf, np.inf, 1., np.inf, 1., np.inf, 255])
low = np.array([0., -np.inf, -np.inf, -np.inf, 0., -np.inf, 0., -np.inf, 0])
self.observation_space = spaces.Box(low=low, high=high)
def step(self, u):
# def step(self, action_index):
#print("Step")
# convert thisAction to the actual torcs actionstr
# u = self.map_action(action_index)
ob = self.get_obs()
# # target_speed = 0.50
u = np.clip(u, -1.0, 1.0)
# print(u)
# u[1] = (u[1] + 1.0)/2.0
target_speed = 0.80
speed_p = 3.0
if ob.speedX < target_speed:
u[1] = (target_speed - ob.speedX) * speed_p
else:
u[1] = 0.1
u[1] += u[1]/4.0
if ob.speedX < 0.30:
u[2] = 0.0
else:
u[2] = (u[2] + 1.0)/8.0
# u[2] = 0.0
# print(u)
client = self.client
this_action = self.agent_to_torcs(u)
# Apply Action
action_torcs = client.R.d
# Steering
action_torcs['steer'] = this_action['steer'] # in [-1, 1]
# Simple Automatic Throttle Control by Snakeoil
if self.throttle is False:
target_speed = self.default_speed
if client.S.d['speedX'] < target_speed - (client.R.d['steer']*50):
client.R.d['accel'] += .01
else:
client.R.d['accel'] -= .01
if client.R.d['accel'] > 0.2:
client.R.d['accel'] = 0.2
if client.S.d['speedX'] < 10:
client.R.d['accel'] += 1/(client.S.d['speedX']+.1)
# Traction Control System
if ((client.S.d['wheelSpinVel'][2]+client.S.d['wheelSpinVel'][3]) -
(client.S.d['wheelSpinVel'][0]+client.S.d['wheelSpinVel'][1]) > 5):
action_torcs['accel'] -= .2
else:
action_torcs['accel'] = this_action['accel']
action_torcs['brake'] = this_action['brake']
# Automatic Gear Change by Snakeoil
if self.gear_change is True:
action_torcs['gear'] = this_action['gear']
else:
# Automatic Gear Change by Snakeoil is possible
action_torcs['gear'] = 1
if self.throttle:
if client.S.d['speedX'] > 50:
action_torcs['gear'] = 2
if client.S.d['speedX'] > 80:
action_torcs['gear'] = 3
if client.S.d['speedX'] > 110:
action_torcs['gear'] = 4
if client.S.d['speedX'] > 140:
action_torcs['gear'] = 5
if client.S.d['speedX'] > 170:
action_torcs['gear'] = 6
# Save the previous full-obs from torcs for the reward calculation
obs_pre = copy.deepcopy(client.S.d)
# One-Step Dynamics Update #################################
# Apply the Agent's action into torcs
try:
client.respond_to_server()
# Get the response of TORCS
client.get_servers_input()
except:
ob = self.get_obs()
s_t = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ))
reward = 0
client.R.d['meta'] = True
return s_t, reward, client.R.d['meta'], {}
pass
# Get the current full-observation from torcs
obs = client.S.d
# Make an observation from a raw observation vector from TORCS
self.observation = self.make_observaton(obs)
# Reward setting Here #######################################
# direction-dependent positive reward
track = np.array(obs['track'])
trackPos = np.array(obs['trackPos'])
sp = np.array(obs['speedX'])
damage = np.array(obs['damage'])
rpm = np.array(obs['rpm'])
sin_speed_theta = 1.1
track_theta = 1.3
# track_theta = 0
damage_theta = 10
progress = sp*np.cos(obs['angle']) - np.abs(sp*np.sin(obs['angle']))*sin_speed_theta - sp * np.abs(obs['trackPos'])*track_theta
# print("{:.5f} {:.5f} {:.5f}".format(sp*np.cos(obs['angle']) , np.abs(sp*np.sin(obs['angle'])) , sp * np.abs(obs['trackPos'])))
# print(damage)
reward = progress
# collision detection
if obs['damage'] - obs_pre['damage'] > 0:
# print('collision!')
# reward = -1
reward -= (obs['damage'] - obs_pre['damage'])*damage_theta
episode_terminate = True
client.R.d['meta'] = True
# Termination judgement #########################
episode_terminate = False
#if (abs(track.any()) > 1 or abs(trackPos) > 1): # Episode is terminated if the car is out of track
# reward = -200
# episode_terminate = True
# client.R.d['meta'] = True
#if self.terminal_judge_start < self.time_step: # Episode terminates if the progress of agent is small
# if progress < self.termination_limit_progress:
# print("No progress")
# episode_terminate = True
# client.R.d['meta'] = True
if np.cos(obs['angle']) < 0: # Episode is terminated if the agent runs backward
episode_terminate = True
client.R.d['meta'] = True
# print(obs['rpm'])
if self.time_step > 50 and obs['rpm']/10000.0 <= 0.09426:
# print(obs['rpm'])
episode_terminate = True
client.R.d['meta'] = True
if self.time_step > 500:
episode_terminate = True
client.R.d['meta'] = True
if client.R.d['meta'] is True: # Send a reset signal
self.initial_run = False
client.respond_to_server()
self.time_step += 1
ob = self.get_obs()
s_t = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ))
return s_t, reward, client.R.d['meta'], {}
def reset(self, relaunch=False):
# print("Reset")
if (self.cur_ep + 1) % 20 == 0:
self.reset_torcs()
self.time_step = 0
if self.initial_reset is not True:
self.client.R.d['meta'] = True
self.client.respond_to_server()
## TENTATIVE. Restarting TORCS every episode suffers the memory leak bug!
if relaunch is True:
self.reset_torcs()
print("### TORCS is RELAUNCHED ###")
# Modify here if you use multiple tracks in the environment
while True:
try:
self.client = snakeoil3.Client(self, p=self.port_number, vision=self.vision) # Open new UDP in vtorcs
break
except:
time.sleep(5)
pass
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
self.observation = self.make_observaton(obs)
self.last_u = None
self.initial_reset = False
ob = self.get_obs()
s_t = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ))
self.cur_ep += 1
return s_t
def end(self):
print('pkill torcs')
if self.pro != None:
os.killpg(os.getpgid(self.pro.pid), signal.SIGTERM)
# os.system('pkill torcs')
def get_obs(self):
return self.observation
def restart_window(self):
# while os.environ["TORCS_RESTART"] == "1":
# print("env: {} is waiting for signal".format(self.env_number))
# time.sleep(10 + random.randint(1,10))
# os.environ["TORCS_RESTART"] == "1"
# self.lock.acquire()
cmd = 'torcs -nofuel -nolaptime -p {} &'.format(self.port_number)
self.pro = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
time.sleep(1.0)
# self.env_number
row = self.env_number % 4
col = int(self.env_number / 4)
x = 64 + 320 * row
y = 30 + 280 * col
# print(self.env_number, x, y)
xdo_cmd = "xdotool windowmove $(xdotool getactivewindow) {} {}".format(x, y)
os.system(xdo_cmd)
os.system('sh autostart.sh')
time.sleep(0.5)
# self.lock.release()
# os.environ["TORCS_RESTART"] == "0"
def reset_torcs(self):
# print("relaunch torcs")
# os.system('pkill torcs')
if self.pro != None:
os.killpg(os.getpgid(self.pro.pid), signal.SIGTERM)
# time.sleep(0.2)
# if self.vision is True:
# os.system('torcs -nofuel -nodamage -nolaptime -vision &')
# else:
# os.system('torcs -nofuel -nolaptime &')
# cmd = 'torcs -nofuel -nolaptime -p {} &'.format(self.port_number)
# self.pro = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
# time.sleep(0.5)
# os.system('sh autostart.sh')
# time.sleep(0.5)
self.restart_window()
def agent_to_torcs(self, u):
torcs_action = {'steer': u[0]}
if self.throttle is True: # throttle action is enabled
torcs_action.update({'accel': u[1]})
torcs_action.update({'brake': u[2]})
if self.gear_change is True: # gear change action is enabled
torcs_action.update({'gear': int(u[3])})
return torcs_action
def obs_vision_to_image_rgb(self, obs_image_vec):
image_vec = obs_image_vec
# print(len(image_vec))
# r = image_vec[0:len(image_vec):3]
# g = image_vec[1:len(image_vec):3]
# b = image_vec[2:len(image_vec):3]
# sz = (64, 64)
# r = np.array(r).reshape(sz)
# g = np.array(g).reshape(sz)
# b = np.array(b).reshape(sz)
data = np.zeros((64, 64, 3), dtype=np.uint8)
for i in range(64):
for j in range(64):
cur_index = (i*64 + j)*3
data[i][j] = [image_vec[cur_index], image_vec[cur_index+1], image_vec[cur_index+2]]
img = Image.fromarray(data, 'RGB')
img.save("saved_pic/{}.png".format(self.cur_pic_index))
self.cur_pic_index += 1
# print(np.array([r, g, b], dtype=np.uint8).shape)
print(data.shape)
# return np.array([r, g, b], dtype=np.uint8)
return data
def make_observaton(self, raw_obs):
if self.vision is False:
names = ['focus',
'speedX', 'speedY', 'speedZ', 'angle', 'damage',
'opponents',
'rpm',
'track',
'trackPos',
'wheelSpinVel']
Observation = col.namedtuple('Observation', names)
return Observation(focus=np.array(raw_obs['focus'], dtype=np.float32)/200.,
speedX=np.array(raw_obs['speedX'], dtype=np.float32)/300.0,
speedY=np.array(raw_obs['speedY'], dtype=np.float32)/300.0,
speedZ=np.array(raw_obs['speedZ'], dtype=np.float32)/300.0,
angle=np.array(raw_obs['angle'], dtype=np.float32)/3.1416,
damage=np.array(raw_obs['damage'], dtype=np.float32),
from math import ceil
from tqdm import tqdm
import os
import argparse
import imageio
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
def read_image(path:str)->np.ndarray:
"""
Read an image from a path.
"""
X = Image.open(path)
X = np.array(X)
X = np.mean(X, axis=-1)
return X
def plot_image_lst(image_lst, ranks):
"""
Plot a list of images.
"""
plt.figure(figsize=(20, 20))
plt.tight_layout()
for i in range(len(image_lst)):
plt.subplot(ceil(len(image_lst)/2), ceil(len(image_lst)/2), i+1)
if i == 0:
plt.title('Original')
else:
plt.title(f'R={ranks[i-1]} approx.')
plt.imshow(image_lst[i], cmap='gray')
plt.axis('off')
plt.savefig('images/images.png')
plt.show()
def plot_singular_values(s: np.ndarray):
"""
Plot the singular values.
"""
plt.figure(figsize=(20, 20))
plt.tight_layout()
plt.semilogy(s, '-o', label='Singular Values')
plt.xlabel('Rank')
plt.ylabel('Energy')
plt.title('Singular Values')
plt.savefig('images/singular_values.png')
plt.show()
# Calculate the cumulative sum of the singular values and plot it.
def plot_cumulative_sum_singular(s: np.ndarray):
"""
Plot the cumulative sum of the singular values.
"""
plt.figure(figsize=(20, 20))
plt.tight_layout()
plt.plot(np.cumsum(s)/np.sum(s))
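# Illustrative sketch (not part of the original script): a rank-r approximation of a grayscale
# image X via the SVD, consistent with the "R=... approx." panel titles used in plot_image_lst.
# U, s, Vt = np.linalg.svd(X, full_matrices=False)
# X_r = U[:, :r] @ np.diag(s[:r]) @ Vt[:r, :]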
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 08:59, 23/09/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# -------------------------------------------------------------------------------------------------------%
from numpy import round, abs, any, ndarray, isfinite, isnan, log
from numpy import max, sqrt, array, mean, dot, divide, arctan, sum, median, argsort, zeros, concatenate, var, std
class Metrics:
"""
https://scikit-learn.org/stable/modules/model_evaluation.html#regression-metrics
"""
def __init__(self, y_true, y_pred):
"""
:param y_true:
:param y_pred:
"""
if type(y_true) is ndarray and type(y_pred) is ndarray:
y_true = y_true.astype('float64')
y_pred = y_pred.astype('float64') # x = x[~np.isnan(x)] can't remove if array is dtype object, only work with dtype float
if y_true.ndim == 1 and y_pred.ndim == 1:
## Remove all Nan in y_pred
y_true = y_true[~isnan(y_pred)]
y_pred = y_pred[~isnan(y_pred)]
## Remove all Inf in y_pred
self.y_true = y_true[isfinite(y_pred)]
self.y_pred = y_pred[isfinite(y_pred)]
from __future__ import print_function
import pandas as pd
import numpy as np
data = pd.read_csv('input.csv')
w=data
lind = list(map(lambda x: float(x.replace(',','.')), w['leaf_ind']))
ctop = list(map(lambda x: float(x.replace(',','.')), w['curvtop']))
cbas = list(map(lambda x: float(x.replace(',','.')), w['curvbasis']))
pwid = list(map(lambda x: float(x.replace(',','.')), w['pwid']))
bins_leafind =np.min(lind)+np.array([0, (np.max(lind)-np.min(lind))/3.0, 2.0*(np.max(lind)-np.min(lind))/3.0, np.max(lind)-np.min(lind)])
bins_curvtop =np.min(ctop)+np.array([0, (np.max(ctop)-np.min(ctop))/3.0, 2.0*(np.max(ctop)-np.min(ctop))/3.0, np.max(ctop)-np.min(ctop)])
bins_curvbasis =np.min(cbas)+np.array([0, (np.max(cbas)-np.min(cbas))/3.0, 2.0*(np.max(cbas)-np.min(cbas))/3.0, np.max(cbas)-np.min(cbas)])
bins_pwid =np.min(pwid)+np.array([0, (np.max(pwid)-np.min(pwid))/3.0, 2.0*(np.max(pwid)-np.min(pwid))/3.0, np.max(pwid)-np.min(pwid)])
for name in np.unique(data['short']):
lind = list(map(lambda x: float(x.replace(',','.')), w[w['short']==name]['leaf_ind']))
ctop = list(map(lambda x: float(x.replace(',','.')), w[w['short']==name]['curvtop']))
cbas = list(map(lambda x: float(x.replace(',','.')), w[w['short']==name]['curvbasis']))
pwid = list(map(lambda x: float(x.replace(',','.')), w[w['short']==name]['pwid']))
bins = (1.0,1.5,3.0,10.0)
lind = 1.0/np.array(lind)
lind = np.histogram(lind, bins=bins)[0]/float(np.sum(np.histogram(lind, bins=bins)[0]))
#bins = (0.0,5, 10.0, 1000.0)
ctop = np.histogram(ctop, bins=bins_curvtop)[0]/float(np.sum(np.histogram(ctop, bins=bins_curvtop)[0]))
#bins = (0.0,5, 10.0, 1000.0)
cbas = np.histogram(cbas, bins=bins_curvbasis)[0]/float(np.sum(np.histogram(cbas, bins=bins_curvbasis)[0]))
#bins = (-10.,-0.05, 0.05, 10.0)
pwid = np.histogram(pwid, bins=bins_pwid)[0]/float(np.sum(np.histogram(pwid, bins=bins_pwid)[0]))
""" Perform simple simulations on meshes.
"""
from __future__ import division
import os
import random
import numpy as np
from . import _fieldkit
def random_walk(domain, N, runs, steps, coords=None, images=None, seed=None, threads=None):
""" Performs a simple random walk in a domain on a lattice.
The random walk is executed on the nodes inside the
:py:class:`~fieldkit.mesh.Domain`. One of the 6 lattice
directions is chosen at random, and the walker moves to the
new lattice site if it is also in the `domain`. Otherwise,
it stays in place. The walk is performed in a sequence of
`runs`, with each run having length `steps`. The walker
coordinates are recorded before every run in an unwrapped
system suitable for computing the mean-squared displacement.
Parameters
----------
domain : :py:class:`~fieldkit.mesh.Domain`
The digitized domain to simulate in.
N : int
The number of random walkers.
runs : int
The number of runs to complete.
steps : int
The number of steps taken by the walker per run.
seed : int
The seed to use for the random number generator. If None,
a value is drawn from the system entropy.
coords : array_like or None
An `Nx3` array of node (integer) coordinates.
images : array_like or None
An `Nx3` array of image (integer) coordinates.
threads : int
The number of OpenMP threads to use in the simulation. If None,
use the value of `OMP_NUM_THREADS` from the environment if it is
set; otherwise, default to 1.
Returns
-------
trajectory : numpy.ndarray
A `runsxNx3` array containing the unwrapped node coordinates.
coords : numpy.ndarray
An `Nx3` array containing the last wrapped coordinates.
images : numpy.ndarray
An `Nx3` array containing the last image coordinates.
Notes
-----
The random walk uses the node coordinate system based on integers
lying from `(0,0,0)` (inclusive) to `domain.mesh.shape` (exclusive).
These coordinates can be transformed to real coordinates using the
appropriate fractional conversion.
The `coords` and `images` arguments can be used to restart a previous
calculation. If used, `coords` must be properly wrapped to lie within
the mesh-shape bounds. The `images` argument is optional; if not supplied,
it will be assumed to be zero. However, this will not allow a clean
restart for calculating things like the mean-squared displacement.
If restarting a run, it is advisable to choose a new `seed` as the state
of the random number generator is not preserved between calls.
The wrapped `coords` can be unwrapped by adding the appropriate images::
unwrapped = coords + images * mesh.shape
"""
# initial coordinates
if coords is None:
coords = np.asarray([domain.nodes[c] for c in np.random.randint(low=0, high=len(domain.nodes), size=N)], dtype=np.int32)
coords = coords.transpose()
else:
coords = np.array(coords, dtype=np.int32)
if coords.shape[0] != N or coords.shape[1] != 3:
raise IndexError('Coordinate array must be Nx3')
if not np.all([domain.mask[tuple(c)] for c in coords]):
raise IndexError('All coordinates must lie in the domain')
coords = coords.transpose()
# initial images
if images is None:
images = np.zeros_like(coords)
else:
images = np.array(images, dtype=np.int32)
if images.shape[0] != N or images.shape[1] != 3:
raise IndexError('Image array must be Nx3')
images = images.transpose()
# if unspecified, draw a seed from the system entropy
if seed is None:
seed = random.SystemRandom().randint(-2147483648,2147483647)
# try to get threads
if threads is None:
try:
threads = os.environ['OMP_NUM_THREADS']
except:
threads = 1
# run random walk simulation
trajectory = _fieldkit.simulate.random_walk(domain.mask, coords, images, steps, runs, seed, threads)
return trajectory.transpose(),coords.transpose(),images.transpose()
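# Illustrative post-processing sketch (not from the original module): the returned trajectory is
# already unwrapped, so a mean-squared displacement per run follows directly, e.g.
#   traj, coords, images = random_walk(domain, N=100, runs=50, steps=1000)
#   msd = np.mean(np.sum((traj - traj[0])**2, axis=-1), axis=-1)   # traj has shape (runs, N, 3)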
def biased_walk(domain, probs, N, runs, steps, coords=None, images=None, seed=None, threads=None):
r""" Performs a biased random walk in a domain on a lattice.
The biased random walk is executed on the nodes inside the
:py:class:`~fieldkit.mesh.Domain`. One of the 6 lattice
directions is chosen according to the weights in `probs`,
and the walker moves to the new lattice site if it is also
in the `domain`. Otherwise, it stays in place. The walk is
performed in a sequence of `runs`, with each run having
length `steps`. The walker coordinates are recorded before
every run in an unwrapped system suitable for computing the
mean-squared displacement.
Parameters
----------
domain : :py:class:`~fieldkit.mesh.Domain`
The digitized domain to simulate in.
probs : array_like
The transition probabilities to 6 adjacent lattice sites,
as an `(Nx,Ny,Nz,6)` array.
N : int
The number of random walkers.
runs : int
The number of runs to complete.
steps : int
The number of steps taken by the walker per run.
seed : int
The seed to use for the random number generator. If None,
a value is drawn from the system entropy.
coords : array_like or None
An `Nx3` array of node (integer) coordinates.
images : array_like or None
An `Nx3` array of image (integer) coordinates.
threads : int or None
The number of OpenMP threads to use in the simulation. If None,
use the value of `OMP_NUM_THREADS` from the environment if it is
set; otherwise, default to 1.
Returns
-------
trajectory : numpy.ndarray
A `runsxNx3` array containing the unwrapped node coordinates.
coords : numpy.ndarray
An `Nx3` array containing the last wrapped coordinates.
images : numpy.ndarray
An `Nx3` array containing the last image coordinates.
Notes
-----
This method is very similar to :py:func:`random_walk`, other than
the transition probabilities, so refer to its documentation for the
meaning of the coordinates and images.
The transition probabilities (`probs`) give the weighted probabilities
to transition to an adjacent lattice site. They should be specified in
the order `(+x,-x,+y,-y,+z,-z)` for each site. Each entry must be in the
range [0,1], and the sum of the transition probabilities for a site must
be less than or equal to 1. If the sum :math:`p` is less than 1 for a site,
the walker will remain on the site with probability :math:`1-p`.
This is effectively an implementation of null-event (rejection) kinetic
Monte Carlo. To map from kMC transition rates :math:`\Gamma_{ij}` to
transition probabilities :math:`p_{ij}`, multiply the transition rates by
a nominal average timestep :math:`\Delta t` (:math:`p_{ij} = \Delta t \Gamma_{ij}`).
This timestep should be chosen in a way that makes the algorithm efficient
and still satisfies the conditions for the transition probabilities. One way
to do this is to find the **maximum sum** of transition rates for all
sites :math:`\Gamma_i^*`. The timestep is then :math:`\Delta t = 1/\Gamma^*`.
To create an unbiased random walk::
probs = np.full(list(mesh.shape) + [6], 1./6.)
The timestep is then :math:`\Delta t = \Delta x^2/6 D_0` where `D_0` is the
diffusion coefficient on the unconfined lattice.
"""
# preprocess the transition probabilities
if np.any(probs < 0) or np.any(probs > 1):
raise ValueError('Transition probabilities must lie between 0 and 1.')
cumprobs = np.cumsum(probs, axis=-1, dtype=np.float64)
if np.any(cumprobs[...,-1] > 1):
raise ValueError('Transition probabilities must sum <= 1.')
cumprobs = np.asfortranarray(np.moveaxis(cumprobs,-1,0))
# initial coordinates
if coords is None:
coords = np.asarray([domain.nodes[c] for c in np.random.randint(low=0, high=len(domain.nodes), size=N)], dtype=np.int32)
coords = coords.transpose()
else:
coords = np.array(coords, dtype=np.int32)
if coords.shape[0] != N or coords.shape[1] != 3:
raise IndexError('Coordinate array must be Nx3')
if not np.all([domain.mask[tuple(c)] for c in coords]):
raise IndexError('All coordinates must lie in the domain')
coords = coords.transpose()
# initial images
if images is None:
images = np.zeros_like(coords)
else:
images = np.array(images, dtype=np.int32)
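# Illustrative pre-call sketch (assumptions flagged): converting kMC transition rates to the
# transition probabilities expected by biased_walk, following the docstring above.
# `rates` is a hypothetical (Nx, Ny, Nz, 6) array of per-site transition rates.
# dt = 1.0 / np.max(np.sum(rates, axis=-1))   # timestep from the maximum total rate per site
# probs = rates * dt                          # each site's probabilities now sum to <= 1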
#------------------------------------------------------------------------------------------#
import numpy as np
import pandas as pd
from scipy.spatial import distance
import scipy
import h5py, time, random, os, sys, pyflagser, warnings, datetime
import matplotlib.pyplot as plt
from tqdm import tqdm
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from itertools import repeat
from morphological_types import *
from pyflagser import flagser_count_unweighted as fcu
from pyflagser import flagser_unweighted as fu
import matplotlib
#------------------------------------------------------------------------------------------#
t = time.process_time()
#------------------------------------------------------------------------------------------#
''' data '''
def king_file(mc):
mc_file = h5py.File(f'../data/average/cons_locs_pathways_mc{mc}_Column.h5', 'r')
populations, connections = mc_file.get('populations'), mc_file.get('connectivity')
return populations, connections
#------------------------------------------------------------------------------------------#
''' Model Constructions '''
def Bio_M(m_type, populations, connections):
for M_a in tqdm(m_type):
for M_b in m_type:
# spatial coordinates of the neurons in each neuronal m-type
L_a = pd.DataFrame(np.matrix(populations[M_a]['locations']), columns = ['x', 'y', 'z'])
L_b = pd.DataFrame(np.matrix(populations[M_b]['locations']), columns = ['x', 'y', 'z'])
# distances between each neuron pathway group
D_ = scipy.spatial.distance.cdist(L_a, L_b, 'euclidean')
# Bins
bins = np.arange(1, D_.max(), 75) - np.concatenate([[0],
np.array(np.ones(len(np.arange(1, D_.max(), 75)) - 1))])
# Matrix of distance bins
C_ = np.array(np.digitize(D_, bins))
# Bin groups in matrix
groups = np.array(range(len(bins))) + 1
# Actual connections matrix
a = np.array(connections[M_a][M_b]['cMat'])
ab = pd.DataFrame(a)
ab.to_csv("../output/Bio_M/reconstruction/" + str(M_a) + str(M_b) + ".csv", header = False, index = False)
#------------------------------------------------------------------------------------------#
def ER(mc, m_type, populations):
# compute full list of neuron locations. Collect array, locate connections (pre/post list)
locations = np.vstack([np.array(populations[i]['locations']) for i in m_type])
array = np.load('../output/Bio_M/model/mc' + str(mc) + '_array.npy')
N = 31346
A = np.zeros((N, N), dtype = np.int8)
for i in tqdm(range(N)):
A[i,:] = np.random.rand(N) < 0.008
B = np.array(A)
np.save('../output/Erdos/model/erdos_renyi.npy', B)
#------------------------------------------------------------------------------------------#
def neuron_swap(m_type, populations, connections):
for M_a in tqdm(m_type):
for M_b in m_type:
# spatial coordinates of the neurons in each neuronal m-type
L_a = pd.DataFrame(np.matrix(populations[M_a]['locations']), columns = ['x', 'y', 'z'])
L_b = pd.DataFrame(np.matrix(populations[M_b]['locations']), columns = ['x', 'y', 'z'])
# distances between each neuron pathway group
D_ = scipy.spatial.distance.cdist(L_a, L_b, 'euclidean')
# Bins
bins = np.arange(1, D_.max(), 75) - np.concatenate([[0],
np.array(np.ones(len(np.arange(1, D_.max(), 75)) - 1))])
# Matrix of distance bins
C_ = np.array(np.digitize(D_, bins))
# Bin groups in matrix
groups = np.array(range(len(bins))) + 1
# Actual connections matrix
a = np.array(connections[M_a][M_b]['cMat'])
# Shuffle of each matrix
for aw in groups:
b = a[C_ == aw]
np.random.shuffle(b)
iz, jz = np.nonzero(C_ == aw)
for i, j, v in zip(iz, jz, b):
a[i, j] = v
ab = pd.DataFrame(a)
ab.to_csv("../output/GB/general_reconstruction/" + str(M_a) + str(M_b) + ".csv", header = False, index = False)
#------------------------------------------------------------------------------------------#
def subdivide_connectome(mc, m_type, populations, connections):
A = np.load('../output/Bio_M/model/mc' + str(mc) + '_array.npy')
array_size_by_morph = [np.array(connections[i][i]['cMat']).shape[0] for i in m_type]
layer1 = np.sum(array_size_by_morph[0:6])
layer2 = np.sum(array_size_by_morph[6:16])
layer4 = np.sum(array_size_by_morph[16:28])
layer5 = np.sum(array_size_by_morph[28:41])
layer6 = np.sum(array_size_by_morph[41:])
layers = [layer1, layer2, layer4, layer5, layer6]
boundaries = np.cumsum(layers)
boundaries = np.insert(boundaries, 0, 0)
for i in range(len(boundaries)-1):
for j in range(len(boundaries)-1):
np.save('../output/BC/blocks/mc6_' + str(i) + str(j) + '_array.npy', \
A[boundaries[i]:boundaries[i+1],boundaries[j]:boundaries[j+1]])
#------------------------------------------------------------------------------------------#
def stat_inputs(mc, m_type, populations, connections):
# compute full list of neuron locations. Collect array, locate connections (pre/post list)
locations = np.vstack([np.array(populations[i]['locations']) for i in m_type])
array = np.load('../output/Bio_M/model/mc' + str(mc) + '_array.npy')
X = np.where(array == 1)
distance_list = np.array(np.sqrt(np.sum((locations[X[1]] - locations[X[0]])**2, axis = 1)))
pre, post = X[0], X[1]
counts, bins = np.histogram(distance_list, bins = np.arange(0, distance_list.max(), 75))
np.set_printoptions(suppress=True)
bins1 = bins[:-1]**2
probability = np.array(counts/max(counts)).astype(float)
return pre, post, locations, probability, bins1
#------------------------------------------------------------------------------------------#
def main_fast(pre, post, locations):
new_pre = np.empty_like(pre)
"""
Divide 9952 training objects into eight groups,
and do an 8-fold leave-1/8 out.
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon/TheCannon')
sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon')
from TheCannon import dataset
from TheCannon import model
from TheCannon import lamost
from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
import pyfits
direc_ref = "/Users/annaho/TheCannon/data/lamost_paper"
def group_data():
""" Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs. """
tr_obj = np.load("%s/ref_id.npz" %direc_ref)['arr_0']
groups = np.random.randint(0, 8, size=len(tr_obj))
np.savez("ref_groups.npz", groups)
def train(ds, ii):
""" Run the training step, given a dataset object. """
print("Loading model")
m = model.CannonModel(2)
print("Training...")
m.fit(ds)
np.savez("./ex%s_coeffs.npz" %ii, m.coeffs)
np.savez("./ex%s_scatters.npz" %ii, m.scatters)
np.savez("./ex%s_chisqs.npz" %ii, m.chisqs)
np.savez("./ex%s_pivots.npz" %ii, m.pivots)
fig = m.diagnostics_leading_coeffs(ds)
plt.savefig("ex%s_leading_coeffs.png" %ii)
# m.diagnostics_leading_coeffs_triangle(ds)
# m.diagnostics_plot_chisq(ds)
return m
def load_model(ii):
print("Loading model")
m = model.CannonModel(2)
m.coeffs = np.load("./ex%s_coeffs.npz" %ii)['arr_0']
m.scatters = np.load("./ex%s_scatters.npz" %ii)['arr_0']
m.chisqs = np.load("./ex%s_chisqs.npz" %ii)['arr_0']
m.pivots = np.load("./ex%s_pivots.npz" %ii)['arr_0']
return m
def test(ds, m, group):
nguesses = 7
nobj = len(ds.test_ID)
nlabels = len(m.pivots)
choose = np.random.randint(0,nobj,size=nguesses)
tr_label = ds.tr_label
print("nlab")
print(nlabels)
print("nobj")
print(nobj)
print("tr label shape")
print(tr_label.shape)
print("m pivots shape")
print(m.pivots.shape)
starting_guesses = tr_label[choose]-m.pivots
labels = np.zeros((nguesses, nobj, nlabels))
chisq = np.zeros((nguesses, nobj))
errs = np.zeros(labels.shape)
for ii,guess in enumerate(starting_guesses):
a,b,c = test_step_iteration(ds,m,starting_guesses[ii])
labels[ii,:] = a
chisq[ii,:] = b
errs[ii,:] = c
np.savez("ex%s_labels_all_starting_vals.npz" %group, labels)
|
np.savez("ex%s_chisq_all_starting_vals.npz" %group, chisq)
|
numpy.savez
|
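A minimal sketch of the np.savez round trip used throughout the script above (the file name here is illustrative): arrays passed positionally are stored under the keys 'arr_0', 'arr_1', ..., which is why load_model indexes each archive with ['arr_0'].
import numpy as np

labels = np.zeros((7, 10, 4))                 # toy (nguesses, nobj, nlabels) array
np.savez("example_labels.npz", labels)        # positional argument -> stored as 'arr_0'
restored = np.load("example_labels.npz")["arr_0"]
assert restored.shape == labels.shape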
#!/usr/bin/python
from __future__ import print_function
"""
Simple adjunct routine to plot LSPS/CRACM maps with traces, over cell image if possible.
Reuqires acq4read.
Takes advantage of acq4read code to access all data and attributes.
"""
import os
import re
import itertools
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import datetime
import pprint
import textwrap as WR
import collections
import pandas as pd
import scipy.ndimage
import scipy.signal
import numpy as np
import seaborn as sns
import matplotlib
# matplotlib.use('Qt5Agg')
import matplotlib.pyplot as mpl
from matplotlib.widgets import RectangleSelector
import matplotlib.backend_bases as MBB
import scipy.ndimage as SND
import shapely as SH
import shapely.affinity as SHA
import shapely.geometry as SG
# import descartes
from pylibrary.plotting import plothelpers as PH
import pylibrary.tools.utility as PU
from pyqtgraph import configfile
from pylibrary.plotting import picker
import pylibrary.tools.tifffile as tf
from ..ephysanalysis import acq4read as ARC
from ..ephysanalysis import metaarray as EM
from ..ephysanalysis import boundrect as BR
from ..mapanalysistools import digital_filters as FILT
import montage
import mahotas as MH
class ScannerInfo(object):
"""
Get scanner information, compute the scanner box and some additional parameters
Do this as a class to encapsulate the data and make it reusable.
"""
def __init__(self, AR, offset):
# print('ScannerInfo called')
BRI = BR.BoundRect()
self.offset = offset
self.AR = AR # save the acq4read instance for access to the data
self.AR.getScannerPositions()
self.scannerpositions = np.array(AR.scannerpositions)
for i, s in enumerate(self.scannerpositions):
self.scannerpositions[i,0] += float(self.offset[0])
self.scannerpositions[i,1] += float(self.offset[1])
print(self.scannerpositions[:10])
pos = self.AR.scannerCamera['frames.ma']['transform']['pos']
scale = self.AR.scannerCamera['frames.ma']['transform']['scale']
region = self.AR.scannerCamera['frames.ma']['region']
self.binning = self.AR.scannerCamera['frames.ma']['binning']
# print('Scanner pos, scale, region: ', pos, scale, region)
# print('Scanner binning: ', self.binning)
binning = self.binning
scale = list(scale)
self.scale = scale
if self.AR.spotsize is None:
self.AR.spotsize = 50e-6  # default spot size in meters (50 microns)
print('Spot Size reset to: {0:0.3f} microns'.format(self.AR.spotsize*1e6))
x0 = pos[0] + scale[0]*region[0]/binning[0]
x1 = pos[0] + scale[0]*(region[0]+region[2])/binning[0]
y0 = pos[1] + scale[1]*region[1]/binning[1]
y1 = pos[1] + scale[1]*(region[1]+region[3])/binning[1]
self.camerabox = [[x0, y0], [x0, y1], [x1, y1], [x1, y0], [x0, y0]]
# self.camerabox = [[pos[0] + scale[0]*region[0]/binning[0], pos[1] + scale[1]*region[1]/binning[1]],
# [pos[0] + scale[0]*region[0]/binning[0], pos[1] + scale[1]*region[3]/binning[1]],
# [pos[0] + scale[0]*region[2]/binning[0], pos[1] + scale[1]*region[3]/binning[1]],
# [pos[0] + scale[0]*region[2]/binning[0], pos[1] + scale[1]*region[1]/binning[1]],
# [pos[0] + scale[0]*region[0]/binning[0], pos[1] + scale[1]*region[1]/binning[1]]
# ]
scannerbox = BRI.getRectangle(self.AR.scannerpositions)
self.scanner_sh = SH.geometry.MultiPoint(self.AR.scannerpositions)
self.envelope_sh = self.scanner_sh.envelope
self.centroid_sh = self.scanner_sh.centroid
if scannerbox is None: # likely just one point
pt = self.AR.scannerpositions
fp = np.array([[pt[0][0]], [pt[0][1]]])
scannerbox = fp
else:
fp = np.array([scannerbox[0][0], scannerbox[1][1]]).reshape(2,1)
# print('fp: ', fp)
scannerbox = np.append(scannerbox, fp, axis=1)
self.scboxw = np.array(scannerbox)
# print('scanner camerabox: ', self.camerabox)
self.boxw = np.swapaxes(np.array(self.camerabox), 0, 1)
# print('scanner box: ', self.boxw)
class ImageInfo(object):
"""
Get Image information, compute the scanner box and some additional parameters
Do this as a class to encapsulate the data and make it reusable.
"""
def __init__(self, AR):
BRI = BR.BoundRect()
self.AR = AR # save the acq4read instance for access to the data
# self.AR.getImage()
pos = self.AR.Image_pos
scale = self.AR.Image_scale
region = self.AR.Image_region
self.binning = self.AR.Image_binning
binning = self.binning
# print('Image pos, scale, region: ', pos, scale, region)
# print('Image binning: ', self.binning)
scale = list(scale)
self.scale = scale
self.filename = self.AR.Image_filename
x0 = pos[0] # + scale[0]*region[0]/binning[0]
x1 = pos[0] + scale[0]*(region[2])/binning[0]
y0 = pos[1] #+ scale[1]*region[1]/binning[1]
y1 = pos[1] + scale[1]*(region[3])/binning[1]
self.camerabox = [[x0, y0], [x0, y1], [x1, y1], [x1, y0], [x0, y0]]
self.boxw = np.swapaxes(np.array(self.camerabox), 0, 1)
# self.boxw = np.array(self.camerabox)
class EventReader(object):
def __init__(self, basepath, filename, mapname):
fne = Path(filename.replace('/', '~')+'.pkl') # '2019.11.08_000~slice_002~cell_000.pkl'
fe = Path(basepath, 'events', fne)
with open(fe, 'rb') as fh:
d = pd.read_pickle(fh, compression=None)
# print(d.keys())
dx = d[Path(str(fe.stem).replace('~', '/').replace('../', ''), mapname)]
self.data = dx
class MosaicReader(object):
"""
Read a mosaic editor mosaic file
"""
def __init__(self, filename, basepath):
self.basepath = basepath
self._saveVersion = (2, 0) # default.
state = json.load(open(filename, 'r'))
if state.get('contents', None) != 'MosaicEditor_save':
raise TypeError("This does not appear to be MosaicEditor save data.")
if state['version'][0] > self._saveVersion[0]:
raise TypeError("Save data has version %d.%d, but this MosaicEditor only supports up to version %d.x." % (state['version'][0], state['version'][1], self._saveVersion[0]))
root = state['rootPath']
loadfail = []
for itemState in state['items']:
fname = itemState.get('filename')
fname = Path(root, Path(fname).name)
# print('fname: ', fname.is_file())
# print(root)
# print('itemstate: ', itemState)
if fname is None:
# create item from scratch and restore state
itemtype = itemState.get('type')
if itemtype not in items.itemTypes():
# warn the user later on that we could not load this item
loadfail.append((itemState.get('name'), 'Unknown item type "%s"' % itemtype))
continue
item = self.addItem(type=itemtype, name=itemState['name'])
else:
# create item by loading file and restore state
# if root is None:
# fh = DataManager.getHandle(fh)
# else:
if str(fname.name).startswith('image_'):
image_data = self.AR.getImage(fname)
elif str(fname.name).startswith('video_'):
image_data = EM.MetaArray(file=fname)
fh = root[fname]
item = self.addFile(fh, name=itemState['name'], inheritTransform=False)
item.restoreState(itemState)
self.canvas.view.setState(state['view'])
if len(loadfail) > 0:
msg = "\n".join(["%s: %s" % m for m in loadfail])
raise Exception("Failed to load some items:\n%s" % msg)
def addItem(self, item=None, type=None, **kwds):
"""Add an item to the MosaicEditor canvas.
May provide either *item* which is a CanvasItem or QGraphicsItem instance, or
*type* which is a string specifying the type of item to create and add.
"""
if isinstance(item, Qt.QGraphicsItem):
print('was qt')
return self.canvas.addGraphicsItem(item, **kwds)
else:
print('not qt')
return self.canvas.addItem(item, type, **kwds)
class MapTraces(object):
def __init__(self):
self.cell = None
self.datasets = OrderedDict()
self.image = None
self.AR = ARC.Acq4Read()
self.AR.setImportant(False) # turn off the default flag
self.outputfn = None
self.invert = True
self.vmax = 20000.
self.voff = 0.
self.ioff = 0.050
self.basezero = True
self.ax = None
self.ax2 = None
self.xscale = 1.0
self.yscale = 1.0
self.nspots = 0
self.ticks = None
self.overlay = True
self.indicesplotted = []
self.twin = [0, 0.6]
self.averageScannerImages = False # normally, would not do
self.SI_minmax = [0., 1.]
self.SI_original_minmax = [0., 1.]
self.cellpos = None
self.experiment = None
self.image = None
self.mosaic = None
self.mosaics = []
self.calbar = [20, 500] # 20 ms, 500 pA
self.offset = [0., 0.] # offset between pos and image
self.tbarpos = None # gets set to intersection once created
self.tbar_coords = None # line for the tbar
self.tbar = None # matplotlib line object for tbar
self.tbar_visible = False
self.tbar_angle = 0.
self.scholl_plot = False
self.ref_angles = None
self.picker = picker.Picker()
sns.set()
sns.color_palette("colorblind", 10)
self.palette = itertools.cycle(sns.color_palette("colorblind", 10))
sns.set_style("white")
sns.set_style("ticks")
self.window = False
self.XY = [[None, None]]
self.XYdepth = 0
self.calbarobj = None
self.calbartext = None
self.mx = 0
self.my = 0
self.notch_freqs = None
self.notch_flag = False
self.notch_Q = 12.
def setScannerImages(self, flag):
self.averageScannerImages = flag
def setEventData(self, eventdata):
self.eventdata = eventdata
def setProtocol(self, cell, image=None, videos=None, mosaic=None):
self.cell = Path(cell)
if not self.cell.is_dir():
print(f"Did not find directory: {str(cell):s}")
raise ValueError
if image is not None:
self.image = Path(self.cell, image)
print('image path: ', self.image)
if str(Path(self.image).name).startswith('image_'):
imagefile = Path(self.image).with_suffix('.tif') # make sure right suffix is there
self.image_data = self.AR.getImage(Path(imagefile))
elif str(Path(self.image).name).startswith('video_'):
imagefile = Path(self.image).with_suffix('.ma')
print('imagefile: ', imagefile)
self.image_data = self.AR.getImage(Path(imagefile))
self.image_data = np.max(self.image_data, axis=0) # max projection along stack
self.image_data = np.rot90(np.fliplr(self.image_data))
else:
raise ValueError('Do not know how to handle image: ', self.image)
self.AR.getIndex(currdir=imagefile.parent)
if 'userTransform' not in list(self.AR._index[imagefile.name].keys()):
self.refpos = self.AR._index[imagefile.name]['deviceTransform']['pos']
else:
self.refpos = self.AR._index[imagefile.name]['userTransform']['pos'] # use repositioned image location
self.ImgInfo = ImageInfo(self.AR)
else:
self.image = None
if mosaic is not None:
# print('mosaic: ', mosaic)
self._saveVersion = (2, 0) # default.
state = json.load(open(Path(self.cell, mosaic), 'r'))
if state.get('contents', None) != 'MosaicEditor_save':
raise TypeError("This does not appear to be MosaicEditor save data.")
if state['version'][0] > self._saveVersion[0]:
raise TypeError("Save data has version %d.%d, but this MosaicEditor only supports up to version %d.x." % (state['version'][0], state['version'][1], self._saveVersion[0]))
self.mosaics = []
root = state['rootPath']
# for i in state['items']:
# print(i['name'], i['alpha'], i['userTransform'])
for v in state['items']: # just copy the items that are relevant
if v['name'].startswith('video_') or v['name'].startswith('image_'):
self.mosaics.append(v)
self.videos = []
if videos is not None:
for v in videos:
self.videos.append(Path(self.cell, f"video_0{v:02d}"))
self.AR.setProtocol(self.cell)
# print('mosaics: ', self.mosaics)
def setWindow(self, x0, x1, y0, y1):
self.xlim = (x0, x1)
self.ylim = (y0, y1)
if not pd.isnull(x0):
self.window = True
print('window set!!!!!')
else:
self.window = False
def setOutputFile(self, filename):
self.outputfn = filename
def setPars(self, pdict):
for k in list(pdict.keys()):
if k == 'invert':
self.invert = pdict[k]
if k == 'vmax':
self.vmax = pdict[k]
if k == 'voff':
self.voff = pdict[k]
if k == 'ioff':
self.ioff = pdict[k]
if k == 'xscale':
self.xscale = pdict[k]
if k == 'yscale':
self.yscale = pdict[k]
if k == 'calbar':
self.calbar[0] = pdict[k][0]*1e-3
self.calbar[1] = pdict[k][1]*1e-12
if k == 'twin':
self.twin[0] = pdict[k][0]
self.twin[1] = pdict[k][1]
if k == 'ticks':
self.ticks = pdict[k]
if k == 'notch_freqs':
if not pd.isnull(pdict[k]):
p = pdict[k].replace('[', '').replace(']', '').replace('"', '')
fp = p.split(',')
lp = [float(f) for f in fp]
self.notch_freqs = np.array(lp)
self.notch_flag = True
if k == 'notch_q':
self.notch_Q = float(pdict[k])
if k == 'cellpos':
self.cellpos = pdict[k]
if k == 'experiment':
self.experiment = pdict[k]
if k == 'angle':
self.tbar_angle = pdict[k]
if k == 'cellID':
self.cellID = pdict[k]
if k == 'map':
self.mapname = pdict[k]
if k == 'offset':
self.offset[0] = pdict[k][0]
self.offset[1] = pdict[k][1]
def filter_data(self, tb, data, LPF=3000.):
self.HPF_flag = False
self.LPF = LPF
self.maxtime = 0.599 # sec
filtfunc = scipy.signal.filtfilt
samplefreq = 1./np.mean(np.diff(tb))
rate = 1.0/samplefreq
nyquistfreq = samplefreq*0.95  # note: 0.95*samplefreq, not the true Nyquist frequency (samplefreq/2)
wn = self.LPF/nyquistfreq
b, a = scipy.signal.bessel(2, wn)
if self.HPF_flag:
wnh = self.HPF/nyquistfreq
bh, ah = scipy.signal.bessel(2, wnh, btype='highpass')
imax = int(max(np.where(tb < self.maxtime)[0]))
print(np.max(tb), imax)
# imax = len(tb)
data2 = np.zeros_like(data)
data2 = filtfunc(b, a, data[:,:imax] - np.mean(data[0:250]))
# if self.HPF_flag:
# data2[r,t,:imax] = filtfunc(bh, ah, data2[r, t, :imax]) # - np.mean(data[r, t, 0:250]))
data3 = np.zeros_like(data2)
if self.notch_flag:
print('Notch Filtering Enabled', self.notch_freqs)
for r in range(data2.shape[0]):
data3[r] = FILT.NotchFilterZP(data2[r], notchf=self.notch_freqs, Q=self.notch_Q,
QScale=False, samplefreq=samplefreq)
else:
data3 = data2
# mpl.figure()
# mpl.plot(tb[:imax], data2[0,:imax])
# mpl.plot(tb[:imax], data3[0,:imax])
# mpl.show()
# exit()
#
return data3
def plot_maps(self, protocols, ax=None, traces=None, linethickness=1.0, tbar=False, axb=None):
"""
Plot map or superimposed maps...
"""
print('plot_maps: plot_maps with protocols: ', protocols)
if ax is None:
self.figure = mpl.figure()
# print(dir(self.figure))
self.figure.set_size_inches(14., 8.)
if traces is None:
self.ax = self.figure.add_subplot(111)
# print('set ax')
else:
self.ax = self.figure.add_subplot(121)
self.ax2 = self.figure.add_subplot(122)
sns.despine(ax=self.ax2, left=True, bottom=True, right=True, top=True)
# print('set ax and ax2')
else:
self.ax = ax
print('ax: ', ax)
# self.ax3 = self.figure.add_subplot('133')
self.data = dict.fromkeys(list(protocols.keys()))
cols = ['r', 'b', 'c', 'g']
self.traces = traces
for i, p in enumerate(protocols):
prot = protocols[p]
self.datasets[p] = []
if i == 0:
self.setProtocol(prot, self.image)
self.show_traces(self.ax, pcolor=cols[i], name=p, linethickness=linethickness)
else:
self.setProtocol(prot)
self.show_traces(self.ax, pcolor=cols[i], name=p, linethickness=linethickness)
if self.traces is not None:
for tr in self.traces:
self.handle_event(index=tr)
PH.calbar(self.ax2, calbar=[0, -6., 50, 5.], scale=[1.0, 1.0],
axesoff=True, orient='left', unitNames=None, fontsize=11, weight='normal', color='k', font='Arial')
else:
self.picker = picker.Picker()
self.picker.setData(2, self.scp)
self.picker.setAction(self.handle_event)
# self.figure.canvas.mpl_connect('button_press_event', self.picker.pickEvent)
# self.figure.canvas.mpl_connect('pick_event', self.picker.pickEvent)
# self.figure.canvas.mpl_connect('motion_notify_event', self.picker.onMouseMotion)
x1, x2 = self.ax.get_xlim()
y1, y2 = self.ax.get_ylim()
self.XY = [self.get_XYlims()]
cp = self.cell.parts
cellname = '/'.join(cp[-4:])
self.update_tbar(None)
self.compute_sector_distance_map()
# self.plot_scholl()
if axb is not None:
self.plot_sector_distance_map(axb)
if self.ax is None:
self.figure.suptitle(cellname, fontsize=11)
self.fig2 = None
if self.outputfn is not None:
mpl.savefig(self.outputfn)
# mpl.show()
def get_XYlims(self):
x1, x2 = self.ax.get_xlim()
y1, y2 = self.ax.get_ylim()
return([x1, y1, x2, y2])
def adjust(self, extent, a):
# adjust the extent so that the minimum boundary to the edge of the plot is a,
# and so that the area plotted is "common" across datasets
minx = self.SI.centroid_sh.x - a/2.
maxx = self.SI.centroid_sh.x + a/2.
miny = self.SI.centroid_sh.y - a/2.
maxy = self.SI.centroid_sh.y + a/2.
return([minx, maxx, miny, maxy])
def set_minmax(self, imagedata):
"""
return the min, max for the data
"""
mind = SND.minimum(imagedata)
maxd = SND.maximum(imagedata)
if maxd == mind:
maxd = mind + 1
return [mind, maxd]
def show_traces(self, ax, pcolor='r', linethickness=0.5, name=None):
self.cell.glob('*')
# imageplotted = False
# imagetimes = []
# imagename = []
# maptimes = []
# mapname = []
supindex = self.AR.readDirIndex(currdir=self.cell)
self.SI = ScannerInfo(self.AR, self.offset)
self.extent = np.array([-1, 1, -1, 1])*7e-2
if self.invert:
cmap = 'gist_gray_r'
else:
cmap = 'gist_gray'
mapwidth = 1e-3
self.imageax = None
self.SI_ax = None # scanner image for rescale
self.max_camera = None
sc_alpha = 1.0
vid_alpha = 0.75
image_alpha = 0.75
mosaic_alpha = 0.75
box = None
if self.averageScannerImages:
sc_alpha = 1.0
vid_alpha = 0.5
image_alpha = 0.5
mosaic_alpha = 0.5
if self.averageScannerImages:
self.max_camera = self.AR.getAverageScannerImages(dataname='Camera/frames.ma', mode='max',
subtractFlag = True, firstonly=False, limit=None)
sm = self.max_camera
sm = sm/np.max(sm)
sm = sm*sm
sm = np.asarray(sm, dtype=float)
# sm = np.clip(sm, a_min=0.5, a_max=None)
self.extent0 = [np.min(self.SI.boxw[0]), np.max(self.SI.boxw[0]), np.min(self.SI.boxw[1]), np.max(self.SI.boxw[1])]
self.extent = self.adjust(self.extent0, mapwidth)
self.SI_ax = ax.imshow(sm, aspect='equal', cmap='Blues', alpha=sc_alpha, vmin = 0, vmax=np.max(sm),
extent=self.extent0)
# max_camera = scipy.ndimage.gaussian_filter(max_camera, sigma=256/(4.*10))
self.set_minmax(self.max_camera)
self.SI_minmax = list(self.SI_ax.get_clim())# use the clim for the min/max
self.SI_original_minmax = self.SI_ax.get_clim() # keep original
box = self.SI
if len(self.videos) > 0:
self.Montager = montage.Montager(celldir=self.cell)
self.Montager.run()
# M.list_images_and_videos()
self.Montager.process_videos(window='mpl', show=True, gamma=1.5, merge_gamma=-1., sigma=2.5)
# bounds are in self.Montager.bounds: (minx, miny, maxx, maxy)
bounds = self.Montager.bounds
self.extent0 = [bounds[0], bounds[2], bounds[1], bounds[3]]
self.extent = self.adjust(self.extent0, mapwidth)
self.imageax = ax.imshow(np.asarray(self.merged_image, dtype=float), aspect='equal', cmap=cmap, alpha=vid_alpha, vmin = 0, vmax=self.vmax,
extent=self.extent0)
self.cmin = SND.minimum(self.merged_image)
self.cmax = SND.maximum(self.merged_image)
box = self.extent
if self.image is not None:
# mpl.imshow(self.image_data)
# mpl.show()
self.extent0 = [np.min(self.ImgInfo.boxw[0]), np.max(self.ImgInfo.boxw[0]), np.min(self.ImgInfo.boxw[1]), np.max(self.ImgInfo.boxw[1])]
self.extent = self.adjust(self.extent0, mapwidth)
self.imageax = ax.imshow(np.asarray(self.image_data, dtype=float), aspect='equal', cmap=cmap, alpha=image_alpha, vmin = 0, vmax=self.vmax,
extent=self.extent0)
self.cmin = SND.minimum(self.image_data)
self.cmax = SND.maximum(self.image_data)
box = self.extent
if self.mosaics:
self.Montager = montage.montager.Montager(celldir=self.cell)
self.Montager.setup(self.mosaics)
# M.list_images_and_videos()
# should pass some info to process_videos to balance alpha etc from the mosaic.
self.Montager.process_videos(window=None, show=True, gamma=1.5, merge_gamma=-1., sigma=2.5, register=False, mosaic_data=self.mosaics)
# bounds are in self.Montager.bounds: (minx, miny, maxx, maxy)
bounds = self.Montager.image_boundary
self.extent0 = [bounds[0], bounds[2], bounds[1], bounds[3]]
self.extent = self.adjust(self.extent0, mapwidth)
# mpl.imshow(self.Montager.merged_image)  # debug leftover: would draw into the current axes
self.imageax = ax.imshow(np.array(self.Montager.merged_image, dtype=float), aspect='equal', cmap=cmap, alpha=mosaic_alpha, vmin = 0, vmax=self.vmax,
extent=self.extent0)
self.cmin = SND.minimum(self.Montager.merged_image)
self.cmax = SND.maximum(self.Montager.merged_image)
box = self.extent
if self.window:
if self.xlim == (0., 0.) and self.ylim == (0., 0.):
xylims = self.extent
ax.set_xlim(xylims[0:2])
ax.set_ylim(xylims[2:4])
# print('autoset: ', xylims)
else:
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
# print('self set: ', self.xlim, self.ylim)
self.scp = self.SI.scannerpositions
scp = self.scp
xmin = np.min(scp[:,0])
xmax = np.max(scp[:,0])
ymin = np.min(scp[:,1])
ymax = np.max(scp[:,1])
# print(xmin, ymin, xmax, ymax)
ax.scatter(scp[:,0], scp[:,1], s=4, c='c', marker='o', alpha=0.3, picker=5)
# print('getdata: ', name, self.datasets)
self.AR.getData()
d = self.AR.data_array
d = self.filter_data(self.AR.time_base, d)
if name is not None:
self.datasets[name] = d
if self.AR.mode in ['v', 'V', 'VC']:
self.vscale = 1e5
self.off = self.voff
else:
self.vscale = 1e-3
self.off = self.ioff
# print(len(self.AR.traces))
tb = self.AR.time_base
im0 = np.argmin(np.fabs(tb - self.twin[0]))
im1 = np.argmin(np.fabs(tb - self.twin[1]))
self.im0 = im0
self.im1 = im1
self.tb = tb[im0:im1]-tb[im0]
# just plot as many as we have!
dshape = self.datasets[name].shape
for p in range(dshape[0]): # scp.shape[0]):
self._plot_one(ax, p, pcolor, name=name, ythick=linethickness)
self.plot_calbar(ax, xmin, ymin)
# plot length (physical dimension) bar
if box is not None:
PH.calbar(ax, calbar=[box[0]+0.00012,
box[3]-0.00005,
0.0001 , 0],
scale = [1e6, 1e6], axesoff=True, orient='left',
unitNames={'x': r'$\mu$m', 'y': ''}, fontsize=11, weight='normal', color='k', font='Arial')
ax.set_xlim(self.extent[0:2])
ax.set_ylim(self.extent[2:4])
if self.cellpos is not None:
mpl.gca().plot(self.cellpos[0], self.cellpos[1], color='blue', marker='P', markersize=6)
if self.experiment is not None:
mpl.gca().set_title(f"{self.experiment:s}", fontsize=9)
# print(dir(self.imageax))
def plot_calbar(self, ax, x0, y0):
x0 += 0.5e-4
y0 += 0.5e-4
xcal = self.xscale*3.5e-5*self.calbar[0]*1.25
ycal = self.yscale*self.vscale*self.calbar[1]*0.5
zero = 0
self.calbarobj = ax.plot(self.xscale*3.5e-5*np.array([0., 0., self.calbar[0]])+x0 - xcal,
(self.yscale*self.vscale*(np.array([self.calbar[1], 0., 0. ])-zero))+self.off*self.vscale+y0 - ycal, 'k-', linewidth=1)
self.calbartext = ax.text(x0-xcal, y0-ycal, f"{int(self.calbar[0]*1e3):d} ms\n{int(self.calbar[1]*1e12):d} pA",
verticalalignment='top', horizontalalignment='center', fontsize=8)
self.calx_zero = self.calbarobj[0].get_xdata()
self.caly_zero = self.calbarobj[0].get_ydata()
# self.reposition_cal()
def reposition_cal(self, movex=0, movey=0, home=False):
if not home:
calxdata = self.calbarobj[0].get_xdata()
calydata = self.calbarobj[0].get_ydata()
else:
calxdata = self.calx_zero
calydata = self.caly_zero
self.mx = 0
self.my = 0
# print('xdata: ', calxdata)
# print('ydata: ', calydata)
xd = calxdata[2] - calxdata[1]
yd = calydata[0] - calydata[1]
xl = sorted(self.ax.get_xlim())
yl = sorted(self.ax.get_ylim())
# print(xl, yl)
x0 = xl[0] + (movex+self.mx)*0.001*xl[1]
y0 = yl[0] + (movey+self.my)*0.001*yl[1]
self.calbarobj[0].set_xdata([x0, x0, x0+xd])
self.calbarobj[0].set_ydata([y0+yd, y0, y0])
# print([x0, x0, x0+xd])
# print([y0+yd, y0, y0])
self.mx += movex
self.my += movey
# print(dir(MT.calbartext))
# calxy = MT.calbartext.get_position()
calxy = [0, 0]
calxy[0] = x0 + xl[1]*(movex+self.mx)*0.001
calxy[1] = y0 + yl[1]*(movey+self.my)*0.001 - yl[1]*0.015
self.calbartext.set_position(calxy)
# print('reposition : ', movex, movey)
# print(calxy, self.calx_zero, self.caly_zero)
def _plot_one(self, ax, p, pcolor, name=None, yscaleflag=True, tscale=True, offflag=True, ystep = 0., ythick=0.3):
zero = 0.
vdat = FILT.SignalFilter_LPFBessel(self.datasets[name][p, :], 2000.,
samplefreq=self.AR.sample_rate[0], NPole=8)
if self.basezero:
zero = np.mean(vdat[self.im0:self.im0+20])
if offflag:
xoff = self.scp[p,0]
yoff = self.off*self.vscale+self.scp[p, 1]
else:
xoff = 0.
yoff = 0.
if yscaleflag:
y_scale = self.yscale*self.vscale
else:
y_scale = 1e3
yoff += ystep
if tscale:
ts = self.xscale*3.5e-5
else:
ts = 1e3
ax.plot(ts*self.tb+xoff, (y_scale*(vdat[self.im0:self.im1]-zero))+yoff, color=pcolor, linewidth=ythick)
if self.ticks is not None:
for t in self.ticks:
ax.plot(ts*np.array([t, t])+xoff, y_scale*np.array([-20e-12, 20e-12])+yoff, color='k', linewidth=0.8)
def handle_event(self, index):
# print('handle event index: ', index)
# print(self.SI.scannerpositions[index,:])
if self.ax2 is None:
return
if index in self.indicesplotted:
return
if self.overlay:
ystep = -self.nspots*10.
self.palette = itertools.cycle(sns.color_palette("colorblind", 10))
for i, name in enumerate(list(self.datasets.keys())):
c = next(self.palette)
self._plot_one(self.ax2, index, pcolor=c, name=name, yscaleflag=False, offflag=False,
ystep=ystep, ythick=1.3-(i+1)*0.3, tscale=False)
# if i == 1:
# self._plot_one(self.ax3, index, pcolor=c, name=name, yscaleflag=False, offflag=False,
# ystep=ystep, ythick=0.5, tscale=False)
# self.xscale*3.5e-5*self.tb+xoff, (y_scale*(vdat[self.im0:self.im1]-zero))+yoff, color=pcolor, linewidth=0.3)
trn = self.traces.index(index)+1
self.ax.text(self.SI.scannerpositions[index][0], self.SI.scannerpositions[index][1],
f"{trn:d}", fontsize=9, horizontalalignment='center')
self.ax2.text(0., ystep,
f"{trn:d}", fontsize=9, horizontalalignment='right')
self.nspots += 1
self.indicesplotted.append(index)
mpl.draw()
def toggle_tbar(self, event):
if self.tbar_visible and self.tbar_coords is not None:
self.tbar[0].set_alpha(0)
self.tbar_visible = False
return
else:
self.update_tbar(event)
def _getAngle(self, pt1, pt2):
"""
Pt1 and 2 must be shapely Point objects"""
x_diff = pt2.x - pt1.x
y_diff = pt2.y - pt1.y
return np.arctan2(y_diff, x_diff)
def update_tbar(self, event):
center = SH.geometry.Point(self.cellpos[0], self.cellpos[1]) # center (flip y axis)
# first time through, just draw the bar
if self.tbar_coords is None:
cx = self.cellpos[0] # center (cell pos)
cy = self.cellpos[1]
tlinex = [cx, cx, cx, cx-1e-4, cx+1e-4] # draw the T bar
tliney = [cy-1e-4, cy, cy+1e-4, cy+1e-4, cy+1e-4]
self.tbar_coords = SH.geometry.LineString([(tlinex[i], tliney[i]) for i in range(len(tlinex))])
self.tbar = self.ax.plot(self.tbar_coords.xy[0], self.tbar_coords.xy[1], 'ko-', linewidth=1, markersize=2.5)
elif event is not None and event.xdata is not None:
e = SH.geometry.Point([event.xdata, event.ydata]) # point in direction for top of T bar
t = SH.geometry.Point([self.tbar_coords.xy[0][2], self.tbar_coords.xy[1][2]])
angle1 = self._getAngle(center, e)
angle2 = self._getAngle(center, t)
self.tbar_angle = -(angle2-angle1)
print('angle: ', self.tbar_angle)
else:
pass
newT = SH.affinity.rotate(self.tbar_coords, self.tbar_angle, origin=center, use_radians=True)
self.tbar[0].set_xdata(newT.xy[0])
self.tbar[0].set_ydata(newT.xy[1])
self.tbar[0].set_alpha(1)
self.tbar_visible = True
self.compute_sector_distance_map()
self.plot_scholl()
def _plot_coords(self, ax, ob, c='#999999', **kwds):
"""
Plot the data in a shapely object
Parameters
----------
ax : matplotlib axis object
target axis to plot data into
ob : shapely object with a .xy list of values
c : matplotlib color value
RGBA, string, etc
Returns
-------
Nothing
"""
x, y = ob.xy
ax.plot(x, y, '-', color=c, **kwds)
def compute_sector_distance_map(self):
"""
Compute the responses divided by distance (Sholl rings) and sector angle
"""
measure = 'ZScore'
thresh = 1.96
r = np.array([0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45])*1e-3 # convert to display units of meters from mm
nrings = r.shape[0]
t = self.cellpos
# first generate the rings and plot them
if not self.scholl_plot:
self.C_rings = [[] for _ in range(len(r))] # concentric rings
for i, rad in enumerate(r):
self.C_rings[i] = SG.Polygon([(rad*np.sin(theta) + t[0], rad*np.cos(theta) + t[1]) for theta in np.linspace(0., 2*np.pi, 100)])
# get the angles for each point in the map
x = self.SI.scannerpositions.T # self.eventdata['positions'].T # just for the way we handle the values here.
nsectors = 4
rot_angle = np.pi/nsectors
point_angles = np.arctan2(x[1,:]-t[1], x[0,:]-t[0])-self.tbar_angle # get angles relative to cell position, then rotate by tbar angle
try:
point_angles = np.where(point_angles < -rot_angle, point_angles+2*np.pi, point_angles)
except Exception:
print('point_angles: ', point_angles)
print('rot_angle: ', rot_angle)
raise
# compute the angles that divide the sectors
# and assign a sector index to each point that falls into
# the sector (list per sector are held in angle_group)
sector_angles = [] # sector angles for every sector
self.angle_group = [[] for _ in range(nsectors)]
for quad in range(nsectors):
quad0 = quad*np.pi/(nsectors/2.)
qa = quad0 - rot_angle
qb = quad0 + rot_angle
(qa, qb) = sorted([qa, qb])
sector_angles.append(qa)
# find all the points in this sector
pgr = np.where((point_angles >= qa) & (point_angles < qb))[0]
self.angle_group[quad] = pgr
# for q in range(nsectors):
# print(f'angle group[{q:d}]: ', angle_group[q])
# assign points by rings as well
Px2 = SG.MultiPoint([SG.Point(x[0,i], x[1,i]) for i in range(x.shape[1])])
ptlocs = np.zeros((x.shape[1], len(r)+1)) # for each point, all rings that it is in
for i, p in enumerate(Px2.geoms):  # iterate the MultiPoint's member points
for j in range(len(self.C_rings)):
ptlocs[i,j] = self.C_rings[j].contains(p)
ring_index = [None]*ptlocs.shape[0] # hold the ring index for each point
edgecolor = ['r', 'm', 'b', 'c', 'y', 'g', 'turquoise', 'brown', 'lightblue']
sectorsymbol = ['o', 's', '^', 'D']
for i in range(ptlocs.shape[0]):
u = np.where(ptlocs[i,:] == 1)[0] # indices of all rings that contain this point
if len(u) > 0: # some may be outside the outermost ring
ring_index[i] = u[0] # get the index for this point (which ring)
zs = [[] for _ in range(nsectors)] # point indices grouped by sector; scores are summed from these below
for i in range(len(ring_index)):
if ring_index[i] is not None: # outside outermost ring; ignore
for j in range(nsectors): # find sector for this point
if i in self.angle_group[j]:
zs[j].append(i)
# angles.append(qb)
# for j in range(nsectors):
# try:
# print('?: ', self.eventdata['I_max'][0][zs[j]])
# except:
# print(j, zs[j])
# self.eventdata['I_max']
for j in range(nsectors):
print('sector: ', j, ' total score: ', np.sum(self.eventdata['ZScore'][0][zs[j]]))
print('sector: ', j, ' total charge: ',
|
np.sum(self.eventdata['Qr'][0][zs[j]])
|
numpy.sum
|
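A minimal sketch of the per-sector totals printed above, with toy numbers standing in for self.eventdata['ZScore'][0]: each zs[j] is a list of point indices assigned to sector j, and np.sum over the fancy-indexed slice gives that sector's total.
import numpy as np

scores = np.array([0.5, 2.1, 3.3, 0.2, 4.0])   # toy per-point Z scores
zs = [[0, 2], [1, 4], [3], []]                 # hypothetical sector membership lists
for j, idx in enumerate(zs):
    total = np.sum(scores[np.asarray(idx, dtype=int)])  # empty sectors sum to 0.0
    print('sector: ', j, ' total score: ', total)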
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import time
import datetime
from torch.autograd import grad
from torch.autograd import Variable
from torchvision.utils import save_image
from torchvision import transforms
from model import Generator
from model import Discriminator
from model import Segmentor
from PIL import Image
from util.visualizer import Visualizer
import util.util as util
from collections import OrderedDict
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
# print(y)
# print(y.size())
y=np.asarray(y)
# print(type(y))
y=np.eye(num_classes, dtype='uint8')[y]
return y
# class CrossEntropyLoss2d(nn.Module):
# def __init__(self, weight=None, size_average=True, ignore_index=255):
# super(CrossEntropyLoss2d, self).__init__()
# self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index)
# def forward(self, inputs, targets):
# ce2d_loss = self.nll_loss(torch.unsqueeze(F.log_softmax(inputs[0]),0), torch.unsqueeze(targets[0],0))
# for i in range(len(inputs)-1):
# ce2d_loss = ce2d_loss + self.nll_loss(torch.unsqueeze(F.log_softmax(inputs[i+1]),0),torch.unsqueeze(targets[i+1],0))
# return ce2d_loss
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True, ignore_index=255):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index)
def forward(self, inputs, targets):
# print(targets.size())
return self.nll_loss(F.log_softmax(inputs), torch.squeeze(targets))
class Solver(object):
def __init__(self, celebA_loader, rafd_loader, config):
# Data loader
self.celebA_loader = celebA_loader
self.rafd_loader = rafd_loader
self.visualizer = Visualizer()
# Model hyper-parameters
self.c_dim = config.c_dim
self.s_dim = config.s_dim
self.c2_dim = config.c2_dim
self.image_size = config.image_size
self.g_conv_dim = config.g_conv_dim
self.d_conv_dim = config.d_conv_dim
self.g_repeat_num = config.g_repeat_num
self.d_repeat_num = config.d_repeat_num
self.d_train_repeat = config.d_train_repeat
# Hyper-parameters
self.lambda_cls = config.lambda_cls
self.lambda_rec = config.lambda_rec
self.lambda_gp = config.lambda_gp
self.lambda_s = config.lambda_s
self.g_lr = config.g_lr
self.d_lr = config.d_lr
self.a_lr = config.a_lr
self.beta1 = config.beta1
self.beta2 = config.beta2
# Criterion
self.criterion_s = CrossEntropyLoss2d(size_average=True).cuda()
# Training settings
self.dataset = config.dataset
self.num_epochs = config.num_epochs
self.num_epochs_decay = config.num_epochs_decay
self.num_iters = config.num_iters
self.num_iters_decay = config.num_iters_decay
self.batch_size = config.batch_size
self.use_tensorboard = config.use_tensorboard
self.pretrained_model = config.pretrained_model
# Test settings
self.test_model = config.test_model
self.config = config
# Path
self.log_path = config.log_path
self.sample_path = config.sample_path
self.model_save_path = config.model_save_path
self.result_path = config.result_path
# Step size
self.log_step = config.log_step
self.visual_step = self.log_step
self.sample_step = config.sample_step
self.model_save_step = config.model_save_step
# Build tensorboard if use
self.build_model()
if self.use_tensorboard:
self.build_tensorboard()
# Start with trained model
if self.pretrained_model:
self.load_pretrained_model()
def build_model(self):
# Define a generator and a discriminator
if self.dataset == 'Both':
self.G = Generator(self.g_conv_dim, self.c_dim+self.c2_dim+2, self.g_repeat_num) # 2 for mask vector
self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim+self.c2_dim, self.d_repeat_num)
else:
self.G = Generator(self.g_conv_dim, self.c_dim, self.s_dim, self.g_repeat_num)
self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num)
self.A = Segmentor()
# Optimizers
self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])
self.a_optimizer = torch.optim.Adam(self.A.parameters(), self.a_lr, [self.beta1, self.beta2])
# Print networks
self.print_network(self.G, 'G')
self.print_network(self.D, 'D')
self.print_network(self.A, 'A')
if torch.cuda.is_available():
self.G.cuda()
self.D.cuda()
self.A.cuda()
def print_network(self, model, name):
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(name)
print(model)
print("The number of parameters: {}".format(num_params))
def load_pretrained_model(self):
self.G.load_state_dict(torch.load(os.path.join(
self.model_save_path, '{}_G.pth'.format(self.pretrained_model))))
self.D.load_state_dict(torch.load(os.path.join(
self.model_save_path, '{}_D.pth'.format(self.pretrained_model))))
self.A.load_state_dict(torch.load(os.path.join(
self.model_save_path, '{}_A.pth'.format(self.pretrained_model))))
print('loaded trained models (step: {})..!'.format(self.pretrained_model))
def build_tensorboard(self):
from logger import Logger
self.logger = Logger(self.log_path)
def update_lr(self, g_lr, d_lr):
for param_group in self.g_optimizer.param_groups:
param_group['lr'] = g_lr
for param_group in self.d_optimizer.param_groups:
param_group['lr'] = d_lr
def reset_grad(self):
self.g_optimizer.zero_grad()
self.d_optimizer.zero_grad()
def to_var(self, x, volatile=False):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, volatile=volatile)
def denorm(self, x):
out = (x + 1) / 2
return out.clamp_(0, 1)
def threshold(self, x):
x = x.clone()
x[x >= 0.5] = 1
x[x < 0.5] = 0
return x
def compute_accuracy(self, x, y, dataset):
if dataset == 'CelebA':
x = F.sigmoid(x)
predicted = self.threshold(x)
correct = (predicted == y).float()
accuracy = torch.mean(correct, dim=0) * 100.0
else:
_, predicted = torch.max(x, dim=1)
correct = (predicted == y).float()
accuracy = torch.mean(correct) * 100.0
return accuracy
def one_hot(self, labels, dim):
"""Convert label indices to one-hot vector"""
batch_size = labels.size(0)
out = torch.zeros(batch_size, dim)
out[np.arange(batch_size), labels.long()] = 1
return out
def make_celeb_labels(self, real_c):
"""Generate domain labels for CelebA for debugging/testing.
if dataset == 'CelebA':
return single and multiple attribute changes
elif dataset == 'Both':
return single attribute changes
"""
y = [torch.FloatTensor([1, 0, 0]), # black hair
torch.FloatTensor([0, 1, 0]), # blond hair
torch.FloatTensor([0, 0, 1])] # brown hair
fixed_c_list = []
# single attribute transfer
for i in range(self.c_dim):
fixed_c = real_c.clone()
for c in fixed_c:
if i < 3:
c[:3] = y[i]
else:
c[i] = 0 if c[i] == 1 else 1 # opposite value
fixed_c_list.append(self.to_var(fixed_c, volatile=True))
# multi-attribute transfer (H+G, H+A, G+A, H+G+A)
if self.dataset == 'CelebA':
for i in range(4):
fixed_c = real_c.clone()
for c in fixed_c:
if i in [0, 1, 3]: # Hair color to brown
c[:3] = y[2]
if i in [0, 2, 3]: # Gender
c[3] = 0 if c[3] == 1 else 1
if i in [1, 2, 3]: # Aged
c[4] = 0 if c[4] == 1 else 1
fixed_c_list.append(self.to_var(fixed_c, volatile=True))
return fixed_c_list
def train(self):
"""Train StarGAN within a single dataset."""
# Set dataloader
if self.dataset == 'CelebA':
self.data_loader = self.celebA_loader
else:
self.data_loader = self.rafd_loader
# The number of iterations per epoch
iters_per_epoch = len(self.data_loader)
fixed_x = []
real_c = []
fixed_s = []
for i, (images, seg_i, seg, labels) in enumerate(self.data_loader):
fixed_x.append(images)
fixed_s.append(seg)
real_c.append(labels)
if i == 3:
break
# Fixed inputs and target domain labels for debugging
fixed_x = torch.cat(fixed_x, dim=0)
fixed_x = self.to_var(fixed_x, volatile=True)
real_c = torch.cat(real_c, dim=0)
fixed_s = torch.cat(fixed_s, dim=0)
fixed_s_list = []
fixed_s_list.append(self.to_var(fixed_s, volatile=True))
rand_idx = torch.randperm(fixed_s.size(0))
fixed_s_num = 5
fixed_s_vec = fixed_s[rand_idx][:fixed_s_num]
for i in range(fixed_s_num):
fixed_s_temp = fixed_s_vec[i].unsqueeze(0).repeat(fixed_s.size(0),1,1,1)
fixed_s_temp = self.to_var(fixed_s_temp)
fixed_s_list.append(fixed_s_temp)
# for i in range(4):
# rand_idx = torch.randperm(fixed_s.size(0))
# fixed_s_temp = self.to_var(fixed_s[rand_idx], volatile=True)
# fixed_s_list.append(fixed_s_temp)
if self.dataset == 'CelebA':
fixed_c_list = self.make_celeb_labels(real_c)
elif self.dataset == 'RaFD':
fixed_c_list = []
for i in range(self.c_dim):
fixed_c = self.one_hot(torch.ones(fixed_x.size(0)) * i, self.c_dim)
fixed_c_list.append(self.to_var(fixed_c, volatile=True))
# lr cache for decaying
g_lr = self.g_lr
d_lr = self.d_lr
# Start with trained model if exists
if self.pretrained_model:
start = int(self.pretrained_model.split('_')[0])-1
else:
start = 0
# Start training
start_time = time.time()
for e in range(start, self.num_epochs):
epoch_iter = 0
for i, (real_x, real_s_i, real_s, real_label) in enumerate(self.data_loader):
epoch_iter = epoch_iter + 1
# Generate fake labels randomly (target domain labels)
rand_idx = torch.randperm(real_label.size(0))
fake_label = real_label[rand_idx]
rand_idx = torch.randperm(real_label.size(0))
fake_s = real_s[rand_idx]
fake_s_i = real_s_i[rand_idx]
if self.dataset == 'CelebA':
real_c = real_label.clone()
fake_c = fake_label.clone()
else:
real_c = self.one_hot(real_label, self.c_dim)
fake_c = self.one_hot(fake_label, self.c_dim)
# Convert tensor to variable
real_x = self.to_var(real_x)
real_s = self.to_var(real_s)
real_s_i = self.to_var(real_s_i)
fake_s = self.to_var(fake_s)
fake_s_i = self.to_var(fake_s_i)
real_c = self.to_var(real_c) # input for the generator
fake_c = self.to_var(fake_c)
real_label = self.to_var(real_label) # this is same as real_c if dataset == 'CelebA'
fake_label = self.to_var(fake_label)
# ================== Train D ================== #
# Compute loss with real images
out_src, out_cls = self.D(real_x)
d_loss_real = - torch.mean(out_src)
if self.dataset == 'CelebA':
d_loss_cls = F.binary_cross_entropy_with_logits(
out_cls, real_label, size_average=False) / real_x.size(0)
else:
d_loss_cls = F.cross_entropy(out_cls, real_label)
# Compute classification accuracy of the discriminator
if (i+1) % self.log_step == 0:
accuracies = self.compute_accuracy(out_cls, real_label, self.dataset)
log = ["{:.2f}".format(acc) for acc in accuracies.data.cpu().numpy()]
if self.dataset == 'CelebA':
print('Classification Acc (Black/Blond/Brown/Gender/Aged): ', end='')
else:
print('Classification Acc (8 emotional expressions): ', end='')
print(log)
# Compute loss with fake images
fake_x = self.G(real_x, fake_c, fake_s)
fake_x = Variable(fake_x.data)
out_src, out_cls = self.D(fake_x)
d_loss_fake = torch.mean(out_src)
# Backward + Optimize
d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
# Compute gradient penalty
alpha = torch.rand(real_x.size(0), 1, 1, 1).cuda().expand_as(real_x)
interpolated = Variable(alpha * real_x.data + (1 - alpha) * fake_x.data, requires_grad=True)
out, out_cls = self.D(interpolated)
grad = torch.autograd.grad(outputs=out,
inputs=interpolated,
grad_outputs=torch.ones(out.size()).cuda(),
retain_graph=True,
create_graph=True,
only_inputs=True)[0]
grad = grad.view(grad.size(0), -1)
grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
d_loss_gp = torch.mean((grad_l2norm - 1)**2)
# Backward + Optimize
d_loss = self.lambda_gp * d_loss_gp
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
# ================== Train A ================== #
self.a_optimizer.zero_grad()
out_real_s = self.A(real_x)
# a_loss = self.criterion_s(out_real_s, real_s_i.type(torch.cuda.LongTensor)) * self.lambda_s
a_loss = self.criterion_s(out_real_s, real_s_i) * self.lambda_s
# a_loss = torch.mean(torch.abs(real_s - out_real_s))
a_loss.backward()
self.a_optimizer.step()
# Logging
loss = {}
loss['D/loss_real'] = d_loss_real.data[0]
loss['D/loss_fake'] = d_loss_fake.data[0]
loss['D/loss_cls'] = d_loss_cls.data[0]
loss['D/loss_gp'] = d_loss_gp.data[0]
# ================== Train G ================== #
if (i+1) % self.d_train_repeat == 0:
# Original-to-target and target-to-original domain
fake_x = self.G(real_x, fake_c, fake_s)
rec_x = self.G(fake_x, real_c, real_s)
# Compute losses
out_src, out_cls = self.D(fake_x)
g_loss_fake = - torch.mean(out_src)
g_loss_rec = self.lambda_rec * torch.mean(torch.abs(real_x - rec_x))
if self.dataset == 'CelebA':
g_loss_cls = F.binary_cross_entropy_with_logits(
out_cls, fake_label, size_average=False) / fake_x.size(0)
else:
g_loss_cls = F.cross_entropy(out_cls, fake_label)
# segmentation loss
out_fake_s = self.A(fake_x)
g_loss_s = self.lambda_s * self.criterion_s(out_fake_s, fake_s_i)
# Backward + Optimize
g_loss = g_loss_fake + g_loss_rec + g_loss_s + self.lambda_cls * g_loss_cls
# g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging
loss['G/loss_fake'] = g_loss_fake.data[0]
loss['G/loss_rec'] = g_loss_rec.data[0]
loss['G/loss_cls'] = g_loss_cls.data[0]
if (i+1) % self.visual_step == 0:
# save visuals
self.real_x = real_x
self.fake_x = fake_x
self.rec_x = rec_x
self.real_s = real_s
self.fake_s = fake_s
self.out_real_s = out_real_s
self.out_fake_s = out_fake_s
self.a_loss = a_loss
# save losses
self.d_real = - d_loss_real
self.d_fake = d_loss_fake
self.d_loss = d_loss
self.g_loss = g_loss
self.g_loss_fake = g_loss_fake
self.g_loss_rec = g_loss_rec
self.g_loss_s = g_loss_s
errors_D = self.get_current_errors('D')
errors_G = self.get_current_errors('G')
self.visualizer.display_current_results(self.get_current_visuals(), e)
self.visualizer.plot_current_errors_D(e, float(epoch_iter)/float(iters_per_epoch), errors_D)
self.visualizer.plot_current_errors_G(e, float(epoch_iter)/float(iters_per_epoch), errors_G)
# Print out log info
if (i+1) % self.log_step == 0:
elapsed = time.time() - start_time
elapsed = str(datetime.timedelta(seconds=elapsed))
log = "Elapsed [{}], Epoch [{}/{}], Iter [{}/{}]".format(
elapsed, e+1, self.num_epochs, i+1, iters_per_epoch)
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log)
if self.use_tensorboard:
for tag, value in loss.items():
self.logger.scalar_summary(tag, value, e * iters_per_epoch + i + 1)
# Translate fixed images for debugging
if (i+1) % self.sample_step == 0:
fake_image_list = [fixed_x]
fixed_c = fixed_c_list[0]
real_seg_list = []
for fixed_c in fixed_c_list:
for fixed_s in fixed_s_list:
fake_image_list.append(self.G(fixed_x, fixed_c, fixed_s))
real_seg_list.append(fixed_s)
fake_images = torch.cat(fake_image_list, dim=3)
real_seg_images = torch.cat(real_seg_list, dim=3)
save_image(self.denorm(fake_images.data),
os.path.join(self.sample_path, '{}_{}_fake.png'.format(e+1, i+1)),nrow=1, padding=0)
save_image(self.cat2class_tensor(real_seg_images.data),
os.path.join(self.sample_path, '{}_{}_seg.png'.format(e+1, i+1)),nrow=1, padding=0)
print('Translated images and saved into {}..!'.format(self.sample_path))
# Save model checkpoints
if (i+1) % self.model_save_step == 0:
torch.save(self.G.state_dict(),
os.path.join(self.model_save_path, '{}_{}_G.pth'.format(e+1, i+1)))
torch.save(self.D.state_dict(),
os.path.join(self.model_save_path, '{}_{}_D.pth'.format(e+1, i+1)))
torch.save(self.A.state_dict(),
os.path.join(self.model_save_path, '{}_{}_A.pth'.format(e+1, i+1)))
# Decay learning rate
if (e+1) > (self.num_epochs - self.num_epochs_decay):
g_lr -= (self.g_lr / float(self.num_epochs_decay))
d_lr -= (self.d_lr / float(self.num_epochs_decay))
self.update_lr(g_lr, d_lr)
print ('Decay learning rate to g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
def make_celeb_labels_test(self):
"""Generate domain labels for CelebA for debugging/testing.
if dataset == 'CelebA':
return single and multiple attribute changes
elif dataset == 'Both':
return single attribute changes
"""
y = [torch.FloatTensor([1, 0, 0]), # black hair
torch.FloatTensor([0, 1, 0]), # blond hair
torch.FloatTensor([0, 0, 1])] # brown hair
fixed_c_list = []
fixed_c_list.append(self.to_var(torch.FloatTensor([1,0,0,1,1]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,1,0,1,1]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,0,1,1,1]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([1,0,0,1,0]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,1,0,1,0]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,0,1,1,0]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([1,0,0,0,1]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,1,0,0,1]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,0,1,0,1]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([1,0,0,0,0]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,1,0,0,0]).unsqueeze(0), volatile=True))
fixed_c_list.append(self.to_var(torch.FloatTensor([0,0,1,0,0]).unsqueeze(0), volatile=True))
return fixed_c_list
def test(self):
"""Facial attribute transfer on CelebA or facial expression synthesis on RaFD."""
# Load trained parameters
G_path = os.path.join(self.model_save_path, '{}_G.pth'.format(self.test_model))
self.G.load_state_dict(torch.load(G_path))
self.G.eval()
fixed_c_list = self.make_celeb_labels_test()
transform = transforms.Compose([
transforms.CenterCrop(self.config.celebA_crop_size),
transforms.Scale(self.config.image_size),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
transform_seg1 = transforms.Compose([
transforms.CenterCrop(self.config.celebA_crop_size),
transforms.Scale(self.config.image_size)])
transform_seg2 = transforms.Compose([
transforms.ToTensor()])
for root, _, fnames in sorted(os.walk(self.config.test_image_path)):
for fname in fnames:
path = os.path.join(root, fname)
image = Image.open(path)
image = transform(image)
image = image.unsqueeze(0)
x = self.to_var(image, volatile=True)
fake_image_mat = []
for fixed_c in fixed_c_list:
fake_image_list = [x]
for i in range(11):
seg = Image.open(os.path.join(self.config.test_seg_path, '{}.png'.format(i+1)))
seg = transform_seg1(seg)
num_s = 7
seg_onehot = to_categorical(seg, num_s)
seg_onehot = transform_seg2(seg_onehot)*255.0
seg_onehot = seg_onehot.unsqueeze(0)
s = self.to_var(seg_onehot, volatile=True)
fake_x = self.G(x,fixed_c,s)
fake_image_list.append(fake_x)
# save_path = os.path.join(self.result_path, 'fake_x_{}.png'.format(i+1))
# save_image(self.denorm(fake_x.data), save_path, nrow=1, padding=0)
fake_images = torch.cat(fake_image_list, dim=2)
fake_image_mat.append(fake_images)
fake_images_save = torch.cat(fake_image_mat, dim=3)
save_path = os.path.join(self.result_path, 'fake_x_sum_{}.png'.format(fname))
print('Translated test images and saved into "{}"..!'.format(save_path))
save_image(self.denorm(fake_images_save.data), save_path, nrow=1, padding=0)
# # Start translations
# fake_image_list = [real_x]
# for target_c in target_c_list:
# fake_image_list.append(self.G(real_x, target_c))
# fake_images = torch.cat(fake_image_list, dim=3)
# save_path = os.path.join(self.result_path, '{}_fake.png'.format(i+1))
# save_image(self.denorm(fake_images.data), save_path, nrow=1, padding=0)
# print('Translated test images and saved into "{}"..!'.format(save_path))
def test_with_original_seg(self):
"""Facial attribute transfer on CelebA or facial expression synthesis on RaFD."""
# Load trained parameters
G_path = os.path.join(self.model_save_path, '{}_G.pth'.format(self.test_model))
self.G.load_state_dict(torch.load(G_path))
self.G.eval()
fixed_c_list = self.make_celeb_labels_test()
transform = transforms.Compose([
# transforms.CenterCrop(self.config.celebA_crop_size),
transforms.Scale(self.config.image_size),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
transform_seg1 = transforms.Compose([
transforms.CenterCrop(self.config.celebA_crop_size),
transforms.Scale(self.config.image_size)])
transform_seg2 = transforms.Compose([
transforms.ToTensor()])
for root, _, fnames in sorted(os.walk(self.config.test_image_path)):
for fname in fnames:
path = os.path.join(root, fname)
image = Image.open(path)
image = transform(image)
image = image.unsqueeze(0)
x = self.to_var(image, volatile=True)
fake_image_mat = []
for fixed_c in fixed_c_list:
fake_image_list = [x]
seg = Image.open(os.path.join(self.config.test_seg_path, '{}.png'.format(fname[:-4])))
seg = transform_seg1(seg)
num_s = 7
seg_onehot = to_categorical(seg, num_s)
seg_onehot = transform_seg2(seg_onehot)*255.0
seg_onehot = seg_onehot.unsqueeze(0)
s = self.to_var(seg_onehot, volatile=True)
fake_x = self.G(x,fixed_c,s)
fake_image_list.append(fake_x)
# save_path = os.path.join(self.result_path, 'fake_x_{}.png'.format(i+1))
# save_image(self.denorm(fake_x.data), save_path, nrow=1, padding=0)
fake_images = torch.cat(fake_image_list, dim=3)
fake_image_mat.append(fake_images)
fake_images_save = torch.cat(fake_image_mat, dim=2)
save_path = os.path.join(self.result_path, 'fake_x_sum_{}.png'.format(fname))
print('Translated test images and saved into "{}"..!'.format(save_path))
save_image(self.denorm(fake_images_save.data), save_path, nrow=1, padding=0)
# # Start translations
# fake_image_list = [real_x]
# for target_c in target_c_list:
# fake_image_list.append(self.G(real_x, target_c))
# fake_images = torch.cat(fake_image_list, dim=3)
# save_path = os.path.join(self.result_path, '{}_fake.png'.format(i+1))
# save_image(self.denorm(fake_images.data), save_path, nrow=1, padding=0)
# print('Translated test images and saved into "{}"..!'.format(save_path))
def test_seg(self):
"""Facial attribute transfer on CelebA or facial expression synthesis on RaFD."""
# Load trained parameters
A_path = os.path.join(self.model_save_path, '{}_A.pth'.format(self.test_model))
self.A.load_state_dict(torch.load(A_path))
self.A.eval()
transform = transforms.Compose([
# transforms.CenterCrop(self.config.celebA_crop_size),
transforms.Scale(self.config.image_size),
# transforms.Scale(178),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
for root, _, fnames in sorted(os.walk(self.config.test_image_path)):
for fname in fnames:
path = os.path.join(root, fname)
image = Image.open(path)
print('Read image "{}"..!'.format(fname))
image = transform(image)
image = image.unsqueeze(0)
x = self.to_var(image, volatile=True)
seg = self.A(x)
seg_numpy = seg.data[0].cpu().float().numpy()
seg_numpy = np.transpose(seg_numpy, (1, 2, 0)).astype(np.float)
import scipy.io as sio
sio.savemat('segnumpy.mat',{'seg':seg_numpy})
print('Translated seg images and saved into "{}"..!'.format('segnumpy.mat'))
def get_current_errors(self, label='all'):
D_fake = self.d_fake.data[0]
D_real = self.d_real.data[0]
# D_fake = self.D_fake.data[0]
# D_real = self.D_real.data[0]
A_loss = self.a_loss.data[0]
D_loss = self.d_loss.data[0]
G_loss = self.g_loss.data[0]
G_loss_fake = self.g_loss_fake.data[0]
G_loss_s = self.g_loss_s.data[0]
G_loss_rec = self.g_loss_rec.data[0]
if label == 'all':
return OrderedDict([('D_fake', D_fake),
('D_real', D_real),
('D', D_loss),
('A_loss', A_loss),
('G', G_loss),
('G_loss_fake', G_loss_fake),
('G_loss_s', G_loss_s),
('G_loss_rec', G_loss_rec)])
if label == 'D':
return OrderedDict([('D_fake', D_fake),
('D_real', D_real),
('D', D_loss),
('A_loss', A_loss)])
if label == 'G':
return OrderedDict([('A_loss', A_loss),
('G', G_loss),
('G_loss_fake', G_loss_fake),
('G_loss_s', G_loss_s),
('G_loss_rec', G_loss_rec)])
def get_current_visuals(self):
real_x = util.tensor2im(self.real_x.data)
fake_x = util.tensor2im(self.fake_x.data)
rec_x = util.tensor2im(self.rec_x.data)
real_s = util.tensor2im_seg(self.real_s.data)
fake_s = util.tensor2im_seg(self.fake_s.data)
out_real_s = util.tensor2im_seg(self.out_real_s.data)
out_fake_s = util.tensor2im_seg(self.out_fake_s.data)
return OrderedDict([('real_x', real_x),
('fake_x', fake_x),
('rec_x', rec_x),
('real_s', self.cat2class(real_s)),
('fake_s', self.cat2class(fake_s)),
('out_real_s', self.cat2class(out_real_s)),
('out_fake_s', self.cat2class(out_fake_s))
])
def cat2class(self, m):
y = np.zeros((np.size(m,0),np.size(m,1)),dtype='float64')
for i in range(np.size(m,2)):
y = y + m[:,:,i]*i
y = y / float(
|
np.max(y)
|
numpy.max
|
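A minimal sketch of the cat2class normalization completed above, using np.argmax as an equivalent way to collapse a toy one-hot (H, W, C) segmentation into class indices before dividing by np.max; the zero guard is an addition for the toy case, not part of the original method.
import numpy as np

m = np.zeros((2, 2, 3))
m[0, 0, 1] = 1
m[1, 1, 2] = 1                                # two labelled pixels, rest class 0
y = np.argmax(m, axis=2).astype('float64')    # class-index image
peak = np.max(y)
y = y / peak if peak > 0 else y               # scale to [0, 1]; guard all-zero maps
print(y)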
import numpy as np
import torch.nn.functional as F
import torch
def rotate_a_b_axis_angle_torch_batched(a, b):
a = a / torch.norm(a, dim=1, keepdim=True)
b = b / torch.norm(b, dim=1, keepdim=True)
rot_axis = torch.cross(a, b)
a_proj = b * torch.sum(a * b, dim=1, keepdim=True)
a_ort = a - a_proj
theta = torch.atan2(
torch.norm(a_ort, dim=1, keepdim=True),
torch.norm(a_proj, dim=1, keepdim=True)
)
theta[torch.sum(a * b, dim=1) < 0] = np.pi - theta[torch.sum(a * b, dim=1) < 0]
aa = rot_axis / torch.norm(rot_axis, dim=1, keepdim=True) * theta
return aa
def rotate_a_b_axis_angle(a, b):
a = a / np.linalg.norm(a)
b = b /
|
np.linalg.norm(b)
|
numpy.linalg.norm
|
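A minimal sketch of the single-vector NumPy routine whose body is cut off above, assuming a and b are nonzero, non-parallel 3-vectors: both are normalized with np.linalg.norm (the completion in this row), and the rest mirrors the batched torch version as a sketch, not the original function body.
import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
a = a / np.linalg.norm(a)                     # normalize, as in the truncated function
b = b / np.linalg.norm(b)                     # the np.linalg.norm completion above
rot_axis = np.cross(a, b)                     # rotation axis (zero if a, b are parallel)
theta = np.arctan2(np.linalg.norm(rot_axis), np.dot(a, b))
axis = rot_axis / np.linalg.norm(rot_axis)    # unit rotation axis
print(axis * theta)                           # axis-angle vector, here [0, 0, pi/2]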
"""
:py:mod:`mcmc_utils.py`
-------------------------------------
"""
import numpy as np
__all__ = ["estimate_burnin"]
# ================================
# emcee utils
# ================================
def estimate_burnin(sampler, est_burnin=True, thin_chains=True, verbose=False):
"""
Estimate the integrated autocorrelation length on the MCMC chain associated
with an emcee sampler object. With the integrated autocorrelation length,
we can then estimate the burn-in length for the MCMC chain. This procedure
follows the example outlined here:
https://emcee.readthedocs.io/en/stable/tutorials/autocorr/
:param sampler: (*emcee.EnsembleSampler, optional*)
emcee MCMC sampler object/backend handler, given a complete chain
:param est_burnin: (*bool, optional*)
Estimate burn-in time using integrated autocorrelation time
heuristic. Defaults to True. In general, we recommend users
inspect the chains and calculate the burnin after the fact to ensure
convergence, but this function works pretty well.
:param thin_chains: (*bool, optional*)
Whether or not to thin chains. Useful if running long chains.
Defaults to True. If true, estimates a thin cadence
via int(0.5*np.min(tau)) where tau is the intergrated autocorrelation
time.
:param verbose: (*bool, optional*)
Output all the diagnostics? Defaults to False.
:returns iburn: (*int*)
burn-in index estimate. If est_burnin == False, returns 0.
:returns ithin: (*int*)
thin cadence estimate. If thin_chains == False, returns 1.
"""
# Set tol = 0 so it always returns an answer
tau = sampler.get_autocorr_time(tol=0)
# Catch NaNs
if np.any(~
|
np.isfinite(tau)
|
numpy.isfinite
|
import numpy as np
import time
def dft(x):
x = np.asarray(x, dtype=np.cdouble)
N = x.shape[0]
n = np.arange(N)
k = n.reshape((N, 1))
M = np.exp(-2j * np.pi * k * n / N)
return np.dot(M, x)
def inv_dft(C):
xa = np.asarray(C, dtype=np.cdouble)  # keep the complex dtype; xa is only used to read the length N
N = xa.shape[0]
n = np.arange(N)
k = n.reshape((N, 1))
M = np.exp(2j * np.pi * k * n / N)
M_inverse = np.linalg.inv(M)
VC = np.dot(M_inverse,C)
return VC
def fft(poly_a):
poly_a = np.asarray(poly_a, dtype=np.cdouble)
N = poly_a.shape[0]
if N % 2 > 0:
raise ValueError("must be a power of 2")
elif N <= 2:
return dft(poly_a)
else:
even_terms = fft(poly_a[::2])
odd_terms = fft(poly_a[1::2])
terms = np.exp((-2j * np.pi *
|
np.arange(N)
|
numpy.arange
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 30 22:36:45 2022
@author: erri
This function provides statistics over different slicing methods of the DoD.
input:
DoD: 2D numpy array
window_mode: integer
window_base: integer (in number of columns)
output:
mean_array_tot : numpy array
array of data average for each window
std_array_tot: numpy array
array of data standard deviation for each window
x_data_tot: numpy array
array of windows dimension coherent with mean and std data
window_boundary: numpy array
array of the windows boundary
"""
import numpy as np
import os
import math
run = 'q07_1'
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
home_dir = os.getcwd()
DoDs_dir = os.path.join(home_dir, 'DoDs')
DoD_path = os.path.join(DoDs_dir, 'DoD_' + run, DoD_name)
DoD = np.loadtxt(DoD_path, delimiter='\t')
DoD = np.where(DoD==-999, np.nan, DoD)
W = 12
window_mode = 3
mean_array_tot = []
std_array_tot= []
x_data_tot=[]
window_boundary = np.array([0,0])
if window_mode == 1:
# With overlapping
for w in range(1, int(math.floor(DoD.shape[1]/W))+1): # W*w is the dimension of every possible window
print(w*W)
mean_array = []
std_array= []
x_data=[]
for i in range(0, DoD.shape[1]+1):
if i+w*W <= DoD.shape[1]:
window = DoD[:, i:W*w+i]
boundary = np.array([i,W*w+i])
window_boundary = np.vstack((window_boundary, boundary))
mean = np.nanmean(window)
std = np.nanstd(window)
mean_array = np.append(mean_array, mean)
std_array = np.append(std_array, std)
x_data=np.append(x_data, w)
mean_array_tot = np.append(mean_array_tot, np.nanmean(mean_array))
std_array_tot= np.append(std_array_tot, np.nanstd(std_array)) #TODO check this
x_data_tot=np.append(x_data_tot, np.nanmean(x_data))
window_boundary = window_boundary[1:,:] # drop the [0,0] initialization row, keep all window boundaries
if window_mode == 2:
# Without overlapping
for w in range(1, int(math.floor(DoD.shape[1]/W))+1): # W*w is the dimension of every possible window
print(w*W)
mean_array = []
std_array= []
x_data=[]
for i in range(0, DoD.shape[1]+1):
if W*w*(i+1) <= DoD.shape[1]:
window = DoD[:, W*w*i:W*w*(i+1)]
boundary = np.array([W*w*i,W*w*(i+1)])
window_boundary = np.vstack((window_boundary, boundary))
mean = np.nanmean(window)
std = np.nanstd(window)
mean_array = np.append(mean_array, mean)
std_array = np.append(std_array, std)
x_data=np.append(x_data, w)
mean_array_tot = np.append(mean_array_tot, np.nanmean(mean_array))
std_array_tot= np.append(std_array_tot, np.nanstd(std_array)) #TODO check this
x_data_tot=np.append(x_data_tot, np.nanmean(x_data))
window_boundary = window_boundary[1:,:] # drop the [0,0] initialization row, keep all window boundaries
if window_mode == 3:
# Increasing window size, anchored at the upstream (left) edge
mean_array = []
std_array= []
x_data=[]
for i in range(0, DoD.shape[1]+1):
if W*(i+1) <= DoD.shape[1]:
print(i*W)
window = DoD[:, 0:W*(i+1)]
boundary = np.array([0,W*(i+1)])
window_boundary = np.vstack((window_boundary, boundary))
mean = np.nanmean(window)
std = np.nanstd(window)
mean_array =
|
np.append(mean_array, mean)
|
numpy.append
|
"""
A module that handles variable data types.
"""
#-------------------------------------------------------------------------------
import numpy as np
import sympy
import functools
import operator
#-------------------------------------------------------------------------------
VTYPES = {
bool: {bool, np.dtype('bool')},
int: {int, np.dtype('int'), np.dtype('int32'), np.dtype('int64'),
np.uint, np.uint8, np.uint16, np.uint32, np.uint64},
float: {float, np.dtype('float'), np.dtype('float32'), np.dtype('float64')},
}
OO = sympy.oo
OO_TO_NP = {OO: np.inf, -OO: -np.inf}
#-------------------------------------------------------------------------------
def eval_vtype(vtype):
""" Evaluates a variable type from input vtype.
:param vtype: input object
:return: either bool, int, float if correctly detected.
Otherwise the dtype is returned.
"""
if isinstance(vtype, set):
vtype = list(vtype)
if isinstance(vtype, (list, tuple)):
if any([isinstance(_vtype, tuple) for _vtype in vtype]):
vtype = np.concatenate([np.array(_vtype).reshape([1]) \
for _vtype in vtype]).dtype
else:
vtype = np.array(vtype)
if hasattr(vtype, 'dtype'):
vtype = vtype.dtype
else:
_vtype = vtype
while _vtype != type:
vtype = _vtype
_vtype = type(vtype)
if vtype in VTYPES.keys():
return vtype
for key, val in VTYPES.items():
found = vtype in set(val)
if found:
vtype = key
break
return vtype
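#-------------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the helper below is hypothetical and
# is never called automatically): plain Python scalars collapse onto the
# canonical bool/int/float keys used in VTYPES.
def _eval_vtype_example():
    assert eval_vtype(3) is int
    assert eval_vtype(2.5) is float
    assert eval_vtype(True) is bool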
#-------------------------------------------------------------------------------
def isunitset(var, vtype=None):
""" Returns boolean flag as to whether var is a set of one element.
:param var: variable to assess.
:param vtype: optional filter for variable type (e.g. int or float)
"""
if vtype is None:
vtype = list(VTYPES.keys())
elif not isinstance(vtype, (tuple,list)):
vtype = [vtype]
vtypes = functools.reduce(operator.concat, [list(VTYPES[key]) for key in vtype])
if isinstance(var, set):
if len(var) == 1:
element_type = type(list(var)[0])
if element_type in vtypes:
return True
return False
#-------------------------------------------------------------------------------
def isunitsetint(var):
""" Returns boolean flag as to whether var is a set of one integer element.
Note: Usage depends on class:
RVs, RJs, RFs: set(int) is a sample specification denoting number of samples:
positive values request samples using linear interpolation
negative values request samples using random generation.
Dist: set(int): proxies as a 'value' for a variable as a set of size int.
"""
return isunitset(var, int)
#-------------------------------------------------------------------------------
def isunitsetfloat(var):
""" Returns boolean flag as to whether var is a set of one float element.
Usage requests a sampling of value from a ICDF for then given P.
"""
return isunitset(var, float)
#-------------------------------------------------------------------------------
def isscalar(var):
""" If var is a dimensionless numpy array of size 1 or if the var is a Sympy
Float or Integer, this function returns True, otherwise it returns
np.isscalar(var).
"""
if isinstance(var, np.ndarray):
if var.ndim == 0 and var.size == 1:
return True
if isinstance(var, (sympy.Float, sympy.Integer)):
return True
return np.isscalar(var)
#-------------------------------------------------------------------------------
def issingleton(var):
""" If isunitset(var) is True, this function returns True,
otherwise isscalar(var) is returned.
"""
# Here we define singleton as a unit set or scalar
if isunitset(var):
return True
return isscalar(var)
#-------------------------------------------------------------------------------
def isunitary(var):
""" If isscalar(var) is True, this function returns True,
otherwise if var is numpy array of size 1, this function returns True,
False is returned.
"""
if isscalar(var):
return True
if isinstance(var, np.ndarray):
if var.size == 1:
return True
return False
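#-------------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the helper below is hypothetical and
# is never called automatically): how the singleton/unitary predicates behave.
def _singleton_predicates_example():
    assert issingleton({5})                    # a unit set counts as a singleton
    assert issingleton(3.14)                   # a scalar counts as a singleton
    assert not issingleton(np.array([1, 2]))   # a multi-element array is neither
    assert isunitary(np.array([[2.0]]))        # any size-1 array is unitary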
#-------------------------------------------------------------------------------
def isfinite(var):
if isinstance(var, np.ndarray) or np.isscalar(var):
return
|
np.isfinite(var)
|
numpy.isfinite
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.classify_input_type import check_col_type
def string_split(table, **params):
check_required_parameters(_string_split, params, ['table'])
return _string_split(table, **params)
def _string_split(table, input_col, hold_cols=None, delimiter=',', output_col_name='split', output_col_cnt=3, output_col_type='double', start_pos=0, end_pos=0):
out_table = pd.DataFrame()
output_arr = [x[start_pos:-end_pos].split(delimiter, output_col_cnt) \
if not pd.isnull(x) \
else [x] * output_col_cnt for x in list(table[input_col])] \
if end_pos > 0 \
else [x[start_pos:].split(delimiter, output_col_cnt) \
if not pd.isnull(x) \
else [x] * output_col_cnt for x in list(table[input_col])]
head_arr = [x[:start_pos] \
if not pd.isnull(x) \
else '' for x in list(table[input_col])]
tail_arr = [x[-end_pos:] \
if not pd.isnull(x) and len(x) >= start_pos + end_pos \
else '' for x in list(table[input_col])] \
if end_pos > 0 \
else [''] * len(list(table[input_col]))
remain_arr = [x[output_col_cnt] if len(x) > output_col_cnt else '' for x in output_arr]
remain_arr = [head_arr[i] + remain_arr[i] + tail_arr[i] \
if not pd.isnull(table[input_col][i]) \
else None for i in range(len(list(table[input_col])))]
for i, output in enumerate(output_arr):
if len(output) < output_col_cnt:
output += [None] * (output_col_cnt - len(output))
output_arr[i] = output_arr[i][:output_col_cnt]
for j, value in enumerate(output_arr[i]):
try:
if output_col_type in ['integer', 'integer_arr']:
tmp = value.split('.')
output_arr[i][j] = np.int32(tmp[0]) if len(tmp) <= 2 else None
elif output_col_type in ['long', 'long_arr']:
tmp = value.split('.')
output_arr[i][j] =
|
np.int64(tmp[0])
|
numpy.int64
|
# This code is adapted for our purposes from original code online:
# -----------------------------------------------------------------------------
# From Python to Numpy
# Copyright (2017) <NAME> - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.animation import FuncAnimation, PillowWriter
from matplotlib.collections import PathCollection
import sys
import os
class MarkerCollection:
"""
Marker collection
"""
def __init__(self, n=100):
v = np.array([(-0.25, -0.25), (+0.0, +0.5), (+0.25, -0.25), (0, 0)])
c = np.array([Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
self._base_vertices = np.tile(v.reshape(-1), n).reshape(n, len(v), 2)
self._vertices = np.tile(v.reshape(-1), n).reshape(n, len(v), 2)
self._codes = np.tile(c.reshape(-1), n)
self._scale = np.ones(n)
self._translate = np.zeros((n, 2))
self._rotate = np.zeros(n)
self._path = Path(vertices=self._vertices.reshape(n*len(v), 2),
codes=self._codes)
self._collection = PathCollection([self._path], linewidth=0.5,
facecolor="k", edgecolor="w")
def update(self):
n = len(self._base_vertices)
self._vertices[...] = self._base_vertices * self._scale
cos_rotate, sin_rotate = np.cos(self._rotate), np.sin(self._rotate)
R = np.empty((n, 2, 2))
R[:, 0, 0] = cos_rotate
R[:, 1, 0] = sin_rotate
R[:, 0, 1] = -sin_rotate
R[:, 1, 1] = cos_rotate
self._vertices[...] = np.einsum('ijk,ilk->ijl', self._vertices, R)
self._vertices += self._translate.reshape(n, 1, 2)
class Flock:
def __init__(self, count=500, width=500, height=250):
self.width = width
self.height = height
self.min_velocity = 0.5
self.max_velocity = 2.0
self.max_acceleration = 0.03
self.velocity = np.zeros((count, 2), dtype=np.float32)
self.position = np.zeros((count, 2), dtype=np.float32)
angle = np.random.uniform(0, 2*np.pi, count)
self.velocity[:, 0] = np.cos(angle)
self.velocity[:, 1] = np.sin(angle)
angle = np.random.uniform(0, 2*np.pi, count)
radius = min(width, height)/2*np.random.uniform(0, 1, count)
self.position[:, 0] = width/2 + np.cos(angle)*radius
self.position[:, 1] = height/2 + np.sin(angle)*radius
def set_positions(self, positions):
self.position = positions
def run(self):
global distance, counter, sep, ali, coh
position = self.position
velocity = self.velocity
min_velocity = self.min_velocity
max_velocity = self.max_velocity
max_acceleration = self.max_acceleration
n = len(position)
dx = np.absolute(np.subtract.outer(position[:, 0], position[:, 0]))
dx = np.minimum(dx, self.width-dx)
dy = np.absolute(np.subtract.outer(position[:, 1], position[:, 1]))
dy =
|
np.minimum(dy, self.height-dy)
|
numpy.minimum
|
import sys
import numpy as np
import pandas as pd
import pytest
if sys.version_info >= (3, 0):
from dku_config.stl_config import STLConfig
from dku_timeseries.dku_decomposition.stl_decomposition import STLDecomposition
from timeseries_preparation.preparation import TimeseriesPreparator
@pytest.fixture
def data():
return [855404., 912462., 870896., 640361., 319947., 276845.,
208366., 192450., 200367., 347625., 459965., 641737.,
833240., 744755., 755849., 511676., 359276., 202110.,
174317., 141332., 186421., 376528., 525109., 759468.,
1030616., 976795.]
@pytest.fixture
def input_df(data):
df = pd.DataFrame.from_dict(
{"value1": data, "value2": data, "date": pd.date_range("1-1-1959", periods=len(data), freq="M")})
return df
@pytest.fixture
def config():
config = {"frequency_unit": "M", "season_length_M": 12, "time_column": "date", "target_columns": ["value1"],
"long_format": False, "decomposition_model": "additive", "seasonal_stl": 13, "expert": True, "additional_parameters_STL": {}}
return config
@pytest.fixture
def input_dataset_columns():
return ["value1", "value2", "country", "date"]
@pytest.fixture
def dku_config(config, input_dataset_columns):
dku_config = STLConfig()
dku_config.add_parameters(config, input_dataset_columns)
return dku_config
@pytest.fixture
def expected_dates():
expected = {"3M": np.array(['1959-01-31T00:00:00.000000000', '1959-04-30T00:00:00.000000000',
'1959-07-31T00:00:00.000000000', '1959-10-31T00:00:00.000000000',
'1960-01-31T00:00:00.000000000', '1960-04-30T00:00:00.000000000',
'1960-07-31T00:00:00.000000000'], dtype='datetime64[ns]'),
"6M": np.array(['1959-01-31T00:00:00.000000000', '1959-07-31T00:00:00.000000000',
'1960-01-31T00:00:00.000000000', '1960-07-31T00:00:00.000000000',
'1961-01-31T00:00:00.000000000', '1961-07-31T00:00:00.000000000',
'1962-01-31T00:00:00.000000000'], dtype='datetime64[ns]'),
"M": np.array(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
'1959-03-31T00:00:00.000000000', '1959-04-30T00:00:00.000000000',
'1959-05-31T00:00:00.000000000', '1959-06-30T00:00:00.000000000',
'1959-07-31T00:00:00.000000000'], dtype='datetime64[ns]'),
"W": np.array(['1959-01-04T00:00:00.000000000', '1959-01-11T00:00:00.000000000',
'1959-01-18T00:00:00.000000000', '1959-01-25T00:00:00.000000000',
'1959-02-01T00:00:00.000000000', '1959-02-08T00:00:00.000000000',
'1959-02-15T00:00:00.000000000'], dtype='datetime64[ns]'),
"W-FRI": np.array(['1959-01-02T00:00:00.000000000', '1959-01-09T00:00:00.000000000',
'1959-01-16T00:00:00.000000000', '1959-01-23T00:00:00.000000000',
'1959-01-30T00:00:00.000000000', '1959-02-06T00:00:00.000000000',
'1959-02-13T00:00:00.000000000'], dtype='datetime64[ns]'),
"B": np.array(['1959-01-01T00:00:00.000000000', '1959-01-02T00:00:00.000000000',
'1959-01-05T00:00:00.000000000', '1959-01-06T00:00:00.000000000',
'1959-01-07T00:00:00.000000000', '1959-01-08T00:00:00.000000000',
'1959-01-09T00:00:00.000000000'], dtype='datetime64[ns]'),
"H": np.array(['1959-01-01T00:00:00.000000000', '1959-01-01T01:00:00.000000000',
'1959-01-01T02:00:00.000000000', '1959-01-01T03:00:00.000000000',
'1959-01-01T04:00:00.000000000', '1959-01-01T05:00:00.000000000',
'1959-01-01T06:00:00.000000000'], dtype='datetime64[ns]'),
"4H": np.array(['1959-01-01T00:00:00.000000000', '1959-01-01T04:00:00.000000000',
'1959-01-01T08:00:00.000000000', '1959-01-01T12:00:00.000000000',
'1959-01-01T16:00:00.000000000', '1959-01-01T20:00:00.000000000',
'1959-01-02T00:00:00.000000000'], dtype='datetime64[ns]'),
"D": np.array(['1959-01-01T00:00:00.000000000', '1959-01-02T00:00:00.000000000',
'1959-01-03T00:00:00.000000000', '1959-01-04T00:00:00.000000000',
'1959-01-05T00:00:00.000000000', '1959-01-06T00:00:00.000000000',
'1959-01-07T00:00:00.000000000'], dtype='datetime64[ns]'),
"min": np.array(['1959-01-01T00:00:00.000000000', '1959-01-01T00:01:00.000000000',
'1959-01-01T00:02:00.000000000', '1959-01-01T00:03:00.000000000',
'1959-01-01T00:04:00.000000000', '1959-01-01T00:05:00.000000000',
'1959-01-01T00:06:00.000000000'], dtype='datetime64[ns]'),
"12M": np.array(['1959-01-31T00:00:00.000000000', '1960-01-31T00:00:00.000000000',
'1961-01-31T00:00:00.000000000', '1962-01-31T00:00:00.000000000',
'1963-01-31T00:00:00.000000000', '1964-01-31T00:00:00.000000000',
'1965-01-31T00:00:00.000000000'], dtype='datetime64[ns]')
}
return expected
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
class TestSTLDecomposition:
def test_STL_multiplicative(self, dku_config, input_df):
dku_config.model = "multiplicative"
timeseries_preparator = TimeseriesPreparator(dku_config)
df_prepared = timeseries_preparator.prepare_timeseries_dataframe(input_df)
decomposition = STLDecomposition(dku_config)
results = decomposition.fit(df_prepared)
expected_array = [1.87080328, 1.94864198, 1.97546651, 1.47349625, 0.74672304,
0.6552587, 0.5000725, 0.46825876, 0.49417933, 0.86890043,
1.16434155, 1.63725892, 2.17084151, 2.106642, 1.95377386,
1.32400823, 0.92620183, 0.51855162, 0.44493062, 0.35877353,
0.47054681, 0.94481716, 1.30967762, 1.88240591, 2.51946737,
2.28270725]
rounded_results = np.round(results["value1_seasonal"].values, 8)
np.testing.assert_equal(rounded_results, expected_array)
assert np.mean(results["value1_trend"]) == 409265.35453951
assert np.mean(results["value1_seasonal"]) == 1.2698748679749627
assert np.mean(results["value1_residuals"]) == 0.9941032097902623
def test_STL_additive(self, dku_config, input_df):
timeseries_preparator = TimeseriesPreparator(dku_config)
df_prepared = timeseries_preparator.prepare_timeseries_dataframe(input_df)
decomposition = STLDecomposition(dku_config)
results = decomposition.fit(df_prepared)
expected_array = np.array(
[547017.8314, 537486.722, 528097.1954, 518846.2605, 509728.8989, 500744.2034, 491895.324, 483188.5115, 474630.5299, 466256.2782, 458496.2869,
454985.6935, 453114.0625, 452740.149, 453810.1866, 456404.7768, 463218.9767, 470913.292, 478947.2522, 487217.229, 495684.7824, 504325.6079,
513126.1746, 522081.8564, 531195.1428, 540473.2835])
rounded_results = np.round(results["value1_trend"].values, 4)
np.testing.assert_equal(rounded_results, expected_array)
assert np.mean(results["value1_trend"]) == 492101.0195351211
assert np.mean(results["value1_seasonal"]) == 32625.652227975654
assert np.mean(results["value1_residuals"]) == -5345.248686173698
def test_quarter_frequency(self, expected_dates):
results_df = get_additive_result_df("3M")
assert results_df.shape == (7, 5)
np.testing.assert_equal(results_df["date"].values, expected_dates["3M"])
np.testing.assert_equal(np.round(results_df["value1_trend"].values, 4), [301.5621, 311.6514, 321.7407, 331.83, 341.9193, 352.0086, 362.0979])
assert np.mean(results_df["value1_trend"]) == 331.8299999999998
assert np.mean(results_df["value1_seasonal"]) == 1.141428571428658
assert np.mean(results_df["value1_residuals"]) == 9.74458609042423e-14
def test_semiannual_frequency(self, expected_dates):
results_df = get_additive_result_df("6M")
assert results_df.shape == (7, 5)
np.testing.assert_equal(results_df["date"].values, expected_dates["6M"])
np.testing.assert_equal(np.round(results_df["value1_trend"].values, 4), [300.893, 309.6409, 314.1272, 319.7546, 349.1279, 372.5453, 390.7896])
assert np.mean(results_df["value1_trend"]) == 336.6969211857202
assert np.mean(results_df["value1_seasonal"]) == -3.868590336224215
assert np.mean(results_df["value1_residuals"]) == 0.1430977219325931
def test_monthly_frequency(self, expected_dates):
results_df = get_additive_result_df("M")
assert results_df.shape == (7, 5)
np.testing.assert_equal(results_df["date"].values, expected_dates["M"])
np.testing.assert_equal(np.round(results_df["value1_trend"].values, 4), [336.8828, 336.8828, 336.8828, 336.8828, 336.8828, 336.8828, 336.8828])
assert np.mean(results_df["value1_trend"]) == 336.88280446244863
assert np.mean(results_df["value1_seasonal"]) == -3.9113758910199925
assert np.mean(results_df["value1_residuals"]) == -8.120488408686859e-15
def test_weekly_frequencies(self, expected_dates):
results_df = get_additive_result_df("W")
assert results_df.shape == (7, 5)
np.testing.assert_equal(results_df["date"].values, expected_dates["W"])
np.testing.assert_equal(np.round(results_df["value1_trend"].values, 4), [175.9658, 175.9658, 175.9658, 175.9658, 175.9658, 175.9658, 175.9658])
assert np.mean(results_df["value1_trend"]) == 175.9658401676288
assert np.mean(results_df["value1_seasonal"]) == 157.00558840379978
assert np.mean(results_df["value1_residuals"]) == -1.6240976817373718e-14
results_df = get_additive_result_df("W", frequency_end_of_week="FRI")
assert results_df.shape == (7, 5)
np.testing.assert_equal(results_df["date"].values, expected_dates["W-FRI"])
np.testing.assert_equal(np.round(results_df["value1_trend"].values, 4), [175.9658, 175.9658, 175.9658, 175.9658, 175.9658, 175.9658, 175.9658])
assert np.mean(results_df["value1_trend"]) == 175.9658401676288
assert np.mean(results_df["value1_seasonal"]) == 157.00558840379978
assert np.mean(results_df["value1_residuals"]) == -1.6240976817373718e-14
def test_business_day_frequencies(self, expected_dates):
results_df = get_additive_result_df("B")
assert results_df.shape == (7, 5)
np.testing.assert_equal(results_df["date"].values, expected_dates["B"])
np.testing.assert_equal(np.round(results_df["value1_trend"].values, 4), [298.775, 309.556, 320.337, 331.118, 341.899, 352.68, 363.461])
assert np.mean(results_df["value1_trend"]) == 331.1179999999999
assert np.mean(results_df["value1_seasonal"]) == 1.8534285714286125
assert np.mean(results_df["value1_residuals"]) == 7.308439567818174e-14
def test_hour_frequencies(self, expected_dates):
results_df = get_additive_result_df("H")
assert results_df.shape == (7, 5)
|
np.testing.assert_equal(results_df["date"].values, expected_dates["H"])
|
numpy.testing.assert_equal
|
# IMPORTING ALL THE NECESSARY LIBRARIES
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import pickle
import random
Data_dir = 'path to where all folder of images are present' #giving the absolute path for the images (referenced below as Data_dir)
CATEGORIES = os.listdir("....path...\Training Data Images") #lists all the folder/file names
#these are the labels of the images
print(CATEGORIES)
# JUST FOR VISUALIZING THAT IT IS WORKING OR NOT
for category in CATEGORIES:
path = os.path.join(Data_dir ,category)
# os.path.join() , joins two paths together i.e D:\ELC Internship\Training Data Images\A (iteration 1)
for img in os.listdir(path): # now we are inside one of the folder(A,B,C,D,E) ,eg:A which contains 793 images
img_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)
plt.imshow(img_array , cmap = 'gray')
plt.show()
break
break
IMG_SIZE = 128 #RESIZING ALL THE IMAGES SO, THE DATA IS NORMALIZED (you may choose according to your requirement)
new_array = cv2.resize(img_array , (IMG_SIZE,IMG_SIZE))
plt.imshow(new_array, cmap = 'gray')
plt.show()
training_data= [] # EMPTY LIST
def create_training_data():
for category in CATEGORIES: # traversing through the list of categories
path = os.path.join(Data_dir ,category)
class_num = CATEGORIES.index(category)
for img in os.listdir(path):#going inside the category A folder which contain images of 'A'
img_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)
new_array = cv2.resize(img_array,(IMG_SIZE,IMG_SIZE))
training_data.append([new_array,class_num])
create_training_data()
len(training_data)
random.shuffle(training_data) # Shuffling the training data
training_data[1]
X = []
Y = []
for features, label in training_data:
X.append(features)
Y.append(label)
X =
|
np.array(X)
|
numpy.array
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os.path
import unsw_nb15_dataset
import math
#Some settings related to the output from this file:
#This is only defined in the scope of this file
one_hot_categorical = False
#TODO FIXME I should get this list from the unsw_nb15_dataset object in a reliably-ordered way
numeric_features = ['dur', 'sbytes',
'dbytes', 'sttl', 'dttl', 'sloss', 'dloss', 'sload', 'dload',
'spkts', 'dpkts', 'swin', 'dwin', 'stcpb', 'dtcpb', 'smeansz',
'dmeansz', 'trans_depth', 'res_bdy_len', 'sjit', 'djit', 'stime',
'ltime', 'sintpkt', 'dintpkt', 'tcprtt', 'synack', 'ackdat']
#Load a CSV from the UNSW-NB15 dataset into a Pandas DataFrame
def load_unsw_nb15_dataset_as_data_frame(file_path = None, features_path = None):
if file_path is None or features_path is None:
return None
features_df = pd.read_csv(features_path, encoding="latin-1")
for i in range(len(features_df.columns.values)):
features_df.columns.values[i] = str(features_df.columns.values[i]).strip().lower()
#lower case all the types
for i in range(len(features_df)):
features_df.loc[i, ['type']] = str(features_df['type'][i]).strip().lower()
features_df.loc[i, ['name']] = str(features_df['name'][i]).strip().lower()
packet_data_df = pd.read_csv(file_path, encoding="latin-1", names=features_df['name'], header=None)
return packet_data_df, features_df
#How can we encode these various features, many of which are discrete integers?
#One-hot or Binary encoding seems logical, using Binary coding to keep things compact.
#Returns a list where each element is 1 or 0, giving the binary encoding of value,
#padded to exactly the requested number of bits (most significant bit first).
#If the value cannot be encoded with the requested number of bits, None is returned.
def binary_encode(value, bits):
encoding = []
while value != 0:
encoding.append(value % 2)
value //= 2
if bits < len(encoding):
return None #couldn't represent with requested number of bits
while len(encoding) < bits:
encoding.append(0)
encoding.reverse()
return encoding
#Takes binary integer in the form of a list containing 1's and 0's.
#Returns the base-10 (integer) representation of the binary value.
def binary_decode(value):
if len(value) == 0:
return None
out = 0
for i in range(0, len(value)):
if value[i] == 1:
out += 2**(len(value) - (i+1))
return out
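#A minimal round-trip sketch (illustrative only; the helper below is hypothetical
#and is never called automatically): binary_encode and binary_decode are inverses
#for values that fit in the requested number of bits.
def _binary_roundtrip_example():
    encoded = binary_encode(13, 8)           # [0, 0, 0, 0, 1, 1, 0, 1]
    assert binary_decode(encoded) == 13      # decoding recovers the original value
    assert binary_encode(300, 8) is None     # 300 does not fit in 8 bits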
#Converts a list/vector of floats in range [0,1] to a list/vector of integers,
#equivalent to rounding each float to the nearest of 0 or 1.
#eg. vector [0.2, 0.7] becomes [0, 1]
def float_to_binary(value):
out = []
for i in range(len(value)):
if value[i] >= 0.5:
out.append(1)
else:
out.append(0)
return out
#Takes an integer "target" in the range [0, num_classes] and returns the one-hot
#encoding of that target as a vector of length num_classes where the value at index
#target is 1.0 and all other values are 0.
def get_one_hot(target, num_classes):
one_hot = [0.0 for c in range(num_classes)]
one_hot[target] = 1.0
return one_hot
#Takes a vector/list which is one-hot encoded and returns the index of the first value
#which is 1.0 or 1. If there are other values which are non-zero, those are ignored.
#If no value is set to 1.0 or 1, None is returned.
def from_one_hot(one_hot):
for c in range(0, len(one_hot)):
if one_hot[c] == 1.0 or one_hot[c] == 1:
return c
return None
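#A minimal round-trip sketch (illustrative only; the helper below is hypothetical
#and is never called automatically): get_one_hot and from_one_hot are inverses
#for a valid class index.
def _one_hot_roundtrip_example():
    one_hot = get_one_hot(2, 5)              # [0.0, 0.0, 1.0, 0.0, 0.0]
    assert from_one_hot(one_hot) == 2        # index of the single 1.0 entry
    assert from_one_hot([0.0, 0.0]) is None  # nothing set -> None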
def get_minimum_bits(value):
min_bits = 1
while binary_encode(value, min_bits) is None:
min_bits += 1
return min_bits
def build_input_feature_tensor(unsw_nb15_dataset, packet_data_dict):
input_features = []
#source ip
srcip_segments = str(packet_data_dict['srcip']).split('.')
srcip_bits = []
for segment in srcip_segments:
for k in binary_encode(int(segment), 8):
srcip_bits.append(k)
#destination ip
dstip_segments = str(packet_data_dict['dstip']).split('.')
dstip_bits = []
for segment in dstip_segments:
for k in binary_encode(int(segment), 8):
dstip_bits.append(k)
#source port
sport = binary_encode(int(packet_data_dict['sport']), 16)
#destination port
dsport = binary_encode(int(packet_data_dict['dsport']), 16)
if one_hot_categorical == True:
#protocol
proto_category_index = np.where(unsw_nb15_dataset.categorical_column_values['proto'] == packet_data_dict['proto'])[0][0]
proto = get_one_hot(proto_category_index, len(unsw_nb15_dataset.categorical_column_values['proto']))
#state
state_category_index = np.where(unsw_nb15_dataset.categorical_column_values['state'] == packet_data_dict['state'])[0][0]
state = get_one_hot(state_category_index, len(unsw_nb15_dataset.categorical_column_values['state']))
else:
#protocol
proto_category_index =
|
np.where(unsw_nb15_dataset.categorical_column_values['proto'] == packet_data_dict['proto'])
|
numpy.where
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 17:45:24 2021
@author: mlampert
"""
import os
import copy
#FLAP imports and settings
import flap
import flap_nstx
import flap_mdsplus
flap_nstx.register()
flap_mdsplus.register('NSTX_MDSPlus')
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,"../flap_nstx.cfg")
flap.config.read(file_name=fn)
#Scientific imports
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
#Other necessary imports
def get_fit_nstx_thomson_profiles(exp_id=None, #Shot number
pressure=False, #Return the pressure profile parameters
temperature=False, #Return the temperature profile parameters
density=False, #Return the density profile parameters
spline_data=False, #Calculate the results from the spline data (no error is going to be taken into account)
device_coordinates=False, #Calculate the results as a function of device coordinates
radial_range=None, #Radial range of the pedestal (only works when device coordinates are set)
flux_coordinates=False, #Calculate the results in flux coordinates
flux_range=None, #The normalized flux coordinate range for returning the results
test=False,
output_name=None,
return_parameters=False,
plot_time=None,
pdf_object=None,
modified_tanh=False,
):
"""
Returns a dataobject which has the largest corresponding gradient based on the tanh fit.
Fitting is based on publication https://aip.scitation.org/doi/pdf/10.1063/1.4961554
The linear background is not utilized; instead of the mtanh, only the tanh is used.
"""
if ((device_coordinates and flux_range is not None) or
(flux_coordinates and radial_range is not None)):
raise ValueError('When flux or device coordinates are set, only flux or radial range can be set! Returning...')
d=flap.get_data('NSTX_THOMSON',
exp_id=exp_id,
name='',
object_name='THOMSON_DATA',
options={'pressure':pressure,
'temperature':temperature,
'density':density,
'spline_data':False,
'add_flux_coordinates':True,
'force_mdsplus':False})
if flux_coordinates:
r_coord_name='Flux r'
if device_coordinates:
r_coord_name='Device R'
time=d.coordinate('Time')[0][0,:]
thomson_profile={'Time':time,
'Data':d.data,
'Device R':d.coordinate('Device R')[0],
'Flux r':d.coordinate('Flux r')[0],
'a':np.zeros(time.shape),
'Height':np.zeros(time.shape),
'Width':np.zeros(time.shape),
'Global gradient':np.zeros(time.shape),
'Position':np.zeros(time.shape),
'Position r':np.zeros(time.shape),
'SOL offset':np.zeros(time.shape),
'Max gradient':np.zeros(time.shape),
'Value at max':np.zeros(time.shape),
'Error':{'Height':np.zeros(time.shape),
'SOL offset':np.zeros(time.shape),
'Position':np.zeros(time.shape),
'Position r':np.zeros(time.shape),
'Width':np.zeros(time.shape),
'Global gradient':np.zeros(time.shape),
'Max gradient':np.zeros(time.shape),
'Value at max':np.zeros(time.shape),
},
}
if modified_tanh:
thomson_profile['Slope']=np.zeros(time.shape)
thomson_profile['Error']['Slope']=np.zeros(time.shape)
if test:
plt.figure()
if flux_range is not None:
x_range=flux_range
if radial_range is not None:
x_range=radial_range
# def mtanh_fit_function(r, b_height, b_sol, b_pos, b_width, b_slope): #This version of the code is not working due to the b_slope linear dependence
# def mtanh(x,b_slope):
# return ((1+b_slope*x)*np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))
# return (b_height-b_sol)/2*(mtanh((b_pos-r)/(2*b_width),b_slope)+1)+b_sol
if not modified_tanh:
def tanh_fit_function(r, b_height, b_sol, b_pos, b_width):
def tanh(x):
return (np.exp(x)-
|
np.exp(-x)
|
numpy.exp
|
from .BaseTarget import BaseTarget
from ..transformers import Constant,Affine
from ..Orchestrator import Orchestrator
import numpy as np
from scipy import signal
class ANPAN(BaseTarget):
def __init__(self, N, M, size=2**10, kernel_size=None, kernel_type='gaussian', sample_num=None):
x = np.arange(size)
y = np.arange(size)
xx, yy = np.meshgrid(x, y)
p = self.init_p(*[xx.flatten(),yy.flatten()]).reshape(size, -1)
#kernel
if kernel_size is None:
kernel_size = 2**6+1
if kernel_size != 1:
size = kernel_size
if kernel_type == 'gaussian':
if size%2==0:
print('kernel size should be odd')
return
sigma = (size-1)/2
# shift [0, size] to [-sigma, sigma]
x = y = np.arange(0,size) - sigma
X,Y = np.meshgrid(x,y)
mat = np.exp(-(X**2 + Y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)
# normalize so the kernel sums to 1
kernel = mat / np.sum(mat)
elif kernel_type == 'ma':
kernel = np.ones((size, size))/(size**2)
p = signal.convolve2d(p, kernel, mode='same', )
self.p = p
super().__init__(N, M, 2, sample_num)
def init_p(self, *arg):
self.N = 10
x = (np.array(arg[0])/(2**self.N)*2-1)*110
y = (np.array(arg[1])/(2**self.N)*2-1)*110
c_list = []
c_list.append((x/85)**2 +(y/85)**2 -1)
c_list.append(((abs(x)-55)/24)**2 + ((y+5)/24)**2 -1)
c_list.append(((x/28)**2 +((y+5)/23)**2) -1)
c_list.append(((abs(x)-18)/9)**2 +((y-40)/15)**2 -1)
c_list.append((np.sign(y-45) +1)/2*(((abs(x)-18)/12)**2 +((y-45)/20)**2 -2) +1)
c_list.append(((np.sign(-y-35)+1)/2)*((x/42)**2+((y+35)/18)**2 -2) +1)
c_list.append(-np.maximum(np.abs((x)/110), np.abs((y)/110))**3 + 1.5)
s = 1
epsilon = 1e-4
for c in c_list:
_c = np.abs(c).clip(0+epsilon,1)
# _c = np.where(c<0, -c, c)
# _c = np.where(_c>1, 1, _c)
s *= _c
s *= (-((x/85)**2 +(y/85)**2 -1)).clip(0, 1)
return s
def __call__(self, *arg):
x = (np.array(arg[0])/(2**self.N)*(2**10)).astype(int)  # np.int was removed from NumPy; plain int is equivalent
y = (np.array(arg[1])/(2**self.N)*(2**10)).astype(int)
s = self.p[-y, x]
return s/self.Z
class Target_gauss2d_independent:
def __init__(self,N,M):
self.N = N
self.M = M
x = np.linspace(-1,1,2**N)
y = np.linspace(-1,1,2**N)
x,y = np.meshgrid(x,y)
d = 2 * (x**2 + y**2)
p = np.exp(-d)
p/=np.sum(p)
self.p = p
def __call__(self,*arg):
sample_num = len(arg[0])
return np.array([self.p[arg[0][i],arg[1][i]] for i in range(sample_num)])
class Target_gauss2d_dependent:
def __init__(self,N,M):
self.N = N
self.M = M
x = np.linspace(-1,1,2**N)
y = np.linspace(-1,1,2**N)
x,y = np.meshgrid(x,y)
c,s = np.cos(np.pi/4),np.sin(np.pi/4)
x,y = x*c+y*s,x*s-y*c
d = 2 * (x**2 + 4*(y**2))
p = np.exp(-d)
p/=np.sum(p)
self.p = p
def __call__(self,*arg):
sample_num = len(arg[0])
return np.array([self.p[arg[0][i],arg[1][i]] for i in range(sample_num)])
class Target_gauss2d_multi:
def __init__(self,N,M):
self.N = N
self.M = M
x = np.arange(0, 2**N)
y = np.arange(0, 2**N)
X, Y = np.meshgrid(x, y)
self.scale = np.sum(self.prob(X.reshape(-1, 1), Y.reshape(-1, 1)))
def prob(self, *arg):
x_samples = arg[0]
y_samples = arg[1]
p = np.zeros([len(x_samples)])
for i in range(len(x_samples)):
x = ( x_samples[i] / 2**self.N ) *2. -1
y = ( y_samples[i] / 2**self.N ) *2. -1
s0 = (x-0.5)+(y+0.5)
t0 = (x-0.5)-(y+0.5)
s1 = (x+0.5)+(y-0.5)
t1 = (x+0.5)-(y-0.5)
p[i] = np.exp( -(s0*s0+4*t0*t0) )/2+ np.exp( -(s1*s1+4*t1*t1) )/2
return p
def __call__(self, *arg):
p = self.prob(*arg)
p = np.array(p)/self.scale
return p
class Target_gauss2d_multi:
def __init__(self,N,M):
self.N = N
self.M = M
x = np.linspace(-1,1,2**N)
y = np.linspace(-1,1,2**N)
x,y = np.meshgrid(x,y)
x1,y1 = x+0.5,y+0.5
x2,y2 = x-0.5,y-0.5
d1 = 8 * (x1**2 + (y1**2))
d2 = 8 * (x2**2 + (y2**2))
p = np.exp(-d1) + np.exp(-d2)
p/=np.sum(p)
self.p = p
def __call__(self,*arg):
sample_num = len(arg[0])
return np.array([self.p[arg[0][i],arg[1][i]] for i in range(sample_num)])
class Target_circle2d:
def __init__(self,N,M):
self.N = N
self.M = M
x = np.linspace(-1,1,2**N)
y = np.linspace(-1,1,2**N)
x,y = np.meshgrid(x,y)
d = ( np.sqrt(x**2 + y**2) -1/2) ** 2
#p = np.exp(-d)
p =
|
np.exp(-d*32)
|
numpy.exp
|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import openmdao.api as om
import wisdem.pyframe3dd.pyframe3dd as frame3dd
from wisdem.commonse import gravity
from wisdem.commonse.csystem import DirectionVector
from wisdem.commonse.utilities import find_nearest, nodal2sectional
from wisdem.commonse.cross_sections import Tube, IBeam
from wisdem.commonse.utilization_constraints import vonMisesStressUtilization
RIGID = 1
FREE = 0
def tube_prop(s, Di, ti):
L = s.max() - s.min()
def equal_pts(xi):
if len(xi) < len(s) and len(xi) == 2:
x = np.interp((s - s.min()) / L, [0, 1], xi)
elif len(xi) == len(s):
x = xi
else:
raise ValueError("Unknown grid of input", str(xi))
return x
D = equal_pts(Di)
t = equal_pts(ti)
return Tube(nodal2sectional(D)[0], nodal2sectional(t)[0])
class Hub_Rotor_LSS_Frame(om.ExplicitComponent):
"""
Run structural analysis of hub system with the generator rotor and main (LSS) shaft.
Parameters
----------
tilt : float, [deg]
Shaft tilt
s_lss : numpy array[5], [m]
Discretized s-coordinates along drivetrain, measured from bedplate (direct) or tower center (geared)
lss_diameter : numpy array[2], [m]
LSS outer diameter from hub to bearing 2
lss_wall_thickness : numpy array[2], [m]
LSS wall thickness
hub_system_mass : float, [kg]
Hub system mass
hub_system_cm : float, [m]
Hub system center of mass distance from hub flange
hub_system_I : numpy array[6], [kg*m**2]
Hub system moment of inertia
F_hub : numpy array[3, n_dlcs], [N]
Force vector applied to the hub (WITH WEIGHT???)
M_hub : numpy array[3, n_dlcs], [N*m]
Moment vector applied to the hub
s_mb1 : float, [m]
Bearing 1 s-coordinate along drivetrain, measured from bedplate (direct) or tower center (geared)
s_mb2 : float, [m]
Bearing 2 s-coordinate along drivetrain, measured from bedplate (direct) or tower center (geared)
s_rotor : float, [m]
Generator rotor attachment to lss s-coordinate measured from bedplate (direct) or tower center (geared)
generator_rotor_mass : float, [kg]
Generator rotor mass
generator_rotor_I : numpy array[3], [kg*m**2]
Generator rotor moment of inertia (measured about its cm)
gearbox_mass : float, [kg]
Gearbox rotor mass
gearbox_I : numpy array[3], [kg*m**2]
Gearbox moment of inertia (measured about its cm)
lss_E : float, [Pa]
modulus of elasticity
lss_G : float, [Pa]
shear modulus
lss_rho : float, [kg/m**3]
material density
lss_Xy : float, [Pa]
yield stress
Returns
-------
torq_deflection : float, [m]
Maximum deflection distance at rotor (direct) or gearbox (geared) attachment
torq_rotation : float, [rad]
Maximum rotation angle at rotor (direct) or gearbox (geared) attachment
torq_axial_stress : numpy array[5, n_dlcs], [Pa]
Axial stress in Curved_beam structure
torq_shear_stress : numpy array[5, n_dlcs], [Pa]
Shear stress in Curved_beam structure
torq_bending_stress : numpy array[5, n_dlcs], [Pa]
Hoop stress in Curved_beam structure calculated with Roarks formulae
constr_lss_vonmises : numpy array[5, n_dlcs]
Sigma_y/Von_Mises
F_mb1 : numpy array[3, n_dlcs], [N]
Force vector applied to bearing 1 in hub c.s.
F_mb2 : numpy array[3, n_dlcs], [N]
Force vector applied to bearing 2 in hub c.s.
F_torq : numpy array[3, n_dlcs], [N]
Force vector applied to generator rotor (direct) or gearbox (geared) in hub c.s.
M_mb1 : numpy array[3, n_dlcs], [N*m]
Moment vector applied to bearing 1 in hub c.s.
M_mb2 : numpy array[3, n_dlcs], [N*m]
Moment vector applied to bearing 2 in hub c.s.
M_torq : numpy array[3, n_dlcs], [N*m]
Moment vector applied to generator rotor (direct) or gearbox (geared) in hub c.s.
"""
def initialize(self):
self.options.declare("n_dlcs")
self.options.declare("direct_drive", default=True)
self.options.declare("modeling_options")
def setup(self):
n_dlcs = self.options["n_dlcs"]
self.add_discrete_input("upwind", True)
self.add_input("tilt", 0.0, units="deg")
self.add_input("s_lss", val=np.zeros(5), units="m")
self.add_input("lss_diameter", val=np.zeros(2), units="m")
self.add_input("lss_wall_thickness", val=np.zeros(2), units="m")
self.add_input("hub_system_mass", 0.0, units="kg")
self.add_input("hub_system_cm", 0.0, units="m")
self.add_input("hub_system_I", np.zeros(6), units="kg*m**2")
self.add_input("F_hub", val=np.zeros((3, n_dlcs)), units="N")
self.add_input("M_hub", val=np.zeros((3, n_dlcs)), units="N*m")
self.add_input("s_mb1", val=0.0, units="m")
self.add_input("s_mb2", val=0.0, units="m")
self.add_input("s_rotor", val=0.0, units="m")
self.add_input("generator_rotor_mass", val=0.0, units="kg")
self.add_input("generator_rotor_I", val=np.zeros(3), units="kg*m**2")
self.add_input("gearbox_mass", val=0.0, units="kg")
self.add_input("gearbox_I", val=np.zeros(3), units="kg*m**2")
self.add_input("brake_mass", val=0.0, units="kg")
self.add_input("brake_I", val=np.zeros(3), units="kg*m**2")
self.add_input("carrier_mass", val=0.0, units="kg")
self.add_input("carrier_I", val=np.zeros(3), units="kg*m**2")
self.add_input("lss_E", val=0.0, units="Pa")
self.add_input("lss_G", val=0.0, units="Pa")
self.add_input("lss_rho", val=0.0, units="kg/m**3")
self.add_input("lss_Xy", val=0.0, units="Pa")
self.add_output("torq_deflection", val=0.0, units="m")
self.add_output("torq_rotation", val=0.0, units="rad")
self.add_output("lss_axial_stress", np.zeros((4, n_dlcs)), units="Pa")
self.add_output("lss_shear_stress", np.zeros((4, n_dlcs)), units="Pa")
self.add_output("constr_lss_vonmises", np.zeros((4, n_dlcs)))
self.add_output("F_mb1", val=np.zeros((3, n_dlcs)), units="N")
self.add_output("F_mb2", val=np.zeros((3, n_dlcs)), units="N")
self.add_output("F_torq", val=np.zeros((3, n_dlcs)), units="N")
self.add_output("M_mb1", val=np.zeros((3, n_dlcs)), units="N*m")
self.add_output("M_mb2", val=np.zeros((3, n_dlcs)), units="N*m")
self.add_output("M_torq", val=np.zeros((3, n_dlcs)), units="N*m")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
upwind = discrete_inputs["upwind"]
Cup = -1.0 if upwind else 1.0
tilt = float(np.deg2rad(inputs["tilt"]))
s_lss = inputs["s_lss"]
D_lss = inputs["lss_diameter"]
t_lss = inputs["lss_wall_thickness"]
s_mb1 = float(inputs["s_mb1"])
s_mb2 = float(inputs["s_mb2"])
if self.options["direct_drive"]:
s_rotor = float(inputs["s_rotor"])
m_rotor = float(inputs["generator_rotor_mass"])
I_rotor = inputs["generator_rotor_I"]
m_brake = float(inputs["brake_mass"])
I_brake = inputs["brake_I"]
else:
m_gearbox = float(inputs["gearbox_mass"])
I_gearbox = inputs["gearbox_I"]
m_carrier = float(inputs["carrier_mass"])
I_carrier = inputs["carrier_I"]
rho = float(inputs["lss_rho"])
E = float(inputs["lss_E"])
G = float(inputs["lss_G"])
sigma_y = float(inputs["lss_Xy"])
gamma_f = float(self.options["modeling_options"]["gamma_f"])
gamma_m = float(self.options["modeling_options"]["gamma_m"])
gamma_n = float(self.options["modeling_options"]["gamma_n"])
m_hub = float(inputs["hub_system_mass"])
cm_hub = float(inputs["hub_system_cm"])
I_hub = inputs["hub_system_I"]
F_hub = inputs["F_hub"]
M_hub = inputs["M_hub"]
# ------- node data ----------------
n = len(s_lss)
inode = np.arange(1, n + 1)
ynode = znode = rnode = np.zeros(n)
xnode = Cup * s_lss.copy()
nodes = frame3dd.NodeData(inode, xnode, ynode, znode, rnode)
# Grab indices for later
i1 = inode[find_nearest(xnode, Cup * s_mb1)]
i2 = inode[find_nearest(xnode, Cup * s_mb2)]
iadd = inode[1]
# Differences between direct and geared
if self.options["direct_drive"]:
itorq = inode[find_nearest(xnode, Cup * s_rotor)]
m_torq = m_rotor
I_torq = I_rotor
m_add = m_brake
I_add = I_brake
else:
itorq = inode[0]
m_torq = m_gearbox - m_carrier
I_torq = I_gearbox - I_carrier
m_add = m_carrier
I_add = I_carrier
# ------------------------------------
# ------ reaction data ------------
# Reactions at main bearings
rnode = np.r_[i1, i2, itorq]
Rx = np.array([RIGID, FREE, FREE]) # Upwind bearing restricts translational
Ry = np.array([RIGID, FREE, FREE]) # Upwind bearing restricts translational
Rz = np.array([RIGID, FREE, FREE]) # Upwind bearing restricts translational
Rxx = np.array([FREE, FREE, RIGID]) # Torque is absorbed by stator, so this is the best way to capture that
Ryy = np.array([FREE, RIGID, FREE]) # downwind bearing carry moments
Rzz =
|
np.array([FREE, RIGID, FREE])
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This Module contains a collection of Mutation operators to be used in the ES-Framework
A Mutation operator mutates an Individual's genotype inline, thus returning nothing.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = '<NAME> <<EMAIL>>'
import numpy as np
import random
from numpy import add, bitwise_and, dot, exp, floor, mod, shape, zeros
from numpy.linalg import norm
from random import gauss
from math import sqrt
'''-----------------------------------------------------------------------------
# Mutation Helper Functions #
-----------------------------------------------------------------------------'''
def _keepInBounds(x, l_bound, u_bound):
"""
This function transforms x to t w.r.t. the low and high
boundaries lb and ub. It implements the function T^{r}_{[a,b]} as
described in Rui Li's PhD thesis "Mixed-Integer Evolution Strategies
for Parameter Optimization and Their Applications to Medical Image
Analysis" as alorithm 6.
:param x: Column vector to be kept in bounds
:param l_bound: Lower bound column vector
:param u_bound: Upper bound column vector
:returns: An in-bounds kept version of the column vector ``x``
"""
y = (x - l_bound) / (u_bound - l_bound)
floor_y = floor(y) # Local storage to prevent double calls
I = mod(floor_y, 2) == 0
yprime = zeros(shape(y))
yprime[I] = np.abs(y[I] - floor_y[I])
yprime[~I] = 1.0 - np.abs(y[~I] - floor_y[~I])
x = l_bound + (u_bound - l_bound) * yprime
return x
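# A minimal usage sketch (illustrative only; the helper below is hypothetical and
# is never called automatically): out-of-bounds values are mirrored back into
# [l_bound, u_bound] rather than clipped.
def _keep_in_bounds_example():
    l_bound, u_bound = np.array([0.0]), np.array([1.0])
    assert np.isclose(_keepInBounds(np.array([1.5]), l_bound, u_bound)[0], 0.5)    # reflected off the upper bound
    assert np.isclose(_keepInBounds(np.array([-0.25]), l_bound, u_bound)[0], 0.25) # reflected off the lower bound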
def adaptStepSize(individual):
"""
Given the current individual, randomly determine a new step size offset
that can be no greater than maxStepSize - baseStepSize
:param individual: The :class:`~modea.Individual.FloatIndividual` object whose step size should be adapted
"""
# Empirically determined, see paper
gamma = 0.22
offset = individual.stepSizeOffset
offset = 1 + ((1 - offset) / offset)
offset = 1 / (offset * exp(gamma * gauss(0, 1)))
individual.stepSizeOffset = min(offset, (individual.maxStepSize - individual.baseStepSize))
def _scaleWithThreshold(mutation_vector, threshold):
"""
Checks if norm(mutation_vector) is at least the given threshold.
If not, the vector is mirrored to the other side of the threshold,
i.e. scaled to be length: threshold + (threshold - norm(mutation_vector))
:param mutation_vector: Mutation vector to be scaled
:param threshold: Minimum length threshold. Vector is scaled if length does not reach threshold
:returns: The threshold-compliant mutation vector
"""
length = norm(mutation_vector)
if length < threshold:
new_length = threshold + (threshold - length)
mutation_vector *= (new_length / length)
return mutation_vector
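# A minimal usage sketch (illustrative only; the helper below is hypothetical and
# is never called automatically): a vector shorter than the threshold is mirrored
# to length threshold + (threshold - original length).
def _scale_with_threshold_example():
    scaled = _scaleWithThreshold(np.array([3.0, 4.0]), 10.0)  # original length 5.0
    assert np.isclose(norm(scaled), 15.0)                     # 10 + (10 - 5)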
def _adaptSigma(sigma, p_s, c=0.817):
"""
Adapt parameter sigma based on the 1/5th success rule
:param sigma: Sigma value to be adapted
:param p_s: Recent success rate, determines whether sigma is increased or decreased
:param c: Factor c that is used to increase or decrease sigma
:returns: New value sigma
"""
if p_s < 1/5:
sigma *= c
elif p_s > 1/5:
sigma /= c
return sigma
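# A minimal usage sketch (illustrative only; the helper below is hypothetical and
# is never called automatically): the 1/5th success rule shrinks sigma when the
# success rate is below 1/5 and grows it when above.
def _adapt_sigma_example():
    assert _adaptSigma(1.0, p_s=0.1) == 0.817        # too few successes -> shrink
    assert _adaptSigma(1.0, p_s=0.4) == 1.0 / 0.817  # enough successes -> grow
    assert _adaptSigma(1.0, p_s=1 / 5) == 1.0        # exactly 1/5 -> unchanged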
def _getXi():
"""
Randomly returns 5/7 or 7/5 with equal probability
:return: float Xi
"""
if bool(random.getrandbits(1)):
return 5/7
else:
return 7/5
'''-----------------------------------------------------------------------------
# ES Mutations #
-----------------------------------------------------------------------------'''
def addRandomOffset(individual, param, sampler):
"""
Mutation 1: x = x + sigma*N(0,I)
:param individual: :class:`~modea.Individual.FloatIndividual` to be mutated
:param param: :class:`~modea.Parameters.Parameters` object to store settings
:param sampler: :mod:`~modea.Sampling` module from which the random values should be drawn
"""
individual.genotype += param.sigma * sampler.next()
def CMAMutation(individual, param, sampler, threshold_convergence=False):
"""
CMA mutation: x = x + (sigma * B*D*N(0,I))
:param individual: :class:`~modea.Individual.FloatIndividual` to be mutated
:param param: :class:`~modea.Parameters.Parameters` object to store settings
:param sampler: :mod:`~modea.Sampling` module from which the random values should be drawn
:param threshold_convergence: Boolean: Should threshold convergence be applied. Default: False
"""
individual.last_z = sampler.next()
if threshold_convergence:
individual.last_z = _scaleWithThreshold(individual.last_z, param.threshold)
individual.mutation_vector = dot(param.B, (param.D * individual.last_z))  # y_k in the CMA tutorial (cmatutorial.pdf)
mutation_vector = individual.mutation_vector * param.sigma
individual.genotype = _keepInBounds(add(individual.genotype, mutation_vector), param.l_bound, param.u_bound)
'''-----------------------------------------------------------------------------
# GA Mutations #
-----------------------------------------------------------------------------'''
def mutateBitstring(individual):
"""
Simple 1/n bit-flip mutation
:param individual: :mod:`~modea.Individual` with a bit-string as genotype to undergo p=1/n mutation
"""
bitstring = individual.genotype
n = len(bitstring)
p = 1/n
for i in range(n):
if np.random.random() < p:
bitstring[i] = 1-bitstring[i]
def mutateIntList(individual, param, num_options_per_module):
"""
Self-adaptive random integer mutation to mutate the structure of an ES
:param individual: :class:`~modea.Individual.MixedIntegerIndividual` whose integer-part will be mutated
:param param: :class:`~modea.Parameters.Parameters` object
:param num_options_per_module: List :data:`~modea.num_options` with the number of available modules per module
position that are available to choose from
"""
p = individual.baseStepSize + individual.stepSizeOffset
num_ints = individual.num_ints
int_list = individual.genotype[:num_ints-1] # Get the relevant slice
for i, val in enumerate(num_options_per_module):
if np.random.random() < p:
# -1 as random_integers is [1, val], -1 to simulate leaving out the current value
new_int = np.random.random_integers(val-1)-1
if int_list[i] == new_int:
new_int = val - 1 # If we randomly selected the same value, pick the value we left out
int_list[i] = new_int
if np.random.random() < p:
new_lambda =
|
np.random.random_integers(param.l_bound[num_ints-1], param.u_bound[num_ints-1])
|
numpy.random.random_integers
|
import configparser
import os
import sys
import marg_mcmc as wl
sys.path.insert(0, '../bin_analysis')
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import fits
import time
#import whitelight2018 as wl
import batman
import get_limb as gl
#from wave_solution import orbits
def event_time(date, properties):
"""Program to determine the expected event time
Inputs
date: 1D array of the date of each exposure (MJD)
properties: 1D array containing the last observed eclipse
and the period. (MJD, days)"""
time=properties[1]
period=properties[4]
while time < date[0]:
time+=period
return float(time)
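# A minimal usage sketch (illustrative only; the numbers are made up and the helper
# below is hypothetical and never called automatically): the known event time
# (properties[1]) is stepped forward one period (properties[4]) at a time until it
# reaches the first exposure date.
def _event_time_example():
    dates = np.array([55005.2, 55005.4, 55005.6])  # MJD of each exposure
    props = [0.1, 55000.0, 89.0, 8.0, 3.0, 0.01]   # [rprs, t0, inc, a/R, period, depth]
    assert event_time(dates, props) == 55006.0     # 55000 + 2 periods of 3 d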
def get_orbits(date):
"""Procedure to organize light curve data by HST orbit"""
orbit=np.zeros(1).astype(int)
for i in range(len(date)-1):
t=date[i+1]-date[i]
if t*86400 > 1200.:
orbit=np.append(orbit, i+1) # a gap longer than 1200 s marks the start of a new HST orbit
return np.append(orbit, len(date))
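# A minimal usage sketch (illustrative only; the helper below is hypothetical and
# is never called automatically): a gap of more than 1200 s between exposures
# starts a new orbit, and the returned indices bracket each orbit.
def _get_orbits_example():
    dates = np.array([0.0, 0.001, 0.002, 0.05, 0.051])  # days; the 3rd-to-4th gap > 1200 s
    assert list(get_orbits(dates)) == [0, 3, 5]          # orbit boundaries as exposure indices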
def inputs(data, transit=True):
""" Function to read in priors for a system.
INPUTS:
data: data table of priors for a particular planet
OUTPUTS:
Returns array of system properties: [rprs, central event time, inc
,a/r, period, depth]
"""
inp_values=pd.read_table(data,sep=' ', index_col=None)
data_arr=inp_values.iloc[:,2].values
labels=inp_values.iloc[:,0].values
param_errs=inp_values.iloc[:,3].values
# Rj-m, Rsolar-m,AU-m, JD -> MJD
print("Fix this to read in a/rs and rp/rs automatically")
conversions=np.array([6.9911e7, 6.957e8, 1.49598e11, -2400000.5])
inc=data_arr[5]
period=data_arr[4]
a_R=data_arr[7]*conversions[2]/(data_arr[1]*conversions[1])
a_R_err=np.sqrt((param_errs[7]*conversions[2]/data_arr[1]/conversions[1])**2
+ (a_R*param_errs[1]/conversions[1])**2)
rprs = data_arr[0]*conversions[0]/(data_arr[1]*conversions[1])
if transit==True:
epoch=data_arr[6]+conversions[3]
depth=rprs*rprs
else:
epoch=data_arr[6]+conversions[3]+period/2.
depth = rprs*rprs*(data_arr[2]/data_arr[3])/3
props=np.zeros(6)
props[0]=rprs
props[1]=epoch
props[2]=inc
props[3]=a_R
props[4]=period
props[5]=depth
errors=np.zeros(6)
errors[0]=0
errors[1]=param_errs[6]
errors[2]=param_errs[5]
errors[3]=a_R_err
errors[4]=param_errs[4]
errors[5]=0
return [props,errors]
# def correction(inputs, date, flux, transit=False):
# params=batman.TransitParams()
# params.w=90.
# params.ecc=0
# params.rp=np.sqrt(inputs[0])
# t0=inputs[1]
# params.inc=inputs[2]
# params.a=inputs[3]
# params.per=inputs[4]
# depth=inputs[5]
# phase = (date-t0)/params.per
# phase = phase - np.floor(phase)
# phase[phase > 0.5] = phase[phase > 0.5] - 1.0
# if transit==True:
# params.t0=t0
# params.u=inputs[6:]
# params.limb_dark="quadratic"
# m=batman.TransitModel(params, date)
# model=m.light_curve(params)
# else:
# params.fp=inputs[5]
# params.t_secondary=t0
# params.u=[]
# params.limb_dark="uniform"
# m=batman.TransitModel(params, date, transittype="secondary")
# model=m.light_curve(params)
# corrected=flux/model
# return corrected
# def remove_bad_data(light_curve, spectra, light_corrected, date1, light_err, spec_err
# , user_inputs, check=False):
# """Procedure to remove "bad" data from light curve"""
# med= np.ma.median(light_corrected)
# sigma = np.sqrt(np.sum((light_corrected-med)**2)/(2*len(light_corrected)))
# medi=np.zeros_like(date1)+med
# sig3=medi+3*sigma
# sig4=medi+4*sigma
# sig5=medi+5*sigma
# sig3m=medi-3*sigma
# sig4m=medi-4*sigma
# sig5m=medi-5*sigma
# if check==False:
# nPasses=int(user_inputs[3])
# sigma_cut_factor=user_inputs[2]
# else:
# data=plt.plot(date1, light_corrected,'bo',ls='dotted')
# plt.xlabel('MJD')
# plt.ylabel('Total Flux')
# s5=plt.plot(date1, sig5,'pink',date1, sig5m, 'pink')
# s5[0].set_label('5-sigma')
# s4=plt.plot(date1, sig4,'g', date1, sig4m, 'g')
# s4[0].set_label('4-sigma')
# s3=plt.plot(date1, sig3,'r', date1, sig3m, 'r')
# s3[0].set_label('3-sigma')
# plt.plot(date1, medi, label='Median',ls='solid')
# plt.legend(scatterpoints=1)
# plt.show(block=False)
# cut = raw_input("Enter the sigma-cut factor (3-5 recommended): ")
# sigma_cut_factor = float(cut)
# user_inputs[2]=sigma_cut_factor
# passes=raw_input("Enter the number of passes for the sigma-cut: ")
# nPasses=int(passes)
# user_inputs[3]=nPasses
# plt.close()
# # Cut out the "bad" data
# for j in range(nPasses):
# med= np.ma.median(light_corrected)
# sigma = np.sqrt(np.sum((light_corrected-med)**2)/(2*len(light_corrected)))
# dif= np.abs(light_corrected-med)
# index=np.where(dif < sigma_cut_factor*sigma)[0]
# light_curve=light_curve[index]
# date1=date1[index]
# light_corrected=light_corrected[index]
# light_err=light_err[index]
# spectra=spectra[index,:]
# spec_err=spec_err[index,:]
# return [light_curve, spectra, light_corrected, date1, light_err, spec_err]
def preprocess_whitelight(visit
, direction
, x=0, y=0
, check=True
, inp_file=False
, save_processed_data=False
, transit=False
, data_plots=True
, mcmc=False
, openinc=False
, openar=True
, fixtime=False
, norandomt=True
, fit_plots=True
, save_mcmc=False
, save_model_info=False):
"""
    Function to allow the user to extract the relevant orbital data from the
    reduced time series of a visit. It also allows the user to exclude the first
    orbit or the first exposure of each orbit. The selected data are then fed
    into "marg_mcmc" for model light curve fitting.
INPUTS
See config.py file
[DATA]
    x, y: Allow the user to reduce the aperture by (x, y) pixels
    check: Set to True to manually select the data
    inp_file: Allow the user to load preprocessing information instead of
    selecting it manually. check and inp_file cannot both be False or both
    be True. If check is True, "user_inputs" returns the inputs the user
    chose: [first orbit, last orbit, sigma cut factor, number of passes,
    center eclipse time]. If check is False, the user_inputs array is used
    as the inputs (easier to automate)
[MODEL]
mcmc: Use MCMC sampler, extracting corner plot and other diagnostics
openinc: Fit for inclination (default is fixed)
openar: Fit for a/Rstar (default is fixed)
fixtime: Fix center of event time (default is open)
    norandomt: Do not allow the center-of-event-time starting point to vary randomly
fit_plots: Show model light curve fit in real time
[SAVE]
save_processed_data: Save the data with the systematics removed for the best fit model
save_model_info: Save best fit parameters for every systematic model
save_mcmc: Save MCMC products, such as corner plot, autocorrelation, etc.
The save files will all be saved with key or name "planet/visitXX/direction"
"""
if direction != 'both':
folder = '../data_reduction/reduced/%s/%s/final/*.fits' % (visit, direction)
data=np.sort(np.asarray(glob.glob(folder)))
nexposure = len(data)
print('There are %d exposures in this visit' % nexposure)
alldate=np.zeros(len(data))
time=np.zeros_like(alldate)
test=fits.open(data[0])
xlen, ylen = test[0].data.shape
test.close()
xlen-=2*x
ylen-=2*y
allspec=np.ma.zeros((len(data),xlen, ylen))
allerr=np.zeros((len(data),xlen,ylen))
xmin=x
xmax=xlen-x
ymin=y
ymax=ylen-y
for i, img in enumerate(data):
expfile=fits.open(img)
hdr=expfile[0].header
exp=expfile[0].data
mask=expfile[1].data
errs=expfile[2].data
expfile.close()
alldate[i]=(hdr['EXPSTART']+hdr['EXPEND'])/2.
time[i]=hdr['EXPTIME']
expo=exp[xmin:xmax, ymin:ymax]
mask=mask[xmin:xmax, ymin:ymax]
errs=errs[xmin:xmax, ymin:ymax]
allspec[i,:,:]=np.ma.array(expo, mask=mask)
allerr[i,:,:]=np.ma.array(errs, mask=mask)
allspec1d=np.ma.sum(allspec,axis=1)
allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1))
median_flux = np.median(np.ma.sum(allspec1d, axis=1))
        # Regardless of direction, if all exposures share the same scan
        # direction we make dir_array all zeros for easy parameter use in
        # model fitting.
dir_array = np.zeros_like(alldate)
else:
direction = 'forward'
folder = '../data_reduction/reduced/%s/%s/final/*.fits' % (visit, direction)
data=np.sort(np.asarray(glob.glob(folder)))
nexposure = len(data)
print('There are %d exposures in this visit' % nexposure)
alldate=np.zeros(len(data))
time=np.zeros_like(alldate)
test=fits.open(data[0])
xlen, ylen = test[0].data.shape
test.close()
xlen-=2*x
ylen-=2*y
allspec=np.ma.zeros((len(data),xlen, ylen))
allerr=np.zeros((len(data),xlen,ylen))
xmin=x
xmax=xlen-x
ymin=y
ymax=ylen-y
for i, img in enumerate(data):
expfile=fits.open(img)
hdr=expfile[0].header
exp=expfile[0].data
mask=expfile[1].data
errs=expfile[2].data
expfile.close()
alldate[i]=(hdr['EXPSTART']+hdr['EXPEND'])/2.
time[i]=hdr['EXPTIME']
expo=exp[xmin:xmax, ymin:ymax]
mask=mask[xmin:xmax, ymin:ymax]
errs=errs[xmin:xmax, ymin:ymax]
allspec[i,:,:]=np.ma.array(expo, mask=mask)
allerr[i,:,:]=np.ma.array(errs, mask=mask)
allspec1d=np.ma.sum(allspec,axis=1)
allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1))
median_flux = np.median(np.ma.sum(allspec1d, axis=1))
# Now do for other direction
direction = 'reverse'
folder = '../data_reduction/reduced/%s/%s/final/*.fits' % (visit, direction)
rdata=np.sort(np.asarray(glob.glob(folder)))
nexposure = len(rdata)
print('There are %d exposures in this visit' % nexposure)
rdate=np.zeros(len(rdata))
rtime=np.zeros_like(rdate)
rtest=fits.open(rdata[0])
rxlen,rylen = rtest[0].data.shape
        rtest.close()
        rxlen-=2*x
        rylen-=2*y
rallspec=np.ma.zeros((len(rdata),rxlen, rylen))
rallerr=np.zeros((len(rdata),rxlen,rylen))
rxmin=x
rxmax=rxlen-x
rymin=y
rymax=rylen-y
for i, img in enumerate(rdata):
expfile=fits.open(img)
hdr=expfile[0].header
exp=expfile[0].data
mask=expfile[1].data
errs=expfile[2].data
expfile.close()
rdate[i]=(hdr['EXPSTART']+hdr['EXPEND'])/2.
rtime[i]=hdr['EXPTIME']
expo=exp[rxmin:rxmax, rymin:rymax]
mask=mask[rxmin:rxmax, rymin:rymax]
errs=errs[rxmin:rxmax, rymin:rymax]
rallspec[i,:,:]=np.ma.array(expo, mask=mask)
rallerr[i,:,:]=np.ma.array(errs, mask=mask)
rallspec1d=np.ma.sum(rallspec,axis=1)
rallerr1d=np.sqrt(np.ma.sum(rallerr*rallerr, axis=1))
rmedian_flux = np.median(np.ma.sum(rallspec1d, axis=1))
dir_factor = median_flux / rmedian_flux
#dir_factor=1
rallspec1d = rallspec1d * dir_factor
rallerr1d = rallerr1d * dir_factor
# Define array that has 0s for forward scan and 1s for reverse
dir_array = np.append(np.zeros_like(alldate), np.ones_like(rdate))
alldate = np.ma.append(alldate,rdate)
allspec1d = np.ma.append(allspec1d, rallspec1d, axis=0)
allerr1d = np.ma.append(allerr1d, rallerr1d, axis=0)
direction = 'both'
# Put in correct time order
date_order=np.argsort(alldate)
dir_array = dir_array[date_order]
dir_save = dir_array
alldate=alldate[date_order]
allspec1d=allspec1d[date_order,:]
allerr1d=allerr1d[date_order,:]
#ix = np.arange(len(dir_array))
#ix = ix[17:]
#ix=np.delete(ix, [0,5, 19,38,57])
#dir_array = dir_array[ix]
#alldate=alldate[ix]
#allspec1d=allspec1d[ix, :]
#allerr1d=allerr1d[ix, :]
#0, 19, 38, 57
# Classify the data by each HST orbit. Returns array (orbit)
    # which contains the indices for the start of each orbit
orbit=get_orbits(alldate)
planet=visit[:-8]
props, errs=inputs('../planets/%s/inputs.dat' % planet, transit)
a1=gl.get_limb(planet,14000.,'a1')
a2=gl.get_limb(planet,14000.,'a2')
a3=gl.get_limb(planet,14000.,'a3')
a4=gl.get_limb(planet,14000.,'a4')
props=np.append(props, [a1,a2,a3,a4])
errs=np.append(errs, np.zeros(4))
props_hold=props.copy()
#orbit = np.zeros(1)
print("Number of total orbits: %d" % (len(orbit)-1))
# Choose which orbits to include in the eclipse fitting. 1-2 on either
# side of the eclipse is recommended
check2=check
if check == False:
if inp_file == True:
df=pd.read_csv('./preprocess_info.csv')
df=df[df.loc[:,'Transit']==transit]
user_inputs=df.loc[visit+direction,'User Inputs'].values
else:
sys.exit('Either allow checking or give csv file with pandas info.')
#allspec1d=np.ma.sum(allspec,axis=1).data
#allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1)).data
first_orbit=user_inputs[0]
last_orbit=user_inputs[1]
first_data = orbit[first_orbit]
last_data=orbit[last_orbit+1]
date=alldate[first_data:last_data]
dir_array=dir_array[first_data:last_data]
#allspec2d=allspec[first_data:last_data,:,:]
#allerr2d=allerr[first_data:last_data,:,:]
        spec1d=allspec1d[first_data:last_data,:]
        err1d=allerr1d[first_data:last_data,:]
#allspec1d=np.ma.sum(allspec2d,axis=1) #spectra for each exposure: these axes may be backwards
#allerr1d=np.sqrt(np.ma.sum(allerr2d*allerr2d, axis=1))
light = np.ma.sum(spec1d, axis=1) # total light for each exposure
lighterr=np.sqrt(np.ma.sum(err1d*err1d, axis=1))
user_inputs[5], user_inputs[6] = first_data, last_data
        # sss  # debugging halt (disabled)
if check == True:
user_inputs=np.zeros(7)
while check2==True:
if data_plots==True:
print('woo')
#err=np.sqrt(np.sum(np.sum(allerr[:,:,:]*allerr[:,:,:], axis=1), axis=1))
#fl= np.sum(allspec[:,:,:], (1,2))
err=np.sqrt(np.sum(allerr1d*allerr1d, axis=1))
fl= np.sum(allspec1d, axis=1)
plt.errorbar(alldate,fl,err, fmt='o')
plt.xlabel('MJD')
plt.ylabel('Total Flux')
plt.show(block=False)
first = input("Enter the first orbit to include (starting from 0): ")
first_orbit=int(first)
user_inputs[0]=first_orbit
last= input("Enter the last orbit to include (starting form 0): ")
last_orbit=int(last)
if data_plots==True: plt.close()
user_inputs[1]=last_orbit
#allspec1d=np.ma.sum(allspec,axis=1).data
#allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1)).data
first_data = orbit[first_orbit]
last_data=orbit[last_orbit+1]
date=alldate[first_data:last_data]
dir_array=dir_array[first_data:last_data]
#spec2d=allspec[first_data:last_data,:,:]
#err2d=allerr[first_data:last_data,:,:]
spec1d=allspec1d[first_data:last_data,:]
err1d=allerr1d[first_data:last_data,:]
#spec1d=np.ma.sum(spec2d,axis=1)
#err1d=np.sqrt(np.ma.sum(err2d*err2d, axis=1))
light = np.ma.sum(spec1d,axis=1)
lighterr=np.sqrt(np.ma.sum(err1d*err1d, axis=1))
user_inputs[5], user_inputs[6] = first_data, last_data
if data_plots==True:
plt.errorbar(date, light/max(light),lighterr/max(light),fmt='o')
plt.xlabel('MJD')
plt.ylabel('Total Flux')
plt.show(block=False)
ans = input("Is this correct? (Y/N): ")
if ans.lower() in ['y','yes']: check2=False
if data_plots==True: plt.close()
props[1]=event_time(date, props)
user_inputs[4]=props[1]
# We are only interested in scatter within orbits, so correct for flux
# between orbits by setting the median of each orbit to the median of
# the first orbit
# light_corrected=correction(props, date1, light, transit)
# Do a 4-pass sigma cut. 3-5 sigma is ideal. Change n to see how data
# is affected. A sigma of 3, 4, or 5 could be used, it depends on the
# data
# light2=light.copy()
# lighterr2=lighterr.copy()
# allspec2=allspec1.copy()
# allerr2=allerr1.copy()
# date2=date1.copy()
# light_corrected2=light_corrected.copy()
# ans2=''
# if check==False:
# light, allspec1, light_corrected, date1, lighterr, allerr1 = remove_bad_data(light
# , allspec1
# , light_corrected
# , date1
# , lighterr
# , allerr1
# , user_inputs)
# if check==True:
# while check==True:
# light=light2.copy()
# lighterr=lighterr2.copy()
# allspec1=allspec2.copy()
# allerr1=allerr2.copy()
# date1=date2.copy()
# light_corrected=light_corrected2.copy()
# # This performs the sigma cut and returns input for the fitter: a
# # double array which contains a spectra for each data point
# light, allspec1, light_corrected, date1, lighterr, allerr1 = remove_bad_data(light
# , allspec1
# , light_corrected
# , date1
# , lighterr
# , allerr1
# , user_inputs
# , check=check)
# if ploton==True:
# plt.errorbar(date2, light2,lighterr2, fmt='ro')
# plt.xlabel('MJD')
# plt.ylabel('Total Flux')
# plt.errorbar(date1, light,lighterr, fmt='o',ls='dotted')
# plt.show(block=False)
# ans2=raw_input('This is the new data, with the red points removed. Is this okay? (Y/N): ')
# if ploton==True: plt.close()
# if ans2.lower() in ['y','yes']: check=False
"""if transit == True:
fixtime = False
norandomt = True
#openar = True
openar = True
openinc = False
mcmc = False
else:
fixtime = True
norandomt = True
openar = False
openinc = False
mcmc = False
save_name = visit + '/' + direction
#savemc = visit
save_model_info = False
#save_model_info = visit
#visit=False
#savedata=False
save_mcmc=False
#savewl=False"""
# Set inclination (2), ars (3) to desired value if you want
#props[2]=89.17
#props[3]=5.55
# dir_array has only been included in marg_mcmc so far
#results=wl.whitelight2018(props, date, spec1d.data, err1d.data,
# plotting=True, norandomt=norandomt,
# openinc=openinc, openar=openar, fixtime=fixtime,
# transit=transit, savewl=visit)
    save_name = visit + '/' + direction
    print(props)
    # sss  # debugging halt (disabled)
results=wl.whitelight2020(props, date, spec1d.data, err1d.data, dir_array,
plotting=fit_plots, mcmc=mcmc, norandomt=norandomt,
openinc=openinc, openar=openar, fixtime=fixtime,
transit=transit, save_mcmc=save_mcmc, save_model_info=save_model_info,
save_name =save_name)
#direction = 'forward'
if save_processed_data == True:
sh=wl.get_shift(allspec1d)
cols=['Pixel %03d' % i for i in range(allspec1d.shape[1])]
subindex=['Value']*allspec1d.shape[0] + ['Error']*allspec1d.shape[0]
ind=pd.MultiIndex.from_product([[save_name], subindex])
processed_data=pd.DataFrame(np.vstack((allspec1d,allerr1d)),columns=cols, index=ind)
processed_data['Date']=
|
np.append(alldate,alldate)
|
numpy.append
|
"""This file implements functions to encode and decode 3boxes."""
import numpy as np
def direct_box_encoding(cls_labels, points_xyz, boxes_3d):
return boxes_3d
def direct_box_decoding(cls_labels, points_xyz, encoded_boxes):
return encoded_boxes
def center_box_encoding(cls_labels, points_xyz, boxes_3d):
boxes_3d[:, 0] = boxes_3d[:, 0] - points_xyz[:, 0]
boxes_3d[:, 1] = boxes_3d[:, 1] - points_xyz[:, 1]
boxes_3d[:, 2] = boxes_3d[:, 2] - points_xyz[:, 2]
return boxes_3d
def center_box_decoding(cls_labels, points_xyz, encoded_boxes):
encoded_boxes[:, 0] = encoded_boxes[:, 0] + points_xyz[:, 0]
encoded_boxes[:, 1] = encoded_boxes[:, 1] + points_xyz[:, 1]
encoded_boxes[:, 2] = encoded_boxes[:, 2] + points_xyz[:, 2]
return encoded_boxes
def voxelnet_box_encoding(cls_labels, points_xyz, boxes_3d):
# offset
boxes_3d[:, 0] = boxes_3d[:, 0] - points_xyz[:, 0]
boxes_3d[:, 1] = boxes_3d[:, 1] - points_xyz[:, 1]
boxes_3d[:, 2] = boxes_3d[:, 2] - points_xyz[:, 2]
# Car
mask = cls_labels[:, 0] == 2
boxes_3d[mask, 0] = boxes_3d[mask, 0]/3.9
boxes_3d[mask, 1] = boxes_3d[mask, 1]/1.56
boxes_3d[mask, 2] = boxes_3d[mask, 2]/1.6
boxes_3d[mask, 3] = np.log(boxes_3d[mask, 3]/3.9)
boxes_3d[mask, 4] = np.log(boxes_3d[mask, 4]/1.56)
boxes_3d[mask, 5] = np.log(boxes_3d[mask, 5]/1.6)
# Pedestrian and Cyclist
    mask = (cls_labels[:, 0] == 1) | (cls_labels[:, 0] == 3)
boxes_3d[mask, 0] = boxes_3d[mask, 0]/0.8
boxes_3d[mask, 1] = boxes_3d[mask, 1]/1.73
boxes_3d[mask, 2] = boxes_3d[mask, 2]/0.6
boxes_3d[mask, 3] = np.log(boxes_3d[mask, 3]/0.8)
boxes_3d[mask, 4] = np.log(boxes_3d[mask, 4]/1.73)
boxes_3d[mask, 5] = np.log(boxes_3d[mask, 5]/0.6)
# normalize all yaws
boxes_3d[:, 6] = boxes_3d[:, 6]/(np.pi*0.5)
return boxes_3d
def voxelnet_box_decoding(cls_labels, points_xyz, encoded_boxes):
# Car
mask = cls_labels[:, 0] == 2
encoded_boxes[mask, 0] = encoded_boxes[mask, 0]*3.9
encoded_boxes[mask, 1] = encoded_boxes[mask, 1]*1.56
encoded_boxes[mask, 2] = encoded_boxes[mask, 2]*1.6
encoded_boxes[mask, 3] = np.exp(encoded_boxes[mask, 3])*3.9
encoded_boxes[mask, 4] = np.exp(encoded_boxes[mask, 4])*1.56
encoded_boxes[mask, 5] = np.exp(encoded_boxes[mask, 5])*1.6
# Pedestrian and Cyclist
    mask = (cls_labels[:, 0] == 1) | (cls_labels[:, 0] == 3)
encoded_boxes[mask, 0] = encoded_boxes[mask, 0]*0.8
encoded_boxes[mask, 1] = encoded_boxes[mask, 1]*1.73
encoded_boxes[mask, 2] = encoded_boxes[mask, 2]*0.6
encoded_boxes[mask, 3] = np.exp(encoded_boxes[mask, 3])*0.8
encoded_boxes[mask, 4] = np.exp(encoded_boxes[mask, 4])*1.73
encoded_boxes[mask, 5] = np.exp(encoded_boxes[mask, 5])*0.6
# offset
encoded_boxes[:, 0] = encoded_boxes[:, 0] + points_xyz[:, 0]
encoded_boxes[:, 1] = encoded_boxes[:, 1] + points_xyz[:, 1]
encoded_boxes[:, 2] = encoded_boxes[:, 2] + points_xyz[:, 2]
# recover all yaws
encoded_boxes[:, 6] = encoded_boxes[:, 6]*(np.pi*0.5)
return encoded_boxes
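# Round-trip sketch with hypothetical values: voxelnet_box_decoding inverts
# voxelnet_box_encoding for a single "Car" (label 2) box, up to floating-point
# error. Note that both functions modify the box array they are given in place.
#
#   >>> labels = np.array([[2]])
#   >>> points = np.array([[10.0, 1.0, 20.0]])
#   >>> box = np.array([[11.0, 1.5, 21.0, 4.0, 1.5, 1.7, 0.3]])
#   >>> dec = voxelnet_box_decoding(labels, points,
#   ...                             voxelnet_box_encoding(labels, points, box.copy()))
#   >>> np.allclose(dec, [[11.0, 1.5, 21.0, 4.0, 1.5, 1.7, 0.3]])
#   True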
def classaware_voxelnet_box_encoding(cls_labels, points_xyz, boxes_3d):
"""
Args:
boxes_3d: [None, num_classes, 7]
"""
encoded_boxes_3d = np.zeros_like(boxes_3d)
num_classes = boxes_3d.shape[1]
points_xyz = np.expand_dims(points_xyz, axis=1)
points_xyz = np.tile(points_xyz, (1, num_classes, 1))
encoded_boxes_3d[:, :, 0] = boxes_3d[:, :, 0] - points_xyz[:, :, 0]
encoded_boxes_3d[:, :, 1] = boxes_3d[:, :, 1] - points_xyz[:, :, 1]
encoded_boxes_3d[:, :, 2] = boxes_3d[:, :, 2] - points_xyz[:, :, 2]
# Car horizontal
mask = cls_labels[:, 0] == 1
encoded_boxes_3d[mask, 0, 0] = encoded_boxes_3d[mask, 0, 0]/3.9
encoded_boxes_3d[mask, 0, 1] = encoded_boxes_3d[mask, 0, 1]/1.56
encoded_boxes_3d[mask, 0, 2] = encoded_boxes_3d[mask, 0, 2]/1.6
encoded_boxes_3d[mask, 0, 3] = np.log(boxes_3d[mask, 0, 3]/3.9)
encoded_boxes_3d[mask, 0, 4] = np.log(boxes_3d[mask, 0, 4]/1.56)
encoded_boxes_3d[mask, 0, 5] = np.log(boxes_3d[mask, 0, 5]/1.6)
encoded_boxes_3d[mask, 0, 6] = boxes_3d[mask, 0, 6]/(np.pi*0.25)
# Car vertical
mask = cls_labels[:, 0] == 2
encoded_boxes_3d[mask, 0, 0] = encoded_boxes_3d[mask, 0, 0]/3.9
encoded_boxes_3d[mask, 0, 1] = encoded_boxes_3d[mask, 0, 1]/1.56
encoded_boxes_3d[mask, 0, 2] = encoded_boxes_3d[mask, 0, 2]/1.6
encoded_boxes_3d[mask, 0, 3] = np.log(boxes_3d[mask, 0, 3]/3.9)
encoded_boxes_3d[mask, 0, 4] = np.log(boxes_3d[mask, 0, 4]/1.56)
encoded_boxes_3d[mask, 0, 5] = np.log(boxes_3d[mask, 0, 5]/1.6)
encoded_boxes_3d[mask, 0, 6] = (boxes_3d[mask, 0, 6]-np.pi*0.5)/(np.pi*0.25)
# Pedestrian horizontal
mask = cls_labels[:, 0] == 3
encoded_boxes_3d[mask, 0, 0] = encoded_boxes_3d[mask, 0, 0]/0.8
encoded_boxes_3d[mask, 0, 1] = encoded_boxes_3d[mask, 0, 1]/1.73
encoded_boxes_3d[mask, 0, 2] = encoded_boxes_3d[mask, 0, 2]/0.6
encoded_boxes_3d[mask, 0, 3] = np.log(boxes_3d[mask, 0, 3]/0.8)
encoded_boxes_3d[mask, 0, 4] = np.log(boxes_3d[mask, 0, 4]/1.73)
encoded_boxes_3d[mask, 0, 5] = np.log(boxes_3d[mask, 0, 5]/0.6)
encoded_boxes_3d[mask, 0, 6] = boxes_3d[mask, 0, 6]/(np.pi*0.25)
# Pedestrian vertical
mask = cls_labels[:, 0] == 4
encoded_boxes_3d[mask, 0, 0] = encoded_boxes_3d[mask, 0, 0]/0.8
encoded_boxes_3d[mask, 0, 1] = encoded_boxes_3d[mask, 0, 1]/1.73
encoded_boxes_3d[mask, 0, 2] = encoded_boxes_3d[mask, 0, 2]/0.6
encoded_boxes_3d[mask, 0, 3] = np.log(boxes_3d[mask, 0, 3]/0.8)
encoded_boxes_3d[mask, 0, 4] = np.log(boxes_3d[mask, 0, 4]/1.73)
encoded_boxes_3d[mask, 0, 5] = np.log(boxes_3d[mask, 0, 5]/0.6)
encoded_boxes_3d[mask, 0, 6] = (boxes_3d[mask, 0, 6]-np.pi*0.5)/(np.pi*0.25)
# Cyclist horizontal
mask = cls_labels[:, 0] == 5
encoded_boxes_3d[mask, 0, 0] = encoded_boxes_3d[mask, 0, 0]/1.76
encoded_boxes_3d[mask, 0, 1] = encoded_boxes_3d[mask, 0, 1]/1.73
encoded_boxes_3d[mask, 0, 2] = encoded_boxes_3d[mask, 0, 2]/0.6
encoded_boxes_3d[mask, 0, 3] = np.log(boxes_3d[mask, 0, 3]/1.76)
encoded_boxes_3d[mask, 0, 4] =
|
np.log(boxes_3d[mask, 0, 4]/1.73)
|
numpy.log
|
"""Runner script for single and multi-agent reinforcement learning experiments.
This script performs an RL experiment using the PPO algorithm. Choice of
hyperparameters can be seen and adjusted from the code below.
Usage
python train.py EXP_CONFIG
"""
import argparse
import json
import os
import sys
from time import strftime
from copy import deepcopy
from flow.core import rewards
from flow.core.util import ensure_dir
from flow.utils.registry import env_constructor
from flow.utils.rllib import FlowParamsEncoder, get_flow_params
from flow.utils.registry import make_create_env
import numpy as np
import wandb
# Callbacks
# Custom state can be stored for the episode in the info["episode"].user_data dict
# Custom scalar metrics reported by saving values to the info["episode"].custom_metrics dict
def on_episode_start(info):
episode = info["episode"]
episode.user_data["global_reward"] = []
def on_episode_step(info):
episode = info["episode"]
env = info["env"]
kernel = env.vector_env.envs[0].k
vel = np.array([
kernel.vehicle.get_speed(veh_id)
for veh_id in kernel.vehicle.get_ids()
])
rew = np.mean(vel) / 5 if all(vel > -100) else 0
mean_actions = np.mean(np.abs(np.array(episode.last_action_for())))
accel_threshold = 0
if mean_actions > accel_threshold:
rew += 4 * (accel_threshold - mean_actions)
# reward average velocity
episode.user_data["global_reward"].append(rew)
def on_episode_step_multi(info):
episode = info["episode"]
env = info["env"]
kernel = env.envs[0].k
vel = np.array([
kernel.vehicle.get_speed(veh_id)
for veh_id in kernel.vehicle.get_ids()
])
rew = np.mean(vel)/5 if all(vel > -100) else 0
mean_actions = np.mean(np.abs(np.array(episode.last_action_for())))
accel_threshold = 0
if mean_actions > accel_threshold:
rew += (accel_threshold - mean_actions)
# reward average velocity
episode.user_data["global_reward"].append(rew)
def on_episode_end(info):
episode = info["episode"]
mean_rew =
|
np.sum(episode.user_data["global_reward"])
|
numpy.sum
|
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
<EMAIL>,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Action classes for representing 3D grasp actions.
Author
------
<NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass
import numpy as np
from autolab_core import Point
from .grasp import Grasp2D, SuctionPoint2D, MultiSuctionPoint2D
class Action(with_metaclass(ABCMeta, object)):
"""Abstract action class.
Attributes
----------
q_value : float
Grasp quality.
id : int
Integer identifier for the action.
metadata : dict
Key-value dict of extra data about the action.
"""
    def __init__(self, q_value=0.0, id=-1, metadata=None):
        self._q_value = q_value
        self._id = id
        self._metadata = metadata if metadata is not None else {}
@property
def q_value(self):
return self._q_value
@property
def id(self):
return self._id
@property
def metadata(self):
return self._metadata
class NoAction(Action):
"""Proxy for taking no action when none can be found."""
pass
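# Small usage sketch: NoAction reuses Action.__init__, so a placeholder action
# can still carry a quality score, an integer id, and free-form metadata.
#
#   >>> a = NoAction(q_value=0.0, id=7, metadata={"reason": "no grasp found"})
#   >>> a.q_value, a.id, a.metadata["reason"]
#   (0.0, 7, 'no grasp found')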
class GraspAction3D(with_metaclass(ABCMeta, Action)):
"""Abstract grasp class with grasp specified as an end-effector pose.
Attributes
----------
T_grasp_world : :obj:`RigidTransform`
Pose of the grasp w.r.t. world coordinate frame.
"""
    def __init__(self, T_grasp_world, q_value=0.0, id=-1, metadata=None):
        self.T_grasp_world = T_grasp_world
        Action.__init__(self, q_value, id, metadata)
@abstractmethod
def project(self, camera_intr, T_camera_world):
pass
class ParallelJawGrasp3D(GraspAction3D):
"""Grasping with a parallel-jaw gripper.
Attributes
----------
T_grasp_world : :obj:`RigidTransform`
Pose of the grasp wrt world coordinate frame.
"""
def project(self, camera_intr, T_camera_world, gripper_width=0.05):
# Compute pose of grasp in camera frame.
T_grasp_camera = T_camera_world.inverse() * self.T_grasp_world
y_axis_camera = T_grasp_camera.y_axis[:2]
if
|
np.linalg.norm(y_axis_camera)
|
numpy.linalg.norm
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.util import nest
class Plus1RNNCell(tf.nn.rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
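# Illustration of the toy cell's contract (no TensorFlow graph is needed here,
# since __call__ just adds 1 elementwise): the output is input + 1 and the new
# state is state + 1.
#
#   >>> cell = Plus1RNNCell()
#   >>> out, new_state = cell(np.zeros((2, 5)), np.ones((2, 5)))
#   >>> float(out.mean()), float(new_state.mean())
#   (1.0, 2.0)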
class DummyMultiDimensionalLSTM(tf.nn.rnn_cell.RNNCell):
"""LSTM Cell generating (output, new_state) = (input + 1, state + 1).
The input to this cell may have an arbitrary number of dimensions that follow
the preceding 'Time' and 'Batch' dimensions.
"""
def __init__(self, dims):
"""Initialize the Multi-dimensional LSTM cell.
Args:
dims: tuple that contains the dimensions of the output of the cell,
without including 'Time' or 'Batch' dimensions.
"""
if not isinstance(dims, tuple):
raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM"
"should be a tuple of ints.")
self._dims = dims
self._output_size = tf.TensorShape(self._dims)
self._state_size = (tf.TensorShape(self._dims), tf.TensorShape(self._dims))
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
h, c = state
return (input_ + 1, (h + 1, c + 1))
class NestedRNNCell(tf.nn.rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1).
The input, output and state of this cell is a tuple of two tensors.
"""
@property
def output_size(self):
return (5, 5)
@property
def state_size(self):
return (6, 6)
def __call__(self, input_, state, scope=None):
h, c = state
x, y = input_
return ((x + 1, y + 1), (h + 1, c + 1))
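# The nested variant follows the same rule, but the input, output, and state
# are each a pair of tensors (plain numpy arrays stand in for tensors here):
#
#   >>> cell = NestedRNNCell()
#   >>> (x1, y1), (h1, c1) = cell((np.zeros(5), np.zeros(5)), (np.zeros(6), np.zeros(6)))
#   >>> float(x1[0]), float(h1[0])
#   (1.0, 1.0)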
class TestStateSaver(object):
def __init__(self, batch_size, state_size):
self._batch_size = batch_size
self._state_size = state_size
self.saved_state = {}
def state(self, name):
if isinstance(self._state_size, dict):
state_size = self._state_size[name]
else:
state_size = self._state_size
if isinstance(state_size, int):
state_size = (state_size,)
elif isinstance(state_size, tuple):
pass
else:
raise TypeError("state_size should either be an int or a tuple")
return tf.zeros((self._batch_size,) + state_size)
def save_state(self, name, state):
self.saved_state[name] = state
return tf.identity(state)
class RNNTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testRNN(self):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape(), inp.get_shape())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=False) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state],
feed_dict={inputs[0]: input_value})
# Outputs
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
# Final state
self.assertAllClose(
values[-1],
max_length * np.ones((batch_size, input_size), dtype=np.float32))
def testDropout(self):
cell = Plus1RNNCell()
full_dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
cell, input_keep_prob=1e-12, seed=0)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.variable_scope("drop_scope"):
dropped_outputs, _ = tf.nn.rnn(
full_dropout_cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=False) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state],
feed_dict={inputs[0]: input_value})
full_dropout_values = sess.run(dropped_outputs,
feed_dict={inputs[0]: input_value})
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
for d_v in full_dropout_values[:-1]: # Add 1.0 to dropped_out (all zeros)
self.assertAllClose(d_v, np.ones_like(input_value))
def testDynamicCalculation(self):
cell = Plus1RNNCell()
sequence_length = tf.placeholder(tf.int64)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("drop_scope"):
dynamic_outputs, dynamic_state = tf.nn.rnn(
cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
self.assertEqual(len(dynamic_outputs), len(inputs))
with self.test_session(use_gpu=False) as sess:
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(dynamic_outputs,
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
dynamic_state_value = sess.run([dynamic_state],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# outputs are fully calculated for t = 0, 1
for v in dynamic_values[:2]:
self.assertAllClose(v, input_value + 1.0)
# outputs at t = 2 are zero for entry 0, calculated for entry 1
self.assertAllClose(
dynamic_values[2],
np.vstack((
np.zeros((input_size)),
1.0 + input_value[1, :])))
# outputs at t = 3+ are zero
for v in dynamic_values[3:]:
self.assertAllEqual(v, np.zeros_like(input_value))
# the final states are:
# entry 0: the values from the calculation at t=1
# entry 1: the values from the calculation at t=2
self.assertAllEqual(
dynamic_state_value[0],
np.vstack((
1.0 * (1 + 1) * np.ones((input_size)),
1.0 * (2 + 1) * np.ones((input_size)))))
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
      # check that all the variable names start
      # with the proper scope.
tf.initialize_all_variables()
all_vars = tf.all_variables()
prefix = prefix or "RNN"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("RNN with scope: %s (%s)"
% (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testScope(self):
def factory(scope):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
return tf.nn.rnn(cell, inputs, dtype=tf.float32, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class GRUTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testDynamic(self, use_gpu):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
concat_inputs = tf.placeholder(
tf.float32, shape=(time_steps, batch_size, input_size))
cell = tf.nn.rnn_cell.GRUCell(num_units=num_units)
with tf.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell, inputs=concat_inputs, sequence_length=sequence_length,
time_major=True, dtype=tf.float32)
feeds = {concat_inputs: input_values}
# Initialize
tf.initialize_all_variables().run(feed_dict=feeds)
sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
def testDynamic(self):
self._testDynamic(use_gpu=False)
self._testDynamic(use_gpu=True)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
tf.initialize_all_variables()
      # check that all the variable names start
      # with the proper scope.
all_vars = tf.all_variables()
prefix = prefix or "RNN"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("RNN with scope: %s (%s)"
% (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testDynamicScope(self):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
sequence_length = np.random.randint(0, time_steps, size=batch_size)
def factory(scope):
concat_inputs = tf.placeholder(
tf.float32, shape=(time_steps, batch_size, input_size))
cell = tf.nn.rnn_cell.GRUCell(num_units=num_units)
return tf.nn.dynamic_rnn(cell, inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True, dtype=tf.float32,
scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testNoProjNoSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
cell = tf.nn.rnn_cell.LSTMCell(num_units, initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testCellClipping(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True, cell_clip=0.0, initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
for value in values:
# if cell c is clipped to 0, tanh(c) = 0 => m==0
self.assertAllEqual(value, np.zeros((batch_size, num_units)))
def _testNoProjNoShardingSimpleStateSaver(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=False, initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name="save_lstm")
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
(last_state_value, saved_state_value) = sess.run(
[state, state_saver.saved_state["save_lstm"]],
feed_dict={inputs[0]: input_value})
self.assertAllEqual(last_state_value, saved_state_value)
def testNoProjNoShardingTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, num_units)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=False, initializer=initializer,
state_is_tuple=True)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=("c", "m"))
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
last_and_saved_states = sess.run(
state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
feed_dict={inputs[0]: input_value})
self.assertEqual(4, len(last_and_saved_states))
self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
def testNoProjNoShardingNestedTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, {"c0": num_units,
"m0": num_units,
"c1": num_units + 1,
"m1": num_units + 1,
"c2": num_units + 2,
"m2": num_units + 2,
"c3": num_units + 3,
"m3": num_units + 3})
def _cell(i):
return tf.nn.rnn_cell.LSTMCell(
num_units + i, use_peepholes=False, initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = tf.nn.rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
state_names = (("c0", "m0"), ("c1", "m1"),
("c2", "m2"), ("c3", "m3"))
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=state_names)
self.assertEqual(len(outputs), len(inputs))
# Final output comes from _cell(3) which has state size num_units + 3
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
last_states = sess.run(
list(nest.flatten(state)), feed_dict={inputs[0]: input_value})
saved_states = sess.run(
list(state_saver.saved_state.values()),
feed_dict={inputs[0]: input_value})
self.assertEqual(8, len(last_states))
self.assertEqual(8, len(saved_states))
flat_state_names = nest.flatten(state_names)
named_saved_states = dict(
zip(state_saver.saved_state.keys(), saved_states))
for i in range(8):
self.assertAllEqual(
last_states[i],
named_saved_states[flat_state_names[i]])
def _testProjNoSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer,
state_is_tuple=False)
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def testStateTupleWithProjAndSequenceLength(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell_notuple = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer, state_is_tuple=False)
cell_tuple = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer, state_is_tuple=True)
outputs_notuple, state_notuple = tf.nn.rnn(
cell_notuple, inputs, dtype=tf.float32,
sequence_length=sequence_length)
tf.get_variable_scope().reuse_variables()
outputs_tuple, state_tuple = tf.nn.rnn(
cell_tuple, inputs, dtype=tf.float32,
sequence_length=sequence_length)
self.assertEqual(len(outputs_notuple), len(inputs))
self.assertEqual(len(outputs_tuple), len(inputs))
self.assertTrue(isinstance(state_tuple, tuple))
self.assertTrue(isinstance(state_notuple, tf.Tensor))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
outputs_notuple_v = sess.run(
outputs_notuple, feed_dict={inputs[0]: input_value})
outputs_tuple_v = sess.run(
outputs_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
(state_notuple_v,) = sess.run(
(state_notuple,), feed_dict={inputs[0]: input_value})
state_tuple_v = sess.run(
state_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
def _testProjSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testTooManyShards(self, use_gpu):
num_units = 3
input_size = 5
num_proj = 4
num_proj_shards = 4
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()):
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
with self.assertRaises(ValueError):
tf.nn.rnn(cell, inputs, dtype=tf.float32)
def _testDoubleInput(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float64, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = tf.nn.rnn(
cell, inputs, initial_state=cell.zero_state(batch_size, tf.float64))
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run()
input_value = np.asarray(np.random.randn(batch_size, input_size),
dtype=np.float64)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
self.assertEqual(values[0].dtype, input_value.dtype)
def _testShardNoShardEquivalentOutput(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
initializer = tf.constant_initializer(0.001)
cell_noshard = tf.nn.rnn_cell.LSTMCell(
num_units,
num_proj=num_proj,
use_peepholes=True,
initializer=initializer,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
state_is_tuple=False)
cell_shard = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
initializer=initializer, num_proj=num_proj,
state_is_tuple=False)
with tf.variable_scope("noshard_scope"):
outputs_noshard, state_noshard = tf.nn.rnn(
cell_noshard, inputs, dtype=tf.float32)
with tf.variable_scope("shard_scope"):
outputs_shard, state_shard = tf.nn.rnn(
cell_shard, inputs, dtype=tf.float32)
self.assertEqual(len(outputs_noshard), len(inputs))
self.assertEqual(len(outputs_noshard), len(outputs_shard))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
feeds = dict((x, input_value) for x in inputs)
values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
values_shard = sess.run(outputs_shard, feed_dict=feeds)
state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
state_values_shard = sess.run([state_shard], feed_dict=feeds)
self.assertEqual(len(values_noshard), len(values_shard))
self.assertEqual(len(state_values_noshard), len(state_values_shard))
for (v_noshard, v_shard) in zip(values_noshard, values_shard):
self.assertAllClose(v_noshard, v_shard, atol=1e-3)
for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
self.assertAllClose(s_noshard, s_shard, atol=1e-3)
def _testDoubleInputWithDropoutAndDynamicCalculation(
self, use_gpu):
"""Smoke test for using LSTM with doubles, dropout, dynamic calculation."""
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
sequence_length = tf.placeholder(tf.int64)
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float64, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
dropout_cell = tf.nn.rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
outputs, state = tf.nn.rnn(
dropout_cell, inputs, sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, tf.float64))
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run(feed_dict={sequence_length: [2, 3]})
input_value = np.asarray(np.random.randn(batch_size, input_size),
dtype=np.float64)
values = sess.run(outputs, feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
state_value = sess.run([state], feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
self.assertEqual(values[0].dtype, input_value.dtype)
self.assertEqual(state_value[0].dtype, input_value.dtype)
def testSharingWeightsWithReuse(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
initializer_d = tf.random_uniform_initializer(-1, 1, seed=self._seed+1)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer,
state_is_tuple=False)
cell_d = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer_d,
state_is_tuple=False)
with tf.variable_scope("share_scope"):
outputs0, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.variable_scope("share_scope", reuse=True):
outputs1, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.variable_scope("diff_scope"):
outputs2, _ = tf.nn.rnn(cell_d, inputs, dtype=tf.float32)
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(
outputs0 + outputs1 + outputs2, feed_dict={inputs[0]: input_value})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:2*max_length]
outputs2_values = output_values[2*max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
self.assertEqual(len(outputs0_values), len(outputs2_values))
for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
# Same weights used by both RNNs so outputs should be the same.
self.assertAllEqual(o1, o2)
# Different weights used so outputs should be different.
self.assertTrue(
|
np.linalg.norm(o1-o3)
|
numpy.linalg.norm
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import os
import time
import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import statsmodels
import tensorflow as tf
from basenji import batcher
from basenji import params
from basenji import plots
from basenji import seqnn
################################################################################
# basenji_hidden.py
#
# Visualize the hidden representations of the test set.
################################################################################
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
parser = OptionParser(usage)
parser.add_option('-l', dest='layers',
default=None, help='Comma-separated list of layers to plot')
parser.add_option('-n', dest='num_seqs',
default=None, type='int',
help='Number of sequences to process')
parser.add_option('-o', dest='out_dir',
default='hidden', help='Output directory [Default: %default]')
parser.add_option('-t', dest='target_indexes',
default=None, help='Paint 2D plots with these target index values.')
(options, args) = parser.parse_args()
if len(args) != 3:
    parser.error('Must provide parameters, model, and test data HDF5')
else:
params_file = args[0]
model_file = args[1]
data_file = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
if options.layers is not None:
options.layers = [int(li) for li in options.layers.split(',')]
#######################################################
# load data
#######################################################
  data_open = h5py.File(data_file, 'r')
test_seqs = data_open['test_in']
test_targets = data_open['test_out']
if options.num_seqs is not None:
test_seqs = test_seqs[:options.num_seqs]
test_targets = test_targets[:options.num_seqs]
#######################################################
# model parameters and placeholders
#######################################################
job = params.read_job_params(params_file)
job['seq_length'] = test_seqs.shape[1]
job['seq_depth'] = test_seqs.shape[2]
job['num_targets'] = test_targets.shape[2]
job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))
t0 = time.time()
model = seqnn.SeqNN()
model.build_feed(job)
if options.target_indexes is None:
options.target_indexes = range(job['num_targets'])
else:
options.target_indexes = [
int(ti) for ti in options.target_indexes.split(',')
]
#######################################################
# test
#######################################################
# initialize batcher
batcher_test = batcher.Batcher(
test_seqs,
test_targets,
batch_size=model.hp.batch_size,
pool_width=model.hp.target_pool)
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
# get layer representations
layer_reprs, _ = model.hidden(sess, batcher_test, options.layers)
if options.layers is None:
options.layers = range(len(layer_reprs))
for li in options.layers:
layer_repr = layer_reprs[li]
try:
print(layer_repr.shape)
except:
print(layer_repr)
# sample one nt per sequence
ds_indexes = np.arange(0, layer_repr.shape[1], 256)
nt_reprs = layer_repr[:, ds_indexes, :].reshape((-1, layer_repr.shape[2]))
print('nt_reprs', nt_reprs.shape)
########################################################
# plot raw
sns.set(style='ticks', font_scale=1.2)
plt.figure()
g = sns.clustermap(nt_reprs, cmap='RdBu_r',
xticklabels=False, yticklabels=False)
g.ax_heatmap.set_xlabel('Representation')
g.ax_heatmap.set_ylabel('Sequences')
plt.savefig('%s/l%d_reprs.pdf' % (options.out_dir, li))
plt.close()
########################################################
# plot variance explained ratios
model_full = PCA()
model_full.fit_transform(nt_reprs)
evr = model_full.explained_variance_ratio_
pca_n = 40
plt.figure()
plt.scatter(range(1, pca_n + 1), evr[:pca_n], c='black')
ax = plt.gca()
ax.set_xlim(0, pca_n + 1)
ax.set_xlabel('Principal components')
ax.set_ylim(0, evr[:pca_n].max() * 1.05)
ax.set_ylabel('Variance explained')
ax.grid(True, linestyle=':')
plt.savefig('%s/l%d_pca.pdf' % (options.out_dir, li))
plt.close()
########################################################
# visualize in 2D
model2 = PCA(n_components=2)
nt_2d = model2.fit_transform(nt_reprs)
for ti in options.target_indexes:
# slice for target
test_targets_ti = test_targets[:,:,ti]
# repeat to match layer_repr
target_repeat = layer_repr.shape[1] // test_targets.shape[1]
test_targets_ti = np.repeat(test_targets_ti, target_repeat, axis=1)
# downsample indexes
nt_targets = test_targets_ti[:,ds_indexes].flatten()
# log transform
nt_targets = np.log1p(nt_targets)
plt.figure()
plt.scatter(
nt_2d[:, 0], nt_2d[:, 1], alpha=0.5, c=nt_targets, cmap='RdBu_r')
plt.colorbar()
ax = plt.gca()
ax.grid(True, linestyle=':')
plt.savefig('%s/l%d_nt2d_t%d.pdf' % (options.out_dir, li, ti))
plt.close()
########################################################
# plot neuron-neuron correlations
# compute correlation matrix
hidden_cov =
|
np.corrcoef(nt_reprs.T)
|
numpy.corrcoef
|
# %%
import copy
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import requests
import torch
import torch.nn.functional as F
import torchvision
from PIL import Image
from sklearn.metrics import confusion_matrix
from torch import nn, optim
from torchvision.transforms import ToTensor
numb_batch = 28
# We download the MNIST training data (download=True) and transform the images
# to tensors. The validation data is loaded the same way, except with train=False.
T = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
train_data = torchvision.datasets.MNIST(
"mnist_data", train=True, download=True, transform=T
)
val_data = torchvision.datasets.MNIST(
"mnist_data", train=False, download=True, transform=T
)
train_dl = torch.utils.data.DataLoader(train_data, batch_size=numb_batch)
val_dl = torch.utils.data.DataLoader(val_data, batch_size=numb_batch)
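# Quick shape check (a sketch; with batch_size=numb_batch=28 the first batch
# holds 28 single-channel 28x28 images and 28 integer labels):
#
#   >>> images, labels = next(iter(train_dl))
#   >>> images.shape, labels.shape
#   (torch.Size([28, 1, 28, 28]), torch.Size([28]))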
# %%
def create_lenet():
"""
Mnist dataset, we will be using the LeNet 5 architecture
"""
model = nn.Sequential(
nn.Conv2d(1, 6, 5, padding=2),
nn.ReLU(),
nn.AvgPool2d(2, stride=2),
nn.Conv2d(6, 16, 5, padding=0),
nn.ReLU(),
nn.AvgPool2d(2, stride=2),
nn.Flatten(),
nn.Linear(400, 120),
nn.ReLU(),
nn.Linear(120, 84),
nn.ReLU(),
nn.Linear(84, 10),
)
return model
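# Why Linear(400, 120) fits 28x28 inputs (a worked shape trace, assuming a
# single-channel 28x28 image): Conv2d(1, 6, 5, padding=2) keeps 28x28,
# AvgPool2d(2) halves it to 14x14, Conv2d(6, 16, 5) gives 10x10, AvgPool2d(2)
# gives 5x5, and 16*5*5 = 400 features feed the first fully connected layer.
#
#   >>> model = create_lenet()
#   >>> model(torch.zeros(1, 1, 28, 28)).shape
#   torch.Size([1, 10])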
def validate(model, data):
total = 0
correct = 0
for i, (images, labels) in enumerate(data):
        images = images.to(next(model.parameters()).device)
x = model(images)
value, pred = torch.max(x, 1)
pred = pred.data.cpu()
total += x.size(0)
correct += torch.sum(pred == labels)
return correct * 100.0 / total
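# Editor's note (observation about the code above, not a functional change):
# validate() moves images to the GPU with .cuda() while train() honours its
# `device` argument, so as written the script assumes a CUDA-capable machine.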
def train(numb_epoch=3, lr=1e-3, device="cpu"):
accuracies = []
cnn = create_lenet().to(device)
cec = nn.CrossEntropyLoss()
optimizer = optim.Adam(cnn.parameters(), lr=lr)
max_accuracy = 0
for epoch in range(numb_epoch):
for i, (images, labels) in enumerate(train_dl):
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
pred = cnn(images)
loss = cec(pred, labels)
loss.backward()
optimizer.step()
accuracy = float(validate(cnn, val_dl))
accuracies.append(accuracy)
if accuracy > max_accuracy:
best_model = copy.deepcopy(cnn)
max_accuracy = accuracy
print("Saving Best Model with Accuracy: ", accuracy)
print("Epoch:", epoch + 1, "Accuracy :", accuracy, "%")
plt.plot(accuracies)
return best_model
# %%
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
print("No Cuda Available")
device
# %%
lenet = train(100, device=device)
torch.save(lenet.state_dict(), "lenet.pth")
# %%
def predict_dl(model, data):
y_pred = []
y_true = []
for i, (images, labels) in enumerate(data):
images = images.cuda()
x = model(images)
value, pred = torch.max(x, 1)
pred = pred.data.cpu()
y_pred.extend(list(pred.numpy()))
y_true.extend(list(labels.numpy()))
return np.array(y_pred),
|
np.array(y_true)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Key points:
1) as described in the first choice,
2) as described in the second choice.
Refer to 'development note' for more notes.
"""
import time
from numba import cuda
import numpy as np
"""
Choices:
1 - Show that once created, device arrays will stay there in the device memory
the same way as regular variables in the host memory. This phenomenon is
not to be confused with the 'deallocation behavior' mentioned in section
3.3.7 of Numba's documentation, which merely describes a mechanism also
existing in the host memory.
2 - Show that both host and device arrays created inside a host function will
be automatically deallocated, or 'freed up', once the function is finished.
Open your task manager and be ready.
"""
choice = 2
if choice == 1:
n = int(1e6)
base =
|
np.ones(n, dtype='float64')
|
numpy.ones
|
from __future__ import absolute_import, division, print_function, unicode_literals
# The six library is useful for Python 2 and 3 compatibility
import six
from scipy.ndimage.interpolation import rotate
#__all__ = ['pad_or_cut_to_size', 'frebin', \
# 'fshift', 'fourier_imshift', 'shift_subtract', 'align_LSQ']
import numpy as np
import logging
_log = logging.getLogger('pynrc')
from poppy.utils import krebin
from .coords import dist_image
from scipy.ndimage import fourier_shift
from astropy.io import fits
def pad_or_cut_to_size(array, new_shape, fill_val=0.0, offset_vals=None):
"""
Resize an array to a new shape by either padding with zeros
    or trimming off rows and/or columns. The output shape can
    be arbitrary.
Parameters
----------
array : ndarray
A 1D, 2D, or 3D array. If 3D, then taken to be a stack of images
that are cropped or expanded in the same fashion.
new_shape : tuple
Desired size for the output array. For 2D case, if a single value,
then will create a 2-element tuple of the same value.
fill_val : scalar, optional
Value to pad borders. Default is 0.0
offset_vals : tuple
Option to perform image shift in the (xpix) direction for 1D,
or (ypix,xpix) direction for 2D/3D.
Returns
-------
output : ndarray
An array of size new_shape that preserves the central information
of the input array.
"""
ndim = len(array.shape)
if ndim == 1:
# is_1d = True
# Reshape array to a 2D array with nx=1
array = array.reshape((1,1,-1))
nz, ny, nx = array.shape
        if isinstance(new_shape, (float,int,np.integer)):
nx_new = int(new_shape+0.5)
ny_new = 1
new_shape = (ny_new, nx_new)
elif len(new_shape) < 2:
nx_new = new_shape[0]
ny_new = 1
new_shape = (ny_new, nx_new)
else:
ny_new, nx_new = new_shape
output = np.zeros(shape=(nz,ny_new,nx_new), dtype=array.dtype)
elif (ndim == 2) or (ndim == 3):
if ndim==2:
nz = 1
ny, nx = array.shape
array = array.reshape([nz,ny,nx])
else:
nz, ny, nx = array.shape
        if isinstance(new_shape, (float,int,np.integer)):
ny_new = nx_new = int(new_shape+0.5)
new_shape = (ny_new, nx_new)
elif len(new_shape) < 2:
ny_new = nx_new = new_shape[0]
new_shape = (ny_new, nx_new)
else:
ny_new, nx_new = new_shape
output = np.zeros(shape=(nz,ny_new,nx_new), dtype=array.dtype)
else:
raise ValueError('Input image can only have 1 or 2 or 3 dimensions. \
Found {} dimensions.'.format(ndim))
# Return if no difference in shapes
# This needs to occur after the above so that new_shape is verified to be a tuple
# If offset_vals is set, then continue to perform shift function
if (array.shape == new_shape) and (offset_vals is None):
return array
# Input the fill values
if fill_val != 0:
output += fill_val
# Pixel shift values
if offset_vals is not None:
if ndim == 1:
ny_off = 0
            if isinstance(offset_vals, (float,int,np.integer)):
nx_off = offset_vals
elif len(offset_vals) < 2:
nx_off = offset_vals[0]
else:
raise ValueError('offset_vals should be a single value.')
else:
if len(offset_vals) == 2:
ny_off, nx_off = offset_vals
else:
raise ValueError('offset_vals should have two values.')
else:
nx_off = ny_off = 0
if nx_new>nx:
n0 = (nx_new - nx) / 2
n1 = n0 + nx
elif nx>nx_new:
n0 = (nx - nx_new) / 2
n1 = n0 + nx_new
else:
n0, n1 = (0, nx)
n0 = int(n0+0.5)
n1 = int(n1+0.5)
if ny_new>ny:
m0 = (ny_new - ny) / 2
m1 = m0 + ny
elif ny>ny_new:
m0 = (ny - ny_new) / 2
m1 = m0 + ny_new
else:
m0, m1 = (0, ny)
m0 = int(m0+0.5)
m1 = int(m1+0.5)
if (nx_new>=nx) and (ny_new>=ny):
#print('Case 1')
output[:,m0:m1,n0:n1] = array.copy()
for i, im in enumerate(output):
output[i] = fshift(im, delx=nx_off, dely=ny_off, pad=True, cval=fill_val)
elif (nx_new<=nx) and (ny_new<=ny):
#print('Case 2')
if (nx_off!=0) or (ny_off!=0):
array_temp = array.copy()
for i, im in enumerate(array_temp):
array_temp[i] = fshift(im, delx=nx_off, dely=ny_off, pad=True, cval=fill_val)
output = array_temp[:,m0:m1,n0:n1]
else:
output = array[:,m0:m1,n0:n1]
elif (nx_new<=nx) and (ny_new>=ny):
#print('Case 3')
if nx_off!=0:
array_temp = array.copy()
for i, im in enumerate(array_temp):
array_temp[i] = fshift(im, delx=nx_off, pad=True, cval=fill_val)
output[:,m0:m1,:] = array_temp[:,:,n0:n1]
else:
output[:,m0:m1,:] = array[:,:,n0:n1]
for i, im in enumerate(output):
output[i] = fshift(im, dely=ny_off, pad=True, cval=fill_val)
elif (nx_new>=nx) and (ny_new<=ny):
#print('Case 4')
if ny_off!=0:
array_temp = array.copy()
for i, im in enumerate(array_temp):
array_temp[i] = fshift(im, dely=ny_off, pad=True, cval=fill_val)
output[:,:,n0:n1] = array_temp[:,m0:m1,:]
else:
output[:,:,n0:n1] = array[:,m0:m1,:]
for i, im in enumerate(output):
output[i] = fshift(im, delx=nx_off, pad=True, cval=fill_val)
# Flatten if input and output arrays are 1D
if (ndim==1) and (ny_new==1):
output = output.flatten()
elif ndim==2:
output = output[0]
return output
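# Editor's sketch (assumed usage of the function above, not part of the original
# pynrc module): pad a 3x3 array up to 5x5 and trim it back; the data stays
# centred and the round trip is lossless.
def _example_pad_or_cut():
    a = np.arange(9.0).reshape(3, 3)
    b = pad_or_cut_to_size(a, (5, 5))  # zero-padded to shape (5, 5)
    c = pad_or_cut_to_size(b, (3, 3))  # trimmed back to shape (3, 3)
    return b.shape, np.allclose(a, c)  # ((5, 5), True)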
def pad_or_cut_to_size_old(array, new_shape, fill_val=0.0):
"""
Resize an array to a new shape by either padding with zeros
    or trimming off rows and/or columns. The output shape can
    be arbitrary.
Parameters
----------
array : ndarray
A 1D or 2D array representing some image
new_shape : tuple of 2 elements
Desired size for the output array. For 2D case, if a single value,
then will create a 2-element tuple of the same value.
fill_val : scalar, optional
Value to pad borders. Default is 0.0
Returns
-------
output : ndarray
An array of size new_shape that preserves the central information
of the input array.
"""
# Return if no difference in shapes
if array.shape == new_shape:
return array
ndim = len(array.shape)
if ndim == 1:
# is_1d = True
# Reshape array to a 2D array with nx=1
array = array.reshape((1,-1,1))
nz, ny, nx = array.shape
        if isinstance(new_shape, (float,int,np.integer)):
ny_new = int(new_shape+0.5)
nx_new = 1
new_shape = (ny_new, nx_new)
elif len(new_shape) < 2:
ny_new = new_shape[0]
nx_new = 1
new_shape = (ny_new, nx_new)
else:
ny_new, nx_new = new_shape
output = np.zeros(shape=(nz,ny_new,nx_new), dtype=array.dtype)
elif (ndim == 2) or (ndim == 3):
if ndim==2:
nz = 1
ny, nx = array.shape
array = array.reshape([nz,ny,nx])
else:
nz, ny, nx = array.shape
        if isinstance(new_shape, (float,int,np.integer)):
ny_new = nx_new = int(new_shape+0.5)
new_shape = (ny_new, nx_new)
elif len(new_shape) < 2:
ny_new = nx_new = new_shape[0]
new_shape = (ny_new, nx_new)
else:
ny_new, nx_new = new_shape
output = np.zeros(shape=(nz,ny_new,nx_new), dtype=array.dtype)
else:
raise ValueError('Input image can only have 1 or 2 or 3 dimensions. \
Found {} dimensions.'.format(ndim))
# Input the fill values
if fill_val != 0:
output += fill_val
if nx_new>nx:
n0 = (nx_new - nx) / 2
n1 = n0 + nx
elif nx>nx_new:
n0 = (nx - nx_new) / 2
n1 = n0 + nx_new
else:
n0 = 0; n1 = nx
n0 = int(n0+0.5)
n1 = int(n1+0.5)
if ny_new>ny:
m0 = (ny_new - ny) / 2
m1 = m0 + ny
elif ny>ny_new:
m0 = (ny - ny_new) / 2
m1 = m0 + ny_new
else:
m0 = 0; m1 = ny
m0 = int(m0+0.5)
m1 = int(m1+0.5)
if (nx_new>=nx) and (ny_new>=ny):
#print('Case 1')
output[:,m0:m1,n0:n1] = array
elif (nx_new<=nx) and (ny_new<=ny):
#print('Case 2')
output = array[:,m0:m1,n0:n1]
elif (nx_new<=nx) and (ny_new>=ny):
#print('Case 3')
output[:,m0:m1,:] = array[:,:,n0:n1]
elif (nx_new>=nx) and (ny_new<=ny):
#print('Case 4')
output[:,:,n0:n1] = array[:,m0:m1,:]
# Flatten if input and output arrays are 1D
if (ndim==1) and (nx_new==1):
output = output.flatten()
elif ndim==2:
output = output[0]
return output
def fshift(image, delx=0, dely=0, pad=False, cval=0.0):
""" Fractional image shift
Ported from IDL function fshift.pro.
Routine to shift an image by non-integer values.
Parameters
----------
image: ndarray
1D or 2D array to be shifted
delx : float
shift in x (same direction as IDL SHIFT function)
dely: float
shift in y
pad : bool
Should we pad the array before shifting, then truncate?
Otherwise, the image is wrapped.
cval : sequence or float, optional
The values to set the padded values for each axis. Default is 0.
((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis.
((before, after),) yields same before and after constants for each axis.
(constant,) or int is a shortcut for before = after = constant for all axes.
Returns
-------
ndarray
Shifted image
"""
if len(image.shape) == 1:
# Return if delx is 0
if np.isclose(delx, 0, atol=1e-5):
return image
# separate shift into an integer and fraction shift
        intx = int(delx)
fracx = delx - intx
if fracx < 0:
fracx += 1
intx -= 1
# Pad ends with constant value
if pad:
padx = np.abs(intx) + 1
out = np.pad(image,np.abs(intx),'constant',constant_values=cval)
else:
padx = 0
out = image.copy()
# shift by integer portion
out = np.roll(out, intx)
# if significant fractional shift...
if not np.isclose(fracx, 0, atol=1e-5):
out = out * (1.-fracx) + np.roll(out,1) * fracx
out = out[padx:padx+image.size]
return out
elif len(image.shape) == 2:
# Return if both delx and dely are 0
if np.isclose(delx, 0, atol=1e-5) and np.isclose(dely, 0, atol=1e-5):
return image
# separate shift into an integer and fraction shift
        intx = int(delx)
        inty = int(dely)
fracx = delx - intx
fracy = dely - inty
if fracx < 0:
fracx += 1
intx -= 1
if fracy < 0:
fracy += 1
inty -= 1
# Pad ends with constant value
if pad:
padx = np.abs(intx) + 1
pady = np.abs(inty) + 1
pad_vals = ([pady]*2,[padx]*2)
out = np.pad(image,pad_vals,'constant',constant_values=cval)
else:
padx = 0; pady = 0
out = image.copy()
# shift by integer portion
out = np.roll(np.roll(out, intx, axis=1), inty, axis=0)
# Check if fracx and fracy are effectively 0
fxis0 = np.isclose(fracx,0, atol=1e-5)
fyis0 = np.isclose(fracy,0, atol=1e-5)
# If fractional shifts are significant
# use bi-linear interpolation between four pixels
if not (fxis0 and fyis0):
# Break bi-linear interpolation into four parts
# to avoid NaNs unnecessarily affecting integer shifted dimensions
part1 = out * ((1-fracx)*(1-fracy))
part2 = 0 if fyis0 else np.roll(out,1,axis=0)*((1-fracx)*fracy)
part3 = 0 if fxis0 else np.roll(out,1,axis=1)*((1-fracy)*fracx)
part4 = 0 if (fxis0 or fyis0) else np.roll(np.roll(out, 1, axis=1), 1, axis=0) * fracx*fracy
out = part1 + part2 + part3 + part4
out = out[pady:pady+image.shape[0], padx:padx+image.shape[1]]
return out
#if not np.allclose([fracx,fracy], 0, atol=1e-5):
# x = x * ((1-fracx)*(1-fracy)) + \
# np.roll(x,1,axis=0) * ((1-fracx)*fracy) + \
# np.roll(x,1,axis=1) * (fracx*(1-fracy)) + \
# np.roll(np.roll(x, 1, axis=1), 1, axis=0) * fracx*fracy
#x = x[pady:pady+image.shape[0], padx:padx+image.shape[1]]
#return x
else:
raise ValueError('Input image can only have 1 or 2 dimensions. \
Found {} dimensions.'.format(len(image.shape)))
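# Editor's sketch (assumed behaviour of fshift above): a whole-pixel shift just
# rolls the array, while fractional shifts are bilinearly interpolated.
def _example_fshift():
    img = np.zeros((5, 5))
    img[2, 2] = 1.0
    shifted = fshift(img, delx=1, dely=0)  # peak moves from column 2 to column 3
    return shifted[2, 3] == 1.0  # True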
def fourier_imshift(image, xshift, yshift, pad=False, cval=0.0):
"""Fourier shift image
Shift an image by use of Fourier shift theorem
Parameters
----------
image : nd array
N x K image
xshift : float
Number of pixels to shift image in the x direction
yshift : float
Number of pixels to shift image in the y direction
pad : bool
Should we pad the array before shifting, then truncate?
Otherwise, the image is wrapped.
cval : sequence or float, optional
The values to set the padded values for each axis. Default is 0.
((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis.
((before, after),) yields same before and after constants for each axis.
(constant,) or int is a shortcut for before = after = constant for all axes.
Returns
-------
ndarray
Shifted image
"""
# Pad ends with zeros
if pad:
        padx = np.abs(int(xshift)) + 1
        pady = np.abs(int(yshift)) + 1
pad_vals = ([pady]*2,[padx]*2)
im = np.pad(image,pad_vals,'constant',constant_values=cval)
else:
padx = 0; pady = 0
im = image
offset = fourier_shift( np.fft.fft2(im), (yshift,xshift) )
offset = np.fft.ifft2(offset).real
offset = offset[pady:pady+image.shape[0], padx:padx+image.shape[1]]
return offset
def shift_subtract(params, reference, target, mask=None, pad=False,
shift_function=fshift):
"""Shift and subtract image
Use Fourier Shift theorem for subpixel shifts for
input into least-square optimizer.
Parameters
----------
params : tuple
xshift, yshift, beta
reference : ndarray
See align_fourierLSQ
target : ndarray
See align_fourierLSQ
mask : ndarray, optional
See align_fourierLSQ
pad : bool
Should we pad the array before shifting, then truncate?
Otherwise, the image is wrapped.
shift_function : func
which function to use for sub-pixel shifting
Returns
-------
ndarray
1D array of target-reference residual after
applying shift and intensity fraction.
"""
xshift, yshift, beta = params
if shift_function is not None:
offset = shift_function(reference, xshift, yshift, pad)
else:
offset = reference
if mask is not None:
return ( (target - beta * offset) * mask ).ravel() #.flatten()
else:
return ( target - beta * offset ).ravel() #.flatten()
def align_LSQ(reference, target, mask=None, pad=False,
shift_function=fshift):
"""Find best shift value
LSQ optimization with option of shift alignment algorithm
Parameters
----------
reference : ndarray
N x K image to be aligned to
target : ndarray
N x K image to align to reference
mask : ndarray, optional
N x K image indicating pixels to ignore when
performing the minimization. The masks acts as
a weighting function in performing the fit.
pad : bool
Should we pad the array before shifting, then truncate?
Otherwise, the image is wrapped.
shift_function : func
which function to use for sub-pixel shifting.
Options are fourier_imshift or fshift.
fshift tends to be 3-5 times faster for similar results.
Returns
-------
list
(x, y, beta) values from LSQ optimization, where (x, y)
are the misalignment of target from reference and beta
is the fraction by which the target intensity must be
reduced to match the intensity of the reference.
"""
from scipy.optimize import least_squares#, leastsq
init_pars = [0.0, 0.0, 1.0]
# Use loss='soft_l1' for least squares robust against outliers
# May want to play around with f_scale...
res = least_squares(shift_subtract, init_pars, diff_step=0.1,
loss='soft_l1', f_scale=1.0, args=(reference,target),
kwargs={'mask':mask,'pad':pad,'shift_function':shift_function})
out = res.x
#out,_ = leastsq(shift_subtract, init_pars,
# args=(reference,target,mask,pad,shift_function))
#results = [out[0],out[1],out[2]] #x,y,beta
return res.x
def frebin(image, dimensions=None, scale=None, total=True):
"""Fractional rebin
Python port from the IDL frebin.pro
    Shrink or expand the size of a 1D or 2D array by an arbitrary amount
using bilinear interpolation. Conserves flux by ensuring that each
input pixel is equally represented in the output array.
Parameters
----------
image : ndarray
Input image, 1-d or 2-d ndarray.
dimensions : tuple or None
Desired size of output array (take priority over scale).
scale : tuple or None
Factor to scale output array size. A scale of 2 will increase
        the number of pixels by 2 (i.e., finer pixel scale).
total : bool
Conserves the surface flux. If True, the output pixels
will be the sum of pixels within the appropriate box of
the input image. Otherwise, they will be the average.
Returns
-------
ndarray
The binned ndarray
"""
if dimensions is not None:
if isinstance(dimensions, float):
dimensions = [int(dimensions)] * len(image.shape)
elif isinstance(dimensions, int):
dimensions = [dimensions] * len(image.shape)
elif len(dimensions) != len(image.shape):
            raise RuntimeError("The number of input dimensions doesn't match the image shape.")
elif scale is not None:
if isinstance(scale, float) or isinstance(scale, int):
dimensions = list(map(int, map(lambda x: x+0.5, map(lambda x: x*scale, image.shape))))
elif len(scale) != len(image.shape):
            raise RuntimeError("The number of input dimensions doesn't match the image shape.")
else:
dimensions = [scale[i]*image.shape[i] for i in range(len(scale))]
else:
        raise RuntimeError('Incorrect parameters to rebin.\nfrebin(image, dimensions=(x,y))\nfrebin(image, scale=a)')
#print(dimensions)
shape = image.shape
if len(shape)==1:
nlout = 1
nsout = dimensions[0]
nsout = int(nsout+0.5)
dimensions = [nsout]
elif len(shape)==2:
nlout, nsout = dimensions
nlout = int(nlout+0.5)
nsout = int(nsout+0.5)
dimensions = [nlout, nsout]
if len(shape) > 2:
raise ValueError('Input image can only have 1 or 2 dimensions. Found {} dimensions.'.format(len(shape)))
if nlout != 1:
nl = shape[0]
ns = shape[1]
else:
nl = nlout
ns = shape[0]
sbox = ns / float(nsout)
lbox = nl / float(nlout)
#print(sbox,lbox)
# Contract by integer amount
if (sbox.is_integer()) and (lbox.is_integer()):
image = image.reshape((nl,ns))
result = krebin(image, (nlout,nsout))
if not total: result /= (sbox*lbox)
if nl == 1:
return result[0,:]
else:
return result
ns1 = ns - 1
nl1 = nl - 1
if nl == 1:
#1D case
_log.debug("Rebinning to Dimension: %s" % nsout)
result = np.zeros(nsout)
for i in range(nsout):
rstart = i * sbox
istart = int(rstart)
rstop = rstart + sbox
if int(rstop) < ns1:
istop = int(rstop)
else:
istop = ns1
frac1 = float(rstart) - istart
frac2 = 1.0 - (rstop - istop)
#add pixel values from istart to istop and subtract fraction pixel
#from istart to rstart and fraction pixel from rstop to istop
result[i] = np.sum(image[istart:istop + 1]) - frac1 * image[istart] - frac2 * image[istop]
if total:
return result
else:
return result / (float(sbox) * lbox)
else:
_log.debug("Rebinning to Dimensions: %s, %s" % tuple(dimensions))
#2D case, first bin in second dimension
temp = np.zeros((nlout, ns))
result = np.zeros((nsout, nlout))
#first lines
for i in range(nlout):
rstart = i * lbox
istart = int(rstart)
rstop = rstart + lbox
if int(rstop) < nl1:
istop = int(rstop)
else:
istop = nl1
frac1 = float(rstart) - istart
frac2 = 1.0 - (rstop - istop)
if istart == istop:
temp[i, :] = (1.0 - frac1 - frac2) * image[istart, :]
else:
temp[i, :] = np.sum(image[istart:istop + 1, :], axis=0) -\
frac1 * image[istart, :] - frac2 * image[istop, :]
temp = np.transpose(temp)
#then samples
for i in range(nsout):
rstart = i * sbox
istart = int(rstart)
rstop = rstart + sbox
if int(rstop) < ns1:
istop = int(rstop)
else:
istop = ns1
frac1 = float(rstart) - istart
frac2 = 1.0 - (rstop - istop)
if istart == istop:
result[i, :] = (1. - frac1 - frac2) * temp[istart, :]
else:
result[i, :] = np.sum(temp[istart:istop + 1, :], axis=0) -\
frac1 * temp[istart, :] - frac2 * temp[istop, :]
if total:
return np.transpose(result)
else:
return np.transpose(result) / (sbox * lbox)
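# Editor's sketch (assumption: an integer shrink factor takes the krebin branch
# above): rebinning an 8x8 array of ones to 4x4 with total=True sums each 2x2
# block, so every output pixel is 4.0 and the total flux of 64 is conserved.
def _example_frebin():
    img = np.ones((8, 8))
    small = frebin(img, dimensions=(4, 4), total=True)
    return small.shape, float(small.sum())  # ((4, 4), 64.0)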
# Fix NaN values
def fix_nans_with_med(im, niter_max=5, verbose=False, **kwargs):
"""Iteratively fix NaNs with surrounding Real data"""
sh_orig = im.shape
nan_mask = np.isnan(im)
n_nans = np.where(nan_mask)[0].size
if verbose: print('{} NaNs to start'.format(n_nans))
for ii in range(niter_max):
im = im.flatten()
nan_mask = np.isnan(im)
im = im.reshape(sh_orig)
        # Return if there are no NaNs
if not np.any(nan_mask): return im
if verbose: print('Iter {}'.format(ii))
# Shift
im_smth = []
for i in
|
np.arange(-1,2)
|
numpy.arange
|
"""
Clenshaw-Curtis quadrature rule.
This module implements the Clenshaw-Curtis quadrature rule and associated functions.
Formula for nodes and weights:
[1] Sparse Grid Quadrature in High Dimensions with Applications in Finance and Insurance
Holtz, M., Springer, 2010(, Chapter 3, p. 42ff)
URL:
https://books.google.de/books?id=XOfMm-4ZM9AC&pg=PA42&lpg=PA42&dq=filippi+formu
la+clenshaw+curtis&source=bl&ots=gkhNu9F1fp&sig=ACfU3U3zdH-OHx0PqqB_KAXb1mM5iXI
ojw&hl=de&sa=X&ved=2ahUKEwijovCC1O7kAhUKr6QKHaVWCwQQ6AEwD3oECAgQAQ#v=onepage&q=
filippi%20formula%20clenshaw%20curtis&f=false
Note
----
This is a lot of old code. It is well tested, but the _compute_*(...)
functions are really hard to read (they are efficient, though).
"""
import numpy as np
from probnum.quad.polynomial.polynomialquadrature import PolynomialQuadrature
from probnum import utils
class ClenshawCurtis(PolynomialQuadrature):
"""
Clenshaw-Curtis quadrature rule.
Method of numerical integration based on an expansion of the
integrand in terms of a discrete cosine transform.
The nodes of the Clenshaw-Curtis rule are the roots of the Chebyshev
polynomials. The :math:`i^\\text{th}` root is
.. math:: x_i = \\frac{1}{2} \\left(1 - \\cos\\left( \\frac{i \\pi}{n+1} \\right) \\right)
for :math:`i=1, ..., n`. The :math:`i^\\text{th}` weight is given by
.. math:: w_i = \\frac{2}{n+1} \\sin\\left(\\frac{i \\pi}{n+1}\\right)\\sum_{j=1}^{(n+1)/2} \\frac{1}{2j-1}\\sin\\left(\\frac{(2j-1)i \\pi}{n+1}\\right).
These formulas can be found in [1]_. For an :math:`r`-times
differentiable integrand, the Clenshaw-Curtis approximation error is
proportional to :math:`\\mathcal{O}(n^{-r})`. It integrates
polynomials of degree :math:`\\leq n+1` exactly.
Parameters
----------
npts_per_dim : int
Number of evaluation points per dimension. The resulting mesh
will have `npts_per_dim**ndim` elements.
ndim : int
Number of dimensions.
bounds : ndarray, shape=(n, 2)
Integration bounds.
See Also
--------
PolynomialQuadrature : Quadrature rule based on polynomial functions.
References
----------
.. [1] <NAME>., Sparse Grid Quadrature in High Dimensions with Applications in Finance and Insurance, Springer, 2010
Examples
--------
>>> cc = ClenshawCurtis(npts_per_dim=3, ndim=2, bounds=np.array([[0, 1], [0, 0.1]]))
>>> print(cc.nodes)
[[0.14644661 0.01464466]
[0.14644661 0.05 ]
[0.14644661 0.08535534]
[0.5 0.01464466]
[0.5 0.05 ]
[0.5 0.08535534]
[0.85355339 0.01464466]
[0.85355339 0.05 ]
[0.85355339 0.08535534]]
>>> print(cc.weights)
[0.01111111 0.01111111 0.01111111 0.01111111 0.01111111 0.01111111
0.01111111 0.01111111 0.01111111]
>>> print(cc.integrate(lambda x: x[0] + x[1]))
0.05500000000000001
>>> cc = ClenshawCurtis(npts_per_dim=7, ndim=1, bounds=np.array([[0, 1]]))
>>> print(cc.weights)
[0.08898234 0.12380952 0.19673195 0.18095238 0.19673195 0.12380952
0.08898234]
>>> print(cc.nodes)
[[0.03806023]
[0.14644661]
[0.30865828]
[0.5 ]
[0.69134172]
[0.85355339]
[0.96193977]]
>>> print(cc.integrate(lambda x: np.sin(x)))
[0.45969769]
"""
def __init__(self, npts_per_dim, ndim, bounds):
utils.assert_is_2d_ndarray(bounds)
weights = _compute_weights(npts_per_dim, ndim, bounds)
nodes = _compute_nodes(npts_per_dim, ndim, bounds)
PolynomialQuadrature.__init__(self, nodes, weights, bounds)
def _compute_weights(npts, ndim, ilbds):
"""
Computes 1D Clenshaw-Curtis weights and aligns them in
correspondence to the computed nodes. Since the resulting mesh is of
size (n**d, d), the weight array is of size (n**d,).
"""
if npts ** ndim * ndim >= 1e9:
raise MemoryError("Weights for tensor-mesh too large for memory.")
num_tiles = np.arange(ndim)
num_reps = ndim - np.arange(ndim) - 1
weights = _compute_weights_1d(npts, ndim, ilbds[0])
prodweights = np.repeat(weights, npts ** (num_reps[0]))
for i in range(1, ndim):
weights = _compute_weights_1d(npts, ndim, ilbds[i])
column = np.repeat(
np.tile(weights, int(npts ** i)), int(npts ** (ndim - 1 - i))
)
prodweights *= column
return prodweights
def _compute_weights_1d(npts, ndim, ilbds1d):
"""
Computes weights of Clenshaw-Curtis formula.
    The :math:`i^\\text{th}` weight is given by
.. math:: w_i = \\frac{2}{n+1} \\sin\\left(\\frac{i \\pi}{n+1}\\right)\\sum_{j=1}^{(n+1)/2} \\frac{1}{2j-1}\\sin\\left(\\frac{(2j-1)i \\pi}{n+1}\\right).
"""
if npts % 2 == 0:
raise ValueError("Please enter odd npts")
nhalfpts = int((npts + 1.0) / 2.0)
ind_j = 2.0 * np.arange(1, nhalfpts + 1) - 1.0
ind_i = np.arange(1, npts + 1)
arr1 = 2.0 / (npts + 1.0) * np.sin(ind_i * np.pi / (npts + 1.0))
arr2 = 1.0 / ind_j
arr3 = np.sin(np.outer(ind_j, ind_i) * np.pi / (npts + 1.0))
weights = arr1 * (arr2 @ arr3)
return (ilbds1d[1] - ilbds1d[0]) * weights
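# Editor's sketch (sanity check, not part of the original probnum module): on
# [0, 1] the 1D Clenshaw-Curtis weights must sum to the interval length, since
# the rule integrates constants exactly.
def _example_cc_weights():
    w = _compute_weights_1d(npts=7, ndim=1, ilbds1d=np.array([0.0, 1.0]))
    return np.isclose(w.sum(), 1.0)  # True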
def _compute_nodes(npts, ndim, ilbds):
"""
Computes 1D Clenshaw-Curtis nodes and aligns them in order to create
a tensor mesh: each point is aligned with each point to create a
mesh of size (n^d, d).
"""
if npts ** ndim * ndim >= 1e9:
raise ValueError("Tensor-mesh too large for memory.")
nodes = _compute_nodes_1d(npts, ilbds[0])
productmesh = np.repeat(nodes, npts ** (ndim - 1))
for i in range(1, ndim):
nodes = _compute_nodes_1d(npts, ilbds[i])
column = np.repeat(np.tile(nodes, int(npts ** i)), int(npts ** (ndim - 1 - i)))
productmesh =
|
np.vstack((productmesh.T, column))
|
numpy.vstack
|
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import tf_util
import helper
import transforms3d.euler as t3d
parser = argparse.ArgumentParser()
parser.add_argument('-log','--log_dir', required=True, default='log_PCRNet', help='Log dir [default: log]')
parser.add_argument('-mode','--mode', required=True, type=str, default='no_mode', help='mode: train or test')
parser.add_argument('-results','--results', required=True, type=str, default='best_model', help='Store the best model')
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pcr_model', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=501, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=400000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
parser.add_argument('--model_path', type=str, default='log_5fclayers_airplane_multi_models_trial2_data90_1/model500.ckpt', help='Path of the weights (.ckpt file) to be used for test')
parser.add_argument('--centroid_sub', type=bool, default=True, help='Centroid Subtraction from Source and Template before Pose Prediction.')
parser.add_argument('--use_pretrained_model', type=bool, default=False, help='Use a pretrained model of airplane to initialize the training.')
parser.add_argument('--use_random_poses', type=bool, default=False, help='Use of random poses to train the model in each batch')
parser.add_argument('--data_dict', type=str, default='car_data',help='Data used to train templates or multi_model_templates')
parser.add_argument('--train_poses', type=str, default='itr_net_train_data45.csv', help='Poses for training')
parser.add_argument('--eval_poses', type=str, default='itr_net_eval_data45.csv', help='Poses for evaluation')
parser.add_argument('--feature_size', type=int, default=1024, help='Size of features extracted from PointNet')
FLAGS = parser.parse_args()
TRAIN_POSES = FLAGS.train_poses
EVAL_POSES = FLAGS.eval_poses
# Change batch size during test mode.
if FLAGS.mode == 'test':
BATCH_SIZE = 1
else:
BATCH_SIZE = FLAGS.batch_size
# Parameters for data
NUM_POINT = FLAGS.num_point
MAX_NUM_POINT = 2048
NUM_CLASSES = 40
centroid_subtraction_switch = FLAGS.centroid_sub
# Network hyperparameters
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
# Model Import
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
# Take backup of all files used to train the network with all the parameters.
if FLAGS.mode == 'train':
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) # Create Log_dir to store the log.
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train_PCRNet.py %s' % (LOG_DIR)) # bkp of train procedure
os.system('cp -a utils/ %s/'%(LOG_DIR)) # Store the utils code.
os.system('cp helper.py %s'%(LOG_DIR))
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')# Create a text file to store the loss function data.
LOG_FOUT.write(str(FLAGS)+'\n')
# Write all the data of loss function during training.
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
# Calculate Learning Rate during training.
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
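# Editor's note (semantics of the TF1 call above): with staircase=True,
#   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** floor(batch * BATCH_SIZE / DECAY_STEP), 1e-5)
# so with the defaults (1e-3, 0.7, 400000) the rate drops by 30% roughly every
# 400k training samples.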
def train():
with tf.Graph().as_default():
with tf.device('/cpu:0'):
batch = tf.Variable(0) # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
with tf.device('/gpu:'+str(GPU_INDEX)):
is_training_pl = tf.placeholder(tf.bool, shape=()) # Flag for dropouts.
learning_rate = get_learning_rate(batch) # Calculate Learning Rate at each step.
            # Define a network to backpropagate using the final pose prediction.
with tf.variable_scope('Network_L') as _:
# Object of network class.
network_L = MODEL.Network()
# Get the placeholders.
source_pointclouds_pl_L, template_pointclouds_pl_L = network_L.placeholder_inputs(BATCH_SIZE, NUM_POINT)
# Extract Features.
source_global_feature_L, template_global_feature_L = network_L.get_model(source_pointclouds_pl_L, template_pointclouds_pl_L, FLAGS.feature_size, is_training_pl, bn_decay=None)
# Find the predicted transformation.
predicted_transformation_L = network_L.get_pose(source_global_feature_L,template_global_feature_L,is_training_pl,bn_decay=None)
# Find the loss using source and transformed template point cloud.
loss = network_L.get_loss_b(predicted_transformation_L,BATCH_SIZE,template_pointclouds_pl_L,source_pointclouds_pl_L)
# Get training optimization algorithm.
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
# Update Network_L.
train_op = optimizer.minimize(loss, global_step=batch)
with tf.device('/cpu:0'):
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Add the loss in tensorboard.
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar('loss', loss)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = False
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
if FLAGS.mode == 'train': # Create summary writers only for train mode.
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
eval_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'eval'))
# Init variables
init = tf.global_variables_initializer()
sess.run(init, {is_training_pl: True})
# Just to initialize weights with pretrained model.
if FLAGS.use_pretrained_model:
saver.restore(sess,os.path.join('log_8fclayers_gpu','model300.ckpt'))
# Create a dictionary to pass the tensors and placeholders in train and eval function for Network_L.
ops_L = {'source_pointclouds_pl': source_pointclouds_pl_L,
'template_pointclouds_pl': template_pointclouds_pl_L,
'is_training_pl': is_training_pl,
'predicted_transformation': predicted_transformation_L,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
templates = helper.loadData(FLAGS.data_dict) # Read all the templates.
print(templates.shape)
poses = helper.read_poses(FLAGS.data_dict, TRAIN_POSES) # Read all the poses data for training.
print(poses.shape)
eval_poses = helper.read_poses(FLAGS.data_dict, EVAL_POSES) # Read all the poses data for evaluation.
if FLAGS.mode == 'train':
# For actual training.
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
                # Train for all training poses.
train_one_epoch(sess, ops_L, train_writer, templates, poses)
save_path = saver.save(sess, os.path.join(LOG_DIR, FLAGS.results+".ckpt"))
if epoch % 10 == 0:
                    # Evaluate the trained network every 10 epochs.
eval_one_epoch(sess, ops_L, eval_writer, templates, eval_poses)
# Save the variables to disk.
if epoch % 50 == 0:
# Store the Trained weights in log directory.
save_path = saver.save(sess, os.path.join(LOG_DIR, "model"+str(epoch)+".ckpt"))
log_string("Model saved in file: %s" % save_path)
if FLAGS.mode == 'test':
# Just to test the results
test_one_epoch(sess, ops_L, templates, eval_poses, saver, FLAGS.model_path)
# Train the Network_L and copy weights from Network_L to Network19 to find the poses between source and template.
def train_one_epoch(sess, ops_L, train_writer, templates, poses):
# Arguments:
# sess: Tensorflow session to handle tensors.
# ops_L: Dictionary for tensors of Network_L
# ops19: Dictionary for tensors of Network19
# templates: Training Point Cloud data.
# poses: Training pose data.
is_training = True
display_ptClouds = False
display_poses = False
display_poses_in_itr = False
display_ptClouds_in_itr = False
#templates = helper.shuffle_templates(templates) # Shuffle Templates.
if not FLAGS.use_random_poses:
poses = helper.shuffle_poses(poses) # Shuffle Poses.
loss_sum = 0 # Total Loss in each batch.
num_batches = int(poses.shape[0]/BATCH_SIZE) # Number of batches in an epoch.
# Training for each batch.
for fn in range(num_batches):
start_idx = fn*BATCH_SIZE # Start index of poses.
end_idx = (fn+1)*BATCH_SIZE # End index of poses.
template_data = np.copy(templates[2,:,:]).reshape(1,-1,3)
template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))
batch_euler_poses = poses[start_idx:end_idx] # Extract poses for batch training.
# template_data = helper.shuffle_templates(template_data) # Shuffle the templates for batch training.
source_data = helper.apply_transformation(template_data,batch_euler_poses) # Apply the poses on the templates to get source data.
        # Choose random points from the point clouds for training.
if np.random.random_sample()<0:
source_data = helper.select_random_points(source_data, NUM_POINT) # 30% probability that source data has different points than template
else:
source_data = source_data[:,0:NUM_POINT,:]
if np.random.random_sample()<0:
source_data = helper.add_noise(source_data) # 50% chance of having noise in training data.
        # Only choose a limited number of points from the source and template data.
template_data = template_data[:,0:NUM_POINT,:]
source_data = source_data[:,0:NUM_POINT,:]
# Subtract the Centroids from the Point Clouds.
if centroid_subtraction_switch:
source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
template_data = template_data - np.mean(template_data, axis=1, keepdims=True)
# To visualize the source and point clouds:
if display_ptClouds:
helper.display_clouds_data(source_data[0])
helper.display_clouds_data(template_data[0])
# Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
feed_dict = {ops_L['source_pointclouds_pl']: source_data,
ops_L['template_pointclouds_pl']: template_data,
ops_L['is_training_pl']: is_training}
# Ask the network to predict transformation, calculate loss using distance between actual points, calculate & apply gradients for Network_L and copy the weights to Network19.
summary, step, _, loss_val, predicted_transformation = sess.run([ops_L['merged'], ops_L['step'], ops_L['train_op'], ops_L['loss'], ops_L['predicted_transformation']], feed_dict=feed_dict)
train_writer.add_summary(summary, step) # Add all the summary to the tensorboard.
# Display the ground truth pose and predicted pose for first Point Cloud in batch
if display_poses:
print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))
# print(batch_euler_poses[0,0:3],batch_euler_poses[0,3:6]*(180/np.pi))
# print(final_pose[0,0:3],final_pose[0,3:6]*(180/np.pi))
# Display Loss Value.
print("Batch: {} & Loss: {}\r".format(fn,loss_val),end='')
# Add loss for each batch.
loss_sum += loss_val
print('\n')
log_string('Train Mean loss: %f' % (loss_sum/num_batches)) # Store and display mean loss of epoch.
def eval_one_epoch(sess, ops_L, eval_writer, templates, poses):
# Arguments:
# sess: Tensorflow session to handle tensors.
# ops_L: Dictionary for tensors of Network_L
# ops19: Dictionary for tensors of Network19
# templates: Training Point Cloud data.
# poses: Training pose data.
is_training = False
display_ptClouds = False
display_poses = False
display_poses_in_itr = False
display_ptClouds_in_itr = False
#templates = helper.shuffle_templates(templates)
#poses = helper.shuffle_poses(poses)
loss_sum = 0 # Total Loss in each batch.
num_batches = int(poses.shape[0]/BATCH_SIZE) # Number of batches in an epoch.
for fn in range(num_batches):
start_idx = fn*BATCH_SIZE # Start index of poses.
end_idx = (fn+1)*BATCH_SIZE # End index of poses.
template_data = np.copy(templates[2,:,:]).reshape(1,-1,3)
template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))
batch_euler_poses = poses[start_idx:end_idx] # Extract poses for batch training.
source_data = helper.apply_transformation(template_data, batch_euler_poses) # Apply the poses on the templates to get source data.
        # Choose random points from the point clouds for training.
if np.random.random_sample()<0:
source_data = helper.select_random_points(source_data, NUM_POINT) # 30% probability that source data has different points than template
else:
source_data = source_data[:,0:NUM_POINT,:]
if np.random.random_sample()<0:
source_data = helper.add_noise(source_data) # 50% chance of having noise in training data.
        # Only choose a limited number of points from the source and template data.
template_data = template_data[:,0:NUM_POINT,:]
source_data = source_data[:,0:NUM_POINT,:]
# Subtract the Centroids from the Point Clouds.
if centroid_subtraction_switch:
source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
template_data = template_data - np.mean(template_data, axis=1, keepdims=True)
# To visualize the source and point clouds:
if display_ptClouds:
helper.display_clouds_data(source_data[0])
helper.display_clouds_data(template_data[0])
# Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
feed_dict = {ops_L['source_pointclouds_pl']: source_data,
ops_L['template_pointclouds_pl']: template_data,
ops_L['is_training_pl']: is_training}
# Ask the network to predict transformation, calculate loss using distance between actual points.
summary, step, loss_val, predicted_transformation = sess.run([ops_L['merged'], ops_L['step'], ops_L['loss'], ops_L['predicted_transformation']], feed_dict=feed_dict)
eval_writer.add_summary(summary, step) # Add all the summary to the tensorboard.
# Display the ground truth pose and predicted pose for first Point Cloud in batch
if display_poses:
print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))
# Display Loss Value.
print("Batch: {} & Loss: {}\r".format(fn,loss_val),end='')
# Add loss for each batch.
loss_sum += loss_val
print('\n')
log_string('Eval Mean loss: %f' % (loss_sum/num_batches)) # Store and display mean loss of epoch.
def test_one_epoch(sess, ops_L, templates, shuffled_poses, saver, model_path):
# Arguments:
# sess: Tensorflow session to handle tensors.
# ops_L: Dictionary for tensors of Network_L
# ops19: Dictionary for tensors of Network19
# templates: Training Point Cloud data.
# poses: Training pose data.
# saver: To restore the weights.
# model_path: Path of log directory.
saver.restore(sess, model_path) # Restore the weights of trained network.
is_training = False
display_ptClouds = False
display_poses = False
display_poses_in_itr = False
display_ptClouds_in_itr = False
swap_case = False
templates = helper.process_templates('templates')
template_data =
|
np.zeros((BATCH_SIZE,MAX_NUM_POINT,3))
|
numpy.zeros
|
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
__all__ = ['graymask2rgb',
]
def graymask2rgb(mask, channel=0):
# Assert image shape
assert len(mask.shape) in [2, 3], "Mask shape error"
if len(mask.shape) == 3:
assert mask.shape[2] == 1, "Not a proper mask"
if np.amax(mask) <= 1.0:
mask = (mask * 255.0).astype(np.uint8)
zero_img =
|
np.zeros((mask.shape[0], mask.shape[1]), np.uint8)
|
numpy.zeros
|
"""
Mask R-CNN
Train on the toy My dataset and implement color splash effect.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 my.py train --dataset=/path/to/my/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 my.py train --dataset=/path/to/my/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 my.py train --dataset=/path/to/my/dataset --weights=imagenet
# Apply color splash to an image
python3 my.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color splash to video using the last weights you trained
python3 my.py splash --weights=last --video=<URL or path to file>
"""
import os
import sys
import json, math
import datetime
from datetime import datetime
import numpy as np
import skimage.draw
import time
from skimage.measure import find_contours
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
import colorsys
import random
# Root directory of the project
# ROOT_DIR = os.path.abspath(".\\")
# Import Mask RCNN
# sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import visualize
# Path to trained weights file
COCO_WEIGHTS_PATH = "D:\\Projects\\Mask_RCNN\\mask_rcnn_coco.h5"
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = "logs"
############################################################
# Configurations
############################################################
class MyConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "mask"
    # We use a GPU with 12GB memory. Adjust IMAGES_PER_GPU down
    # if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 3 # Background + my
# Number of training steps per epoch
STEPS_PER_EPOCH = 1000
    # Skip detections with < 50% confidence
DETECTION_MIN_CONFIDENCE = 0.5
IMAGE_RESIZE_MODE = "pad64"
IMAGE_MIN_DIM = 640
IMAGE_MAX_DIM = 1280
IMAGE_MIN_SCALE = 0
BACKBONE = "resnet50"
############################################################
# Dataset
############################################################
class MyDataset(utils.Dataset):
def print_size(self, poly):
for p in poly:
a = np.array(p['all_points_y'])
height = a.max() - a.min()
a = np.array(p['all_points_x'])
width = a.max() - a.min()
self.areas.append(height * width)
#if height * width < 4096:
# print(width, height)
def load_my(self, dataset_dir, subset, class_dict):
"""Load a subset of the My dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
self.areas = []
# Add classes. We have only one class to add.
for (k, v) in class_dict.items():
self.add_class("my", v, k)
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# VGG Image Annotator (up to version 1.6) saves each image in the form:
# { 'filename': '28503151_5b5b7ec140_b.jpg',
# 'regions': {
# '0': {
# 'region_attributes': {},
# 'shape_attributes': {
# 'all_points_x': [...],
# 'all_points_y': [...],
# 'name': 'polygon'}},
# ... more regions ...
# },
# 'size': 100202
# }
# We mostly care about the x and y coordinates of each region
# Note: In VIA 2.0, regions was changed from a dict to a list.
annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json"),encoding='UTF-8'))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
print(class_dict)
# Add images
for a in annotations:
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance. These are stored in the
# shape_attributes (see json format above)
# The if condition is needed to support VIA versions 1.x and 2.x.
# print(a['regions'])
# print(a['filename'])
if type(a['regions']) is dict:
polygons = [r['shape_attributes'] for r in a['regions'].values()]
class_ids = [class_dict[r['region_attributes']['type']] for r in a['regions'].values()]
else:
polygons = [r['shape_attributes'] for r in a['regions']]
class_ids = [class_dict[r['region_attributes']['type']] for r in a['regions']]
self.print_size(polygons)
# print(class_ids)
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"my",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons,
class_ids=class_ids)
self.areas.sort()
#print(np.unique(np.round(np.sqrt(self.areas))))
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a my dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "my":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
class_ids = np.array(info['class_ids'])
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
        return mask.astype(bool), class_ids.astype(np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "my":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def train(model):
"""Train the model."""
class_dict = {}
if args.label:
label_file = open(args.label)
label_lines = label_file.readlines()
label_id = 1
for label_line in label_lines:
label_line = label_line.replace('\n', '')
class_dict[label_line] = label_id
label_id = label_id + 1
# Training dataset.
dataset_train = MyDataset()
dataset_train.load_my(args.dataset, "train", class_dict)
dataset_train.prepare()
# Validation dataset
dataset_val = MyDataset()
dataset_val.load_my(args.dataset, "val", class_dict)
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=600,
layers='all')
def display_differences(image,
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
class_names, title="", ax=None,
show_mask=True, show_box=True,
iou_threshold=0.5, score_threshold=0.5):
"""Display ground truth and prediction instances on the same image."""
# Match predictions to ground truth
gt_match, pred_match, overlaps = utils.compute_matches(
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold, score_threshold=score_threshold)
# # Ground truth = green. Predictions = red
# colors = [(0, 1, 0, .8)] * len(gt_match)\
# + [(1, 0, 0, 1)] * len(pred_match)
# # Concatenate GT and predictions
# class_ids = np.concatenate([gt_class_id, pred_class_id])
# scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])
# boxes = np.concatenate([gt_box, pred_box])
# masks = np.concatenate([gt_mask, pred_mask], axis=-1)
# # Captions per instance show score/IoU
# captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format(
# pred_score[i],
# (overlaps[i, int(pred_match[i])]
# if pred_match[i] > -1 else overlaps[i].max()))
# for i in range(len(pred_match))]
# # Set title if not provided
# title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU"
# # Display
# display_instances(
# image,
# boxes, masks, class_ids,
# class_names, scores, ax=ax,
# show_bbox=show_box, show_mask=show_mask,
# colors=colors, captions=captions,
# title=title)
return gt_match, pred_match, overlaps
def toSquareBox(bbox):
"""bbox:[y1, x1, y2, x2]
    Convert the box into a square according to its width/height ratio,
    adjusting the top-left and bottom-right coordinates.
    The square's coordinates are [y1, x1, y2, x2].
"""
box_height = bbox[2] - bbox[0]
box_width = bbox[3] - bbox[1]
wh_ratio = box_width / box_height
box_size = box_width / math.sqrt(wh_ratio)
y1 = int(bbox[0] + box_height / 2 - box_size / 2)
y2 = int(y1 + box_size)
x1 = int(bbox[1] + box_width / 2 - box_size / 2)
x2 = int(x1 + box_size)
return wh_ratio, box_size, box_height * box_width, [y1, x1, y2, x2]
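# Editor's sketch (assumed usage of toSquareBox above): a 100x400 box becomes a
# centred square whose side is the geometric mean of width and height.
def _example_toSquareBox():
    wh_ratio, box_size, area, square = toSquareBox([0, 0, 100, 400])
    return wh_ratio, box_size, square  # (4.0, 200.0, [-50, 100, 150, 300])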
def recall(model, class_names):
class_dict = {}
label_dict = ['background']
if args.label:
label_file = open(args.label)
label_lines = label_file.readlines()
label_id = 1
for label_line in label_lines:
label_line = label_line.replace('\n', '')
class_dict[label_line] = label_id
label_dict.append(label_line)
label_id = label_id + 1
# Validation dataset
dataset_val = MyDataset()
dataset_val.load_my(args.dataset, "val", class_dict)
dataset_val.prepare()
pre_correct_dict = {}
pre_total_dict = {}
pre_iou_dict = {}
pre_scores_dict = {}
gt_total_dict = {}
for i in range(1, len(class_dict) + 1):
pre_correct_dict[i] = 0
pre_total_dict[i] = 0
pre_iou_dict[i] = 0.0
pre_scores_dict[i] = 0.0
gt_total_dict[i] = 0
backbone_shapes = modellib.compute_backbone_shapes(config, [768,1280,3])
anchor_boxes = utils.generate_pyramid_anchors(
config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
#utils.generate_anchors(300, config.RPN_ANCHOR_RATIOS, [40,40], 32, config.RPN_ANCHOR_STRIDE)
#print(anchor_boxes)
rois = []
obj_groups = []
# {image_file, [gt_class_id], [gt_box, (y1,x1,y2,x2)], [gt_bbox_area], [gt_wh_ratio], [gt_mask_area], [gt_mask_ratio], [gt_size], }
for image_id in dataset_val.image_ids:
image, image_meta, gt_class_id, gt_box, gt_mask = modellib.load_image_gt(dataset_val, config, image_id, use_mini_mask=False)
#print(image.shape)
gt_detects = {}
gt_detects['image'] = dataset_val.image_reference(image_id)
gt_detects['gt_class_id'] = gt_class_id
gt_detects['gt_bbox'] = gt_box
gt_detects['gt_bbox_area'] = []
gt_detects['gt_wh_ratio'] = []
gt_detects['gt_mask_area'] = []
gt_detects['gt_mask_ratio'] = []
gt_detects['gt_size'] = []
for i in range(0, len(gt_class_id)):
gt_total_dict[gt_class_id[i]] = gt_total_dict[gt_class_id[i]] + 1
wh_ratio, box_size, box_area, square_box = toSquareBox(gt_box[i])
mask_area = np.sum(gt_mask[:,:,i]==True)
mask_ratio = mask_area / box_area
gt_detects['gt_bbox_area'].append(box_area)
gt_detects['gt_wh_ratio'].append(wh_ratio)
gt_detects['gt_mask_area'].append(mask_area)
gt_detects['gt_mask_ratio'].append(mask_ratio)
gt_detects['gt_size'].append(box_size)
molded_image = modellib.mold_image(image, config)
#print(molded_image.shape)
# Anchors
"""
anchors = model.get_anchors(molded_image.shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
print(anchors)
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois, rpn_class, rpn_bbox =\
model.keras_model.predict([np.expand_dims(molded_image, 0), np.expand_dims(image_meta, 0), anchors], verbose=0)
print(detections[0])
print(mrcnn_class[0])
print(rpn_class[0])
"""
#skimage.io.imsave("test.jpg", image)
start_time = time.time()
results = model.detect_molded(np.expand_dims(molded_image, 0), np.expand_dims(image_meta, 0), verbose=0)
end_time = time.time()
#print("Time: %s" % str(end_time - start_time))
#print(results)
r = results[0]
pre_class_ids = r['class_ids']
for i in range(0, len(pre_class_ids)):
pre_total_dict[pre_class_ids[i]] = pre_total_dict[pre_class_ids[i]] + 1
pre_scores = r['scores']
#print(r['rois'])
for roi in r['rois']:
whr, bsize, _, _ = toSquareBox(roi)
rois.append([bsize, whr])
#print(gt_detects['gt_size'])
#overlaps = utils.compute_iou(roi, gt_detects['gt_bbox'], roi_area, gt_detects['gt_bbox_area'])
#print(overlaps)
gt_match, pred_match, overlap = display_differences(image,
gt_box, gt_class_id, gt_mask,
r['rois'], pre_class_ids, pre_scores, r['masks'],
class_names, title="", ax=None,
show_mask=True, show_box=True,
iou_threshold=0.1, score_threshold=0.1)
gt_detects['rois'] = r['rois']
gt_detects['gt_match'] = gt_match
gt_detects['pred_match'] = pred_match
#print(gt_match)
"""
visualize.display_differences(image,
gt_box, gt_class_id, gt_mask,
r['rois'], pre_class_ids, pre_scores, r['masks'],
class_names, title="", ax=None,
show_mask=True, show_box=True,
iou_threshold=0.1, score_threshold=0.1)
"""
for i in range(0, len(pred_match)):
if pred_match[i] > -1.0:
#print(r['rois'][i])
pre_correct_dict[pre_class_ids[i]] = pre_correct_dict[pre_class_ids[i]] + 1
pre_iou_dict[pre_class_ids[i]] = pre_iou_dict[pre_class_ids[i]] + overlap[i, int(pred_match[i])]
pre_scores_dict[pre_class_ids[i]] = pre_scores_dict[pre_class_ids[i]] + pre_scores[i]
obj_groups.append(gt_detects)
#print(rois)
print("图片,类别,标注框,标注宽高比,标注尺寸,检测框,检测宽高比,检测尺寸,最大IOU")
for det in obj_groups:
for i in range(0, len(det['gt_class_id'])):
overlaped = utils.compute_overlaps(anchor_boxes, np.reshape(det['gt_bbox'][i],(1,4)))
omax = max(overlaped)
#if det['gt_size'][i] > 150 and det['gt_size'][i] < 367:
if omax[0] > 0.0:
print(det['image'], end='')
print(",", label_dict[det['gt_class_id'][i]], ",", det['gt_bbox'][i], ",", det['gt_wh_ratio'][i], ",", det['gt_size'][i], end="")
if det['gt_match'][i] > -1.0:
idx = int(det['gt_match'][i])
#print(idx, det['rois'])
whr, bsize, _, _ = toSquareBox(det['rois'][idx])
print(",", det['rois'][idx], ",", whr, ",", bsize, ",", omax[0])
else:
print(",", 0, ",", 0, ",", 0, ",", omax[0])
tol_pre_correct_dict = 0
tol_pre_total_dict = 0
tol_pre_iou_dict = 0
tol_pre_scores_dict = 0
tol_gt_total_dict = 0
lines = []
title_line = 'Type,Number,Correct,Proposals,Total,Rps/img,Avg IOU,Avg score,Recall,Precision\n'
lines.append(title_line)
for key in class_dict:
tol_pre_correct_dict = tol_pre_correct_dict + pre_correct_dict[class_dict[key]]
tol_pre_total_dict = pre_total_dict[class_dict[key]] + tol_pre_total_dict
tol_pre_iou_dict = pre_iou_dict[class_dict[key]] + tol_pre_iou_dict
tol_pre_scores_dict = pre_scores_dict[class_dict[key]] + tol_pre_scores_dict
tol_gt_total_dict = gt_total_dict[class_dict[key]] + tol_gt_total_dict
type_rps_img = pre_total_dict[class_dict[key]] / len(dataset_val.image_ids)
if pre_correct_dict[class_dict[key]] > 0:
type_avg_iou = pre_iou_dict[class_dict[key]] / pre_correct_dict[class_dict[key]]
type_avg_score = pre_scores_dict[class_dict[key]] / pre_correct_dict[class_dict[key]]
else:
type_avg_iou = 0
type_avg_score = 0
if gt_total_dict[class_dict[key]] > 0:
type_recall = pre_correct_dict[class_dict[key]] / gt_total_dict[class_dict[key]]
else:
type_recall = 0
if pre_total_dict[class_dict[key]] > 0:
type_precision = pre_correct_dict[class_dict[key]] / pre_total_dict[class_dict[key]]
else:
type_precision = 0
line = '{:s},{:d},{:d},{:d},{:d},{:.2f},{:.2f}%,{:.2f},{:.2f}%,{:.2f}%\n'.format(key, len(dataset_val.image_ids), pre_correct_dict[class_dict[key]], pre_total_dict[class_dict[key]], gt_total_dict[class_dict[key]], type_rps_img, type_avg_iou * 100, type_avg_score, type_recall * 100, type_precision * 100)
lines.append(line)
print(line)
tol_rps_img = tol_pre_total_dict / len(dataset_val.image_ids)
if tol_pre_correct_dict > 0:
tol_avg_iou = tol_pre_iou_dict / tol_pre_correct_dict
tol_avg_score = tol_pre_scores_dict / tol_pre_correct_dict
else:
tol_avg_iou = 0
tol_avg_score = 0
if tol_gt_total_dict > 0:
tol_recall = tol_pre_correct_dict / tol_gt_total_dict
else:
tol_recall = 0
if tol_pre_total_dict > 0:
tol_precision = tol_pre_correct_dict / tol_pre_total_dict
else:
tol_precision = 0
total_line = '{:s},{:d},{:d},{:d},{:d},{:.2f},{:.2f}%,{:.2f},{:.2f}%,{:.2f}%\n'.format('Total', len(dataset_val.image_ids), tol_pre_correct_dict, tol_pre_total_dict, tol_gt_total_dict, tol_rps_img, tol_avg_iou * 100, tol_avg_score, tol_recall * 100, tol_precision * 100)
print(total_line)
lines.append(total_line)
result_file_name = "result_{:%Y%m%dT%H%M%S}.csv".format(datetime.now())
result_file = open(result_file_name, 'w+')
result_file.writelines(lines)
result_file.close()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
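# A hedged sketch of what such a schedule could look like with the matterport Mask R-CNN
# train() API; the dataset_train name and the epoch counts are illustrative assumptions,
# not values taken from this script:
def train_heads_then_all(model, dataset_train, dataset_val, config):
    # Stage 1: train only the randomly initialized head layers.
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=20, layers='heads')
    # Stage 2 (optional): fine-tune all layers at a lower learning rate.
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE / 10,
                epochs=40, layers='all')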
def color_splash(image, mask):
"""Apply color splash effect.
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result image.
"""
# Make a grayscale copy of the image. The grayscale copy still
# has 3 RGB channels, though.
gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
# Copy color pixels from the original color image where mask is set
if mask.shape[-1] > 0:
# We're treating all instances as one, so collapse the mask into one layer
mask = (np.sum(mask, -1, keepdims=True) >= 1)
splash =

completion: np.where(mask, image, gray)
api: numpy.where

from numpy import exp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def Loss(y):
# Computes the loss for a given output of the model, y
return (-y**2 + y**3)*exp(-(y**2))
def dLossdW(x,y):
# Computes the gradient of the loss w.r.t the parameter w (Using the chain Rule)
# First the derivative of the loss w.r.t y
dlossdy = ((-2*y + 3*(y**2))*exp(-(y**2))) + ((-y**2 + y**3)*(-2*y*exp(-(y**2))))
# Then the derivative of y w.r.t w
dydw = x
# Finally we return the multiplication of the these two, that is the gradient
# of the loss w.r.t w
dlossdw = dlossdy*dydw
return dlossdw
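# Illustrative sanity check (not part of the original script): compare dLossdW against a
# central finite difference of Loss(w*x) at one point; x0, w0 and the step h are assumptions
# chosen for this example.
x0, w0, h = 2.0, 0.1, 1e-6
numeric_grad = (Loss((w0 + h) * x0) - Loss((w0 - h) * x0)) / (2 * h)
analytic_grad = dLossdW(x0, w0 * x0)
print('finite-difference grad: %.8f  analytic grad: %.8f' % (numeric_grad, analytic_grad))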
################################### First plot a 3D error surface across all inputs and all outputs
# Define a range of values for input data
X = np.linspace(-5, 5, 100)
# Define a range of values for the parameter w
W = np.linspace(-0.7, 0.7, 100)
# Create a mesh grid of these two vectors
x, w = np.meshgrid(X, W)
# Compute the output of the model for each pair of values in the mesh
y = w*x
# Create a figure
fig = plt.figure(figsize=(16,9))
# Tell matplotlib that this is going to be a 3-dimensional plot
ax = plt.axes(projection='3d')
# use the plot_surface function and a nice cmap to plot the loss surface w.r.t all pairs of (x,w) in the mesh
ax.plot_surface(x, w, Loss(y), rstride=2, cstride=2,
cmap='hot', edgecolor='none')
# Set labels for the axes
ax.set_xlabel('Input Data (x)', size=17)
ax.set_ylabel('The model parameter (w)', size=17)
ax.set_zlabel('Loss', size=17)
# Compute the value of the global minimum
plt.title('Global Minimum is %.2f' % np.min(Loss(y)), size=17)
# Show the 3-dimensional surface
plt.show()
#################################### plot a 2D error surface per each input value across a range of values in w
# 3 data points between -5 and 5 are selected
X = np.linspace(-5, 5, 3)
# A range of possible values for parameter w is selected
W = np.linspace(-0.7, 0.7, 100)
# Create a figure
plt.figure(figsize=(16,9))
# iterate through the entire dataset and for each value of x repeat the following
for x in X:
# compute the output of the model
y = W*x
# Plot the current loss surface for x, across the entire range of values
# for the parameter w
plt.plot(W, Loss(y), c='r', alpha=0.7)
# define the limits for horizontal axis (the weight axis)
plt.xlim(min(W), max(W))
# put the labels for weight and loss axes
plt.xlabel('Weight Values (w)', size=17)
plt.ylabel('One individual loss surface per input data (%d surfaces)' % len(X), size=17)
# Put grids on the plot for better visualization
plt.grid()
# Show the plot
plt.show()
##################################### plotting error surfaces, computing gradients at w_0, and computing w_new.
###################################### Finally, using w_not and w_new, we can plot the gradient vector##########
# Define a range of values for input data x
X = np.linspace(-5, 5, 10)
# Define a grid of values for the parameter w
W = np.linspace(-0.7, 0.7, 100)
# Define an initial value of w_not (where we start learning from)
w_not = -0.3
# Define the learning rate
eta = 0.01
# Create a figure in which we will put 2 subplots
fig = plt.figure(figsize=(16,9))
# Create the first subplot
plt.subplot(1, 2, 1)
# Give a title for the subplot
plt.title('Update vectors computed using \n '
'the gradients at w_not for different error surfaces',size=17)
# This variable is going to be used for plotting the update vectors! It
# makes the vectors look nice and symmetrical
prop = dict(arrowstyle="-|>,head_width=0.4,head_length=0.8",shrinkA=0, shrinkB=0)
# This will hold the computed gradients at w_not, across all loss surfaces given each individual data x
gradients = []
# Go through the entire dataset X, and for each data point x do the following
for x in X:
# Compute model output
y = w_not*x
# Compute the gradient of the error w.r.t the parameter w, given current value of the input data
dedw = dLossdW(x,y)
# Add the gradient to the list for future visualizations
gradients.append(dedw)
# Compute the new value of the parameter w NOTE: We are not yet updating w_not!
w_new = w_not - eta*dedw
# Plot the loss surface for the current input data x, across possible values of the parameter w
plt.plot(W,Loss(W*x), c='r', alpha=0.7)
# Plot the initial w_not and its corresponding loss value Loss(y) given x, so we know where on
# The loss surface we reside
plt.scatter(w_not, Loss(y), c='k')
# Using the (w_not,Loss(y)) and the new value of the weight that results in the point (w_new, Loss(w_new*x))
# Plot the update vector between w_not and w_new
plt.annotate("", xy=(w_new, Loss(w_new*x)), xytext=(w_not, Loss(y)), arrowprops=prop)
# Put a limit on the weight axis using the minimum and maximum values we have considered for w
plt.xlim(min(W), max(W))
# Put labels per axis
plt.xlabel('Weight (w)',size=17)
plt.ylabel('%d Individual loss surfaces per data input x' % len(X), size=17)
# Plot a nice vertical blue line at w_not so we know where we stand INITIALLY across ALL loss surfaces
plt.axvline(w_not, ls='--', c='b',label='w_not=%.2f' % w_not)
# Show the legends
plt.legend()
# Put a nice grid
plt.grid()
# Prepare the second subplot
plt.subplot(1,2,2)
# Put a nice title for the histogram
plt.title('Frequency of the magnitudes of the computed gradients',size=17)
# Plot the histogram of gradients, along some nice statistics of these gradients
plt.hist(gradients, label='(Min, Max, Mean)=(%.2f,%.2f,%.2f)' % (np.min(gradients),

completion: np.max(gradients)
api: numpy.max

"""
Example run
```
python3 preprocessing.py
```
"""
from PIL import Image
from scipy import ndimage
from skimage.filters import rank
from skimage.morphology import square
from skimage.morphology import disk
from skimage.morphology import white_tophat
import numpy
import cv2
import matplotlib.pyplot as plt
import PIL
from unbuffered import Unbuffered
import sys
# make print() flush immediately rather than waiting on stdout's buffer:
sys.stdout = Unbuffered(sys.stdout)
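# For reference, a minimal Unbuffered wrapper (an assumption about what unbuffered.py
# provides, not copied from it) typically just flushes after every write:
#
#   class Unbuffered(object):
#       def __init__(self, stream):
#           self.stream = stream
#       def write(self, data):
#           self.stream.write(data)
#           self.stream.flush()
#       def __getattr__(self, attr):
#           return getattr(self.stream, attr)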
class Preprocess:
"""
Preprocess class is responsible for anything preprocessing. It is built
for easy chaining of the preprocessing operations, such that
operations may easily follow each other in any order by dotting
them out like so:
```
obj = Preprocess(
image="./STARE/im0255.ppm"
).meanFilter(
).show(
).greyOpening(
).show()
```
Notice how `show()` can be called after any operation. `show()` uses the
PIL Image debugger to show the image.
The implemented methods are generally limited to the methods described in
Marin et al ITM 2011. However, some methods allow different
parameters to be used in the operation, where the ones described in Marin
et al ITM 2011 are merely defaults.
To run the methods described in Marin et al 2011 in the same order as
described, the method `process` can be used:
```
obj = Preprocess(
image="./STARE/im0003.ppm"
).process(
).show(
).save(
path="./im0003_processed.png"
)
```
Non-standard requisites for running are:
- scipy https://www.scipy.org/
- cv2 http://opencv-python-tutroals.readthedocs.io/en/latest/
- skimage http://scikit-image.org/
@class Preprocess
@param image {string} The path to the image to be preprocessed.
@param maskTh {int} The threshold value to create the mask from
@property source {string} Image source
@property image {PIL obj} PIL Image object
@property mask {numpy array} The mask matrix which is 0 in the area
outside FOV and 1's inside FOV
@property threshold {int} The threshold value from which the mask is
made. Pixels with intensity lower than the threshold are considered
outside the FOV, and inside otherwise.
"""
def __init__(self, image, maskTh=50):
self.initialized = False
self.__printStatus(
"Initialize preprocessing for: " + image,
isEnd=True,
initial=True
)
self.source = image
self.name = image.split("/")[-1].split(".")[0]
self.image = Image.open(image)
self.loaded = self.image.load()
# self.threshold=50
self.threshold = maskTh
self.extractColorBands()
self.mask = numpy.uint8(
numpy.greater(
self.red_array,
self.threshold
).astype(int)
)
def save(self, path, array=numpy.empty(0), useMask=False, rotate=True):
"""
Saves the image array as png at the desired path.
@method save
@param path {string} the path where the image will be saved.
@param array {numpy array} The array which the image is made from,
default is self.image_array
@param useMask {Bool} Whether to reset non-FOV pixels using the mask.
Default is False
"""
if not array.any():
array = self.image_array
if useMask:
array = array * self.mask
self._arrayToImage(array, rotate=rotate).save(path, "png")
self.__printStatus("saving to " + path + "...")
self.__printStatus("[done]", True)
return self
def _arrayToImage(self, array=numpy.empty(0), rotate=True):
"""
@private
@method arrayToImage
@param array {numpy array} array which is converted to an image
@param rotate {Bool} If true the image is transposed and rotated to
counter the numpy conversion of arrays.
"""
self.__printStatus("array to image...")
if not array.any():
array = self.image_array
img = Image.fromarray(numpy.uint8(array))
self.__printStatus("[done]", True)
if rotate:
return img.transpose(Image.FLIP_TOP_BOTTOM).rotate(-90)
else:
return img
def show(
self,
array=numpy.empty(0),
rotate=True,
invert=False,
useMask=False,
mark=None
):
"""
@method show
@param array {numpy array} image array to be shown.
@param rotate {Bool} Whether to rotate to counter numpy's array
conversion, default True.
@param invert {Bool} Invert the image, default False.
@param useMask {Bool} Reset non FOV pixels using the mask, default
is False.
"""
if not array.any():
array = self.image_array
im = self._arrayToImage(array, rotate=rotate)
self.__printStatus("show image...")
if useMask:
array = array * self.mask
if mark:
im = im.convert("RGB")
pixels = im.load()
x, y = mark
for i in range(x-1, x+1):
for j in range(y-1, y+1):
# color an area around the mark
# blue, for easier visibility
pixels[i, j] = (0, 0, 255)
if invert:
Image.eval(im, lambda x:255-x).show()
else:
print("#####", im.mode, "#####")
im.show()
self.__printStatus("[done]", True)
return self
def extractColorBands(self):
"""
Returns a greyscaled array from the green channel in
the original image.
@method extractColorBands
"""
self.__printStatus("Extract color bands...")
green_array =

completion: numpy.empty([self.image.size[0], self.image.size[1]], int)
api: numpy.empty

import numpy as np
import datajoint as dj
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
from pipeline import experiment, ephys, psth
from pipeline.plot.util import (_plot_with_sem, _extract_one_stim_dur, _get_units_hemisphere,
_get_trial_event_times, _get_clustering_method,
_plot_stacked_psth_diff, _plot_avg_psth, jointplot_w_hue)
m_scale = 1200
_plt_xmin = -3
_plt_xmax = 2
def plot_clustering_quality(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat * ephys.ProbeInsertion.InsertionLocation
& probe_insertion & {'clustering_method': clustering_method}).fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')
metrics = {'amp': amp,
'snr': snr,
'isi': np.array(isi_violation) * 100, # to percentage
'rate': np.array(spk_rate)}
label_mapper = {'amp': 'Amplitude',
'snr': 'Signal to noise ratio (SNR)',
'isi': 'ISI violation (%)',
'rate': 'Firing rate (spike/s)'}
fig = None
if axs is None:
fig, axs = plt.subplots(2, 3, figsize = (12, 8))
fig.subplots_adjust(wspace=0.4)
assert axs.size == 6
for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):
ax.plot(metrics[m1], metrics[m2], '.k')
ax.set_xlabel(label_mapper[m1])
ax.set_ylabel(label_mapper[m2])
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
return fig
def plot_unit_characteristic(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
amp, snr, spk_rate, x, y, insertion_depth = (
ephys.Unit * ephys.ProbeInsertion.InsertionLocation * ephys.UnitStat
& probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"').fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'unit_posx', 'unit_posy', 'dv_location')
insertion_depth = np.where(np.isnan(insertion_depth), 0, insertion_depth)
metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(), x, y - insertion_depth))))
metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
ymin = metrics.y.min() - 100
ymax = metrics.y.max() + 200
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)
# manually draw the legend
lg_ypos = ymax
data = pd.DataFrame({'x': [3, 20, 40], 'y': [lg_ypos, lg_ypos, lg_ypos], 'size_ratio': np.array([0.2, 0.5, 0.8])})
for ax, ax_maxval in zip(axs.flatten(), (amp.max(), snr.max(), spk_rate.max())):
sns.scatterplot(data=data, x='x', y='y', s=data.size_ratio*m_scale, ax=ax, **dict(cosmetic, facecolor='k'))
for _, r in data.iterrows():
ax.text(r['x']-4, r['y']+70, (r['size_ratio']*ax_maxval).astype(int))
# cosmetic
for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(title)
ax.set_xlim((-10, 60))
ax.add_patch(mpl.patches.Rectangle((-7, lg_ypos-80), 62, 210, fill=False))
ax.set_ylim((ymin, ymax + 150))
return fig
def plot_unit_selectivity(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',
'ipsi_firing_rate', 'unit_posx', 'unit_posy', 'dv_location']
selective_units = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* experiment.Period & probe_insertion & {'clustering_method': clustering_method}
& 'period_selectivity != "non-selective"').fetch(*attr_names)
selective_units = pd.DataFrame(selective_units).T
selective_units.columns = attr_names
selective_units.period_selectivity = selective_units.period_selectivity.astype('category')
# --- account for insertion depth (manipulator depth)
selective_units.unit_posy = (selective_units.unit_posy
- np.where(np.isnan(selective_units.dv_location.values.astype(float)),
0, selective_units.dv_location.values.astype(float)))
# --- get ipsi vs. contra firing rate difference
f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)
selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()
# --- prepare for plotting
cosmetic = {'legend': None,
'linewidth': 0.0001}
ymin = selective_units.unit_posy.min() - 100
ymax = selective_units.unit_posy.max() + 100
# a bit of a hack to get the 'open circle'
pts = np.linspace(0, np.pi * 2, 24)
circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]
vert = np.r_[circ, circ[::-1] * .7]
open_circle = mpl.path.Path(vert)
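# (an outer ring concatenated with a reversed, 70%-scaled inner ring yields a donut-shaped
# path, so the scatter markers below render as open circles)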
# --- plot
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
for (title, df), ax in zip(((p, selective_units[selective_units.period == p])
for p in ('sample', 'delay', 'response')), axs):
sns.scatterplot(data=df, x='unit_posx', y='unit_posy',
s=df.f_rate_diff.values.astype(float)*m_scale,
hue='period_selectivity', marker=open_circle,
palette={'contra-selective': 'b', 'ipsi-selective': 'r'},
ax=ax, **cosmetic)
contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{title}\n% contra: {contra_p:.2f}\n% ipsi: {100-contra_p:.2f}')
ax.set_xlim((-10, 60))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_ylim((ymin, ymax))
return fig
def plot_unit_bilateral_photostim_effect(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
dv_loc = (ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch1('dv_location')
no_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim'}).fetch1('KEY')
bi_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_stim'}).fetch1('KEY')
# get photostim duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& probe_insertion).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
units = ephys.Unit & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"'
metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])
_, cue_onset = _get_trial_event_times(['delay'], units, 'all_noearlylick_both_alm_nostim')
cue_onset = cue_onset[0]
# XXX: could be done with 1x fetch+join
for u_idx, unit in enumerate(units.fetch('KEY', order_by='unit')):
x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')
# obtain unit psth per trial, for all nostim and bistim trials
nostim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(no_stim_cond['trial_condition_name'])
bistim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(bi_stim_cond['trial_condition_name'])
nostim_psths, nostim_edge = psth.compute_unit_psth(unit, nostim_trials.fetch('KEY'), per_trial=True)
bistim_psths, bistim_edge = psth.compute_unit_psth(unit, bistim_trials.fetch('KEY'), per_trial=True)
# compute the firing rate difference between contra vs. ipsi within the stimulation duration
ctrl_frate = np.array([nostim_psth[np.logical_and(nostim_edge >= cue_onset,
nostim_edge <= cue_onset + stim_dur)].mean()
for nostim_psth in nostim_psths])
stim_frate = np.array([bistim_psth[np.logical_and(bistim_edge >= cue_onset,
bistim_edge <= cue_onset + stim_dur)].mean()
for bistim_psth in bistim_psths])
frate_change = (stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()
frate_change = abs(frate_change) if frate_change < 0 else 0.0001
metrics.loc[u_idx] = (int(unit['unit']), x, y - dv_loc, frate_change)
metrics.frate_change = metrics.frate_change / metrics.frate_change.max()
fig = None
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(4, 8))
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,
ax=axs, **cosmetic)
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_title('% change')
axs.set_xlim((-10, 60))
return fig
def plot_stacked_contra_ipsi_psth(units, axs=None):
units = units.proj()
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
hemi = _get_units_hemisphere(units)
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
# ipsi selective ipsi trials
psth_is_it = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
# ipsi selective contra trials
psth_is_ct = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective contra trials
psth_cs_ct = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective ipsi trials
psth_cs_it = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 20))
assert axs.size == 2
_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0], vlines=period_starts, flip=True)
axs[0].set_title('Contra-selective Units')
axs[0].set_ylabel('Unit (by depth)')
axs[0].set_xlabel('Time to go (s)')
_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1], vlines=period_starts)
axs[1].set_title('Ipsi-selective Units')
axs[1].set_ylabel('Unit (by depth)')
axs[1].set_xlabel('Time to go (s)')
return fig
def plot_avg_contra_ipsi_psth(units, axs=None):
units = units.proj()
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
hemi = _get_units_hemisphere(units)
good_unit = ephys.Unit & 'unit_quality != "all"'
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
psth_is_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_is_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
_plot_avg_psth(psth_cs_it, psth_cs_ct, period_starts, axs[0],
'Contra-selective')
_plot_avg_psth(psth_is_it, psth_is_ct, period_starts, axs[1],
'Ipsi-selective')
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
return fig
def plot_psth_bilateral_photostim_effect(units, axs=None):
units = units.proj()
hemi = _get_units_hemisphere(units)
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_stim_left'}).fetch('unit_psth')
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim_left'}).fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_stim_right'}).fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim_right'}).fetch('unit_psth')
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
# get photostim duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& units).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Bilateral ALM photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
# add shaded bar for photostim
stim_time = period_starts[np.where(period_names == 'delay')[0][0]]
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
return fig
def plot_psth_photostim_effect(units, condition_name_kw=['both_alm'], axs=None):
"""
For the specified `units`, plot PSTH comparison between stim vs. no-stim with left/right trial instruction
The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: bilateral ALM)
"""
units = units.proj()
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
hemi = _get_units_hemisphere(units)
# no photostim:
psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]
psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]
psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
# get photostim duration
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials(stim_trial_cond_name)
& units).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
# add shaded bar for photostim
stim_time = period_starts[np.where(period_names == 'delay')[0][0]]
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
return fig
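# Hedged usage sketch for plot_psth_photostim_effect; the restriction key and the condition
# keyword below are hypothetical examples, not values taken from this pipeline:
#   units = ephys.Unit & {'subject_id': 123456, 'session': 1, 'insertion_number': 1}
#   fig = plot_psth_photostim_effect(units, condition_name_kw=['left_alm'])
#   fig.savefig('psth_photostim_left_alm.png')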
def plot_coding_direction(units, time_period=None, axs=None):
_, proj_contra_trial, proj_ipsi_trial, time_stamps, _ = psth.compute_CD_projected_psth(
units.fetch('KEY'), time_period=time_period)
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(8, 6))
# plot
_plot_with_sem(proj_contra_trial, time_stamps, ax=axs, c='b')
_plot_with_sem(proj_ipsi_trial, time_stamps, ax=axs, c='r')
for x in period_starts:
axs.axvline(x=x, linestyle = '--', color = 'k')
# cosmetic
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_ylabel('CD projection (a.u.)')
axs.set_xlabel('Time (s)')
return fig
def plot_paired_coding_direction(unit_g1, unit_g2, labels=None, time_period=None):
"""
Plot trial-to-trial CD-endpoint correlation between CD-projected trial-psth from two unit-groups (e.g. two brain regions)
Note: coding direction is calculated on selective units, contra vs. ipsi, within the specified time_period
"""
_, proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps, unit_g1_hemi = psth.compute_CD_projected_psth(
unit_g1.fetch('KEY'), time_period=time_period)
_, proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps, unit_g2_hemi = psth.compute_CD_projected_psth(
unit_g2.fetch('KEY'), time_period=time_period)
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], unit_g1, 'good_noearlylick')
if labels:
assert len(labels) == 2
else:
labels = ('unit group 1', 'unit group 2')
# plot projected trial-psth
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
_plot_with_sem(proj_contra_trial_g1, time_stamps, ax=axs[0], c='b')
_plot_with_sem(proj_ipsi_trial_g1, time_stamps, ax=axs[0], c='r')
_plot_with_sem(proj_contra_trial_g2, time_stamps, ax=axs[1], c='b')
_plot_with_sem(proj_ipsi_trial_g2, time_stamps, ax=axs[1], c='r')
# cosmetic
for ax, label in zip(axs, labels):
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('CD projection (a.u.)')
ax.set_xlabel('Time (s)')
ax.set_title(label)
# plot trial CD-endpoint correlation - if 2 unit-groups are from 2 hemispheres,
# then contra-ipsi definition is based on the first group
p_start, p_end = time_period
contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
if unit_g1_hemi == unit_g2_hemi:
contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_ipsi_trial_g2[:,

completion: np.logical_and(time_stamps >= p_start, time_stamps < p_end)
api: numpy.logical_and

import h5py
import numpy as np
from setup import *
from casadi import *
import rospy
from std_msgs.msg import Float64MultiArray
import scipy.interpolate
class MPC:
def __init__(self):
rospy.init_node('MPC_controller', anonymous=True)
self.v2 = -0.15
hf = h5py.File('alpha_U_beta_Ext.mat','r')
self.V1 = hf['alpha_U_beta1_Ext'] #array dimensions are reversed
self.V2 = hf['alpha_U_beta2_Ext']
self.p = load_params(hf)
self.u = Float64MultiArray()
self.u.data = [float(0),float(0)]
self.integration_iters = 2
self.solver, self.bc = self.casadi_setup()
xg = np.linspace(self.gmin[0],self.gmax[0],self.N[0])
yg = np.linspace(self.gmin[1],self.gmax[1],self.N[1])
self.thg = np.linspace(self.gmin[2],self.gmax[2],self.N[2]+1)
self.thg = self.thg[:-1]
self.velg = np.linspace(self.gmin[3],self.gmax[3],self.N[3])
y2g = np.linspace(self.gmin[4],self.gmax[4],self.N[4])
tg = np.linspace(0,14,np.shape(self.V1)[0])
self.interp_function_V1 = scipy.interpolate.RegularGridInterpolator((tg,y2g,self.velg,self.thg,yg,xg),np.array(self.V1))
self.interp_function_V2 = scipy.interpolate.RegularGridInterpolator((tg,y2g,self.velg,self.thg,yg,xg),np.array(self.V2))
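# Both interpolators are queried with points ordered as (t, y2, vel, th, y, x), matching the
# reversed axis order of the MATLAB arrays noted above.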
self.ctrl_publisher = rospy.Publisher('/ctrl_MATLAB',Float64MultiArray,queue_size=1)
rospy.Subscriber('/StateSpace',Float64MultiArray,self.mpc_callback)
def mpc_callback(self,msg):
sim_start = rospy.get_time()
self.states = [msg.data[0],msg.data[1],msg.data[2],msg.data[3],msg.data[4],msg.data[5]]
self.OptStates = self.loadOptimalStates()
lbx = self.bc['lbx']
ubx = self.bc['ubx']
lbg = self.bc['lbg']
ubg = self.bc['ubg']
lbx[:4] = self.states[:4]
ubx[:4] = self.states[:4]
lbx[4:6] = [self.u.data[1],self.u.data[0]]
ubx[4:6] = [self.u.data[1],self.u.data[0]]
sol = self.solver(lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg, p=self.OptStates)
w_opt = sol['x'].full().flatten()
u_omega = w_opt[10]
u_accel = w_opt[11]
self.u.data = [float(u_accel),float(u_omega)]
#print(rospy.get_time()-sim_start)
self.ctrl_publisher.publish(self.u)
# import matplotlib.pyplot as plt
# w_opt_th = w_opt[2::6]
# w_opt_vel = w_opt[3::6]
# print('w_opt_th:', w_opt_th)
# print('MPC_th:', self.OptStates[0:11])
# print('w_opt_vel:', w_opt_vel)
# print('MPC_vel:', self.OptStates[11:23])
#
# plt.figure(1)
# plt.clf()
# plt.plot(w_opt_th, '--')
# plt.plot(self.OptStates[0:11], '-')
#
# plt.figure(2)
# plt.clf()
# plt.plot(w_opt_vel, '--')
# plt.plot(self.OptStates[11:23], '-')
# plt.show()
def loadOptimalStates(self):
th_bar = []
vel_bar = []
temp_states = list(self.states)
#print(self.states)
t = np.linspace(temp_states[5],temp_states[5]+self.T,self.iters+1)
for i in range(

completion: np.size(t)
api: numpy.size

#!/usr/bin/env python
from __future__ import print_function
import re
import numpy as np
import numpy.testing as npt
import logging
logging.basicConfig(level=logging.NOTSET) # test LONGDEBUG logging level
import unittest
# import the module
import tomographer
class BasicStuff(unittest.TestCase):
def test_version(self):
# test for __version__
print("Tomographer version: "+tomographer.__version__)
# parse __version__
m = re.match(r'^v(?P<maj>\d+)\.(?P<min>\d+)(?P<suffix>[a-zA-Z][a-zA-Z0-9]*)?'
r'(-(?P<gitsuffix>\d+-?g?[a-fA-F0-9]*))?$',
tomographer.__version__)
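# The pattern accepts plain releases as well as git-describe style strings,
# e.g. "v5.4", "v5.4rc1", or "v5.4-12-g1a2b3c".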
self.assertIsNotNone(m)
print("Verision MAJ=", m.group('maj'), " MIN=", m.group('min'), " SUFFIX=", m.group('suffix'),
" GITSUFFIX=", m.group('gitsuffix'))
self.assertTrue(m.group('maj') == str(tomographer.version.version_info.major))
self.assertTrue(m.group('min') == str(tomographer.version.version_info.minor))
print("CFLAGS = "+repr(tomographer.version.compile_info['cflags']))
self.assertTrue('cflags' in tomographer.version.compile_info)
self.assertTrue('eigen' in tomographer.version.compile_info)
self.assertTrue('boost' in tomographer.version.compile_info)
def test_cxxlogger(self):
# test that the cxxlogger object exists, and check we can set a log level.
tomographer.cxxlogger.level = logging.INFO
def test_exception(self):
# test that the C++ code is able to raise an exception
with self.assertRaises(ValueError):
# wrong array shape & dimensions -- should cause an exception
tomographer.Histogram().load(np.array([ [1, 2], [3, 4] ]))
class Histograms(unittest.TestCase):
def test_HistogramParams(self):
# constructors
params = tomographer.HistogramParams(2.0, 3.0, 5)
self.assertAlmostEqual(params.min, 2.0)
self.assertAlmostEqual(params.max, 3.0)
self.assertEqual(params.num_bins, 5)
npt.assert_array_almost_equal(params.values_lower, np.array([2.0, 2.2, 2.4, 2.6, 2.8]))
npt.assert_array_almost_equal(params.values_upper, np.array([2.2, 2.4, 2.6, 2.8, 3.0]))
npt.assert_array_almost_equal(params.values_center, np.array([2.1, 2.3, 2.5, 2.7, 2.9]))
# default constructor
paramsdflt = tomographer.HistogramParams()
self.assertTrue(paramsdflt.min < paramsdflt.max and paramsdflt.num_bins > 0)
# w/ keyword arguments
params = tomographer.HistogramParams(min=2.0, max=3.0, num_bins=5)
self.assertAlmostEqual(params.min, 2.0)
self.assertAlmostEqual(params.max, 3.0)
self.assertEqual(params.num_bins, 5)
# binCenterValue()
self.assertAlmostEqual(params.binCenterValue(0), 2.1)
self.assertAlmostEqual(params.binCenterValue(4), 2.9)
with self.assertRaises(tomographer.TomographerCxxError):
x = params.binCenterValue(999)
with self.assertRaises(tomographer.TomographerCxxError):
x = params.binCenterValue(5)
# binLowerValue()
self.assertAlmostEqual(params.binLowerValue(0), 2.0)
self.assertAlmostEqual(params.binLowerValue(4), 2.8)
with self.assertRaises(tomographer.TomographerCxxError):
x = params.binLowerValue(999)
with self.assertRaises(tomographer.TomographerCxxError):
x = params.binLowerValue(5)
# binUpperValue()
self.assertAlmostEqual(params.binUpperValue(0), 2.2)
self.assertAlmostEqual(params.binUpperValue(4), 3.0)
with self.assertRaises(tomographer.TomographerCxxError):
x = params.binUpperValue(999)
with self.assertRaises(tomographer.TomographerCxxError):
x = params.binUpperValue(5)
#
self.assertAlmostEqual(params.binResolution(), 0.2)
self.assertTrue(params.isWithinBounds(2.0))
self.assertTrue(params.isWithinBounds(2.2))
self.assertFalse(params.isWithinBounds(3.0001))
self.assertFalse(params.isWithinBounds(1.99))
# repr()
self.assertEqual(repr(params), 'HistogramParams(min=2,max=3,num_bins=5)')
def test_Histogram(self):
self.do_test_hist(tomographer.Histogram, int, False)
# def test_HistogramReal(self):
# self.do_test_hist(tomographer.HistogramReal, float, False)
def test_HistogramWithErrorBars(self):
self.do_test_hist(tomographer.HistogramWithErrorBars, float, True)
def do_test_hist(self, HCl, cnttype, has_error_bars):
print("do_test_hist()")
# constructor & params member
self.assertAlmostEqual(HCl(2.0, 3.0, 5).params.min, 2.0)
self.assertAlmostEqual(HCl(2.0, 3.0, 5).params.max, 3.0)
self.assertEqual(HCl(2.0, 3.0, 5).params.num_bins, 5)
self.assertAlmostEqual(HCl(tomographer.HistogramParams(2.0, 3.0, 5)).params.min, 2.0)
self.assertAlmostEqual(HCl(tomographer.HistogramParams(2.0, 3.0, 5)).params.max, 3.0)
self.assertEqual(HCl(tomographer.HistogramParams(2.0, 3.0, 5)).params.num_bins, 5)
h = HCl() # has default constructor
print("Default histogram parameters: ", h.params.min, h.params.max, h.params.num_bins)
# values_center, values_lower, values_upper
npt.assert_array_almost_equal(HCl(2.0,3.0,5).values_lower, np.array([2.0, 2.2, 2.4, 2.6, 2.8]))
npt.assert_array_almost_equal(HCl(2.0,3.0,5).values_upper, np.array([2.2, 2.4, 2.6, 2.8, 3.0]))
npt.assert_array_almost_equal(HCl(2.0,3.0,5).values_center, np.array([2.1, 2.3, 2.5, 2.7, 2.9]))
# constructor sets zero bin values & off_chart
h = HCl()
npt.assert_array_almost_equal(h.bins, np.zeros(h.numBins()))
self.assertAlmostEqual(h.off_chart, 0) # almost-equal in case cnttype=float
if has_error_bars: npt.assert_array_almost_equal(h.delta, np.zeros(h.numBins()))
h = HCl(2.0, 3.0, 5)
npt.assert_array_almost_equal(h.bins, np.array([0,0,0,0,0]))
self.assertAlmostEqual(h.off_chart, 0) # almost-equal in case cnttype=float
if has_error_bars: npt.assert_array_almost_equal(h.delta, np.zeros(h.numBins()))
h = HCl(tomographer.HistogramParams(2.0, 3.0, 5))
npt.assert_array_almost_equal(h.bins, np.array([0,0,0,0,0]))
self.assertAlmostEqual(h.off_chart, 0) # almost-equal in case cnttype=float
if has_error_bars: npt.assert_array_almost_equal(h.delta, np.zeros(h.numBins()))
# has_error_bars
h = HCl(2.0, 3.0, 5)
self.assertTrue(h.has_error_bars == has_error_bars)
# numBins()
h = HCl(2.0, 3.0, 5)
self.assertEqual(h.numBins(), 5)
# bins, off_chart
h = HCl(2.0, 3.0, 5)
h.bins = np.array([1, 4, 9, 5, 2])
npt.assert_array_almost_equal(h.bins, np.array([1, 4, 9, 5, 2]))
h.off_chart = 156
self.assertAlmostEqual(h.off_chart, 156) # almost-equal in case cnttype=float
# delta [if error bars]
if has_error_bars:
h = HCl(2.0, 3.0, 5)
h.delta = np.array([1, 2, 3, 4, 0.25])
npt.assert_array_almost_equal(h.delta, np.array([1, 2, 3, 4, 0.25]))
# count()
h = HCl(2.0, 3.0, 5)
h.bins = np.array([1, 4, 9, 5, 2])
h.off_chart = 17
for i in range(h.numBins()):
self.assertAlmostEqual(h.bins[i], h.count(i))
with self.assertRaises(IndexError):
n = h.count(9999) # out of range
# errorBar() [if error bars]
if has_error_bars:
h = HCl(2.0, 3.0, 5)
h.delta = np.array([1, 2, 3, 4, 0.25])
for i in range(h.numBins()):
self.assertAlmostEqual(h.delta[i], h.errorBar(i))
with self.assertRaises(IndexError):
n = h.errorBar(9999) # out of range
# load()
def load_values_maybe_error_bars(h, values, errors, off_chart=None):
if off_chart is None:
if not has_error_bars:
h.load(values)
else:
h.load(values, errors)
else:
if not has_error_bars:
h.load(values, off_chart)
else:
h.load(values, errors, off_chart)
# load(x[, e])
h = HCl(2.0, 3.0, 5)
h.bins = np.array([1, 4, 9, 5, 2])
load_values_maybe_error_bars(h, np.array([10, 20, 30, 40, 50]), np.array([1, 2, 3, 4, 5]))
npt.assert_array_almost_equal(h.bins, np.array([10, 20, 30, 40, 50]))
if has_error_bars:
npt.assert_array_almost_equal(h.delta, np.array([1, 2, 3, 4, 5]))
self.assertAlmostEqual(h.off_chart, 0) # almost-equal in case cnttype=float
# load(x[, e], off_chart)
h = HCl(2.0, 3.0, 5)
h.bins = np.array([1, 4, 9, 5, 2])
load_values_maybe_error_bars(h, np.array([10, 20, 30, 40, 50]), np.array([1, 2, 3, 4, 5]), 28)
npt.assert_array_almost_equal(h.bins, np.array([10, 20, 30, 40, 50]))
if has_error_bars:
npt.assert_array_almost_equal(h.delta, np.array([1, 2, 3, 4, 5]))
self.assertAlmostEqual(h.off_chart, 28) # almost-equal in case cnttype=float
# load(x, e) error if wrong signature
if has_error_bars:
with self.assertRaises(Exception): h.load(np.array([10, 20, 30, 40, 50]))
with self.assertRaises(Exception): h.load(np.array([10, 20, 30, 40, 50]), 10.0)
else:
with self.assertRaises(Exception): h.load(np.array([10, 20, 30, 40, 50]), np.array([1, 2, 3, 4, 5]))
with self.assertRaises(Exception): h.load(np.array([10, 20, 30, 40, 50]), np.array([1, 2, 3, 4, 5]), 10.0)
# add()
if not has_error_bars:
h = HCl(2.0, 3.0, 5)
h.bins = np.array([1, 4, 9, 5, 2])
h.add(np.array([10, 20, 30, 40, 50]))
npt.assert_array_almost_equal(h.bins, np.array([11, 24, 39, 45, 52]))
self.assertAlmostEqual(h.off_chart, 0) # almost-equal in case cnttype=float
# add()
if not has_error_bars:
h = HCl(2.0, 3.0, 5)
h.bins = np.array([1, 4, 9, 5, 2])
h.off_chart = 4
h.add(np.array([10, 20, 30, 40, 50]), 28)
npt.assert_array_almost_equal(h.bins, np.array([11, 24, 39, 45, 52]))
self.assertAlmostEqual(h.off_chart, 32) # almost-equal in case cnttype=float
# reset()
h = HCl(2.0, 3.0, 5)
load_values_maybe_error_bars(h, np.array([10, 20, 30, 40, 50]), np.array([1, 2, 3, 4, 5]), 28)
h.reset()
npt.assert_array_almost_equal(h.bins, np.array([0,0,0,0,0]))
self.assertAlmostEqual(h.off_chart, 0) # almost-equal in case cnttype=float
# record()
if not has_error_bars:
h = HCl(2.0, 3.0, 5)
load_values_maybe_error_bars(h, np.array([10, 20, 30, 40, 50]), np.array([1, 2, 3, 4, 5]), 28)
h.record(2.569)
self.assertAlmostEqual(h.count(2), 31)
h.record(2.569, cnttype(2))
self.assertAlmostEqual(h.count(2), 33)
h.record(2.981)
self.assertAlmostEqual(h.count(4), 51)
h.record(2.981, cnttype(7))
self.assertAlmostEqual(h.count(4), 58)
# normalization(), normalized()
h = HCl(2.0, 3.0, 5)
load_values_maybe_error_bars(h, np.array([10, 20, 30, 40, 50]), np.array([1, 2, 3, 4, 5]), 28)
#print("BINS=",repr(h.bins), repr(h.off_chart))
n = h.normalization()
#print("NORMALIZATION=",n)
self.assertAlmostEqual(n, np.sum(np.array([10, 20, 30, 40, 50])) / 5.0 + 28)
hn = h.normalized()
npt.assert_array_almost_equal(hn.bins, h.bins / n)
self.assertAlmostEqual(hn.off_chart, h.off_chart / n)
if has_error_bars:

completion: npt.assert_array_almost_equal(hn.delta, h.delta / n)
api: numpy.testing.assert_array_almost_equal