import numpy as np
from mdp import random_mdp
from matplotlib import pyplot
from test_simple_estimator import Estimator
import time
import multiprocessing
import os
import pickle
def generate_mdps(num_states, num_actions, num_deterministic):
mdp = random_mdp(n_states=num_states, n_actions=num_actions)
mdp.P0 = np.zeros(num_states)
mdp.P0[0] = 1
deterministic_P = np.random.rand(num_states, num_actions, num_states)
deterministic_P = deterministic_P / deterministic_P.sum(axis=-1)[:, :, np.newaxis]
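# NOTE: `action_sequence` is the module-level array built in __main__ below; the first
# `num_deterministic` transitions along that sequence are overwritten here to be
# deterministic chain steps s -> s+1.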
for s in range(num_deterministic):
next_state = s + 1
deterministic_P[s, action_sequence[s], :] = 0
deterministic_P[s, action_sequence[s], next_state] = 1
mdp.reset()
return mdp, deterministic_P
if __name__ == '__main__':
action_length = 10
num_actions = 3
num_states = 10
gamma = 1.
alpha = 0.0
delta_alpha = 0.1
max_alpha = 1.05
n = 200
budget = 50
bins = 50
true_mean_samples = 10000
min_alpha = alpha
max_workers = 10
num_deterministic = 7
num_experiments = 100
action_sequence = np.random.choice(num_actions, size=action_length)
alphas = []
ys_mc = []
stds_mc = []
ys_particle_simple = []
samples_p_simple = []
ess_p_simple = []
ys_particle_bh = []
stds_bh = []
samples_p_bh = []
ess_p_bh = []
while alpha < max_alpha:
alphas.append(alpha)
alpha += delta_alpha
out_dir = 'logs/particle_estimator_alpha_exp/'
if out_dir is not None:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
res = []
# Run the evaluation on multiple threads
num_alpha = len(alphas)
start = time.time()
n_workers = min(num_alpha, multiprocessing.cpu_count())
n_workers = min(n_workers, max_workers)
iterations = max(num_alpha // n_workers, 1)
remainder = num_alpha % n_workers if n_workers < num_alpha else 0
mc = np.zeros(num_alpha)
particle = np.zeros(num_alpha)
for i in range(num_experiments):
mdp, deterministic_P = generate_mdps(num_states, num_actions, num_deterministic)
mdp.reset()
signature = mdp.get_signature()
random_P = np.array(mdp.P)
ys_mc = []
ys_particle_bh = []
def evaluate(alpha):
new_P = (1 - alpha) * random_P + alpha * deterministic_P
new_P = new_P / new_P.sum(axis=-1)[:, :, np.newaxis]
mdp.P = new_P
mdp.reset()
mdp.set_signature(signature)
estimator = Estimator(mdp, action_sequence, gamma=gamma)
estimations_mc = estimator.run_monte_carlo_estimation(true_mean_samples, action_length)
mean = np.mean(estimations_mc)
estimations_mc = estimator.run_monte_carlo_estimation(n, budget)
error_mc = ((np.array(estimations_mc) - mean) ** 2).mean()
error_mc_std = ((np.array(estimations_mc) - mean) ** 2).std()
print("Finished MC with alpha=" + str(alpha))
# estimations_particle, ess, _, counts = estimator.run_particle_estimation(n, budget, bh=False)
# error_simple = ((np.array(estimations_particle) - mean) ** 2).mean()
# counts_simple = np.mean(counts)
# ess_simple = np.mean(ess)
# print("Finished Particle Simple with alpha=" + str(alpha))
estimations_particle, ess, _, counts = estimator.run_particle_estimation(n, budget, bh=True)
error_bh = ((np.array(estimations_particle) - mean) ** 2).mean()
error_bh_std = ((np.array(estimations_particle) - mean) ** 2).std()
counts_bh = np.mean(counts)
ess_bh = np.mean(ess)
print("Finished Particle BH with alpha=" + str(alpha))
print("Finished alpha " + str(alpha))
return error_mc, error_mc_std, error_bh, error_bh_std, counts_bh, ess_bh # error_simple, counts_simple, ess_simple,
for it in range(iterations):
p = multiprocessing.Pool(n_workers)
results = p.starmap(evaluate, [(alpha,) for alpha in alphas[it * n_workers: (it + 1) * n_workers]])
print("Time to perform evaluation episodes:", time.time() - start, "s")
# Unpack results
for r in results:
ys_mc.append(np.array(r[0]))
stds_mc.append(r[1])
ys_particle_bh.append(r[2])
stds_bh.append(r[3])
samples_p_bh.append(np.array(r[4]))
ess_p_bh.append(r[5])
# ys_particle_simple.append(np.array(r[4]))
# samples_p_simple.append(np.array(r[5]))
# ess_p_simple.append(r[6])
p.close()
if remainder > 0:
p = multiprocessing.Pool(remainder)
results = p.starmap(evaluate, [(alpha,) for alpha in alphas[-remainder:]])
print("Time to perform evaluation episodes:", time.time() - start, "s")
# Unpack results
for r in results:
ys_mc.append(np.array(r[0]))
stds_mc.append(r[1])
ys_particle_bh.append(r[2])
stds_bh.append(r[3])
samples_p_bh.append(np.array(r[4]))
ess_p_bh.append(r[5])
# ys_particle_simple.append(np.array(r[4]))
# samples_p_simple.append(np.array(r[5]))
# ess_p_simple.append(r[6])
mc += np.array(ys_mc)
particle += np.array(ys_particle_bh)
xs = np.array(alphas)
import os
import cv2
import time
import pickle
import argparse
import numpy as np
from skimage import transform as trans
from multiprocessing import Pool
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='test')
parser.add_argument('--image_root', default='/data4/lixz/wider/body')
parser.add_argument('--select', default=1, type=int)
args = parser.parse_args()
def select(width, height, bboxes, landmarks):
if bboxes is None:
return None, None
if len(bboxes) == 1:
ind = 0
else:
dist, area = [], []
midx, midy = width / 2, height / 3
for bbox in bboxes:
x, y = (bbox[0] + bbox[2])/2, (bbox[1] + bbox[3])/2
dist.append(np.sqrt((x - midx) * (x - midx) + (y - midy) * (y - midy)))
area.append((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]))
dist = np.array(dist)
area = np.array(area)
p = area / dist
ind = np.argsort(p)[-1]
return bboxes[ind], landmarks[ind] if landmarks is not None else None
def cast_select(width, height, bboxes, landmarks, scores):
if bboxes is None:
return None, None
if len(bboxes) == 1:
ind = 0
else:
dist, area = [], []
midx, midy = width / 2, height / 3
for bbox in bboxes:
x, y = (bbox[0] + bbox[2])/2, (bbox[1] + bbox[3])/2
dist.append(np.sqrt((x - midx) * (x - midx) + (y - midy) * (y - midy)))
area.append((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]))
dist = np.array(dist)
area = np.array(area)
scores = (scores > 0.95).astype(int)
if np.sum(scores) == 0:
return None, None
p = scores * area / dist
ind = np.argsort(p)[-1]
return bboxes[ind], landmarks[ind] if landmarks is not None else None
def candi_select(width, height, bboxes, landmarks, scores):
if bboxes is None:
return None, None
inds, dists, areas = [], [], []
midx, midy = width / 2, height / 3
thres = width * height / 50
for i in range(len(bboxes)):
bbox = bboxes[i]
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
x, y = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
if scores[i] > 0.9 and area > thres and (width / 6 < x < width * 5 / 6):
inds.append(i)
dists.append(np.sqrt((x - midx) * (x - midx) + (y - midy) * (y - midy)))
areas.append(area)
if len(inds) == 0:
return None, None
else:
dists = np.array(dists)
areas = np.array(areas)
p = areas / dists
ind = np.argsort(p)[-1]
ind = inds[ind]
return bboxes[ind], landmarks[ind] if landmarks is not None else None
def preprocess(img, bbox=None, landmark=None, image_size=(112, 112), margin=44):
if landmark is not None: # do align using landmark
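# Reference 5-point template (eye centers, nose tip, mouth corners) for a 112x112
# aligned face crop; these coordinates match the commonly used ArcFace/insightface
# alignment template (an assumption about their origin, not documented in this script).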
src = np.array([
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041]], dtype=np.float32)
dst = landmark.reshape((2, 5)).T.astype(np.float32)
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2, :]
# M = cv2.estimateRigidTransform( dst.reshape(1,5,2), src.reshape(1,5,2), False)
# src = src[0:3,:]
# dst = dst[0:3,:]
warped = cv2.warpAffine(img, M, (image_size[1], image_size[0]), borderValue=0.0)
# tform3 = trans.ProjectiveTransform()
# tform3.estimate(src, dst)
# warped = trans.warp(img, tform3, output_shape=_shape)
return warped
else:
if bbox is None: # use center crop
det = np.zeros(4, dtype=np.int32)
'''
DESCRIPTION
----------
An assortment of code written for sanity checks on our 2017 TESS GI proposal
about difference imaging of clusters.
Most of this involves parsing Kharchenko et al (2013)'s table, hence the name
`parse_MWSC.py`.
The tools here do things like:
* Find how many open clusters we could observe
* Find how many member stars within those we could observe
* Compute TESS mags for everything (mostly via `ticgen`)
* Estimate blending effects, mainly through the dilution (computed just by
summing magnitudes appropriately)
* Using K+13's King profile fits, estimate the surface density of member stars.
It turns out that this radically underestimates the actual surface density
of stars (because of all the background blends). Moreover, for purposes of
motivating our difference imaging, "the number of stars in your aperture"
is more relevant than "a surface density", and even more relevant than both
of those is dilution.
So I settled on the dilution calculation.
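Concretely, for a cluster member of TESS magnitude T_t and the catalogued stars of
magnitude T_i falling inside a given aperture (target included), the dilution is
10^(-0.4*T_t) / sum_i 10^(-0.4*T_i); see `get_dilutions_and_distances` below.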
The plotting scripts here also make the skymap figure of the proposal. (Where
are the clusters on the sky?)
USAGE
----------
From /src/, select desired functions from __main__ below. Then:
>>> python parse_MWSC.py > output.log
'''
import matplotlib.pyplot as plt, seaborn as sns
import pandas as pd, numpy as np
from astropy.table import Table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
import astropy.units as u
from math import pi
import pickle, os
from scipy.interpolate import interp1d
global COLORS
COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# cite:
#
# <NAME>. & <NAME>. 2017, ticgen: A tool for calculating a TESS
# magnitude, and an expected noise level for stars to be observed by TESS.,
# v1.0.0, Zenodo, doi:10.5281/zenodo.888217
#
# and Stassun & friends (2017).
#import ticgen as ticgen
# # These two, from the website
# # http://dc.zah.uni-heidelberg.de/mwsc/q/clu/form
# # are actually outdated or something. They provided too few results.
# close_certain = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
# close_junk = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
def get_cluster_data():
# Downloaded the MWSC from
# http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=J%2FA%2BA%2F558%2FA53&target=http&
tab = Table.read('../data/Kharchenko_2013_MWSC.vot', format='votable')
df = tab.to_pandas()
for colname in ['Type', 'Name', 'n_Type', 'SType']:
df[colname] = [e.decode('utf-8') for e in list(df[colname])]
# From erratum:
# For the Sun-like star, a 4 Re planet produces a transit depth of 0.13%. The
# limiting magnitude for transits to be detectable is about I_C = 11.4 . This
# also corresponds to K_s ~= 10.6 and a maximum distance of 290 pc, assuming no
# extinction.
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
N_c_r0 = int(np.sum(close['N1sr0']))
N_c_r1 = int(np.sum(close['N1sr1']))
N_c_r2 = int(np.sum(close['N1sr2']))
N_f_r0 = int(np.sum(far['N1sr0']))
N_f_r1 = int(np.sum(far['N1sr1']))
N_f_r2 = int(np.sum(far['N1sr2']))
type_d = {'a':'association', 'g':'globular cluster', 'm':'moving group',
'n':'nebulosity/presence of nebulosity', 'r':'remnant cluster',
's':'asterism', '': 'no label'}
ntype_d = {'o':'object','c':'candidate','':'no label'}
print('*'*50)
print('\nMilky Way Star Clusters (close := <500pc)'
'\nN_clusters: {:d}'.format(len(close))+\
'\nN_stars (in core): {:d}'.format(N_c_r0)+\
'\nN_stars (in central part): {:d}'.format(N_c_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_c_r2))
print('\n'+'*'*50)
print('\nMilky Way Star Clusters (far := <1000pc)'
'\nN_clusters: {:d}'.format(len(far))+\
'\nN_stars (in core): {:d}'.format(N_f_r0)+\
'\nN_stars (in central part): {:d}'.format(N_f_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_f_r2))
print('\n'+'*'*50)
####################
# Post-processing. #
####################
# Compute mean density
mean_N_star_per_sqdeg = df['N1sr2'] / (pi * df['r2']**2)
df['mean_N_star_per_sqdeg'] = mean_N_star_per_sqdeg
# Compute King profiles
king_profiles, theta_profiles = [], []
for rt, rc, k, d in zip(np.array(df['rt']),
np.array(df['rc']),
np.array(df['k']),
np.array(df['d'])):
sigma, theta = get_king_proj_density_profile(rt, rc, k, d)
king_profiles.append(sigma)
theta_profiles.append(theta)
df['king_profile'] = king_profiles
df['theta'] = theta_profiles
ra = np.array(df['RAJ2000'])
dec = np.array(df['DEJ2000'])
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
galactic_long = np.array(c.galactic.l)
galactic_lat = np.array(c.galactic.b)
ecliptic_long = np.array(c.barycentrictrueecliptic.lon)
ecliptic_lat = np.array(c.barycentrictrueecliptic.lat)
df['galactic_long'] = galactic_long
df['galactic_lat'] = galactic_lat
df['ecliptic_long'] = ecliptic_long
df['ecliptic_lat'] = ecliptic_lat
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
return close, far, df
def distance_histogram(df):
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
hist, bin_edges = np.histogram(
df['d'],
bins=np.append(np.logspace(1, 6, 1000), 1e7),
normed=False)
ax.step(bin_edges[:-1], np.cumsum(hist), 'k-', where='post')
ax.set_xlabel('distance [pc]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xlim([5e1,1e4])
ax.set_xscale('log')
ax.set_yscale('log')
f.tight_layout()
f.savefig('d_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_cumdist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
bins=np.append(np.logspace(-2, 1, 1000), 1e7),
normed=False)
ax.step(bin_edges[:-1], np.cumsum(hist),
where='post', label=t+' '+scale_d[k])
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='upper left', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
bins=np.append(np.logspace(-2,1,7), 1e7),
normed=False)
ax.step(bin_edges[:-1], hist, where='post', label=t+' '+scale_d[k],
alpha=0.7)
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def mean_density_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
hist, bin_edges = np.histogram(
dat['mean_N_star_per_sqdeg'],
bins=np.append(np.logspace(0,4,9), 1e7),
normed=False)
ax.step(bin_edges[:-1], hist, where='post', label=t,
alpha=0.7)
ix += 1
def tick_function(N_star_per_sqdeg):
tess_px = 21*u.arcsec
tess_px_area = tess_px**2
deg_per_tess_px = tess_px_area.to(u.deg**2).value
vals = N_star_per_sqdeg * deg_per_tess_px
outstrs = ['%.1E'%z for z in vals]
outstrs = ['$'+o[0] + r'\! \cdot \! 10^{\mathrm{-}' + o[-1] + r'}$' \
for o in outstrs]
return outstrs
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('mean areal density [stars/$\mathrm{deg}^{2}$]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.logspace(0,4,5)
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('mean areal density [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout()
f.savefig('mean_density_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def plot_king_profiles(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f, axs = plt.subplots(figsize=(4,7), nrows=2, ncols=1, sharex=True)
for theta, profile in zip(close['theta'], close['king_profile']):
axs[0].plot(theta, profile, alpha=0.2, c=colors[0])
for theta, profile in zip(far['theta'], far['king_profile']):
axs[1].plot(theta, profile, alpha=0.1, c=colors[1])
# Add text in top right.
axs[0].text(0.95, 0.95, '$d < 500\ \mathrm{pc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[0].transAxes,
fontsize='large')
axs[1].text(0.95, 0.95, '$d < 1\ \mathrm{kpc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[1].transAxes,
fontsize='large')
xmin, xmax = 1, 1e3
for ax in axs:
ax.set_xscale('log')
ax.set_xlim([xmin, xmax])
if ax == axs[1]:
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('angular distance [TESS px]')
ax.tick_params(which='both', direction='in', zorder=0)
ax.set_ylabel(r'$\Sigma(r)$ [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout(h_pad=0)
f.savefig('king_density_profiles_close_MWSC.pdf', dpi=300,
bbox_inches='tight')
def get_king_proj_density_profile(r_t, r_c, k, d):
'''
r_t: King's tidal radius [pc]
r_c: King's core radius [pc]
k: normalization [pc^{-2}]
d: distance [pc]
returns density profile in number per sq tess pixel
'''
# Eq 4 of Ernst et al, 2010 https://arxiv.org/pdf/1009.0710.pdf
# citing King (1962).
r = np.logspace(-2, 2.4, num=int(2e4))
X = 1 + (r/r_c)**2
C = 1 + (r_t/r_c)**2
vals = k * (X**(-1/2) - C**(-1/2))**2
#NOTE: this fails when r_t does not exist. This might be important...
vals[r>r_t] = 0
# vals currently in number per square parsec. want in number per TESS px.
# first convert to number per square arcsec
# N per sq arcsec. First term converts to 1/AU^2. Then the angular surface
# density scales as the square of the distance (same number of things,
# smaller angle)
sigma = vals * 206265**(-2) * d**2
tess_px = 21*u.arcsec
arcsec_per_px = 21
sigma_per_sq_px = sigma * arcsec_per_px**2 # N per px^2
# r is in pc. we want the profile vs angular distance.
AU_per_pc = 206265
r *= AU_per_pc # r now in AU
theta = r / d # angular distance in arcsec
tess_px = 21 # arcsec per px
theta *= (1/tess_px) # angular distance in px
return sigma_per_sq_px, theta
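# Worked example (illustrative numbers): at d = 500 pc, a physical radius r = 1 pc
# subtends 206265 AU / 500 pc ~ 412.5 arcsec ~ 19.6 TESS px (21 arcsec per px), so
# nearby-cluster King profiles span tens of TESS pixels.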
def make_wget_script(df):
'''
to download stellar data for each cluster, need to run a script of wgets.
this function makes the script.
'''
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
names = np.array(df['Name'])
f = open('../data/MWSC_stellar_data/get_stellar_data.sh', 'w')
outstrs = []
for mwsc_id, name in zip(mwsc_ids, names):
startstr = 'wget '+\
'ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/558/A53/stars/2m_'
middlestr = str(mwsc_id) + '_' + str(name)
endstr = '.dat.bz2 ;\n'
outstr = startstr + middlestr + endstr
outstrs.append(outstr)
f.writelines(outstrs)
f.close()
print('made wget script!')
def get_stellar_data_too(df, savstr, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster, it computes surface density vs angular distance from
cluster center.
%%%Method 1 (outdated):
%%%Interpolating these results over the King profiles, it associates a surface
%%% density with each star.
%%%(WARNING: how many clusters do not have King profiles?)
Method 2 (used):
Associate a surface density with each star by counting stars in annuli.
This is also not very useful.
It then returns "close", "far", and the entire dataframe
'''
names = np.array(df['Name'])
r2s = np.array(df['r2']) # cluster radius (deg)
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s)):
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name != 'Melotte_20':
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
member_T_mags = np.array(temp['Tmag'])
noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = noise
#########################################################################
## METHOD #1 to assign surface densities:
## The King profile for the cluster is already known. Assign each member
## star a surface density from the King profile evaluated at the member
## star's angular position.
#king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
#king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
## theta is saved in units of TESS px. Get each star's distance from the
## center in TESS pixels.
#arcsec_per_tesspx = 21
#Rcl = np.array(mdf['Rcl'])*u.deg
#dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
## interpolate over the King profile
#func = interp1d(theta, king_profile, fill_value='extrapolate')
#try:
# density_per_sq_px = func(dists_from_center)
#except:
# print('SAVED OUTPUT TO ../data/Kharachenko_full.p')
# pickle.dump(outd, open('../data/Kharachenko_full.p', 'wb'))
# print('interpolation failed. check!')
# import IPython; IPython.embed()
#mdf['density_per_sq_px'] = density_per_sq_px
#########################################################################
#########################################################################
# METHOD #2 for surface densities (because Method #1 only counts
# member stars!).
# Just count stars in annuli.
king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
inds = (tab['Rcl'] < r2)
stars_in_annulus = tab[inds]
sia = stars_in_annulus.to_pandas()
arcsec_per_tesspx = 21
Rcl = np.array(sia['Rcl'])*u.deg
dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
maxdist = ((r2*u.deg).to(u.arcsec).value/arcsec_per_tesspx)
n_pts = np.min((50, int(len(sia)/2)))
angsep_grid = np.linspace(0, maxdist, num=n_pts)
# Attempt to compute Tmags for everything. Only count stars with
# T<limiting magnitude as "contaminants" (anything else is probably too
# faint to really matter!)
mags = sia[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
T_mags = np.array(temp['Tmag'])
all_dists = dists_from_center[(T_mags > 0) & (T_mags < 17) & \
(np.isfinite(T_mags))]
N_in_bin, edges = np.histogram(
all_dists,
bins=angsep_grid,
normed=False)
# compute empirical surface density, defined on the midpoints
outer, inner = angsep_grid[1:], angsep_grid[:-1]
sigma = N_in_bin / (pi * (outer**2 - inner**2))
midpoints = angsep_grid[:-1] + np.diff(angsep_grid)/2
# interpolate over the empirical surface density as a function of
# angular separation to assign surface densities to member stars.
func = interp1d(midpoints, sigma, fill_value='extrapolate')
member_Rcl = np.array(mdf['Rcl'])*u.deg
member_dists_from_center = np.array(member_Rcl.to(u.arcsec).value/\
arcsec_per_tesspx)
try:
member_density_per_sq_px = func(member_dists_from_center)
except:
print('SAVED OUTPUT TO ../data/Kharachenko_full_{:s}.p'.format(savstr))
pickle.dump(outd, open(
'../data/Kharachenko_full_{:s}.p'.format(savstr), 'wb'))
print('interpolation failed. check!')
import IPython; IPython.embed()
mdf['density_per_sq_px'] = member_density_per_sq_px
#########################################################################
N_catalogd = int(df.loc[df['Name']==name, 'N1sr2'])
N_my_onesigma = int(len(mdf))
got_Tmag = (np.array(mdf['Tmag']) > 0)
N_with_Tmag = len(mdf[got_Tmag])
print('N catalogued as in cluster: {:d}'.format(N_catalogd))
print('N I got as in cluster: {:d}'.format(N_my_onesigma))
print('N of them with Tmag: {:d}'.format(N_with_Tmag))
diff = abs(N_catalogd - N_with_Tmag)
if diff > 5:
print('\nWARNING: my cuts differ from Kharchenko+ 2013!!')
lens = np.array([len(member_T_mags),
len(noise),
len(member_dists_from_center),
len(member_density_per_sq_px)])
np.testing.assert_equal(lens, lens[0]*np.ones_like(lens))
# for members
outd[name]['Tmag'] = np.array(mdf['Tmag'])
outd[name]['noise_1hr'] = np.array(mdf['noise_1hr'])
outd[name]['Rcl'] = member_dists_from_center
outd[name]['density_per_sq_px'] = member_density_per_sq_px
# Occasionally, do some output plots to compare profiles
if ix%50 == 0:
plt.close('all')
f, ax=plt.subplots()
ax.scatter(member_dists_from_center, member_density_per_sq_px)
ax.plot(king_theta, king_profile)
ax.set_ylim([0,np.max((np.max(member_density_per_sq_px),
np.max(king_profile) ) )])
ax.set_xlim([0, 1.02*np.max(member_dists_from_center)])
ax.set_xlabel('angular sep [TESS px]')
ax.set_ylabel('surface density (line: King model, dots: empirical'
' [per tess px area]', fontsize='xx-small')
f.savefig('king_v_empirical/{:s}_{:d}.pdf'.format(name, ix),
bbox_inches='tight')
del mdf
ix += 1
print(50*'*')
print('SAVED OUTPUT TO ../data/Kharchenko_full_{:s}.p'.format(savstr))
pickle.dump(outd, open(
'../data/Kharchenko_full_{:s}.p'.format(savstr), 'wb'))
print(50*'*')
close = df[df['d'] < 500]
far = df[df['d'] < 1000]
return close, far, df
def get_dilutions_and_distances(df, savstr, faintest_Tmag=16, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster member, it then finds all cataloged stars (not necessarily
cluster members) within 2, 3, 4, 5, 6 TESS pixels.
It sums the fluxes, and computes a dilution.
It saves (for each cluster member):
* number of stars in various apertures
* dilution for various apertures
* distance of cluster member
* Tmag of cluster member
* noise_1hr for cluster member
* ra,dec for cluster member
'''
names = np.array(df['Name'])
r2s = np.array(df['r2'])
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
start, step = 3, 7
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s))[start::step]:
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
outpath = '../data/MWSC_dilution_calc/{:s}.csv'.format(str(name))
if os.path.exists(outpath):
print('found {:s}, continue'.format(outpath))
continue
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name not in ['Melotte_20', 'Sco_OB4']:
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp{:s}.csv'.format(name), index=False)
ticgen.ticgen_csv({'input_fn':'temp{:s}.csv'.format(name)})
temp = pd.read_csv('temp{:s}.csv-ticgen.csv'.format(name))
member_T_mags = np.array(temp['Tmag'])
member_noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = member_noise
desired_Tmag_inds = ((member_T_mags > 0) & (member_T_mags < faintest_Tmag) & \
(np.isfinite(member_T_mags)) )
sel_members = mdf[desired_Tmag_inds]
# Compute T mag for everything in this cluster field. NOTE this
# consistently seems to fail for ~10% of the stars. This is not
# precision science (we are getting coarse estimates), so ignore this
# likely bug.
mags = tab[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_pandas().to_csv('temp{:s}.csv'.format(name), index=False)
ticgen.ticgen_csv({'input_fn':'temp{:s}.csv'.format(name)})
temp = pd.read_csv('temp{:s}.csv-ticgen.csv'.format(name))
all_Tmag = np.array(temp['Tmag'])
tab['Tmag'] = all_Tmag
Tmag_inds = ((all_Tmag>0) & (all_Tmag<28) & (np.isfinite(all_Tmag)))
sel_in_field = tab[Tmag_inds]
# Want, for all cluster members with T<faintest_Tmag
# * distance of cluster member
# * Tmag of cluster member
# * noise_1hr for cluster member
# * ra,dec for cluster member
# * number of stars in various apertures
# * dilution for various apertures
sel_members['dist'] = np.ones_like(np.array(sel_members['RAhour']))*\
float(df.loc[df['Name']==name, 'd'])
Nstar_dict, dil_dict = {}, {}
arcsec_per_px = 21
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
Nstar_dict[Nstar_str] = []
dil_dict[dil_str] = []
# Iterate over members, then over apertures.
print('finding all neighbors and computing dilutions')
for sm_ra, sm_dec, sm_Tmag in zip(sel_members['RAhour'],
sel_members['DEdeg'],
sel_members['Tmag']):
member_c = SkyCoord(ra=sm_ra*u.hourangle, dec=sm_dec*u.degree)
nbhr_RAs = np.array(sel_in_field['RAhour'])*u.hourangle
nbhr_DECs = np.array(sel_in_field['DEdeg'])*u.degree
c = SkyCoord(ra=nbhr_RAs, dec=nbhr_DECs)
seps = c.separation(member_c)
# Find neighboring stars in aperture.
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
aper_radius_in_as = aper_radius * arcsec_per_px * u.arcsecond
in_aperture = (seps < aper_radius_in_as)
stars_in_aperture = sel_in_field[in_aperture]
Nstar_in_aperture = len(stars_in_aperture)
# NB this list includes the target star.
Tmags_in_aperture = np.array(stars_in_aperture['Tmag'])
# Compute dilution.
numerator = 10**(-0.4 * sm_Tmag)
denominator = np.sum( 10**(-0.4 * Tmags_in_aperture) )
dilution = numerator/denominator
Nstar_dict[Nstar_str].append(Nstar_in_aperture)
dil_dict[dil_str].append(dilution)
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
sel_members[Nstar_str] = Nstar_dict[Nstar_str]
sel_members[dil_str] = dil_dict[dil_str]
print('done computing dilutions')
out = sel_members[
['dist','Tmag','noise_1hr','RAhour','DEdeg',
'Nstar_2px','Nstar_3px','Nstar_4px','Nstar_5px','Nstar_6px',
'dil_2px','dil_3px','dil_4px','dil_5px','dil_6px'
]
]
#########################################################################
N_catalogd = int(df.loc[df['Name']==name, 'N1sr2'])
N_my_onesigma = len(mdf)
N_with_Tmag = len(out)
print('N catalogued as in cluster: {:d}'.format(N_catalogd))
print('N I got as in cluster: {:d}'.format(N_my_onesigma))
print('N of them with Tmag: {:d}'.format(N_with_Tmag))
diff = abs(N_catalogd - N_with_Tmag)
if diff > 5:
print('\nWARNING: my cuts differ from Kharchenko+ 2013!!')
#########################################################################
fpath = '../data/MWSC_dilution_calc/{:s}.csv'.format(str(name))
print('saving to {:s}'.format(fpath))
out.to_csv(fpath, index=False)
print('done with dilution calculation')
def plot_King_density_vs_Tmag_scatter(close, far):
c_names = np.sort(close['Name'])
f_names = np.sort(far['Name'])
obj = pickle.load(open('../data/Kharachenko_full.p','rb'))
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# Close clusters
Tmags, densities = np.array([]), np.array([])
for c_name in c_names:
c = obj[c_name]
#XXX FIXME THIS IS WRONG!!!!!!!!
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='hex',
color=colors[0],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-6,0])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{King}}\ [\mathrm{member\ stars/TESS\ px}^2]$)')
g.savefig('king_density_vs_Tmag_scatter_close.pdf', dpi=300,
bbox_inches='tight')
# Far clusters
Tmags, densities = np.array([]), np.array([])
"""
Empirical Likelihood Linear Regression Inference
The script contains the function that is optimized over nuisance parameters to
conduct inference on linear regression parameters. It is called by eltest
in OLSResults.
General References
-----------------
<NAME>.(2001). Empirical Likelihood. Chapman and Hall
"""
import numpy as np
from statsmodels.emplike.descriptive import _OptFuncts
class _ELRegOpts(_OptFuncts):
"""
A class that holds functions to be optimized over when conducting
hypothesis tests and calculating confidence intervals.
Parameters
----------
OLSResults : Results instance
A fitted OLS result
"""
def __init__(self):
pass
def _opt_nuis_regress(self, nuisance_params, param_nums=None,
endog=None, exog=None,
nobs=None, nvar=None, params=None, b0_vals=None,
stochastic_exog=None):
"""
A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest
Parameters
----------
nuisance_params: 1darray
Parameters to be optimized over
Returns
-------
llr : float
-2 x the log-likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest.
"""
params[param_nums] = b0_vals
nuis_param_index = np.int_(np.delete(np.arange(nvar),
param_nums))
params[nuis_param_index] = nuisance_params
new_params = params.reshape(nvar, 1)
self.new_params = new_params
est_vect = exog * \
(endog - np.squeeze(np.dot(exog, new_params))).reshape(int(nobs), 1)
if not stochastic_exog:
exog_means = np.mean(exog, axis=0)[1:]
exog_mom2 = (np.sum(exog * exog, axis=0))[1:]\
/ nobs
mean_est_vect = exog[:, 1:] - exog_means
mom2_est_vect = (exog * exog)[:, 1:] - exog_mom2
regressor_est_vect = np.concatenate((mean_est_vect, mom2_est_vect),
axis=1)
est_vect = np.concatenate((est_vect, regressor_est_vect),
axis=1)
wts = np.ones(int(nobs)) * (1. / nobs)
x0 = np.zeros(est_vect.shape[1]).reshape(-1, 1)
try:
eta_star = self._modif_newton(x0, est_vect, wts)
denom = 1. + np.dot(eta_star, est_vect.T)
import sys, datetime, os, math
import numpy as np
import classifiers
import data_processing as data_proc
from keras.models import model_from_json
from sklearn import metrics
from sklearn.feature_extraction import DictVectorizer
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
import keras.backend as K
from collections import Counter, OrderedDict
from pandas import read_csv
from numpy.random import seed, shuffle
path = os.getcwd()[:os.getcwd().rfind('/')]
def load_file(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text.split("\n")
def save_file(lines, filename):
data = '\n'.join(lines)
file = open(filename, 'w')
file.write(data)
file.close()
def load_data_panda(filename, shuffle_sets=False):
print("Reading data from file %s..." % filename)
data = read_csv(filename, sep="\t+", header=None, engine='python')
data.columns = ["Set", "Label", "Text"]
print('The shape of this data set is: ', data.shape)
x_train, labels_train = np.array(data["Text"]), np.array(data["Label"])
if shuffle_sets:
np.random.seed(12346598)
indices = np.arange(len(x_train))
np.random.shuffle(indices)
x_train = x_train[indices]
labels_train = labels_train[indices]
return x_train, labels_train
def save_as_dataset(data, labels, filename):
lines = []
first_word = "TrainSet" if "train" in filename else "TestSet"
for i in range(len(labels)):
if data[i] is not None:
lines.append(first_word + '\t' + str(labels[i]) + '\t' + str(data[i]))
data = '\n'.join(lines)
file = open(filename, 'w')
file.write(data)
file.close()
def save_dictionary(dictionary, filename):
lines = []
for k, v in dictionary.items():
lines.append(k + '\t' + str(v))
file = open(filename, 'w')
file.write('\n'.join(lines))
file.close()
def load_dictionary(filename):
dictionary = {}
file = open(filename, 'r')
lines = file.read()
file.close()
for line in lines.split("\n"):
key, value = line.split("\t")
dictionary[key] = value
return dictionary
def save_model(model, json_name, h5_weights_name):
model_json = model.to_json()
with open(json_name, "w") as json_file:
json_file.write(model_json)
model.save_weights(h5_weights_name)
print("Saved model with json name %s, and weights %s" % (json_name, h5_weights_name))
def load_model(json_name, h5_weights_name, verbose=False):
# In case of saved model (not to json or yaml)
# model = models.load_model(model_path, custom_objects={'f1_score': f1_score})
loaded_model_json = open(json_name, 'r').read()
model = model_from_json(loaded_model_json)
model.load_weights(h5_weights_name)
if verbose:
print("Loaded model with json name %s, and weights %s" % (json_name, h5_weights_name))
return model
# Given any number of dicts, shallow copy and merge into a new dict,
# precedence goes to key value pairs in latter dicts.
# This is in case a Python3.5 version is NOT used. (needed for my access to the zCSF cluster)
def merge_dicts(*dict_args):
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
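# e.g. merge_dicts({'a': 1}, {'a': 2, 'b': 3}) -> {'a': 2, 'b': 3} (later dicts take precedence)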
# Just a primitive batch generator
def batch_generator(x, y, batch_size):
seed(1655483)
size = x.shape[0]
x_copy = x.copy()
y_copy = y.copy()
indices = np.arange(size)
np.random.shuffle(indices)
x_copy = x_copy[indices]
y_copy = y_copy[indices]
i = 0
while True:
if i + batch_size <= size:
yield x_copy[i:i + batch_size], y_copy[i:i + batch_size]
i += batch_size
else:
i = 0
indices = np.arange(size)
np.random.shuffle(indices)
x_copy = x_copy[indices]
y_copy = y_copy[indices]
continue
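# Example usage (illustrative): the generator yields shuffled batches indefinitely and
# re-shuffles after each full pass over the data.
# gen = batch_generator(x_train, y_train, batch_size=32)
# x_batch, y_batch = next(gen)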
def shuffle_data(labels, n):
seed(532908)
indices = range(len(labels))
pos_indices = [i for i in indices if labels[i] == 1]
neg_indices = [i for i in indices if labels[i] == 0]
shuffle(pos_indices)
shuffle(neg_indices)
top_n = pos_indices[0:n] + neg_indices[0:n]
shuffle(top_n)
return top_n
# Get some idea about the max and mean length of the tweets (useful for deciding on the sequence length)
def get_max_len_info(tweets, average=False):
sum_of_length = sum([len(l.split()) for l in tweets])
avg_tweet_len = sum_of_length / float(len(tweets))
print("Mean of train tweets: ", avg_tweet_len)
max_tweet_len = max(len(l.split()) for l in tweets)
print("Max tweet length is = ", max_tweet_len)
if average:
return avg_tweet_len
return max_tweet_len
def get_classes_ratio(labels):
positive_labels = sum(labels)
negative_labels = len(labels) - sum(labels)
ratio = [max(positive_labels, negative_labels) / float(negative_labels),
max(positive_labels, negative_labels) / float(positive_labels)]
print("Class ratio: ", ratio)
return ratio
def get_classes_ratio_as_dict(labels):
ratio = Counter(labels)
ratio_dict = {0: float(max(ratio[0], ratio[1]) / ratio[0]), 1: float(max(ratio[0], ratio[1]) / ratio[1])}
print('Class ratio: ', ratio_dict)
return ratio_dict
def extract_features_from_dict(train_features, test_features):
# Transform the list of feature-value mappings to a vector
vector = DictVectorizer(sparse=False)
# Learn a list of feature name -> indices mappings and transform X_train_features
x_train_features = vector.fit_transform(train_features).tolist()
# Just transform the X_test_features, based on the list fitted on X_train_features
# Disadvantage: named features not encountered during fit_transform will be silently ignored.
x_test_features = vector.transform(test_features).tolist()
print('Size of the feature sets: train = ', len(x_train_features[0]), ', test = ', len(x_test_features[0]))
return x_train_features, x_test_features
def feature_scaling(features):
scaled_features = []
max_per_col = []
for i in range(len(features[0])):
maxx = max([abs(f[i]) for f in features])
if maxx == 0.0:
maxx = 1.0
max_per_col.append(maxx)
for f in features:
scaled_features.append([float(f[i]) / float(max_per_col[i]) for i in range(len(f))])
return scaled_features
def run_supervised_learning_models(train_features, train_labels, test_features, test_labels,
make_feature_analysis=False, feature_names=None, top_features=0, plot_name="coeff"):
class_ratio = get_classes_ratio_as_dict(train_labels) # alternatively, can be set class_ratio = 'balanced'
classifiers.linear_svm_grid(train_features, train_labels, test_features, test_labels, class_ratio,
make_feature_analysis, feature_names, top_features, plot_name)
classifiers.logistic_regression_grid(train_features, train_labels, test_features, test_labels, class_ratio,
make_feature_analysis, feature_names, top_features, plot_name)
# classifiers.nonlinear_svm(train_features, train_labels, test_features, test_labels, class_ratio,
# make_feature_analysis, feature_names, top_features, plot_name)
# Convert tweets into an array of indices of shape (m, max_tweet_length)
def tweets_to_indices(tweets, word_to_index, max_tweet_len):
m = tweets.shape[0]
tweet_indices = np.zeros((m, max_tweet_len))
for i in range(m):
sentence_words = [w.lower() for w in tweets[i].split()]
j = 0
for w in sentence_words:
tweet_indices[i, j] = word_to_index[w]
j = j + 1
return tweet_indices
def encode_text_as_matrix(train_tweets, test_tweets, mode, max_num_words=None, lower=False, char_level=False):
# Create the tokenizer
tokenizer = Tokenizer(num_words=max_num_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=lower, split=" ", char_level=char_level)
# Fit the tokenizer on the documents
tokenizer.fit_on_texts(train_tweets)
# Encode each example using a 'mode' scoring method (mode can be count, binary, freq, tf-idf)
x_train = tokenizer.texts_to_matrix(train_tweets, mode=mode)
x_test = tokenizer.texts_to_matrix(test_tweets, mode=mode)
return tokenizer, x_train, x_test
def encode_text_as_word_indexes(train_tweets, test_tweets, max_num_words=None, lower=False, char_level=False):
# Create the tokenizer
tokenizer = Tokenizer(num_words=max_num_words, filters='', lower=lower, split=" ", char_level=char_level)
# Fit the tokenizer on the documents
tokenizer.fit_on_texts(train_tweets)
# Encode each example as a sequence of word indexes based on the vocabulary of the tokenizer
x_train = tokenizer.texts_to_sequences(train_tweets)
x_test = tokenizer.texts_to_sequences(test_tweets)
return tokenizer, x_train, x_test
# Build random vector mappings of a vocabulary
def build_random_word2vec(tweets, embedding_dim=100, variance=1):
print("\nBuilding random vector of mappings with dimension %d..." % embedding_dim)
word2vec_map = {}
seed(1457873)
words = set((' '.join(tweets)).split())
for word in words:
embedding_vector = word2vec_map.get(word)
if embedding_vector is None:
word2vec_map[word] = np.random.uniform(-variance, variance, size=(embedding_dim,))
return word2vec_map
# Load a set of pre-trained embeddings (can be GLoVe or emoji2vec)
def load_vectors(filename='glove.6B.100d.txt'):
print("\nLoading vector mappings from %s..." % filename)
word2vec_map = {}
if 'emoji' in filename:
f = open(path + '/models/emoji2vec/' + filename)
else: # by default, load the GLoVe embeddings
f = open(path + '/res/glove/' + filename)
for line in f:
values = line.split()
word = values[0]
weights = np.asarray(values[1:], dtype='float32')
word2vec_map[word] = weights
f.close()
print('Found %s word vectors with embedding dimension %s'
% (len(word2vec_map), next(iter(word2vec_map.values())).shape[0]))
return word2vec_map
# Compute the word-embedding matrix
def get_embedding_matrix(word2vec_map, word_to_index, embedding_dim, init_unk=True, variance=None):
# Get the variance of the embedding map
if init_unk and variance is None:
variance = embedding_variance(word2vec_map)
print("Word vectors have variance ", variance)
# Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors)
embedding_matrix = np.zeros((len(word_to_index) + 1, embedding_dim))
for word, i in word_to_index.items():
embedding_vector = word2vec_map.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
elif init_unk:
# Unknown tokens are initialized randomly by sampling from a uniform distribution [-var, var]
seed(1337603)
embedding_matrix[i] = np.random.uniform(-variance, variance, size=(1, embedding_dim))
# else:
# print("Not found: ", word)
return embedding_matrix
# Get the vec representation of a set of tweets based on a specified embedding (can be a word or emoji mapping)
def get_tweets_embeddings(tweets, vec_map, embedding_dim=100, init_unk=False, variance=None, weighted_average=True):
# Get the variance of the embedding map
if init_unk and variance is None:
variance = embedding_variance(vec_map)
print("Vector mappings have variance ", variance)
# If set, calculate the tf-idf weight of each embedding, otherwise, no weighting (all weights are 1.0)
if weighted_average:
weights = get_tf_idf_weights(tweets, vec_map)
else:
weights = {k: 1.0 for k in vec_map.keys()}
tw_emb = np.zeros((len(tweets), embedding_dim))
for i, tw in enumerate(tweets):
total_valid = 0
for word in tw.split():
embedding_vector = vec_map.get(word)
if embedding_vector is not None:
tw_emb[i] = tw_emb[i] + embedding_vector * weights[word]
total_valid += 1
elif init_unk:
seed(1337603)
tw_emb[i] = np.random.uniform(-variance, variance, size=(1, embedding_dim))
# else:
# print("Not found: ", word)
# Get the average embedding representation for this tweet
tw_emb[i] /= float(max(total_valid, 1))
return tw_emb
# Based on the deepmoji project, predicting emojis for each tweet -- done using their pre-trained weights
# Here we extract the relevant emojis (with an individual probability of being accurate over the set threshold)
def get_deepmojis(filename, threshold=0.05):
print("\nGetting deep-mojis for each tweet in %s..." % filename)
df = read_csv(path + "/res/deepmoji/" + filename, sep='\t')
pred_mappings = load_file(path + "/res/emoji/wanted_emojis.txt")
emoji_pred = []
for index, row in df.iterrows():
tw_pred = []
for top in range(5):
if row['Pct_%d' % (top+1)] >= threshold:
tw_pred.append(row['Emoji_%d' % (top + 1)])
emoji_pred.append([pred_mappings[t] for t in tw_pred])
print("Couldn't find a strong emoji prediction for %d emojis" % len([pred for pred in emoji_pred if pred == []]))
return emoji_pred
# Just a dummy function that I used for the demo (printing the predicted deepmojis for each tweet in my demo set)
def get_demo_emojis(filename, data):
deepmojis = load_file(path + "/res/datasets/demo/deepmoji_" + filename)
emojis = data_proc.extract_emojis(data)
all_emojis = [deepmojis[i] if emojis[i] == ''
else emojis[i] + ' ' + deepmojis[i] for i in range(len(emojis))]
all_emojis = [' '.join(set(e.split())) for e in all_emojis]
for d, e in zip(data[20:40], all_emojis[20:40]):
print("Tweet: ", d)
print("Predicted emojis: ", e, "\n")
return all_emojis
# Calculate the variance of an embedding (like glove, word2vec, emoji2vec, etc)
# Used to sample new uniform distributions of vectors in the interval [-variance, variance]
def embedding_variance(vec_map):
variance = np.sum([np.var(vec) for vec in vec_map.values()]) / len(vec_map)
return variance
import numpy as np
from .utils import gaussian_pdf, mutation_kernel, resize_to_exp_limits_det
from .utils import prob_low_det_high_measurement
from .model_parameters import low_en_exp_cutoff, high_en_exp_cutoff, low_en_threshold
class det_pop:
'''
Deterministic population function class. This class implements the
deterministic evolution of a population of cells, as defined in our model.
A population is represented as a continuous distribution in the binding
energy space. The class features methods to perform cell duplication and
mutation, selection and differentiation.
The class displays the following methods:
- __init__: default class constructor
- create_empty: initializes an empty population
- create_with_explicit_attributes: creates an object having specified
population size and distribution
- create_copy_without_kernel: creates a copy of an object, copying every
attribute but the mutation kernel.
- merge_with: modifies the population by merging it with the one passed as
argument
- select_with_psurv: given a survival probability it models the effect of
selection on the population according to this survival probability.
- differentiate: implements cell differentiation and returns the
differentiated MC/PC populations
- carrying_cap: implements a finite carrying capacity
- expand: implements the combination of duplications and mutations that
occur during a single evolution round.
- bareps: returns the current value of bar-epsilon for the population
- N: returns the current population size
- energies: returns the energy domain of the distribution (by reference!)
- mean_en: returns the mean energy of the population
'''
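# Illustrative usage of a single evolution round (psurv_x, p_mc and p_pc are assumed to
# be the survival probability on the energy domain and the differentiation probabilities):
#   pop = det_pop(par)
#   pop.expand()                                   # duplication + mutation
#   pop.select_with_psurv(psurv_x)                 # selection
#   mc_pop, pc_pop = pop.differentiate(p_mc, p_pc) # differentiation
#   pop.carrying_cap(par)                          # finite carrying capacity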
def __init__(self, par, mc_seed=None):
'''
Initializes the population using the parameter set specified.
Args:
- par: the model parameters dictionary
- mc_seed (stoch_pop object, optional): MC seed population. If
specified then the population is seeded by a weighted mixture of
reactivated memory and naive cells, according to the weight
specified in the parameters.
'''
xlim_m, xlim_p, dx = par['xlim_minus'], par['xlim_plus'], par['dx']
Ni, mu_i, sigma_i = par['N_i'], par['mu_i'], par['sigma_i']
# distribution domain and discretization step
self.x = np.arange(xlim_m, xlim_p, dx)
self.dx = dx
# number of cells in the population
self.N = Ni
# naive cells normalized distribution
self.varphi = gaussian_pdf(x=self.x, mu=mu_i, sigma=sigma_i)
# if mc_seed specified then initialize distribution with a mixture of
# naive and memory cells
if mc_seed is not None:
# the weight is specified in the parameters dictionary
if par['f_mem_reinit'] == 'pop':
# it either depends on the amount of MCs collected so far
w = mc_seed.N / (self.N + mc_seed.N)
else:
# or it is a constant fraction
w = par['f_mem_reinit']
self.varphi = self.varphi * (1. - w) + mc_seed.varphi * w
# build mutation kernel
_, self.ker = mutation_kernel(par)
@classmethod
def create_empty(cls, par):
'''
Initialize an empty population. Both the distribution and the
population size are set to zero.
Args:
- par: model parameters dictionary.
'''
pop = cls.__new__(cls)
pop.N = 0 # zero population size
# create distribution domain according to model parameters.
pop.x = np.arange(par['xlim_minus'], par['xlim_plus'], par['dx'])
pop.dx = par['dx']
pop.varphi = np.zeros_like(pop.x) # null distribution
return pop
@classmethod
def create_with_explicit_attributes(cls, N, x, dx, varphi):
'''
Creates a new object having the attributes passed as argugment. Lists
are copied in the process.
Args:
- N (float): population size
- x (float array): distribution energy domain
- dx (float): discretization interval of the energy domain
- varphi (float array): values of the normalized distribution
'''
pop = cls.__new__(cls)
# initialize parameters with the arguments specified
pop.N = N
pop.x = np.copy(x) # creates a copy
pop.dx = dx
pop.varphi = np.copy(varphi) # creates a copy
return pop
def create_copy_without_kernel(self):
'''
Creates a copy of the caller. It copies every attribute except the
mutation kernel, which is usually not needed in the copy.
'''
pop = det_pop.create_with_explicit_attributes(
self.N, self.x, self.dx, self.varphi)
return pop
def merge_with(self, pop_add):
'''
Function that merges the current population with the population passed
as argument.
Args:
- pop_add (det_pop object): population to be merged with the caller.
'''
if self.N > 0:
# weight of the normalized distribution sum
w = self.N / (self.N + pop_add.N)
# merge distributions and renormalize
self.varphi = self.varphi * w + pop_add.varphi * (1. - w)
# add up sizes
self.N += pop_add.N
else:
# if the caller population is empty then the result is simply the
# added population
self.N = pop_add.N
self.varphi = pop_add.varphi
def __renormalize_varphi(self):
'''
Renormalize the distribution after an operation that changes its size,
and report the modification to the population size. This method should
remain private.
'''
# evaluate the current normalization of the distribution
N_factor = np.sum(self.varphi) * self.dx
# update population size with the resulting factor
self.N *= N_factor
# renormalize the distribution
self.varphi /= N_factor
def select_with_psurv(self, psurv_x):
'''
Given a probability of survival, this method applies it to the
population.
Args:
- psurv_x (float array): this array should contain the survival
probability as a function of the energy domain of the distribution.
'''
# multiply the distribution by the probability of survival
self.varphi *= psurv_x
# renormalize the distribution and update population size
self.__renormalize_varphi()
def differentiate(self, prob_mc, prob_pc):
'''
This function implements differentiation. It returns the resulting
populations of MCs and PCs.
Args:
- prob_mc, prob_pc (float): probabilities of, respectively, MC and PC
differentiation.
Returns:
- MC_pop, PC_pop (det_pop objects): populations of differentiated
MCs and PCs
'''
# create differentiated MC population from a copy of the current pop
MC_pop = self.create_copy_without_kernel()
# multiplied by the probability of differentiation
MC_pop.N *= prob_mc
# same for the plasma cell population
PC_pop = self.create_copy_without_kernel()
PC_pop.N *= prob_pc
# remove the differentiated cells from the population size
self.N *= (1 - prob_mc - prob_pc)
return MC_pop, PC_pop
def carrying_cap(self, par):
'''
This function implements a finite carrying capacity.
Args:
- par: model parameters dictionary
'''
# if population size exceeds the carrying capacity remove the excess
self.N = np.min([self.N, par['GC_carrying_capacity']])
def expand(self, *args):
'''
This function implements population expansion and mutation according
to the model parameters.
'''
# perform convolution (amplification + mutation multiple times)
self.varphi = np.convolve(self.ker, self.varphi,
'same') * self.dx
# renormalize the distribution and update population size
self.__renormalize_varphi()
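# A minimal usage sketch of one evolution step (illustrative, with hypothetical
# inputs: `par` is the model parameters dictionary and `psurv` a survival
# probability evaluated on the energy domain; the 0.1 rates are placeholders):
#
#   pop.select_with_psurv(psurv)                  # selection
#   mc_pop, pc_pop = pop.differentiate(0.1, 0.1)  # split off MCs and PCs
#   pop.expand(par)                               # duplication + mutation
#   pop.carrying_cap(par)                         # enforce the finite capacity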
def bareps(self):
'''
This function evaluates and returns the current value of bar-epsilon
for the population.
'''
beps = -np.log(np.dot(self.varphi, np.exp(-self.x)) * self.dx)
return beps
def N_cells(self):
'''
This function returns the current population size.
'''
return self.N
def energies(self):
'''
returns the distribution domain. NB: it is returned by reference,
so one must be careful not to modify it!
'''
return self.x
def mean_en(self):
'''
returns the mean binding energy of the population. It returns None if
the population is empty.
'''
norm = np.sum(self.varphi)
import numpy as np
from typing import Type
import librosa
from pydub import AudioSegment
import random
def int_samples_to_float(y: np.ndarray, dtype: Type):
assert isinstance(y, np.ndarray)
assert issubclass(y.dtype.type, np.integer)
assert issubclass(dtype, np.floating)
y = y.astype(dtype) / np.iinfo(y.dtype).max
return y
def float_samples_to_int(y: np.ndarray, dtype: Type):
assert isinstance(y, np.ndarray)
assert issubclass(y.dtype.type, np.floating)
assert issubclass(dtype, np.integer)
return (y * np.iinfo(dtype).max).astype(dtype)
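# Example (sketch): int16 samples round-trip through the two helpers up to the
# 1 / np.iinfo(np.int16).max quantization step.
#
#   y_int = np.array([0, 16384, -32768], dtype=np.int16)
#   y_float = int_samples_to_float(y_int, np.float32)   # approx. [0.0, 0.5, -1.0]
#   y_back = float_samples_to_int(y_float, np.int16)    # approx. the original samples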
def log_mel_energy(inputs: np.ndarray,
sr,
n_fft=400,
stride=160,
n_mels=40,
freq_min=20,
freq_max=8000) -> np.ndarray:
""" Computes the Log mel filter bank energies of the waveform input"""
specto = librosa.feature.melspectrogram(
inputs,
sr=sr,
n_fft=n_fft,
hop_length=stride,
n_mels=n_mels,
power=1, # 1 for energy, 2 for power
fmin=freq_min,
fmax=freq_max)
log_specto = librosa.core.amplitude_to_db(specto, ref=np.max)
# returns Time x Freq (melspectrogram is Freq x Time, transposed here)
return log_specto.T
def mfcc(data: np.ndarray,
sample_rate: int = 16000,
n_mfcc: int = 40,
stride: int = 20,
window_size: int = 40):
"""
computes the mel-frequency cepstral coefficients of the input data
data - np.float32 ndarray (n,)
stride - ms
window_size - ms
"""
assert isinstance(data, np.ndarray)
assert isinstance(n_mfcc, int)
assert isinstance(sample_rate, int)
assert isinstance(stride, int)
assert isinstance(window_size, int)
assert data.dtype.type == np.float32
stride = int(sample_rate * stride / 1000)
window_size = int(sample_rate * window_size / 1000)
result: np.ndarray = librosa.feature.mfcc(y=data,
sr=sample_rate,
n_mfcc=n_mfcc,
hop_length=stride,
n_fft=window_size).astype(
np.float32)
# Features x Time -> Time x Features
return result.T
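# Example (sketch): one second of 16 kHz float32 audio with a 20 ms stride and a
# 40 ms window yields 1 + 16000 // 320 = 51 frames of 40 coefficients each.
#
#   samples = np.zeros(16000, dtype=np.float32)
#   feats = mfcc(samples, sample_rate=16000, n_mfcc=40, stride=20, window_size=40)
#   feats.shape   # (51, 40)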
def background_noise_augment(y_overlay: np.ndarray, dBFS: float,
bg_noise: AudioSegment, snr_range) -> np.ndarray:
"""Augment by overlaying with background noise"""
assert isinstance(y_overlay, np.ndarray)
assert issubclass(y_overlay.dtype.type, np.floating)
    # Select an SNR uniformly within the given range
    snr = snr_range[0] + random.random() * (snr_range[1] - snr_range[0])
    target_noise_dBFS = dBFS - snr
gain = target_noise_dBFS - bg_noise.dBFS
bg_noise = bg_noise.apply_gain(gain)
bg_noise = np.array(bg_noise.get_array_of_samples())
bg_noise = int_samples_to_float(bg_noise, np.float32)
return y_overlay + bg_noise
def speed_augment(y_speed: np.ndarray) -> np.ndarray:
"Apply speed augmentation"
assert isinstance(y_speed, np.ndarray)
assert issubclass(y_speed.dtype.type, np.floating)
speed_change = np.random.uniform(low=0.9, high=1.1)
tmp = librosa.effects.time_stretch(y_speed, speed_change)
minlen = min(y_speed.shape[0], tmp.shape[0])
y_speed = np.zeros_like(y_speed)
y_speed[0:minlen] = tmp[0:minlen]
return y_speed
def white_noise_augment(y_noise: np.ndarray) -> np.ndarray:
""" Apply white noise augmentation to the input data"""
# dBFS
assert isinstance(y_noise, np.ndarray)
assert issubclass(y_noise.dtype.type, np.floating)
noise_amp = 0.005 * np.random.uniform() * np.amax(y_noise)
y_noise = y_noise + (noise_amp * np.random.normal(size=y_noise.shape[0]))
return y_noise.astype(np.float32)
def pitch_augment(y_pitch: np.ndarray,
sample_rate: int,
bins_per_octave: int = 24,
pitch_pm: int = 4) -> np.ndarray:
assert isinstance(y_pitch, np.ndarray)
assert issubclass(y_pitch.dtype.type, np.floating)
pitch_change = pitch_pm * 2 * (np.random.uniform() - 0.5)
y_pitch = librosa.effects.pitch_shift(y_pitch,
sample_rate,
n_steps=pitch_change,
bins_per_octave=bins_per_octave)
return y_pitch
def value_augment(y_aug: np.ndarray, low=0.5, high=1.1) -> np.ndarray:
""" Randomly distort the audio input by multiplying with random coefficients """
assert isinstance(y_aug, np.ndarray)
assert issubclass(y_aug.dtype.type, np.floating)
dyn_change = np.random.uniform(low=low, high=high)
y_aug = y_aug * dyn_change
return y_aug
def random_shift_augment(y_shift: AudioSegment) -> np.ndarray:
assert isinstance(y_shift, np.ndarray)
assert issubclass(y_shift.dtype.type, np.floating)
    timeshift_fac = 0.2 * 2 * (np.random.uniform() - 0.5)
import os
import lightgbm as lgbm
import numpy as np
import pandas as pd
import pytest
from funcy import first
from sklearn import datasets
from sklearn.model_selection import train_test_split
from dvclive.lgbm import DvcLiveCallback
from tests.test_main import read_logs
# pylint: disable=redefined-outer-name, unused-argument
@pytest.fixture
def model_params():
return {"objective": "multiclass", "n_estimators": 5, "seed": 0}
@pytest.fixture
def iris_data():
iris = datasets.load_iris()
x = pd.DataFrame(iris["data"], columns=iris["feature_names"])
y = iris["target"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.33, random_state=42
)
return (x_train, y_train), (x_test, y_test)
def test_lgbm_integration(tmp_dir, model_params, iris_data):
model = lgbm.LGBMClassifier()
model.set_params(**model_params)
model.fit(
iris_data[0][0],
iris_data[0][1],
eval_set=(iris_data[1][0], iris_data[1][1]),
eval_metric=["multi_logloss"],
callbacks=[DvcLiveCallback()],
)
assert os.path.exists("dvclive")
logs, _ = read_logs("dvclive")
assert len(logs) == 1
assert len(first(logs.values())) == 5
def test_lgbm_model_file(tmp_dir, model_params, iris_data):
model = lgbm.LGBMClassifier()
model.set_params(**model_params)
model.fit(
iris_data[0][0],
iris_data[0][1],
eval_set=(iris_data[1][0], iris_data[1][1]),
eval_metric=["multi_logloss"],
callbacks=[DvcLiveCallback("lgbm_model")],
)
preds = model.predict(iris_data[1][0])
model2 = lgbm.Booster(model_file="lgbm_model")
preds2 = model2.predict(iris_data[1][0])
    preds2 = np.argmax(preds2, axis=1)
# -*- coding: utf-8 -*-
import numpy as np
from scipy.optimize import curve_fit
def welch_t(a, b, ua=None, ub=None):
# t = (mean(a) - mean(b)) / sqrt(std(a)**2 + std(b)**2)
if ua is None:
ua = a.std()
if ub is None:
ub = b.std()
xa = a.mean()
xb = b.mean()
t = np.abs(xa - xb) / np.sqrt(ua ** 2 + ub ** 2)
return t
def cohen_d(a, b):
sa = a.std()
sb = b.std()
s = ((a.size - 1) * sa ** 2 + (b.size - 1) * sb ** 2) / (a.size + b.size - 2)
s = np.sqrt(s)
d = np.abs(a.mean() - b.mean()) / s
return d
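# Worked example (sketch): for a = [0, 2] and b = [10, 12] both samples have
# std 1 (numpy default ddof=0), so welch_t(a, b) = |1 - 11| / sqrt(1 + 1) ~ 7.07
# and cohen_d(a, b) = 10 / sqrt(((1)*1 + (1)*1) / 2) = 10.
#
#   a = np.array([0.0, 2.0])
#   b = np.array([10.0, 12.0])
#   welch_t(a, b), cohen_d(a, b)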
def gauss(x, height, mu, sig, floor):
return height * np.exp(-(((x - mu) / sig) ** 2) / 2) + floor
def gaussfit(x, y, p0=None):
"""
Fit a simple gaussian to data
gauss(x, a, mu, sigma, floor) = a * exp(-z**2/2) + floor
with z = (x - mu) / sigma
Parameters
----------
x : array(float)
x values
y : array(float)
y values
Returns
-------
gauss(x), parameters
fitted values for x, fit parameters (height, mu, sig, floor)
"""
if p0 is None:
        p0 = [np.max(y) - np.min(y)
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides :class:`BayesianDenseLayer`, which implements a
variational Bayesian dense (fully-connected) neural network layer as a Keras
:class:`~tf.keras.layers.Layer`.
"""
from typing import Callable, Optional, Union
import numpy as np
import tensorflow as tf
from gpflow import Parameter, default_float
from gpflow.base import TensorType
from gpflow.kullback_leiblers import gauss_kl
from gpflow.models.model import MeanAndVariance
from gpflow.utilities.bijectors import positive, triangular
from gpflux.helpers import xavier_initialization_numpy
from gpflux.layers.trackable_layer import TrackableLayer
from gpflux.types import ShapeType
class BayesianDenseLayer(TrackableLayer):
"""
A dense (fully-connected) layer for variational Bayesian neural networks.
This layer holds the mean and square-root of the variance of the
distribution over the weights. This layer also has a temperature for
cooling (or heating) the posterior.
"""
def __init__(
self,
input_dim: int,
output_dim: int,
num_data: int,
w_mu: Optional[np.ndarray] = None,
w_sqrt: Optional[np.ndarray] = None,
activation: Optional[Callable] = None,
is_mean_field: bool = True,
temperature: float = 1e-4, # TODO is this intentional?
):
"""
:param input_dim: The input dimension (excluding bias) of this layer.
:param output_dim: The output dimension of this layer.
:param num_data: The number of points in the training dataset (used for
scaling the KL regulariser).
:param w_mu: Initial value of the variational mean for weights + bias.
If not specified, this defaults to `xavier_initialization_numpy`
for the weights and zero for the bias.
:param w_sqrt: Initial value of the variational Cholesky of the
(co)variance for weights + bias. If not specified, this defaults to
1e-5 * Identity.
:param activation: The activation function. If not specified, this defaults to the identity.
:param is_mean_field: Determines whether the approximation to the
weight posterior is mean field. Must be consistent with the shape
of ``w_sqrt``, if specified.
:param temperature: For cooling (< 1.0) or heating (> 1.0) the posterior.
"""
super().__init__(dtype=default_float())
assert input_dim >= 1
assert output_dim >= 1
assert num_data >= 1
if w_mu is not None: # add + 1 for the bias
assert w_mu.shape == ((input_dim + 1) * output_dim,)
if w_sqrt is not None:
if not is_mean_field:
assert w_sqrt.shape == (
(input_dim + 1) * output_dim,
(input_dim + 1) * output_dim,
)
else:
assert w_sqrt.shape == ((input_dim + 1) * output_dim,)
assert temperature > 0.0
self.input_dim = input_dim
self.output_dim = output_dim
self.num_data = num_data
self.w_mu_ini = w_mu
self.w_sqrt_ini = w_sqrt
self.activation = activation
self.is_mean_field = is_mean_field
self.temperature = temperature
self.dim = (input_dim + 1) * output_dim
self.full_output_cov = False
self.full_cov = False
self.w_mu = Parameter(np.zeros((self.dim,)), dtype=default_float(), name="w_mu") # [dim]
self.w_sqrt = Parameter(
np.zeros((self.dim, self.dim)) if not self.is_mean_field else np.ones((self.dim,)),
transform=triangular() if not self.is_mean_field else positive(),
dtype=default_float(),
name="w_sqrt",
) # [dim, dim] or [dim]
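# Construction sketch (hypothetical numbers, not from the original module): a
# mean-field 5 -> 3 layer for a dataset of 100 points,
#
#   layer = BayesianDenseLayer(input_dim=5, output_dim=3, num_data=100,
#                              activation=tf.nn.relu, is_mean_field=True)
#
# gives dim = (5 + 1) * 3 = 18 entries in w_mu and in the mean-field w_sqrt.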
def initialize_variational_distribution(self) -> None:
if self.w_mu_ini is None:
w = xavier_initialization_numpy(self.input_dim, self.output_dim)
b = np.zeros((1, self.output_dim))
import numpy as np
import imageio
import os
import time
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from data import set_up_data
from utils import get_cpu_stats_over_ranks
from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema
def training_step(H, data_input, target, vae, ema_vae, optimizer, iterate):
t0 = time.time()
vae.zero_grad()
stats = vae.forward(data_input, target)
stats['elbo'].backward()
grad_norm = torch.nn.utils.clip_grad_norm_(vae.parameters(), H.grad_clip).item()
distortion_nans = torch.isnan(stats['distortion']).sum()
rate_nans = torch.isnan(stats['rate']).sum()
stats.update(dict(rate_nans=0 if rate_nans == 0 else 1, distortion_nans=0 if distortion_nans == 0 else 1))
stats = get_cpu_stats_over_ranks(stats)
skipped_updates = 1
# only update if no rank has a nan and if the grad norm is below a specific threshold
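    # (H.skip_threshold == -1 disables the norm check; skipped steps leave the
    # optimizer and EMA weights untouched and are reported via stats['skipped_updates'])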
if stats['distortion_nans'] == 0 and stats['rate_nans'] == 0 and (H.skip_threshold == -1 or grad_norm < H.skip_threshold):
optimizer.step()
skipped_updates = 0
update_ema(vae, ema_vae, H.ema_rate)
t1 = time.time()
stats.update(skipped_updates=skipped_updates, iter_time=t1 - t0, grad_norm=grad_norm)
return stats
def eval_step(data_input, target, ema_vae):
with torch.no_grad():
stats = ema_vae.forward(data_input, target)
stats = get_cpu_stats_over_ranks(stats)
return stats
def get_sample_for_visualization(data, preprocess_fn, num, dataset):
for x in DataLoader(data, batch_size=num):
break
orig_image = (x[0] * 255.0).to(torch.uint8).permute(0, 2, 3, 1) if dataset == 'celeba128' else x[0]
preprocessed = preprocess_fn(x)[0]
return orig_image, preprocessed
def train_loop(H, data_train, data_valid, preprocess_fn, vae, ema_vae, logprint):
optimizer, scheduler, cur_eval_loss, iterate, starting_epoch = load_opt(H, vae, logprint)
train_sampler = DistributedSampler(data_train, num_replicas=H.mpi_size, rank=H.rank)
viz_batch_original, viz_batch_processed = get_sample_for_visualization(data_valid, preprocess_fn, H.num_images_visualize, H.dataset)
early_evals = set([1] + [2 ** exp for exp in range(3, 14)])
stats = []
iters_since_starting = 0
H.ema_rate = torch.as_tensor(H.ema_rate).cuda()
for epoch in range(starting_epoch, H.num_epochs):
train_sampler.set_epoch(epoch)
for x in DataLoader(data_train, batch_size=H.n_batch, drop_last=True, pin_memory=True, sampler=train_sampler):
data_input, target = preprocess_fn(x)
training_stats = training_step(H, data_input, target, vae, ema_vae, optimizer, iterate)
stats.append(training_stats)
scheduler.step()
if iterate % H.iters_per_print == 0 or iters_since_starting in early_evals:
logprint(model=H.desc, type='train_loss', lr=scheduler.get_last_lr()[0], epoch=epoch, step=iterate, **accumulate_stats(stats, H.iters_per_print))
if iterate % H.iters_per_images == 0 or (iters_since_starting in early_evals and H.dataset != 'celeba128') and H.rank == 0:
write_images(H, ema_vae, viz_batch_original, viz_batch_processed, f'{H.save_dir}/samples-{iterate}.png', logprint)
iterate += 1
iters_since_starting += 1
if iterate % H.iters_per_save == 0 and H.rank == 0:
if np.isfinite(stats[-1]['elbo']):
logprint(model=H.desc, type='train_loss', epoch=epoch, step=iterate, **accumulate_stats(stats, H.iters_per_print))
fp = os.path.join(H.save_dir, 'latest')
logprint(f'Saving model@ {iterate} to {fp}')
save_model(fp, vae, ema_vae, optimizer, H)
if iterate % H.iters_per_ckpt == 0 and H.rank == 0:
save_model(os.path.join(H.save_dir, f'iter-{iterate}'), vae, ema_vae, optimizer, H)
if epoch % H.epochs_per_eval == 0:
valid_stats = evaluate(H, ema_vae, data_valid, preprocess_fn)
logprint(model=H.desc, type='eval_loss', epoch=epoch, step=iterate, **valid_stats)
def evaluate(H, ema_vae, data_valid, preprocess_fn):
stats_valid = []
valid_sampler = DistributedSampler(data_valid, num_replicas=H.mpi_size, rank=H.rank)
for x in DataLoader(data_valid, batch_size=H.n_batch, drop_last=True, pin_memory=True, sampler=valid_sampler):
data_input, target = preprocess_fn(x)
stats_valid.append(eval_step(data_input, target, ema_vae))
vals = [a['elbo'] for a in stats_valid]
finites = np.array(vals)
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
from scipy.optimize import curve_fit
from auspex.log import logger
from copy import copy
import matplotlib.pyplot as plt
from .fits import AuspexFit, Auspex2DFit
from .signal_analysis import KT_estimation
class RabiAmpFit(AuspexFit):
"""A fit to a Rabi amplitude curve, assuming a cosine model.
"""
xlabel = "Amplitude"
ylabel = r"<$\sigma_z$>"
title = "Rabi Amp Fit"
@staticmethod
def _model(x, *p):
return p[0] - p[1]*np.cos(2*np.pi*p[2]*(x - p[3]))
def _initial_guess(self):
#seed Rabi frequency from largest FFT component
N = len(self.ypts)
yfft = np.fft.fft(self.ypts)
f_max_ind = np.argmax(np.abs(yfft[1:N//2]))
f_0 = 0.5 * max([1, f_max_ind]) / self.xpts[-1]
amp_0 = 0.5*(self.ypts.max() - self.ypts.min())
offset_0 = np.mean(self.ypts)
phase_0 = 0
if self.ypts[N//2 - 1] > offset_0:
amp_0 = -amp_0
return [offset_0, amp_0, f_0, phase_0]
def _fit_dict(self, p):
return {"y0": p[0],
"Api": p[1],
"f": p[2],
"phi": p[3]}
def __str__(self):
return "y0 - Api*cos(2*pi*f*(t - phi))"
@property
def pi_amp(self):
"""Returns the pi-pulse amplitude of the fit.
"""
return 0.5/self.fit_params["f"]
def annotation(self):
return r"$A_\pi$ = {0:.2e} {1} {2:.2e}".format(self.pi_amp, chr(177), self.fit_errors["Api"])
class RabiWidthFit(AuspexFit):
"""Fit to a single-frequency decaying cosine for fitting Rabi-vs-time experiments
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = "Rabi Width Fit"
@staticmethod
def _model(x, *p):
return p[0] + p[1]*np.exp(-x/p[2])*np.cos(2*np.pi*p[3]*(x - p[4]))
def _initial_guess(self):
frabi, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 1)
offset = np.average(self.xpts)
amp = np.max(self.ypts)
trabi = self.xpts[np.size(self.ypts) // 3]# assume Trabi is 1/3 of the scan
phase = 90.0
return [offset, amp, trabi, frabi[0], phase]
def _fit_dict(self, p):
return {"y0": p[0],
"A": p[1],
'T': p[2],
"f": p[3],
"phi": p[4]}
def __str__(self):
return "y0 + A*exp(-x/T)*cos(2*pi*f*(t - phi))"
@property
def t_rabi(self):
return self.fit_params["T"]
def annotation(self):
return r"$T_\pi$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["T"], chr(177), self.fit_errors["T"])
class T1Fit(AuspexFit):
"""Fit to a decaying exponential for T1 measurement experiments.
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = r"$T_1$ Fit"
@staticmethod
def _model(x, *p):
return p[0]*np.exp(-x/p[1]) + p[2]
def _initial_guess(self):
## Initial guess using method of linear regression via integral equations
## https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
N = len(self.xpts)
S = np.zeros(N)
for j in range(2, N):
S[j] = S[j-1] + 0.5*((self.ypts[j] + self.ypts[j-1]) *
(self.xpts[j] - self.xpts[j-1]))
xs = self.xpts - self.xpts[0]
ys = self.ypts - self.ypts[0]
M = np.array([[np.sum(xs**2), np.sum(xs * S)], [np.sum(xs * S), np.sum(S**2)]])
B1 = (np.linalg.inv(M) @ np.array([np.sum(ys * xs), np.sum(ys * S)]).T)[1]
theta = np.exp(B1 * self.xpts)
M2 = np.array([[N, np.sum(theta)], [np.sum(theta), np.sum(theta**2)]])
A = np.linalg.inv(M2) @ np.array([np.sum(self.ypts), np.sum(self.ypts * theta)]).T
return [A[1], -1.0/B1, A[0]]
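# Sketch of the idea behind this guess: for y = A*exp(-t/T1) + A0 the cumulative
# integral S(t) of y satisfies y(t) - y(t0) = B1*S(t) + c*(t - t0) with
# B1 = -1/T1, so one linear least-squares solve yields T1 = -1/B1 and a second
# linear solve recovers the amplitudes A and A0; no nonlinear iteration is
# needed to seed curve_fit.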
def _fit_dict(self, p):
return {"A": p[0], "T1": p[1], "A0": p[2]}
def __str__(self):
return "A0 + A*exp(-t/T1)"
@property
def T1(self):
"""Return the measured T1 (i.e. decay constant of exponential).
"""
return self.fit_params["T1"]
def make_plots(self):
"""Create plot on both linear and semilog scale
"""
logger.info("Semilog plot of |1> state probability requires calibrated data.")
plt.figure(figsize=(2*6.4, 4.8))
plt.subplot(121)
plt.plot(self.xpts, self.ypts, ".", markersize=15, label="Data")
plt.plot(self.xpts, self.model(self.xpts), "-", linewidth=3, label="Fit")
plt.xlabel(self.xlabel, fontsize=14)
plt.ylabel(self.ylabel, fontsize=14)
plt.annotate(self.annotation(), xy=(0.4, 0.10), xycoords='axes fraction', size=12)
plt.subplot(122)
plt.semilogy(self.xpts, -1/2*(self.ypts - self.fit_params["A0"]), ".", markersize=15, label="Data")
plt.semilogy(self.xpts, -1/2*(self.model(self.xpts) - self.fit_params["A0"]), "-", linewidth=3, label="Fit")
plt.xlabel(self.xlabel, fontsize=14)
plt.ylabel('|1> probability', fontsize=14)
plt.suptitle(self.title, fontsize=14)
def annotation(self):
return r"$T_1$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["T1"], chr(177), self.fit_errors["T1"])
class RamseyFit(AuspexFit):
"""Fit to a Ramsey experiment using either a one or two frequency decaying
sine model.
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = "Ramsey Fit"
def __init__(self, xpts, ypts, two_freqs=True, AIC=True, make_plots=False, force=False, ax=None):
"""One or two frequency Ramsey experiment fit. If a two-frequency fit is selected
by the user or by comparing AIC scores, fit parameters are returned as tuples instead
of single numbers.
Args:
xpts (numpy.array): Time data points.
ypts (numpy.array): Qubit measurements.
two_freqs (Bool): If true, attempt a two-frequency fit of the data.
AIC (Bool): Decide between one and two frequency fits using the Akaike
information criterion.
make_plots (Bool): Display a plot of data and fit result.
ax (Axes, optional): Axes on which to draw plot. If None, new figure is created
force (Bool): Force the selection of a two-frequency fit regardless of AIC score.
"""
self.AIC = AIC
self.dict_option = two_freqs
self.two_freqs = two_freqs
self.force = force
self.plots = make_plots
self.ax = ax
assert len(xpts) == len(ypts), "Length of X and Y points must match!"
self.xpts = xpts
self.ypts = ypts
self._do_fit()
def _initial_guess_1f(self):
freqs, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 1)
return [freqs[0], abs(amps[0]), Tcs[0], np.angle(amps[0]), np.mean(self.ypts)]
def _initial_guess_2f(self):
freqs, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 2)
return [*freqs, *abs(amps), *Tcs, *np.angle(amps), np.mean(self.ypts)]
@staticmethod
def _ramsey_1f(x, f, A, tau, phi, y0):
return A*np.exp(-x/tau)*np.cos(2*np.pi*f*x + phi) + y0
@staticmethod
def _model_2f(x, *p):
return (RamseyFit._ramsey_1f(x, p[0], p[2], p[4], p[6], p[8]) + RamseyFit._ramsey_1f(x, p[1], p[3], p[5], p[7], p[8]))
@staticmethod
def _model_1f(x, *p):
return RamseyFit._ramsey_1f(x, p[0], p[1], p[2], p[3], p[4])
def _aicc(self, e, k, n):
return 2*k+e+(2*k*(k+1))/(n-k-1)
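    # (note) This is the small-sample-corrected AIC, AICc = 2k + e + 2k(k+1)/(n-k-1),
    # with e standing in for -2*log-likelihood (here the summed squared error) and k
    # the number of fit parameters; only differences of AICc values are used below.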
def _do_fit(self):
if self.two_freqs:
self.dict_option = True
self._initial_guess = self._initial_guess_2f
self._model = self._model_2f
try:
super()._do_fit()
two_freq_chi2 = self.sq_error
except:
self.two_freqs = False
logger.info("Two-frequency fit failed. Trying single-frequency fit.")
if self.two_freqs and self.AIC:
#Compare the one and two frequency fits
self.dict_option = False
self._initial_guess = self._initial_guess_1f
self._model = self._model_1f
super()._do_fit()
one_freq_chi2 = self.sq_error
aic = self._aicc(two_freq_chi2, 9, len(self.xpts)) - self._aicc(one_freq_chi2, 5, len(self.xpts))
if aic > 0 and not self.force:
self.two_freqs = False
rl = 100*np.exp(-aic/2)
logger.info(f"Selecting one-frequency fit with relative likelihood = {rl:.2f}%")
if rl>33:
logger.info("Relative likelihood of 2nd frequency high, take more averages or set force = True.")
else:
self.dict_option = True
self._initial_guess = self._initial_guess_2f
self._model = self._model_2f
super()._do_fit()
if not self.two_freqs:
self.dict_option = False
self._initial_guess = self._initial_guess_1f
self._model = self._model_1f
super()._do_fit()
if self.plots:
self.make_plots()
def annotation(self):
if self.two_freqs:
return r"$T_2$ = {0:.2e} {1} {2:.2e} "'\n'"$T_2$ = {3:.2e} {4} {5:.2e}".format(self.fit_params["tau1"], chr(177), self.fit_errors["tau1"], self.fit_params["tau2"], chr(177), self.fit_errors["tau2"])
else:
return r"$T_2$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["tau"], chr(177), self.fit_errors["tau"])
@property
def T2(self):
if self.two_freqs:
return self.fit_params["tau1"], self.fit_params["tau2"]
else:
return self.fit_params["tau"]
@property
def ramsey_freq(self):
if self.two_freqs:
return self.fit_params["f1"], self.fit_params["f2"]
else:
return self.fit_params["f"]
def _fit_dict(self, p):
if self.dict_option:
return {"f1": p[0],
"A1": p[2],
"tau1": p[4],
"phi1": p[6],
"f2": p[1],
"A2": p[3],
"tau2": p[5],
"phi2": p[7],
"y0": p[8]}
else:
return {"f": p[0],
"A": p[1],
"tau": p[2],
"phi": p[3],
"y0": p[4]}
class SingleQubitRBFit(AuspexFit):
"""Fit to an RB decay curve using the model A*(r^n) + B
"""
ylabel = r"<$\sigma_z$>"
title = "Single Qubit RB Fit"
def __init__(self, lengths, data, make_plots=False, log_scale_x=True, smart_guess=True, bounded_fit=True, ax=None):
self.lengths = sorted(list(set(lengths)))
repeats = len(data) // len(self.lengths)
xpts = np.array(self.lengths)
ypts = np.mean(np.reshape(data,(len(self.lengths),repeats)),1)
self.data = data
self.data_points = np.reshape(data,(len(self.lengths),repeats))
self.errors = np.std(self.data_points, 1)
self.log_scale_x = log_scale_x
self.ax = ax
self.smart_guess = smart_guess
if log_scale_x:
self.xlabel = r"$log_2$ Clifford Number"
else:
self.xlabel = "Clifford Number"
if bounded_fit:
self.bounds = ((0, -np.inf, 0), (1, np.inf, 1))
super().__init__(xpts, ypts, make_plots=make_plots, ax=ax)
@staticmethod
def _model(x, *p):
return p[0] * (1-p[1])**x + p[2]
def _initial_guess(self):
if self.smart_guess:
## Initial guess using method of linear regression via integral equations
## https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
N = len(self.xpts)
S = np.zeros(N)
for j in range(2, N):
S[j] = S[j-1] + 0.5*((self.ypts[j] + self.ypts[j-1]) *
(self.xpts[j] - self.xpts[j-1]))
xs = self.xpts - self.xpts[0]
ys = self.ypts - self.ypts[0]
M = np.array([[np.sum(xs**2), np.sum(xs * S)],
[np.sum(xs * S), np.sum(S**2)]])
B1 = (np.linalg.inv(M) @ np.array([np.sum(ys * xs), np.sum(ys * S)]).T)[1]
theta = np.exp(B1 * self.xpts)
M2 = np.array([[N, np.sum(theta)], [np.sum(theta), np.sum(theta**2)]])
A = np.linalg.inv(M2) @ np.array([np.sum(self.ypts), np.sum(self.ypts * theta)]).T
return [A[1], 1-np.exp(B1), A[0]]
return [1, 0, 0.5]
def __str__(self):
return "A*(1 - r)^N + B"
def _fit_dict(self, p):
return {"A": p[0], "r": p[1]/2, "B": p[2]}
def annotation(self):
return r'avg. error rate r = {:.2e} {} {:.2e}'.format(self.fit_params["r"], chr(177), self.fit_errors["r"])
def make_plots(self):
if self.ax is None:
plt.figure()
#plt.plot(self.xpts, self.data,'.',markersize=15, label='data')
plt.errorbar(self.lengths, self.ypts, yerr=self.errors/np.sqrt(len(self.lengths)),
fmt='*', elinewidth=2.0, capsize=4.0, label='mean')
plt.plot(range(int(self.lengths[-1])), self.model(range(int(self.lengths[-1]))), label='fit')
if self.log_scale_x:
plt.xscale('log')
plt.xlabel(self.xlabel)
plt.ylabel(self.ylabel)
plt.legend()
plt.annotate(self.annotation(), xy=(0.4, 0.10),
xycoords='axes fraction', size=12)
else:
self.ax.errorbar(self.lengths, self.ypts, yerr=self.errors/np.sqrt(len(self.lengths)),
fmt='*', elinewidth=2.0, capsize=4.0, label='mean')
self.ax.plot(range(int(self.lengths[-1])), self.model(range(int(self.lengths[-1]))), label='fit')
if self.log_scale_x:
self.ax.set_xscale('log')
self.ax.set_xlabel(self.xlabel)
self.ax.set_ylabel(self.ylabel)
self.ax.legend()
self.ax.annotate(self.annotation(), xy=(0.4, 0.10),
xycoords='axes fraction', size=12)
class SingleQubitLeakageRBFit(SingleQubitRBFit):
def __init__(self, lengths, data, make_plots=False, log_scale_x=True, smart_guess=True, bounded_fit=True, ax=None, leakage=True, fit=True, cal_repeats=1):
# Compute populations from the tomography data
a = np.mean(data[-3*cal_repeats:-2*cal_repeats])
"""Demo code for computing Neuron's tuning w.r.t """
from hessian_eigenthings.power_iter import Operator, deflated_power_iteration
from hessian_eigenthings.lanczos import lanczos
from lanczos_generalized import lanczos_generalized
from GAN_hvp_operator import GANHVPOperator, compute_hessian_eigenthings
#%%
import torch
import numpy as np
from time import time
from imageio import imwrite
from build_montages import build_montages
import matplotlib.pylab as plt
from os.path import join
import torch.nn.functional as F
#%% Prepare the Networks
import sys
sys.path.append(r"E:\Github_Projects\PerceptualSimilarity")
sys.path.append(r"D:\Github\PerceptualSimilarity")
import models
model_squ = models.PerceptualLoss(model='net-lin', net='squeeze', use_gpu=1, gpu_ids=[0])
model_squ.requires_grad_(False).cuda()
from GAN_utils import upconvGAN
G = upconvGAN("fc6")
G.requires_grad_(False).cuda() # this notation is incorrect in older pytorch
#%% Set up hook and the linear network based on the CNN
# Set up a network
from collections import OrderedDict
class ModuleHook:
def __init__(self, module):
self.hook = module.register_forward_hook(self.hook_fn)
self.module = None
self.features = None
def hook_fn(self, module, input, output):
self.module = module
self.features = output
def close(self):
self.hook.remove()
def hook_model(model, layerrequest = None):
features = OrderedDict()
alllayer = layerrequest is None
# recursive hooking function
def hook_layers(net, prefix=[]):
if hasattr(net, "_modules"):
for name, layer in net._modules.items():
if layer is None:
# e.g. GoogLeNet's aux1 and aux2 layers
continue
cur_layername = "_".join(prefix + [name])
if alllayer:
features[cur_layername] = ModuleHook(layer)
elif not alllayer and cur_layername in layerrequest:
features[cur_layername] = ModuleHook(layer)
hook_layers(layer, prefix=prefix + [name])
hook_layers(model)
def hook(layer):
# if layer == "input":
# return image
if layer == "labels":
return list(features.values())[-1].features
return features[layer].features
return hook, features
def get_model_layers(model, getLayerRepr=False):
layers = OrderedDict() if getLayerRepr else []
# recursive function to get layers
def get_layers(net, prefix=[]):
if hasattr(net, "_modules"):
for name, layer in net._modules.items():
if layer is None:
# e.g. GoogLeNet's aux1 and aux2 layers
continue
if getLayerRepr:
layers["_".join(prefix+[name])] = layer.__repr__()
else:
layers.append("_".join(prefix + [name]))
get_layers(layer, prefix=prefix+[name])
get_layers(model)
return layers
#%
def FeatLinModel(VGG, layername='features_20', type="weight", weight=None, chan=0, pos=(10, 10)):
"""A factory of linear models on """
layers_all = get_model_layers(VGG)
if 'features' in layername:
layeridx = layers_all.index(layername) - 1 + 1 # -1 for the "features" layer
VGGfeat = VGG.features[:layeridx]
else:
VGGfeat = VGG
hooks, feat_dict = hook_model(VGG, layerrequest=(layername,))
layernames = list(feat_dict.keys())
print(layernames)
if type == "weight":
def weight_objective(img, scaler=True):
VGGfeat.forward(img.cuda())
feat = hooks(layername)
if scaler:
return -(feat * weight.unsqueeze(0)).mean()
else:
batch = img.shape[0]
return -(feat * weight.unsqueeze(0)).view(batch, -1).mean(axis=1)
return weight_objective
elif type == "neuron":
def neuron_objective(img, scaler=True):
VGGfeat.forward(img.cuda())
feat = hooks(layername)
if scaler:
return -(feat[:, chan, pos[0], pos[1]]).mean()
else:
batch = img.shape[0]
return -(feat[:, chan, pos[0], pos[1]]).view(batch, -1).mean(axis=1)
return neuron_objective
# for name, hk in feat_dict.items():
# hk.close()
#%%
import torchvision as tv
# VGG = tv.models.vgg16(pretrained=True)
alexnet = tv.models.alexnet(pretrained=True).cuda()
for param in alexnet.parameters():
param.requires_grad_(False)
#%% This is not working.... The local 2nd order derivative is 0
feat = torch.randn((4096), dtype=torch.float32).requires_grad_(False).cuda()
GHVP = GANHVPOperator(G, feat, model_squ)
GHVP.apply(torch.randn((4096)).requires_grad_(False).cuda())
#%%
weight = torch.randn(512,32,32).cuda()
objective = FeatLinModel(VGG, layername='features_19', type="weight", weight=weight)
activHVP = GANHVPOperator(G, 5*feat, objective, activation=True)
#%
activHVP.apply(5*torch.randn((4096)).requires_grad_(False).cuda())
#%%
feat = torch.randn(4096).cuda()
feat.requires_grad_(True)
objective = FeatLinModel(VGG, layername='features_4', type="neuron", weight=None)
act = objective(G.visualize(feat))
#%%
from hessian import hessian
# activHVP = GANHVPOperator(G, 5*feat, objective, activation=True)
H = hessian(act, feat)
#%%
#%%
feat = torch.randn(4096).cuda()
feat.requires_grad_(True)
#%%
weight = torch.randn(192, 31, 31).cuda()
objective = FeatLinModel(alexnet, layername='features_4', type="weight", weight=weight)
act = objective(G.visualize(feat))
#%%
gradient = torch.autograd.grad(act, feat, retain_graph=True, create_graph=True,)
torch.autograd.grad(gradient[0], feat, retain_graph=True, only_inputs=True, grad_outputs=10*torch.ones(4096).cuda())
#%%
import numpy as np
feat = torch.tensor(np.random.randn(4096)).float().cuda()
feat.requires_grad_(True)
img = G.visualize(feat)
fc8 = alexnet.forward(img)
act = - fc8[0, 1]
H = hessian(act, feat, create_graph=False)
#%%
import numpy as np
feat = torch.tensor(np.random.randn(4096)).float().cuda()
feat.requires_grad_(True)
img = G.visualize(feat)
act = - img.mean()
# fc8 = alexnet.forward(img)
# act = - fc8[0, 1]
# H = hessian(act, feat, create_graph=False)
#%%
gradient = torch.autograd.grad(act, feat, retain_graph=True, create_graph=True,)
torch.autograd.grad(gradient[0], feat, retain_graph=True, only_inputs=True, grad_outputs=10*torch.ones(4096).cuda())
#%%
H = hessian(act, feat, create_graph=False)
#%%
x = torch.tensor([1.0,2])
x.requires_grad_(True)
A = torch.tensor([[2.0, 3], [3, 1]])
y = x.view(1, -1) @ A @ x.view(-1, 1)
x_grad = torch.autograd.grad(y, x, retain_graph=True, create_graph=True)
torch.autograd.grad(x_grad, x, retain_graph=True, only_inputs=True)
#%%
feat = torch.tensor(np.random.randn(4096)).float().cuda()
feat.requires_grad_(True)
img = G.visualize(feat)
resz_img = F.interpolate(img, (224, 224), mode='bilinear', align_corners=True)
obj = alexnet.features[:10](resz_img)[0, :, 6, 6].mean().pow(2) # esz_img.std()
ftgrad = torch.autograd.grad(obj, feat, retain_graph=True, create_graph=True, only_inputs=True)
torch.autograd.grad(1 * ftgrad[0], feat, retain_graph=True, only_inputs=True, grad_outputs=torch.randn(4096).cuda(), )
# torch.autograd.grad(ftgrad, img, retain_graph=True, only_inputs=True, grad_outputs=torch.randn(4096).cuda(), )
#%% Approximate Forward Differencing
"""
So here is the conclusion: as the Perceptual loss takes a squared difference when comparing
feature tensors, the dependency of the loss on the image is of higher than first order, and its
derivative is therefore not independent of the image. However if the
"""
def torch_corr(vec1, vec2):
return torch.mean((vec1 - vec1.mean()) * (vec2 - vec2.mean())) / vec1.std(unbiased=False) / vec2.std(unbiased=False)
feat = torch.tensor(np.random.randn(4096)).float().cuda()
feat.requires_grad_(False)
vect = torch.tensor(np.random.randn(4096)).float().cuda()
vect = vect / vect.norm()
vect.requires_grad_(False)
#%% Through this I can show that the HVP is converging
# Forward differencing method. One free parameter is "eps", i.e. the norm of the perturbation applied to the central
# vector. Too small a norm will make the estimate dominated by numerical noise.
hvp_col = []
for eps in [50, 25, 10, 5, 1, 5E-1, 1E-1, 1E-2, 1E-3, 1E-4, 1E-5, 1E-6, ]:
perturb_vecs = 5*feat.detach() + eps * torch.tensor([1, -1.0]).view(-1, 1).cuda() * vect.detach()
perturb_vecs.requires_grad_(True)
img = G.visualize(perturb_vecs)
resz_img = F.interpolate(img, (224, 224), mode='bilinear', align_corners=True)
obj = alexnet.features[:10](resz_img)[:, :, 6, 6].mean() # esz_img.std()
ftgrad_both = torch.autograd.grad(obj, perturb_vecs, retain_graph=False, create_graph=False, only_inputs=True)
hvp = (ftgrad_both[0][0, :] - ftgrad_both[0][1, :]) / (2 * eps)
hvp_col.append(hvp)
print(hvp)
# img = G.visualize(feat - eps * vect)
# resz_img = F.interpolate(img, (224, 224), mode='bilinear', align_corners=True)
# obj = alexnet.features[:10](resz_img)[0, :, 6, 6].sum() #esz_img.std()
# ftgrad_neg = torch.autograd.grad(obj, vect, retain_graph=False, create_graph=False, only_inputs=True)
# hvp = (ftgrad_pos[0] - ftgrad_neg[0]) / eps / 2
#%
for i in range(len(hvp_col)):
print("correlation %.4f mse %.1E" % (torch_corr(hvp_col[i], hvp_col[1]).item(),
F.mse_loss(hvp_col[i], hvp_col[1]).item()))
#%%
savedir = r"E:\OneDrive - Washington University in St. Louis\HessTune\HessDecomp_Method"
hvp_arr = torch.cat(tuple(hvp.unsqueeze(0) for hvp in hvp_col), dim=0)
corrmat = np.corrcoef(hvp_arr.cpu().numpy())
plt.matshow(corrmat, cmap=plt.cm.jet)
plt.yticks(range(12), labels=[50, 25, 10, 5, 1, 5E-1, 1E-1, 1E-2, 1E-3, 1E-4, 1E-5, 1E-6, ])
plt.xticks(range(12), labels=[50, 25, 10, 5, 1, 5E-1, 1E-1, 1E-2, 1E-3, 1E-4, 1E-5, 1E-6, ])
plt.ylim(top = -0.5, bottom=11.5)
plt.xlim(left = -0.5, right=11.5)
plt.xlabel("Perturb Vector Length")
plt.suptitle("Correlation of HVP result\nusing different EPS in forward differencing")
plt.colorbar()
plt.savefig(join(savedir, "HVP_corr_oneTrial.jpg") )
plt.show()
#%%
class GANForwardHVPOperator(Operator):
def __init__(
self,
model,
code,
objective,
preprocess=lambda img: F.interpolate(img, (224, 224), mode='bilinear', align_corners=True),
use_gpu=True,
EPS=1E-2,
# activation=False,
):
if use_gpu:
device = "cuda"
else:
device = "cpu"
self.device = device
if hasattr(model, "parameters"):
for param in model.parameters():
param.requires_grad_(False)
if hasattr(objective, "parameters"):
for param in objective.parameters():
param.requires_grad_(False)
self.model = model
self.objective = objective
self.preprocess = preprocess
self.code = code.clone().requires_grad_(False).float().to(device) # torch.float32
self.img_ref = self.model.visualize(self.code)
resz_img = self.preprocess(self.img_ref) # F.interpolate(self.img_ref, (224, 224), mode='bilinear', align_corners=True)
activ = self.objective(resz_img)
self.size = self.code.numel()
self.EPS = EPS
self.perturb_norm = self.code.norm() * self.EPS
def select_code(self, code):
self.code = code.clone().requires_grad_(False).float().to(self.device) # torch.float32
self.perturb_norm = self.code.norm() * self.EPS
self.img_ref = self.model.visualize(self.code + self.perturb_vec)
resz_img = self.preprocess(self.img_ref)
activ = self.objective(resz_img)
gradient = torch.autograd.grad(activ, self.perturb_vec, create_graph=False, retain_graph=False)[0]
self.gradient = gradient.view(-1)
def apply(self, vec, EPS=None):
"""
Returns H*vec where H is the Hessian of the objective w.r.t.
the latent code, estimated by central differencing of the gradient
"""
vecnorm = vec.norm()
if vecnorm < 1E-8:
return torch.zeros_like(vec).cuda()
EPS = self.EPS if EPS is None else EPS
self.perturb_norm = self.code.norm() * EPS
eps = self.perturb_norm / vecnorm
# take the second gradient by comparing 2 first order gradient.
perturb_vecs = self.code.detach() + eps * torch.tensor([1, -1.0]).view(-1, 1).to(self.device) * vec.detach()
perturb_vecs.requires_grad_(True)
img = self.model.visualize(perturb_vecs)
resz_img = self.preprocess(img)
activs = self.objective(resz_img) # , scaler=True
# obj = alexnet.features[:10](resz_img)[:, :, 6, 6].sum() # esz_img.std()
ftgrad_both = torch.autograd.grad(activs, perturb_vecs, retain_graph=False, create_graph=False, only_inputs=True)[0]
hessian_vec_prod = (ftgrad_both[0, :] - ftgrad_both[1, :]) / (2 * eps)
return hessian_vec_prod
def vHv_form(self, vec):
"""
Returns Bilinear form vec.T*H*vec where H is the hessian of the loss.
If vec is eigen vector of H this will return the eigen value.
"""
hessian_vec_prod = self.apply(vec)
vhv = (hessian_vec_prod * vec).sum()
return vhv
def zero_grad(self):
"""
Zeros out the gradient info for each parameter in the model
"""
pass
#%%
from torchvision.transforms import Normalize, Compose
RGB_mean = torch.tensor([0.485, 0.456, 0.406]).view(1,-1,1,1).cuda()
RGB_std = torch.tensor([0.229, 0.224, 0.225]).view(1,-1,1,1).cuda()
preprocess = Compose([lambda img: (F.interpolate(img, (224, 224), mode='bilinear', align_corners=True) - RGB_mean) / RGB_std])
# weight = torch.randn(256, 13, 13).cuda()
# objective = FeatLinModel(alexnet, layername='features_10', type="weight", weight=weight)
objective = FeatLinModel(alexnet, layername='features_10', type="neuron", chan=slice(None), pos=(10, 10))
feat = 5*torch.randn(4096).cuda()
activHVP = GANForwardHVPOperator(G, feat, objective, preprocess=preprocess)
activHVP.apply(1*torch.randn((4096)).requires_grad_(False).cuda())
#%%
import torch.optim as optim
feat = 5*torch.randn(4096).cuda()
feat.requires_grad_(True)
optimizer = optim.Adam([feat], lr=5e-2)
for step in range(200):
optimizer.zero_grad()
obj = objective(preprocess(G.visualize(feat)))
obj.backward()
optimizer.step()
if np.mod((step + 1), 10) == 0:
print("step %d: %.2f"%(step, obj.item()))
#%%
feat.requires_grad_(False)
activHVP = GANForwardHVPOperator(G, feat, objective, preprocess=preprocess)
activHVP.apply(1*torch.randn((4096)).requires_grad_(False).cuda())
#%%
t0 = time()
eigvals, eigvects = lanczos(activHVP, num_eigenthings=500, use_gpu=True)
print(time() - t0) # 40 sec
eigvals = eigvals[::-1]
eigvects = eigvects[::-1, :]
#%%
eigvals_u = eigvals
eigvects_u = eigvects
#%%
feat.requires_grad_(False)
metricHVP = GANHVPOperator(G, feat, model_squ)
t0 = time()
eigvals, eigvects = lanczos_generalized(activHVP, metric_operator=metricHVP, num_eigenthings=2, use_gpu=True, tol=1e-2)
print(time() - t0) # 40 sec
eigvals = eigvals[::-1]
eigvects = eigvects[::-1, :]
#%%
summary_dir = r"E:\OneDrive - Washington University in St. Louis\HessTune\HessDecomp_Method"
#%%
RND = np.random.randint(100)
ref_vect = (feat / feat.norm()).cpu().numpy()
save_indiv = False
save_row = False
vec_norm = feat.norm().item()
ang_step = 180 / 10
theta_arr_deg = ang_step * np.linspace(-5, 5, 21)
# coding: utf-8
# In[1]:
import mxnet as mx
import numpy as np
import matplotlib.pyplot as plt
import cv2
import scipy.io as sio
import pylab as pl
from collections import namedtuple
import time
Batch = namedtuple('Batch', ['data'])
# In[2]:
MAX_INPUT_DIM=5000.0
prob_thresh = 0.5
nms_thresh = 0.1
# In[3]:
def loadmeta(matpath):
f = sio.loadmat(matpath)
net = f['net']
clusters = np.copy(net['meta'][0][0][0][0][6])
averageImage = np.copy(net['meta'][0][0][0][0][2][0][0][2])
averageImage = averageImage[:, np.newaxis]
return clusters, averageImage
# In[4]:
def nms(dets, prob_thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= prob_thresh)[0]
order = order[inds + 1]
return keep
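# Example (sketch): two heavily overlapping boxes and one disjoint box, rows are
# [x1, y1, x2, y2, score]; with an overlap threshold of 0.3 the lower-scoring
# overlapping box is suppressed and the indices [0, 2] are kept.
#
#   dets = np.array([[ 0,  0, 10, 10, 0.9],
#                    [ 1,  1, 10, 10, 0.8],
#                    [50, 50, 60, 60, 0.7]])
#   nms(dets, 0.3)   # -> [0, 2]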
# In[5]:
clusters, averageImage = loadmeta('./hr_res101.mat')
# In[6]:
clusters_h = clusters[:,3] - clusters[:,1] + 1
clusters_w = clusters[:,2] - clusters[:,0] + 1
normal_idx = np.where(clusters[:,4] == 1)
# In[7]:
raw_img = cv2.imread('./stair_exit_000026.jpg')
"""
raw_img1=raw_img[:,:,0]
raw_img2=raw_img[:,:,1]
raw_img3=raw_img[:,:,2]
plt.imshow(raw_img1)
plt.show()
plt.imshow(raw_img2)
plt.show()
plt.imshow(raw_img3)
plt.show()
"""
raw_h = raw_img.shape[0]
raw_w = raw_img.shape[1]
#raw_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
raw_img_f = raw_img.astype(np.float32)
# In[8]:
min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx]/raw_w))), np.floor(np.log2(np.max(clusters_h[normal_idx]/raw_h))))
max_scale = min(1.0, -np.log2(max(raw_h, raw_w)/MAX_INPUT_DIM))
# In[9]:
scales_down = pl.frange(min_scale, 0, 1.)
scales_up = pl.frange(0.5, max_scale,0.5)
scales_pow = np.hstack((scales_down, scales_up))
from nutils import *
from nutils.testing import *
from nutils.elementseq import References
from nutils.topology import Topology
import numpy
import copy
import sys
import pickle
import subprocess
import base64
import itertools
import os
import unittest
def as_rounded_list(data):
return numpy.round(data, 5).tolist()
def pairwise(items):
return [[i, j] for i, j in zip(items[:-1], items[1:])]
def subdiv(V):
V = iter(V)
items = [next(V)]
for v in V:
items += [(items[-1] + v) / 2, v]
return items
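# Illustrative values (sketch): pairwise([0, 1, 2]) -> [[0, 1], [1, 2]] and
# subdiv([0, 1, 2]) -> [0, 0.5, 1, 1.5, 2], i.e. the original knots with their
# midpoints inserted, matching the vertices of one uniform refinement.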
class CommonAssertions:
def assertVertices(self, topo, desired_coords):
assert len(desired_coords) == len(topo)
bezier = topo.sample('bezier', 2)
actual_coords_flat = as_rounded_list(bezier.eval(self.geom))
for ielem, desired_elem_coords in enumerate(desired_coords):
actual_elem_coords = numpy.take(actual_coords_flat, bezier.getindex(ielem), axis=0)
self.assertEqual(actual_elem_coords.tolist(), desired_elem_coords)
def assertTake(self, topo, selection):
# Like `assertVertices` but using `self.desired_vertices[selection]`.
selection = numpy.asarray(selection, dtype=int).ravel()
verts = [self.desired_vertices[i] for i in selection]
self.assertVertices(topo, verts)
def assertCompressed(self, topo, mask):
# Like `assertVertices` but using `self.desired_vertices[mask]`.
mask = numpy.asarray(mask, dtype=bool).ravel()
assert mask.size == self.desired_nelems
verts = [verts for select, verts in zip(mask, self.desired_vertices) if select]
self.assertVertices(topo, verts)
def assertUnorderedVertices(self, topo, desired_coords):
assert len(desired_coords) == len(topo)
bezier = topo.sample('bezier', 2)
actual_coords_flat = as_rounded_list(bezier.eval(self.geom))
actual_coords = []
for ielem, desired_elem_coords in enumerate(desired_coords):
actual_elem_coords = numpy.take(actual_coords_flat, bezier.getindex(ielem), axis=0)
actual_coords.append(actual_elem_coords.tolist())
self.assertEqual(sorted(actual_coords), sorted(desired_coords))
class CommonTests(CommonAssertions):
def test_empty_like(self):
empty = self.topo.empty_like()
self.assertEqual(len(empty), 0)
self.assertEqual(empty.spaces, self.desired_spaces)
self.assertEqual(empty.space_dims, self.desired_space_dims)
self.assertEqual(empty.ndims, self.desired_ndims)
def test_spaces(self):
self.assertEqual(self.topo.spaces, self.desired_spaces)
def test_space_dims(self):
self.assertEqual(self.topo.space_dims, self.desired_space_dims)
def test_ndims(self):
self.assertEqual(self.topo.ndims, self.desired_ndims)
def test_len(self):
self.assertEqual(len(self.topo), self.desired_nelems)
def test_references(self):
assert len(self.desired_references) == self.desired_nelems
self.assertSequenceEqual(self.topo.references, self.desired_references)
def test_elements(self):
# This sort of tests the `self.topo.transforms` by evaluating `self.geom`
# and comparing with `self.desired_vertices`.
self.assertVertices(self.topo, self.desired_vertices)
def test_get_groups_nonexistent(self):
self.assertFalse(self.topo.get_groups('nonexistent'))
def test_getitem_empty(self):
with self.assertRaises(KeyError):
self.topo['nonexistent']
def test_take(self):
self.assertFalse(self.topo.take([]))
for ielem in range(self.desired_nelems):
self.assertTake(self.topo.take([ielem]), [ielem])
def test_take_invalid_indices(self):
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.take(numpy.array(0))
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.take(numpy.array([[0, 1], [2, 3]]))
def test_compress(self):
self.assertFalse(self.topo.compress([False]*self.desired_nelems))
for ielem in range(self.desired_nelems):
self.assertTake(self.topo.compress([i == ielem for i in range(self.desired_nelems)]), [ielem])
def test_slice_invalid_dim(self):
with self.assertRaisesRegex(IndexError, '^dimension index out of range$'):
self.topo.slice(slice(None), self.desired_ndims)
def test_f_index(self):
self.assertEqual(self.topo.sample('gauss', 0).eval(self.topo.f_index).tolist(), list(range(self.desired_nelems)))
def test_unit_integral(self):
self.assertAlmostEqual(self.topo.integral(function.J(self.geom), degree=0).eval(), sum(self.desired_volumes))
def test_unit_integrate(self):
self.assertAlmostEqual(self.topo.integrate(function.J(self.geom), degree=0), sum(self.desired_volumes))
def test_unit_integrate_elementwise(self):
self.assertEqual(as_rounded_list(self.topo.integrate_elementwise(function.J(self.geom), degree=0)), self.desired_volumes)
def test_refine_spaces_none(self):
self.assertEqual(self.topo.refine_spaces([]), self.topo)
def test_invalid_intersections(self):
with self.assertRaises(ValueError):
self.topo & Topology.empty(tuple('other' + space for space in self.desired_spaces), self.desired_space_dims, self.desired_ndims)
with self.assertRaises(ValueError):
self.topo & Topology.empty(self.desired_spaces, tuple(dim + 1 for dim in self.desired_space_dims), self.desired_ndims)
with self.assertRaises(ValueError):
self.topo & Topology.empty(self.desired_spaces, self.desired_space_dims, self.desired_ndims + 1)
def test_invalid_unions(self):
with self.assertRaises(ValueError):
self.topo | Topology.empty(tuple('other' + space for space in self.desired_spaces), self.desired_space_dims, self.desired_ndims)
with self.assertRaises(ValueError):
self.topo | Topology.empty(self.desired_spaces, tuple(dim + 1 for dim in self.desired_space_dims), self.desired_ndims)
with self.assertRaises(ValueError):
self.topo | Topology.empty(self.desired_spaces, self.desired_space_dims, self.desired_ndims + 1)
def test_select(self):
if self.desired_ndims == 0:
return
if self.desired_nelems:
centers = numpy.stack([numpy.mean(v, axis=0) for v in self.desired_vertices])
center = numpy.mean(centers, axis=0)
center = centers[numpy.argmin(((centers - center)**2).sum(1).round(5))]
else:
center = numpy.zeros(self.desired_ndims)
direction = 1 / (self.desired_ndims - numpy.arange(self.desired_ndims))
for i in range(self.desired_ndims):
desired_selection, = numpy.where([(numpy.sum((numpy.array(v) - center) * direction, axis=1) > 0).any() for v in self.desired_vertices])
desired_vertices = [self.desired_vertices[i] for i in desired_selection]
self.assertVertices(self.topo.select(((self.geom - center) * direction).sum()), desired_vertices)
direction = numpy.roll(direction, shift=1)
class ConformingTests:
@property
def edge_map(self):
# Mapping from edge vertices to pairs of element and edge indices based on
# `self.desired_references` and `self.desired_vertices`.
assert len(self.desired_references) == len(self.desired_vertices) == self.desired_nelems
edge_map = {}
for ielem, (ref, verts) in enumerate(zip(self.desired_references, self.desired_vertices)):
local_verts = as_rounded_list(ref.vertices)
for iedge, (trans, edge) in enumerate(ref.edges):
local_edge_verts = as_rounded_list(trans.apply(edge.vertices))
edge_verts = tuple(tuple(verts[local_verts.index(v)]) for v in local_edge_verts)
edge_map.setdefault(edge_verts, set()).add((ielem, iedge))
return edge_map
@property
def connectivity(self):
assert len(self.desired_references) == self.desired_nelems
connectivity = [[-1] * ref.nedges for ref in self.desired_references]
for sides in self.edge_map.values():
assert len(sides) <= 2
if len(sides) == 2:
(ielem1, iedge1), (ielem2, iedge2) = sides
connectivity[ielem1][iedge1] = ielem2
connectivity[ielem2][iedge2] = ielem1
return connectivity
def test_connectivity(self):
self.assertEqual(list(map(list, self.topo.connectivity)), self.connectivity)
def test_boundary_all_spaces(self):
boundary_vertices = [list(map(list, verts)) for verts, sides in self.edge_map.items() if len(sides) == 1]
self.assertUnorderedVertices(self.topo.boundary, boundary_vertices)
def test_interfaces_all_spaces(self):
interface_vertices = [list(map(list, verts)) for verts, sides in self.edge_map.items() if len(sides) == 2]
self.assertUnorderedVertices(self.topo.interfaces, interface_vertices)
def test_basis_std_degree1(self):
basis = self.topo.basis('std', degree=1)
values, verts = self.topo.sample('bezier', 2).eval([basis, self.geom])
dofs_to_verts = {}
verts_to_dofs = {}
for val, vert in zip(map(as_rounded_list, values), (tuple(as_rounded_list(v)) for v in verts)):
self.assertCountEqual(val, [1]+[0]*(len(val)-1))
dof = val.index(1)
if dof in dofs_to_verts:
self.assertEqual(dofs_to_verts[dof], vert)
else:
dofs_to_verts[dof] = vert
if vert in verts_to_dofs:
self.assertEqual(verts_to_dofs[vert], dof)
else:
verts_to_dofs[vert] = dof
self.assertEqual(sorted(dofs_to_verts), list(range(len(basis))))
self.assertEqual(sorted(verts_to_dofs), sorted(set(tuple(v) for e in self.desired_vertices for v in e)))
class NewTopologyRefine(TestCase, CommonAssertions):
# Tests for default implementations of `Topology.refine_*`.
def setUp(self):
super().setUp()
class TestTopo(Topology):
def __init__(self, real):
self.real = real
super().__init__(real.spaces, real.space_dims, real.references)
def refine_spaces(self, spaces):
return TestTopo(self.real.refine_spaces(spaces))
def sample(self, ischeme, degree):
return self.real.sample(ischeme, degree)
topo, self.geom = mesh.newrectilinear([4, 2], spaces=['X', 'Y'])
self.topo = TestTopo(topo)
@staticmethod
def mkverts(XX, YY):
return [[[x, y] for x in X for y in Y] for X in pairwise(XX) for Y in pairwise(YY)]
def test_refine_count_iter(self):
refine_iter = iter(self.topo.refine_iter)
X, Y = range(5), range(3)
for i in range(3):
desired = self.mkverts(X, Y)
self.assertVertices(self.topo.refine_count(i), desired)
self.assertVertices(self.topo.refine(i), desired)
self.assertVertices(next(refine_iter), desired)
X, Y = subdiv(X), subdiv(Y)
def test_refine_spaces(self):
# We only test `Topology.refine` because `Topology.refine_spaces` is
# abstract.
self.assertVertices(self.topo.refine(['X']), self.mkverts(subdiv(range(5)), range(3)))
self.assertVertices(self.topo.refine(['Y']), self.mkverts(range(5), subdiv(range(3))))
def test_refine_spaces_count(self):
self.assertVertices(self.topo.refine(dict(X=1, Y=2)), self.mkverts(subdiv(range(5)), subdiv(subdiv(range(3)))))
def test_refine_count_negative(self):
with self.assertRaisesRegex(ValueError, '^Negative counts are invalid.$'):
self.topo.refine_count(-1)
with self.assertRaisesRegex(ValueError, '^Negative counts are invalid.$'):
self.topo.refine(-1)
with self.assertRaisesRegex(ValueError, '^Negative counts are invalid.$'):
self.topo.refine_spaces_count(dict(X=-1))
def test_refine_unknown_space(self):
with self.assertRaisesRegex(ValueError, '^This topology does not have space Z.$'):
self.topo.refine_spaces(['Z'])
class NewTopologyTake(TestCase, CommonAssertions):
# Tests for default implementations of `Topology.take` and
# `Topology.compress`.
def setUp(self):
super().setUp()
class TestTopo(Topology):
def __init__(self, real):
self.real = real
super().__init__(real.spaces, real.space_dims, real.references)
def sample(self, ischeme, degree):
return self.real.sample(ischeme, degree)
topo, self.geom = mesh.newrectilinear([4, 2], spaces=['X', 'Y'])
self.topo = TestTopo(topo)
self.desired_vertices = [[[x, y] for x in X for y in Y] for X in pairwise(range(5)) for Y in pairwise(range(3))]
def test_take(self):
self.assertTake(self.topo.take([1, 3, 4]), [1, 3, 4])
self.assertTake(self.topo.take(numpy.array([1, 3, 4])), [1, 3, 4])
def test_getitem(self):
self.assertTake(self.topo[[1, 3, 4]], [1, 3, 4])
self.assertTake(self.topo[numpy.array([1, 3, 4])], [1, 3, 4])
def test_take_empty(self):
self.assertTake(self.topo.take([]), [])
self.assertTake(self.topo.take(numpy.array([], dtype=int)), [])
# Test whether an empty float array is allowed.
self.assertTake(self.topo.take(numpy.array([])), [])
def test_take_invalid_array(self):
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.take(numpy.array([[1, 2], [3, 4]]))
with self.assertRaises(TypeError):
self.topo.take(numpy.array([1, 2], dtype=float))
def test_compress(self):
self.assertTake(self.topo.compress([False, True, False, True, True, False, False, False]), [1, 3, 4])
def test_compress_invalid_array(self):
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.compress([[False, True]]*4)
with self.assertRaisesRegex(ValueError, '^length of mask does not match number of elements$'):
self.topo.compress([False])
class NewTopologySlice(TestCase, CommonAssertions):
# Tests for default implementation of `Topology.__getitem__`.
def setUp(self):
super().setUp()
self.topo, self.geom = mesh.newrectilinear([4, 2], spaces=['X', 'Y'])
self.desired_vertices = [[[x, y] for x in X for y in Y] for X in pairwise(range(5)) for Y in pairwise(range(3))]
self.idx = numpy.arange(8).reshape(4, 2)
def test_slice(self):
self.assertTake(self.topo.slice(slice(None), 0), self.idx)
self.assertTake(self.topo.slice(slice(None), 1), self.idx)
self.assertTake(self.topo.slice(slice(2, None), 0), self.idx[2:])
self.assertTake(self.topo.slice(slice(None, 1), 1), self.idx[:, :1])
def test_getitem(self):
self.assertTake(self.topo[:], self.idx)
self.assertTake(self.topo[:, :], self.idx)
self.assertTake(self.topo[..., :], self.idx)
self.assertTake(self.topo[..., :, :], self.idx)
self.assertTake(self.topo[:, ..., :], self.idx)
self.assertTake(self.topo[:, :, ...], self.idx)
self.assertTake(self.topo[2:], self.idx[2:])
self.assertTake(self.topo[2:, 1:], self.idx[2:, 1:])
def test_getitem_multiple_ellipsis(self):
with self.assertRaisesRegex(Exception, '^only one ellipsis is allowed$'):
self.topo[..., :, ...]
def test_getitem_too_many_indices(self):
with self.assertRaisesRegex(Exception, '^too many indices'):
self.topo[:, :, :]
def test_slice_invalid_dimensions(self):
with self.assertRaises(IndexError):
self.topo.slice(slice(None), -1)
with self.assertRaises(IndexError):
self.topo.slice(slice(None), 2)
class NewTopologyBoundaryInterfaces(TestCase):
def setUp(self):
super().setUp()
self.topo1, self.geom = mesh.line([0, 1, 2], space='X')
self.topo0 = self.topo1.boundary_spaces(['X'])
def test_boundary_0d(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no boundary.$'):
self.topo0.boundary_spaces(['X'])
def test_interfaces_0d(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no interfaces.$'):
self.topo0.interfaces_spaces(['X'])
def test_boundary_empty_spaces(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no boundary.$'):
self.topo0.boundary_spaces([])
def test_interfaces_empty_spaces(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no interfaces.$'):
self.topo0.interfaces_spaces([])
def test_boundary_unknown_space(self):
with self.assertRaisesRegex(ValueError, '^This topology does not have space Y.$'):
self.topo1.boundary_spaces(['Y'])
def test_interfaces_unknown_space(self):
with self.assertRaisesRegex(ValueError, '^This topology does not have space Y.$'):
self.topo1.interfaces_spaces(['Y'])
def test_basis_0d(self):
basis = self.topo0.basis('std', degree=0)
sampled = self.topo0.sample('bezier', 2).eval(basis)
self.assertEqual(as_rounded_list(sampled), [[1.], [1.]])
class NewEmpty(TestCase, CommonTests, ConformingTests):
def setUp(self):
super().setUp()
self.desired_spaces = 'a', 'b'
self.desired_space_dims = 1, 2
self.desired_ndims = 3
self.topo = Topology.empty(self.desired_spaces, self.desired_space_dims, self.desired_ndims)
self.geom = function.concatenate([function.rootcoords(space, dim) for space, dim in zip(self.desired_spaces, self.desired_space_dims)])
self.desired_nelems = 0
self.desired_volumes = []
self.desired_references = []
self.desired_vertices = []
def test_opposite(self):
self.assertEqual(len(~self.topo), 0)
def test_intersection(self):
atrans = transformseq.IndexTransforms(1, 1, 0)
btrans = transformseq.IndexTransforms(2, 1, 1)
other = topology.SimplexTopology('a', numpy.array([[0, 1]]), atrans, atrans) * topology.SimplexTopology('b', numpy.array([[0, 1, 2]]), btrans, btrans)
self.assertEqual(self.topo & other, self.topo)
self.assertEqual(other & self.topo, self.topo)
def test_union(self):
atrans = transformseq.IndexTransforms(1, 1, 0)
btrans = transformseq.IndexTransforms(2, 1, 1)
other = topology.SimplexTopology('a', numpy.array([[0, 1]]), atrans, atrans) * topology.SimplexTopology('b', numpy.array([[0, 1, 2]]), btrans, btrans)
self.assertEqual(self.topo | other, other)
self.assertEqual(other | self.topo, other)
def test_indicator(self):
self.assertEqual(self.topo.indicator('group').shape, ())
def test_f_coords(self):
self.assertEqual(self.topo.f_coords.shape, (3,))
class NewDisjointUnion(TestCase, CommonTests, ConformingTests):
def setUp(self):
super().setUp()
topo, self.geom = mesh.newrectilinear([8, 3], spaces='XY')
self.topo = Topology.disjoint_union(topo.slice(slice(0, 3), 0), topo.slice(slice(4, 8), 0).slice(slice(0, 2), 1))
self.desired_spaces = 'X', 'Y'
self.desired_space_dims = 1, 1
self.desired_ndims = 2
self.desired_nelems = 17
self.desired_volumes = [1] * 17
self.desired_references = [element.LineReference()**2]*17
self.desired_vertices = self.mkverts(pairwise(range(4)), pairwise(range(4))) + self.mkverts(pairwise(range(4, 9)), pairwise(range(3)))
@staticmethod
def mkverts(XX, YY):
return [[[x, y] for x in X for y in Y] for X in XX for Y in YY]
def test_refine(self):
self.assertVertices(self.topo.refine_spaces([]), self.mkverts(pairwise(range(4)), pairwise(range(4))) + self.mkverts(pairwise(range(4, 9)), pairwise(range(3))))
self.assertVertices(self.topo.refine_spaces(['X']), self.mkverts(pairwise(subdiv(range(4))), pairwise(range(4))) + self.mkverts(pairwise(subdiv(range(4, 9))), pairwise(range(3))))
self.assertVertices(self.topo.refine_spaces(['Y']), self.mkverts(pairwise(range(4)), pairwise(subdiv(range(4)))) + self.mkverts(pairwise(range(4, 9)), pairwise(subdiv(range(3)))))
self.assertVertices(self.topo.refine_spaces(['X', 'Y']), self.mkverts(pairwise(subdiv(range(4))), pairwise(subdiv(range(4)))) + self.mkverts(pairwise(subdiv(range(4, 9))), pairwise(subdiv(range(3)))))
def test_take(self):
self.assertVertices(self.topo.take([0]), self.mkverts([[0, 1]], [[0, 1]]))
self.assertVertices(self.topo.take([9, 10]), self.mkverts([[4, 5]], pairwise([0, 1, 2])))
self.assertVertices(self.topo.take([0, 9, 10]), self.mkverts([[0, 1]], [[0, 1]]) + self.mkverts([[4, 5]], pairwise([0, 1, 2])))
def test_slice_unstructured(self):
for i in range(self.desired_ndims):
with self.assertRaisesRegex(ValueError, '^cannot slice'):
self.topo.slice(slice(None), i)
def test_f_index(self):
with self.assertRaises(NotImplementedError):
self.topo.f_index
def test_basis_std_degree1(self):
with self.assertRaises(Exception):
self.topo.basis('std', degree=1)
def test_trim(self):
topo, x = mesh.line([0, 1, 2, 3], space='X')
topo = Topology.disjoint_union(topo.slice(slice(0, 1), 0), topo.slice(slice(2, 3), 0))
self.assertEqual(as_rounded_list(topo.trim(x-0.5, maxrefine=0).volume(x[None])), 1.5)
self.assertEqual(as_rounded_list(topo.trim(x-2.5, maxrefine=0).volume(x[None])), 0.5)
self.assertEqual(as_rounded_list(topo.trim(0.5-x, maxrefine=0).volume(x[None])), 0.5)
class NewMul(TestCase, CommonTests, ConformingTests):
def setUp(self):
super().setUp()
self.topo1, self.x = mesh.line([0, 1, 2], bnames=['a', 'b'], space='X')
self.topo2, self.y = mesh.line([0, 1, 2, 3], bnames=['c', 'd'], space='Y')
self.topo = self.topo1 * self.topo2
self.geom = function.stack([self.x, self.y])
self.desired_spaces = 'X', 'Y'
self.desired_space_dims = 1, 1
self.desired_ndims = 2
self.desired_nelems = 6
self.desired_volumes = [1]*6
self.desired_references = [element.LineReference()**2]*6
self.desired_vertices = self.mkverts(pairwise(range(3)), pairwise(range(4)))
@staticmethod
def mkverts(XX, YY):
return [[[x, y] for x in X for y in Y] for X in XX for Y in YY]
def test_f_coords(self):
self.assertEqual(as_rounded_list(self.topo.sample('bezier', 2).eval(self.topo.f_coords)), ([[0., 0.], [0., 1.]]*3+[[1., 0.], [1., 1.]]*3)*2)
def test_refine_spaces(self):
self.assertVertices(self.topo.refine_spaces([]), self.mkverts(pairwise(range(3)), pairwise(range(4))))
self.assertVertices(self.topo.refine_spaces(['X']), self.mkverts(pairwise(subdiv(range(3))), pairwise(range(4))))
self.assertVertices(self.topo.refine_spaces(['Y']), self.mkverts(pairwise(range(3)), pairwise(subdiv(range(4)))))
self.assertVertices(self.topo.refine_spaces(['X', 'Y']), self.mkverts(pairwise(subdiv(range(3))), pairwise(subdiv(range(4)))))
def test_boundary_spaces(self):
bX = self.mkverts([[0], [2]], pairwise(range(4)))
bY = self.mkverts(pairwise(range(3)), [[0], [3]])
self.assertVertices(self.topo.boundary_spaces(['X']), bX)
self.assertVertices(self.topo.boundary_spaces(['Y']), bY)
self.assertVertices(self.topo.boundary_spaces(['X', 'Y']), bY+bX)
def test_boundary_0d(self):
topo0 = mesh.line([0, 1, 2], space='Z')[0].boundary
for topo in self.topo * topo0, topo0 * self.topo:
with self.assertRaisesRegex(ValueError, '^A 0D topology has no boundary.$'):
topo.boundary_spaces('Z')
def test_interfaces_spaces(self):
iX = self.mkverts([[1]], pairwise(range(4)))
iY = self.mkverts(pairwise(range(3)), [[1], [2]])
self.assertVertices(self.topo.interfaces_spaces(['X']), iX)
self.assertVertices(self.topo.interfaces_spaces(['Y']), iY)
self.assertVertices(self.topo.interfaces_spaces(['X', 'Y']), iY+iX)
def test_interfaces_0d(self):
topo0 = mesh.line([0, 1, 2], space='Z')[0].boundary
for topo in self.topo * topo0, topo0 * self.topo:
with self.assertRaisesRegex(ValueError, '^A 0D topology has no interfaces.$'):
topo.interfaces_spaces('Z')
def test_slice(self):
self.assertVertices(self.topo.slice(slice(0, 1), 0), self.mkverts([[0, 1]], pairwise(range(4))))
self.assertVertices(self.topo.slice(slice(1, 3), 1), self.mkverts(pairwise(range(3)), pairwise(range(1, 4))))
def test_get_groups(self):
topo = self.topo1.withsubdomain(e=self.topo1[:1], g=self.topo1[:1]) * self.topo2.withsubdomain(f=self.topo2[:1], g=self.topo2[:1], h=self.topo2[2:])
self.assertVertices(topo.get_groups('e'), self.mkverts([[0, 1]], pairwise(range(0, 4))))
self.assertVertices(topo.get_groups('f'), self.mkverts(pairwise(range(3)), pairwise(range(0, 2))))
self.assertVertices(topo.get_groups('h'), self.mkverts(pairwise(range(3)), pairwise(range(2, 4))))
self.assertVertices(topo.get_groups('f', 'h'), self.mkverts(pairwise(range(3)), [[0, 1], [2, 3]]))
with self.assertRaises(NotImplementedError):
topo.get_groups('g')
def test_indicator(self):
topo = self.topo1.withsubdomain(e=self.topo1[:1], g=self.topo1[:1]) * self.topo2.withsubdomain(f=self.topo2[:1], g=self.topo2[:1], h=self.topo2[2:])
self.assertEqual(as_rounded_list(self.topo.sample('gauss', 0).eval(topo.indicator('e'))), [1, 1, 1, 0, 0, 0])
self.assertEqual(as_rounded_list(self.topo.sample('gauss', 0).eval(topo.indicator('f'))), [1, 0, 0, 1, 0, 0])
self.assertEqual(as_rounded_list(self.topo.sample('gauss', 0).eval(topo.indicator('h'))), [0, 0, 1, 0, 0, 1])
self.assertEqual(as_rounded_list(self.topo.sample('gauss', 0).eval(topo.indicator('f,h'))), [1, 0, 1, 1, 0, 1])
self.assertEqual(as_rounded_list(self.topo.sample('gauss', 0).eval(topo.indicator('nonexistent'))), [0, 0, 0, 0, 0, 0])
with self.assertRaises(NotImplementedError):
topo.indicator('g')
def test_common_spaces(self):
with self.assertRaisesRegex(ValueError, '^Cannot multiply'):
Topology.empty(['X'], [1], 1) * Topology.empty(['X', 'Y'], [1, 2], 3)
def test_basis(self):
self.assertEqual(len(self.topo.basis('spline', degree=1)), 3*4)
self.assertEqual(len(self.topo.basis('spline', degree=1, periodic=[0])), 2*4)
self.assertEqual(len(self.topo.basis('spline', degree=1, periodic=[1])), 3*3)
self.assertEqual(len(self.topo.basis('spline', degree=1, periodic=[0, 1])), 2*3)
self.assertEqual(len(self.topo.basis('spline', degree=[0, 1])), 2*4)
self.assertEqual(len(self.topo.basis('spline', degree=[1, 0])), 3*3)
self.assertEqual(len(self.topo.basis('spline', continuity=-2, degree=1)), (2*2)*(2*3))
self.assertEqual(len(self.topo.basis('spline', continuity=[-2, -1], degree=1)), (2*2)*4)
self.assertEqual(len(self.topo.basis('spline', continuity=[-1, -2], degree=1)), 3*(2*3))
self.assertEqual(len(self.topo.basis('spline', knotmultiplicities=[None, [1, 2, 1, 1]], degree=1)), 3*5)
self.assertEqual(len(self.topo.basis('spline', knotmultiplicities=[[1, 2, 1], [1, 2, 1, 1]], degree=1)), 4*5)
with self.assertRaisesRegex(ValueError, '^argument `degree` must have length'):
self.topo.basis('spline', degree=[0, 1, 2])
with self.assertRaisesRegex(ValueError, '^argument `degree` must be'):
self.topo.basis('spline', degree='a')
with self.assertRaisesRegex(ValueError, '^argument `continuity` must have length'):
self.topo.basis('spline', degree=1, continuity=[-1, -2, -1])
with self.assertRaisesRegex(ValueError, '^argument `continuity` must be'):
self.topo.basis('spline', degree=1, continuity='a')
with self.assertRaisesRegex(ValueError, '^argument `periodic` must be'):
self.topo.basis('spline', degree=1, periodic=['a', 'b'])
with self.assertRaisesRegex(ValueError, '^argument `knotmultiplicities` must have length'):
self.topo.basis('spline', degree=1, knotmultiplicities=[[1, 1, 1], [1, 1, 1, 1], [1, 1]])
with self.assertRaises(ValueError):
self.topo.basis('spline', degree=1, knotmultiplicities=['a', 'b'])
class NewWithGroupAliases(TestCase, CommonTests, ConformingTests):
def setUp(self):
super().setUp()
self.topo1, self.x = mesh.line([0, 1, 2], bnames=['a', 'b'], space='X')
self.topo1 = self.topo1.withsubdomain(e=self.topo1[:1])
self.topo2, self.y = mesh.line([0, 1, 2, 3], bnames=['c', 'd'], space='Y')
self.topo2 = self.topo2.withsubdomain(f=self.topo2[:1], g=self.topo2[2:])
self.topo = (self.topo1 * self.topo2).withgroups(vgroups=dict(ealias='e', falias='f', galias='g', fgalias='f,g'))
self.geom = function.stack([self.x, self.y])
self.desired_spaces = 'X', 'Y'
self.desired_space_dims = 1, 1
self.desired_ndims = 2
self.desired_nelems = 6
self.desired_volumes = [1]*6
self.desired_references = [element.LineReference()**2]*6
self.desired_vertices = [[[x, y] for x in X for y in Y] for X in pairwise(range(3)) for Y in pairwise(range(4))]
self.masks = dict(e=[1, 1, 1, 0, 0, 0], f=[1, 0, 0, 1, 0, 0], g=[0, 0, 1, 0, 0, 1])
def test_slice(self):
self.assertCompressed(self.topo.slice(slice(0, 1), 0), [1, 1, 1, 0, 0, 0])
def test_indicator(self):
e, f, g, fg = self.topo.sample('gauss', 0).eval(list(self.topo.indicator(g+'alias') for g in ('e', 'f', 'g', 'fg')))
self.assertEqual(as_rounded_list(e), [1, 1, 1, 0, 0, 0])
self.assertEqual(as_rounded_list(f), [1, 0, 0, 1, 0, 0])
self.assertEqual(as_rounded_list(g), [0, 0, 1, 0, 0, 1])
self.assertEqual(as_rounded_list(fg), [1, 0, 1, 1, 0, 1])
class TopologyAssertions:
def assertConnectivity(self, domain, geom):
boundary = domain.boundary
interfaces = domain.interfaces
bmask = numpy.zeros(len(boundary), dtype=int)
imask = numpy.zeros(len(interfaces), dtype=int)
coordinates = evaluable.Points(evaluable.NPoints(), boundary.ndims)
transform_chain = transform.EvaluableTransformChain.from_argument('trans', domain.transforms.todims, boundary.ndims)
lowered_geom = geom.lower(coordinates.shape[:-1], {domain.space: (transform_chain,)*2}, {domain.space: coordinates}).simplified
for ielem, ioppelems in enumerate(domain.connectivity):
for iedge, ioppelem in enumerate(ioppelems):
etrans, eref = domain.references[ielem].edges[iedge]
trans = domain.transforms[ielem] + (etrans,)
if ioppelem == -1:
index = boundary.transforms.index(trans)
bmask[index] += 1
else:
ioppedge = util.index(domain.connectivity[ioppelem], ielem)
oppetrans, opperef = domain.references[ioppelem].edges[ioppedge]
opptrans = domain.transforms[ioppelem] + (oppetrans,)
try:
index = interfaces.transforms.index(trans)
except ValueError:
index = interfaces.transforms.index(opptrans)
self.assertEqual(interfaces.opposites[index], trans)
else:
self.assertEqual(interfaces.opposites[index], opptrans)
imask[index] += 1
self.assertEqual(eref, opperef)
points = eref.getpoints('gauss', 2)
a0 = lowered_geom.eval(trans=trans, _points=points)
a1 = lowered_geom.eval(trans=opptrans, _points=points)
numpy.testing.assert_array_almost_equal(a0, a1)
self.assertTrue(numpy.equal(bmask, 1).all())
self.assertTrue(numpy.equal(imask, 2).all())
def assertBoundaries(self, domain, geom):
# Test ∫_Ω f_,i = ∫_∂Ω f n_i.
f = ((0.5 - geom)**2).sum(axis=0)
lhs = domain.integrate(f.grad(geom)*function.J(geom), ischeme='gauss2')
rhs = domain.boundary.integrate(f*function.normal(geom)*function.J(geom), ischeme='gauss2')
numpy.testing.assert_array_almost_equal(lhs, rhs)
def assertInterfaces(self, domain, geom, periodic, interfaces=None, elemindicator=None):
# If `periodic` is true, the domain should be a unit hypercube or this test
# might fail. The function `f` defined below is C0 continuous on a periodic
# hypercube and Cinf continuous inside the hypercube.
if interfaces is None:
interfaces = domain.interfaces
x1, x2, n1, n2 = interfaces.sample('gauss', 2).eval([geom, function.opposite(geom), geom.normal(), function.opposite(geom.normal())])
if not periodic:
numpy.testing.assert_array_almost_equal(x1, x2)
numpy.testing.assert_array_almost_equal(n1, -n2)
# Test ∫_E f_,i = ∫_∂E f n_i ∀ E in `domain`.
f = ((0.5 - geom)**2).sum(axis=0)
if elemindicator is None:
elemindicator = domain.basis('discont', degree=0)
elemindicator = elemindicator.vector(domain.ndims)
lhs = domain.integrate((elemindicator*f.grad(geom)[None]).sum(axis=1)*function.J(geom), ischeme='gauss2')
rhs = interfaces.integrate((-function.jump(elemindicator)*f*function.normal(geom)[None]).sum(axis=1)*function.J(geom), ischeme='gauss2')
if len(domain.boundary):
rhs += domain.boundary.integrate((elemindicator*f*function.normal(geom)[None]).sum(axis=1)*function.J(geom), ischeme='gauss2')
numpy.testing.assert_array_almost_equal(lhs, rhs)
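    # Note (added for clarity): the identity exercised by assertBoundaries and assertInterfaces
    # is, per element E, ∫_E ∂_i f dV = ∫_∂E f n_i dS. Summing over all elements, every interior
    # face appears twice with opposite normals and is captured by the interface jump term, while
    # the remaining faces reduce to the integral over the domain boundary.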
@parametrize
class structure(TestCase, TopologyAssertions):
def setUp(self):
super().setUp()
domain, self.geom = mesh.rectilinear([[-1, 0, 1]]*self.ndims)
self.domain = domain.refine(self.refine)
def test_domain(self):
self.assertConnectivity(self.domain, self.geom)
def test_boundaries(self):
for grp in ['left', 'right', 'top', 'bottom', 'front', 'back'][:self.ndims*2]:
bnd = self.domain.boundary[grp]
xn = bnd.sample('gauss', 1).eval(self.geom.dotnorm(self.geom))
numpy.testing.assert_array_less(0, xn, 'inward pointing normals')
self.assertConnectivity(bnd, self.geom)
structure(ndims=2, refine=0)
structure(ndims=3, refine=0)
structure(ndims=2, refine=1)
structure(ndims=3, refine=1)
class picklability(TestCase):
def assert_pickle_dump_load(self, data):
script = b'from nutils import *\nimport pickle, base64\npickle.loads(base64.decodebytes(b"""' \
+ base64.encodebytes(pickle.dumps(data)) \
+ b'"""))'
p = subprocess.Popen([sys.executable], stdin=subprocess.PIPE)
p.communicate(script)
self.assertEqual(p.wait(), 0, 'unpickling failed')
def test_domain(self):
domain, geom = mesh.rectilinear([[0, 1, 2]]*2)
self.assert_pickle_dump_load(domain)
def test_geom(self):
domain, geom = mesh.rectilinear([[0, 1, 2]]*2)
self.assert_pickle_dump_load(geom)
def test_basis(self):
domain, geom = mesh.rectilinear([[0, 1, 2]]*2)
basis = domain.basis('spline', degree=2)
self.assert_pickle_dump_load(basis)
@parametrize
class common_refine(TestCase):
def _shield(self, topo):
return topo if self.special \
else topology.TransformChainsTopology(space=topo.space, references=topo.references, transforms=topo.transforms, opposites=topo.opposites)
def test(self):
dom, geom = mesh.rectilinear([[0, 1, 2], [0, 1, 2]])
doms, funs, vals = {}, {}, {}
indices = tuple(range(len(dom.transforms)))
doms['1'] = dom.refined_by(indices[:1])
funs['1'] = doms['1'].basis('th-std', degree=1)
vals['1'] = 0.375, 0.25, 0.375, 0.9375, 0.5, 0.25, 0.5, 0.25, 0.0625, 0.125, 0.125, 0.25
doms['234'] = dom.refined_by(indices[1:])
funs['234'] = doms['234'].basis('th-std', degree=1)
vals['234'] = 0.25, 0.375, 0.375, 0.5625, 0.125, 0.0625, 0.25, 0.125, 0.25, 0.125, 0.125, 0.25, 0.25, 0.25, 0.125, 0.0625, 0.125, 0.125, 0.125, 0.0625
doms['123'] = dom.refined_by(indices[:-1])
funs['123'] = doms['123'].basis('th-std', degree=1)
vals['123'] = 0.5625, 0.375, 0.375, 0.25, 0.0625, 0.125, 0.125, 0.125, 0.0625, 0.125, 0.25, 0.25, 0.25, 0.125, 0.125, 0.25, 0.125, 0.25, 0.0625, 0.125
doms['4'] = dom.refined_by(indices[-1:])
funs['4'] = doms['4'].basis('th-std', degree=1)
vals['4'] = 0.25, 0.5, 0.25, 0.5, 0.9375, 0.375, 0.25, 0.375, 0.25, 0.125, 0.125, 0.0625
for a, b, n in ('1', '234', 16), ('1', '4', 10), ('123', '234', 16):
with self.subTest('ref{}vs{}'.format(a, b)):
common = doms[a] & self._shield(doms[b])
self.assertEqual(len(common), n)
for c in a, b:
testvals = common.integrate(funs[c]*function.J(geom), ischeme='gauss1')
| numpy.testing.assert_array_almost_equal(testvals, vals[c]) | numpy.testing.assert_array_almost_equal |
"""Contains functions to parse and preprocess information from the input file"""
import sys
import os
import h5py
import logging
import multiprocessing as mp
import numpy as np
import pandas as pd
import pickle
import signal as sig
from .io_ import decodeUTF8
from .namedtuples import CountInfo
from .namedtuples import GeneInfo
from .namedtuples import GeneTable
from .namedtuples import ReadingFrameTuple
from .utils import encode_chromosome
from .utils import find_overlapping_cds_simple
from .utils import get_successor_list
from .utils import leq_strand
def genes_preprocess_batch(genes, gene_idxs, gene_cds_begin_dict, all_read_frames=False):
gene_info = []
for gene in genes:
gene.from_sparse()
gene.name = gene.name.split('.')[0] #Do not consider the version
assert (gene.strand in ["+", "-"])
assert (len(gene.transcripts) == len(gene.exons))
# Ignore genes that have no CDS annotated in annotated frame mode
if (not all_read_frames) and (gene.name not in gene_cds_begin_dict):
gene_info.append(None)
continue
vertex_succ_list = get_successor_list(gene.splicegraph.edges, gene.splicegraph.vertices, gene.strand)
if gene.strand == "+":
vertex_order = np.argsort(gene.splicegraph.vertices[0, :])
else: # gene.strand=="-"
vertex_order = np.argsort(gene.splicegraph.vertices[1, :])[::-1]
# get the reading_frames
reading_frames = {}
vertex_len_dict = {}
if not all_read_frames:
for idx in vertex_order:
reading_frames[idx] = set()
v_start = gene.splicegraph.vertices[0, idx]
v_stop = gene.splicegraph.vertices[1, idx]
cds_begins = find_overlapping_cds_simple(v_start, v_stop, gene_cds_begin_dict[gene.name], gene.strand)
vertex_len_dict[idx] = v_stop - v_start
# Initialize reading regions from the CDS transcript annotations
for cds_begin in cds_begins:
line_elems = cds_begin[2]
cds_strand = line_elems[6]
assert (cds_strand == gene.strand)
cds_phase = int(line_elems[7])
cds_left = int(line_elems[3])-1
cds_right = int(line_elems[4])
                    # TODO: need to remove the redundancy of (cds_start, cds_stop, item)
if gene.strand == "-":
cds_right_modi = max(cds_right - cds_phase,v_start)
cds_left_modi = v_start
n_trailing_bases = cds_right_modi - cds_left_modi
else:
cds_left_modi = min(cds_left + cds_phase,v_stop)
cds_right_modi = v_stop
n_trailing_bases = cds_right_modi - cds_left_modi
read_phase = n_trailing_bases % 3
reading_frames[idx].add(ReadingFrameTuple(cds_left_modi, cds_right_modi, read_phase))
gene.to_sparse()
gene_info.append(GeneInfo(vertex_succ_list, vertex_order, reading_frames, vertex_len_dict, gene.splicegraph.vertices.shape[1]))
return gene_info, gene_idxs, genes
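# A minimal, illustrative sketch (not part of the original module) of the reading-frame
# arithmetic used in genes_preprocess_batch above for a '+' strand vertex; the coordinates
# below are hypothetical.
def _example_read_phase():
    v_stop = 130                                         # hypothetical vertex end coordinate
    cds_left, cds_phase = 103, 2                         # hypothetical CDS start (0-based) and GTF phase column
    cds_left_modi = min(cds_left + cds_phase, v_stop)    # 105: shift the CDS start by the phase
    n_trailing_bases = v_stop - cds_left_modi            # 25 bases remain inside this vertex
    return n_trailing_bases % 3                          # read_phase == 1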
def genes_preprocess_all(genes, gene_cds_begin_dict, parallel=1, all_read_frames=False):
""" Preprocess the gene and generate new attributes under gene object
Modify the gene object directly
Parameters
----------
genes: List[Object]. List of gene objects. The object is generated by SplAdder
gene_cds_begin_dict: Dict. str -> List(int) From gene name to list of cds start positions
"""
if parallel > 1:
global genes_info
global genes_modif
global cnt
genes_info = np.zeros((genes.shape[0],), dtype=object)
genes_modif = np.zeros((genes.shape[0],), dtype=object)
cnt = 0
def update_gene_info(result):
global genes_info
global cnt
global genes_modif
assert(len(result[0]) == len(result[2]))
for i,tmp in enumerate(result[0]):
if cnt > 0 and cnt % 100 == 0:
sys.stdout.write('.')
if cnt % 1000 == 0:
sys.stdout.write('%i/%i\n' % (cnt, genes.shape[0]))
sys.stdout.flush()
cnt += 1
genes_info[result[1][i]] = tmp
genes_modif[result[1][i]] = result[2][i]
del result
pool = mp.Pool(processes=parallel, initializer=lambda: sig.signal(sig.SIGINT, sig.SIG_IGN))
for i in range(0, genes.shape[0], 100):
gene_idx = np.arange(i, min(i + 100, genes.shape[0]))
_ = pool.apply_async(genes_preprocess_batch, args=(genes[gene_idx], gene_idx, gene_cds_begin_dict, all_read_frames,), callback=update_gene_info)
pool.close()
pool.join()
else:
genes_info = genes_preprocess_batch(genes, np.arange(genes.shape[0]), gene_cds_begin_dict, all_read_frames)[0]
genes_modif = genes
return genes_info, genes_modif
def preprocess_ann(ann_path):
""" Extract information from annotation file (.gtf, .gff and .gff3)
Parameters
----------
ann_path: str. Annotation file path
Returns
-------
    gene_table: NamedTuple. Stores the gene-transcript-CDS mapping tables derived
        from the annotation file; has attributes ['gene_to_cds_begin', 'ts_to_cds', 'gene_to_cds']
    chromosome_set: set. Stores the chromosome names found in the annotation.
"""
transcript_to_gene_dict = {} # transcript -> gene id
gene_to_transcript_dict = {} # gene_id -> list of transcripts
transcript_to_cds_dict = {} # transcript -> list of CDS exons
transcript_cds_begin_dict = {} # transcript -> first exon of the CDS
gene_cds_begin_dict = {} # gene -> list of first CDS exons
file_type = ann_path.split('.')[-1]
chromesome_set = set()
# collect information from annotation file
for line in open(ann_path, 'r'):
if line[0] == '#':
continue
item = line.strip().split('\t')
chromesome_set.add(item[0])
feature_type = item[2]
attribute_item = item[-1]
attribute_dict = attribute_item_to_dict(attribute_item, file_type, feature_type)
# store relationship between gene ID and its transcript IDs
if feature_type in ['transcript', 'mRNA']:
gene_id = attribute_dict['gene_id']
gene_id = gene_id.split('.')[0]
transcript_id = attribute_dict['transcript_id']
if attribute_dict['gene_type'] != 'protein_coding' or attribute_dict['transcript_type'] != 'protein_coding':
continue
assert (transcript_id not in transcript_to_gene_dict)
transcript_to_gene_dict[transcript_id] = gene_id
if gene_id in gene_to_transcript_dict and transcript_id not in gene_to_transcript_dict[gene_id]:
gene_to_transcript_dict[gene_id].append(transcript_id)
else:
gene_to_transcript_dict[gene_id] = [transcript_id]
        # TODO: Python is 0-based while gene annotation files (.gtf, .vcf, .maf) are 1-based
elif feature_type == "CDS":
parent_ts = attribute_dict['transcript_id']
strand_mode = item[6]
cds_left = int(item[3])-1
cds_right = int(item[4])
frameshift = int(item[7])
if parent_ts in transcript_to_cds_dict:
transcript_to_cds_dict[parent_ts].append((cds_left, cds_right, frameshift))
else:
transcript_to_cds_dict[parent_ts] = [(cds_left, cds_right, frameshift)]
if strand_mode == "+" :
cds_start, cds_stop = cds_left, cds_right
else:
cds_start, cds_stop = cds_right, cds_left
            # we only consider the start of the whole CoDing Sequence (CDS)
if parent_ts not in transcript_cds_begin_dict or \
leq_strand(cds_start, transcript_cds_begin_dict[parent_ts][0], strand_mode):
transcript_cds_begin_dict[parent_ts] = (cds_start, cds_stop, item)
# collect first CDS exons for all transcripts of a gene
for ts_key in transcript_to_gene_dict:
target_gene = transcript_to_gene_dict[ts_key]
if target_gene not in gene_cds_begin_dict:
gene_cds_begin_dict[target_gene] = []
if ts_key in transcript_cds_begin_dict:
gene_cds_begin_dict[target_gene].append(transcript_cds_begin_dict[ts_key])
# sort list of CDS exons per transcript
for ts_key in transcript_to_cds_dict:
transcript_to_cds_dict[ts_key] = sorted(transcript_to_cds_dict[ts_key], key=lambda coordpair: coordpair[0])
genetable = GeneTable(gene_cds_begin_dict, transcript_to_cds_dict, gene_to_transcript_dict)
return genetable,chromesome_set
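# A hedged usage sketch (not part of the original module) of how preprocess_ann and
# genes_preprocess_all are assumed to be chained; the annotation path and the `genes` array
# are placeholders, and the GeneTable attribute name follows the attribute list in the
# preprocess_ann docstring above.
def _example_annotation_pipeline(genes, ann_path='annotation.gtf'):
    genetable, chromosomes = preprocess_ann(ann_path)
    gene_info, genes_modif = genes_preprocess_all(genes, genetable.gene_to_cds_begin, parallel=1)
    return gene_info, genes_modif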
def attribute_item_to_dict(a_item, file_type, feature_type):
""" From attribute item in annotation file to get corresponding dictionary
Parameters
----------
a_item: str. attribute item
file_type: str. Choose from {'gtf', 'gff', 'gff3'}
    feature_type: str. Feature type of the annotation line; determines which extra fields
        are extracted. We only consider 'CDS', 'mRNA' and 'transcript'
Returns
-------
gtf_dict: dict. store all the necessary data
"""
gtf_dict = {}
if file_type.lower() == 'gtf':
attribute_list = a_item.split('; ')
for attribute_pair in attribute_list:
pair = attribute_pair.split(' ')
gtf_dict[pair[0]] = pair[1][1:-1]
elif file_type.lower() == 'gff3':
attribute_list = a_item.split(';')
for attribute_pair in attribute_list:
pair = attribute_pair.split('=')
gtf_dict[pair[0]] = pair[1]
elif file_type.lower() == 'gff':
gff_dict = {}
attribute_list = a_item.split(';')
for attribute_pair in attribute_list:
pair = attribute_pair.split('=')
            gff_dict[pair[0]] = pair[1]  # no quotes ("") to delete here; currently we only work on level-2 features
if feature_type == 'CDS':
gtf_dict['transcript_id'] = gff_dict['Parent']
elif feature_type in {'mRNA', 'transcript'}: # mRNA or transcript
gtf_dict['gene_id'] = gff_dict['geneID']
gtf_dict['transcript_id'] = gff_dict['ID']
gtf_dict['gene_type'] = gff_dict['gene_type']
gtf_dict['transcript_type'] = gff_dict['transcript_type']
return gtf_dict
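# A minimal illustration (not part of the original module) of attribute_item_to_dict on a
# hypothetical GTF attribute column; note the string carries no trailing ';'.
def _example_attribute_parsing():
    a_item = 'gene_id "GENE1.1"; transcript_id "TS1.1"'
    d = attribute_item_to_dict(a_item, 'gtf', 'transcript')
    return d['gene_id'], d['transcript_id']   # ('GENE1.1', 'TS1.1')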
def search_edge_metadata_segmentgraph(gene, coord, countinfo, Idx, edge_idxs=None, edge_counts=None, cross_graph_expr=None):
"""Given the ordered edge coordinates of the edge, return expression information of the edge
Parameters
----------
gene: Object. Generated by SplAdder
coord: np.array of length 4. Sorted coordinates of 4 positions in ascending order
countinfo: NamedTuple, contains SplAdder count info
Idx: Namedtuple, has attribute idx.gene and idx.sample
    edge_idxs: np.array, containing the edge index values for the current gene
    edge_counts: np.array, containing the edge count values for the current gene
Returns
-------
count: tuple of float. Expression level for the given edges.
"""
def get_segmentgraph_edge_expr(sorted_pos, edge_idxs, edge_counts=None):
a = | np.searchsorted(segmentgraph.segments[1, :], sorted_pos[1]) | numpy.searchsorted |
import numpy as np
import pylab as pl
from . import utils
from sklearn.utils import check_X_y,check_array
from sklearn.neural_network import MLPClassifier as MultilayerPerceptronClassifier
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression as LogReg
from sklearn.linear_model import SGDClassifier
from .supervised_numpynet import *
class GenericClassifier(object):
def percent_correct(self,vectors,targets):
return self.score(vectors,targets)*100.0
def predict_names(self,vectors,names):
result=self.predict(vectors)
return [names[i] for i in result]
class SVM(SVC,GenericClassifier):
pass
class LogisticRegression(LogReg,GenericClassifier):
pass
class BackProp(MultilayerPerceptronClassifier,GenericClassifier):
def __init__(self,**kwargs):
if 'tol' not in kwargs:
kwargs['tol']=1e-7
MultilayerPerceptronClassifier.__init__(self,**kwargs)
self.equivalent={'weights':'coefs_',
}
self.__dict__.update(self.equivalent)
def fit(self,*args,**kwargs):
MultilayerPerceptronClassifier.fit(self,*args,**kwargs)
for name in self.equivalent:
super(MultilayerPerceptronClassifier,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
def output(self, X):
"""Fit the model to the data X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples)
Predicted target values per element in X.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
layer_units = [X.shape[1]] + hidden_layer_sizes + \
[self.n_outputs_]
# Initialize layers
activations = []
activations.append(X)
for i in range(self.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
# forward propagate
self._forward_pass(activations)
y_pred = activations[-1]
return activations[1:]
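# A hedged usage sketch (not part of the original module): fit BackProp on a tiny synthetic
# dataset and inspect the per-layer activations returned by output(); shapes are indicative only.
def _example_backprop_activations():
    X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    y = np.array([0, 1, 1, 0])
    net = BackProp(hidden_layer_sizes=(4,), max_iter=2000)
    net.fit(X, y)
    activations = net.output(X)            # hidden-layer activations first, output layer last
    return [a.shape for a in activations]  # e.g. [(4, 4), (4, 1)] for this toy problem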
from sklearn.neighbors import KNeighborsClassifier
class kNearestNeighbor(KNeighborsClassifier,GenericClassifier):
def __init__(self,k=5):
self.k=k
KNeighborsClassifier.__init__(self,n_neighbors=k)
from sklearn.naive_bayes import GaussianNB
class NaiveBayes(GaussianNB,GenericClassifier):
def __init__(self):
GaussianNB.__init__(self)
self.var_smoothing=1e-2 # make it much more stable
self.equivalent={'means':'theta_',
'stddevs':'sigma_',
'fraction':'class_prior_'}
#self.__dict__.update(self.equivalent)
def fit(self,*args,**kwargs):
GaussianNB.fit(self,*args,**kwargs)
for name in self.equivalent:
super(GaussianNB,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
def anotherfit(self, X, y):
X,y=check_X_y(X,y)
GaussianNB.fit(self,X,y)
for name in self.equivalent:
super(GaussianNB,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
    def predict_probability(self, X):
        return self.predict_proba(X)
def plot_centers(self):
ax=pl.gca().axis()
colors=utils.bold_colors
angle=np.linspace(0,2*np.pi,100)
i=0
for c,r in zip(self.means,self.stddevs):
pl.plot(c[0],c[1],'*',color=colors[i],markersize=15)
i+=1
i=0
for c,r in zip(self.means,self.stddevs):
for k in range(3):
xd=np.cos(angle)*r[0]*(k+1) + c[0]
yd=np.sin(angle)*r[1]*(k+1) + c[1]
pl.plot(xd,yd,'-',linewidth=3,color='k',alpha=0.5)
i+=1
#pl.axis('equal')
pl.gca().axis(ax)
from sklearn.linear_model import Perceptron as skPerceptron
class Perceptron(skPerceptron,GenericClassifier):
def __init__(self,number_of_iterations=50,tol=1e-3):
skPerceptron.__init__(self,shuffle=True,max_iter=number_of_iterations,tol=tol)
self.equivalent={'weights':'coef_',
'biases':'intercept_',
}
#self.__dict__.update(self.equivalent)
def fit(self,*args,**kwargs):
skPerceptron.fit(self,*args,**kwargs)
for name in self.equivalent:
super(skPerceptron,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
def output(self,vectors):
return self.decision_function(vectors)
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.base import BaseEstimator, ClassifierMixin
class RCEsk(BaseEstimator, ClassifierMixin):
def __init__(self, metric='euclidean',r_min=0.1,r_max=1.0,r_step=1e-30,verbose=False):
self.r_min=r_min
self.r_max=r_max
self.r_step=r_step
self.metric = metric
        self.centers_=np.array([],dtype=float)
        self.radii_=np.array([],dtype=float)
        self.targets_=np.array([],dtype=int)
self.verbose=verbose
def _add_center(self,center,radius,target):
try:
center=center.toarray() # deal with sparse
except AttributeError:
pass
        center=np.array(center,dtype=float)
        radius=np.array([radius],dtype=float)
        target=np.array([target],dtype=int)
if len(self.centers_)==0:
self.centers_=center
self.targets_=target
self.radii_=radius
else:
self.centers_=np.vstack( (self.centers_,center) )
self.targets_=np.concatenate( (self.targets_,target) )
self.radii_=np.concatenate( (self.radii_,radius) )
def fit(self, X, y):
X,y=check_X_y(X,y)
# X, y = check_arrays(X, y, sparse_format="csr")
# y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
classes = np.unique(y)
self.classes_ = classes
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has fewer than 2 classes')
if len(self.centers_)>0:
assert len(self.centers_[0])==n_features
# first pass
pass_number=0
for v,t in zip(X,y): # Go through all of the data points
v=v.reshape(1, -1)
if len(self.centers_)==0:
self._add_center(v,self.r_max,t)
continue
match=self.targets_[ (pairwise_distances(v,self.centers_,metric=self.metric)<self.radii_).ravel() ]
# if a point is not already in a sphere, of correct category,
# add a sphere, centered at that point, of the correct category
if not t in match:
self._add_center(v,self.r_max,t)
continue
pass_number+=1
if self.verbose:
print("%d clusters." % (len(self.centers_)))
# second pass
stop=False
while not stop:
old_radii_=self.radii_.copy()
for v,t in zip(X,y): # Go through all of the data points (again)
v=v.reshape(1, -1)
D=pairwise_distances(v,self.centers_,metric=self.metric).ravel()
within_centers=(D<self.radii_)
matched=(t==self.targets_) & (within_centers)
# not already in a sphere, of correct category --> add a sphere,
# centered at that point, of the correct category
if not any(matched):
self._add_center(v,self.r_max,t)
continue
not_matched=(t!=self.targets_) & (within_centers)
# in a sphere of wrong category -- > shrink the wrong sphere as much as possible
self.radii_[not_matched]-=D[not_matched]-self.r_step
self.radii_[self.radii_<self.r_min]=self.r_min
pass_number+=1
if self.verbose:
print("%d clusters." % (len(self.centers_)))
if len(old_radii_)!=len(self.radii_):
continue
# Repeat until no changes
if sum(abs(self.radii_-old_radii_))<1e-10:
stop=True
def predict(self,X):
X = check_array(X)
if len(self.centers_)==0:
raise AttributeError("Model has not been trained yet.")
result=[]
for vector in X:
vector=vector.reshape(1, -1)
D=pairwise_distances(vector, self.centers_, metric=self.metric)/self.radii_
result.append(self.targets_[D.argmin()])
return np.array(result)
class RCE(RCEsk,GenericClassifier):
def __init__(self, **kwargs):
RCEsk.__init__(self, **kwargs)
self.equivalent={'centers':'centers_',
'radii':'radii_',
'targets':'targets_'}
self.__dict__.update(self.equivalent)
def fit(self,*args,**kwargs):
RCEsk.fit(self,*args,**kwargs)
for name in self.equivalent:
super(RCE,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
def plot_centers(self):
colors=utils.bold_colors
for c,r,t in zip(self.centers_,self.radii_,self.targets_):
pl.plot(c[0],c[1],'*',color=colors[t])
angle=np.linspace(0,2*np.pi,100)
for c,r,t in zip(self.centers_,self.radii_,self.targets_):
xd=np.cos(angle)*r + c[0]
yd=np.sin(angle)*r + c[1]
pl.plot(xd,yd,'-',color=colors[t])
pl.axis('equal')
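# A hedged usage sketch (not part of the original module): fit the RCE sphere classifier on
# synthetic 2D blobs; the `centers` attribute name follows the `equivalent` mapping defined
# in the class above.
def _example_rce_usage():
    np.random.seed(0)
    X = np.vstack([np.random.randn(20, 2), np.random.randn(20, 2) + 6.0])
    y = np.array([0] * 20 + [1] * 20)
    clf = RCE(r_max=2.0)
    clf.fit(X, y)
    return clf.percent_correct(X, y), len(clf.centers)   # accuracy (%) and number of spheres kept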
class CSCsk(BaseEstimator, ClassifierMixin):
def __init__(self, metric='euclidean',r_step=1e-30,verbose=False):
self.r_step=r_step
self.metric = metric
        self.centers_=np.array([],dtype=float)
        self.radii_=np.array([],dtype=float)
        self.targets_=np.array([],dtype=int)
self.verbose=verbose
def _add_center(self,center,radius,target):
try:
center=center.toarray() # deal with sparse
except AttributeError:
pass
        center=np.array(center,dtype=float)
        radius=np.array([radius],dtype=float)
        target=np.array([target],dtype=int)
if len(self.centers_)==0:
self.centers_=center
self.targets_=target
self.radii_=radius
else:
self.centers_=np.vstack( (self.centers_,center) )
self.targets_=np.concatenate( (self.targets_,target) )
self.radii_=np.concatenate( (self.radii_,radius) )
def fit(self, X, y):
X,y=check_X_y(X,y)
# X, y = check_arrays(X, y, sparse_format="csr")
# y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
classes = np.unique(y)
self.classes_ = classes
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has fewer than 2 classes')
if len(self.centers_)>0:
assert len(self.centers_[0])==n_features
radii=[]
count=[]
# first pass - only need the radii, because the vectors and the targets are already stored
pass_number=0
i=0
for v,t in zip(X,y):
v=v.reshape(1, -1)
D=pairwise_distances(v,X).ravel()
r=max(D[y!=t].min()-1e-10,1e-10)
radii.append(r)
within=D[y==t]<=r
count.append(within.sum())
i+=1
radii=np.array(radii)
count=np.array(count)
# second pass
for v,t in zip(X,y): # Go through all of the data points
#Select the sphere that contains that point,
# and the largest number of other points,
# and add it to the final spheres list
v=v.reshape(1, -1)
D=pairwise_distances(v,X).ravel()
within_centers=(D<=radii)
matched=(t==y) & (within_centers)
idx=np.arange(len(y))
idx_matched=idx[matched]
best=idx_matched[np.argmax(count[matched])]
self._add_center(X[best],radii[best],y[best])
pass_number+=1
def predict(self,X):
X = check_array(X)
if len(self.centers_)==0:
raise AttributeError("Model has not been trained yet.")
result=[]
for vector in X:
vector=vector.reshape(1, -1)
D=pairwise_distances(vector, self.centers_, metric=self.metric)/self.radii_
result.append(self.targets_[D.argmin()])
return np.array(result)
class CSC(CSCsk,GenericClassifier):
def __init__(self, **kwargs):
CSCsk.__init__(self, **kwargs)
self.equivalent={'centers':'centers_',
'radii':'radii_',
'targets':'targets_'}
self.__dict__.update(self.equivalent)
def fit(self,*args,**kwargs):
CSCsk.fit(self,*args,**kwargs)
for name in self.equivalent:
super(CSC,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
def plot_centers(self):
colors=utils.bold_colors
for c,r,t in zip(self.centers_,self.radii_,self.targets_):
pl.plot(c[0],c[1],'*',color=colors[t])
angle=np.linspace(0,2*np.pi,100)
for c,r,t in zip(self.centers_,self.radii_,self.targets_):
xd=np.cos(angle)*r + c[0]
yd=np.sin(angle)*r + c[1]
pl.plot(xd,yd,'-',color=colors[t])
pl.axis('equal')
# from http://danielfrg.com/blog/2013/07/03/basic-neural-network-python/
from scipy import optimize
class NN_1HLsk(BaseEstimator, ClassifierMixin):
def __init__(self, hidden_layer_size=25, reg_lambda=0, epsilon_init=0.12, opti_method='TNC', maxiter=500):
self.reg_lambda = reg_lambda
self.epsilon_init = epsilon_init
self.hidden_layer_size = hidden_layer_size
self.activation_func = self.sigmoid
self.activation_func_prime = self.sigmoid_prime
self.method = opti_method
self.maxiter = maxiter
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def sigmoid_prime(self, z):
sig = self.sigmoid(z)
return sig * (1 - sig)
def sumsqr(self, a):
return np.sum(a ** 2)
def rand_init(self, l_in, l_out):
return np.random.rand(l_out, l_in + 1) * 2 * self.epsilon_init - self.epsilon_init
def pack_thetas(self, t1, t2):
return np.concatenate((t1.reshape(-1), t2.reshape(-1)))
def unpack_thetas(self, thetas, input_layer_size, hidden_layer_size, num_labels):
t1_start = 0
t1_end = hidden_layer_size * (input_layer_size + 1)
t1 = thetas[t1_start:t1_end].reshape((hidden_layer_size, input_layer_size + 1))
t2 = thetas[t1_end:].reshape((num_labels, hidden_layer_size + 1))
return t1, t2
def _forward(self, X, t1, t2):
m = X.shape[0]
ones = None
if len(X.shape) == 1:
ones = np.array(1).reshape(1,)
else:
ones = | np.ones(m) | numpy.ones |
'''
script for generating the MTL and fiberassign on DR9SV imaging.
'''
import os
import glob
import h5py
import numpy as np
import numpy.lib.recfunctions as rfn
import fitsio
import healpy as hp
from astropy.table import Table
from pydl.pydlutils.spheregroup import spherematch
# -- desitarget --
from desitarget.targets import calc_priority, main_cmx_or_sv, set_obsconditions
from desitarget.sv1.sv1_targetmask import desi_mask, bgs_mask, mws_mask
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
if os.environ['NERSC_HOST'] != 'cori':
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
dir_dat = '/global/cscratch1/sd/chahah/feasibgs/survey_validation/'
dir_cfs = '/global/cfs/cdirs/desi/users/chahah/'
if not os.path.isdir(dir_dat):
dir_dat = '/Users/ChangHoon/data/feasiBGS/survey_validation/'
f_svfields = 'BGS_SV_30_3x_superset60_Apr2020v2.fits'
######################################################################
# constructing MTLs
######################################################################
def mtl_dr9sv(seed=0, clobber=False):
''' make MTL using DR9SV imaging
'''
np.random.seed(seed)
#########################################################################
# compile sv tiles
#########################################################################
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields)) # new SV tiles
print('%i BGS SV tiles' % len(sv['RA']))
# get SV tiles *outside* of the DR9 SV imaging region
in_dr9 = _in_DR9_SVregion(sv['RA'], sv['DEC'])
print('%i tiles outside of DR9' % np.sum(~in_dr9))
#########################################################################
# compile targets and match to truth table and SN host
#########################################################################
# read targets from DR9SV and DR8 cut out
ftargets = [
'sv1-targets-dr9-hp-X.spec_truth.sn_host.fits',
'sv1-targets-dr8.sv_cutout.spec_truth.sn_host.fits'
]
ntargets = len(ftargets)
for i, _ftarget in enumerate(ftargets):
ftarget = os.path.join(dir_dat, 'sv.spec_truth', _ftarget)
if not os.path.isfile(ftarget) or clobber:
# read target files with truth tables
_f = os.path.join(dir_dat, 'sv.spec_truth',
_ftarget.replace('.spec_truth.sn_host.fits', '.spec_truth.fits'))
__f = os.path.join(dir_dat, 'sv.spec_truth',
_ftarget.replace('.spec_truth.sn_host.fits', '.fits'))
if not os.path.isfile(_f) or clobber:
print('... matching %s to truth table' % __f)
_target = fitsio.read(os.path.join(dir_dat, __f))
target = match2spectruth(_target)
fitsio.write(_f, target, clobber=True)
else:
target = fitsio.read(_f)
print('... matching %s to SN host' % _ftarget)
target = match2snhost(target)
fitsio.write(ftarget, target, clobber=True)
else:
print('... reading %s targets' % ftarget)
target = fitsio.read(ftarget)
# construct MTLs for set of targets
mtl = make_mtl(target, seed=seed)
fmtl = os.path.join(dir_dat, 'mtl',
'mtl.bgs.dr9sv.%iof%i.seed%i.fits' % (i+1, ntargets, seed))
mtl.write(fmtl, format='fits', overwrite=True)
return None
def make_mtl(targets, seed=None):
''' construct mtl given targets.
notes:
-----
    * At the moment, highest priority is set for targets that have spectroscopic
        redshifts or that host supernovae.
'''
assert 'IN_SPECTRUTH' in targets.dtype.names
assert 'HAS_SN' in targets.dtype.names
np.random.seed(seed)
# determine whether the input targets are main survey, cmx or SV.
colnames, masks, survey = main_cmx_or_sv(targets)
# ADM set the first column to be the "desitarget" column
desi_target, desi_mask = colnames[0], masks[0]
n = len(targets)
# ADM if the input target columns were incorrectly called NUMOBS or PRIORITY
# ADM rename them to NUMOBS_INIT or PRIORITY_INIT.
for name in ['NUMOBS', 'PRIORITY']:
targets.dtype.names = [name+'_INIT' if col == name else col for col in targets.dtype.names]
# ADM if a redshift catalog was passed, order it to match the input targets
# ADM catalog on 'TARGETID'.
ztargets = Table()
ztargets['TARGETID'] = targets['TARGETID']
ztargets['NUMOBS'] = np.zeros(n, dtype=np.int32)
ztargets['Z'] = -1 * np.ones(n, dtype=np.float32)
ztargets['ZWARN'] = -1 * np.ones(n, dtype=np.int32)
# ADM if zcat wasn't passed, there is a one-to-one correspondence
# ADM between the targets and the zcat.
zmatcher = np.arange(n)
# ADM extract just the targets that match the input zcat.
targets_zmatcher = targets[zmatcher]
# ADM use passed value of NUMOBS_INIT instead of calling the memory-heavy calc_numobs.
# ztargets['NUMOBS_MORE'] = np.maximum(0, calc_numobs(ztargets) - ztargets['NUMOBS'])
ztargets['NUMOBS_MORE'] = np.maximum(0, targets_zmatcher['NUMOBS_INIT'] - ztargets['NUMOBS'])
# ADM need a minor hack to ensure BGS targets are observed once
# ADM (and only once) every time during the BRIGHT survey, regardless
# ADM of how often they've previously been observed. I've turned this
# ADM off for commissioning. Not sure if we'll keep it in general.
# ADM only if we're considering bright survey conditions.
ii = targets_zmatcher[desi_target] & desi_mask.BGS_ANY > 0
ztargets['NUMOBS_MORE'][ii] = 1
# ADM assign priorities, note that only things in the zcat can have changed priorities.
# ADM anything else will be assigned PRIORITY_INIT, below.
priority = calc_priority(targets_zmatcher, ztargets, 'BRIGHT')
# set subpriority in order to tune the SV target densities
# BGS target classes: BRIGHT, FAINT, EXTFAINT, FIBERMAG, LOWQ
# initial DR8 target density ---> desired density
# BRIGHT: 882.056980 ---> 540 = 63% 0.62 - 1
# FAINT: 746.769486 ---> 300 = 41% 0.41 - 1
# EXTFAINT: 623.470673 ---> 150 = 24% 0 - 1
# FIBERMAG: 207.534409 ---> 150 = 71% 0.66 - 1
# LOW Q: 55.400240 ---> 60 = 100% 0.76 - 1
# (depending on imaging LOWQ varies a lot! DES~50/deg2, DECALS~114/deg2, North~185/deg2)
# bgs bitmask
bitmask_bgs = targets['SV1_BGS_TARGET']
has_spec = targets['IN_SPECTRUTH'] # objects in spectroscopic truth table
has_sn = targets['HAS_SN']
# BGS objects with spectra or hosts SN
special = np.zeros(n).astype(bool) #(has_spec | has_sn)
bgs_special = special & (bitmask_bgs).astype(bool)
bgs_all = ~special & (bitmask_bgs).astype(bool)
bgs_bright = ~special & (bitmask_bgs & bgs_mask.mask('BGS_BRIGHT')).astype(bool)
bgs_faint = ~special & (bitmask_bgs & bgs_mask.mask('BGS_FAINT')).astype(bool)
bgs_extfaint = ~special & (bitmask_bgs & bgs_mask.mask('BGS_FAINT_EXT')).astype(bool) # extended faint
bgs_fibmag = ~special & (bitmask_bgs & bgs_mask.mask('BGS_FIBMAG')).astype(bool) # fiber magn limited
bgs_lowq = ~special & (bitmask_bgs & bgs_mask.mask('BGS_LOWQ')).astype(bool) # low quality
n_bgs = np.sum(bgs_special) + np.sum(bgs_all)
n_bgs_special = np.sum(bgs_special)
n_bgs_bright = np.sum(bgs_bright)
n_bgs_faint = np.sum(bgs_faint)
n_bgs_extfaint = np.sum(bgs_extfaint)
n_bgs_fibmag = np.sum(bgs_fibmag)
n_bgs_lowq = np.sum(bgs_lowq)
# target classes with spectra
n_bgs_sp, n_bgs_bright_sp, n_bgs_faint_sp, n_bgs_extfaint_sp, n_bgs_fibmag_sp, n_bgs_lowq_sp = \
bgs_targetclass(targets['SV1_BGS_TARGET'][special])
#f_special = 1. # keep 100%
#f_bright = 0.45 / n_bgs_bright
#f_faint = 0.25 / n_bgs_faint
#f_extfaint = 0.125 / n_bgs_extfaint
#f_fibmag = 0.125 / n_bgs_fibmag
#f_lowq = 0.05 / n_bgs_lowq
f_bright = 540. / (n_bgs_bright + n_bgs_bright_sp)
f_faint = 300. / (n_bgs_faint + n_bgs_faint_sp)
f_extfaint = 150. / (n_bgs_extfaint + n_bgs_extfaint_sp)
f_fibmag = 150. / (n_bgs_fibmag + n_bgs_fibmag_sp)
f_lowq = 60. / (n_bgs_lowq + n_bgs_lowq_sp)
f_ref = np.min([f_bright, f_faint, f_extfaint, f_fibmag, f_lowq])
r_special = 1.#(1. - f_ref / f_special)
r_bright = (1. - f_ref / f_bright)
r_faint = (1. - f_ref / f_faint)
r_extfaint = (1. - f_ref / f_extfaint)
r_fibmag = (1. - f_ref / f_fibmag)
r_lowq = (1. - f_ref / f_lowq)
subpriority = np.random.uniform(0., 1., n)
subpriority[bgs_special] = np.random.uniform(r_special, 1., np.sum(bgs_special))
subpriority[bgs_bright] = np.random.uniform(r_bright, 1., np.sum(bgs_bright))
subpriority[bgs_faint] = np.random.uniform(r_faint, 1., np.sum(bgs_faint))
subpriority[bgs_extfaint] = np.random.uniform(f_extfaint, 1, np.sum(bgs_extfaint))
subpriority[bgs_fibmag] = np.random.uniform(r_fibmag, 1, np.sum(bgs_fibmag))
subpriority[bgs_lowq] = np.random.uniform(r_lowq, 1, np.sum(bgs_lowq))
_sample = (bitmask_bgs).astype(bool) & (subpriority > 0.943)#np.random.uniform(0., 1., n))
_n_bgs, _n_bgs_bright, _n_bgs_faint, _n_bgs_extfaint, _n_bgs_fibmag, _n_bgs_lowq = \
bgs_targetclass(targets['SV1_BGS_TARGET'][_sample])
# set priority of all BGS targets equal
priority[bgs_all] = 2000
print('---------------------------------')
print('total n_bgs = %i' % n_bgs)
print('approx. target class fractions')
print(' orig frac exp. frac (target frac)')
print(' ------------------------------------')
#print(' BGS special %i %.3f' % (n_bgs_special, n_bgs_special/n_bgs))
#print(' BGS Bright %i %.3f (0.45)' % (n_bgs_bright, n_bgs_bright/n_bgs))
#print(' BGS Faint %i %.3f (0.25)' % (n_bgs_faint, n_bgs_faint/n_bgs))
#print(' BGS Ext.Faint %i %.3f (0.125)' % (n_bgs_extfaint, n_bgs_extfaint/n_bgs))
#print(' BGS Fib.Mag %i %.3f (0.125)' % (n_bgs_fibmag, n_bgs_fibmag/n_bgs))
#print(' BGS Low Q. %i %.3f (0.05)' % (n_bgs_lowq, n_bgs_lowq/n_bgs))
print(' BGS Bright %.3f %.3f (0.45)' % (n_bgs_bright/n_bgs, _n_bgs_bright/_n_bgs))
print(' BGS Faint %.3f %.3f (0.25)' % (n_bgs_faint/n_bgs, _n_bgs_faint/_n_bgs))
print(' BGS Ext.Faint %.3f %.3f (0.125)' % (n_bgs_extfaint/n_bgs, _n_bgs_extfaint/_n_bgs))
print(' BGS Fib.Mag %.3f %.3f (0.125)' % (n_bgs_fibmag/n_bgs, _n_bgs_fibmag/_n_bgs))
print(' BGS Low Q. %.3f %.3f (0.05)' % (n_bgs_lowq/n_bgs, _n_bgs_lowq/_n_bgs))
# If priority went to 0==DONOTOBSERVE or 1==OBS or 2==DONE, then NUMOBS_MORE should also be 0.
# ## mtl['NUMOBS_MORE'] = ztargets['NUMOBS_MORE']
#ii = (priority <= 2)
#log.info('{:d} of {:d} targets have priority zero, setting N_obs=0.'.format(np.sum(ii), n))
#ztargets['NUMOBS_MORE'][ii] = 0
# - Set the OBSCONDITIONS mask for each target bit.
obsconmask = set_obsconditions(targets)
# ADM set up the output mtl table.
mtl = Table(targets)
mtl.meta['EXTNAME'] = 'MTL'
# ADM any target that wasn't matched to the ZCAT should retain its
# ADM original (INIT) value of PRIORITY and NUMOBS.
mtl['NUMOBS_MORE'] = mtl['NUMOBS_INIT']
mtl['PRIORITY'] = mtl['PRIORITY_INIT']
# ADM now populate the new mtl columns with the updated information.
mtl['OBSCONDITIONS'] = obsconmask
mtl['PRIORITY'][zmatcher] = priority
mtl['SUBPRIORITY'][zmatcher] = subpriority
mtl['NUMOBS_MORE'][zmatcher] = ztargets['NUMOBS_MORE']
# Filtering can reset the fill_value, which is just wrong wrong wrong
# See https://github.com/astropy/astropy/issues/4707
# and https://github.com/astropy/astropy/issues/4708
mtl['NUMOBS_MORE'].fill_value = -1
return mtl
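# A hedged numerical sketch (not part of the original script) of the subpriority windows built
# in make_mtl above, using the DR8 densities quoted in its comments (BRIGHT ~882/deg2 -> 540/deg2,
# EXTFAINT ~623/deg2 -> 150/deg2); the numbers are illustrative and ignore the other classes.
def _example_subpriority_window():
    f_bright = 540. / 882.056980        # keep ~61% of BGS_BRIGHT
    f_extfaint = 150. / 623.470673      # keep ~24% of BGS_FAINT_EXT
    f_ref = min(f_bright, f_extfaint)   # the most heavily down-sampled class sets the reference
    r_bright = 1. - f_ref / f_bright    # ~0.61: lower edge of the BRIGHT subpriority window
    return r_bright                     # SUBPRIORITY is drawn uniformly between r_bright and 1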
def match2spectruth(targets):
''' match target table to spectroscopic truth table
'''
assert 'BRICKID' in targets.dtype.names
assert 'BRICK_OBJID' in targets.dtype.names
isbgs = (targets['SV1_BGS_TARGET']).astype(bool)
targ_brickid = targets['BRICKID'][isbgs]
targ_objid = targets['BRICK_OBJID'][isbgs]
# read in spectroscopic truth table
spectruth = h5py.File(os.path.join(dir_dat, 'bgs_truth_table.hdf5'), 'r')
st_brickid = spectruth['BRICKID'][...]
st_objid = spectruth['OBJID'][...]
in_spectruth = np.zeros(targets.shape[0]).astype(bool)
gama_cataid = np.repeat(-999, targets.shape[0])
#in_spectruth.dtype.names = ['ID', 'IN_SPECTRUTH']
ii = np.arange(targets.shape[0])
indices, cataid = [], []
uniq_brickid = np.unique(targ_brickid)
for brickid in uniq_brickid:
in_targbrick = (targ_brickid == brickid)
in_specbrick = (st_brickid == brickid)
#in_spec = np.isin(targ_objid[in_targbrick], st_objid[in_specbrick])
_, in_spec, in_targ = np.intersect1d(targ_objid[in_targbrick], st_objid[in_specbrick],
return_indices=True)
if len(in_spec) > 0:
#print(len(in_spec))
#print(targets['RA'][isbgs][in_targbrick][in_spec] - spectruth['RA'][...][in_specbrick][in_targ])
#print(targets['DEC'][isbgs][in_targbrick][in_spec] - spectruth['DEC'][...][in_specbrick][in_targ])
#print(spectruth['GAMA_CATAID'][...][in_specbrick][in_targ])
indices.append(ii[isbgs][in_targbrick][in_spec])
cataid.append(spectruth['GAMA_CATAID'][...][in_specbrick][in_targ])
in_spectruth[np.concatenate(indices)] = True
gama_cataid[np.concatenate(indices)] = np.concatenate(cataid)
print('%i BGS SV targets have spectra' % np.sum(in_spectruth))
targets = rfn.append_fields(targets, ['IN_SPECTRUTH'], [in_spectruth])
targets = rfn.append_fields(targets, ['GAMA_CATAID'], [gama_cataid])
return targets
def match2snhost(targets):
''' match target table to supernovae hosts compiled by Segev
'''
assert 'BRICKID' in targets.dtype.names
assert 'BRICK_OBJID' in targets.dtype.names
isbgs = (targets['SV1_BGS_TARGET']).astype(bool)
targ_ra = targets['RA'][isbgs]
targ_dec = targets['DEC'][isbgs]
# read in supernovae hosts
snhost = fitsio.read(os.path.join(dir_dat, 'snhost_dr8_target.fits'))
sn_ra = snhost['RA']
sn_dec = snhost['DEC']
has_sn = np.zeros(targets.shape[0]).astype(bool)
# spherematch compiled hosts
m_targ, m_sn, d_match = spherematch(targ_ra, targ_dec, sn_ra, sn_dec, 0.000277778, maxmatch=1)
has_sn[m_targ] = True
print('%i BGS SV targets are supernova hosts' % np.sum(has_sn))
targets = rfn.append_fields(targets, ['HAS_SN'], [has_sn])
return targets
def _in_DR9_SVregion(ras, decs):
''' DR9 imaging SV region listed in
https://desi.lbl.gov/trac/wiki/TargetSelectionWG/SVFields_for_DR9
'''
sv_regions = {}
sv_regions['01_s82'] = [30.,40.,-7.,2.]
sv_regions['02_egs'] = [210.,220.,50.,55.]
sv_regions['03_gama09'] = [129.,141.,-2.,3.]
sv_regions['04_gama12'] = [175.,185.,-3.,2.]
sv_regions['05_gama15'] = [212.,222.,-2.,3.]
sv_regions['06_overlap'] = [135.,160.,30.,35.]
sv_regions['07_refnorth'] = [215.,230.,41.,46.]
sv_regions['08_ages'] = [215.,220.,30.,40.]
sv_regions['09_sagittarius'] = [200.,210.,5.,10.]
sv_regions['10_highebv_n'] = [140.,150.,65.,70.]
sv_regions['11_highebv_s'] = [240.,245.,20.,25.]
sv_regions['12_highstardens_n'] = [273.,283.,40.,45.]
sv_regions['13_highstardens_s'] = [260.,270.,15.,20.]
n_tiles = len(ras)
in_dr9 = np.zeros(n_tiles).astype(bool)
for i, ra, dec in zip(range(n_tiles), ras, decs):
for k in sv_regions.keys():
if ((ra >= sv_regions[k][0]) & (ra <= sv_regions[k][1]) &
(dec >= sv_regions[k][2]) & (dec <= sv_regions[k][3])):
in_dr9[i] = True
return in_dr9
def bgs_targetclass(bitmask_bgs):
    n_bgs = float(np.sum(bitmask_bgs.astype(bool)))
n_bgs_bright = np.sum((bitmask_bgs & bgs_mask.mask('BGS_BRIGHT')).astype(bool))
n_bgs_faint = np.sum((bitmask_bgs & bgs_mask.mask('BGS_FAINT')).astype(bool))
n_bgs_extfaint = np.sum((bitmask_bgs & bgs_mask.mask('BGS_FAINT_EXT')).astype(bool)) # extended faint
n_bgs_fibmag = np.sum((bitmask_bgs & bgs_mask.mask('BGS_FIBMAG')).astype(bool)) # fiber magnitude limited
n_bgs_lowq = np.sum((bitmask_bgs & bgs_mask.mask('BGS_LOWQ')).astype(bool)) # low quality
return n_bgs, n_bgs_bright, n_bgs_faint, n_bgs_extfaint, n_bgs_fibmag, n_bgs_lowq
def check_targets_dr9sv():
    ''' check that the compiled DR9SV targets cover the BGS SV tiles
    '''
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields)) # new SV tiles
print('%i BGS SV tiles' % len(sv['RA']))
ftargets = ['sv1-targets-dr9-hp-X.fits', 'sv1-targets-dr8.sv_cutout.fits']
ntargets = len(ftargets)
# plot confirming coverage
fig = plt.figure(figsize=(10,7))
sub = fig.add_subplot(111)
targs = []
for i, _ftarget in enumerate(ftargets):
ftarget = os.path.join(dir_dat, 'sv.spec_truth',
_ftarget.replace('.fits', '.spec_truth.sn_host.fits'))
targ = fitsio.read(ftarget)
sub.scatter(targ['RA'][::100], targ['DEC'][::100], c='k')
targs.append(targ)
for ra, dec in zip(sv['RA'], sv['DEC']):
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlabel('RA', fontsize=25)
sub.set_xlim(0., 360.)
sub.set_ylabel('DEC', fontsize=25)
sub.set_ylim(-40., 85)
fig.savefig(os.path.join(dir_dat, 'sv.spec_truth',
'check_dr9sv_targets.png'), bbox_inches='tight')
# plot confirming coverage tile by tile
fig = plt.figure(figsize=(20,12))
bkgd = fig.add_subplot(111, frameon=False)
for i, ra, dec in zip(range(len(sv['RA'])), sv['RA'], sv['DEC']):
sub = fig.add_subplot(6,10,i+1)
for targ in targs:
sub.scatter(targ['RA'][::100], targ['DEC'][::100], c='k', s=1)
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlim(ra - 2.5, ra + 2.5)
sub.set_ylim(dec - 2.5, dec + 2.5)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
bkgd.set_xlabel(r'RA', labelpad=7, fontsize=25)
bkgd.set_ylabel(r'DEC', labelpad=7, fontsize=25)
fig.savefig(os.path.join(dir_dat, 'sv.spec_truth',
'check_dr9sv_targets.tile_by_tile.png'), bbox_inches='tight')
return None
def check_mtl_dr9sv():
''' check the target fraction of the MTLs
'''
mtls = []
for fmtl in glob.glob(os.path.join(dir_dat, 'mtl', 'mtl*.fits')):
print('--- %s ---' % fmtl)
# read MTL
mtl = fitsio.read(fmtl)
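# SUBPRIORITY is a random number in [0, 1), so the cut below keeps a random
# ~5.7% of targets (assumed here to emulate the fiber-assignment rate).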
assigned = mtl['SUBPRIORITY'] > 0.943
n_bgs, n_bgs_bright, n_bgs_faint, n_bgs_extfaint, n_bgs_fibmag, n_bgs_lowq = \
bgs_targetclass(mtl['SV1_BGS_TARGET'][assigned])
print('total n_bgs = %i' % n_bgs)
print(' nobj frac (expected frac)')
print(' ------------------------------------')
print(' BGS Bright %i %.3f (0.45)' % (n_bgs_bright, n_bgs_bright/n_bgs))
print(' BGS Faint %i %.3f (0.25)' % (n_bgs_faint, n_bgs_faint/n_bgs))
print(' BGS Ext.Faint %i %.3f (0.125)' % (n_bgs_extfaint, n_bgs_extfaint/n_bgs))
print(' BGS Fib.Mag %i %.3f (0.125)' % (n_bgs_fibmag, n_bgs_fibmag/n_bgs))
print(' BGS Low Q. %i %.3f (0.05)' % (n_bgs_lowq, n_bgs_lowq/n_bgs))
mtls.append(mtl)
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields))
# plot confirming coverage
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
for mtl in mtls:
sub.scatter(mtl['RA'][::100], mtl['DEC'][::100], c='k', s=1)
for ra, dec in zip(sv['RA'], sv['DEC']):
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlabel('RA', fontsize=25)
sub.set_xlim(0., 360.)
sub.set_ylabel('DEC', fontsize=25)
sub.set_ylim(-40., 85)
fig.savefig(os.path.join(dir_dat, 'mtl', 'mtl_dr9sv_check.png'),
bbox_inches='tight')
# plot confirming coverage tile by tile
fig = plt.figure(figsize=(20,12))
bkgd = fig.add_subplot(111, frameon=False)
for i, ra, dec in zip(range(len(sv['RA'])), sv['RA'], sv['DEC']):
sub = fig.add_subplot(6,10,i+1)
for mtl in mtls:
sub.scatter(mtl['RA'][::100], mtl['DEC'][::100], c='k', s=1)
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlim(ra - 2.5, ra + 2.5)
sub.set_ylim(dec - 2.5, dec + 2.5)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
bkgd.set_xlabel(r'RA', labelpad=7, fontsize=25)
bkgd.set_ylabel(r'DEC', labelpad=7, fontsize=25)
fig.savefig(os.path.join(dir_dat, 'mtl', 'mtl_dr9sv_check.tile_by_tile.png'),
bbox_inches='tight')
return None
def _dr8_target_cutouts():
''' combine dr8 target files for SV tiles that are outside of the dr9sv
region.
* April 9, 2020: Turns out some of the BGS SV fields are chopped up!
'''
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields)) # new SV tiles
print('%i BGS SV tiles' % len(sv['RA']))
# get SV tiles *outside* of the DR9 SV imaging region
in_dr9 = _in_DR9_SVregion(sv['RA'], sv['DEC'])
print('%i tiles outside of DR9' % np.sum(~in_dr9))
# for tiles outside of DR9SV read all dr8 healpix that sufficiently covers
# the tiles
ras, decs = [], []
for ra, dec in zip(sv['RA'][~in_dr9], sv['DEC'][~in_dr9]):
corners_ra = [ra - 2., ra + 2., ra + 2., ra - 2.]
corners_dec = [dec + 2., dec + 2., dec - 2., dec - 2.]
ras += corners_ra
decs += corners_dec
phi = np.deg2rad(ras)
import os
import glob
import logging
import torch
import numpy as np
from .utils import nested_getattr, nested_setattr, named_output
logger = logging.getLogger(__name__)
class BaseAlgo(object):
"""
Common methods for model checkpointing in pytorch.
Attributes
----------
checkpoint_directory : str
The directory where checkpoints are stored. If not set, the checkpoint
directory will be taken from ``self.data_logger.logdir``.
data_logger : object
num_steps : int
Total number of training steps. It's assumed that subclasses will
increment this in their training loops.
checkpoint_interval : int
Interval between subsequent checkpoints
max_checkpoints : int
Total number of checkpoints to maintain in the logging directory.
Older checkpoints that exceed this number are deleted.
checkpoint_attribs : list
List of attributes on the algorithm that ought to be saved at each
checkpoint. This should be overridden by subclasses.
Note that this implicitly contains ``num_steps``.
"""
data_logger = None
num_steps = 0
checkpoint_interval = 100000
max_checkpoints = 3
checkpoint_attribs = []
_last_checkpoint = -1
_checkpoint_directory = None
@property
def checkpoint_directory(self):
return self._checkpoint_directory or (
self.data_logger and self.data_logger.logdir)
@checkpoint_directory.setter
def checkpoint_directory(self, value):
self._checkpoint_directory = value
def get_all_checkpoints(self):
"""
Return a sorted list of all checkpoints in the log directory.
"""
chkpt_dir = self.checkpoint_directory
if not chkpt_dir:
return []
files = glob.glob(os.path.join(chkpt_dir, 'checkpoint-*.data'))
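# File names look like 'checkpoint-<num_steps>.data'; the slice below strips
# the 'checkpoint-' prefix and '.data' suffix to recover the step count.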
def step_from_checkpoint(f):
try:
return int(os.path.basename(f)[11:-5])
except ValueError:
return -1
files = [f for f in files if step_from_checkpoint(f) >= 0]
return sorted(files, key=step_from_checkpoint)
def save_checkpoint_if_needed(self):
if self._last_checkpoint < 0:
self.save_checkpoint()
elif self._last_checkpoint + self.checkpoint_interval < self.num_steps:
self.save_checkpoint()
else:
pass # already have a recent checkpoint
def save_checkpoint(self):
chkpt_dir = self.checkpoint_directory
if not chkpt_dir:
return
data = {'num_steps': self.num_steps}
for attrib in self.checkpoint_attribs:
try:
val = nested_getattr(self, attrib)
except AttributeError:
logger.error("Cannot save attribute '%s'", attrib)
continue
if hasattr(val, 'state_dict'):
val = val.state_dict()
data[attrib] = val
path = os.path.join(chkpt_dir, 'checkpoint-%i.data' % self.num_steps)
torch.save(data, path)
logger.info("Saving checkpoint: '%s'", path)
old_checkpoints = self.get_all_checkpoints()
for old_checkpoint in old_checkpoints[:-self.max_checkpoints]:
os.remove(old_checkpoint)
self._last_checkpoint = self.num_steps
def load_checkpoint(self, checkpoint_name=None):
chkpt_dir = self.checkpoint_directory
if checkpoint_name and os.path.dirname(checkpoint_name):
# Path includes a directory.
# Treat it as a complete path name and ignore chkpt_dir
path = checkpoint_name
elif chkpt_dir and checkpoint_name:
path = os.path.join(chkpt_dir, checkpoint_name)
else:
checkpoints = self.get_all_checkpoints()
path = checkpoints and checkpoints[-1]
if not path or not os.path.exists(path):
return
logger.info("Loading checkpoint: %s", path)
if torch.cuda.is_available():
checkpoint = torch.load(path)
else:
checkpoint = torch.load(path, map_location=torch.device('cpu'))
for key, val in checkpoint.items():
orig_val = nested_getattr(self, key, None)
if hasattr(orig_val, 'load_state_dict'):
orig_val.load_state_dict(val)
else:
try:
nested_setattr(self, key, val)
except AttributeError:
logger.error("Cannot load key '%s'", key)
self._last_checkpoint = self.num_steps
def tensor(self, data, dtype):
"""
Shorthand for creating a tensor with the current compute device.
Note that this is *much* faster than passing data in list form to
``torch.tensor`` directly, at least as of torch v1.3.
See https://github.com/pytorch/pytorch/issues/13918 for more details.
"""
data = np.asanyarray(data)
return torch.as_tensor(data, device=self.compute_device, dtype=dtype)
def obs_for_envs(self, envs):
"""
Return current observations and agent ids for a list of environments.
If the environments are multi-agent, then the number of returned
observations will not generally match the number of environments because
there can be more than (or fewer than) one agent per environment.
This should be used in conjunction with `act_on_envs()`.
Note that together they add attributes `last_obs`, `last_done`, and
`num_resets` to the environment itself.
"""
obs_list = []
active = []
agent_ids = []
for env in envs:
if hasattr(env, 'last_obs'):
obs = env.last_obs
done = env.last_done
else:
obs = env.reset()
if getattr(env, 'single_agent', True):
obs = np.asanyarray(obs)[np.newaxis]
env.last_done = done = np.tile(False, len(obs))
env.num_resets = 0
for k in range(len(obs)):
agent_ids.append((id(env), env.num_resets, k))
obs_list.append(obs)
active.append(~done)
obs_list = np.concatenate(obs_list)
active = np.concatenate(active)
# Make an array of agent ids, but keep each element of the array
# a tuple so that they can be used as dictionary keys.
agent_id_arr = np.zeros(len(agent_ids), dtype=object)
agent_id_arr[:] = agent_ids
return obs_list[active], agent_id_arr[active]
def act_on_envs(self, envs, actions):
"""
Return observations, rewards, and done flags for each environment.
The number of actions should match the total number of active agents
in each environment, which should also match the number of observations
returned by `obs_for_envs()`.
This should be used in conjunction with `obs_for_envs()`.
Note that together they add attributes `last_obs`, `last_done`, and
`num_resets` to the environment itself.
"""
obs_list = []
reward_list = []
done_list = []
k = 0
for env in envs:
single_agent = getattr(env, 'single_agent', True)
active = ~env.last_done
num_active = np.sum(active)
if num_active == 0:
continue
active_actions = actions[k:k+num_active]
assert len(active_actions) == num_active
action_shape = (len(active),) + np.asanyarray(active_actions[0]).shape
env_actions = np.zeros_like(active_actions[0], shape=action_shape)
env_actions[active] = active_actions
k += num_active
if single_agent:
obs, reward, done, info = env.step(env_actions[0])
obs = np.asanyarray(obs)[np.newaxis]
reward = np.array([reward])
done = np.array([done])
else:
obs, reward, done, info = env.step(env_actions)
obs_list.append(obs[active])
reward_list.append(reward[active])
done_list.append(done[active])
if np.all(done):
obs = env.reset()
if getattr(env, 'single_agent', True):
obs = np.asanyarray(obs)[np.newaxis]
done = np.tile(False, len(obs))
env.num_resets += 1
env.last_obs = obs
env.last_done = done
return (
np.concatenate(obs_list),
np.concatenate(reward_list),
np.concatenate(done_list),
)
@named_output('obs actions rewards done next_obs agent_ids')
def take_one_step(self, envs):
"""
Take one step in each of the environments.
This returns a set of arrays, with one value for each agent.
Environments can contain more than one agent (or no agents at all),
so the number of items in each array won't generally match the number
of environments.
This function should be implemented by subclasses to execute the
subclass's policy.
Returns
-------
obs : list
actions : list
rewards : list
done : list
Whether or not each environment reached its end this step.
next_obs : list
agent_ids : list
A unique identifier for each agent. This can be used to string
multiple steps together.
"""
# Example:
# obs, agent_ids = self.obs_for_envs(envs)
# (calculate actions from the observations)
# next_obs, rewards, done = self.act_on_envs(envs, actions)
# return obs, actions, rewards, done, next_obs, agent_ids
raise NotImplementedError
def run_episodes(self, envs, num_episodes=None):
"""
Run each environment to completion.
Note that no data is logged in this method. It's instead assumed
that each environment has a wrapper which takes care of the logging.
Parameters
----------
envs : list
List of environments to run in parallel.
num_episodes : int
Total number of episodes to run. Defaults to the same as number
of environments.
"""
if not envs:
return
if num_episodes is None:
num_episodes = len(envs)
num_completed = 0
logger = getattr(envs[0], 'logger', None)
if logger is not None:
logger.reset_summary()
while num_completed < num_episodes:
data = self.take_one_step(envs)
num_in_progress = len(envs)
new_envs = []
for env, done in zip(envs, data.done):
done = np.all(done)
import pandas as pd
import numpy as np
import os
import time
import matplotlib.pyplot as plt
# constants
dinterval = 500
cinterval = 10
dtolerance = 1.0
abdt = 1.0
bedt = 2.0
HUGE = 1000000.0
TEENY_WEENY = 0.000001
# read the .csv files
fi = "log/temps.csv"
di = pd.read_csv(fi)
#plt.plot(di[" time_since_start"], di[" Ambient"], color="blue")
experiments = di["Experiment"].unique()
#experiments = experiments[-1:]
di["mam"], di["bam"] = 0.0, 0.0
di["mab"], di["bab"] = 0.0, 0.0
di["mbe"], di["bbe"] = 0.0, 0.0
di["mmam"] = 0.0
di["mmab"] = 0.0
di["mmbe"] = 0.0
# TODO : if experiments.csv exists, read it and check that we have the required headers
# TODO : if all checks out, only perform experiments that
# dataframe for the experiments
edf = pd.DataFrame({"Experiment" : experiments})
edf["eq_start"] = 0.0
edf["eq_end"] = HUGE
edf["beab_max"] = 0.0
edf["beab_min"] = 0.0
edf["beab_mean"] = 0.0
edf["abam_max"] = 0.0
edf["abam_min"] = 0.0
edf["abam_mean"] = 0.0
edf["ab_slope_max"] = 0.0
edf["be_slope_max"] = 0.0
edf["beab-abam_max"] = 0.0 # difference between differences
edf["beab-abam_min"] = 0.0
edf["beab-abam_mean"] = 0.0
edf["ab_time_10deg"] = 0.0
edf["be_time_10deg"] = 0.0
edf["analyze"] = 1 # 0 - don't, 1 - do, 2 - done and don't do again
for experiment in experiments:
dis = di[di["Experiment"] == experiment]
# loop through the time intervals
abtim = HUGE
betim = HUGE
# prep print which experiment we're trying
for i in range(len(dis)):
if (i < dinterval):
dint = i
else:
dint = dinterval-1
diss = dis.loc[dis.index[i-dint:i+1]]
if (len(diss) < 10):
mam, mab, mbe = 0.0, 0.0, 0.0
bam = np.mean(diss[" Ambient"])
bab = np.mean(diss[" Above"])
bbe = np.mean(diss[" Below"])
else:
mam, bam = np.polyfit(diss[" time_since_start"], diss[" Ambient"], 1)
mab, bab = np.polyfit(diss[" time_since_start"], diss[" Above"], 1)
mbe, bbe = np.polyfit(diss[" time_since_start"], diss[" Below"], 1)
mmam = np.mean(diss[" Ambient"])
mmab = np.mean(diss[" Above"])
mmbe = np.mean(diss[" Below"])
abtims = diss.loc[diss[" Above"] > diss.loc[diss.index[0]][" Above"]+abdt][" time_since_start"]
betims = diss.loc[diss[" Above"] > diss.loc[diss.index[0]][" Above"]+bedt][" time_since_start"]
# find the first index 10 higher than this one
if (len(abtims) > 0):
abtim = np.min([np.float(abtim), abtims[abtims.index[0]] - diss.loc[diss.index[0]][" time_since_start"]])
if (len(betims) > 0):
betim = np.min([betim, betims[betims.index[0]] - diss.loc[diss.index[0]][" time_since_start"]])
# if there's none, return HUGE
abcsum = np.cumsum(diss[" Above"])
becsum = np.cumsum(diss[" Below"])
di.loc[dis.index[i], ["mam", "bam"]] = mam, bam
di.loc[dis.index[i], ["mab", "bab"]] = mab, bab
di.loc[dis.index[i], ["mbe", "bbe"]] = mbe, bbe
di.loc[dis.index[i], ["mmam", "mmab", "mmbe"]] = mmam, mmab, mmbe
# Done : plot difference between below and above
# ambient and above
# Done : plot difference between ambient and below (transparent)
# label all plots
# Done : plot 100 time moving average
# metrics:
# Identify stable period
# below is at least 40 degrees
# 500 moving average slope is below 0.0006
dis = di[di["Experiment"] == experiment]
sinds = dis.index[((dis["mmbe"] > 40) & (np.abs(dis["mbe"]) < 0.0006))]
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
def create_covar_matrix_original(overlap_array,variances):
"""Create the covariance matrix for a single wavelength slice.
As input takes the output of the drizzle class, overlap_array,
and the variances of the individual fibres"""
covarS = 2 # Radius of sub-region to record covariance information - probably
# shouldn't be hard coded, but scaled to drop size in some way
s = np.shape(overlap_array)
if s[2] != len(variances):
raise Exception('Length of variance array must be equal to the number of fibre overlap maps supplied')
#Set up the covariance array
covariance_array = np.zeros((s[0],s[1],(covarS*2)+1,(covarS*2)+1))
if len(np.where(np.isfinite(variances) == True)[0]) == 0:
return covariance_array
#Set up coordinate arrays for the covariance sub-arrays
xB = np.zeros(((covarS*2+1)**2), dtype=int)
yB = np.zeros(((covarS*2+1)**2), dtype=int)
for i in range(covarS*2+1):
for j in range(covarS*2+1):
xB[j+i*(covarS*2+1)] = i
yB[j+i*(covarS*2+1)] = j
xB = xB - covarS
yB = yB - covarS
#Pad overlap_array with covarS blank space in the spatial axis
overlap_array_padded = np.zeros([s[0]+2*covarS,s[1]+2*covarS,s[2]])
overlap_array_padded[covarS:-covarS,covarS:-covarS,:] = overlap_array
overlap_array = overlap_array_padded
#Loop over output pixels
for xA in range(s[0]):
for yA in range(s[1]):
#Loop over each fibre
for f in range(len(variances)):
if np.isfinite(overlap_array[xA+covarS,yA+covarS,f]):
xC = xA +covarS + xB
yC = yA + covarS + yB
a = overlap_array[xA+covarS,yA+covarS,f]*np.sqrt(variances[f])
if np.isfinite(a) == False:
a = 1.0
#except:
# code.interact(local=locals())
b = overlap_array[xC,yC,f]*np.sqrt(variances[f])
b[np.where(np.isfinite(b) == False)] = 0.0
covariance_array[xA,yA,:,:] = covariance_array[xA,yA,:,:] + (a*b).reshape(covarS*2+1,covarS*2+1)
covariance_array[xA,yA,:,:] = covariance_array[xA,yA,:,:]/covariance_array[xA,yA,covarS,covarS]
return covariance_array
def create_covar_matrix_vectorised(overlap_array,variances):
"""Create the covariance matrix for a single wavelength slice.
As input takes the output of the drizzle class, overlap_array,
and the variances of the individual fibres
This has been refactored by Francesco to add 0-th order vectorization.
Reason is:
1) old function `create_covar_matrix` (now `create_covar_matrix_original`)
took >1000s/1400s of the cubing time.
2) three for loops in python = three for loops because python is not a
smart cookie in this respect
So I removed one loop, but we could do more and save additional time.
"""
covarS = 2 # Radius of sub-region to record covariance information - probably
# shouldn't be hard coded, but scaled to drop size in some way
s = np.shape(overlap_array)
if s[2] != len(variances):
raise Exception('Length of variance array must be equal to the number of fibre overlap maps supplied')
#Set up the covariance array
covariance_array = np.zeros((s[0],s[1],(covarS*2)+1,(covarS*2)+1))
if len(np.where(np.isfinite(variances) == True)[0]) == 0:
return covariance_array
#Set up coordinate arrays for the covariance sub-arrays
xB = np.zeros(((covarS*2+1)**2), dtype=int)
yB = np.zeros(((covarS*2+1)**2), dtype=int)
for i in range(covarS*2+1):
for j in range(covarS*2+1):
xB[j+i*(covarS*2+1)] = i
yB[j+i*(covarS*2+1)] = j
xB = xB - covarS
yB = yB - covarS
#Pad overlap_array with covarS blank space in the spatial axis
overlap_array_padded = np.zeros([s[0]+2*covarS,s[1]+2*covarS,s[2]])
overlap_array_padded[covarS:-covarS,covarS:-covarS,:] = overlap_array
overlap_array = overlap_array_padded
#Loop over output pixels
for xA in range(s[0]):
for yA in range(s[1]):
valid = np.where(np.isfinite(overlap_array[xA+covarS,yA+covarS,:]))
if len(valid[0])>0:
xC = xA +covarS + xB
yC = yA + covarS + yB
a = overlap_array[xA+covarS,yA+covarS,valid[0]]*np.sqrt(variances[valid])
a[np.where(~np.isfinite(a))] = 1.0
b = overlap_array[xC,yC,:][:, valid[0]]*np.sqrt(variances[valid])
"""
***************** INTRINSIC ANALYSIS MODULE *********************
Calculates properties of intrinsic surfaces, based on output files of
intrinsic_surface_method.py
********************************************************************
Created 22/2/2018 by <NAME>
Contributors: <NAME>
Last modified 27/2/2018 by <NAME>
"""
import os
import sys
import tables
import numpy as np
from alias.io.hdf5_io import (
make_hdf5,
load_hdf5,
save_hdf5,
shape_check_hdf5,
frame_check_hdf5,
mode_check_hdf5
)
from alias.io.numpy_io import load_npy
from alias.src.conversions import coeff_to_fourier_2
from alias.src.wave_function import (
wave_function,
d_wave_function,
dd_wave_function
)
from .utilities import unit_vector, create_file_name
def make_pos_dxdy(xmol, ymol, coeff, nmol, dim, qm):
"""
Calculate distances and derivatives at each molecular position with
respect to intrinsic surface
Parameters
----------
xmol: float, array_like; shape=(nmol)
Molecular coordinates in x dimension
ymol: float, array_like; shape=(nmol)
Molecular coordinates in y dimension
coeff: float, array_like; shape=(n_waves**2)
Optimised surface coefficients
nmol: int
Number of molecules in simulation
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
qm: int
Maximum number of wave frequencies in Fourier Sum
representing intrinsic surface
Returns
-------
int_z_mol: array_like (float); shape=(nframe, 2, qm+1, nmol)
Molecular distances from intrinsic surface
int_dxdy_mol: array_like (float); shape=(nframe, 4, qm+1, nmol)
First derivatives of intrinsic surface wrt x and y at xmol, ymol
int_ddxddy_mol: array_like (float); shape=(nframe, 4, qm+1, nmol)
Second derivatives of intrinsic surface wrt x and y at xmol, ymol
"""
int_z_mol = np.zeros((qm+1, 2, nmol))
int_dxdy_mol = np.zeros((qm+1, 4, nmol))
int_ddxddy_mol = np.zeros((qm+1, 4, nmol))
tmp_int_z_mol = np.zeros((2, nmol))
tmp_dxdy_mol = np.zeros((4, nmol))
tmp_ddxddy_mol = np.zeros((4, nmol))
for qu in range(qm+1):
if qu == 0:
j = (2 * qm + 1) * qm + qm
f_x = wave_function(xmol, 0, dim[0])
f_y = wave_function(ymol, 0, dim[1])
tmp_int_z_mol[0] += f_x * f_y * coeff[0][j]
tmp_int_z_mol[1] += f_x * f_y * coeff[1][j]
else:
for u in [-qu, qu]:
for v in range(-qu, qu+1):
j = (2 * qm + 1) * (u + qm) + (v + qm)
f_x = wave_function(xmol, u, dim[0])
f_y = wave_function(ymol, v, dim[1])
df_dx = d_wave_function(xmol, u, dim[0])
df_dy = d_wave_function(ymol, v, dim[1])
ddf_ddx = dd_wave_function(xmol, u, dim[0])
ddf_ddy = dd_wave_function(ymol, v, dim[1])
tmp_int_z_mol[0] += f_x * f_y * coeff[0][j]
tmp_int_z_mol[1] += f_x * f_y * coeff[1][j]
tmp_dxdy_mol[0] += df_dx * f_y * coeff[0][j]
tmp_dxdy_mol[1] += f_x * df_dy * coeff[0][j]
tmp_dxdy_mol[2] += df_dx * f_y * coeff[1][j]
tmp_dxdy_mol[3] += f_x * df_dy * coeff[1][j]
tmp_ddxddy_mol[0] += ddf_ddx * f_y * coeff[0][j]
tmp_ddxddy_mol[1] += f_x * ddf_ddy * coeff[0][j]
tmp_ddxddy_mol[2] += ddf_ddx * f_y * coeff[1][j]
tmp_ddxddy_mol[3] += f_x * ddf_ddy * coeff[1][j]
for u in range(-qu+1, qu):
for v in [-qu, qu]:
j = (2 * qm + 1) * (u + qm) + (v + qm)
f_x = wave_function(xmol, u, dim[0])
f_y = wave_function(ymol, v, dim[1])
df_dx = d_wave_function(xmol, u, dim[0])
df_dy = d_wave_function(ymol, v, dim[1])
ddf_ddx = dd_wave_function(xmol, u, dim[0])
ddf_ddy = dd_wave_function(ymol, v, dim[1])
tmp_int_z_mol[0] += f_x * f_y * coeff[0][j]
tmp_int_z_mol[1] += f_x * f_y * coeff[1][j]
tmp_dxdy_mol[0] += df_dx * f_y * coeff[0][j]
tmp_dxdy_mol[1] += f_x * df_dy * coeff[0][j]
tmp_dxdy_mol[2] += df_dx * f_y * coeff[1][j]
tmp_dxdy_mol[3] += f_x * df_dy * coeff[1][j]
tmp_ddxddy_mol[0] += ddf_ddx * f_y * coeff[0][j]
tmp_ddxddy_mol[1] += f_x * ddf_ddy * coeff[0][j]
tmp_ddxddy_mol[2] += ddf_ddx * f_y * coeff[1][j]
tmp_ddxddy_mol[3] += f_x * ddf_ddy * coeff[1][j]
int_z_mol[qu] += tmp_int_z_mol
int_dxdy_mol[qu] += tmp_dxdy_mol
int_ddxddy_mol[qu] += tmp_ddxddy_mol
int_z_mol = np.swapaxes(int_z_mol, 0, 1)
int_dxdy_mol = np.swapaxes(int_dxdy_mol, 0, 1)
int_ddxddy_mol = np.swapaxes(int_ddxddy_mol, 0, 1)
return int_z_mol, int_dxdy_mol, int_ddxddy_mol
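# Illustrative sketch (not part of the original module): the flat coefficient
# index used above maps the 2D wave mode (u, v), with u, v in [-qm, qm], onto a
# vector of length (2*qm + 1)**2. A hypothetical helper making this explicit:
def _example_wave_index(u, v, qm):
    """Return the flat coefficient index j for wave mode (u, v)."""
    n_waves = 2 * qm + 1
    return n_waves * (u + qm) + (v + qm)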
def create_intrinsic_positions_dxdyz(
directory, file_name, nmol, nframe, qm, n0,
phi, dim, recon=0, ow_pos=False):
"""
Calculate distances and derivatives at each molecular position
with respect to intrinsic surface in simulation frame
Parameters
----------
directory: str
File path of directory of alias analysis.
file_name: str
File name of trajectory being analysed.
nmol: int
Number of molecules in simulation
nframe: int
Number of frames in simulation trajectory
qm: int
Maximum number of wave frequencies in Fourier Sum
representing intrinsic surface
n0: int
Maximum number of molecular pivots in intrinsic surface
phi: float
Weighting factor of minimum surface area term in surface
optimisation function
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
recon: bool (default=False)
Whether to use reconstructed surface coefficients
ow_pos: bool (default=False)
Whether to overwrite positions and derivatives (default=False)
"""
print("\n--- Running Intrinsic Positions and Derivatives Routine ---\n")
surf_dir = os.path.join(directory, 'surface')
pos_dir = os.path.join(directory, 'pos')
intpos_dir = os.path.join(directory, 'intpos')
if not os.path.exists(intpos_dir):
os.mkdir(intpos_dir)
file_name_pos = create_file_name(
[file_name, qm, n0, int(1/phi + 0.5), nframe]
)
file_name_coeff = file_name_pos
if recon:
file_name_coeff += '_r'
file_name_pos += '_r'
intpos_data_file = os.path.join(intpos_dir, file_name_pos)
if not os.path.exists(intpos_data_file + "_int_z_mol.hdf5"):
make_hdf5(
intpos_data_file + '_int_z_mol',
(2, qm+1, nmol), tables.Float64Atom())
make_hdf5(
intpos_data_file + '_int_dxdy_mol',
(4, qm+1, nmol), tables.Float64Atom())
make_hdf5(
intpos_data_file + '_int_ddxddy_mol',
(4, qm+1, nmol), tables.Float64Atom())
file_check = False
elif not ow_pos:
"Checking number of frames in current distance files"
try:
file_check = shape_check_hdf5(
intpos_data_file + '_int_z_mol',
(nframe, 2, qm+1, nmol))
file_check *= shape_check_hdf5(
intpos_data_file + '_int_dxdy_mol',
(nframe, 4, qm+1, nmol))
file_check *= shape_check_hdf5(
intpos_data_file + '_int_ddxddy_mol',
(nframe, 4, qm+1, nmol))
except FileNotFoundError:
file_check = False
else:
file_check = False
pos_data_file = os.path.join(pos_dir, file_name)
if not file_check:
xmol = load_npy(
pos_data_file + f'_{nframe}_xmol',
frames=range(nframe))
ymol = load_npy(
pos_data_file + f'_{nframe}_ymol',
frames=range(nframe))
for frame in range(nframe):
"Checking number of frames in int_z_mol file"
frame_check_int_z_mol = frame_check_hdf5(
intpos_data_file + '_int_z_mol', frame)
frame_check_int_dxdy_mol = frame_check_hdf5(
intpos_data_file + '_int_dxdy_mol', frame)
frame_check_int_ddxddy_mol = frame_check_hdf5(
intpos_data_file + '_int_ddxddy_mol', frame)
mode_int_z_mol = mode_check_hdf5(
frame_check_int_z_mol, ow_pos)
mode_int_dxdy_mol = mode_check_hdf5(
frame_check_int_dxdy_mol, ow_pos)
mode_int_ddxddy_mol = mode_check_hdf5(
frame_check_int_ddxddy_mol, ow_pos)
check = mode_int_z_mol or mode_int_dxdy_mol or mode_int_ddxddy_mol
if not check:
sys.stdout.write(
"Calculating molecular distances "
f"and derivatives: frame {frame}\r"
)
sys.stdout.flush()
surf_data_file = os.path.join(surf_dir, file_name_coeff)
coeff = load_hdf5(surf_data_file + '_coeff', frame)
int_z_mol, int_dxdy_mol, int_ddxddy_mol = make_pos_dxdy(
xmol[frame], ymol[frame], coeff, nmol, dim, qm)
save_hdf5(intpos_data_file + '_int_z_mol',
int_z_mol, frame, mode_int_z_mol)
save_hdf5(intpos_data_file + '_int_dxdy_mol',
int_dxdy_mol, frame, mode_int_dxdy_mol)
save_hdf5(intpos_data_file + '_int_ddxddy_mol',
int_ddxddy_mol, frame, mode_int_ddxddy_mol)
def make_int_mol_count(zmol, int_z_mol, nslice, qm, dim):
"""
Creates density histogram
Parameters
----------
zmol: float, array_like; shape=(nmol)
Molecular coordinates in z dimension
int_z_mol: array_like (float); shape=(nframe, 2, qm+1, nmol)
Molecular distances from intrinsic surface
nmol: int
Number of molecules in simulation
nslice: int
Number of bins in density histogram along axis
normal to surface
qm: int
Maximum number of wave frequencies in Fourier Sum
representing intrinsic surface
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
Returns
-------
mol_count_array: int, array_like; shape=(qm+1, nslice, nz)
Number histogram binned by molecular position along
z axis and mean curvature H across qm resolutions
"""
mol_count_array = np.zeros((qm+1, nslice))
for qu in range(qm+1):
temp_mol_count_array = np.zeros((nslice))
int_z1 = int_z_mol[0][qu]
int_z2 = int_z_mol[1][qu]
z1 = zmol - int_z1 + dim[2]
z2 = -(zmol - int_z2) + dim[2]
z1 -= dim[2] * np.array(z1 / dim[2], dtype=int)
z2 -= dim[2] * np.array(z2 / dim[2], dtype=int)
temp_mol_count_array += np.histogram(
z1, bins=nslice, range=[0, dim[2]])[0]
temp_mol_count_array += np.histogram(
z2, bins=nslice, range=[0, dim[2]])[0]
mol_count_array[qu] += temp_mol_count_array
return mol_count_array
def den_curve_hist(zmol, int_z_mol, int_ddxddy_mol, nslice, nz, qm, dim,
max_H=12):
"""
Creates density and mean curvature histograms
Parameters
----------
zmol: float, array_like; shape=(nmol)
Molecular coordinates in z dimension
int_z_mol: array_like (float); shape=(nframe, 2, qm+1, nmol)
Molecular distances from intrinsic surface
int_ddxddy_mol: array_like (float); shape=(nframe, 4, qm+1, nmol)
Second derivatives of intrinsic surface wrt x and y at xmol, ymol
nslice: int
Number of bins in density histogram along axis normal to surface
nz: int (optional)
Number of bins in curvature histogram along axis normal to
surface (default=100)
qm: int
Maximum number of wave frequencies in Fourier Sum representing
intrinsic surface
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
Returns
-------
count_corr_array: int, array_like; shape=(qm+1, nslice, nz)
Number histogram binned by molecular position along z axis and
mean curvature H across qm resolutions
"""
count_corr_array = np.zeros((qm+1, nslice, nz))
for qu in range(qm+1):
temp_count_corr_array = np.zeros((nslice, nz))
int_z1 = int_z_mol[0][qu]
int_z2 = int_z_mol[1][qu]
z1 = zmol - int_z1
z2 = zmol - int_z2
z1 -= dim[2] * np.array(2 * z1 / dim[2], dtype=int)
z2 -= dim[2] * np.array(2 * z2 / dim[2], dtype=int)
ddzx1 = int_ddxddy_mol[0][qu]
ddzy1 = int_ddxddy_mol[1][qu]
ddzx2 = int_ddxddy_mol[2][qu]
ddzy2 = int_ddxddy_mol[3][qu]
H1 = abs(ddzx1 + ddzy1)
H2 = abs(ddzx2 + ddzy2)
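# (in the small-gradient limit the mean curvature is (d2z/dx2 + d2z/dy2)/2,
#  so H1 and H2 are proportional to its magnitude on each surface)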
temp_count_corr_array += np.histogram2d(
z1, H1, bins=[nslice, nz],
range=[[-dim[2]/2, dim[2]/2], [0, max_H]])[0]
temp_count_corr_array += (np.histogram2d(
z2, H2, bins=[nslice, nz],
range=[[-dim[2]/2, dim[2]/2], [0, max_H]])[0])[::-1]
count_corr_array[qu] += temp_count_corr_array
return count_corr_array
def create_intrinsic_den_curve_hist(directory, file_name, qm, n0, phi, nframe,
nslice, dim,
nz=100, recon=False, ow_hist=False):
"""
Calculate density and curvature histograms across surface
Parameters
----------
directory: str
File path of directory of alias analysis.
file_name: str
File name of trajectory being analysed.
qm: int
Maximum number of wave frequencies in Fourier Sum representing intrinsic
surface
n0: int
Maximum number of molecular pivots in intrinsic surface
phi: float
Weighting factor of minimum surface area term in surface optimisation
function
nframe: int
Number of frames in simulation trajectory
nslice: int
Number of bins in density histogram along axis normal to surface
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
nz: int (optional)
Number of bins in curvature histogram along axis normal to surface
(default=100)
recon: bool (optional)
Whether to use reconstructed surface coefficients (default=False)
ow_hist: bool (optional)
Whether to overwrite density and curvature distributions
(default=False)
"""
print("\n--- Running Intrinsic Density and Curvature Routine --- \n")
pos_dir = os.path.join(directory, 'pos')
intpos_dir = os.path.join(directory, 'intpos')
intden_dir = os.path.join(directory, 'intden')
if not os.path.exists(intden_dir):
os.mkdir(intden_dir)
file_name_pos = create_file_name(
[file_name, qm, n0, int(1./phi + 0.5), nframe])
file_name_hist = create_file_name(
[file_name, nslice, nz, qm, n0, int(1./phi + 0.5), nframe])
if recon:
file_name_pos += '_r'
file_name_hist += '_r'
intden_data_file = os.path.join(intden_dir, file_name_hist)
if not os.path.exists(intden_data_file + '_count_corr.hdf5'):
make_hdf5(intden_data_file + '_count_corr',
(qm+1, nslice, nz), tables.Float64Atom())
file_check = False
elif not ow_hist:
"Checking number of frames in current distribution files"
try:
file_check = shape_check_hdf5(
intden_data_file + '_count_corr', (nframe, qm+1, nslice, nz)
)
except FileNotFoundError:
file_check = False
else:
file_check = False
if not file_check:
pos_data_file = os.path.join(pos_dir, file_name)
zmol = load_npy(pos_data_file + '_{}_zmol'.format(nframe))
COM = load_npy(pos_data_file + '_{}_com'.format(nframe))
nmol = zmol.shape[1]
com_tile = np.moveaxis(
np.tile(COM, (nmol, 1, 1)), [0, 1, 2], [2, 1, 0])[2]
zmol = zmol - com_tile
for frame in range(nframe):
"Checking number of frames in hdf5 files"
frame_check_count_corr = frame_check_hdf5(
intden_data_file + '_count_corr', frame)
mode_count_corr = mode_check_hdf5(
frame_check_count_corr, ow_hist)
if mode_count_corr:
sys.stdout.write(
"Calculating position and curvature "
"distributions: frame {}\r".format(frame))
sys.stdout.flush()
intpos_data_file = os.path.join(intpos_dir, file_name_pos)
int_z_mol = load_hdf5(
intpos_data_file + '_int_z_mol', frame)
int_ddxddy_mol = load_hdf5(
intpos_data_file + '_int_ddxddy_mol', frame)
count_corr_array = den_curve_hist(
zmol[frame], int_z_mol, int_ddxddy_mol,
nslice, nz, qm, dim)
save_hdf5(
intden_data_file + '_count_corr',
count_corr_array, frame, mode_count_corr)
def av_intrinsic_distributions(directory, file_name, dim, nslice, qm, n0, phi,
nframe, nsample,
nz=100, recon=False, ow_dist=False):
"""
Summate average density and curvature distributions
Parameters
----------
directory: str
File path of directory of alias analysis.
file_name: str
File name of trajectory being analysed.
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
nslice: int
Number of bins in density histogram along axis normal to surface
qm: int
Maximum number of wave frequencies in Fourier Sum representing
intrinsic surface
n0: int
Maximum number of molecular pivots in intrinsic surface
phi: float
Weighting factor of minimum surface area term in surface
optimisation function
nframe: int
Number of frames in simulation trajectory
nsample: int
Number of frames to average over
nz: int (optional)
Number of bins in curvature histogram along axis normal to
surface (default=100)
recon: bool (optional)
Whether to use surface reconstructe coefficients (default=False)
ow_dist: bool (optional)
Whether to overwrite average density and curvature
distributions (default=False)
Returns
-------
int_den_curve_matrix: float, array_like; shape=(qm+1, nslice, nz)
Average intrinsic density-curvature distribution for each
resolution across nsample frames
int_density: float, array_like; shape=(qm+1, nslice)
Average intrinsic density distribution for each resolution
across nsample frames
int_curvature: float, array_like; shape=(qm+1, nz)
Average intrinsic surface curvature distribution for each
resolution across nsample frames
"""
intden_dir = os.path.join(directory, 'intden')
file_name_hist = create_file_name(
[file_name, nslice, nz, qm, n0, int(1./phi + 0.5), nframe])
file_name_dist = create_file_name(
[file_name, nslice, nz, qm, n0, int(1. / phi + 0.5), nsample])
if recon:
file_name_hist += '_r'
file_name_dist += '_r'
count_data_file = os.path.join(intden_dir, file_name_hist)
curve_data_file = os.path.join(intden_dir, file_name_dist)
if not os.path.exists(curve_data_file + '_int_den_curve.npy') or ow_dist:
int_den_curve_matrix = np.zeros((qm+1, nslice, nz))
print("\n--- Loading in Density and Curvature Distributions ---\n")
lslice = dim[2] / nslice
Vslice = dim[0] * dim[1] * lslice
for frame in range(nsample):
sys.stdout.write("Frame {}\r".format(frame))
sys.stdout.flush()
count_corr_array = load_hdf5(
count_data_file + '_count_corr', frame)
int_den_curve_matrix += count_corr_array / (nsample * Vslice)
np.save(curve_data_file + "_int_den_curve.npy", int_den_curve_matrix)
else:
int_den_curve_matrix = load_npy(curve_data_file + '_int_den_curve')
int_density = np.sum(
int_den_curve_matrix, axis=2) / 2.
int_curvature = np.sum(
np.moveaxis(int_den_curve_matrix, 1, 2), axis=2) / 2.
return int_den_curve_matrix, int_density, int_curvature
def coeff_slice(coeff, qm, qu):
"""
coeff_slice(coeff, qm, qu)
Truncates coeff array up to qu resolution
"""
n_waves_qm = 2 * qm + 1
n_waves_qu = 2 * qu + 1
index_1 = qm - qu
index_2 = index_1 + n_waves_qu
coeff_matrix = np.reshape(coeff, (n_waves_qm, n_waves_qm))
coeff_qu = coeff_matrix[
    tuple(slice(index_1, index_2) for _ in coeff_matrix.shape)
].flatten()
return coeff_qu
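# Illustrative sketch (not part of the original module): for qm=2 the coefficient
# vector has (2*2+1)**2 = 25 entries laid out as a 5x5 grid of (u, v) modes;
# truncating to qu=1 keeps the central 3x3 block, i.e. 9 coefficients.
def _example_coeff_slice():
    coeff = np.arange(25, dtype=float)      # stand-in coefficients for qm=2
    coeff_qu = coeff_slice(coeff, qm=2, qu=1)
    assert coeff_qu.shape == (9,)           # central 3x3 block, flattened
    return coeff_qu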
def xy_correlation(coeff_2, qm, qu, dim):
"""
xy_correlation(coeff_2, qm, qu, dim)
Return correlation across xy plane using Wiener-Khinchin theorem
Parameters
----------
coeff_2: float, array_like; shape=(n_waves**2)
Square of optimised surface coefficients
qm: int
Maximum number of wave frequencies in Fourier Sum representing
intrinsic surface
qu: int
Upper limit of wave frequencies in Fourier Sum representing
intrinsic surface
Returns
-------
xy_corr: float, array_like; shape=(n_waves_qu**2)
Length correlation function across xy plane
"""
coeff_2[len(coeff_2) // 2] = 0
coeff_2_slice = coeff_slice(coeff_2, qm, qu)
xy_corr, frequencies = coeff_to_fourier_2(coeff_2_slice, qu, dim)
# xy_corr = np.abs(amplitudes_2) / np.mean(amplitudes_2)
return xy_corr, frequencies
def make_den_curve(zmol, int_z_mol, int_dxdy_mol, nmol, nslice, nz, qm, dim):
"""
Creates density and curvature distributions normal to surface
Parameters
----------
zmol: float, array_like; shape=(nmol)
Molecular coordinates in z dimension
int_z_mol: array_like (float); shape=(nframe, 2, qm+1, nmol)
Molecular distances from intrinsic surface
int_dxdy_mol: array_like (float); shape=(nframe, 4, qm+1, nmol)
First derivatives of intrinsic surface wrt x and y at xmol, ymol
nmol: int
Number of molecules in simulation
nslice: int
Number of bins in density histogram along axis normal to surface
nz: int (optional)
Number of bins in curvature histogram along axis normal to
surface (default=100)
qm: int
Maximum number of wave frequencies in Fourier Sum representing
intrinsic surface
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
Returns
-------
count_corr_array: int, array_like; shape=(qm+1, nslice, nz)
Number histogram binned by molecular position along z axis
and mean curvature H across qm resolutions
"""
count_corr_array = np.zeros((qm+1, nslice, nz))
for qu in range(qm+1):
temp_count_corr_array = np.zeros((nslice, nz))
import argparse
import datetime
import json
import os
import typing
import pandas as pd
import numpy as np
import tensorflow as tf
import tqdm
from data_loader import DataLoader
from training_loop_launcher import select_model
from create_batch_files import create_and_save_batches
def prepare_dataloader(
dataframe: pd.DataFrame,
target_datetimes: typing.List[datetime.datetime],
stations: typing.Dict[typing.AnyStr, typing.Tuple[float, float, float]],
target_time_offsets: typing.List[datetime.timedelta],
config: typing.Dict[typing.AnyStr, typing.Any],
) -> tf.data.Dataset:
"""This function should be modified in order to prepare & return your own data loader.
Note that you can use either the netCDF or HDF5 data. Each iteration over your data loader should return a
2-element tuple containing the tensor that should be provided to the model as input, and the target values. In
this specific case, you will not be able to provide the latter since the dataframe contains no GHI, and we are
only interested in predictions, not training. Therefore, you must return a placeholder (or ``None``) as the second
tuple element.
Reminder: the dataframe contains imagery paths for every possible timestamp requested in ``target_datetimes``.
However, we expect that you will use some of the "past" imagery (i.e. imagery at T<=0) for any T in
``target_datetimes``, but you should NEVER rely on "future" imagery to generate predictions (for T>0). We
will be inspecting data loader implementations to ensure this is the case, and those who "cheat" will be
dramatically penalized.
See https://github.com/mila-iqia/ift6759/tree/master/projects/project1/evaluation.md for more information.
Args:
dataframe: a pandas dataframe that provides the netCDF file path (or HDF5 file path and offset) for all
relevant timestamp values over the test period.
target_datetimes: a list of timestamps that your data loader should use to provide imagery for your model.
The ordering of this list is important, as each element corresponds to a sequence of GHI values
to predict. By definition, the GHI values must be provided for the offsets given by ``target_time_offsets``
which are added to each timestamp (T=0) in this datetimes list.
stations: a map of station names of interest paired with their coordinates (latitude, longitude, elevation).
target_time_offsets: the list of timedeltas to predict GHIs for (by definition: [T=0, T+1h, T+3h, T+6h]).
config: configuration dictionary holding any extra parameters that might be required by the user. These
parameters are loaded automatically if the user provided a JSON file in their submission. Submitting
such a JSON file is completely optional, and this argument can be ignored if not needed.
Returns:
A ``tf.data.Dataset`` object that can be used to produce input tensors for your model. One tensor
must correspond to one sequence of past imagery data. The tensors must be generated in the order given
by ``target_sequences``.
"""
# MODIFY BELOW
# WE ARE PROVIDING YOU WITH A DUMMY DATA GENERATOR FOR DEMONSTRATION PURPOSES.
# MODIFY EVERYTHING IN IN THIS BLOCK AS YOU SEE FIT
base_folder_path = os.path.expandvars(config["val_data_folder"])
data_folder = os.path.join(base_folder_path, list(stations.keys())[0])
DL = DataLoader(dataframe,
target_datetimes,
stations,
target_time_offsets,
config,
data_folder=data_folder)
data_loader = DL.get_data_loader()
# MODIFY ABOVE
return data_loader
def prepare_model(
stations: typing.Dict[typing.AnyStr, typing.Tuple[float, float, float]],
target_time_offsets: typing.List[datetime.timedelta],
config: typing.Dict[typing.AnyStr, typing.Any],
) -> tf.keras.Model:
"""This function should be modified in order to prepare & return your own prediction model.
See https://github.com/mila-iqia/ift6759/tree/master/projects/project1/evaluation.md for more information.
Args:
stations: a map of station names of interest paired with their coordinates (latitude, longitude, elevation).
target_time_offsets: the list of timedeltas to predict GHIs for (by definition: [T=0, T+1h, T+3h, T+6h]).
config: configuration dictionary holding any extra parameters that might be required by the user. These
parameters are loaded automatically if the user provided a JSON file in their submission. Submitting
such a JSON file is completely optional, and this argument can be ignored if not needed.
Returns:
A ``tf.keras.Model`` object that can be used to generate new GHI predictions given imagery tensors.
"""
# MODIFY BELOW
MainModel = select_model(config)
model = MainModel(stations, target_time_offsets, config, return_ghi_only=True)
if MainModel.TRAINING_REQUIRED:
weights_file = config["model_file"]
print("Loading weights from {}".format(weights_file))
assert os.path.exists(weights_file), "Model not trained!"
model.load_weights(weights_file)
# MODIFY ABOVE
return model
def generate_predictions(data_loader: tf.data.Dataset, model: tf.keras.Model, pred_count: int) -> np.ndarray:
"""Generates and returns model predictions given the data prepared by a data loader."""
predictions = []
with tqdm.tqdm("generating predictions", total=pred_count) as pbar:
for iter_idx, minibatch in enumerate(data_loader):
assert isinstance(minibatch, tuple) and len(minibatch) >= 2, \
"the data loader should load each minibatch as a tuple with model input(s) and target tensors"
# remember: the minibatch should contain the input tensor(s) for the model as well as the GT (target)
# values, but since we are not training (and the GT is unavailable), we discard the last element
# see https://github.com/mila-iqia/ift6759/blob/master/projects/project1/datasources.md#pipeline-formatting
if len(minibatch) == 2: # there is only one input + groundtruth, give the model the input directly
pred = model(minibatch[0])
else: # the model expects multiple inputs, give them all at once using the tuple
pred = model(minibatch[:-1])
if isinstance(pred, tf.Tensor):
pred = pred.numpy()
assert pred.ndim == 2, "prediction tensor shape should be BATCH x SEQ_LENGTH"
predictions.append(pred)
pbar.update(len(pred))
return np.concatenate(predictions, axis=0)
def generate_all_predictions(
target_stations: typing.Dict[typing.AnyStr, typing.Tuple[float, float, float]],
target_datetimes: typing.List[datetime.datetime],
target_time_offsets: typing.List[datetime.timedelta],
dataframe: pd.DataFrame,
user_config: typing.Dict[typing.AnyStr, typing.Any],
) -> np.ndarray:
"""Generates and returns model predictions given the data prepared by a data loader."""
# we will create one data loader per station to make sure we avoid mixups in predictions
predictions = []
for station_idx, station_name in enumerate(target_stations):
# usually, we would create a single data loader for all stations, but we just want to avoid trouble...
stations = {station_name: target_stations[station_name]}
print(f"preparing data loader & model for station '{station_name}' ({station_idx + 1}/{len(target_stations)})")
data_loader = prepare_dataloader(dataframe, target_datetimes, stations, target_time_offsets, user_config)
model = prepare_model(stations, target_time_offsets, user_config)
station_preds = generate_predictions(data_loader, model, pred_count=len(target_datetimes))
# print("harman in prediction: ",(station_preds), (target_datetimes))
assert len(station_preds) == len(target_datetimes), "number of predictions mismatch with requested datetimes"
predictions.append(station_preds)
# print("station: {}: Predictions: {},".format(station_name, station_preds))
return np.concatenate(predictions, axis=0)
def parse_gt_ghi_values(
target_stations: typing.Dict[typing.AnyStr, typing.Tuple[float, float, float]],
target_datetimes: typing.List[datetime.datetime],
target_time_offsets: typing.List[datetime.timedelta],
dataframe: pd.DataFrame,
) -> np.ndarray:
"""Parses all required station GHI values from the provided dataframe for the evaluation of predictions."""
gt = []
for station_idx, station_name in enumerate(target_stations):
station_ghis = dataframe[station_name + "_GHI"]
for target_datetime in target_datetimes:
seq_vals = []
for time_offset in target_time_offsets:
index = target_datetime + time_offset
if index in station_ghis.index:
seq_vals.append(station_ghis.iloc[station_ghis.index.get_loc(index)])
else:
seq_vals.append(float("nan"))
gt.append(seq_vals)
return np.concatenate(gt, axis=0)
def parse_nighttime_flags(
target_stations: typing.Dict[typing.AnyStr, typing.Tuple[float, float, float]],
target_datetimes: typing.List[datetime.datetime],
target_time_offsets: typing.List[datetime.timedelta],
dataframe: pd.DataFrame,
) -> np.ndarray:
"""Parses all required station daytime flags from the provided dataframe for the masking of predictions."""
flags = []
for station_idx, station_name in enumerate(target_stations):
station_flags = dataframe[station_name + "_DAYTIME"]
for target_datetime in target_datetimes:
seq_vals = []
for time_offset in target_time_offsets:
index = target_datetime + time_offset
if index in station_flags.index:
seq_vals.append(station_flags.iloc[station_flags.index.get_loc(index)] > 0)
else:
seq_vals.append(False)
flags.append(seq_vals)
return np.concatenate(flags, axis=0)
def main(
preds_output_path: typing.AnyStr,
admin_config_path: typing.AnyStr,
user_config_path: typing.Optional[typing.AnyStr] = None,
stats_output_path: typing.Optional[typing.AnyStr] = None,
) -> None:
"""Extracts predictions from a user model/data loader combo and saves them to a CSV file."""
user_config = {}
if user_config_path:
assert os.path.isfile(user_config_path), f"invalid user config file: {user_config_path}"
with open(user_config_path, "r") as fd:
user_config = json.load(fd)
assert os.path.isfile(admin_config_path), f"invalid admin config file: {admin_config_path}"
with open(admin_config_path, "r") as fd:
admin_config = json.load(fd)
dataframe_path = admin_config["dataframe_path"]
assert os.path.isfile(dataframe_path), f"invalid dataframe path: {dataframe_path}"
dataframe = pd.read_pickle(dataframe_path)
if "start_bound" in admin_config:
dataframe = dataframe[dataframe.index >= datetime.datetime.fromisoformat(admin_config["start_bound"])]
if "end_bound" in admin_config:
dataframe = dataframe[dataframe.index < datetime.datetime.fromisoformat(admin_config["end_bound"])]
create_and_save_batches(admin_config_path, user_config_path, is_eval=True)
target_datetimes = [datetime.datetime.fromisoformat(d) for d in admin_config["target_datetimes"]]
assert target_datetimes and all([d in dataframe.index for d in target_datetimes])
target_stations = admin_config["stations"]
target_time_offsets = [pd.Timedelta(d).to_pytimedelta() for d in admin_config["target_time_offsets"]]
if "bypass_predictions_path" in admin_config and admin_config["bypass_predictions_path"]:
# re-open cached output if possible (for 2nd pass eval)
assert os.path.isfile(preds_output_path), f"invalid preds file path: {preds_output_path}"
with open(preds_output_path, "r") as fd:
predictions = fd.readlines()
assert len(predictions) == len(target_datetimes) * len(target_stations), \
"predicted ghi sequence count mistmatch wrt target datetimes x station count"
assert len(predictions) % len(target_stations) == 0
predictions = np.asarray([float(ghi) for p in predictions for ghi in p.split(",")])
else:
predictions = generate_all_predictions(target_stations, target_datetimes,
target_time_offsets, dataframe, user_config)
with open(preds_output_path, "w") as fd:
for pred in predictions:
fd.write(",".join([f"{v:0.03f}" for v in pred.tolist()]) + "\n")
if any([s + "_GHI" not in dataframe for s in target_stations]):
print("station GHI measures missing from dataframe, skipping stats output")
return
assert not np.isnan(predictions).any(), "user predictions should NOT contain NaN values"
predictions = predictions.reshape((len(target_stations), len(target_datetimes), len(target_time_offsets)))
gt = parse_gt_ghi_values(target_stations, target_datetimes, target_time_offsets, dataframe)
with open(preds_output_path + "_true_GHI", "w") as fd:
for i in range(len(gt)):
fd.write(str(gt[i]) + ",")
if (i + 1) % 4 == 0:
fd.write("\n")
# fd.write(",".join([f"{v:0.03f}" for v in gt_.tolist()]) + "\n")
gt = gt.reshape((len(target_stations), len(target_datetimes), len(target_time_offsets)))
day = parse_nighttime_flags(target_stations, target_datetimes, target_time_offsets, dataframe)
day = day.reshape((len(target_stations), len(target_datetimes), len(target_time_offsets)))
squared_errors = np.square(predictions - gt)
import numpy as np
import math
import os
import sys
import multiprocessing
import pyfftw
#Author: <NAME>, EMBL Heidelberg, Sachse Group (2019)
#-------------------------------------------------------------------------------------
def estimateNoiseFromMap(map, windowSize, boxCoord):
#**************************************************
#*** function to estimate variance and mean from **
#**** nonoverlapping boxes outside the particle ***
#**************************************************
if boxCoord == 0:
#extract a sample of pure noise from the map
sizeMap = map.shape;
sizePatch = np.array([windowSize, windowSize, windowSize]);
center = np.array([0.5*sizeMap[0], 0.5*sizeMap[1], 0.5*sizeMap[2]]);
sampleMap1 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
int(0.02*sizeMap[1]):(int(0.02*sizeMap[1]) + sizePatch[1]),
(int(center[2]-0.5*sizePatch[2])):(int((center[2]-0.5*sizePatch[2]) + sizePatch[2]))];
sampleMap2 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
int(0.98*sizeMap[1] - sizePatch[1]):(int(0.98*sizeMap[1])),
(int(center[2]-0.5*sizePatch[2])):(int((center[2]-0.5*sizePatch[2]) + sizePatch[2]))];
sampleMap3 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
(int(center[1]-0.5*sizePatch[1])):(int((center[1]-0.5*sizePatch[1]) + sizePatch[1])),
int(0.02*sizeMap[2]):(int(0.02*sizeMap[2]) + sizePatch[2])];
sampleMap4 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
(int(center[1]-0.5*sizePatch[1])):(int((center[1]-0.5*sizePatch[1]) + sizePatch[1])),
int(0.98*sizeMap[2]) - sizePatch[2]:(int(0.98*sizeMap[2]))];
#concatenate the four samples
sampleMap = np.concatenate((sampleMap1, sampleMap2, sampleMap3, sampleMap4), axis=0);
else:
sizePatch = np.array([windowSize, windowSize, windowSize]);
center = np.array(boxCoord);
sampleMap = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
int(center[1]-0.5*sizePatch[1]):(int(center[1]-0.5*sizePatch[1]) + sizePatch[1]),
(int(center[2]-0.5*sizePatch[2])):(int((center[2]-0.5*sizePatch[2]) + sizePatch[2]))];
#estimate variance and mean from the sample
mean = np.mean(sampleMap);
var = np.var(sampleMap);
if var == 0.0:
print("Variance is estimated to be 0. You are probably estimating noise in a masked region. Exit ...")
sys.exit();
return mean, var, sampleMap;
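# Illustrative sketch (not part of the original module): the solvent mean and
# variance estimated above are typically used to z-score the map before any
# thresholding; the helper name is hypothetical.
def _example_normalize_map(map, windowSize, boxCoord):
    mean, var, _ = estimateNoiseFromMap(map, windowSize, boxCoord);
    return (map - mean)/math.sqrt(var);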
#---------------------------------------------------------------------------------
def makeHannWindow(map):
#***********************************************************
#*** generate Hann window with the size of the given map ***
#***********************************************************
#some initialization
mapSize = map.shape;
if map.ndim == 3:
x = np.linspace(-math.floor(mapSize[0]/2.0), -math.floor(mapSize[0]/2.0) + mapSize[0], mapSize[0]);
y = np.linspace(-math.floor(mapSize[1]/2.0), -math.floor(mapSize[1]/2.0) + mapSize[1], mapSize[1]);
z = np.linspace(-math.floor(mapSize[2]/2.0), -math.floor(mapSize[2]/2.0) + mapSize[2], mapSize[2]);
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij');
radiusMap = np.sqrt(xx**2 + yy**2 + zz**2);
windowMap = 0.5*(1.0 - np.cos((2.0*np.pi*radiusMap/map.shape[0]) + np.pi));
elif map.ndim == 2:
x = np.linspace(-math.floor(mapSize[0]/2.0), -math.floor(mapSize[0]/2.0) + mapSize[0], mapSize[0]);
y = np.linspace(-math.floor(mapSize[1]/2.0), -math.floor(mapSize[1]/2.0) + mapSize[1], mapSize[1]);
xx, yy = np.meshgrid(x, y, indexing='ij');
radiusMap = np.sqrt(xx**2 + yy**2);
windowMap = 0.5*(1.0 - np.cos((2.0*np.pi*radiusMap/map.shape[0]) + np.pi));
windowMap[radiusMap>(mapSize[0]/2.0)] = 0.0;
return windowMap;
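# Illustrative sketch (not part of the original module): the Hann window is
# usually multiplied into the map to taper the edges before a Fourier transform.
def _example_apply_hann(map):
    return map*makeHannWindow(map);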
#-------------------------------------------------------------------------------------
def estimateNoiseFromMapInsideMask(map, mask):
#**************************************************
#*** function to estimate variance and mean from **
#******* map outside the user provided mask *******
#**************************************************
mask[mask<=0.5] = 0.0;
mask[mask>0.0] = 1000.0;
mask[mask<1000.0] = 1.0;
mask[mask==1000.0] = 0.0;
sampleMap = np.copy(map)*mask;
sampleMap = sampleMap[sampleMap != 0.0];
#estimate variance and mean from the sample
mean = np.mean(sampleMap);
var = np.var(sampleMap);
return mean, var, sampleMap;
#-------------------------------------------------------------------------------------
def estimateNoiseFromHalfMaps(halfmap1, halfmap2, circularMask):
halfmapDiff = halfmap1 - halfmap2;
varianceBackground = np.var(halfmapDiff[circularMask>0.5]);
return varianceBackground;
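# Note: if the two half maps share the same signal and carry independent noise
# of equal variance, the difference-map variance above equals twice the
# per-half-map noise variance inside the background region.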
#-------------------------------------------------------------------------------------
def estimateECDFFromMap(map, windowSize, boxCoord):
#**************************************************
#****** function to estimate empirical cumul. *****
#**** distribution function from solvent area *****
#**************************************************
if boxCoord == 0:
#extract a sample of pure noise from the map
sizeMap = map.shape;
sizePatch = np.array([windowSize, windowSize, windowSize]);
center = np.array([0.5*sizeMap[0], 0.5*sizeMap[1], 0.5*sizeMap[2]]);
sampleMap1 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
int(0.02*sizeMap[1]):(int(0.02*sizeMap[1]) + sizePatch[1]),
(int(center[2]-0.5*sizePatch[2])):(int((center[2]-0.5*sizePatch[2]) + sizePatch[2]))];
sampleMap2 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
int(0.98*sizeMap[1] - sizePatch[1]):(int(0.98*sizeMap[1])),
(int(center[2]-0.5*sizePatch[2])):(int((center[2]-0.5*sizePatch[2]) + sizePatch[2]))];
sampleMap3 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
(int(center[1]-0.5*sizePatch[1])):(int((center[1]-0.5*sizePatch[1]) + sizePatch[1])),
int(0.02*sizeMap[2]):(int(0.02*sizeMap[2]) + sizePatch[2])];
sampleMap4 = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
(int(center[1]-0.5*sizePatch[1])):(int((center[1]-0.5*sizePatch[1]) + sizePatch[1])),
int(0.98*sizeMap[2]) - sizePatch[2]:(int(0.98*sizeMap[2]))];
        #concatenate the four samples
sampleMap = np.concatenate((sampleMap1, sampleMap2, sampleMap3, sampleMap4), axis=0);
elif boxCoord == -1:
sampleMap = map;
else:
sizePatch = np.array([windowSize, windowSize, windowSize]);
center = np.array(boxCoord);
sampleMap = map[int(center[0]-0.5*sizePatch[0]):(int(center[0]-0.5*sizePatch[0]) + sizePatch[0]),
int(center[1]-0.5*sizePatch[1]):(int(center[1]-0.5*sizePatch[1]) + sizePatch[1]),
(int(center[2]-0.5*sizePatch[2])):(int((center[2]-0.5*sizePatch[2]) + sizePatch[2]))];
#estimate ECDF from map
sampleMap = sampleMap.flatten();
#downsize the sample
finalSampleSize = min(100000, sampleMap.size);
sampleMap = np.random.choice(sampleMap, finalSampleSize, replace = False);
numSamples = sampleMap.size;
sampleMapSort = np.sort(sampleMap);
minX = sampleMapSort[0];
maxX = sampleMapSort[numSamples-1];
numInterval = numSamples;
spacingX = (maxX - minX)/(float(numInterval));
ECDF = np.zeros(numInterval);
for index in range(numInterval):
val = sampleMapSort[index];
ECDF[index] = ((sampleMapSort[sampleMapSort<= val]).size)/float(numSamples);
return ECDF, sampleMapSort;
#------------------------------------------------------------------------------------
def getCDF(x, ECDF, sampleMapSort):
#****************************************************
#********* get the value of the CDF at point x ******
#******* CDF : Cumulative distribution function *****
#****************************************************
numSamples = sampleMapSort.size;
minX = sampleMapSort[0];
maxX = sampleMapSort[numSamples-1];
if x >= maxX:
CDFval = 1.0;
elif x <= minX:
CDFval = 0.0;
else:
#get the index in the ECDF array
index = np.searchsorted(sampleMapSort, x) - 1;
CDFval = ECDF[index];
return CDFval;
#------------------------------------------------------------------------------------
def AndersonDarling(sample):
#********************************************
#*** Anderson-Darling test for normality ****
#********************************************
sample = np.random.choice(sample, min(10000,sample.size), replace=False);
sampleMapSort = np.sort(sample);
numSamples = sampleMapSort.size;
Ad = -1.0*numSamples;
for i in range(numSamples):
CDF_Yi = 0.5 * (1.0 + math.erf(sampleMapSort[i]/math.sqrt(2.0)));
CDF_Yn = 0.5 * (1.0 + math.erf(sampleMapSort[numSamples-i-1]/math.sqrt(2.0)));
if CDF_Yi == 0:
CDF_Yi = 0.000001;
if CDF_Yi == 1:
CDF_Yi = 0.999999;
if CDF_Yn == 0:
CDF_Yn = 0.000001;
if CDF_Yn == 1:
CDF_Yn = 0.999999;
#calculate the Anderson-Darling test statistic
Ad = Ad - (1.0/float(numSamples)) * (2*(i+1)-1)*(math.log(CDF_Yi) + (math.log(1.0-CDF_Yn)));
    #adjust the statistic, since mean and variance are estimated from the sample rather than known a priori
Ad = Ad*(1 + 0.75/float(numSamples) + 2.25/float(numSamples*numSamples));
#calculate p-values
# <NAME> and <NAME>, Eds., 1986, Goodness-of-Fit Techniques, <NAME>
try:
if Ad >= 0.6:
pVal = math.exp(1.2937 - 5.709*(Ad) + 0.0186*Ad*Ad);
elif 0.34<Ad<0.6:
pVal = math.exp(0.9177 - 4.279*Ad - 1.38 * Ad*Ad);
elif 0.2 < Ad <= 0.34:
pVal = 1 - math.exp(-8.318 + 42.796*Ad - 59.938*Ad*Ad);
else:
pVal = 1 - math.exp(-13.436 + 101.14 * Ad - 223.73 * Ad*Ad);
except:
pVal = -1.0;
return Ad, pVal, numSamples;
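#NOTE: illustrative sketch, not part of the original module; scipy is assumed to
#be available. For a (roughly) standard-normal sample the statistic above should
#be close to the one reported by scipy.stats.anderson (exact agreement is not
#expected, because the sample is randomly sub-sampled above and scipy
#re-standardizes internally).
def _demoAndersonDarling():
    import scipy.stats;
    rng = np.random.RandomState(0);
    sample = rng.normal(size=5000);
    Ad, pVal, n = AndersonDarling(sample);
    scipyAd = scipy.stats.anderson(sample, dist='norm').statistic;
    print("A^2 = %.4f (p = %.4f), scipy A^2 = %.4f" % (Ad, pVal, scipyAd));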
#------------------------------------------------------------------------------------
def KolmogorowSmirnow(ECDF, sampleMapSort):
#***********************************************
#***** KS test by supremum of distance *********
#*********** between CDF and ECDF **************
#***********************************************
#some initialization
numSamples = sampleMapSort.size;
X = np.linspace(-5, 5, 200000);
vectorizedErf = np.vectorize(math.erf);
    #maximum distance between CDF and ECDF over the whole definition region
Y_stdNorm = 0.5 * (1.0 + vectorizedErf(X/math.sqrt(2.0)));
Y_ecdf = np.interp(X, sampleMapSort, ECDF, left=0.0, right=1.0);
Dn = np.amax(np.absolute(np.subtract(Y_stdNorm,Y_ecdf)));
#get Kolmogorow-Smirnow test statistic
KS_testStat = math.sqrt(numSamples)*Dn;
#maximum distances between CDF and ECDF for tail regions
X_tail_right = X[X>2.0];
X_tail_left = X[X<-2.0];
X_tail = np.concatenate((X_tail_right, X_tail_left));
Y_stdNorm = 0.5 * (1.0 + vectorizedErf(X_tail/math.sqrt(2.0)));
Y_ecdf = np.interp(X_tail, sampleMapSort, ECDF, left=0.0, right=1.0);
Dn_tail = np.amax(np.absolute(np.subtract(Y_stdNorm,Y_ecdf)));
return KS_testStat, Dn, Dn_tail, numSamples;
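#NOTE: illustrative sketch, not part of the original module; scipy is assumed to
#be available. With ECDF[i] = (i+1)/n for a sorted standard-normal sample, Dn
#should be close to the scipy.stats.kstest statistic (exact agreement is not
#expected, since the helper interpolates the ECDF linearly on a fixed grid).
def _demoKolmogorowSmirnow():
    import scipy.stats;
    rng = np.random.RandomState(0);
    sampleSort = np.sort(rng.normal(size=2000));
    ecdf = (np.arange(sampleSort.size) + 1.0) / float(sampleSort.size);
    KSstat, Dn, Dn_tail, n = KolmogorowSmirnow(ecdf, sampleSort);
    print("Dn = %.4f, scipy Dn = %.4f" % (Dn, scipy.stats.kstest(sampleSort, 'norm').statistic));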
#-----------------------------------------------------------------------------------
def checkNormality(map, windowSize, boxCoord):
#***************************************
    #** check normal distribution assumption **
#***************************************
print('Checking the normal distribution assumption ...');
mean, var, _ = estimateNoiseFromMap(map, windowSize, boxCoord);
map = np.subtract(map, mean);
tMap = np.multiply(map, (1.0/(math.sqrt(var))));
map = np.copy(tMap);
#get maximum distances between ECDF and CDF
ECDFvals, sampleSort = estimateECDFFromMap(map, windowSize, boxCoord);
KSstat, Dn, Dn_tail, n = KolmogorowSmirnow(ECDFvals, sampleSort);
output = "Maximum Distance Dn between ECDF and CDF: Dn=" + " %.4f" %Dn + ", in Tail:" + " %.4f" %Dn_tail + ". Sample size used: " + repr(n);
print(output);
#do Anderson-Darling test for normality
AnDarl, pVal, n = AndersonDarling(sampleSort);
output = "Anderson-Darling test summary: " + repr(AnDarl) + ". p-Value: " + "%.4f" %pVal + ". Sample size used: " + repr(n);
if pVal != -1.0:
print(output);
else:
pVal = -1.0;
if (Dn_tail > 0.01):
        output = "WARNING: Deviation in the tail areas between the normal distribution and the empirical CDF is higher than 1%. If boxes for background noise estimation are set properly, please consider using the flag -ecdf to use the empirical CDF instead of the normal distribution."
print(output);
#------------------------------------------------------------------------------------
def studentizeMap(map, mean, var):
#****************************************
#********* normalize map ****************
#****************************************
if np.isscalar(var):
studMap = np.subtract(map, mean);
studMap = np.multiply(studMap, (1.0/(math.sqrt(var))));
else: #if local variances are known, use them
var[var == 0] = 1000;
studMap = np.subtract(map, mean);
studMap = np.divide(studMap, np.sqrt(var));
var[var == 1000] = 0.0;
studMap[var == 0.0] = 0.0;
return studMap;
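#NOTE (illustration, not in the original module): for a scalar variance the
#studentization above is a plain z-score, i.e. (map - mean)/sqrt(var); the array
#branch additionally sets voxels whose local variance is exactly zero to 0.0.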
#-----------------------------------------------------------------------------------
def calcQMap(map, mean, var, ECDF, windowSize, boxCoord, mask, method, test):
#*****************************************
#***** generate qMap of a 3D density *****
#*****************************************
#get some map data
sizeMap = map.shape;
#calculate the test statistic
if np.isscalar(var):
#map[map == 0.0] = -100000000;
tmap = np.subtract(map, mean);
tMap = np.multiply(tmap, (1.0/(math.sqrt(var))));
        map = np.copy(tMap);
"""
Script for plotting differences in models for select variables over the
1950-2019 period
Author : <NAME>
Date : 15 December 2021
Version : 7 - adds validation data for early stopping
"""
### Import packages
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import palettable.scientific.sequential as sss
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import scipy.stats as sts
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
modelGCMs = ['CanESM2','MPI','CSIRO-MK3.6','KNMI-ecearth','GFDL-CM3','GFDL-ESM2M','LENS']
modelGCMsNames = ['CanESM2','MPI','CSIRO-MK3.6','KNMI-ecearth','GFDL-CM3','GFDL-ESM2M','LENS','MMmean']
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m"]
datasetsingle = ['SMILE']
monthlychoiceq = ['JFM','AMJ','JAS','OND','annual']
variables = ['T2M','P','SLP']
reg_name = 'SMILEGlobe'
level = 'surface'
monthlychoiceq = ['annual']
variables = ['T2M']
timeper = 'historical'
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
baseline = np.arange(1951,1980+1,1)
###############################################################################
###############################################################################
window = 0
yearsall = np.arange(1950+window,2019+1,1)
###############################################################################
###############################################################################
numOfEns = 16
lentime = len(yearsall)
###############################################################################
###############################################################################
dataset = datasetsingle[0]
lat_bounds,lon_bounds = UT.regions(reg_name)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
lensalso = True
randomalso = False
shuffletype = 'none'
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Read in model data
def read_primary_dataset(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
### Call functions
for vv in range(len(variables)):
for mo in range(len(monthlychoiceq)):
variq = variables[vv]
monthlychoice = monthlychoiceq[mo]
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/Climatologies/interModel/%s/' % variq
saveData = monthlychoice + '_' + variq + '_' + reg_name
print('*Filename == < %s >' % saveData)
### Read data
models,lats,lons = read_primary_dataset(variq,dataset,monthlychoice,numOfEns,
lensalso,randomalso,ravelyearsbinary,
ravelbinary,shuffletype,timeper,
lat_bounds,lon_bounds)
### Calculate ensemble mean
ensmean = np.nanmean(models[:,:,:,:,:],axis=1)
### Calculate multimodel mean
modmean = np.nanmean(models[:,:,:,:,:],axis=0)
### Calculate difference from multimodelmean
diffmod = models - modmean
diffmodensm = np.nanmean(diffmod[:,:,:,:,:],axis=1)
diffmodmean = np.nanmean(diffmodensm[:,:,:,:],axis=1)
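        ### NOTE (added for clarity): models is assumed to be shaped
        ### (model,ensemble,year,lat,lon), so diffmodmean is the time-mean,
        ### ensemble-mean bias of each model relative to the multi-model
        ### mean, shaped (model,lat,lon)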
### Calculate different between each model
# intermodel = np.empty((models.shape[0],models.shape[0],models.shape[1],
# models.shape[2],models.shape[3],models.shape[4]))
# for mm in range(models.shape[0]):
# for ea in range(models.shape[0]):
# intermodel[mm,ea,:,:,:,:] = models[mm,:,:,:,:] - models[ea,:,:,:,:]
# ensmeanintermodel = np.nanmean(intermodel[:,:,:,:,:,:],axis=2)
# timeensmeanintermodel = np.nanmean(ensmeanintermodel[:,:,:,:,:],axis=2)
###############################################################################
###############################################################################
###############################################################################
#######################################################################
#######################################################################
#######################################################################
### Plot subplot of different from multimodel mean
if variq == 'T2M':
limit = np.arange(-6,6.01,0.25)
barlim = np.round(np.arange(-6,7,2),2)
cmap = cmocean.cm.balance
label = r'\textbf{%s -- [$^{\circ}$C MMmean difference] -- 1950-2019}' % variq
elif variq == 'P':
limit = np.arange(-3,3.01,0.01)
barlim = np.round(np.arange(-3,3.1,1),2)
cmap = cmocean.cm.tarn
label = r'\textbf{%s -- [mm/day MMmean difference] -- 1950-2019}' % variq
elif variq == 'SLP':
limit = np.arange(-5,5.1,0.25)
barlim = np.round(np.arange(-5,6,1),2)
cmap = cmocean.cm.diff
label = r'\textbf{%s -- [hPa MMmean difference] -- 1950-2019}' % variq
fig = plt.figure(figsize=(8,4))
for r in range(len(diffmodmean)):
var = diffmodmean[r]
ax1 = plt.subplot(2,4,r+2)
m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
m.drawcoastlines(color='dimgrey',linewidth=0.27)
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
circle = m.drawmapboundary(fill_color='white',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,var,limit,extend='both')
cs1.set_cmap(cmap)
ax1.annotate(r'\textbf{%s}' % modelGCMs[r],xy=(0,0),xytext=(0.5,1.10),
textcoords='axes fraction',color='dimgrey',fontsize=8,
rotation=0,ha='center',va='center')
ax1.annotate(r'\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),
textcoords='axes fraction',color='k',fontsize=6,
rotation=330,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.36,0.11,0.3,0.03])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=5)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.00,bottom=0.14)
plt.savefig(directoryfigure + 'MultiModelBias-%s_ALL.png' % saveData,dpi=300)
directorydataMS = '/Users/zlabe/Documents/Research/ModelComparison/Data/RevisitResults_v7/'
np.save(directorydataMS + 'MMMeandifferences_7models.npy',diffmodmean)
###############################################################################
###############################################################################
###############################################################################
fig = plt.figure(figsize=(10,2))
for r in range(len(diffmodmean)+1):
if r < 7:
var = diffmodmean[r]
else:
var = np.empty((lats.shape[0],lons.shape[0]))
var[:] = np.nan
ax1 = plt.subplot(1,len(diffmodmean)+1,r+1)
m = Basemap(projection='npstere',boundinglat=65,lon_0=0,
resolution='l',round =True,area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,var,limit,extend='both')
cs1.set_cmap(cmap)
if ocean_only == True:
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
elif land_only == True:
m.drawlsmask(land_color=(0,0,0,0),ocean_color='darkgrey',lakes=True,zorder=5)
ax1.annotate(r'\textbf{%s}' % modelGCMsNames[r],xy=(0,0),xytext=(0.5,1.10),
textcoords='axes fraction',color='dimgrey',fontsize=8,
rotation=0,ha='center',va='center')
ax1.annotate(r'\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),
textcoords='axes fraction',color='k',fontsize=6,
rotation=330,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.36,0.13,0.3,0.03])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=5)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.02,bottom=0.14)
plt.savefig(directoryfigure + 'MultiModelBias-%s_ALL-Arctic.png' % saveData,dpi=300)
###############################################################################
###############################################################################
###############################################################################
if variq == 'T2M':
limit = np.arange(-3,3.01,0.2)
barlim = np.round(np.arange(-3,4,1),2)
cmap = cmocean.cm.balance
label = r'\textbf{%s -- [$^{\circ}$C MMmean difference] -- 1950-2019}' % variq
elif variq == 'P':
            limit = np.arange(-3,3.01,0.01)
#!/usr/bin/env python
""" Larch Tests Version 1 """
import unittest
import time
import ast
import numpy as np
from sys import version_info
from ut_base import TestCase
from larch import Interpreter
class TestEval(TestCase):
'''testing of asteval'''
def test_dict_index(self):
'''dictionary indexing'''
self.session("a_dict = {'a': 1, 'b': 2, 'c': 3, 'd': 4}")
self.isTrue("a_dict['a'] == 1")
self.isTrue("a_dict['d'] == 4")
def test_list_index(self):
'''list indexing'''
self.session("a_list = ['a', 'b', 'c', 'd', 'o']")
self.isTrue("a_list[0] == 'a'")
self.isTrue("a_list[1] == 'b'")
self.isTrue("a_list[2] == 'c'")
def test_tuple_index(self):
'''tuple indexing'''
self.session("a_tuple = (5, 'a', 'x')")
self.isTrue("a_tuple[0] == 5")
self.isTrue("a_tuple[2] == 'x'")
def test_string_index(self):
'''string indexing'''
self.session("a_string = 'hello world'")
self.isTrue("a_string[0] == 'h'")
self.isTrue("a_string[6] == 'w'")
self.isTrue("a_string[-1] == 'd'")
self.isTrue("a_string[-2] == 'l'")
def test_ndarray_index(self):
'''nd array indexing'''
self.session("a_ndarray = 5*arange(20)")
self.isTrue("a_ndarray[2] == 10")
self.isTrue("a_ndarray[4] == 20")
def test_ndarrayslice(self):
'''array slicing'''
self.session("a_ndarray = arange(200).reshape(10, 20)")
self.isTrue("a_ndarray[1:3,5:7] == array([[25,26], [45,46]])")
self.session("y = arange(20).reshape(4, 5)")
self.isTrue("y[:,3] == array([3, 8, 13, 18])")
self.isTrue("y[...,1] == array([1, 6, 11, 16])")
self.session("y[...,1] = array([2, 2, 2, 2])")
self.isTrue("y[1,:] == array([5, 2, 7, 8, 9])")
# print(self.session.symtable["y"])
def test_while(self):
'''while loops'''
self.session("""
n=0
while n < 8:
n += 1
""")
self.isValue('n', 8)
self.session("""
n=0
while n < 8:
n += 1
if n > 3:
break
else:
n = -1
""")
self.isValue('n', 4)
self.session("""
n=0
while n < 8:
n += 1
else:
n = -1
""")
self.isValue('n', -1)
self.session("""
n=0
while n < 10:
n += 1
if n < 3:
continue
n += 1
print( ' n = ', n)
if n > 5:
break
print( 'finish: n = ', n)
""")
self.isValue('n', 6)
def test_assert(self):
'test assert statements'
self.session.error = []
self.session('n=6')
self.session('assert n==6')
self.assertTrue(self.session.error == [])
self.session('assert n==7')
errtype, errmsg = self.session.error[0].get_error()
self.assertTrue(errtype == 'AssertionError')
def test_for(self):
'''for loops'''
self.session('''
n=0
for i in arange(10):
n += i
''')
self.isValue('n', 45)
self.session('''
n=0
for i in arange(10):
n += i
else:
n = -1
''')
self.isValue('n', -1)
self.session('''
n=0
for i in arange(10):
n += i
if n > 2:
break
else:
n = -1
''')
self.isValue('n', 3)
def test_if(self):
'''test if'''
self.session("""zero = 0
if zero == 0:
x = 1
if zero != 100:
x = x+1
if zero > 2:
x = x + 1
else:
y = 33
""")
self.isValue('x', 2)
self.isValue('y', 33)
def test_print(self):
'''print (ints, str, ....)'''
self.session("print(31)")
self.session.writer.flush()
time.sleep(0.1)
out = self.read_stdout()
self.assert_(out== '31\n')
self.session("print('%s = %.3f' % ('a', 1.2012345))")
self.session.writer.flush()
time.sleep(0.1)
out = self.read_stdout()
self.assert_(out== 'a = 1.201\n')
self.session("print('{0:s} = {1:.2f}'.format('a', 1.2012345))")
self.session.writer.flush()
time.sleep(0.1)
out = self.read_stdout()
self.assert_(out== 'a = 1.20\n')
def test_repr(self):
'''repr of dict, list'''
self.session("x = {'a': 1, 'b': 2, 'c': 3}")
self.session("y = ['a', 'b', 'c']")
self.session("rep_x = repr(x['a'])")
self.session("rep_y = repr(y)")
self.session("print rep_y , rep_x")
self.isValue("rep_x", "1")
self.isValue("rep_y", "['a', 'b', 'c']")
def test_cmp(self):
'''numeric comparisons'''
self.isTrue("3 == 3")
self.isTrue("3.0 == 3")
self.isTrue("3.0 == 3.0")
self.isTrue("3 != 4")
self.isTrue("3.0 != 4")
self.isTrue("3 >= 1")
self.isTrue("3 >= 3")
self.isTrue("3 <= 3")
self.isTrue("3 <= 5")
self.isTrue("3 < 5")
self.isTrue("5 > 3")
self.isFalse("3 == 4")
self.isFalse("3 > 5")
self.isFalse("5 < 3")
def test_bool(self):
'''boolean logic'''
self.session('''
yes = True
no = False
nottrue = False
a = arange(7)''')
self.isTrue("yes")
self.isFalse("no")
self.isFalse("nottrue")
self.isFalse("yes and no or nottrue")
self.isFalse("yes and (no or nottrue)")
self.isFalse("(yes and no) or nottrue")
self.isTrue("yes or no and nottrue")
self.isTrue("yes or (no and nottrue)")
self.isFalse("(yes or no) and nottrue")
self.isTrue("yes or not no")
self.isTrue("(yes or no)")
self.isFalse("not (yes or yes)")
self.isFalse("not (yes or no)")
self.isFalse("not (no or yes)")
self.isTrue("not no or yes")
self.isFalse("not yes")
self.isTrue("not no")
def test_bool_coerce(self):
'''coercion to boolean'''
self.isTrue("1")
self.isFalse("0")
self.isTrue("'1'")
self.isFalse("''")
self.isTrue("[1]")
self.isFalse("[]")
self.isTrue("(1)")
self.isTrue("(0,)")
self.isFalse("()")
self.isTrue("dict(y=1)")
self.isFalse("{}")
def test_assignment(self):
'''variables assignment'''
self.session('n = 5')
self.isValue("n", 5)
self.session('s1 = "a string"')
self.isValue("s1", "a string")
self.session('b = (1,2,3)')
self.isValue("b", (1,2,3))
self.session('a = 1.*arange(10)')
self.isValue("a", np.arange(10) )
self.session('a[1:5] = 1 + 0.5 * arange(4)')
self.isNear("a", np.array([ 0. , 1. , 1.5, 2. , 2.5, 5. , 6. , 7. , 8. , 9. ]))
def test_names(self):
'''names test'''
self.session('nx = 1')
self.session('nx1 = 1')
def test_syntaxerrors_1(self):
'''assignment syntax errors test'''
for expr in ('class = 1', 'for = 1', 'if = 1', 'raise = 1',
'1x = 1', '1.x = 1', '1_x = 1'):
failed, errtype, errmsg = False, None, None
self.session(expr)
if self.session.error:
err = self.session.error.pop(0)
errtype, errmsg = err.get_error()
failed = True
self.assertTrue(failed)
self.assertTrue(errtype == 'SyntaxError')
#self.assertTrue(errmsg.startswith('invalid syntax'))
def test_unsupportednodes(self):
'''unsupported nodes'''
for expr in ('f = lambda x: x*x', 'yield 10'):
failed, errtype, errmsg = False, None, None
self.session(expr)
if self.session.error:
err = self.session.error.pop(0)
errtype, errmsg = err.get_error()
failed = True
self.assertTrue(failed)
self.assertTrue(errtype == 'NotImplementedError')
def test_syntaxerrors_2(self):
'''syntax errors test'''
for expr in ('x = (1/*)', 'x = 1.A', 'x = A.2'):
failed, errtype, errmsg = False, None, None
self.session(expr)
if self.session.error:
err = self.session.error.pop(0)
errtype, errmsg = err.get_error()
failed = True
self.assertTrue(failed)
self.assertTrue(errtype == 'SyntaxError')
#self.assertTrue(errmsg.startswith('invalid syntax'))
def test_runtimeerrors_1(self):
'''runtime errors test'''
self.session("zero = 0")
self.session("astr ='a string'")
self.session("atup = ('a', 'b', 11021)")
self.session("arr = arange(20)")
for expr, errname in (('x = 1/zero', 'ZeroDivisionError'),
('x = zero + nonexistent', 'NameError'),
('x = zero + astr', 'TypeError'),
('x = zero()', 'TypeError'),
('x = astr * atup', 'TypeError'),
('x = arr.shapx', 'AttributeError'),
('arr.shapx = 4', 'AttributeError'),
('del arr.shapx', 'LookupError')):
failed, errtype, errmsg = False, None, None
self.session(expr)
if self.session.error:
err = self.session.error.pop(0)
errtype, errmsg = err.get_error()
failed = True
self.assertTrue(failed)
self.assertTrue(errtype == errname)
#self.assertTrue(errmsg.startswith('invalid syntax'))
def test_ndarrays(self):
'''simple ndarrays'''
self.session('n = array([11, 10, 9])')
self.isTrue("isinstance(n, ndarray)")
self.isTrue("len(n) == 3")
self.isValue("n", np.array([11, 10, 9]))
self.session('n = arange(20).reshape(5, 4)')
self.isTrue("isinstance(n, ndarray)")
self.isTrue("n.shape == (5, 4)")
self.session("myx = n.shape")
self.session("n.shape = (4, 5)")
self.isTrue("n.shape == (4, 5)")
# self.session("del = n.shape")
self.session("a = arange(20)")
self.session("gg = a[1:13:3]")
self.isValue('gg', np.array([1, 4, 7, 10]))
self.session("gg[:2] = array([0,2])")
        self.isValue('gg', np.array([0, 2, 7, 10]))
import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import math
import my_config
from prohmr.datasets.pw3d_eval_dataset import PW3DEvalDataset
from prohmr.configs import get_config, prohmr_config
from prohmr.models import ProHMR
from prohmr.models.smpl_mine import SMPL
from prohmr.utils.pose_utils import compute_similarity_transform_batch_numpy, scale_and_translation_transform_batch
from prohmr.utils.geometry import undo_keypoint_normalisation, orthographic_project_torch, convert_weak_perspective_to_camera_translation
from prohmr.utils.renderer import Renderer
from prohmr.utils.sampling_utils import compute_vertex_uncertainties_from_samples
import subsets
def evaluate_3dpw(model,
model_cfg,
eval_dataset,
metrics_to_track,
device,
save_path,
num_pred_samples,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000,
num_samples_to_visualise=10,
save_per_frame_uncertainty=True):
eval_dataloader = DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
drop_last=True,
num_workers=num_workers,
pin_memory=pin_memory)
smpl_neutral = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1).to(device)
smpl_male = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1, gender='male').to(device)
smpl_female = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1, gender='female').to(device)
metric_sums = {'num_datapoints': 0}
per_frame_metrics = {}
for metric in metrics_to_track:
metric_sums[metric] = 0.
per_frame_metrics[metric] = []
if metric == 'joints3D_coco_invis_samples_dist_from_mean':
metric_sums['num_invis_joints3Dsamples'] = 0
elif metric == 'hrnet_joints2D_l2es':
metric_sums['num_vis_hrnet_joints2D'] = 0
elif metric == 'hrnet_joints2Dsamples_l2es':
metric_sums['num_vis_hrnet_joints2Dsamples'] = 0
fname_per_frame = []
pose_per_frame = []
shape_per_frame = []
cam_per_frame = []
if save_per_frame_uncertainty:
vertices_uncertainty_per_frame = []
renderer = Renderer(model_cfg, faces=model.smpl.faces)
reposed_cam_wp = np.array([0.85, 0., -0.2])
reposed_cam_t = convert_weak_perspective_to_camera_translation(cam_wp=reposed_cam_wp,
focal_length=model_cfg.EXTRA.FOCAL_LENGTH,
resolution=model_cfg.MODEL.IMAGE_SIZE)
model.eval()
for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):
# if batch_num == 2:
# break
# ------------------------------- TARGETS and INPUTS -------------------------------
input = samples_batch['input'].to(device)
target_pose = samples_batch['pose'].to(device)
target_shape = samples_batch['shape'].to(device)
target_gender = samples_batch['gender'][0]
hrnet_joints2D_coco = samples_batch['hrnet_kps'].cpu().detach().numpy()
hrnet_joints2D_coco_vis = samples_batch['hrnet_kps_vis'].cpu().detach().numpy()
fname = samples_batch['fname']
if target_gender == 'm':
target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_reposed_smpl_output = smpl_male(betas=target_shape)
elif target_gender == 'f':
target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_reposed_smpl_output = smpl_female(betas=target_shape)
target_vertices = target_smpl_output.vertices
target_joints_h36mlsp = target_smpl_output.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :]
target_reposed_vertices = target_reposed_smpl_output.vertices
# ------------------------------- PREDICTIONS -------------------------------
out = model({'img': input})
"""
out is a dict with keys:
- pred_cam: (1, num_samples, 3) tensor, camera is same for all samples
- pred_cam_t: (1, num_samples, 3) tensor, camera is same for all samples
- This is just pred_cam converted from weak-perspective (i.e. [s, tx, ty]) to
full-perspective (i.e. [tx, ty, tz] and focal_length = 5000 --> this is basically just weak-perspective anyway)
- pred_smpl_params: dict with keys:
- global_orient: (1, num_samples, 1, 3, 3) tensor
- body_pose: (1, num_samples, 23, 3, 3) tensor
- betas: (1, num_samples, 10) tensor, betas are same for all samples
- pred_pose_6d: (1, num_samples, 144) tensor
- pred_vertices: (1, num_samples, 6890, 3) tensor
- pred_keypoints_3d: (1, num_samples, 44, 3) tensor
- pred_keypoints_2d: (1, num_samples, 44, 2) tensor
- log_prob: (1, num_samples) tensor
- conditioning_feats: (1, 2047) tensor
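        Note (added for clarity): sample index 0 of each per-sample output is
        treated as the distribution mode below; indices 1: are the
        num_pred_samples random draws.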
"""
pred_cam_wp = out['pred_cam'][:, 0, :]
pred_pose_rotmats_mode = out['pred_smpl_params']['body_pose'][:, 0, :, :, :]
pred_glob_rotmat_mode = out['pred_smpl_params']['global_orient'][:, 0, :, :, :]
pred_shape_mode = out['pred_smpl_params']['betas'][:, 0, :]
pred_pose_rotmats_samples = out['pred_smpl_params']['body_pose'][0, 1:, :, :, :]
pred_glob_rotmat_samples = out['pred_smpl_params']['global_orient'][0, 1:, :, :, :]
pred_shape_samples = out['pred_smpl_params']['betas'][0, 1:, :]
assert pred_pose_rotmats_samples.shape[0] == num_pred_samples
pred_smpl_output_mode = smpl_neutral(body_pose=pred_pose_rotmats_mode,
global_orient=pred_glob_rotmat_mode,
betas=pred_shape_mode,
pose2rot=False)
pred_vertices_mode = pred_smpl_output_mode.vertices # (1, 6890, 3)
pred_joints_h36mlsp_mode = pred_smpl_output_mode.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :] # (1, 14, 3)
pred_joints_coco_mode = pred_smpl_output_mode.joints[:, my_config.ALL_JOINTS_TO_COCO_MAP, :] # (1, 17, 3)
pred_vertices2D_mode = orthographic_project_torch(pred_vertices_mode, pred_cam_wp, scale_first=False)
pred_vertices2D_mode = undo_keypoint_normalisation(pred_vertices2D_mode, input.shape[-1])
pred_joints2D_coco_mode = orthographic_project_torch(pred_joints_coco_mode, pred_cam_wp) # (1, 17, 2)
pred_joints2D_coco_mode = undo_keypoint_normalisation(pred_joints2D_coco_mode, input.shape[-1])
pred_reposed_vertices_mean = smpl_neutral(betas=pred_shape_mode).vertices # (1, 6890, 3)
pred_smpl_output_samples = smpl_neutral(body_pose=pred_pose_rotmats_samples,
global_orient=pred_glob_rotmat_samples,
betas=pred_shape_samples,
pose2rot=False)
pred_vertices_samples = pred_smpl_output_samples.vertices # (num_pred_samples, 6890, 3)
pred_joints_h36mlsp_samples = pred_smpl_output_samples.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :] # (num_samples, 14, 3)
pred_joints_coco_samples = pred_smpl_output_samples.joints[:, my_config.ALL_JOINTS_TO_COCO_MAP, :] # (num_pred_samples, 17, 3)
pred_joints2D_coco_samples = orthographic_project_torch(pred_joints_coco_samples, pred_cam_wp) # (num_pred_samples, 17, 2)
pred_joints2D_coco_samples = undo_keypoint_normalisation(pred_joints2D_coco_samples, input.shape[-1])
pred_reposed_vertices_samples = smpl_neutral(body_pose=torch.zeros(num_pred_samples, 69, device=device, dtype=torch.float32),
global_orient=torch.zeros(num_pred_samples, 3, device=device, dtype=torch.float32),
betas=pred_shape_samples).vertices # (num_pred_samples, 6890, 3)
# ------------------------------------------------ METRICS ------------------------------------------------
# Numpy-fying targets
target_vertices = target_vertices.cpu().detach().numpy()
target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()
target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()
# Numpy-fying preds
pred_vertices_mode = pred_vertices_mode.cpu().detach().numpy()
pred_joints_h36mlsp_mode = pred_joints_h36mlsp_mode.cpu().detach().numpy()
pred_joints_coco_mode = pred_joints_coco_mode.cpu().detach().numpy()
pred_vertices2D_mode = pred_vertices2D_mode.cpu().detach().numpy()
pred_joints2D_coco_mode = pred_joints2D_coco_mode.cpu().detach().numpy()
pred_reposed_vertices_mean = pred_reposed_vertices_mean.cpu().detach().numpy()
pred_vertices_samples = pred_vertices_samples.cpu().detach().numpy()
pred_joints_h36mlsp_samples = pred_joints_h36mlsp_samples.cpu().detach().numpy()
pred_joints_coco_samples = pred_joints_coco_samples.cpu().detach().numpy()
pred_joints2D_coco_samples = pred_joints2D_coco_samples.cpu().detach().numpy()
pred_reposed_vertices_samples = pred_reposed_vertices_samples.cpu().detach().numpy()
# -------------- 3D Metrics with Mode and Minimum Error Samples --------------
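        # NOTE (added for clarity): 'pves' = per-vertex error, 'mpjpes' = mean per-joint
        # position error; '_sc' variants are scale+translation aligned, '_pa' variants are
        # Procrustes aligned, and '_samples_min' reports the best of the drawn samples.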
if 'pves' in metrics_to_track:
pve_batch = np.linalg.norm(pred_vertices_mode - target_vertices,
axis=-1) # (bs, 6890)
metric_sums['pves'] += np.sum(pve_batch) # scalar
per_frame_metrics['pves'].append(np.mean(pve_batch, axis=-1))
if 'pves_samples_min' in metrics_to_track:
pve_per_sample = np.linalg.norm(pred_vertices_samples - target_vertices, axis=-1) # (num samples, 6890)
min_pve_sample = np.argmin(np.mean(pve_per_sample, axis=-1))
pve_samples_min_batch = pve_per_sample[min_pve_sample] # (6890,)
metric_sums['pves_samples_min'] += np.sum(pve_samples_min_batch)
per_frame_metrics['pves_samples_min'].append(np.mean(pve_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Scale and translation correction
if 'pves_sc' in metrics_to_track:
pred_vertices_sc = scale_and_translation_transform_batch(
pred_vertices_mode,
target_vertices)
pve_sc_batch = np.linalg.norm(
pred_vertices_sc - target_vertices,
axis=-1) # (bs, 6890)
metric_sums['pves_sc'] += np.sum(pve_sc_batch) # scalar
per_frame_metrics['pves_sc'].append(np.mean(pve_sc_batch, axis=-1))
if 'pves_sc_samples_min' in metrics_to_track:
target_vertices_tiled = np.tile(target_vertices, (num_pred_samples, 1, 1)) # (num samples, 6890, 3)
pred_vertices_samples_sc = scale_and_translation_transform_batch(
pred_vertices_samples,
target_vertices_tiled)
pve_sc_per_sample = np.linalg.norm(pred_vertices_samples_sc - target_vertices_tiled, axis=-1) # (num samples, 6890)
min_pve_sc_sample = np.argmin(np.mean(pve_sc_per_sample, axis=-1))
pve_sc_samples_min_batch = pve_sc_per_sample[min_pve_sc_sample] # (6890,)
metric_sums['pves_sc_samples_min'] += np.sum(pve_sc_samples_min_batch)
per_frame_metrics['pves_sc_samples_min'].append(np.mean(pve_sc_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Procrustes analysis
if 'pves_pa' in metrics_to_track:
pred_vertices_pa = compute_similarity_transform_batch_numpy(pred_vertices_mode, target_vertices)
pve_pa_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (bs, 6890)
metric_sums['pves_pa'] += np.sum(pve_pa_batch) # scalar
per_frame_metrics['pves_pa'].append(np.mean(pve_pa_batch, axis=-1))
if 'pves_pa_samples_min' in metrics_to_track:
target_vertices_tiled = np.tile(target_vertices, (num_pred_samples, 1, 1)) # (num samples, 6890, 3)
pred_vertices_samples_pa = compute_similarity_transform_batch_numpy(
pred_vertices_samples,
target_vertices_tiled)
pve_pa_per_sample = np.linalg.norm(pred_vertices_samples_pa - target_vertices_tiled, axis=-1) # (num samples, 6890)
min_pve_pa_sample = np.argmin(np.mean(pve_pa_per_sample, axis=-1))
pve_pa_samples_min_batch = pve_pa_per_sample[min_pve_pa_sample] # (6890,)
metric_sums['pves_pa_samples_min'] += np.sum(pve_pa_samples_min_batch)
per_frame_metrics['pves_pa_samples_min'].append(np.mean(pve_pa_samples_min_batch, axis=-1, keepdims=True)) # (1,)
if 'pve-ts' in metrics_to_track:
pvet_batch = np.linalg.norm(pred_reposed_vertices_mean - target_reposed_vertices, axis=-1)
metric_sums['pve-ts'] += np.sum(pvet_batch) # scalar
per_frame_metrics['pve-ts'].append(np.mean(pvet_batch, axis=-1))
if 'pve-ts_samples_min' in metrics_to_track:
pvet_per_sample = np.linalg.norm(pred_reposed_vertices_samples - target_reposed_vertices, axis=-1) # (num samples, 6890)
min_pvet_sample = np.argmin(np.mean(pvet_per_sample, axis=-1))
pvet_samples_min_batch = pvet_per_sample[min_pvet_sample] # (6890,)
metric_sums['pve-ts_samples_min'] += np.sum(pvet_samples_min_batch)
per_frame_metrics['pve-ts_samples_min'].append(np.mean(pvet_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Scale and translation correction
if 'pve-ts_sc' in metrics_to_track:
pred_reposed_vertices_sc = scale_and_translation_transform_batch(
pred_reposed_vertices_mean,
target_reposed_vertices)
pvet_scale_corrected_batch = np.linalg.norm(
pred_reposed_vertices_sc - target_reposed_vertices,
axis=-1) # (bs, 6890)
metric_sums['pve-ts_sc'] += np.sum(pvet_scale_corrected_batch) # scalar
per_frame_metrics['pve-ts_sc'].append(np.mean(pvet_scale_corrected_batch, axis=-1))
if 'pve-ts_sc_samples_min' in metrics_to_track:
target_reposed_vertices_tiled = np.tile(target_reposed_vertices, (num_pred_samples, 1, 1)) # (num samples, 6890, 3)
pred_reposed_vertices_samples_sc = scale_and_translation_transform_batch(
pred_reposed_vertices_samples,
target_reposed_vertices_tiled)
pvet_sc_per_sample = np.linalg.norm(pred_reposed_vertices_samples_sc - target_reposed_vertices_tiled, axis=-1) # (num samples, 6890)
min_pvet_sc_sample = np.argmin(np.mean(pvet_sc_per_sample, axis=-1))
pvet_sc_samples_min_batch = pvet_sc_per_sample[min_pvet_sc_sample] # (6890,)
metric_sums['pve-ts_sc_samples_min'] += np.sum(pvet_sc_samples_min_batch)
per_frame_metrics['pve-ts_sc_samples_min'].append(np.mean(pvet_sc_samples_min_batch, axis=-1, keepdims=True)) # (1,)
if 'mpjpes' in metrics_to_track:
mpjpe_batch = np.linalg.norm(pred_joints_h36mlsp_mode - target_joints_h36mlsp, axis=-1) # (bs, 14)
metric_sums['mpjpes'] += np.sum(mpjpe_batch) # scalar
per_frame_metrics['mpjpes'].append(np.mean(mpjpe_batch, axis=-1))
if 'mpjpes_samples_min' in metrics_to_track:
mpjpe_per_sample = np.linalg.norm(pred_joints_h36mlsp_samples - target_joints_h36mlsp, axis=-1) # (num samples, 14)
min_mpjpe_sample = np.argmin(np.mean(mpjpe_per_sample, axis=-1))
mpjpe_samples_min_batch = mpjpe_per_sample[min_mpjpe_sample] # (14,)
metric_sums['mpjpes_samples_min'] += np.sum(mpjpe_samples_min_batch)
per_frame_metrics['mpjpes_samples_min'].append(np.mean(mpjpe_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Scale and translation correction
if 'mpjpes_sc' in metrics_to_track:
pred_joints_h36mlsp_sc = scale_and_translation_transform_batch(
pred_joints_h36mlsp_mode,
target_joints_h36mlsp)
mpjpe_sc_batch = np.linalg.norm(
pred_joints_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
metric_sums['mpjpes_sc'] += np.sum(mpjpe_sc_batch) # scalar
per_frame_metrics['mpjpes_sc'].append(np.mean(mpjpe_sc_batch, axis=-1))
if 'mpjpes_sc_samples_min' in metrics_to_track:
target_joints_h36mlsp_tiled = np.tile(target_joints_h36mlsp, (num_pred_samples, 1, 1)) # (num samples, 14, 3)
pred_joints_h36mlsp_samples_sc = scale_and_translation_transform_batch(
pred_joints_h36mlsp_samples,
target_joints_h36mlsp_tiled)
mpjpe_sc_per_sample = np.linalg.norm(pred_joints_h36mlsp_samples_sc - target_joints_h36mlsp_tiled, axis=-1) # (num samples, 14)
min_mpjpe_sc_sample = np.argmin(np.mean(mpjpe_sc_per_sample, axis=-1))
mpjpe_sc_samples_min_batch = mpjpe_sc_per_sample[min_mpjpe_sc_sample] # (14,)
metric_sums['mpjpes_sc_samples_min'] += np.sum(mpjpe_sc_samples_min_batch)
per_frame_metrics['mpjpes_sc_samples_min'].append(np.mean(mpjpe_sc_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Procrustes analysis
if 'mpjpes_pa' in metrics_to_track:
pred_joints_h36mlsp_pa = compute_similarity_transform_batch_numpy(pred_joints_h36mlsp_mode, target_joints_h36mlsp)
            mpjpe_pa_batch = np.linalg.norm(pred_joints_h36mlsp_pa - target_joints_h36mlsp, axis=-1)
import numpy as np
from pypolyagamma import MultinomialRegression, TreeStructuredMultinomialRegression
from rslds.util import psi_to_pi, one_hot, logistic
class InputHMMTransitions(TreeStructuredMultinomialRegression):
"""
Model the transition probability as a multinomial
regression whose inputs include the previous state
as well as some covariates. For example, the covariates
could be an external signal or even the latent states
of a switching linear dynamical system.
"""
def __init__(self, num_states, covariate_dim, **kwargs):
super(InputHMMTransitions, self).\
__init__(1, num_states, num_states+covariate_dim, **kwargs)
self.num_states = num_states
self.covariate_dim = covariate_dim
def get_trans_matrices(self, X):
""" return a stack of transition matrices, one for each input """
mu, W = self.b, self.A
W_markov = W[:,:self.num_states]
W_covs = W[:,self.num_states:]
# compute the contribution of the covariate to transmat
psi_X = X.dot(W_covs.T)
# compute the transmat stack without the covariate contributions
psi_Z = W_markov.T
# add the (K x K-1) and (T x K-1) matrices together such that they
# broadcast into a [T x K x K-1] stack of trans matrices
trans_psi = psi_X[:, None, :] + psi_Z
# Add the (K-1) mean
trans_psi += mu.reshape((self.D_out,))
# Get choice probabilities for each internal node and
# multiply choice probabilities to get pi
prs = logistic(trans_psi)
pi = np.empty((X.shape[0], self.K, self.K))
for k in range(self.K):
chk = self.choices[k, self.ancestors[k]]
prk = prs[..., self.ancestors[k]]
pi[..., k] = np.prod(chk * prk + (1 - chk) * (1 - prk), axis=-1)
assert np.allclose(pi.sum(axis=-1), 1.0)
return pi
def resample(self, stateseqs=None, covseqs=None, omegas=None, **kwargs):
""" conditioned on stateseqs and covseqs, stack up all of the data
and use the PGMult class to resample """
# assemble all of the discrete states into a dataset
def align_lags(stateseq, covseq):
prev_state = one_hot(stateseq[:-1], self.num_states)
next_state = one_hot(stateseq[1:], self.num_states)
return np.column_stack([prev_state, covseq]), next_state
# Get the stacked previous states, covariates, and next states
datas = [align_lags(z,x) for z, x in zip(stateseqs, covseqs)]
# Clip the last data column since it is redundant
# and not expected by the MultinomialRegression
datas = [(x, y[:,:-1]) for x, y in datas]
masks = [np.ones(y.shape, dtype=bool) for _,y in datas]
super(InputHMMTransitions, self).\
resample(datas, mask=masks, omega=omegas)
class StickyInputHMMTransitions(InputHMMTransitions):
"""
Introduce a "stickiness" parameter to capture the tendency
to stay in the same state. In the standard InputHMM model,
psi_t = W_markov * I[z_{t-1}] + W_input * x_{t-1} + b.
Now we want W_markov[k,k] ~ N(kappa, sigma^2) with kappa > 0,
and W_markov[k,j] ~ N(0, sigma^2) for j \neq k.
"""
def __init__(self, num_states, covariate_dim, kappa=1.0, **kwargs):
assert "mu_A" not in kwargs, "StickyInputHMMTransitions overrides provided mu_A"
mu_A = np.zeros((num_states-1, num_states+covariate_dim))
mu_A[:, :num_states-1] = kappa * np.eye(num_states-1)
kwargs["mu_A"] = mu_A
super(StickyInputHMMTransitions, self).\
__init__(num_states, covariate_dim, **kwargs)
class InputOnlyHMMTransitions(InputHMMTransitions):
"""
Model the transition probability as a multinomial
regression that depends only on the covariates.
For example, the covariates
could be an external signal or even the latent states
of a switching linear dynamical system.
"""
def __init__(self, num_states, covariate_dim, **kwargs):
super(InputOnlyHMMTransitions, self).\
__init__(num_states, covariate_dim, **kwargs)
self.A[:, :self.num_states] = 0
def resample(self, stateseqs=None, covseqs=None, omegas=None, **kwargs):
""" conditioned on stateseqs and covseqs, stack up all of the data
and use the PGMult class to resample """
# Zero out the previous state in the regression
def align_lags(stateseq, covseq):
prev_state = np.zeros((stateseq.shape[0]-1, self.num_states))
next_state = one_hot(stateseq[1:], self.num_states)
return np.column_stack([prev_state, covseq]), next_state
# Get the stacked previous states, covariates, and next states
datas = [align_lags(z,x) for z, x in zip(stateseqs, covseqs)]
# Clip the last data column since it is redundant
# and not expected by the MultinomialRegression
datas = [(x, y[:,:-1]) for x, y in datas]
masks = [np.ones(y.shape, dtype=bool) for _,y in datas]
super(InputHMMTransitions, self).\
resample(datas, mask=masks, omega=omegas)
# Zero out the weights on the previous state
# (the previous state inputs were all zero, so these
# weights are meaningless)
self.A[:, :self.num_states] = 0
class StickyInputOnlyHMMTransitions(InputHMMTransitions):
"""
Hacky way to implement the sticky input only model in which
psi_{t,k} | z_{t-1} =
kappa_k + w_j \dot x_{t-1} + b_j if z_{t-1} = k
0 + w_j \dot x_{t-1} + b_j otherwise
We just set the prior such that the off-diagonal entries of
W_{markov} are effectively zero by setting the variance of
these entries to be super small.
"""
def __init__(self, num_states, covariate_dim, kappa=1.0, sigmasq_kappa=1e-8, **kwargs):
# Set the mean of A
K, D = num_states, covariate_dim
assert "mu_A" not in kwargs, "StickyInputHMMTransitions overrides provided mu_A"
mu_A = np.zeros((K-1, K+D))
mu_A[:,:K-1] = kappa * np.eye(K-1)
kwargs["mu_A"] = mu_A
# Set the covariance of A
if "sigmasq_A" in kwargs:
assert np.isscalar(kwargs["sigmasq_A"])
sig0 = kwargs["sigmasq_A"]
else:
sig0 = 1.0
sigmasq_A = np.zeros((K-1, K+D, K+D))
for k in range(K-1):
sigmasq_A[k, :K, :K] = 1e-8 * np.eye(K)
sigmasq_A[k, k, k] = sigmasq_kappa
sigmasq_A[k, K:, K:] = sig0 * np.eye(D)
kwargs["sigmasq_A"] = sigmasq_A
super(StickyInputOnlyHMMTransitions, self).\
__init__(num_states, covariate_dim, **kwargs)
import autograd.numpy as anp
import autograd.scipy.misc as amisc
from autograd import grad
class _SoftmaxInputHMMTransitionsBase(object):
"""
Like above but with a softmax transition model.
log p(z_{t+1} | z_t, x_t) = z_t^T log pi z_{t+1} + x_t^T W z_{t+1} - Z
where Z = log ( \sum_k exp { z_t^T log pi e_k + x_t^T W e_k} )
CONVENTION: logpi[i,j] = Pr(z_{t+1} = j | z_t = i).
TODO: We could include a redundant affine term b^T z_{t+1} as well.
This would let us seamlessly handle the "input-only" model.
"""
def __init__(self, num_states, covariate_dim,
mu_0=None, Sigma_0=None,
logpi=None, W=None):
self.num_states = num_states
self.covariate_dim = covariate_dim
self.D_out = num_states
self.D_in = num_states + covariate_dim
if logpi is not None:
assert logpi.shape == (num_states, num_states)
self.logpi = logpi
else:
self.logpi = np.zeros((num_states, num_states))
if W is not None:
assert W.shape == (covariate_dim, num_states)
self.W = W
else:
self.W = np.zeros((covariate_dim, num_states))
mu_0 = np.zeros(self.D_in) if mu_0 is None else mu_0
Sigma_0 = 10000. * np.eye(self.D_in) if Sigma_0 is None else Sigma_0
assert mu_0.shape == (self.D_in,)
assert Sigma_0.shape == (self.D_in, self.D_in)
self.h_0 = np.linalg.solve(Sigma_0, mu_0)
self.J_0 = np.linalg.inv(Sigma_0)
def log_prior(self):
# Normal N(mu | mu_0, Sigma / kappa_0)
from scipy.linalg import solve_triangular
sigma = np.linalg.inv(self.J_0)
mu = sigma.dot(self.h_0)
S_chol = np.linalg.cholesky(sigma)
# Stack log pi and W
X = np.vstack((self.logpi, self.W)).T
lp = 0
for d in range(self.D_out):
x = solve_triangular(S_chol, X[d] - mu, lower=True)
lp += -1. / 2. * np.dot(x, x) \
- self.D_in / 2 * np.log(2 * np.pi) \
- np.log(S_chol.diagonal()).sum()
return lp
### HMC
def get_log_trans_matrices(self, X):
"""
Get log transition matrices as a function of X
:param X: inputs/covariates
:return: stack of transition matrices log A[t] \in Kin x Kout
"""
# compute the contribution of the covariate to transition matrix
psi_X = np.dot(X, self.W)
# add the (T x Kout) and (Kin x Kout) matrices together such that they
# broadcast into a (T x Kin x Kout) stack of matrices
psi = psi_X[:, None, :] + self.logpi
# apply softmax and normalize over outputs
log_trans_matrices = psi - amisc.logsumexp(psi, axis=2, keepdims=True)
return log_trans_matrices
def get_trans_matrices(self, X):
"""
Get transition matrices as a function of X
:param X: inputs/covariates
:return: stack of transition matrices A[t] \in Kin x Kout
"""
log_trans_matrices = self.get_log_trans_matrices(X)
return np.exp(log_trans_matrices)
def initialize_with_logistic_regression(self, zs, xs, initialize=False):
        from sklearn.linear_model import LogisticRegression
if not hasattr(self, '_lr'):
self._lr = LogisticRegression(verbose=False,
multi_class="multinomial",
solver="lbfgs",
warm_start=True,
max_iter=10)
lr = self._lr
# Make the covariates
K, D = self.num_states, self.covariate_dim
# Split zs into prevs and nexts
zps = zs[:-1] if isinstance(zs, np.ndarray) else np.concatenate([z[:-1] for z in zs], axis=0)
zns = zs[1:] if isinstance(zs, np.ndarray) else np.concatenate([z[1:] for z in zs], axis=0)
xps = xs[:-1] if isinstance(xs, np.ndarray) else np.concatenate([x[:-1] for x in xs], axis=0)
assert zps.shape[0] == xps.shape[0]
assert zps.ndim == 1 and zps.dtype == np.int32 and zps.min() >= 0 and zps.max() < K
assert zns.ndim == 1 and zns.dtype == np.int32 and zns.min() >= 0 and zns.max() < K
assert xps.ndim == 2 and xps.shape[1] == D
used = np.bincount(zns, minlength=K) > 0
K_used = np.sum(used)
lr_X = np.column_stack((one_hot(zps, K), xps))
lr_y = zns
# The logistic regression solver fails if we only have one class represented
# In this case, set the regression weights to zero and set logpi to have
# high probability of the visited class
if K_used == 1:
self.W = np.zeros((D, K))
self.log_pi = np.zeros((K, K))
self.log_pi[:, used] = 3.0
else:
lr.fit(lr_X, lr_y)
# Now convert the logistic regression into weights
if K_used > 2:
self.W = np.zeros((D, K))
self.W[:, used] = lr.coef_[:, K:].T
self.logpi = np.zeros((K, K))
self.logpi[:, used] = lr.coef_[:, :K].T
self.logpi[:, used] += lr.intercept_[None, :]
self.logpi[:, ~used] += -100.
elif K_used == 2:
# LogisticRegression object only represents one
# set of weights for binary problems
self.W = np.zeros((D, K))
self.W[:, 1] = lr.coef_[0, K:]
self.logpi = np.zeros((K, K))
self.logpi[:, 1] = lr.coef_[0, :K].T
self.logpi[:, 1] += lr.intercept_
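# NOTE: illustrative sketch, not part of the original file. The softmax transition
# model defined above can be exercised directly; every A[t] returned by
# get_trans_matrices is a row-stochastic K x K matrix.
def _demo_softmax_transitions():
    K, D = 3, 2
    trans = _SoftmaxInputHMMTransitionsBase(num_states=K, covariate_dim=D)
    X = np.random.randn(5, D)                 # 5 time steps of covariates
    A = trans.get_trans_matrices(X)           # shape (5, K, K)
    assert np.allclose(A.sum(axis=-1), 1.0)   # each row is a distribution
    return A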
class _SoftmaxInputHMMTransitionsHMC(_SoftmaxInputHMMTransitionsBase):
def __init__(self, num_states, covariate_dim,
mu_0=None, Sigma_0=None,
logpi=None, W=None):
super(_SoftmaxInputHMMTransitionsHMC, self).__init__(
num_states, covariate_dim,
mu_0=mu_0, Sigma_0=Sigma_0, logpi=logpi, W=W)
# HMC params
self.step_sz = 0.01
self.accept_rate = 0.9
self.target_accept_rate = 0.9
def joint_log_probability(self, logpi, W, stateseqs, covseqs):
K, D = self.num_states, self.covariate_dim
# Compute the objective
ll = 0
for z, x in zip(stateseqs, covseqs):
T = z.size
assert x.ndim == 2 and x.shape[0] == T - 1
z_prev = one_hot(z[:-1], K)
z_next = one_hot(z[1:], K)
# Numerator
tmp = anp.dot(z_prev, logpi) + anp.dot(x, W)
ll += anp.sum(tmp * z_next)
# Denominator
Z = amisc.logsumexp(tmp, axis=1)
ll -= anp.sum(Z)
return ll
def resample(self, stateseqs=None, covseqs=None,
n_steps=10, **kwargs):
K, D = self.num_states, self.covariate_dim
if K == 1:
return
covseqs = [np.row_stack([c, np.zeros(D)]) for c in covseqs]
# HACK: For now, replace HMC with a deterministic optimization
self.initialize_with_logistic_regression(stateseqs, covseqs, initialize=True)
# # Run HMC
# from hips.inference.hmc import hmc
# def hmc_objective(params):
# # Unpack params
# K, D = self.num_states, self.covariate_dim
# logpi = params[:K ** 2].reshape((K, K))
# W = params[K ** 2:].reshape((D, K))
# return self.joint_log_probability(logpi, W, stateseqs, covseqs)
#
# grad_hmc_objective = grad(hmc_objective)
# x0 = np.concatenate((np.ravel(self.logpi), np.ravel(self.W)))
# xf, self.step_sz, self.accept_rate = \
# hmc(hmc_objective, grad_hmc_objective,
# step_sz=self.step_sz, n_steps=n_steps, q_curr=x0,
# negative_log_prob=False,
# adaptive_step_sz=True,
# avg_accept_rate=self.accept_rate)
#
# self.logpi = xf[:K**2].reshape((K, K))
# self.W = xf[K**2:].reshape((D, K))
class _SoftmaxInputHMMTransitionsEM(_SoftmaxInputHMMTransitionsBase):
def max_likelihood(self, stats):
"""
Update the expected transition matrix with a bunch of stats
:param stats: E_zp1_uT, E_uuT, E_u, a, lambda_bs from the states model
:param prob: minibatch probability
:param stepsize: svi step size
"""
K, D = self.num_states, self.covariate_dim
E_u_zp1T, E_uuT, E_u, a, lambda_bs = stats
# Update statistics each row of A
for k in range(self.D_out):
# Jk = self.J_0 + 2 * lambda_bs[:,k][:,None,None] * E_uuT
Jk = self.J_0 + 2 * np.einsum('t, tij -> ij', lambda_bs[:, k], E_uuT)
hk = self.h_0 + E_u_zp1T[:, :, k].sum(0)
hk -= np.einsum('t, ti -> i', (0.5 - 2 * lambda_bs[:, k] * a), E_u)
# Update the mean field natural parameters
ak = np.linalg.solve(Jk, hk)
self.logpi[:,k] = ak[:K]
self.W[:,k] = ak[K:]
class _SoftmaxInputHMMTransitionsMeanField(_SoftmaxInputHMMTransitionsBase):
def __init__(self, num_states, covariate_dim,
mu_0=None, Sigma_0=None,
logpi=None, W=None):
super(_SoftmaxInputHMMTransitionsMeanField, self).__init__(
num_states, covariate_dim,
mu_0=mu_0, Sigma_0=Sigma_0, logpi=logpi, W=W)
# Mean field natural parameters
self.mf_J = np.array([self.J_0.copy() for _ in range(self.D_out)])
self.mf_h = np.array([self.h_0.copy() for Jd in self.mf_J])
self._mf_Sigma = self._mf_mu = self._mf_mumuT = None
### Mean field
@property
def expected_W(self):
# _mf_mu = [E[logpi], E[W]]
return self._mf_mu[:, self.num_states:].T
@property
def expected_logpi(self):
# _mf_mu = [E[logpi], E[W]]
return self._mf_mu[:, :self.num_states].T
@property
def exp_expected_logpi(self):
P = np.exp(self.expected_logpi)
        P /= np.sum(P, axis=1, keepdims=True)
        return P
import numpy as np
import matplotlib.pyplot as plt
# from scipy import signal
from matplotlib import animation
# import scipy.constants as con
from IPython.display import HTML
from tqdm import tqdm
# import matplotlib.cm as cm
c = 1
def resonator_modes(t, z, n_modes=3, random_phases=False, plot=True,
figuresize=(10, 4), spectrum_std=1000, save_in=""):
# length of the resonator
L = z.max() - z.min()
# calculate the frequency difference between two neighbouring modes of
# the resonator
delta_nu = c / (2 * L)
frequencies = np.array([delta_nu * i for i in range(1, n_modes+1)])
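    # e.g. with c = 1 (set at module level) and a resonator of length L = 1, the
    # mode spacing is delta_nu = 0.5, so the first three modes sit at 0.5, 1.0, 1.5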
phases = np.zeros(n_modes)
if random_phases is True:
        phases = np.random.uniform(0, 200, n_modes)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from matplotlib.patches import Ellipse
# For reproducibility
np.random.seed(1000)
nb_samples = 300
nb_centers = 2
if __name__ == '__main__':
# Create the dataset
X, Y = make_blobs(n_samples=nb_samples, n_features=2, center_box=[-1, 1], centers=nb_centers,
cluster_std=[1.0, 0.6], random_state=1000)
# Show the dataset
sns.set()
fig, ax = plt.subplots(figsize=(15, 9))
ax.scatter(X[:, 0], X[:, 1], s=120)
ax.set_xlabel(r'$x_0$', fontsize=14)
ax.set_ylabel(r'$x_1$', fontsize=14)
plt.show()
# Train the model
gm = GaussianMixture(n_components=2, random_state=1000)
gm.fit(X)
Y_pred = gm.fit_predict(X)
print('Means: \n{}'.format(gm.means_))
print('Covariance matrices: \n{}'.format(gm.covariances_))
print('Weights: \n{}'.format(gm.weights_))
m1 = gm.means_[0]
m2 = gm.means_[1]
c1 = gm.covariances_[0]
c2 = gm.covariances_[1]
we1 = 1 + gm.weights_[0]
we2 = 1 + gm.weights_[1]
# Eigendecompose the covariances
w1, v1 = np.linalg.eigh(c1)
w2, v2 = np.linalg.eigh(c2)
nv1 = v1 / np.linalg.norm(v1)
nv2 = v2 / np.linalg.norm(v2)
print('Eigenvalues 1: \n{}'.format(w1))
print('Eigenvectors 1: \n{}'.format(nv1))
print('Eigenvalues 2: \n{}'.format(w2))
print('Eigenvectors 2: \n{}'.format(nv2))
    a1 = np.arccos(np.dot(nv1[:, 1], [1.0, 0.0]))
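    # a1 is the angle (in radians) between the principal eigenvector of the first
    # component's covariance and the x-axis; np.linalg.eigh returns eigenvalues in
    # ascending order, so column 1 of nv1 is the major-axis direction (presumably
    # used to orient the Ellipse patches imported above)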
import time
import math
import random
import numpy as np
import basis.robot_math as rm
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
from scipy.optimize import minimize
from scipy.optimize import Bounds
import rtree_point as rtp
# NOTE: write your own extend_state_callback and goal_test_callback to implement your own kinodyanmics
class Kinodynamics(object):
def __init__(self, time_interval=.1):
self.linear_speed_rng = [-1.0, 1.0]
self.angular_speed_rng = [-.5, .5]
self.linear_acc = 1.0
self.angular_acc = 3.5
self.time_interval = time_interval
self.weights = np.array([1, 1, 0, 0])
self.epsilon = 1e-3
def extend_state_callback(self, state1, state2):
"""
extend state call back for two-wheel car rbt_s
:param state1: x, y, theta, x_dot, y_dot, theta_dot
:param state2:
:return:
"""
random_step_array = [[self.linear_acc * self.time_interval, 0], \
[-self.linear_acc * self.time_interval, 0], \
[0, -self.angular_acc * self.time_interval], \
[0, self.angular_acc * self.time_interval], \
[np.random.rand()*self.linear_acc * self.time_interval, 0], \
[-np.random.rand()*self.linear_acc * self.time_interval, 0], \
[0, -np.random.rand()*self.angular_acc * self.time_interval], \
[0, np.random.rand()*self.angular_acc * self.time_interval]]
current_speed = np.array([np.linalg.norm(state1[3:5]), state1[5]])
min_value = 1e12
return_result = None
temp_state_list = []
for random_step in random_step_array:
# random increase speed, and clip the too large ones
next_speed = current_speed + np.array(random_step)
next_speed[0] = np.clip(next_speed[0], self.linear_speed_rng[0], self.linear_speed_rng[1])
next_speed[1] = np.clip(next_speed[1], self.angular_speed_rng[0], self.angular_speed_rng[1])
next_angle = (state1[2] + next_speed[1])/2* self.time_interval
next_annihilator = np.array([[np.cos(next_angle), np.sin(next_angle), 0], [0, 0, 1]])
new_state_speed = next_speed.dot(next_annihilator)
new_state_conf = state1[:3] + (state1[3:]+new_state_speed)/2 * self.time_interval
new_state = np.hstack((new_state_conf, new_state_speed))
temp_state_list.append(new_state)
diff_value = self.metric(new_state, state2)
if diff_value < min_value:
min_value = diff_value
return_result = new_state
return return_result, temp_state_list
def annihilator(self, theta_value):
return np.array([[math.cos(theta_value), math.sin(theta_value), 0],
[0, 0, 1]])
def _goal_function(self, x):
new_state = np.zeros_like(self._state1)
new_state_angle = self._state1[2] + self._state1[5] * self.time_interval
# new_state_angle = self._state1[2] + (self._state1[5] + x[1]) / 2 * self.time_intervals
new_state[3:] = x.dot(self.annihilator(new_state_angle))
new_state[:3] = self._state1[:3] + (self._state1[3:] + new_state[3:]) / 2 * self.time_interval
return_value = self.metric(new_state, self._state2)
return return_value
def metric(self, state1, state2):
diff_state = state1 - state2
measurement = np.array([np.linalg.norm(diff_state[:2]),
np.abs(diff_state[2]),
np.linalg.norm(diff_state[3:5]),
np.abs(diff_state[5])])
return self.weights.dot(measurement)
def set_goal_state(self, goal_state):
self._goal_state = goal_state
# def extend_state_callback(self, state1, state2):
# """
# extend state call back for two-wheel car rbt_s
# :param state1: x, y, theta, x_dot, y_dot, theta_dot
# :param state2:
# :return:
# """
# self._state1 = state1
# self._state2 = state2
# s1_ls = np.linalg.norm(state1[3:5]) # linear speed at state 1
# s1_as = state1[5] # angular speed at state 1
# if np.sign(math.cos(state1[2])) != np.sign(state1[3]):
# s1_ls = -s1_ls
# x_bnds = Bounds(lb=[self.linear_speed_rng[0], self.angular_speed_rng[0]],
# ub=[self.linear_speed_rng[1], self.angular_speed_rng[1]])
# # optmize the ns_bnds for t+1
# # acc constraints
# ineq_cons = {'type': 'ineq',
# 'fun': lambda x: np.array([self.linear_acc ** 2 - ((x[0] - s1_ls) / self.time_intervals) ** 2,
# self.angular_acc ** 2 - ((x[1] - s1_as) / self.time_intervals) ** 2])}
# x0 = np.array([s1_ls, s1_as])
# res = minimize(self._goal_function, x0,
# method='SLSQP', constraints=[ineq_cons],
# options={'ftol': self.epsilon, 'disp': True},
# bounds=x_bnds)
# return_state = np.zeros_like(state1)
# return_state_angle = state1[2] + state1[5] * self.time_intervals
# # return_state_angle = state1[2] + (state1[5] + res.x[1]) / 2 * self.time_intervals
# return_state[3:] = res.x.dot(self.annihilator(return_state_angle))
# return_state[:3] = state1[:3] + (state1[3:] + return_state[3:]) / 2 * self.time_intervals
# current_metric = self.metric(state1, state2)
# new_metric = self.metric(return_state, state2)
# print("control ", res.x)
# print("this ", state1)
# print("next ", return_state)
# print("rand ", state2)
# print("dist this to rand", self.metric(state1, state2))
# print("dist next to rand", self.metric(return_state, state2))
# if current_metric < new_metric+self.epsilon:
# return None
# else:
# return return_state
def goal_test_callback(self, state, goal_state):
goal_dist = self.metric(state, goal_state)
if goal_dist < 1e-2:
return True
else:
return False
class RRTKinodynamic(object):
def __init__(self, robot_s, kds):
"""
:param robot_s:
:param extend_conf_callback: call back function for extend_conf
"""
self.robot_s = robot_s.copy()
self.roadmap = nx.Graph()
self.start_conf = None
self.goal_conf = None
self.roadmap = nx.DiGraph()
self.kds = kds
def _is_collided(self,
component_name,
conf,
obstacle_list=[],
otherrobot_list=[]):
self.robot_s.fk(component_name=component_name, jnt_values=conf)
return self.robot_s.is_collided(obstacle_list=obstacle_list, otherrobot_list=otherrobot_list)
def _sample_conf(self, component_name, rand_rate, default_conf):
rand_number = np.random.uniform(0, 100.0)
print("random number/rate: ", rand_number, rand_rate)
if rand_number < rand_rate:
rand_conf = self.robot_s.rand_conf(component_name=component_name)
rand_ls = np.random.uniform(self.kds.linear_speed_rng[0], self.kds.linear_speed_rng[1])
rand_as = np.random.uniform(self.kds.angular_speed_rng[0], self.kds.angular_speed_rng[1])
rand_speed = np.array([rand_ls, rand_as]).dot(self.kds.annihilator(rand_conf[2]))
return np.hstack((rand_conf, rand_speed))
else:
return default_conf
def _get_nearest_nid(self, roadmap, new_conf):
"""
convert to numpy to accelerate access
:param roadmap:
:param new_conf:
:return:
author: weiwei
date: 20210523
"""
nodes_dict = dict(roadmap.nodes(data='conf'))
nodes_key_list = list(nodes_dict.keys())
nodes_value_list = list(nodes_dict.values())
conf_array = np.array(nodes_value_list)
# diff_conf_array = np.linalg.norm(conf_array[:,:self.kds.conf_dof] - new_state[:self.kds.conf_dof], axis=1)
diff_conf = conf_array - new_conf
diff_conf_array = self.kds.weights[0] * np.linalg.norm(diff_conf[:, :2], axis=1) + \
self.kds.weights[1] * np.abs(diff_conf[:, 2]) + \
self.kds.weights[2] * np.linalg.norm(diff_conf[:, 3:5], axis=1) + \
self.kds.weights[3] * np.abs(diff_conf[:, 5])
"""
This module collects measures for trained LDA models to facilitate model
selection.
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
import pandas as pd
import numpy as np
np.random.seed(42)
import pickle
from tqdm import tqdm
from gensim.models import LdaModel, CoherenceModel
from itertools import combinations
def get_model(num_topics):
"""get_model. Retrieves a saved trained LDA model from the `model` folder
with the specified number of topics.
Parameters
----------
num_topics : int
number of topics for the selected LDA model.
"""
file_path = f'../model/LDA-{num_topics}topics'
try:
lda_model = LdaModel.load(file_path)
except:
print(
f'Model not found. Train a model with {num_topics} topics and try again.'
)
return None
return lda_model
def get_topics(lda_model):
"""get_topics. Extract list of topics with top twenty words.
Parameters
----------
lda_model : gensim.models.ldamulticore.LdaMulticore
Trained LDA model. Takes output from get_model.
"""
topics = lda_model.show_topics(num_topics = -1, num_words=20, formatted=False)
return topics
def jaccard_similarity(set1, set2):
"""jaccard_similarity. Computes the Jaccard similarity between two sets.
This function is symmetric in its inputs. Jaccard similarity is the
proportion of the union of two sets that is contained in their
intersection. Values range from zero to one. A value of one indicates that
sets are equal and a value of zero indicates that sets are disjoint.
Parameters
----------
set1 : set
Any set.
set2 : set
Any set.
"""
intersection = set1.intersection(set2)
union = set1.union(set2)
similarity = len(intersection)/len(union)
return similarity
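# Illustrative doctest-style example (hypothetical word sets, not taken from any model):
# jaccard_similarity({"gene", "cell", "expression"}, {"gene", "expression", "protein"})
# -> 0.5, since the two sets share 2 of their 4 distinct words.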
def topic_word_set(topic):
"""topic_word_set. Takes a topic from an LDA model and returns a set of top
words for the topic.
Parameters
----------
topic : (int, [ (str, float)])
A topic from a LDA model. Input should be one element of the list
returned by get_topics.
"""
word_tuple_list = topic[1]
word_set = {word_tuple[0] for word_tuple in word_tuple_list}
return word_set
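# Illustrative example (hypothetical topic tuple in the gensim show_topics format):
# topic_word_set((0, [("gene", 0.12), ("cell", 0.08)])) -> {"gene", "cell"}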
def mean_jaccard_similarity(topics):
"""mean_jaccard_similarity. Computes the mean Jaccard similarity between
pairs of topics from a LDA model. Lower mean Jaccard similarity generally
indicates a better model.
Parameters
----------
topics : [(int, [(str, float)])]
Takes output from get_topics.
"""
N = len(topics)
similarity_list = []
combs = combinations(topics, 2)
for topic1, topic2 in combs:
set1 = topic_word_set(topic1)
set2 = topic_word_set(topic2)
similarity_list.append(jaccard_similarity(set1, set2))
mean_similarity = np.mean(similarity_list)
return mean_similarity
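# Sketch of how these helpers fit together (assumes a trained model was saved
# beforehand under ../model/LDA-10topics; the topic count is arbitrary):
# lda_model = get_model(num_topics=10)
# topics = get_topics(lda_model)
# print(mean_jaccard_similarity(topics))  # lower values suggest less topic overlap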
import os
import numpy as np
repos_dir = r'/home/akikun/repos'
iMetricGAN_dir = os.path.dirname(__file__)
data_dir = r'/home/akikun/projects/nele/data/hikari/all/noise4_5000-500'
train_dir = os.path.join(data_dir, 'train')
Train_Noise_path = os.path.join(train_dir, 'noise')
Train_Clean_path = os.path.join(train_dir, 'clean')
Train_Enhan_path = os.path.join(train_dir, 'enhanced')
test_dir = os.path.join(data_dir, 'test')
Test_Noise_path = os.path.join(test_dir, 'noise')
Test_Clean_path = os.path.join(test_dir, 'clean')
# the directory where the experimental results will be saved.
main_dir = r'/home/akikun/projects/nele/experiments/noise4_siib-estoi2'
output_path = os.path.join(main_dir, 'output')
pt_dir = os.path.join(main_dir, 'checkpoint')
log_dir = os.path.join(main_dir, 'log')
# 1st: SIIB 2nd: ESTOI
# It can be either 'SIIB' or 'ESTOI' or both for now.
# Of course, it can be any arbitary metric of interest.
TargetMetric = 'siib&estoi'
#TargetMetric = 'siib'
# Target metric scores you want generator to generate.
target_score = np.asarray([1.0, 1.0])
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from decimal import Decimal, ROUND_HALF_UP
from math import radians, tan, cos, sin
from os import path
_round = lambda f, r=ROUND_HALF_UP: int(Decimal(str(f)).quantize(Decimal("0"), rounding=r))
rgb = lambda r, g, b: (r, g, b)
upper_font_path = path.join(path.dirname(__file__), 'NotoSansCJKSC-Black.ttf')
downer_font_path = path.join(path.dirname(__file__), 'NotoSerifCJKSC-Black.ttf')
def get_gradient_2d(start, stop, width, height, is_horizontal=False):
if is_horizontal:
# The tile shape and the else branch below are assumed completions; the source is truncated at this call.
return np.tile(np.linspace(start, stop, width), (height, 1))
else:
return np.tile(np.linspace(start, stop, height), (width, 1)).T
import numpy
import scipy.stats
def one_hot(array, N):
"""
Convert an array of numbers to an array of one-hot vectors.
:param array: classes to convert
:type array: numpy.ndarray
:param N: number of classes
:type N: int
:return: one-hot vectors
:rtype: numpy.ndarray
"""
array = array.astype(int)
assert numpy.max(array) < N
assert numpy.min(array) >= 0
one_hot = numpy.zeros((array.shape[0], N))
one_hot[numpy.arange(array.shape[0]), array] = 1
return one_hot
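# Illustrative example (assumed inputs): one_hot(numpy.array([0, 2, 1]), 3) returns
# [[1., 0., 0.],
#  [0., 0., 1.],
#  [0., 1., 0.]]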
def contract_dims(array, axis=0):
"""
Intended as the opposite of numpy.expand_dims, especially for merging two axes.
:param array: input array
:type array: numpy.ndarray
:param axis: the axis to remove/contract
:type axis: int
:return: array
:rtype: numpy.ndarray
"""
assert isinstance(array, numpy.ndarray), 'array needs to be numpy.ndarray'
assert axis < len(array.shape), 'given axis exceeds rank'
assert axis != len(array.shape) - 1, 'last dimension cannot be contracted'
i = 0
shape = []
while i < len(array.shape):
if i == axis:
shape.append(-1)
i += 1 # Skip the next dimension as we want to contract it
else:
shape.append(array.shape[i])
i += 1
return array.reshape(tuple(shape))
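# Illustrative example (assumed shape): for an array of shape (2, 3, 4),
# contract_dims(array, axis=0) merges axes 0 and 1 and returns shape (6, 4).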
def concatenate(array1, array2, axis=0):
"""
Basically a wrapper for numpy.concatenate, with the exception
that the second array is returned as-is when the first one is None.
:param array1: input array or None
:type array1: mixed
:param array2: input array
:type array2: numpy.ndarray
:param axis: axis to concatenate
:type axis: int
:return: concatenated array
:rtype: numpy.ndarray
"""
assert isinstance(array2, numpy.ndarray)
if array1 is not None:
assert isinstance(array1, numpy.ndarray)
return numpy.concatenate((array1, array2), axis=axis)
else:
return array2
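# Illustrative example (assumed arrays): concatenate(None, numpy.ones((2, 3))) returns
# the second array unchanged, while concatenate(numpy.zeros((1, 3)), numpy.ones((2, 3)))
# returns an array of shape (3, 3).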
def uniform_ball(batch_size, dim, epsilon=1, ord=2, alternative_mode=True):
"""
Sample vectors uniformly in the n-ball.
See Harman et al., On decompositional algorithms for uniform sampling from n-spheres and n-balls.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:param alternative_mode: whether to sample from uniform distance instead of sampling uniformly with respect to volume
:type alternative_mode: bool
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
if alternative_mode:
uniform = numpy.random.uniform(0, 1, (batch_size, 1)) # exponent is only difference!
else:
uniform = numpy.random.uniform(0, 1, (batch_size, 1)) ** (1. / dim)
random *= numpy.repeat(uniform, axis=1, repeats=dim)
return random
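# Illustrative example (assumed arguments): uniform_ball(4, 3, epsilon=0.5) returns a
# 4 x 3 array of samples whose L2 norms are all at most 0.5.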
def truncated_normal(size, lower=-2, upper=2):
"""
Sample from truncated normal.
See https://stackoverflow.com/questions/18441779/how-to-specify-upper-and-lower-limits-when-using-numpy-random-normal.
:param size: size of vector
:type size: [int]
:param lower: lower bound
:type lower: float
:param upper: upper bound
:type upper: float
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
return scipy.stats.truncnorm.rvs(lower, upper, size=size)
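# Illustrative example (assumed size): truncated_normal((1000,)) draws 1000 samples
# from a standard normal restricted to the interval [-2, 2].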
def project_orthogonal(basis, vectors, rank=None):
"""
Project the given vectors on the basis using an orthogonal projection.
:param basis: basis vectors to project on
:type basis: numpy.ndarray
:param vectors: vectors to project
:type vectors: numpy.ndarray
:return: projection
:rtype: numpy.ndarray
"""
# The columns of Q are an orthonormal basis of the columns of basis
Q, R = numpy.linalg.qr(basis)
if rank is not None and rank > 0:
Q = Q[:, :rank]
# As Q has orthonormal columns, the projection is Q Q^T vectors:
beta = Q.T.dot(vectors)
projection = Q.dot(beta)
return projection
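# Illustrative example (assumed data): with basis = numpy.random.randn(10, 3) and
# v = numpy.random.randn(10), p = project_orthogonal(basis, v) lies in the column span
# of basis and the residual is orthogonal to it, i.e. basis.T.dot(v - p) is ~0.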
def project_lstsq(basis, vectors):
"""
Project using least squares.
:param basis: basis vectors to project on
:type basis: numpy.ndarray
:param vectors: vectors to project
:type vectors: numpy.ndarray
:return: projection
:rtype: numpy.ndarray
"""
x, _, _, _ = numpy.linalg.lstsq(basis, vectors)
projection = basis.dot(x)
return projection
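# Note: for a basis with linearly independent columns, project_lstsq and
# project_orthogonal compute the same orthogonal projection; this variant solves a
# least-squares system instead of using a QR factorization.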
def angles(vectors_a, vectors_b):
"""
Compute angle between two sets of vectors.
See https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf.
:param vectors_a:
:param vectors_b:
:return:
"""
if len(vectors_b.shape) == 1:
vectors_b = vectors_b.reshape(-1, 1)
# Normalize vector
norms_a = numpy.linalg.norm(vectors_a, ord=2, axis=0)
#!/usr/bin/env python3
"""
Generate PDFs from DNS data
"""
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import io
import itertools
import numpy as np
import pandas as pd
from scipy import stats
import utilities
# ========================================================================
#
# Function definitions
#
# ========================================================================
def load_raw_pdf_data(fname):
"""
Load the data and get a data frame (save it for later)
"""
# Read bins
Zbins = np.array([])
Cbins = np.array([])
with open(fname, "r") as f:
next(f)
for k, line in enumerate(f):
line = line.split()
if len(line) == 3:
Zbin, Cbin, _ = line
Zbins = np.append(Zbins, float(Zbin))
Cbins = np.append(Cbins, float(Cbin))
else:
break
bins = pd.DataFrame({"Zbins": Zbins, "Cbins": Cbins})
# Read the PDF labels and values
s = io.StringIO()
with open(fname, "r") as f:
label = 0
for k, line in enumerate(f):
line = line.split()
if len(line) == 4:
Z, Zvar, C, Cvar = line
label += 1
print("Processing PDF {0:d}".format(label))
s.write(
"\n"
+ str(
[
label,
float(C),
float(Cvar),
float(Z),
float(Zvar),
]
)[1:-1]
)
continue
if len(line) == 3:
_, _, pdf = line
s.write("," + str(pdf))
# Convert to dataframe
s.seek(0)
names = ["C", "Cvar", "Z", "Zvar"] + [
"Y{0:04d}".format(i) for i in range(len(Zbins))
]
df = pd.read_csv(s, index_col=0, names=names)
# Save these to a file
df.to_pickle("pdfs.gz")
bins.to_pickle("bins.gz")
return df, bins
# ========================================================================
def concatenate_dices(dices=["dice_0000", "dice_0001"], datadir="data"):
"""
Concatenate dices
:param dices: list of dice names
:type dices: list
:param datadir: directory containing dices
:type datadir: str
"""
# Setup
fields_load = ["Rho", "Z", "C", "SRC_PV", "Temp"]
oname = os.path.join(datadir, "concatenated.npz")
dats = [np.load(os.path.join(datadir, f"{dice}.npz")) for dice in dices]
# Get data
fdir = dats[0]["fdir"]
z = np.mean([dat["z"] for dat in dats])
dx = dats[0]["dx"]
low = dats[0]["low"]
high = dats[-1]["high"]
fields_save = dict(
zip(
fields_load,
[
np.concatenate([dat[field] for dat in dats], axis=-1)
for field in fields_load
],
)
)
# Save
np.savez_compressed(oname, fdir=fdir, z=z, dx=dx, low=low, high=high, **fields_save)
# ========================================================================
def gen_pdf_from_dice(fname):
"""
Generate PDFs from a dice of data
:param fname: dice file name
:type fname: str
:return: PDFs
:rtype: dataframe
"""
# Load dice file
dat = np.load(fname)
lo = dat["low"]
dx = dat["dx"]
# Variables
rho = dat["Rho"]
Z = np.clip(dat["Z"], 0.0, 1.0)
C = np.clip(dat["C"], 0.0, None)
SRC_PV = dat["SRC_PV"]
rhoZ = rho * Z
rhoC = rho * C
rhoSRC_PV = rho * SRC_PV
# PDF bins
nc = 32
nz = 64
cbin_edges = np.linspace(0, 0.21, nc + 1)
zbin_edges = np.linspace(0, 1, nz + 1)
Zbins, Cbins = np.meshgrid(
utilities.edges_to_midpoint(zbin_edges), utilities.edges_to_midpoint(cbin_edges)
)
bins = pd.DataFrame({"Zbins": np.ravel(Zbins), "Cbins": np.ravel(Cbins)})
bins.to_pickle("bins.gz")
# Loop on all blocks of width^3 separated by stride
width = 32
stride = 8
N = rho.shape
ranges = [
range(0, N[0] - width, stride),
range(0, N[1] - width, stride),
range(0, N[2] - width, stride),
]
# PDFs storage
npdfs = np.prod([len(x) for x in ranges])
pdfs = np.zeros((npdfs, 8 + nz * nc))
src_pv_means = np.zeros((npdfs, nz * nc))
# Loop on all the blocks
for cnt, (i, j, k) in enumerate(itertools.product(ranges[0], ranges[1], ranges[2])):
# Get center of block
bc = [
lo[0] + (i + width // 2) * dx,
lo[1] + (j + width // 2) * dx,
lo[2] + (k + width // 2) * dx,
]
# Favre averages
block = np.s_[i : i + width, j : j + width, k : k + width]
rho_ = np.sum(rho[block])
C_ = np.sum(rhoC[block]) / rho_
Cvar_ = np.sum(rho[block] * (C[block] - C_) ** 2) / rho_
Z_ = np.sum(rhoZ[block]) / rho_
Zvar_ = np.sum(rho[block] * (Z[block] - Z_) ** 2) / rho_
SRC_PV_ = np.sum(rhoSRC_PV[block]) / rho_
# Compute density-weighted PDF
pdf, _, _, _ = stats.binned_statistic_2d(
np.ravel(Z[block]),
np.ravel(C[block]),
np.ravel(rho[block]),
# Remaining arguments are assumed (the source is truncated at this call): a
# density-weighted sum over the Z/C bins defined above.
statistic="sum",
bins=[zbin_edges, cbin_edges],
)
import cv2
import numpy as np
img= cv2.imread("./input/rc-1.png")
hsv=cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
original=img.copy()
def empty(a):
pass
def remove_bad_contours(conts):
new_conts = []
for cont in conts:
bound_rect = cv2.minAreaRect(cont)
length, breadth = float(bound_rect[1][0]), float(bound_rect[1][1])
try:
if max((length/breadth, breadth/length)) > 5:
continue
if not 0.9*img.shape[0] > max((length, breadth)) > 0.05*img.shape[0]:
continue
if cv2.contourArea(cont)/(length*breadth) <0.4:
continue
new_conts.append(cont)
except ZeroDivisionError:
continue
return new_conts
def colorDetection(color,image):
colordict={"Red":[[0, 50, 70],[9, 255, 255],[159, 50, 70],[180, 255, 255],"R"],"Blue":[[90, 50, 70],[128, 255, 255],"B"],"Green":[[36, 50, 70],[89, 255, 255],"G"],"White":[[0, 0, 231],[180, 18, 255],"W"],"Orange":[[10, 50, 70],[24,255,255],"O"],"Yellow":[[ 25, 50,70],[35,255,255],"Y"]}
if color in colordict:
if color=="Red":
lower_l=np.array(colordict[color][0])
upper_l=np.array(colordict[color][1])
lower_u=np.array(colordict[color][2])
upper_u=np.array(colordict[color][3])
Mask_l=cv2.inRange(image,lower_l,upper_l)
Mask_u=cv2.inRange(image,lower_u,upper_u)
Mask=Mask_l+Mask_u
else:
lower = np.array(colordict[color][0])
# python3.7
"""Contains the implementation of generator described in StyleGAN.
Different from the official tensorflow model in folder `stylegan_tf_official`,
this is a simple pytorch version which only contains the generator part. This
class is specially used for inference.
For more details, please check the original paper:
https://arxiv.org/pdf/1812.04948.pdf
"""
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['StyleGANGeneratorModel']
# Defines a dictionary, which maps the target resolution of the final generated
# image to numbers of filters used in each convolutional layer in sequence.
_RESOLUTIONS_TO_CHANNELS = {
8: [512, 512, 512],
16: [512, 512, 512, 512],
32: [512, 512, 512, 512, 512],
64: [512, 512, 512, 512, 512, 256],
128: [512, 512, 512, 512, 512, 256, 128],
256: [512, 512, 512, 512, 512, 256, 128, 64],
512: [512, 512, 512, 512, 512, 256, 128, 64, 32],
1024: [512, 512, 512, 512, 512, 256, 128, 64, 32, 16],
}
# Variable mapping from pytorch model to official tensorflow model.
_STYLEGAN_PTH_VARS_TO_TF_VARS = {
# Statistic information of disentangled latent feature, w.
'truncation.w_avg':
'dlatent_avg', # [512]
# Noises.
'synthesis.layer0.epilogue.apply_noise.noise':
'noise0', # [1, 1, 4, 4]
'synthesis.layer1.epilogue.apply_noise.noise':
'noise1', # [1, 1, 4, 4]
'synthesis.layer2.epilogue.apply_noise.noise':
'noise2', # [1, 1, 8, 8]
'synthesis.layer3.epilogue.apply_noise.noise':
'noise3', # [1, 1, 8, 8]
'synthesis.layer4.epilogue.apply_noise.noise':
'noise4', # [1, 1, 16, 16]
'synthesis.layer5.epilogue.apply_noise.noise':
'noise5', # [1, 1, 16, 16]
'synthesis.layer6.epilogue.apply_noise.noise':
'noise6', # [1, 1, 32, 32]
'synthesis.layer7.epilogue.apply_noise.noise':
'noise7', # [1, 1, 32, 32]
'synthesis.layer8.epilogue.apply_noise.noise':
'noise8', # [1, 1, 64, 64]
'synthesis.layer9.epilogue.apply_noise.noise':
'noise9', # [1, 1, 64, 64]
'synthesis.layer10.epilogue.apply_noise.noise':
'noise10', # [1, 1, 128, 128]
'synthesis.layer11.epilogue.apply_noise.noise':
'noise11', # [1, 1, 128, 128]
'synthesis.layer12.epilogue.apply_noise.noise':
'noise12', # [1, 1, 256, 256]
'synthesis.layer13.epilogue.apply_noise.noise':
'noise13', # [1, 1, 256, 256]
'synthesis.layer14.epilogue.apply_noise.noise':
'noise14', # [1, 1, 512, 512]
'synthesis.layer15.epilogue.apply_noise.noise':
'noise15', # [1, 1, 512, 512]
'synthesis.layer16.epilogue.apply_noise.noise':
'noise16', # [1, 1, 1024, 1024]
'synthesis.layer17.epilogue.apply_noise.noise':
'noise17', # [1, 1, 1024, 1024]
# Mapping blocks.
'mapping.dense0.linear.weight':
'Dense0/weight', # [512, 512]
'mapping.dense0.wscale.bias':
'Dense0/bias', # [512]
'mapping.dense1.linear.weight':
'Dense1/weight', # [512, 512]
'mapping.dense1.wscale.bias':
'Dense1/bias', # [512]
'mapping.dense2.linear.weight':
'Dense2/weight', # [512, 512]
'mapping.dense2.wscale.bias':
'Dense2/bias', # [512]
'mapping.dense3.linear.weight':
'Dense3/weight', # [512, 512]
'mapping.dense3.wscale.bias':
'Dense3/bias', # [512]
'mapping.dense4.linear.weight':
'Dense4/weight', # [512, 512]
'mapping.dense4.wscale.bias':
'Dense4/bias', # [512]
'mapping.dense5.linear.weight':
'Dense5/weight', # [512, 512]
'mapping.dense5.wscale.bias':
'Dense5/bias', # [512]
'mapping.dense6.linear.weight':
'Dense6/weight', # [512, 512]
'mapping.dense6.wscale.bias':
'Dense6/bias', # [512]
'mapping.dense7.linear.weight':
'Dense7/weight', # [512, 512]
'mapping.dense7.wscale.bias':
'Dense7/bias', # [512]
# Synthesis blocks.
'synthesis.layer0.first_layer':
'4x4/Const/const', # [1, 512, 4, 4]
'synthesis.layer0.epilogue.apply_noise.weight':
'4x4/Const/Noise/weight', # [512]
'synthesis.layer0.epilogue.bias':
'4x4/Const/bias', # [512]
'synthesis.layer0.epilogue.style_mod.dense.linear.weight':
'4x4/Const/StyleMod/weight', # [1024, 512]
'synthesis.layer0.epilogue.style_mod.dense.wscale.bias':
'4x4/Const/StyleMod/bias', # [1024]
'synthesis.layer1.conv.weight':
'4x4/Conv/weight', # [512, 512, 3, 3]
'synthesis.layer1.epilogue.apply_noise.weight':
'4x4/Conv/Noise/weight', # [512]
'synthesis.layer1.epilogue.bias':
'4x4/Conv/bias', # [512]
'synthesis.layer1.epilogue.style_mod.dense.linear.weight':
'4x4/Conv/StyleMod/weight', # [1024, 512]
'synthesis.layer1.epilogue.style_mod.dense.wscale.bias':
'4x4/Conv/StyleMod/bias', # [1024]
'synthesis.layer2.conv.weight':
'8x8/Conv0_up/weight', # [512, 512, 3, 3]
'synthesis.layer2.epilogue.apply_noise.weight':
'8x8/Conv0_up/Noise/weight', # [512]
'synthesis.layer2.epilogue.bias':
'8x8/Conv0_up/bias', # [512]
'synthesis.layer2.epilogue.style_mod.dense.linear.weight':
'8x8/Conv0_up/StyleMod/weight', # [1024, 512]
'synthesis.layer2.epilogue.style_mod.dense.wscale.bias':
'8x8/Conv0_up/StyleMod/bias', # [1024]
'synthesis.layer3.conv.weight':
'8x8/Conv1/weight', # [512, 512, 3, 3]
'synthesis.layer3.epilogue.apply_noise.weight':
'8x8/Conv1/Noise/weight', # [512]
'synthesis.layer3.epilogue.bias':
'8x8/Conv1/bias', # [512]
'synthesis.layer3.epilogue.style_mod.dense.linear.weight':
'8x8/Conv1/StyleMod/weight', # [1024, 512]
'synthesis.layer3.epilogue.style_mod.dense.wscale.bias':
'8x8/Conv1/StyleMod/bias', # [1024]
'synthesis.layer4.conv.weight':
'16x16/Conv0_up/weight', # [512, 512, 3, 3]
'synthesis.layer4.epilogue.apply_noise.weight':
'16x16/Conv0_up/Noise/weight', # [512]
'synthesis.layer4.epilogue.bias':
'16x16/Conv0_up/bias', # [512]
'synthesis.layer4.epilogue.style_mod.dense.linear.weight':
'16x16/Conv0_up/StyleMod/weight', # [1024, 512]
'synthesis.layer4.epilogue.style_mod.dense.wscale.bias':
'16x16/Conv0_up/StyleMod/bias', # [1024]
'synthesis.layer5.conv.weight':
'16x16/Conv1/weight', # [512, 512, 3, 3]
'synthesis.layer5.epilogue.apply_noise.weight':
'16x16/Conv1/Noise/weight', # [512]
'synthesis.layer5.epilogue.bias':
'16x16/Conv1/bias', # [512]
'synthesis.layer5.epilogue.style_mod.dense.linear.weight':
'16x16/Conv1/StyleMod/weight', # [1024, 512]
'synthesis.layer5.epilogue.style_mod.dense.wscale.bias':
'16x16/Conv1/StyleMod/bias', # [1024]
'synthesis.layer6.conv.weight':
'32x32/Conv0_up/weight', # [512, 512, 3, 3]
'synthesis.layer6.epilogue.apply_noise.weight':
'32x32/Conv0_up/Noise/weight', # [512]
'synthesis.layer6.epilogue.bias':
'32x32/Conv0_up/bias', # [512]
'synthesis.layer6.epilogue.style_mod.dense.linear.weight':
'32x32/Conv0_up/StyleMod/weight', # [1024, 512]
'synthesis.layer6.epilogue.style_mod.dense.wscale.bias':
'32x32/Conv0_up/StyleMod/bias', # [1024]
'synthesis.layer7.conv.weight':
'32x32/Conv1/weight', # [512, 512, 3, 3]
'synthesis.layer7.epilogue.apply_noise.weight':
'32x32/Conv1/Noise/weight', # [512]
'synthesis.layer7.epilogue.bias':
'32x32/Conv1/bias', # [512]
'synthesis.layer7.epilogue.style_mod.dense.linear.weight':
'32x32/Conv1/StyleMod/weight', # [1024, 512]
'synthesis.layer7.epilogue.style_mod.dense.wscale.bias':
'32x32/Conv1/StyleMod/bias', # [1024]
'synthesis.layer8.conv.weight':
'64x64/Conv0_up/weight', # [256, 512, 3, 3]
'synthesis.layer8.epilogue.apply_noise.weight':
'64x64/Conv0_up/Noise/weight', # [256]
'synthesis.layer8.epilogue.bias':
'64x64/Conv0_up/bias', # [256]
'synthesis.layer8.epilogue.style_mod.dense.linear.weight':
'64x64/Conv0_up/StyleMod/weight', # [512, 512]
'synthesis.layer8.epilogue.style_mod.dense.wscale.bias':
'64x64/Conv0_up/StyleMod/bias', # [512]
'synthesis.layer9.conv.weight':
'64x64/Conv1/weight', # [256, 256, 3, 3]
'synthesis.layer9.epilogue.apply_noise.weight':
'64x64/Conv1/Noise/weight', # [256]
'synthesis.layer9.epilogue.bias':
'64x64/Conv1/bias', # [256]
'synthesis.layer9.epilogue.style_mod.dense.linear.weight':
'64x64/Conv1/StyleMod/weight', # [512, 512]
'synthesis.layer9.epilogue.style_mod.dense.wscale.bias':
'64x64/Conv1/StyleMod/bias', # [512]
'synthesis.layer10.weight':
'128x128/Conv0_up/weight', # [3, 3, 256, 128]
'synthesis.layer10.epilogue.apply_noise.weight':
'128x128/Conv0_up/Noise/weight', # [128]
'synthesis.layer10.epilogue.bias':
'128x128/Conv0_up/bias', # [128]
'synthesis.layer10.epilogue.style_mod.dense.linear.weight':
'128x128/Conv0_up/StyleMod/weight', # [256, 512]
'synthesis.layer10.epilogue.style_mod.dense.wscale.bias':
'128x128/Conv0_up/StyleMod/bias', # [256]
'synthesis.layer11.conv.weight':
'128x128/Conv1/weight', # [128, 128, 3, 3]
'synthesis.layer11.epilogue.apply_noise.weight':
'128x128/Conv1/Noise/weight', # [128]
'synthesis.layer11.epilogue.bias':
'128x128/Conv1/bias', # [128]
'synthesis.layer11.epilogue.style_mod.dense.linear.weight':
'128x128/Conv1/StyleMod/weight', # [256, 512]
'synthesis.layer11.epilogue.style_mod.dense.wscale.bias':
'128x128/Conv1/StyleMod/bias', # [256]
'synthesis.layer12.weight':
'256x256/Conv0_up/weight', # [3, 3, 128, 64]
'synthesis.layer12.epilogue.apply_noise.weight':
'256x256/Conv0_up/Noise/weight', # [64]
'synthesis.layer12.epilogue.bias':
'256x256/Conv0_up/bias', # [64]
'synthesis.layer12.epilogue.style_mod.dense.linear.weight':
'256x256/Conv0_up/StyleMod/weight', # [128, 512]
'synthesis.layer12.epilogue.style_mod.dense.wscale.bias':
'256x256/Conv0_up/StyleMod/bias', # [128]
'synthesis.layer13.conv.weight':
'256x256/Conv1/weight', # [64, 64, 3, 3]
'synthesis.layer13.epilogue.apply_noise.weight':
'256x256/Conv1/Noise/weight', # [64]
'synthesis.layer13.epilogue.bias':
'256x256/Conv1/bias', # [64]
'synthesis.layer13.epilogue.style_mod.dense.linear.weight':
'256x256/Conv1/StyleMod/weight', # [128, 512]
'synthesis.layer13.epilogue.style_mod.dense.wscale.bias':
'256x256/Conv1/StyleMod/bias', # [128]
'synthesis.layer14.weight':
'512x512/Conv0_up/weight', # [3, 3, 64, 32]
'synthesis.layer14.epilogue.apply_noise.weight':
'512x512/Conv0_up/Noise/weight', # [32]
'synthesis.layer14.epilogue.bias':
'512x512/Conv0_up/bias', # [32]
'synthesis.layer14.epilogue.style_mod.dense.linear.weight':
'512x512/Conv0_up/StyleMod/weight', # [64, 512]
'synthesis.layer14.epilogue.style_mod.dense.wscale.bias':
'512x512/Conv0_up/StyleMod/bias', # [64]
'synthesis.layer15.conv.weight':
'512x512/Conv1/weight', # [32, 32, 3, 3]
'synthesis.layer15.epilogue.apply_noise.weight':
'512x512/Conv1/Noise/weight', # [32]
'synthesis.layer15.epilogue.bias':
'512x512/Conv1/bias', # [32]
'synthesis.layer15.epilogue.style_mod.dense.linear.weight':
'512x512/Conv1/StyleMod/weight', # [64, 512]
'synthesis.layer15.epilogue.style_mod.dense.wscale.bias':
'512x512/Conv1/StyleMod/bias', # [64]
'synthesis.layer16.weight':
'1024x1024/Conv0_up/weight', # [3, 3, 32, 16]
'synthesis.layer16.epilogue.apply_noise.weight':
'1024x1024/Conv0_up/Noise/weight', # [16]
'synthesis.layer16.epilogue.bias':
'1024x1024/Conv0_up/bias', # [16]
'synthesis.layer16.epilogue.style_mod.dense.linear.weight':
'1024x1024/Conv0_up/StyleMod/weight', # [32, 512]
'synthesis.layer16.epilogue.style_mod.dense.wscale.bias':
'1024x1024/Conv0_up/StyleMod/bias', # [32]
'synthesis.layer17.conv.weight':
'1024x1024/Conv1/weight', # [16, 16, 3, 3]
'synthesis.layer17.epilogue.apply_noise.weight':
'1024x1024/Conv1/Noise/weight', # [16]
'synthesis.layer17.epilogue.bias':
'1024x1024/Conv1/bias', # [16]
'synthesis.layer17.epilogue.style_mod.dense.linear.weight':
'1024x1024/Conv1/StyleMod/weight', # [32, 512]
'synthesis.layer17.epilogue.style_mod.dense.wscale.bias':
'1024x1024/Conv1/StyleMod/bias', # [32]
'synthesis.output.conv.weight':
'ToRGB_lod0/weight', # [3, 16, 1, 1]
'synthesis.output.bias':
'ToRGB_lod0/bias', # [3]
}
class StyleGANGeneratorModel(nn.Module):
"""Defines the generator module in StyleGAN.
Note that the generated images are with RGB color channels.
"""
def __init__(self,
resolution=1024,
w_space_dim=512,
truncation_psi=0.7,
truncation_layers=8,
randomize_noise=False):
"""Initializes the generator with basic settings.
Args:
resolution: The resolution of the final output image.
w_space_dim: The dimension of the disentangled latent vectors, w.
truncation_psi: Style strength multiplier for the truncation trick.
`None` or `1.0` indicates no truncation.
truncation_layers: Number of layers for which to apply the truncation
trick. `None` indicates no truncation.
Raises:
ValueError: If the input `resolution` is not supported.
"""
super().__init__()
self.mapping = MappingModule(final_space_dim=w_space_dim)
self.truncation = TruncationModule(resolution=resolution,
w_space_dim=w_space_dim,
truncation_psi=truncation_psi,
truncation_layers=truncation_layers)
self.synthesis = SynthesisModule(resolution=resolution,
randomize_noise=randomize_noise)
self.pth_to_tf_var_mapping = _STYLEGAN_PTH_VARS_TO_TF_VARS
def forward(self, z):
w = self.mapping(z)
w = self.truncation(w)
x = self.synthesis(w)
return x
class MappingModule(nn.Sequential):
"""Implements the latent space mapping module used in StyleGAN.
Basically, this module executes several dense layers in sequence.
"""
def __init__(self,
normalize_input=True,
input_space_dim=512,
hidden_space_dim=512,
final_space_dim=512,
num_layers=8):
sequence = OrderedDict()
def _add_layer(layer, name=None):
name = name or f'dense{len(sequence) + (not normalize_input) - 1}'
sequence[name] = layer
if normalize_input:
_add_layer(PixelNormLayer(), name='normalize')
for i in range(num_layers):
in_dim = input_space_dim if i == 0 else hidden_space_dim
out_dim = final_space_dim if i == (num_layers - 1) else hidden_space_dim
_add_layer(DenseBlock(in_dim, out_dim))
super().__init__(sequence)
def forward(self, x):
if len(x.shape) != 2:
raise ValueError(f'The input tensor should be with shape [batch_size, '
f'noise_dim], but {x.shape} received!')
return super().forward(x)
class TruncationModule(nn.Module):
"""Implements the truncation module used in StyleGAN."""
def __init__(self,
resolution=1024,
w_space_dim=512,
truncation_psi=0.7,
truncation_layers=8):
super().__init__()
self.num_layers = int(np.log2(resolution)) * 2 - 2
self.w_space_dim = w_space_dim
if truncation_psi is not None and truncation_layers is not None:
self.use_truncation = True
else:
self.use_truncation = False
truncation_psi = 1.0
truncation_layers = 0
self.register_buffer('w_avg', torch.zeros(w_space_dim))
layer_idx = np.arange(self.num_layers).reshape(1, self.num_layers, 1)
coefs = np.ones_like(layer_idx, dtype=np.float32)
coefs[layer_idx < truncation_layers] *= truncation_psi
self.register_buffer('truncation', torch.from_numpy(coefs))
def forward(self, w):
if len(w.shape) == 2:
w = w.view(-1, 1, self.w_space_dim).repeat(1, self.num_layers, 1)
if self.use_truncation:
w_avg = self.w_avg.view(1, 1, self.w_space_dim)
w = w_avg + (w - w_avg) * self.truncation
return w
class SynthesisModule(nn.Module):
"""Implements the image synthesis module used in StyleGAN.
Basically, this module executes several convolutional layers in sequence.
"""
def __init__(self,
resolution=1024,
randomize_noise=False):
super().__init__()
try:
channels = _RESOLUTIONS_TO_CHANNELS[resolution]
except KeyError:
raise ValueError(f'Invalid resolution: {resolution}!\n'
f'Resolutions allowed: '
f'{list(_RESOLUTIONS_TO_CHANNELS)}.')
self.num_layers = int(np.log2(resolution)) * 2 - 2
for i in range(1, len(channels)):
if i == 1:
self.add_module('layer0', FirstConvBlock(channels[0], randomize_noise))
else:
self.add_module(
f'layer{i * 2 - 2}',
UpConvBlock(layer_idx=i * 2 - 2,
in_channels=channels[i - 1],
out_channels=channels[i],
randomize_noise=randomize_noise))
self.add_module(
f'layer{i * 2 - 1}',
ConvBlock(layer_idx=i * 2 - 1,
in_channels=channels[i],
out_channels=channels[i],
randomize_noise=randomize_noise))
self.add_module('output', LastConvBlock(channels[-1]))
def forward(self, w):
x = self.layer0(w[:, 0])
for i in range(1, self.num_layers):
x = self.__getattr__(f'layer{i}')(x, w[:, i])
x = self.output(x)
return x
class PixelNormLayer(nn.Module):
"""Implements pixel-wise feature vector normalization layer."""
def __init__(self, epsilon=1e-8):
super().__init__()
self.epsilon = epsilon
def forward(self, x):
return x / torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + self.epsilon)
class InstanceNormLayer(nn.Module):
"""Implements instance normalization layer."""
def __init__(self, epsilon=1e-8):
super().__init__()
self.epsilon = epsilon
def forward(self, x):
if len(x.shape) != 4:
raise ValueError(f'The input tensor should be with shape [batch_size, '
f'num_channels, height, width], but {x.shape} received!')
x = x - torch.mean(x, dim=[2, 3], keepdim=True)
x = x / torch.sqrt(torch.mean(x**2, dim=[2, 3], keepdim=True) +
self.epsilon)
return x
class ResolutionScalingLayer(nn.Module):
"""Implements the resolution scaling layer.
Basically, this layer can be used to upsample or downsample feature maps from
spatial domain with nearest neighbor interpolation.
"""
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale_factor, mode='nearest')
class BlurLayer(nn.Module):
"""Implements the blur layer used in StyleGAN."""
def __init__(self,
channels,
kernel=(1, 2, 1),
normalize=True,
flip=False):
super().__init__()
kernel = np.array(kernel, dtype=np.float32).reshape(1, 3)
kernel = kernel.T.dot(kernel)
if normalize:
kernel /= np.sum(kernel)
if flip:
kernel = kernel[::-1, ::-1]
kernel = kernel.reshape(3, 3, 1, 1)
kernel = np.tile(kernel, [1, 1, channels, 1])
kernel = np.transpose(kernel, [2, 3, 0, 1])
self.register_buffer('kernel', torch.from_numpy(kernel))
self.channels = channels
def forward(self, x):
return F.conv2d(x, self.kernel, stride=1, padding=1, groups=self.channels)
class NoiseApplyingLayer(nn.Module):
"""Implements the noise applying layer used in StyleGAN."""
def __init__(self, layer_idx, channels, randomize_noise=False):
super().__init__()
self.randomize_noise = randomize_noise
self.res = 2**(layer_idx // 2 + 2)
self.register_buffer('noise', torch.randn(1, 1, self.res, self.res))
self.weight = nn.Parameter(torch.zeros(channels))
def forward(self, x):
if len(x.shape) != 4:
raise ValueError(f'The input tensor should be with shape [batch_size, '
f'num_channels, height, width], but {x.shape} received!')
if self.randomize_noise:
noise = torch.randn(x.shape[0], 1, self.res, self.res)
if x.is_cuda:
noise = noise.cuda()
else:
noise = self.noise
return x + noise * self.weight.view(1, -1, 1, 1)
class StyleModulationLayer(nn.Module):
"""Implements the style modulation layer used in StyleGAN."""
def __init__(self, channels, w_space_dim=512):
super().__init__()
self.channels = channels
self.dense = DenseBlock(in_features=w_space_dim,
out_features=channels*2,
wscale_gain=1.0,
wscale_lr_multiplier=1.0,
activation_type='linear')
def forward(self, x, w):
if len(w.shape) != 2:
raise ValueError(f'The input tensor should be with shape [batch_size, '
f'num_channels], but {x.shape} received!')
style = self.dense(w)
style = style.view(-1, 2, self.channels, 1, 1)
return x * (style[:, 0] + 1) + style[:, 1]
class WScaleLayer(nn.Module):
"""Implements the layer to scale weight variable and add bias.
Note that, the weight variable is trained in `nn.Conv2d` layer (or `nn.Linear`
layer), and only scaled with a constant number, which is not trainable, in
this layer. However, the bias variable is trainable in this layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
gain=np.sqrt(2.0),
lr_multiplier=1.0):
super().__init__()
fan_in = in_channels * kernel_size * kernel_size
self.scale = gain / np.sqrt(fan_in) * lr_multiplier
self.bias = nn.Parameter(torch.zeros(out_channels))
self.lr_multiplier = lr_multiplier
def forward(self, x):
if len(x.shape) == 4:
return x * self.scale + self.bias.view(1, -1, 1, 1) * self.lr_multiplier
if len(x.shape) == 2:
return x * self.scale + self.bias.view(1, -1) * self.lr_multiplier
raise ValueError(f'The input tensor should be with shape [batch_size, '
f'num_channels, height, width], or [batch_size, '
f'num_channels], but {x.shape} received!')
class EpilogueBlock(nn.Module):
"""Implements the epilogue block of each conv block."""
def __init__(self,
layer_idx,
channels,
randomize_noise=False,
normalization_fn='instance'):
super().__init__()
self.apply_noise = NoiseApplyingLayer(layer_idx, channels, randomize_noise)
self.bias = nn.Parameter(torch.zeros(channels))
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
if normalization_fn == 'pixel':
self.norm = PixelNormLayer()
elif normalization_fn == 'instance':
self.norm = InstanceNormLayer()
else:
raise NotImplementedError(f'Not implemented normalization function: '
f'{normalization_fn}!')
self.style_mod = StyleModulationLayer(channels)
def forward(self, x, w):
x = self.apply_noise(x)
x = x + self.bias.view(1, -1, 1, 1)
x = self.activate(x)
x = self.norm(x)
x = self.style_mod(x, w)
return x
class FirstConvBlock(nn.Module):
"""Implements the first convolutional block used in StyleGAN.
Basically, this block starts from a const input, which is `ones(512, 4, 4)`.
"""
def __init__(self, channels, randomize_noise=False):
super().__init__()
self.first_layer = nn.Parameter(torch.ones(1, channels, 4, 4))
self.epilogue = EpilogueBlock(layer_idx=0,
channels=channels,
randomize_noise=randomize_noise)
def forward(self, w):
x = self.first_layer.repeat(w.shape[0], 1, 1, 1)
x = self.epilogue(x, w)
return x
class UpConvBlock(nn.Module):
"""Implements the convolutional block used in StyleGAN.
Basically, this block is used as the first convolutional block for each
resolution, which will execute upsampling.
"""
def __init__(self,
layer_idx,
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
add_bias=False,
wscale_gain=np.sqrt(2.0),
wscale_lr_multiplier=1.0,
randomize_noise=False):
"""Initializes the class with block settings.
Args:
in_channels: Number of channels of the input tensor fed into this block.
out_channels: Number of channels (kernels) of the output tensor.
kernel_size: Size of the convolutional kernel.
stride: Stride parameter for convolution operation.
padding: Padding parameter for convolution operation.
dilation: Dilation rate for convolution operation.
add_bias: Whether to add bias onto the convolutional result.
wscale_gain: The gain factor for `wscale` layer.
wscale_lr_multiplier: The learning rate multiplier factor for `wscale`
layer.
Raises:
ValueError: If the block is not applied to the first block for a
particular resolution.
"""
super().__init__()
if layer_idx % 2 == 1:
raise ValueError(f'This block is implemented as the first block of each '
f'resolution, but is applied to layer {layer_idx}!')
self.layer_idx = layer_idx
if self.layer_idx > 9:
self.weight = nn.Parameter(
torch.randn(kernel_size, kernel_size, in_channels, out_channels))
else:
self.upsample = ResolutionScalingLayer()
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=1,
bias=add_bias)
fan_in = in_channels * kernel_size * kernel_size
self.scale = wscale_gain / np.sqrt(fan_in) * wscale_lr_multiplier
self.blur = BlurLayer(channels=out_channels)
self.epilogue = EpilogueBlock(layer_idx=layer_idx,
channels=out_channels,
randomize_noise=randomize_noise)
def forward(self, x, w):
if self.layer_idx > 9:
kernel = self.weight * self.scale
kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0)
kernel = (kernel[1:, 1:] + kernel[:-1, 1:] +
kernel[1:, :-1] + kernel[:-1, :-1])
kernel = kernel.permute(2, 3, 0, 1)
x = F.conv_transpose2d(x, kernel, stride=2, padding=1)
else:
x = self.upsample(x)
x = self.conv(x) * self.scale
x = self.blur(x)
x = self.epilogue(x, w)
return x
class ConvBlock(nn.Module):
"""Implements the convolutional block used in StyleGAN.
Basically, this block is used as the second convolutional block for each
resolution.
"""
def __init__(self,
layer_idx,
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
add_bias=False,
wscale_gain=np.sqrt(2.0),
wscale_lr_multiplier=1.0,
randomize_noise=False):
"""Initializes the class with block settings.
Args:
in_channels: Number of channels of the input tensor fed into this block.
out_channels: Number of channels (kernels) of the output tensor.
kernel_size: Size of the convolutional kernel.
stride: Stride parameter for convolution operation.
padding: Padding parameter for convolution operation.
dilation: Dilation rate for convolution operation.
add_bias: Whether to add bias onto the convolutional result.
wscale_gain: The gain factor for `wscale` layer.
wscale_lr_multiplier: The learning rate multiplier factor for `wscale`
layer.
Raises:
ValueError: If the block is not applied to the second block for a
particular resolution.
"""
super().__init__()
if layer_idx % 2 == 0:
raise ValueError(f'This block is implemented as the second block of each '
f'resolution, but is applied to layer {layer_idx}!')
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=1,
bias=add_bias)
fan_in = in_channels * kernel_size * kernel_size
self.scale = wscale_gain / np.sqrt(fan_in) * wscale_lr_multiplier
self.epilogue = EpilogueBlock(layer_idx=layer_idx,
channels=out_channels,
randomize_noise=randomize_noise)
def forward(self, x, w):
x = self.conv(x) * self.scale
x = self.epilogue(x, w)
return x
class LastConvBlock(nn.Module):
"""Implements the last convolutional block used in StyleGAN.
Basically, this block converts the final feature map to RGB image.
"""
def __init__(self, channels):
super().__init__()
self.conv = nn.Conv2d(in_channels=channels,
out_channels=3,
kernel_size=1,
bias=False)
self.scale = 1 / np.sqrt(channels)
"""Define output of Meta Models and visualize the results."""
import math
from itertools import product
from scipy.spatial import cKDTree
import numpy as np
import logging
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.plotting import figure
from bokeh.models import Slider, ColumnDataSource, HoverTool
from bokeh.models import ColorBar, BasicTicker, LinearColorMapper, Range1d
from bokeh.models.widgets import TextInput, Select
from bokeh.server.server import Server
from openmdao.components.meta_model_unstructured_comp import MetaModelUnStructuredComp
from openmdao.components.meta_model_structured_comp import MetaModelStructuredComp
from openmdao.core.problem import Problem
def stack_outputs(outputs_dict):
"""
Stack the values of a dictionary.
Parameters
----------
outputs_dict : dict
Dictionary of outputs
Returns
-------
array
np.stack of values
"""
return np.stack([np.asarray(v) for v in outputs_dict.values()], axis=-1)
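# Illustrative example (assumed dictionary): stack_outputs({'f': [1.0, 2.0], 'g': [3.0, 4.0]})
# returns an array of shape (2, 2) whose first column holds the 'f' values and whose
# second column holds the 'g' values.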
class MetaModelVisualization(object):
"""
Top-level container for the Meta Model Visualization.
Attributes
----------
prob : Problem
Name of variable corresponding to Problem Component
meta_model : MetaModel
Name of empty Meta Model Component object reference
resolution : int
Number used to calculate width and height of contour plot
is_structured_meta_model : Bool
Boolean used to signal whether the meta model is structured or unstructured
slider_source : ColumnDataSource
Data source containing dictionary of sliders
contour_training_data_source : ColumnDataSource
Data source containing dictionary of training data points
bottom_plot_source : ColumnDataSource
Data source containing data for the bottom subplot
bottom_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the bottom subplot
right_plot_source : ColumnDataSource
Data source containing data for the right subplot
right_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the right subplot
contour_plot_source : ColumnDataSource
Data source containing data for the contour plot
input_names : list
List of input data titles as strings
output_names : list
List of output data titles as strings
training_inputs : dict
Dictionary of input training data
x_input_select : Select
Bokeh Select object containing a list of inputs for the x axis
y_input_select : Select
Bokeh Select object containing a list of inputs for the y axis
output_select : Select
Bokeh Select object containing a list of inputs for the outputs
x_input_slider : Slider
Bokeh Slider object containing a list of input values for the x axis
y_input_slider : Slider
Bokeh Slider object containing a list of input values for the y axis
slider_dict : dict
Dictionary of slider names and their respective slider objects
predict_inputs : dict
Dictionary containing training data points to predict at.
num_inputs : int
Number of inputs
num_outputs : int
Number of outputs
limit_range : array
Array containing the range of each input
scatter_distance : TextInput
Text input for user to enter custom value to calculate distance of training points around
slice line
right_alphas : array
Array of points containing alpha values for right plot
bottom_alphas : array
Array of points containing alpha values for bottom plot
dist_range : float
Value taken from scatter_distance used for calculating distance of training points around
slice line
x_index : int
Value of x axis column
y_index : int
Value of y axis column
output_variable : int
Value of output axis column
sliders_and_selects : layout
Layout containing the sliders and select elements
doc_layout : layout
Contains first row of plots
doc_layout2 : layout
Contains second row of plots
Z : array
A 2D array containing contour plot data
"""
def __init__(self, model, resolution=50, doc=None):
"""
Initialize parameters.
Parameters
----------
model : MetaModelComponent
Reference to meta model component
resolution : int
Value used to calculate the size of contour plot meshgrid
doc : Document
The bokeh document to build.
"""
self.prob = Problem()
self.resolution = resolution
logging.getLogger("bokeh").setLevel(logging.ERROR)
# If the surrogate model coming in is unstructured
if isinstance(model, MetaModelUnStructuredComp):
self.is_structured_meta_model = False
# Create list of input names, check if it has more than one input, then create list
# of outputs
self.input_names = [name[0] for name in model._surrogate_input_names]
if len(self.input_names) < 2:
raise ValueError('Must have more than one input value')
self.output_names = [name[0] for name in model._surrogate_output_names]
# Create reference for untructured component
self.meta_model = MetaModelUnStructuredComp(
default_surrogate=model.options['default_surrogate'])
# If the surrogate model coming in is structured
elif isinstance(model, MetaModelStructuredComp):
self.is_structured_meta_model = True
self.input_names = [name for name in model._var_rel_names['input']]
if len(self.input_names) < 2:
raise ValueError('Must have more than one input value')
self.output_names = [name for name in model._var_rel_names['output']]
self.meta_model = MetaModelStructuredComp(
distributed=model.options['distributed'],
extrapolate=model.options['extrapolate'],
method=model.options['method'],
training_data_gradients=model.options['training_data_gradients'],
vec_size=1)
# Pair input list names with their respective data
self.training_inputs = {}
self._setup_empty_prob_comp(model)
# Setup dropdown menus for x/y inputs and the output value
self.x_input_select = Select(title="X Input:", value=[x for x in self.input_names][0],
options=[x for x in self.input_names])
self.x_input_select.on_change('value', self._x_input_update)
self.y_input_select = Select(title="Y Input:", value=[x for x in self.input_names][1],
options=[x for x in self.input_names])
self.y_input_select.on_change('value', self._y_input_update)
self.output_select = Select(title="Output:", value=[x for x in self.output_names][0],
options=[x for x in self.output_names])
self.output_select.on_change('value', self._output_value_update)
# Create sliders for each input
self.slider_dict = {}
self.predict_inputs = {}
for title, values in self.training_inputs.items():
slider_data = np.linspace(min(values), max(values), self.resolution)
self.predict_inputs[title] = slider_data
# Calculates the distance between slider ticks
slider_step = slider_data[1] - slider_data[0]
slider_object = Slider(start=min(values), end=max(values), value=min(values),
step=slider_step, title=str(title))
self.slider_dict[title] = slider_object
self._slider_attrs()
# Length of inputs and outputs
self.num_inputs = len(self.input_names)
self.num_outputs = len(self.output_names)
# Precalculate the problem bounds.
limits = np.array([[min(value), max(value)] for value in self.training_inputs.values()])
self.limit_range = limits[:, 1] - limits[:, 0]
# Positional indicies
self.x_index = 0
self.y_index = 1
self.output_variable = self.output_names.index(self.output_select.value)
# Data sources are filled with initial values
# Slider Column Data Source
self.slider_source = ColumnDataSource(data=self.predict_inputs)
# Contour plot Column Data Source
self.contour_plot_source = ColumnDataSource(data=dict(
z=np.random.rand(self.resolution, self.resolution)))
self.contour_training_data_source = ColumnDataSource(
data=dict(x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
# Bottom plot Column Data Source
self.bottom_plot_source = ColumnDataSource(data=dict(
x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
self.bottom_plot_scatter_source = ColumnDataSource(data=dict(
bot_slice_x=np.repeat(0, self.resolution), bot_slice_y=np.repeat(0, self.resolution)))
# Right plot Column Data Source
self.right_plot_source = ColumnDataSource(data=dict(
x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
self.right_plot_scatter_source = ColumnDataSource(data=dict(
right_slice_x=np.repeat(0, self.resolution),
right_slice_y=np.repeat(0, self.resolution)))
# Text input to change the distance of reach when searching for nearest data points
self.scatter_distance = TextInput(value="0.1", title="Scatter Distance")
self.scatter_distance.on_change('value', self._scatter_input)
self.dist_range = float(self.scatter_distance.value)
# Grouping all of the sliders and dropdowns into one column
sliders = [value for value in self.slider_dict.values()]
sliders.extend(
[self.x_input_select, self.y_input_select, self.output_select, self.scatter_distance])
self.sliders_and_selects = row(
column(*sliders))
# Layout creation
self.doc_layout = row(self._contour_data(), self._right_plot(), self.sliders_and_selects)
self.doc_layout2 = row(self._bottom_plot())
if doc is None:
doc = curdoc()
doc.add_root(self.doc_layout)
doc.add_root(self.doc_layout2)
doc.title = 'Meta Model Visualization'
def _setup_empty_prob_comp(self, metamodel):
"""
Take data from surrogate ref and pass it into new surrogate model with empty Problem model.
Parameters
----------
metamodel : MetaModelComponent
Reference to meta model component
"""
# Check for structured or unstructured
if self.is_structured_meta_model:
# Loop through the input names
for idx, name in enumerate(self.input_names):
# Check for no training data
try:
# Append the input data/titles to a dictionary
self.training_inputs[name] = metamodel.params[idx]
# Also, append the data as an 'add_input' to the model reference
self.meta_model.add_input(name, 0.,
training_data=metamodel.params[idx])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
# Add the outputs to the model reference
for idx, name in enumerate(self.output_names):
self.meta_model.add_output(
name, 0.,
training_data=metamodel.training_outputs[name])
else:
for name in self.input_names:
try:
self.training_inputs[name] = {
title for title in metamodel.options['train:' + str(name)]}
self.meta_model.add_input(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
for name in self.output_names:
self.meta_model.add_output(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
# Add the subsystem and setup
self.prob.model.add_subsystem('interp', self.meta_model)
self.prob.setup()
def _slider_attrs(self):
"""
Assign data to slider objects and callback functions.
Parameters
----------
None
"""
for name, slider_object in self.slider_dict.items():
# Checks if there is a callback previously assigned and then clears it
if len(slider_object._callbacks) == 1:
slider_object._callbacks.clear()
# Check if the name matches the 'x input' title
if name == self.x_input_select.value:
# Set the object and add an event handler
self.x_input_slider = slider_object
self.x_input_slider.on_change('value', self._scatter_plots_update)
# Check if the name matches the 'y input' title
elif name == self.y_input_select.value:
# Set the object and add an event handler
self.y_input_slider = slider_object
self.y_input_slider.on_change('value', self._scatter_plots_update)
else:
# If it is not an x or y input then just assign it the event handler
slider_object.on_change('value', self._update)
def _make_predictions(self, data):
"""
Run the data parameter through the surrogate model which is given in prob.
Parameters
----------
data : dict
Dictionary containing training points.
Returns
-------
array
np.stack of predicted points.
"""
# Create dictionary with an empty list
outputs = {name: [] for name in self.output_names}
# Parse dict into shape [n**2, number of inputs] list
inputs = np.empty([self.resolution**2, self.num_inputs])
for idx, values in enumerate(data.values()):
inputs[:, idx] = values.flatten()
# Check for structured or unstructured
if self.is_structured_meta_model:
# Assign each row of the data coming in to a tuple. Loop through the tuple, and append
# the name of the input and value.
for idx, tup in enumerate(inputs):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
# Append the predicted value(s)
for title in self.output_names:
outputs[title].append(
np.array(self.prob[self.meta_model.name + '.' + title]))
else:
for idx, tup in enumerate(inputs):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
for title in self.output_names:
outputs[title].append(
float(self.prob[self.meta_model.name + '.' + title]))
return stack_outputs(outputs)
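    # Illustrative sketch (comments only, not executed): assuming two swept inputs with the
    # hypothetical names 'a' and 'b', _contour_data_calcs() below builds a dict of
    # (resolution, resolution) arrays, and _make_predictions() flattens it to a
    # (resolution**2, num_inputs) block, runs the Problem once per row, and returns the
    # stacked predictions, roughly (resolution**2, num_outputs) in shape:
    #   preds = self._make_predictions(self._contour_data_calcs())
    #   preds[:, self.output_variable]  # column used for the contour surface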
def _contour_data_calcs(self):
"""
Parse input data into a dictionary to be predicted at.
Parameters
----------
None
Returns
-------
dict
Dictionary of training data to be predicted at.
"""
# Create initial data array of training points
resolution = self.resolution
x_data = np.zeros((resolution, resolution, self.num_inputs))
self._slider_attrs()
# Broadcast the inputs to every row of x_data array
x_data[:, :, :] = np.array(self.input_point_list)
# Find the x/y input titles and match their index positions
for idx, (title, values) in enumerate(self.slider_source.data.items()):
if title == self.x_input_select.value:
self.xlins_mesh = values
x_index_position = idx
if title == self.y_input_select.value:
self.ylins_mesh = values
y_index_position = idx
# Make meshgrid from the x/y inputs to be plotted
X, Y = np.meshgrid(self.xlins_mesh, self.ylins_mesh)
# Move the x/y inputs to their respective positions in x_data
x_data[:, :, x_index_position] = X
x_data[:, :, y_index_position] = Y
pred_dict = {}
for idx, title in enumerate(self.slider_source.data):
pred_dict.update({title: x_data[:, :, idx]})
return pred_dict
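    # Illustrative sketch (comments only, not executed): with hypothetical x/y selections
    # 'a' and 'b' and every other slider held fixed, the dict returned above contains the
    # meshgrid for the two swept inputs and constant planes for the rest:
    #   pred_dict['a'][i, j] == X[i, j]
    #   pred_dict['b'][i, j] == Y[i, j]
    #   pred_dict[other][i, j] == current slider value of 'other'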
def _contour_data(self):
"""
Create a contour plot.
Parameters
----------
None
Returns
-------
Bokeh Image Plot
"""
resolution = self.resolution
# Output data array initialization
y_data = np.zeros((resolution, resolution, self.num_outputs))
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Pass the dict to make predictions and then reshape the output to
# (resolution, resolution, number of outputs)
y_data[:, :, :] = self._make_predictions(self._contour_data_calcs()).reshape(
(resolution, resolution, self.num_outputs))
# Use the output variable to pull the correct column of data from the predicted
# data (y_data)
self.Z = y_data[:, :, self.output_variable]
# Reshape it to be 2D
self.Z = self.Z.reshape(resolution, resolution)
# Update the data source with new data
self.contour_plot_source.data = dict(z=[self.Z])
# Min to max of training data
self.contour_x_range = xlins = self.xlins_mesh
self.contour_y_range = ylins = self.ylins_mesh
# Color bar formatting
color_mapper = LinearColorMapper(
palette="Viridis11", low=np.amin(self.Z), high=np.amax(self.Z))
color_bar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(), label_standoff=12,
location=(0, 0))
# Contour Plot
self.contour_plot = contour_plot = figure(
match_aspect=False,
tooltips=[(self.x_input_select.value, "$x"), (self.y_input_select.value, "$y"),
(self.output_select.value, "@z")], tools='')
contour_plot.x_range.range_padding = 0
contour_plot.y_range.range_padding = 0
contour_plot.plot_width = 600
contour_plot.plot_height = 500
contour_plot.xaxis.axis_label = self.x_input_select.value
contour_plot.yaxis.axis_label = self.y_input_select.value
contour_plot.min_border_left = 0
contour_plot.add_layout(color_bar, 'right')
contour_plot.x_range = Range1d(min(xlins), max(xlins))
contour_plot.y_range = Range1d(min(ylins), max(ylins))
contour_plot.image(image='z', source=self.contour_plot_source, x=min(xlins), y=min(ylins),
dh=(max(ylins) - min(ylins)), dw=(max(xlins) - min(xlins)),
palette="Viridis11")
# Adding training data points overlay to contour plot
if self.is_structured_meta_model:
data = self._structured_training_points()
else:
data = self._unstructured_training_points()
if len(data):
# Add training data points overlay to contour plot
data = np.array(data)
if self.is_structured_meta_model:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model.training_outputs[
self.output_select.value].flatten())
else:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model._training_output[
self.output_select.value])
training_data_renderer = self.contour_plot.circle(
x='x', y='y', source=self.contour_training_data_source,
size=5, color='white', alpha=0.50)
self.contour_plot.add_tools(HoverTool(renderers=[training_data_renderer], tooltips=[
(self.x_input_select.value + " (train)", '@x'),
(self.y_input_select.value + " (train)", '@y'),
(self.output_select.value + " (train)", '@z'), ]))
return self.contour_plot
def _right_plot(self):
"""
Create the right side subplot to view the projected slice.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the y input and match it with the data
y_idx = self.y_input_select.value
y_data = self.predict_inputs[y_idx]
# Find the position of the x_input slider
x_value = self.x_input_slider.value
# Rounds the x_data to match the predict_inputs value
subplot_value_index = np.where(
np.around(self.predict_inputs[self.x_input_select.value], 5) ==
np.around(x_value, 5))[0]
# Make slice in Z data at the point calculated before and add it to the data source
z_data = self.Z[:, subplot_value_index].flatten()
x = z_data
y = self.slider_source.data[y_idx]
# Update the data source with new data
self.right_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.right_plot_fig = right_plot_fig = figure(
plot_width=250, plot_height=500,
title="{} vs {}".format(y_idx, self.output_select.value), tools="pan")
right_plot_fig.xaxis.axis_label = self.output_select.value
right_plot_fig.yaxis.axis_label = y_idx
right_plot_fig.xaxis.major_label_orientation = math.pi / 9
right_plot_fig.line(x='x', y='y', source=self.right_plot_source)
right_plot_fig.x_range.range_padding = 0.1
right_plot_fig.y_range.range_padding = 0.02
# Determine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True, source='right')
else:
data = self._unstructured_training_points(compute_distance=True, source='right')
self.right_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None,
fill_color='#000000',
fill_alpha=self.right_alphas.tolist())
right_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(self.output_select.value + " (train)", '@x'),
(y_idx + " (train)", '@y'),
]))
right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None, fill_color='#000000',
fill_alpha=self.right_alphas.tolist())
# Set the right_plot data source to new values
self.right_plot_scatter_source.data = dict(
right_slice_x=np.repeat(x_value, self.resolution), right_slice_y=y_data)
self.contour_plot.line(
'right_slice_x', 'right_slice_y', source=self.right_plot_scatter_source,
color='black', line_width=2)
return self.right_plot_fig
def _bottom_plot(self):
"""
Create the bottom subplot to view the projected slice.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the x input and match it with the data
x_idx = self.x_input_select.value
x_data = self.predict_inputs[x_idx]
# Find the position of the y_input slider
y_value = self.y_input_slider.value
# Rounds the y_data to match the predict_inputs value
subplot_value_index = np.where(
np.around(self.predict_inputs[self.y_input_select.value], 5) ==
np.around(y_value, 5))[0]
# Make slice in Z data at the point calculated before and add it to the data source
z_data = self.Z[subplot_value_index, :].flatten()
x = self.slider_source.data[x_idx]
y = z_data
# Update the data source with new data
self.bottom_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.bottom_plot_fig = bottom_plot_fig = figure(
plot_width=550, plot_height=250,
title="{} vs {}".format(x_idx, self.output_select.value), tools="")
bottom_plot_fig.xaxis.axis_label = x_idx
bottom_plot_fig.yaxis.axis_label = self.output_select.value
bottom_plot_fig.line(x='x', y='y', source=self.bottom_plot_source)
bottom_plot_fig.x_range.range_padding = 0.02
bottom_plot_fig.y_range.range_padding = 0.1
# Determine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True)
else:
data = self._unstructured_training_points(compute_distance=True)
self.bottom_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = bottom_plot_fig.scatter(x=data[:, 0], y=data[:, 3], line_color=None,
fill_color='#000000',
fill_alpha=self.bottom_alphas.tolist())
bottom_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(x_idx + " (train)", '@x'),
(self.output_select.value + " (train)", '@y'),
]))
# Set the right_plot data source to new values
self.bottom_plot_scatter_source.data = dict(
bot_slice_x=x_data,
bot_slice_y=np.repeat(y_value, self.resolution))
self.contour_plot.line(
'bot_slice_x', 'bot_slice_y', source=self.bottom_plot_scatter_source, color='black',
line_width=2)
return self.bottom_plot_fig
def _unstructured_training_points(self, compute_distance=False, source='bottom'):
"""
        Calculate the training points and return an array containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being called from.
Returns
-------
array
The array of training points and their alpha opacity with respect to the surrogate line
"""
# Input training data and output training data
x_training = self.meta_model._training_input
training_output = np.squeeze(stack_outputs(self.meta_model._training_output), axis=1)
# Index of input/output variables
x_index = self.x_input_select.options.index(self.x_input_select.value)
y_index = self.y_input_select.options.index(self.y_input_select.value)
output_variable = self.output_names.index(self.output_select.value)
# Vertically stack the x/y inputs and then transpose them
infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = np.divide(points, self.limit_range)
dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
scaled_x0 = np.divide(self.input_point_list, self.limit_range)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
# Nearest points to x slice
if x_training.shape[1] < 3:
tree = cKDTree(points)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
dists, idxs = tree.query(
scaled_x0, k=len(x_training), distance_upper_bound=self.dist_range)
# kdtree query always returns requested k even if there are not enough valid points
idx_finite = np.where(np.isfinite(dists))
dists = dists[idx_finite]
idxs = idxs[idx_finite]
else:
dists, idxs = self._multidimension_input(scaled_x0, points, source=source)
# data contains:
# [x_value, y_value, ND-distance, func_value]
data = np.zeros((len(idxs), 4))
for dist_index, j in enumerate(idxs):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = dists[dist_index]
data[dist_index, 3] = training_output[j, output_variable]
return data
def _structured_training_points(self, compute_distance=False, source='bottom'):
"""
Calculate the training points and return an array containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being called from.
Returns
-------
array
The array of training points and their alpha opacity with respect to the surrogate line
"""
# Create tuple of the input parameters
input_dimensions = tuple(self.meta_model.params)
# Input training data and output training data
x_training = np.array([z for z in product(*input_dimensions)])
training_output = self.meta_model.training_outputs[self.output_select.value].flatten()
# Index of input/output variables
x_index = self.x_input_select.options.index(self.x_input_select.value)
y_index = self.y_input_select.options.index(self.y_input_select.value)
# Vertically stack the x/y inputs and then transpose them
infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = np.divide(points, self.limit_range)
self.dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
scaled_x0 = np.divide(self.input_point_list, self.limit_range)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
# Nearest points to x slice
if x_training.shape[1] < 3:
x_tree, x_idx = self._two_dimension_input(scaled_x0, points, source=source)
else:
x_tree, x_idx = self._multidimension_input(scaled_x0, points, source=source)
# format for 'data'
# [x_value, y_value, ND-distance_(x or y), func_value]
n = len(x_tree)
data = np.zeros((n, 4))
for dist_index, j in enumerate(x_idx):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = x_tree[dist_index]
data[dist_index, 3] = training_output[j]
return data
def _two_dimension_input(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : array
Array of normalized slider positions.
training_points : array
Array of input training data.
source : str
Which subplot the method is being called from.
Returns
-------
idxs : array
Index of closest points that are within the dist range.
x_tree : array
            One dimensional array of points that are within the dist range.
"""
# Column of the input
if source == 'right':
col_idx = self.y_input_select.options.index(self.y_input_select.value)
else:
col_idx = self.x_input_select.options.index(self.x_input_select.value)
# Delete the axis of input from source to predicted 1D distance
x = np.delete(scaled_points, col_idx, axis=0)
x_training_points = np.delete(training_points, col_idx, axis=1).flatten()
# Tree of point distances
x_tree = np.abs(x - x_training_points)
        # Only return points that are within our distance-viewing parameter.
idx = np.where(x_tree <= self.dist_range)
x_tree = x_tree[idx]
return x_tree, idx[0]
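    # Illustrative sketch (comments only, not executed): for the bottom plot the x input is
    # the one being swept, so its column is removed and the distance of each training point
    # to the slice is simply the 1-D gap along the remaining input,
    #   abs(scaled_slider_value - scaled_training_value),
    # with points farther than self.dist_range dropped; the survivors drive the alpha
    # shading of the training-data scatter overlay.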
def _multidimension_input(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : array
Array of normalized slider positions.
training_points : array
Array of input training data.
source : str
Which subplot the method is being called from.
Returns
-------
idxs : array
Index of closest points that are within the dist range.
x_tree : array
Array of points that are within the dist range.
"""
# Column of the input
if source == 'right':
col_idx = self.y_input_select.options.index(self.y_input_select.value)
else:
col_idx = self.x_input_select.options.index(self.x_input_select.value)
# Delete the axis of input from source to predicted distance
x = np.delete(scaled_points, col_idx, axis=0)
x_training_points = np.delete(training_points, col_idx, axis=1)
# Tree of point distances
x_tree = cKDTree(x_training_points)
# Query the nearest neighbors tree for the closest points to the scaled array
dists, idx = x_tree.query(x, k=len(x_training_points),
distance_upper_bound=self.dist_range)
# kdtree query always returns requested k even if there are not enough valid points
        idx_finite = np.where(np.isfinite(dists))
        dists = dists[idx_finite]
        idx = idx[idx_finite]
        return dists, idx
import numpy as np
import os
import math
import uuid
from . import stl_combine
def rotationMatrixToEulerAngles(R):
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
def origin(matrix):
urdf = '<origin xyz="%g %g %g" rpy="%g %g %g" />'
x = matrix[0, 3]
y = matrix[1, 3]
z = matrix[2, 3]
rpy = rotationMatrixToEulerAngles(matrix)
return urdf % (x, y, z, rpy[0], rpy[1], rpy[2])
def pose(matrix, frame=''):
sdf = '<pose>%g %g %g %g %g %g</pose>'
x = matrix[0, 3]
y = matrix[1, 3]
z = matrix[2, 3]
rpy = rotationMatrixToEulerAngles(matrix)
if frame != '':
sdf = '<frame name="'+frame+'_frame">'+sdf+'</frame>'
return sdf % (x, y, z, rpy[0], rpy[1], rpy[2])
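# Illustrative sketch (not part of the original module): formatting a homogeneous transform
# as URDF/SDF strings. The matrix below is a made-up example (90-degree rotation about z
# plus a translation) and the helper name is hypothetical; it is never called at import time.
def _example_origin_and_pose():
    """Hedged usage sketch for origin() and pose()."""
    angle = math.pi / 2
    T = np.array([
        [math.cos(angle), -math.sin(angle), 0.0, 0.1],
        [math.sin(angle), math.cos(angle), 0.0, 0.2],
        [0.0, 0.0, 1.0, 0.3],
        [0.0, 0.0, 0.0, 1.0],
    ])
    print(origin(T))        # <origin xyz="0.1 0.2 0.3" rpy="0 0 1.5708" />
    print(pose(T, 'base'))  # same pose wrapped in a <frame name="base_frame"> element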
class RobotDescription(object):
def __init__(self, name):
self.drawCollisions = False
self.relative = True
self.mergeSTLs = 'no'
self.mergeSTLsCollisions = False
self.useFixedLinks = False
self.simplifySTLs = 'no'
self.maxSTLSize = 3
self.xml = ''
self.jointMaxEffort = 1
self.jointMaxVelocity = 10
self.noDynamics = False
self.packageName = ""
self.addDummyBaseLink = False
self.robotName = name
self.meshDir = None
def shouldMergeSTLs(self, node):
return self.mergeSTLs == 'all' or self.mergeSTLs == node
def shouldSimplifySTLs(self, node):
return self.simplifySTLs == 'all' or self.simplifySTLs == node
def append(self, str):
self.xml += str+"\n"
def jointMaxEffortFor(self, jointName):
if isinstance(self.jointMaxEffort, dict):
if jointName in self.jointMaxEffort:
return self.jointMaxEffort[jointName]
else:
return self.jointMaxEffort['default']
else:
return self.jointMaxEffort
def jointMaxVelocityFor(self, jointName):
if isinstance(self.jointMaxVelocity, dict):
if jointName in self.jointMaxVelocity:
return self.jointMaxVelocity[jointName]
else:
return self.jointMaxVelocity['default']
else:
return self.jointMaxVelocity
def resetLink(self):
self._mesh = {'visual': None, 'collision': None}
self._color = np.array([0., 0., 0.])
self._color_mass = 0
self._link_childs = 0
self._visuals = []
self._dynamics = []
def addLinkDynamics(self, matrix, mass, com, inertia):
# Inertia
I = np.matrix(np.reshape(inertia[:9], (3, 3)))
R = matrix[:3, :3]
# Expressing COM in the link frame
com = np.array(
(matrix*np.matrix([com[0], com[1], com[2], 1]).T).T)[0][:3]
# Expressing inertia in the link frame
inertia = R.T*I*R
self._dynamics.append({
'mass': mass,
'com': com,
'inertia': inertia
})
def mergeSTL(self, stl, matrix, color, mass, node='visual'):
if node == 'visual':
self._color += np.array(color) * mass
self._color_mass += mass
m = stl_combine.load_mesh(stl)
stl_combine.apply_matrix(m, matrix)
if self._mesh[node] is None:
self._mesh[node] = m
else:
self._mesh[node] = stl_combine.combine_meshes(self._mesh[node], m)
def linkDynamics(self):
mass = 0
        com = np.array([0.0]*3)
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from pixellib.torchbackend.instance.structures.masks import BitMasks, PolygonMasks, polygons_to_bitmask
from pixellib.torchbackend.instance.structures.boxes import Boxes, BoxMode
from pixellib.torchbackend.instance.structures.instances import Instances
from pixellib.torchbackend.instance.structures.boxes import _maybe_jit_unused
from typing import List, Tuple
'''from structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
'''
from pixellib.torchbackend.instance.utils.file_io import PathManager
import pixellib.torchbackend.instance.data.transforms as T
from .catalogdata import MetadataCatalog
__all__ = [
"SizeMismatchError",
"convert_image_to_rgb",
"check_image_size",
"transform_proposals",
"transform_instance_annotations",
"annotations_to_instances",
"annotations_to_instances_rotated",
"build_augmentation",
"build_transform_gen",
"create_keypoint_hflip_indices",
"filter_empty_instances",
"read_image",
]
class RotatedBoxes(Boxes):
"""
This structure stores a list of rotated boxes as a Nx5 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx5 matrix. Each row is
(x_center, y_center, width, height, angle),
in which angle is represented in degrees.
While there's no strict range restriction for it,
the recommended principal range is between [-180, 180) degrees.
Assume we have a horizontal box B = (x_center, y_center, width, height),
where width is along the x-axis and height is along the y-axis.
The rotated box B_rot (x_center, y_center, width, height, angle)
can be seen as:
1. When angle == 0:
B_rot == B
2. When angle > 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
3. When angle < 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.
Mathematically, since the right-handed coordinate system for image space
is (y, x), where y is top->down and x is left->right, the 4 vertices of the
rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
:math:`(y_c, x_c)` is the center of the rectangle):
.. math::
yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,
xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,
which is the standard rigid-body rotation transformation.
Intuitively, the angle is
(1) the rotation angle from y-axis in image space
to the height vector (top->down in the box's local coordinate system)
of the box in CCW, and
(2) the rotation angle from x-axis in image space
to the width vector (left->right in the box's local coordinate system)
of the box in CCW.
More intuitively, consider the following horizontal box ABCD represented
in (x1, y1, x2, y2): (3, 2, 7, 4),
covering the [3, 7] x [2, 4] region of the continuous coordinate system
which looks like this:
.. code:: none
O--------> x
|
| A---B
| | |
| D---C
|
v y
Note that each capital letter represents one 0-dimensional geometric point
instead of a 'square pixel' here.
In the example above, using (x, y) to represent a point we have:
.. math::
O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)
We name vector AB = vector DC as the width vector in box's local coordinate system, and
vector AD = vector BC as the height vector in box's local coordinate system. Initially,
when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
in the image space, respectively.
For better illustration, we denote the center of the box as E,
.. code:: none
O--------> x
|
| A---B
| | E |
| D---C
|
v y
where the center E = ((3+7)/2, (2+4)/2) = (5, 3).
Also,
.. math::
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Therefore, the corresponding representation for the same shape in rotated box in
(x_center, y_center, width, height, angle) format is:
(5, 3, 4, 2, 0),
Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
CCW (counter-clockwise) by definition. It looks like this:
.. code:: none
O--------> x
| B-C
| | |
| |E|
| | |
| A-D
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CCW with regard to E:
A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)
Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
vector AD or vector BC (the top->down height vector in box's local coordinate system),
or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
width vector in box's local coordinate system).
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
by definition? It looks like this:
.. code:: none
O--------> x
| D-A
| | |
| |E|
| | |
| C-B
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CW with regard to E:
A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
will be 1. However, these two will generate different RoI Pooling results and
should not be treated as an identical box.
On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
(X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
equivalent to rotating the same shape 90 degrees CW.
We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):
.. code:: none
O--------> x
|
| C---D
| | E |
| B---A
|
v y
.. math::
A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Finally, this is a very inaccurate (heavily quantized) illustration of
how (5, 3, 4, 2, 60) looks like in case anyone wonders:
.. code:: none
O--------> x
| B\
| / C
| /E /
| A /
| `D
v y
It's still a rectangle with center of (5, 3), width of 4 and height of 2,
but its angle (and thus orientation) is somewhere between
(5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
self.tensor = tensor
def clone(self) -> "RotatedBoxes":
"""
Clone the RotatedBoxes.
Returns:
RotatedBoxes
"""
return RotatedBoxes(self.tensor.clone())
@_maybe_jit_unused
def to(self, device: torch.device):
# Boxes are assumed float32 and does not support to(dtype)
return RotatedBoxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = box[:, 2] * box[:, 3]
return area
def normalize_angles(self) -> None:
"""
Restrict angles to the range of [-180, 180) degrees
"""
self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0
def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
For RRPN:
Only clip boxes that are almost horizontal with a tolerance of
clip_angle_threshold to maintain backward compatibility.
Rotated boxes beyond this threshold are not clipped for two reasons:
1. There are potentially multiple ways to clip a rotated box to make it
fit within the image.
2. It's tricky to make the entire rectangular box fit within the image
and still be able to not leave out pixels of interest.
Therefore we rely on ops like RoIAlignRotated to safely handle this.
Args:
box_size (height, width): The clipping box's size.
clip_angle_threshold:
Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees),
we do the clipping as horizontal boxes.
"""
h, w = box_size
        # normalize angles to be within [-180, 180) degrees
self.normalize_angles()
idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]
# convert to (x1, y1, x2, y2)
x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0
y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0
x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0
y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0
# clip
x1.clamp_(min=0, max=w)
y1.clamp_(min=0, max=h)
x2.clamp_(min=0, max=w)
y2.clamp_(min=0, max=h)
# convert back to (xc, yc, w, h)
self.tensor[idx, 0] = (x1 + x2) / 2.0
self.tensor[idx, 1] = (y1 + y2) / 2.0
# make sure widths and heights do not increase due to numerical errors
self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)
self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty, if either of its side is no larger than threshold.
Returns:
Tensor: a binary vector which represents
whether each box is empty (False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2]
heights = box[:, 3]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "RotatedBoxes":
"""
Returns:
RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return RotatedBoxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
item
)
return RotatedBoxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "RotatedBoxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box covering
[0, width] x [0, height]
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
For RRPN, it might not be necessary to call this function since it's common
for rotated box to extend to outside of the image boundaries
(the clip function only clips the near-horizontal boxes)
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
cnt_x = self.tensor[..., 0]
cnt_y = self.tensor[..., 1]
half_w = self.tensor[..., 2] / 2.0
half_h = self.tensor[..., 3] / 2.0
a = self.tensor[..., 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
max_rect_dx = c * half_w + s * half_h
max_rect_dy = c * half_h + s * half_w
inds_inside = (
(cnt_x - max_rect_dx >= -boundary_threshold)
& (cnt_y - max_rect_dy >= -boundary_threshold)
& (cnt_x + max_rect_dx < width + boundary_threshold)
& (cnt_y + max_rect_dy < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return self.tensor[:, :2]
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the rotated box with horizontal and vertical scaling factors
Note: when scale_factor_x != scale_factor_y,
the rotated box does not preserve the rectangular shape when the angle
is not a multiple of 90 degrees under resize transformation.
Instead, the shape is a parallelogram (that has skew)
Here we make an approximation by fitting a rotated rectangle to the parallelogram.
"""
self.tensor[:, 0] *= scale_x
self.tensor[:, 1] *= scale_y
theta = self.tensor[:, 4] * math.pi / 180.0
c = torch.cos(theta)
s = torch.sin(theta)
# In image space, y is top->down and x is left->right
        # Consider the local coordinate system for the rotated box,
# where the box center is located at (0, 0), and the four vertices ABCD are
# A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)
# the midpoint of the left edge AD of the rotated box E is:
# E = (A+D)/2 = (-w / 2, 0)
# the midpoint of the top edge AB of the rotated box F is:
# F(0, -h / 2)
# To get the old coordinates in the global system, apply the rotation transformation
# (Note: the right-handed coordinate system for image space is yOx):
# (old_x, old_y) = (s * y + c * x, c * y - s * x)
# E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)
# F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)
# After applying the scaling factor (sfx, sfy):
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)
# F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)
        # The new width after scaling transformation becomes:
# w(new) = |E(new) - O| * 2
# = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2
# = sqrt[(sfx * c)^2 + (sfy * s)^2] * w
# i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y
self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)
# h(new) = |F(new) - O| * 2
# = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2
# = sqrt[(sfx * s)^2 + (sfy * c)^2] * h
# i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x
self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)
# The angle is the rotation angle from y-axis in image space to the height
# vector (top->down in the box's local coordinate system) of the box in CCW.
#
# angle(new) = angle_yOx(O - F(new))
# = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )
# = atan2(sfx * s * h / 2, sfy * c * h / 2)
# = atan2(sfx * s, sfy * c)
#
# For example,
# when sfx == sfy, angle(new) == atan2(s, c) == angle(old)
self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi
@classmethod
@_maybe_jit_unused
def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes":
"""
Concatenates a list of RotatedBoxes into a single RotatedBoxes
Arguments:
boxes_list (list[RotatedBoxes])
Returns:
RotatedBoxes: the concatenated RotatedBoxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, RotatedBoxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (5,) at a time.
"""
yield from self.tensor
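# Illustrative sketch (not part of the library): building RotatedBoxes in the
# (x_center, y_center, width, height, angle) convention documented above. The helper name
# and values are hypothetical and the function is never called at import time.
def _example_rotated_boxes():
    """Hedged usage sketch for RotatedBoxes; mirrors the ABCD example in the docstring."""
    boxes = RotatedBoxes(torch.tensor([
        [5.0, 3.0, 4.0, 2.0, 0.0],   # axis-aligned box covering [3, 7] x [2, 4]
        [5.0, 3.0, 4.0, 2.0, 90.0],  # same box rotated 90 degrees CCW about its center
    ]))
    areas = boxes.area()                 # tensor([8., 8.]): width * height, angle-independent
    centers = boxes.get_centers()        # tensor([[5., 3.], [5., 3.]])
    inside = boxes.inside_box((10, 10))  # both centers fit inside a 10 x 10 reference box
    return areas, centers, inside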
def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> torch.Tensor:
"""
Given two lists of rotated boxes of size N and M,
compute the IoU (intersection over union)
between **all** N x M pairs of boxes.
The box order must be (x_center, y_center, width, height, angle).
Args:
boxes1, boxes2 (RotatedBoxes):
two `RotatedBoxes`. Contains N & M rotated boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
class SizeMismatchError(ValueError):
"""
When loaded image has difference width/height compared with annotation.
"""
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274 # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
def convert_image_to_rgb(image, format):
"""
Convert an image from given format to RGB.
Args:
image (np.ndarray or Tensor): an HWC image
format (str): the format of input image, also see `read_image`
Returns:
(np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
"""
if isinstance(image, torch.Tensor):
image = image.cpu().numpy()
if format == "BGR":
image = image[:, :, [2, 1, 0]]
elif format == "YUV-BT.601":
image = np.dot(image, np.array(_M_YUV2RGB).T)
image = image * 255.0
else:
if format == "L":
image = image[:, :, 0]
image = image.astype(np.uint8)
image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
return image
def _apply_exif_orientation(image):
"""
Applies the exif orientation correctly.
This code exists per the bug:
https://github.com/python-pillow/Pillow/issues/3973
with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
various methods, especially `tobytes`
Function based on:
https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
Args:
image (PIL.Image): a PIL image
Returns:
(PIL.Image): the PIL image with exif orientation applied, if applicable
"""
if not hasattr(image, "getexif"):
return image
try:
exif = image.getexif()
except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
exif = None
if exif is None:
return image
orientation = exif.get(_EXIF_ORIENT)
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
return image.transpose(method)
return image
def read_image(file_name, format=None):
"""
Read an image into the given format.
Will apply rotation and flipping if the image has such exif information.
Args:
file_name (str): image file path
format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
Returns:
image (np.ndarray):
an HWC image in the given format, which is 0-255, uint8 for
supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
"""
with PathManager.open(file_name, "rb") as f:
image = Image.open(f)
# work around this bug: https://github.com/python-pillow/Pillow/issues/3973
image = _apply_exif_orientation(image)
return convert_PIL_to_numpy(image, format)
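# Illustrative sketch (not part of the library): reading an image with EXIF orientation
# applied and channels flipped to BGR. The path is a placeholder; never called at import time.
def _example_read_image():
    """Hedged usage sketch for read_image()."""
    img = read_image("/path/to/image.jpg", format="BGR")  # HWC uint8 array in BGR order
    assert img.ndim == 3 and img.shape[2] == 3
    return img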
def check_image_size(dataset_dict, image):
"""
Raise an error if the image does not match the size specified in the dict.
"""
if "width" in dataset_dict or "height" in dataset_dict:
image_wh = (image.shape[1], image.shape[0])
expected_wh = (dataset_dict["width"], dataset_dict["height"])
if not image_wh == expected_wh:
raise SizeMismatchError(
"Mismatched image shape{}, got {}, expect {}.".format(
" for image " + dataset_dict["file_name"]
if "file_name" in dataset_dict
else "",
image_wh,
expected_wh,
)
+ " Please check the width/height in your annotation."
)
# To ensure bbox always remap to original image size
if "width" not in dataset_dict:
dataset_dict["width"] = image.shape[1]
if "height" not in dataset_dict:
dataset_dict["height"] = image.shape[0]
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
"""
Apply transformations to the proposals in dataset_dict, if any.
Args:
dataset_dict (dict): a dict read from the dataset, possibly
contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
image_shape (tuple): height, width
transforms (TransformList):
proposal_topk (int): only keep top-K scoring proposals
min_box_size (int): proposals with either side smaller than this
threshold are removed
The input dict is modified in-place, with abovementioned keys removed. A new
key "proposals" will be added. Its value is an `Instances`
object which contains the transformed proposals in its field
"proposal_boxes" and "objectness_logits".
"""
if "proposal_boxes" in dataset_dict:
# Transform proposal boxes
boxes = transforms.apply_box(
BoxMode.convert(
dataset_dict.pop("proposal_boxes"),
dataset_dict.pop("proposal_bbox_mode"),
BoxMode.XYXY_ABS,
)
)
boxes = Boxes(boxes)
objectness_logits = torch.as_tensor(
dataset_dict.pop("proposal_objectness_logits").astype("float32")
)
boxes.clip(image_shape)
keep = boxes.nonempty(threshold=min_box_size)
boxes = boxes[keep]
objectness_logits = objectness_logits[keep]
proposals = Instances(image_shape)
proposals.proposal_boxes = boxes[:proposal_topk]
proposals.objectness_logits = objectness_logits[:proposal_topk]
dataset_dict["proposals"] = proposals
def transform_instance_annotations(
annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
"""
Apply transforms to box, segmentation and keypoints annotations of a single instance.
It will use `transforms.apply_box` for the box, and
`transforms.apply_coords` for segmentation polygons & keypoints.
If you need anything more specially designed for each data structure,
you'll need to implement your own version of this function or the transforms.
Args:
annotation (dict): dict of instance annotations for a single instance.
It will be modified in-place.
transforms (TransformList or list[Transform]):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
Returns:
dict:
the same input dict with fields "bbox", "segmentation", "keypoints"
transformed according to `transforms`.
The "bbox_mode" field will be set to XYXY_ABS.
"""
if isinstance(transforms, (tuple, list)):
transforms = T.TransformList(transforms)
# bbox is 1d (per-instance bounding box)
bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
# clip transformed bbox to image size
bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
annotation["bbox_mode"] = BoxMode.XYXY_ABS
if "segmentation" in annotation:
# each instance contains 1 or more polygons
segm = annotation["segmentation"]
if isinstance(segm, list):
# polygons
polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
annotation["segmentation"] = [
p.reshape(-1) for p in transforms.apply_polygons(polygons)
]
elif isinstance(segm, dict):
# RLE
mask = mask_util.decode(segm)
mask = transforms.apply_segmentation(mask)
assert tuple(mask.shape[:2]) == image_size
annotation["segmentation"] = mask
else:
raise ValueError(
"Cannot transform segmentation of type '{}'!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict.".format(type(segm))
)
if "keypoints" in annotation:
keypoints = transform_keypoint_annotations(
annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
)
annotation["keypoints"] = keypoints
return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
"""
Transform keypoint annotations of an image.
If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
Args:
keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
Each point is represented by (x, y, visibility).
transforms (TransformList):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
When `transforms` includes horizontal flip, will use the index
mapping to flip keypoints.
"""
# (N*3,) -> (N, 3)
    keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
import numpy as np
import os
import igraph as ig
from sklearn.decomposition import PCA
from notmad.helpers import utils
from notmad.helpers import graph_utils
def l2_dist(dag1, dag2):
dist_vec = (dag1 - dag2).flatten()
return dist_vec.T @ dist_vec
def gen_data(data_params):
if data_params["simulation_type"] == "archetypes":
W_dict, C_dict = gen_archetypes(data_params["d"], data_params["n_edges"],
data_params["n_c"], data_params["k_true"],
graph_type=data_params["graph_type"], ensure_convex=data_params["ensure_convex"])
sample_loadings, W, C, X = gen_samples(
W_dict, C_dict, n=data_params["n"], n_i=data_params["n_i"], n_mix=data_params["n_mix"],
sem_type=data_params["sem_type"], noise_scale=0.01)
if data_params['context_snr'] > 0 and data_params['context_snr'] < 1.0:
for i in range(data_params["n_c"]):
C[:, i] += np.random.normal(0.,
(1./data_params["context_snr"] - 1)*np.var(C[:, i]),
size=C[:, i].shape)
elif data_params["simulation_type"] == "clusters":
W_dict, C_dict = gen_archetypes(data_params["d"], data_params["n_edges"],
data_params["n_c"], data_params["k_true"],
# data_params["n_c"], data_params["k_true"] + data_params["n_mix"], # Use separate mixing archetypes?
graph_type=data_params["graph_type"], min_radius=data_params["arch_min_radius"],
ensure_convex=data_params["ensure_convex"])
W_arch = W_dict[:data_params["k_true"]]
# W_mix = W_dict[-data_params["n_mix"]:] # Use separate mixing archetypes?
W_mix = np.copy(W_arch)
W, C, X = gen_cluster_samples(W_arch, W_mix, n=data_params["n"], n_i=data_params["n_i"],
radius=data_params["cluster_max_radius"], sem_type=data_params["sem_type"],
noise_scale=0.01)
if data_params['context_snr'] > 0 and data_params['context_snr'] < 1.0:
for i in range(data_params["k_true"] * 2):
C[:, i] += np.random.normal(0.,
(1./data_params["context_snr"] - 1)*np.var(C[:, i]),
size=C[:, i].shape)
else:
W, C, X = gen_samples_no_archs(data_params["n"], data_params["d"],
data_params["n_edges"], data_params["n_i"], data_params["n_c"],
c_signal_noise=data_params["context_snr"], graph_type=data_params["graph_type"],
sem_type=data_params["sem_type"])
W_dict, C_dict = None, None
return W, C, X, W_dict, C_dict
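# Illustrative sketch (not part of the original module): a data_params dictionary carrying
# the keys gen_data() reads on its "archetypes" branch. All values are made up and the
# helper is never called at import time.
def _example_gen_data():
    """Hedged usage sketch for gen_data()."""
    data_params = {
        "simulation_type": "archetypes",
        "d": 8,               # nodes per DAG
        "n_edges": 8,         # expected number of edges
        "n_c": 20,            # context dimension
        "k_true": 4,          # number of archetype networks
        "graph_type": "ER",
        "ensure_convex": False,
        "n": 100,             # number of samples (contexts)
        "n_i": 50,            # observations simulated per sample
        "n_mix": 2,           # archetypes mixed per sample
        "sem_type": "gauss",
        "context_snr": 0.5,   # signal-to-noise ratio of the observed context
    }
    return gen_data(data_params)  # -> W, C, X, W_dict, C_dict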
def gen_cluster_samples(W_k, W_mix, n, n_i, radius=1, sem_type='gauss', noise_scale=0.1):
# Generate n samples from k archetypes
(k, d, _) = W_k.shape
(k_mix, d, _) = W_mix.shape
n_c = W_k.shape[0] + W_mix.shape[0]
# subtypes = np.zeros((n, n_c))
W_n = np.zeros((n, d, d))
c_n = np.zeros((n, n_c))
X_n = np.zeros((n, n_i, d))
for i in range(n):
print(f'Generating sample {i}', end='\r')
# TODO: Principled way to make sparse mixtures
finished = False
while not finished:
weights = np.random.uniform(-radius, radius, k_mix)
k_i = np.random.choice(k)
if np.sum(weights) < 1e-3:
continue
W_i = W_k[k_i] + np.tensordot(weights, W_mix, axes=1)
c_i = np.zeros(n_c)
c_i[k_i] = 1
c_i[-k_mix:] = weights
# try:
# X_i = simulate_linear_sem(W_i, n_i, sem_type, noise_scale=noise_scale)
# except ValueError: # mixture may not be a DAG
W_i = graph_utils.project_to_dag(W_i)[0]
X_i = simulate_linear_sem(W_i, n_i, sem_type, noise_scale=noise_scale)
X_n[i] = X_i
# subtypes[i] = weights
W_n[i] = W_i
c_n[i] = c_i
finished = True
print()
return W_n, c_n, X_n
def gen_archetypes(d=8, s0=8, n_c=20, k=4, graph_type='ER', min_radius=0, ensure_convex=False):
# Create network and epigenetic archetypes
W_k = np.ones((k, d, d))
c_k = np.ones((k, n_c))
if ensure_convex:
# Ensure archetypes define a convex set of DAGs
while not graph_utils.is_dag(np.sum(W_k, axis=0)):
for j in range(k):
B_true = simulate_dag(d, s0, graph_type)
while not np.sum(B_true) == s0:
B_true = simulate_dag(d, s0, graph_type)
W_k[j] = simulate_parameter(B_true)
c_k[j] = simulate_context(n_c)
else:
for j in range(k):
B_true = simulate_dag(d, s0, graph_type)
while not graph_utils.is_dag(B_true):
B_true = simulate_dag(d, s0, graph_type)
dists = np.array([l2_dist(B_true, W_i) for W_i in W_k])
W_param = simulate_parameter(B_true)
dists = np.array([l2_dist(W_param, W_i) for W_i in W_k])
while not (dists > min_radius).all(): # min_radius should be <600
W_param = simulate_parameter(B_true)
dists = np.array([l2_dist(W_param, W_i) for W_i in W_k])
W_k[j] = W_param
c_k[j] = simulate_context(n_c)
return W_k, c_k
def simulate_context(n_c):
return np.random.uniform(0, 1, n_c)
def gen_samples(W_k, c_k, n, n_i, n_mix=2, sem_type='gauss', noise_scale=0.1):
# Generate n samples from k archetypes
assert (c_k.shape[0] == W_k.shape[0])
(k, d, _) = W_k.shape
(k, n_c) = c_k.shape
subtypes = np.zeros((n, k))
W_n = np.zeros((n, d, d))
c_n = np.zeros((n, n_c))
X_n = np.zeros((n, n_i, d))
for i in range(n):
print(i, end='\r')
# TODO: Principled way to make sparse mixtures
finished = False
while not finished:
weights = np.zeros((k, ))
idxs = np.random.choice(k, n_mix)
for idx in idxs:
weights[idx] = np.random.uniform(0, 1)
            # weights = np.random.uniform(0, 1, k)*np.random.binomial(1, float(n_mix)/k, size=(k))
if np.sum(weights) < 1e-3:
continue
weights /= np.sum(weights)
W_i = np.tensordot(weights, W_k, axes=1)
c_i = np.tensordot(weights, c_k, axes=1)
try:
X_i = simulate_linear_sem(W_i, n_i, sem_type, noise_scale=noise_scale)
except ValueError: # mixture may not be a DAG
continue
X_n[i] = X_i
subtypes[i] = weights
W_n[i] = W_i
c_n[i] = c_i
finished = True
return subtypes, W_n, c_n, X_n
def gen_samples_no_archs(n, d, s0, n_i, n_c, c_signal_noise,
graph_type='ER', sem_type='gauss', noise_scale=0.1):
W_n = np.zeros((n, d, d))
c_n = np.zeros((n, n_c))
X_n = np.zeros((n, n_i, d))
for i in range(n):
print(i, end='\r')
W_n[i] = simulate_dag(d, s0, graph_type)
W_n[i] = simulate_parameter(W_n[i])
X_n[i] = simulate_linear_sem(W_n[i], n_i, sem_type, noise_scale=noise_scale)
pca = PCA(n_components=n_c)
c_n = pca.fit_transform(np.array([w.flatten() for w in W_n]))
if c_signal_noise > 0 and c_signal_noise < 1:
for j in range(n_c):
c_n[:, j] += np.random.normal(0., np.var(c_n[:, j])*(1./c_signal_noise - 1), size=(n, ))
return W_n, c_n, X_n
def simulate_dag(d, s0, graph_type):
"""Simulate random DAG with some expected number of edges.
Args:
d (int): num of nodes
s0 (int): expected num of edges
graph_type (str): ER, SF, BP
Returns:
B (np.ndarray): [d, d] binary adj matrix of DAG
"""
def _random_permutation(M):
# np.random.permutation permutes first axis only
P = np.random.permutation(np.eye(M.shape[0]))
#return P.T @ M @ P
return np.matmul(np.matmul(P.T, M), P)
def _random_acyclic_orientation(B_und):
return np.tril(_random_permutation(B_und), k=-1)
def _graph_to_adjmat(G):
return np.array(G.get_adjacency().data)
if graph_type == 'ER':
# Erdos-Renyi
G_und = ig.Graph.Erdos_Renyi(n=d, m=s0)
B_und = _graph_to_adjmat(G_und)
B = _random_acyclic_orientation(B_und)
elif graph_type == 'SF':
# Scale-free, Barabasi-Albert
G = ig.Graph.Barabasi(n=d, m=int(round(s0 / d)), directed=True)
B = _graph_to_adjmat(G)
elif graph_type == 'BP':
# Bipartite, Sec 4.1 of (Gu, Fu, Zhou, 2018)
top = int(0.2 * d)
G = ig.Graph.Random_Bipartite(top, d - top, m=s0, directed=True, neimode=ig.OUT)
B = _graph_to_adjmat(G)
else:
raise ValueError('unknown graph type')
B_perm = _random_permutation(B)
assert ig.Graph.Adjacency(B_perm.tolist()).is_dag()
return B_perm
def simulate_parameter(B, w_ranges=((-10.0, -1), (1, 10.0))):
"""Simulate SEM parameters for a DAG.
Args:
B (np.ndarray): [d, d] binary adj matrix of DAG
w_ranges (tuple): disjoint weight ranges
Returns:
W (np.ndarray): [d, d] weighted adj matrix of DAG
"""
W = np.zeros(B.shape)
S = np.random.randint(len(w_ranges), size=B.shape) # which range
for i, (low, high) in enumerate(w_ranges):
U = np.random.uniform(low=low, high=high, size=B.shape)
W += B * (S == i) * U
return W
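# Illustrative sketch (not part of the original module): drawing a random Erdos-Renyi DAG
# and attaching edge weights from the disjoint ranges above. Values are made up and the
# helper is never called at import time.
def _example_simulate_graph():
    """Hedged usage sketch for simulate_dag() and simulate_parameter()."""
    B = simulate_dag(d=8, s0=8, graph_type='ER')  # [8, 8] binary adjacency matrix of a DAG
    W = simulate_parameter(B)                     # weights drawn from (-10, -1) or (1, 10)
    assert graph_utils.is_dag(W)
    return W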
def simulate_linear_sem(W, n, sem_type, noise_scale=None):
"""Simulate samples from linear SEM with specified type of noise.
For uniform, noise z ~ uniform(-a, a), where a = noise_scale.
Args:
W (np.ndarray): [d, d] weighted adj matrix of DAG
n (int): num of samples, n=inf mimics population risk
sem_type (str): gauss, exp, gumbel, uniform, logistic, poisson
noise_scale (np.ndarray): scale parameter of additive noise, default all ones
Returns:
X (np.ndarray): [n, d] sample matrix, [d, d] if n=inf
"""
def _simulate_single_equation(X, w, scale):
"""X: [n, num of parents], w: [num of parents], x: [n]"""
if sem_type == 'gauss':
z = np.random.normal(scale=scale, size=n)
#x = X @ w + z
x = np.matmul(X, w) + z
elif sem_type == 'exp':
            z = np.random.exponential(scale=scale, size=n)
            x = np.matmul(X, w) + z
"""
"""
from __future__ import division
import numpy as np
import numpy.linalg as linalg
import scipy.sparse as sps
from .base import *
from .transformation_matrices import euler_matrix
from cytransforms import point2grids, direction2grids
import itertools
__all__ = (
'directionTransform',
'pointTransform',
'rotationTransform',
'integralTransform',
'sensorTransform',
'sensorTransformK',
'cumsumTransform',
'fisheyeTransform'
)
def directionTransform(
in_grids,
direction_phi,
direction_theta
):
H = direction2grids(
direction_phi,
direction_theta,
in_grids.expanded[0],
in_grids.expanded[1],
in_grids.expanded[2]
)
return BaseTransform(
H=H,
in_grids=in_grids,
out_grids=in_grids
)
def pointTransform(
in_grids,
point
):
Y, X, Z = in_grids.closed
assert point[0] > Y.min() and point[0] < Y.max(), "point is not directly below the grid"
assert point[1] > X.min() and point[1] < X.max(), "point is not directly below the grid"
assert point[2] < Z.max(), "point is not directly below the grid"
H = point2grids(
point,
in_grids.expanded[0],
in_grids.expanded[1],
in_grids.expanded[2]
)
return BaseTransform(
H=H,
in_grids=in_grids,
out_grids=in_grids
)
def rotationTransform(in_grids, rotation, out_grids=None):
"""Calculate a transform representing a rotation in 3D.
Parameters
----------
in_grids : Grids object
List of grids.
rotation : list of floats or rotation matrix
Either a list of floats representating the rotation in euler angles
(axis used is 'sxyz'). Alternatively, rotation can be a 4x4 rotation matrix
out_grids : Grids object, optional (default=None)
List of grids. The grids are expected to be of the form created by mgrid
and in the same order of creation. The transform is calculated into these
grids. This enables croping of the target domain after the rotation transform.
If none, the destination grids will be calculated to contain the full transformed
source.
"""
if isinstance(rotation, np.ndarray) and rotation.shape == (4, 4):
H_rot = rotation
else:
H_rot = euler_matrix(*rotation)
    if out_grids is None:
Y_dst, X_dst, Z_dst = _calcRotatedGrids(in_grids, H_rot)
else:
Y_dst, X_dst, Z_dst = out_grids.expanded
#
# Calculate a rotated grid by applying the rotation.
#
XYZ_dst = np.vstack((X_dst.ravel(), Y_dst.ravel(), Z_dst.ravel(), np.ones(X_dst.size)))
XYZ_inv = np.dot(np.linalg.inv(H_rot), XYZ_dst)
Y_inv = XYZ_inv[1, :].reshape(X_dst.shape)
X_inv = XYZ_inv[0, :].reshape(X_dst.shape)
Z_inv = XYZ_inv[2, :].reshape(X_dst.shape)
inv_grids = Grids(Y_inv, X_inv, Z_inv)
out_grids = Grids(Y_dst, X_dst, Z_dst)
H = calcTransformMatrix(in_grids, inv_grids)
return BaseTransform(
H=H,
in_grids=in_grids,
out_grids=out_grids,
inv_grids=inv_grids
)
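# Illustrative sketch (not part of the original module): building a rotation transform for a
# small grid from Euler angles (the 'sxyz' convention used by euler_matrix above). The Grids
# construction mirrors the one used inside rotationTransform; values are made up and the
# helper is never called at import time.
def _example_rotation_transform():
    """Hedged usage sketch for rotationTransform()."""
    Y, X, Z = np.mgrid[0:10:10j, 0:10:10j, 0:5:5j]
    in_grids = Grids(Y, X, Z)
    return rotationTransform(in_grids, rotation=(0.0, 0.0, np.pi / 4))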
#
# Some globals
#
SPARSE_SIZE_LIMIT = 1e6
GRID_DIM_LIMIT = 100
def _calcRotatedGrids(in_grid, H_rot):
#
# Calculate the target grid.
# The calculation is based on calculating the minimal grid that contains
# the transformed input grid.
#
Y_slim, X_slim, Z_slim = [g.ravel() for g in in_grid]
x0_src = np.floor(np.min(X_slim)).astype(np.int)
y0_src = np.floor(np.min(Y_slim)).astype(np.int)
z0_src = np.floor(np.min(Z_slim)).astype(np.int)
x1_src = np.ceil(np.max(X_slim)).astype(np.int)
y1_src = np.ceil(np.max(Y_slim)).astype(np.int)
z1_src = np.ceil(np.max(Z_slim)).astype(np.int)
src_coords = np.array(
[
[x0_src, x0_src, x1_src, x1_src, x0_src, x0_src, x1_src, x1_src],
[y0_src, y1_src, y0_src, y1_src, y0_src, y1_src, y0_src, y1_src],
[z0_src, z0_src, z0_src, z0_src, z1_src, z1_src, z1_src, z1_src],
[1, 1, 1, 1, 1, 1, 1, 1]
]
)
dst_coords = np.dot(H_rot, src_coords)
x0_dst, y0_dst, z0_dst, dump = np.floor(np.min(dst_coords, axis=1)).astype(np.int)
x1_dst, y1_dst, z1_dst, dump = np.ceil(np.max(dst_coords, axis=1)).astype(np.int)
#
# Calculate the grid density.
# Note:
# This calculation is important as having a dense grid results in a huge transform
# matrix even if it is sparse.
#
dy, dx, dz = [d[0, 0, 0] for d in in_grid.derivatives]
delta_src_coords = np.array(
[
[0, dx, 0, 0, -dx, 0, 0],
[0, 0, dy, 0, 0, -dy, 0],
[0, 0, 0, dz, 0, 0, -dz],
[1, 1, 1, 1, 1, 1, 1]
]
)
delta_dst_coords = np.dot(H_rot, delta_src_coords)
delta_dst_coords.sort(axis=1)
delta_dst_coords = delta_dst_coords[:, 1:] - delta_dst_coords[:, :-1]
delta_dst_coords[delta_dst_coords<=0] = 10000000
dx, dy, dz, dump = np.min(delta_dst_coords, axis=1)
x_samples = min(int((x1_dst-x0_dst)/dx), GRID_DIM_LIMIT)
y_samples = min(int((y1_dst-y0_dst)/dy), GRID_DIM_LIMIT)
z_samples = min(int((z1_dst-z0_dst)/dz), GRID_DIM_LIMIT)
dim_ratio = x_samples * y_samples * z_samples / SPARSE_SIZE_LIMIT
if dim_ratio > 1:
dim_reduction = dim_ratio ** (-1/3)
x_samples = int(x_samples * dim_reduction)
y_samples = int(y_samples * dim_reduction)
z_samples = int(z_samples * dim_reduction)
Y_dst, X_dst, Z_dst = np.mgrid[
y0_dst:y1_dst:complex(0, y_samples),
x0_dst:x1_dst:complex(0, x_samples),
z0_dst:z1_dst:complex(0, z_samples),
]
return Y_dst, X_dst, Z_dst
def integralTransform(
in_grids,
jacobian=None,
axis=0,
direction=1
):
"""
Calculate a transform representing integration.
Parameters
----------
in_grids : Grids object
List of grids.
jacobian : array like (default=None)
If given, will be used as the Jacobian of the integration.
axis : int, optional (default=0)
The axis by which the integration is performed.
direction : {1, -1}, optional (default=1)
Direction of integration
direction - 1: integrate up the indices, -1: integrate down the indices.
"""
grid_shape = in_grids.shape
strides = np.array(in_grids.expanded[0].strides)
strides = (strides / strides[-1]).astype(strides.dtype)
derivatives = in_grids.derivatives
inner_stride = strides[axis]
if direction != 1:
direction = -1
inner_height = np.abs(inner_stride)
inner_width = np.prod(grid_shape[axis:])
inner_H = sps.spdiags(
np.ones((grid_shape[axis], max(inner_height, inner_width)))*derivatives[axis].reshape((-1, 1))*direction,
inner_stride*np.arange(grid_shape[axis]),
inner_height,
inner_width
)
if axis == 0:
H = inner_H
else:
m = np.prod(grid_shape[:axis])
H = sps.kron(sps.eye(m, m), inner_H)
H = H.tocsr()
    if jacobian is not None:
H = H * spdiag(jacobian)
#
# Calculate the output grid
#
temp = range(in_grids.ndim)
temp.remove(axis)
dims = [slice(None)] * in_grids.ndim
dims[axis] = 0
out_grids = [in_grids[i][dims] for i in temp]
return BaseTransform(
H=H,
in_grids=in_grids,
out_grids=Grids(*out_grids)
)
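#
# Illustrative usage (sketch, not part of the library API): integrate a field
# along the vertical axis. 'grids' stands in for a Grids object ordered
# (Y, X, Z) as elsewhere in this module, so axis=2 integrates along Z;
# direction=-1 would integrate down the indices instead.
#
#   height_integral = integralTransform(grids, axis=2)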
def cumsumTransform(
in_grids,
axis=0,
direction=1,
masked_rows=None
):
"""
Calculate a transform representing cumsum operation.
Parameters
----------
in_grids : Grids object
List of grids.
axis : int, optional (default=0)
Axis along which the cumsum operation is preformed.
direction : {1, -1}, optional (default=1)
Direction of integration, 1 for integrating up the indices
-1 for integrating down the indices.
masked_rows: array, optional(default=None)
If not None, leave only the rows that are non zero in the
masked_rows array.
"""
grid_shape = in_grids.shape
strides = np.array(in_grids.expanded[0].strides)
strides = (strides / strides[-1]).astype(strides.dtype)
derivatives = in_grids.derivatives
inner_stride = strides[axis]
if direction == 1:
inner_stride = -inner_stride
inner_size = np.prod(grid_shape[axis:])
inner_H = sps.spdiags(
np.ones((grid_shape[axis], inner_size))*derivatives[axis].reshape((-1, 1)),
inner_stride*np.arange(grid_shape[axis]),
inner_size,
inner_size)
if axis == 0:
H = inner_H
else:
m = np.prod(grid_shape[:axis])
H = sps.kron(sps.eye(m, m), inner_H)
    if masked_rows is not None:
H = H.tolil()
indices = masked_rows.ravel() == 0
for i in indices.nonzero()[0]:
H.rows[i] = []
H.data[i] = []
return BaseTransform(
H=H.tocsr(),
in_grids=in_grids,
out_grids=in_grids
)
def sensorTransform(
in_grids,
sensor_center,
sensor_res,
depth_res,
samples_num=1000,
dither_noise=10,
replicate=10
):
"""
    Transformation of a linear fisheye (to a x, y, R space). In this version the camera is assumed to point up.
Implements ray tracing algorithm.
Parameters:
-----------
in_grids: Grids object
Grids of the 3D space.
sensor_center: array like
Center of the camera/sensor
sensor_res : two tuple.
resolution of sensor as a tuple of two ints.
depth_res : int
Resolution of the R axis in the output grid.
samples_num : int
Number of samples along the R axis
dither_noise : int
Noise in the samples along the R axis (used for avoiding aliasing).
replicate : int
Number of replications at each pixel.
"""
#
# Center the grids
#
centered_grids = in_grids.translate(-np.array(sensor_center))
Y, X, Z = centered_grids.closed
#
# Convert image pixels to ray direction
# The image is assumed the [-1, 1]x[-1, 1] square.
#
Y_sensor, step = np.linspace(-1.0, 1.0, sensor_res[0], endpoint=False, retstep=True)
X_sensor = np.linspace(-1.0, 1.0, sensor_res[1], endpoint=False)
#
# Calculate sample steps along ray
#
R_max = np.max(np.sqrt(centered_grids.expanded[0]**2 + centered_grids.expanded[1]**2 + centered_grids.expanded[2]**2))
R_samples, R_step = np.linspace(0.0, R_max, samples_num, retstep=True)
R_samples = R_samples[1:]
R_dither = np.random.rand(*sensor_res) * R_step * dither_noise
#
# Calculate depth bins
#
#depth_bins = np.logspace(np.log10(R_samples[0]), np.log10(R_samples[-1]+R_step), depth_res+1)
temp = np.linspace(0, 1, depth_res+1)
temp = np.cumsum(temp)
depth_bins = temp / temp[-1] * R_samples[-1]
samples_bin = np.digitize(R_samples, depth_bins)
samples_array = []
for i in range(1, depth_res+1):
samples_array.append(R_samples[samples_bin==i].reshape((-1, 1)))
#
# Create the output grids
#
out_grids = Grids(depth_bins[:-1], Y_sensor, X_sensor)
#
# Calculate inverse grid
#
X_sensor, Y_sensor = np.meshgrid(X_sensor, Y_sensor)
R_sensor = np.sqrt(X_sensor**2 + Y_sensor**2)
R = out_grids.expanded[0]
THETA = R_sensor * np.pi / 2
PHI = np.arctan2(Y_sensor, X_sensor)
THETA = np.tile(THETA[np.newaxis, :, :], [depth_res, 1, 1])
PHI = np.tile(PHI[np.newaxis, :, :], [depth_res, 1, 1])
Y_inv = R * np.sin(THETA) * np.sin(PHI)
X_inv = R * np.sin(THETA) * np.cos(PHI)
Z_inv = R * np.cos(THETA)
inv_grids = Grids(Y_inv, X_inv, Z_inv)
#
# Randomly replicate rays inside each pixel
#
X_sensor = np.tile(X_sensor[:, :, np.newaxis], [1, 1, replicate])
Y_sensor = np.tile(Y_sensor[:, :, np.newaxis], [1, 1, replicate])
X_sensor += np.random.rand(*X_sensor.shape)*step
Y_sensor += np.random.rand(*Y_sensor.shape)*step
#
# Calculate rays angles
# R_sensor is the radius from the center of the image (0, 0) to the
    # pixel. It is used for calculating the ray direction (PHI, THETA)
# and for filtering pixels outside the image (radius > 1).
#
R_sensor = np.sqrt(X_sensor**2 + Y_sensor**2)
THETA_ray = R_sensor * np.pi / 2
PHI_ray = np.arctan2(Y_sensor, X_sensor)
DY_ray = np.sin(THETA_ray) * np.sin(PHI_ray)
DX_ray = np.sin(THETA_ray) * np.cos(PHI_ray)
DZ_ray = np.cos(THETA_ray)
#
# Loop on all rays
#
data = []
indices = []
indptr = [0]
for samples in samples_array:
for r, dy, dx, dz, r_dither in itertools.izip(
R_sensor.reshape((-1, replicate)),
DY_ray.reshape((-1, replicate)),
DX_ray.reshape((-1, replicate)),
DZ_ray.reshape((-1, replicate)),
R_dither.ravel(),
):
if np.all(r > 1):
indptr.append(indptr[-1])
continue
#
# Filter steps where r > 1
#
dy = dy[r<=1]
dx = dx[r<=1]
dz = dz[r<=1]
#
# Convert the ray samples to volume indices
#
Y_ray = (r_dither+samples) * dy
X_ray = (r_dither+samples) * dx
Z_ray = (r_dither+samples) * dz
#
# Calculate the atmosphere indices
#
Y_indices = np.searchsorted(Y, Y_ray.ravel())
X_indices = np.searchsorted(X, X_ray.ravel())
Z_indices = np.searchsorted(Z, Z_ray.ravel())
Y_filter = (Y_indices > 0) * (Y_indices < Y.size)
X_filter = (X_indices > 0) * (X_indices < X.size)
Z_filter = (Z_indices > 0) * (Z_indices < Z.size)
filtered = Y_filter*X_filter*Z_filter
Y_indices = Y_indices[filtered]-1
X_indices = X_indices[filtered]-1
Z_indices = Z_indices[filtered]-1
#
# Calculate unique indices
#
inds_ray = (Y_indices*centered_grids.shape[1] + X_indices)*centered_grids.shape[2] + Z_indices
uniq_indices, inv_indices = np.unique(inds_ray, return_inverse=True)
#
# Calculate weights
# Note:
# The weights are divided by the number of samples in the voxels, this gives the
# averaged concentration in the voxel.
#
weights = []
for i, ind in enumerate(uniq_indices):
weights.append((inv_indices == i).sum() / samples.size / replicate)
#
# Sum up the indices and weights
#
data.append(weights)
indices.append(uniq_indices)
indptr.append(indptr[-1]+uniq_indices.size)
#
# Create sparse matrix
#
data = np.hstack(data)
indices = np.hstack(indices)
H = sps.csr_matrix(
(data, indices, indptr),
shape=(sensor_res[0]*sensor_res[1]*depth_res, centered_grids.size)
)
return BaseTransform(
H=H,
in_grids=in_grids,
out_grids=out_grids,
inv_grids=inv_grids
)
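#
# Illustrative usage (sketch with placeholder values): build the fisheye
# sensor transform for a camera placed at the domain origin, with a 128x128
# sensor and 32 depth bins. 'grids' stands in for the Grids object of the
# 3D domain.
#
#   camera = sensorTransform(
#       grids,
#       sensor_center=(0.0, 0.0, 0.0),
#       sensor_res=(128, 128),
#       depth_res=32
#   )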
def fisheyeTransform(
in_grids,
sensor_center,
sensor_res,
samples_num=1000,
dither_noise=10,
replicate=10
):
"""
    Transformation of a linear fisheye (to a x, y). In this version the camera is assumed to point up.
Implements ray tracing algorithm.
Parameters:
-----------
in_grids: Grids object
Grids of the 3D space.
sensor_center: array like
Center of the camera/sensor
sensor_res : two tuple.
resolution of sensor as a tuple of two ints.
samples_num : int
Number of samples along the R axis
dither_noise : int
Noise in the samples along the R axis (used for avoiding aliasing).
replicate : int
Number of replications at each pixel.
"""
#
# Center the grids
#
centered_grids = in_grids.translate(-np.array(sensor_center))
Y, X, Z = centered_grids.closed
#
# Convert image pixels to ray direction
# The image is assumed the [-1, 1]x[-1, 1] square.
#
Y_sensor, step = np.linspace(-1.0, 1.0, sensor_res[0], endpoint=False, retstep=True)
X_sensor = np.linspace(-1.0, 1.0, sensor_res[1], endpoint=False)
#
# Calculate sample steps along ray
#
R_max = np.max(np.sqrt(centered_grids.expanded[0]**2 + centered_grids.expanded[1]**2 + centered_grids.expanded[2]**2))
    R_samples, R_step = np.linspace(0.0, R_max, samples_num, retstep=True)
import os
import sys
import json
import numpy as np
from keras.utils import to_categorical
try:
basestring
except NameError:
basestring = str
def sameAux( aux1, aux2 ):
for idx in ["name", "type"]:
if aux1[idx] != aux2[idx]:
return False
if "categorical" == aux1["type"]:
if aux1["categories"] != aux2["categories"]:
return False
return True
def embedAux( actions, aux ):
embedding = {}
for idx, act in enumerate(aux["categories"]):
embedding[act] = idx
emb = []
for act in actions:
emb.append( embedding[act] )
return emb
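# Illustrative example (hypothetical aux dict): categories are embedded by
# their index, so the result can be fed to keras.utils.to_categorical.
#
#   aux = {"name": "action", "type": "categorical",
#          "categories": ["left", "right", "stop"]}
#   embedAux(["stop", "left"], aux)   # -> [2, 0]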
def loadOneAux( drive_dir, aux ):
auxFile = os.path.join( drive_dir, "{}_aux.npy".format(aux["name"] ) )
if os.path.exists(auxFile):
        actions = np.load(auxFile)
'''
Created on 4 Sep 2015
@author: maxz
'''
import unittest
import numpy as np, GPy
from GPy.core.parameterization.variational import NormalPosterior
class Test(unittest.TestCase):
def setUp(self):
np.random.seed(12345)
self.N = 20
self.N_new = 50
self.D = 1
        self.X = np.random.uniform(-3., 3., (self.N, 1))
from mpi4py import MPI
from tacs import TACS, elements
from tmr import TMR
from paropt import ParOpt
import numpy as np
from six import iteritems
try:
from scipy.optimize import minimize
except:
minimize = None
def createTopoProblem(forest, callback, filter_type, nlevels=2,
repartition=True, design_vars_per_node=1,
r0=0.05, N=10, lowest_order=2,
ordering=TACS.MULTICOLOR_ORDER,
use_galerkin=False,
scale_coordinate_factor=1.0):
"""
Create a topology optimization problem instance and a hierarchy of meshes.
This code takes in the OctForest or QuadForest on the finest mesh level
and creates a series of coarser meshes for analysis and optimization.
The discretization at each level is created via a callback function that
generates the appropriate TACSCreator object and its associated filter (the
QuadForest or OctForest on which the design parametrization is defined.)
The code then creates a TMRTopoFilter class which stores information about
the design parametrization and hierarchy. It creates a multigrid object and
finally a TMRTopoProblem instance for optimization.
The callback function takes in a forest object, corresponding to the finite-
element discretization and returns a creator object and a filter object in
the following form:
creator, filter = callback(forest)
Args:
callback: A callback function that takes in the forest and
returns the filter and the associated creator class
filter_type (str): Type of filter to create
forest (TMROctForest or TMRQuadForest): Forest type
repartition (bool): Repartition the mesh
design_vars_per_node (int): number of design variables for each node
r0 (float): Helmholtz/matrix filter radius
N (int): Matrix filter approximation parameter
lowest_order (int): Lowest order mesh to create
ordering: TACS Assembler ordering type
use_galerkin: Use Galerkin projection to obtain coarse grid operators
scale_coordinate_factor (float): Scale all coordinates by this factor
Returns:
problem (TopoProblem): The allocated topology optimization problem
"""
# Store data
forests = []
filters = []
assemblers = []
# Balance the forest and repartition across processors
forest.balance(1)
if repartition:
forest.repartition()
# Create the forest object
creator, filtr = callback(forest)
forests.append(forest)
filters.append(filtr)
assemblers.append(creator.createTACS(forest, ordering))
for i in range(nlevels-1):
order = forests[-1].getMeshOrder()
interp = forests[-1].getInterpType()
if order > lowest_order:
forest = forests[-1].duplicate()
order = order-1
forest.setMeshOrder(order, interp)
else:
forest = forests[-1].coarsen()
forest.setMeshOrder(order, interp)
# Balance and repartition if needed
forest.balance(1)
if repartition:
forest.repartition()
# Create the forest object
creator, filtr = callback(forest)
forests.append(forest)
filters.append(filtr)
assemblers.append(creator.createTACS(forest, ordering))
# Scale the coordinates by scale_coordinates factor if it is != 1.0
if scale_coordinate_factor != 1.0:
for assembler in assemblers:
X = assembler.createNodeVec()
assembler.getNodes(X)
X.scale(scale_coordinate_factor)
assembler.setNodes(X)
# Create the multigrid object
mg = TMR.createMg(assemblers, forests, use_galerkin=use_galerkin)
# Create the TMRTopoFilter object
filter_obj = None
if callable(filter_type):
filter_obj = filter_type(assemblers, filters)
elif isinstance(filter_type, str):
if filter_type == 'lagrange':
filter_obj = TMR.LagrangeFilter(assemblers, filters)
elif filter_type == 'matrix':
filter_obj = TMR.MatrixFilter(r0, N, assemblers, filters)
elif filter_type == 'conform':
filter_obj = TMR.ConformFilter(assemblers, filters)
elif filter_type == 'helmholtz':
filter_obj = TMR.HelmholtzFilter(r0, assemblers, filters)
problem = TMR.TopoProblem(filter_obj, mg)
return problem
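# Illustrative sketch of a creation callback for createTopoProblem. The
# creator construction below is hypothetical; it depends on the problem
# specific TMR/TACS setup and is not defined in this module.
#
#   def create_callback(forest):
#       # Design parametrization defined on a copy of the analysis forest
#       filtr = forest.duplicate()
#       # 'ProblemCreator' is a placeholder for a user-defined creator class
#       # that builds the TACSAssembler for this discretization
#       creator = ProblemCreator(filtr)
#       return creator, filtr
#
#   problem = createTopoProblem(forest, create_callback, 'lagrange', nlevels=3)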
def computeVertexLoad(name, forest, assembler, point_force):
"""
Add a load at vertices with the given name value. The assembler object must
be created from the forest. The point_force must be equal to the number of
variables per node in the assembler object.
Args:
name (str): Name of the surface where the traction will be added
forest (QuadForest or OctForest): Forest for the finite-element mesh
assembler (Assembler): TACSAssembler object for the finite-element problem
point_force (list): List of point forces to apply at the vertices
Returns:
Vec: A force vector containing the point load
"""
# Get the number of variable per node from the assembler
vars_per_node = assembler.getVarsPerNode()
if vars_per_node != len(point_force):
raise ValueError('Point force length must be equal to vars_per_node')
# Create the force vector and extract the array
force = assembler.createVec()
force_array = force.getArray()
# Retrieve the node numbers from the forest
nodes = forest.getNodesWithName(name)
comm = assembler.getMPIComm()
node_range = forest.getNodeRange()
# Add the point force into the force arrays
for node in nodes:
if ((node >= node_range[comm.rank]) and (node < node_range[comm.rank+1])):
index = node - node_range[comm.rank]
force_array[vars_per_node*index:vars_per_node*(index+1)] += point_force[:]
# Match the ordering of the vector
assembler.reorderVec(force)
return force
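# Illustrative usage (hypothetical names): apply a downward point load of 100
# at every vertex named 'load_point'. With 3 displacement variables per node,
# point_force must have length 3.
#
#   f = computeVertexLoad('load_point', forest, assembler, [0.0, 0.0, -100.0])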
def computeTractionLoad(names, forest, assembler, trac):
"""
Add a surface traction to all quadrants or octants that touch a face or edge with
the given name. The assembler must be created from the provided forest. The list
trac must have a traction for each face (6) for octants or each edge (4) for
quadrants.
Note: This code uses the fact that the getOctsWithName or getQuadsWithName returns
the local face or edge index touching the surface or edge in the info member.
Args:
names (str) or list[(str)]: Name or list of names of the surface(s) where the traction will be added
forest (QuadForest or OctForest): Forest for the finite-element mesh
assembler (Assembler): TACSAssembler object for the finite-element problem
trac (list): List of tractions, one for each possible face/edge orientation
Returns:
Vec: A force vector containing the traction
"""
if isinstance(forest, TMR.OctForest):
octants = forest.getOctants()
if isinstance(names, str):
face_octs = forest.getOctsWithName(names)
else:
face_octs = []
for name in names:
face_octs.extend(forest.getOctsWithName(name))
elif isinstance(forest, TMR.QuadForest):
octants = forest.getQuadrants()
if isinstance(names, str):
face_octs = forest.getQuadsWithName(names)
else:
face_octs = []
for name in names:
face_octs.extend(forest.getQuadsWithName(name))
# Create the force vector and zero the variables in the assembler
force = assembler.createVec()
assembler.zeroVariables()
assembler.zeroDotVariables()
assembler.zeroDDotVariables()
# Create the auxiliary element class
aux = TACS.AuxElements()
for i in range(len(face_octs)):
index = face_octs[i].tag
if index is not None:
aux.addElement(index, trac[face_octs[i].info])
# Keep auxiliary elements already set in the assembler
# aux_tmp = assembler.getAuxElements()
assembler.setAuxElements(aux)
# Compute the residual where force = -residual
assembler.assembleRes(force)
force.scale(-1.0)
# Reset the auxiliary elements
assembler.setAuxElements(None) # (aux_tmp)
return force
def compute3DTractionLoad(name, forest, assembler, tr):
"""
Add a constant surface traction to all octants that touch a face or edge with
the given name.
Args:
forest (QuadForest or OctForest): Forest for the finite-element mesh
name (str): Name of the surface where the traction will be added
assembler (Assembler): TACSAssembler object for the finite-element problem
tr (list): The 3D components of the traction.
Returns:
Vec: A force vector containing the traction
"""
# Get the basis
element = assembler.getElements()[0]
basis = element.getElementBasis()
# Get the number of variables per node
vars_per_node = assembler.getVarsPerNode()
trac = []
for findex in range(6):
trac.append(elements.Traction3D(vars_per_node, findex, basis, tr))
return computeTractionLoad(name, forest, assembler, trac)
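# Illustrative usage (hypothetical names): apply a uniform downward traction
# on all octants touching the face named 'top_surface'.
#
#   f = compute3DTractionLoad('top_surface', forest, assembler, [0.0, 0.0, -1e3])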
def interpolateDesignVec(orig_filter, orig_vec, new_filter, new_vec):
"""
This function interpolates a design vector from the original design space defined
on an OctForest or QuadForest and interpolates it to a new OctForest or QuadForest.
This function is used after a mesh adaptation step to get the new design space.
Args:
orig_filter (OctForest or QuadForest): Original filter Oct or QuadForest object
orig_vec (PVec): Design variables on the original mesh in a ParOpt.PVec
new_filter (OctForest or QuadForest): New filter Oct or QuadForest object
new_vec (PVec): Design variables on the new mesh in a ParOpt.PVec (set on ouput)
"""
# Convert the PVec class to TACSBVec
orig_x = TMR.convertPVecToVec(orig_vec)
if orig_x is None:
raise ValueError('Original vector must be generated by TMR.TopoProblem')
new_x = TMR.convertPVecToVec(new_vec)
if new_x is None:
raise ValueError('New vector must be generated by TMR.TopoProblem')
if orig_x.getVarsPerNode() != new_x.getVarsPerNode():
raise ValueError('Number of variables per node must be consistent')
orig_map = orig_x.getNodeMap()
new_map = new_x.getNodeMap()
vars_per_node = orig_x.getVarsPerNode()
# Create the interpolation class
interp = TACS.VecInterp(orig_map, new_map, vars_per_node)
new_filter.createInterpolation(orig_filter, interp)
interp.initialize()
# Perform the interpolation
interp.mult(orig_x, new_x)
return
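# Illustrative usage (hypothetical names): after a mesh adaptation step, map
# the converged design from the old filter forest onto the new one before
# restarting the optimization. Both vectors are assumed to come from
# TMR.TopoProblem instances, as required above.
#
#   interpolateDesignVec(old_filter, x_old, new_filter, x_new)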
def addNaturalFrequencyConstraint(problem, omega_min, **kwargs):
"""
Add a natural frequency constraint to a TopoProblem optimization problem
This function automatically sets good default arguments that can be
overridden with keyword arguments passed in through kwargs.
Args:
problem (TopoProblem): TopoProblem optimization problem
omega_min (float): Minimum natural frequency, Hz
**kwargs: Frequency constraint parameters; check
TMR documentation for more detail
"""
# Convert the provided minimum natural frequency from
# Hz to rad/s, square it, and make it negative to fit the
# constraint form: omega^2 - offset >= 0.0
offset = -(2.0*np.pi*omega_min)**2
# Define all the possible arguments and set defaults
opts = {'use_jd':True,
'num_eigs':10,
'ks_weight':50.0,
'offset':offset,
'sigma':-offset,
'scale':-0.75/offset,
'max_lanczos':100,
'tol':1e-30,
'eig_tol':5e-7,
'eig_rtol':1e-6,
'eig_atol':1e-12,
'num_recycle':10,
'fgmres_size':8,
'max_jd_size':50,
'recycle_type':'num_recycling'}
# Apply the user defined parameters
for key, value in kwargs.items():
if key in opts:
opts[key] = value
else:
raise ValueError('%s is not a valid option'%(key))
if opts['use_jd']:
# Set the recycling strategy
if opts['recycle_type'] == 'num_recycling':
recycle_type = TACS.NUM_RECYCLE
else:
recycle_type = TACS.SUM_TWO
problem.addFrequencyConstraint(opts['sigma'], opts['num_eigs'],
opts['ks_weight'], opts['offset'],
opts['scale'], opts['max_jd_size'],
opts['eig_tol'], opts['use_jd'],
opts['fgmres_size'], opts['eig_rtol'],
opts['eig_atol'], opts['num_recycle'],
recycle_type)
else: # use the Lanczos method
problem.addFrequencyConstraint(opts['sigma'], opts['num_eigs'],
opts['ks_weight'], opts['offset'],
opts['scale'],
opts['max_lanczos'], opts['tol'], 0,
0, 0, 0, 0, TACS.SUM_TWO,
opts['track_eigen_iters'])
return
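# Illustrative usage (placeholder values): require a minimum natural frequency
# of 20 Hz and tighten the eigenvalue tolerance relative to the defaults above.
#
#   addNaturalFrequencyConstraint(problem, 20.0, num_eigs=12, eig_tol=1e-8)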
def densityBasedRefine(forest, assembler, index=0,
lower=0.05, upper=0.5, reverse=False,
min_lev=0, max_lev=TMR.MAX_LEVEL):
"""
Apply a density-based refinement criteria.
This function takes in a Quad or OctForest that has been used for analysis and its
corresponding Assembler object. It then uses the data set in the constitutive object
to extract the density within each element. If the density falls below the the bound
*lower* the element is coarsened, if the density exceeds *upper* the element is
refined. If *reverse* is set, this scheme is reversed so low design values are
refined. The refinement is applied directly to the forest.
Args:
forest (QuadForest or OctForest): OctForest or QuadForest to refine
assembler (Assembler): The TACS.Assembler object associated with forest
index (int): The component index of the design vector used to indicate material
lower (float): the lower limit used for coarsening
upper (float): the upper limit used for refinement
reverse (bool): Reverse the refinement scheme
min_lev (int): Minimum refinement level
max_lev (int): Maximum refinement level
"""
# Create refinement array
num_elems = assembler.getNumElements()
refine = np.zeros(num_elems, dtype=np.int32)
# Get the elements from the Assembler object
elems = assembler.getElements()
for i in range(num_elems):
# Extract the design variables from the element
dvs_per_node = elems[i].getDesignVarsPerNode()
dvs = elems[i].getDesignVars(i)
# Apply the refinement criteria
if reverse:
value = np.min(dvs[index::dvs_per_node])
if value >= upper:
refine[i] = -1
elif value <= lower:
refine[i] = 1
else:
value = np.max(dvs[index::dvs_per_node])
if value >= upper:
refine[i] = 1
elif value <= lower:
refine[i] = -1
# Refine the forest
forest.refine(refine, min_lev=min_lev, max_lev=max_lev)
return
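# Illustrative usage: refine elements whose maximum density exceeds 0.5,
# coarsen those below 0.05, and cap the refinement at level 4.
#
#   densityBasedRefine(forest, assembler, lower=0.05, upper=0.5, max_lev=4)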
def approxDistanceRefine(forest, fltr, assembler, refine_distance, index=0,
domain_length=1.0, tfactor=0.05, cutoff=0.15,
filename=None, min_lev=0, max_lev=TMR.MAX_LEVEL):
"""
Apply a distance-based refinement criteria.
This function takes in a forest associated with the analysis, a filter associated
with the design variables and the corresponding assembler object. An approximate
distance function is computed using TMR which gives an approximation of the distance
to the closest point on the domain boundary. In this case, the domain boundary is
approximated as those points that are intermediate in [cutoff, 1-cutoff]. Since these
are applied to the filtered (not projected) states, there will be intermediate density
values. Finally, all elements that contain values that are within refine_distance to
    the approximate boundary are refined, while all other elements are coarsened.
Notes: The index controls which component of the design variable is used to estimate
the distance (useful for multimaterial cases). The tfactor controls the approximation,
larger values of tfactor lead to more diffusive approximations, but small values may
    lead to numerical issues. The actual factor value is determined based on the domain
length parameter which gives the characteristic length of the domain.
Args:
forest (QuadForest or OctForest): OctForest or QuadForest to refine
filtr (QuadForest or OctForest): OctForest or QuadForest for the filter object
assembler (Assembler): The TACS.Assembler object associated with forest
refine_distance (float): Refine all elements within this distance
index (int): The design variable component index (!= 0 for multimaterial cases)
tfactor (float): Factor applied to the domain_length for computing the approx dist.
cutoff (float): Cutoff to indicate structural interface
min_lev (int): Minimum refinement level
max_lev (int): Maximum refinement level
"""
# Set up and solve for an approximate level set function
x = assembler.createDesignVec()
assembler.getDesignVars(x)
# Approximate the distance to the boundary
dist = TMR.ApproximateDistance(fltr, x, index=index, cutoff=cutoff,
t=tfactor*domain_length, filename=filename)
# Create refinement array
num_elems = assembler.getNumElements()
refine = np.zeros(num_elems, dtype=np.int32)
for i in range(num_elems):
# Apply the refinement criteria
if dist[i] <= refine_distance:
refine[i] = 1
else:
refine[i] = -1
# Refine the forest
forest.refine(refine, min_lev=min_lev, max_lev=max_lev)
return
def targetRefine(forest, fltr, assembler, refine_distance,
interface_lev=2, interior_lev=1,
interface_index=-1, interior_index=0, reverse=False,
domain_length=1.0, tfactor=0.05, cutoff=0.15,
filename=None, min_lev=0, max_lev=TMR.MAX_LEVEL):
"""
Apply a target-based refinement strategy.
This refinement strategy employs a targeted refinement strategy. The goal is to
refine the interface elements, defined from an approximate distance calculation,
and the interior elements, defined as those elements with a given threshold of
the density field that are not close to the interface, to a prescribed level at
the first iteration. All other elements are coarsened aggressively.
Note: The interface and interior can be computed using different indices in
multimaterial optimization. When the interface index is negative, all materials are
considered during the interface distance calculation.
Args:
forest (QuadForest or OctForest): OctForest or QuadForest to refine
filtr (QuadForest or OctForest): OctForest or QuadForest for the filter object
assembler (Assembler): The TACS.Assembler object associated with forest
refine_distance (float): Refine all elements within this distance
interface_lev (int): Target interface refinement level
interior_lev (int): Target interior refinement level
interface_index (int): Design variable component index for the interface problem
interior_index (int): Design variable component index for the interior
reverse (boolean): Reverse the sense of the interior refinement
tfactor (float): Factor applied to the domain_length for computing the approx dist.
cutoff (float): Cutoff to indicate structural interface
filename (str): File name for the approximate distance calculation
min_lev (int): Minimum refinement level
max_lev (int): Maximum refinement level
"""
# Set up and solve for an approximate level set function
x = assembler.createDesignVec()
assembler.getDesignVars(x)
# Approximate the distance to the boundary
dist = TMR.ApproximateDistance(fltr, x, index=interface_index, cutoff=cutoff,
t=tfactor*domain_length, filename=filename)
# Create refinement array
num_elems = assembler.getNumElements()
refine = np.zeros(num_elems, dtype=np.int32)
# Compute the levels
if isinstance(forest, TMR.OctForest):
octants = forest.getOctants()
lev = np.zeros(len(octants))
for i, oc in enumerate(octants):
lev[i] = oc.level
elif isinstance(forest, TMR.QuadForest):
quads = forest.getQuadrants()
lev = np.zeros(len(quads))
for i, quad in enumerate(quads):
lev[i] = quad.level
# Get the elements from the Assembler object
elems = assembler.getElements()
for i in range(num_elems):
# Apply the refinement criteria
if dist[i] <= refine_distance:
refine[i] = interface_lev - lev[i]
else:
# Now check whether this is in the interior or exterior of
# the domain
dvs_per_node = elems[i].getDesignVarsPerNode()
dvs = elems[i].getDesignVars(i)
# Apply the refinement criteria
if reverse:
value = np.min(dvs[interior_index::dvs_per_node])
if value >= 1.0 - cutoff:
refine[i] = -1
elif value <= cutoff:
refine[i] = interior_lev - lev[i]
else:
value = np.max(dvs[interior_index::dvs_per_node])
if value >= 1.0 - cutoff:
refine[i] = interior_lev - lev[i]
elif value <= cutoff:
refine[i] = -1
# Refine the forest
forest.refine(refine, min_lev=min_lev, max_lev=max_lev)
return
class OptFilterWeights:
def __init__(self, diag, X, H):
"""
Compute an approximation of the coefficients of a Helmholtz filter.
Args:
diag (int): The index of the diagonal (base point) of the stencil
X (np.ndarray): An array of the node positions
H (np.ndarray): Symmetric matrix of second derivatives for the filter
"""
self.diag = diag
self.X = X
self.n = self.X.shape[0]
# Compute the normalization
if len(self.X.shape) == 1:
self.delta = np.max(np.absolute(self.X - self.X[self.diag]))
else:
self.delta = np.sqrt(np.max(
np.sum((self.X - self.X[self.diag,:])*(self.X - self.X[self.diag,:]), axis=1)))
self.dim = 3
if len(self.X.shape) == 1 or self.X.shape[1] == 1:
self.dim = 1
# Compute the constraint matrix
A = np.zeros((2, self.n-1))
# Populate the b vector
b = np.zeros(2)
b[1] = H[0,0]
index = 0
for i in range(self.n):
if i != self.diag:
dx = (self.X[i] - self.X[self.diag])/self.delta
A[0,index] = dx
A[1,index] = 0.5*dx**2
index += 1
elif self.X.shape[1] == 2:
self.dim = 2
# Compute the constraint matrix
A = np.zeros((5, self.n-1))
# Populate the b vector
b = np.zeros(5)
b[2] = H[0,0]
b[3] = H[1,1]
b[4] = 2.0*H[0,1]
index = 0
for i in range(self.n):
if i != self.diag:
dx = (self.X[i,0] - self.X[self.diag,0])/self.delta
dy = (self.X[i,1] - self.X[self.diag,1])/self.delta
A[0,index] = dx
A[1,index] = dy
A[2,index] = 0.5*dx**2
A[3,index] = 0.5*dy**2
A[4,index] = dx*dy
index += 1
else:
# Compute the constraint matrix
A = np.zeros((9, self.n-1))
# Populate the b vector
b = np.zeros(9)
b[3] = H[0,0]
b[4] = H[1,1]
b[5] = H[2,2]
b[6] = 2*H[1,2]
b[7] = 2*H[0,2]
b[8] = 2*H[0,1]
index = 0
for i in range(self.n):
if i != self.diag:
dx = (self.X[i,0] - self.X[self.diag,0])/self.delta
dy = (self.X[i,1] - self.X[self.diag,1])/self.delta
dz = (self.X[i,2] - self.X[self.diag,2])/self.delta
A[0,index] = dx
A[1,index] = dy
A[2,index] = dz
A[3,index] = 0.5*dx**2
A[4,index] = 0.5*dy**2
A[5,index] = 0.5*dz**2
A[6,index] = dy*dz
A[7,index] = dx*dz
A[8,index] = dx*dy
index += 1
self.b = b
self.A = A
return
def obj_func(self, w):
"""Evaluate the sum square of the weights"""
return 0.5*np.sum(w**2)
def obj_func_der(self, w):
"""Evaluate the derivative of the sum square of weights"""
return w
def con_func(self, w):
"""Compute the interpolation constraints"""
return np.dot(self.A, w) - self.b
def con_func_der(self, w):
"""Compute the derivative of the interpolation ocnstraints"""
return self.A
def set_alphas(self, w, alpha):
"""Compute the interpolating coefficients based on the weights"""
alpha[:] = 0.0
index = 0
for i in range(self.n):
if i != self.diag:
alpha[i] = w[index]/self.delta**2
alpha[self.diag] += w[index]/self.delta**2
index += 1
alpha[self.diag] += 1.0
return
class Mfilter(TMR.HelmholtzPUFilter):
def __init__(self, N, assemblers, filters, vars_per_node=1,
dim=2, r=0.01):
"""
Create an M-filter: A type of Helmholtz partition of unity filter that
approximates the Helmholtz PDE-based filter and maintains positive
coefficients over a range of meshes.
Args:
N (int): Number of terms in the approximate Neumann inverse
assemblers (list): List of TACS.Assembler objects
filters (list): List of TMR.QuadForest or TMR.OctForest objects
vars_per_node (int): Number of design variables at each node
dim (int): Spatial dimension of the problem
r (float): Filter radius
Note: You must call initialize() on the filter before use.
"""
self.r = r
self.dim = dim
return
def getInteriorStencil(self, diag, X, alpha):
"""Get the weights for an interior stencil point"""
H = self.r**2*np.eye(3)
# Reshape the values in the matrix
X = X.reshape((-1, 3))
n = X.shape[0]
if self.dim == 2:
X = X[:,:2]
# Set up the optimization problem
opt = OptFilterWeights(diag, X, H)
# Set the bounds and initial point
w0 = np.ones(n-1)
bounds = []
for i in range(n-1):
bounds.append((0, None))
res = minimize(opt.obj_func, w0, jac=opt.obj_func_der,
method='SLSQP', bounds=bounds,
constraints={'type': 'eq', 'fun': opt.con_func,
'jac': opt.con_func_der})
# Set the optimized alpha values
opt.set_alphas(res.x, alpha)
return
def getBoundaryStencil(self, diag, normal, X, alpha):
"""Get a sentcil point on the domain boundary"""
H = self.r**2*np.eye(2)
# Reshape the values in the matrix
X = X.reshape((-1, 3))
n = X.shape[0]
if self.dim == 2:
X = X[:,:2]
t = np.array([normal[1], -normal[0]])
Xt = np.dot(X - X[diag,:], t)
elif self.dim == 3:
# Reduce the problem to a 2d problem on linearization of the
# the domain boundary. First, compute an arbitrary direction
# that is not aligned along the normal direction
            index = np.argmin(np.absolute(normal))
import numpy
from hmmlearn.base import _BaseHMM
from hmmlearn.hmm import _check_and_set_gaussian_n_features
from hmmlearn import _utils
class FullPTHMM(_BaseHMM):
r"""Hidden Markov Model for Particle Tracking.
Args:
n_components (int): Number of states.
min_var (float, optional): Floor on the variance to prevent overfitting.
Defaults to 1e-5.
startprob_prior (array, optional):
shape (n_components, ). Parameters of the Dirichlet prior distribution for
:attr:`startprob_`.
transmat_prior (array, optional):
shape (n_components, n_components). Parameters of the Dirichlet prior distribution for each row
of the transition probabilities :attr:`transmat_`.
algorithm (string, optional):
            Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state (RandomState or an int seed, optional):
A random number generator instance.
n_iter (int, optional): Maximum number of iterations to perform.
tol (float, optional):
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose (bool, optional):
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params (string, optional):
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
init_params (string, optional):
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
Attributes:
monitor\_ (ConvergenceMonitor):
Monitor object used to check the convergence of EM.
startprob\_ (array): shape (n_components, ).
Initial state occupation distribution.
transmat\_ (array): shape (n_components, n_components).
Matrix of transition probabilities between states.
diffusivities\_ (array): shape (n_components, 1).
Diffusion constants for each state.
intensity_means\_ (array): shape (n_components, 1).
Mean parameters of intensity distribution for each state.
intensity_vars\_ (array): shape (n_components, 1).
Variance parameters of intensity distribution for each state.
"""
def __init__(self, n_components=1,
min_var=1e-5,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params="stdmv", init_params="stdmv"):
_BaseHMM.__init__(self, n_components,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
tol=tol, params=params, verbose=verbose,
init_params=init_params)
self.min_var = min_var
def _check(self):
super()._check()
self.diffusivities_ = numpy.asarray(self.diffusivities_)
assert self.diffusivities_.shape == (self.n_components, 1)
self.intensity_means_ = numpy.asarray(self.intensity_means_)
assert self.intensity_means_.shape == (self.n_components, 1)
self.intensity_vars_ = numpy.asarray(self.intensity_vars_)
assert self.intensity_vars_.shape == (self.n_components, 1)
self.n_features = 1
def _generate_sample_from_state(self, state, random_state=None):
D = self.diffusivities_[state]
mean = self.intensity_means_[state]
var = self.intensity_vars_[state]
return numpy.hstack([
numpy.sqrt(numpy.power(random_state.normal(scale=numpy.sqrt(2 * D), size=2), 2).sum(keepdims=True)),
random_state.normal(loc=mean, scale=numpy.sqrt(var), size=(1, )),
])
def _get_n_fit_scalars_per_param(self):
nc = self.n_components
nf = self.n_features
return {
"s": nc - 1,
"t": nc * (nc - 1),
"d": nc * nf,
"m": nc * nf,
"v": nc * nf,
}
def _init(self, X, lengths=None):
_check_and_set_gaussian_n_features(self, X)
super()._init(X, lengths=lengths)
_, n_features = X.shape
if hasattr(self, 'n_features') and self.n_features != n_features:
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (n_features, self.n_features))
self.n_features = n_features
if 'd' in self.init_params or not hasattr(self, "diffusivities_"):
diffusivity_means = numpy.mean(X[:, [0]], axis=0) * 0.25
variations = numpy.arange(1, self.n_components + 1)
variations = variations / variations.sum()
self.diffusivities_ = diffusivity_means * variations[:, numpy.newaxis]
if 'm' in self.init_params or not hasattr(self, "intensity_means_"):
from sklearn import cluster
kmeans = cluster.KMeans(n_clusters=self.n_components,
random_state=self.random_state)
kmeans.fit(X[:, [1]])
self.intensity_means_ = kmeans.cluster_centers_
if 'v' in self.init_params or not hasattr(self, "intensity_vars_"):
var = numpy.var(X[:, [1]].T) + self.min_var
self.intensity_vars_ = numpy.tile([var], (self.n_components, 1))
def _initialize_sufficient_statistics(self):
stats = super()._initialize_sufficient_statistics()
stats['post'] = numpy.zeros(self.n_components)
stats['obs1**2'] = numpy.zeros((self.n_components, 1))
stats['obs2'] = numpy.zeros((self.n_components, 1))
stats['obs2**2'] = numpy.zeros((self.n_components, 1))
return stats
def _compute_log_likelihood(self, X):
D = self.diffusivities_
mean = self.intensity_means_
var = self.intensity_vars_
# print("D=", D)
# print("mean=", mean)
# print("var=", var)
if not all(var > 0):
raise ValueError(f'Variance must be positive [{var}]')
q1 = numpy.log(X[:, [0]] / (2 * D[:, 0])) - (X[:, [0]] ** 2 / (4 * D[:, 0]))
q2 = -0.5 * numpy.log(2 * numpy.pi * var[:, 0]) - (X[:, [1]] - mean[:, 0]) ** 2 / (2 * var[:, 0])
return q1 + q2
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice):
super()._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)
if any(param in self.params for param in 'dmv'):
stats['post'] += posteriors.sum(axis=0)
if 'd' in self.params:
stats['obs1**2'] += numpy.dot(posteriors.T, obs[:, [0]] ** 2)
if 'm' in self.params:
stats['obs2'] += numpy.dot(posteriors.T, obs[:, [1]])
if 'v' in self.params:
stats['obs2**2'] += numpy.dot(posteriors.T, obs[:, [1]] ** 2)
def _do_mstep(self, stats):
super()._do_mstep(stats)
denom = stats['post'][:, numpy.newaxis]
if 'd' in self.params:
self.diffusivities_ = 0.25 * stats['obs1**2'] / denom
if 'm' in self.params:
self.intensity_means_ = stats['obs2'] / denom
if 'v' in self.params:
self.intensity_vars_ = (
stats['obs2**2'] - 2 * self.intensity_means_ * stats['obs2'] + self.intensity_means_ ** 2 * denom) / denom
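# Illustrative sketch (not part of the class): fitting a two-state model to
# particle-tracking data. 'step_lengths' and 'intensities' are hypothetical
# 1D arrays of per-frame displacement magnitudes and spot intensities.
#
#   X = numpy.column_stack([step_lengths, intensities])
#   model = FullPTHMM(n_components=2, n_iter=100)
#   model.fit(X)
#   states = model.predict(X)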
class PTHMM(_BaseHMM):
r"""Hidden Markov Model for Particle Tracking.
Args:
n_diffusivities (int): Number of diffusivity states.
n_oligomers (int): Number of oligomeric states.
n_components is equal to (n_diffusivities * n_oliogmers).
min_var (float, optional): Floor on the variance to prevent overfitting.
Defaults to 1e-5.
startprob_prior (array, optional):
shape (n_components, ). Parameters of the Dirichlet prior distribution for
:attr:`startprob_`.
transmat_prior (array, optional):
shape (n_components, n_components). Parameters of the Dirichlet prior distribution for each row
of the transition probabilities :attr:`transmat_`.
algorithm (string, optional):
            Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state (RandomState or an int seed, optional):
A random number generator instance.
n_iter (int, optional): Maximum number of iterations to perform.
tol (float, optional):
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose (bool, optional):
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params (string, optional):
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
init_params (string, optional):
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
Attributes:
monitor\_ (ConvergenceMonitor):
Monitor object used to check the convergence of EM.
startprob\_ (array): shape (n_components, ).
Initial state occupation distribution.
transmat\_ (array): shape (n_components, n_components).
Matrix of transition probabilities between states.
diffusivities\_ (array): shape (n_diffusivities, 1).
Diffusion constants for each state.
intensity_means\_ (array): shape (1, 1).
Base mean parameter of intensity distributions.
intensity_vars\_ (array): shape (1, 1).
Base Variance parameter of intensity distributions.
"""
def __init__(self, n_diffusivities=3, n_oligomers=4,
min_var=1e-5,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params="stdmv", init_params="stdmv"):
_BaseHMM.__init__(self, n_diffusivities * n_oligomers,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
tol=tol, params=params, verbose=verbose,
init_params=init_params)
self.min_var = min_var
self.n_diffusivities = n_diffusivities
self.n_oligomers = n_oligomers
assert self.n_components == self.n_diffusivities * self.n_oligomers
def _check(self):
super()._check()
self.diffusivities_ = numpy.asarray(self.diffusivities_)
assert self.diffusivities_.shape == (self.n_diffusivities, 1)
self.intensity_means_ = numpy.asarray(self.intensity_means_)
assert self.intensity_means_.shape == (1, 1)
self.intensity_vars_ = numpy.asarray(self.intensity_vars_)
assert self.intensity_vars_.shape == (1, 1)
self.n_features = 2
def _generate_sample_from_state(self, state, random_state=None):
m = state // self.n_oligomers
n = state % self.n_oligomers
mean = self.intensity_means_[0] * (n + 1)
var = self.intensity_vars_[0] * (n + 1)
D = self.diffusivities_[m]
return numpy.hstack([
numpy.sqrt(numpy.power(random_state.normal(scale=numpy.sqrt(2 * D), size=2), 2).sum(keepdims=True)),
random_state.normal(loc=mean, scale=numpy.sqrt(var), size=(1, )),
])
def _get_n_fit_scalars_per_param(self):
return {
"s": self.n_components - 1,
"t": self.n_components * (self.n_components - 1),
"d": self.n_diffusivities,
"m": 1,
"v": 1,
}
def _init(self, X, lengths=None):
_check_and_set_gaussian_n_features(self, X)
super()._init(X, lengths=lengths)
_, n_features = X.shape
assert n_features == 2
if hasattr(self, 'n_features') and self.n_features != n_features:
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (n_features, self.n_features))
self.n_features = n_features
if 'd' in self.init_params or not hasattr(self, "diffusivities_"):
diffusivity_means = numpy.mean(X[:, [0]], axis=0) * 0.25
variations = numpy.arange(1, self.n_diffusivities + 1)
variations = variations / variations.sum()
self.diffusivities_ = diffusivity_means * variations[:, numpy.newaxis]
if 'm' in self.init_params or not hasattr(self, "intensity_means_"):
# kmeans = cluster.KMeans(n_clusters=self.n_components,
# random_state=self.random_state)
# kmeans.fit(X[:, [1]])
# self.intensity_means_ = kmeans.cluster_centers_
self.intensity_means_ = numpy.array([[numpy.average(X[:, 1]) * 0.5]])
if 'v' in self.init_params or not hasattr(self, "intensity_vars_"):
var = numpy.var(X[:, [1]].T) + self.min_var
self.intensity_vars_ = numpy.array([[var]])
def _initialize_sufficient_statistics(self):
stats = super()._initialize_sufficient_statistics()
stats['post'] = numpy.zeros(self.n_components)
        stats['obs1**2'] = numpy.zeros((self.n_components, 1))
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Function, get_new_symbol
from coremltools.converters.mil.mil.var import Var
from .. import ops
from ..converter import TorchConverter, TranscriptionContext
from ..internal_graph import InternalTorchIRNode
class TestTorchOps:
"""Class containing tests for converting TorchIR -> CoreML ops.
These tests interface with only the InternalTorchIRGraph and do not
    build a torch module. Thus, they are much faster than the numerical tests.
However, for some ops it is necessary to use the torch module to verify
    numerical output, so they are placed in the numerical tests.
    NOTE: Confused where @context is coming from? It's from the pytest fixture defined below.
"""
@pytest.fixture
def context(self):
return TranscriptionContext()
@pytest.fixture
def set_random_seeds(self):
torch.manual_seed(1)
np.random.seed(1)
@pytest.mark.parametrize("dtype", [torch.bool, torch.float, torch.int])
def test_constant(self, context, dtype):
test_data = torch.ones(1, dtype=dtype)
node = InternalTorchIRNode(
attr={"value": test_data}, kind="constant", inputs=[], outputs=["1"]
)
ssa = self._construct_test_graph(context, ops.constant, node, "1")
assert np.allclose(test_data, ssa.val)
assert test_data.shape == ssa.shape
def test_constant_magic(self, context):
test_val = ops.PYTORCH_MAGIC_DEFAULT
node = InternalTorchIRNode(
attr={"value": test_val}, kind="constant", inputs=[], outputs=["1"]
)
ssa = self._construct_test_graph(context, ops.constant, node, "1")
# We expect the magic default to get converted to None
assert ssa is None
@staticmethod
def _gen_constants(size, vals):
"""Helper function. Generates a list of internal constant nodes.
Arguments:
size: number of constants to generate
vals: Either a list of values for each constant or one value used for all constants."""
is_list = isinstance(vals, list)
if is_list:
if len(vals) != size:
raise ValueError("len(@vals): {} != size: {}".format(len(vals), size))
constants = []
for index in range(size):
if is_list:
val = vals[index]
else:
val = vals
constants.append(
InternalTorchIRNode(
attr={"value": val},
kind="constant",
inputs=[],
outputs=[str(index)],
)
)
input_list = [str(i) for i in range(size)]
output_name = str(len(input_list))
return constants, input_list, output_name
@staticmethod
def _construct_test_graph(
context, test_op, test_node, output_name=None, graph_inputs=None, constants=None
):
""" Construct an Function for the given @graph_inputs, @constants,
and @test_node. Returns the output of the graph, which is the ssa
Var of the given @output_name.
"""
if graph_inputs is None:
graph_inputs = {}
if constants is None:
constants = []
with Function(inputs=graph_inputs) as ssa_func:
for name in ssa_func.inputs.keys():
context.add(ssa_func.inputs[name])
for node in constants:
ops.constant(context, node)
test_op(context, test_node)
ssa = None
if output_name:
ssa = context[output_name]
return ssa
def _test_elementwise_binary(
self, context, op_name, op, test_input, num_constants, expected_result
):
"""Helper function, runs op on test input and compares against expected result"""
constants, input_list, output_name = self._gen_constants(
num_constants, test_input
)
eb_node = InternalTorchIRNode(
kind=op_name, inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, op, eb_node, output_name, constants=constants
)
np.testing.assert_allclose(expected_result, ssa.val, atol=1e-7)
def _test_cast(self, context, test_val, op_kind, op_func, python_type):
constants, input_list, output_name = self._gen_constants(1, [test_val])
node = InternalTorchIRNode(
kind=op_kind, inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, op_func, node, output_name, constants=constants
)
assert ssa.val == python_type(test_val)
def _test_activation(
self, context, input_shape, constants_list, op_kind, op_func, torch_func, atol
):
test_input = torch.rand(input_shape)
constants, input_list, output_name = self._gen_constants(
len(constants_list) + 1, [test_input] + constants_list
)
node = InternalTorchIRNode(
kind=op_kind, inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, op_func, node, output_name, constants=constants
)
expected_result = torch_func(test_input).numpy()
np.testing.assert_allclose(expected_result, ssa.val, atol=atol)
def test_add(self, context):
test_input_1 = np.random.rand(2, 3)
test_input_2 = np.random.rand(2, 3)
scale_factor = 1
self._test_elementwise_binary(
context,
"Add",
ops.add,
[test_input_1, test_input_2, scale_factor],
3,
test_input_1 + test_input_2,
)
def test_add_no_scale_factor(self, context):
test_input_1 = np.random.rand(2, 3)
test_input_2 = np.random.rand(2, 3)
self._test_elementwise_binary(
context,
"Add",
ops.add,
[test_input_1, test_input_2],
2,
test_input_1 + test_input_2,
)
@pytest.mark.parametrize(
"test_input_1, test_input_2",
[(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5),],
)
def test_sub(self, context, test_input_1, test_input_2):
scale_factor = 1
self._test_elementwise_binary(
context,
"Sub",
ops.sub,
[test_input_1, test_input_2, scale_factor],
3,
test_input_1 - test_input_2,
)
@pytest.mark.parametrize(
"test_input_1, test_input_2",
[(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5),],
)
def test_rsub(self, context, test_input_1, test_input_2):
scale_factor = 1
self._test_elementwise_binary(
context,
"rsub",
ops.sub,
[test_input_1, test_input_2, scale_factor],
3,
# Note the reversal of arg ordering relative to 'sub'
test_input_2 - test_input_1,
)
def test_mul(self, context):
test_input_1 = np.random.rand(3, 2)
test_input_2 = np.random.rand(3, 2)
self._test_elementwise_binary(
context,
"Mul",
ops.mul,
[test_input_1, test_input_2],
2,
test_input_1 * test_input_2,
)
def test_div(self, context):
test_input_1 = np.random.rand(3, 2)
test_input_2 = np.random.rand(3, 2)
self._test_elementwise_binary(
context,
"Div",
ops.div,
[test_input_1, test_input_2],
2,
np.divide(test_input_1, test_input_2),
)
def test_floor_divide(self, context):
test_input_1 = np.random.randint(low=1, high=100, size=(3, 2))
test_input_2 = np.random.randint(low=1, high=100, size=(3, 2))
self._test_elementwise_binary(
context,
"floor_divide",
ops.floor_divide,
[test_input_1, test_input_2],
2,
np.floor_divide(test_input_1, test_input_2),
)
def test_pow(self, context):
test_input_1 = np.random.rand(3, 2)
test_input_2 = np.random.rand(3, 2)
self._test_elementwise_binary(
context,
"Pow",
ops.pow_,
[test_input_1, test_input_2],
2,
np.power(test_input_1, test_input_2),
)
def test_eq(self, context):
test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float()
test_input_2 = torch.ones([2, 3, 4, 5, 6]).float()
test_input_2[0][0][0][0][0] = 0
expected_output = (test_input_1 == test_input_2).float()
self._test_elementwise_binary(
context, "Eq", ops.eq, [test_input_1, test_input_2], 2, expected_output
)
def test_ne(self, context):
test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float()
test_input_2 = torch.ones([2, 3, 4, 5, 6]).float()
test_input_2[0][0][0][0][0] = 0
expected_output = (test_input_1 != test_input_2).float()
self._test_elementwise_binary(
context, "ne", ops.ne, [test_input_1, test_input_2], 2, expected_output
)
def test_le(self, context):
test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float()
test_input_2 = torch.ones([2, 3, 4, 5, 6]).float()
test_input_2[0][0][0][0][0] = 0
expected_output = (test_input_1 <= test_input_2).float()
self._test_elementwise_binary(
context, "Le", ops.le, [test_input_1, test_input_2], 2, expected_output
)
def test_lt(self, context):
test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float()
test_input_2 = torch.ones([2, 3, 4, 5, 6]).float()
test_input_2[0][0][0][0][0] = 0
expected_output = (test_input_1 < test_input_2).float()
self._test_elementwise_binary(
context, "Lt", ops.lt, [test_input_1, test_input_2], 2, expected_output
)
def test_ge(self, context):
test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float()
test_input_2 = torch.ones([2, 3, 4, 5, 6]).float()
test_input_2[0][0][0][0][0] = 0
expected_output = (test_input_1 >= test_input_2).float()
self._test_elementwise_binary(
context, "Ge", ops.ge, [test_input_1, test_input_2], 2, expected_output
)
def test_gt(self, context):
test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float()
test_input_2 = torch.ones([2, 3, 4, 5, 6]).float()
test_input_2[0][0][0][0][0] = 0
expected_output = (test_input_1 > test_input_2).float()
self._test_elementwise_binary(
context, "Gt", ops.gt, [test_input_1, test_input_2], 2, expected_output
)
@pytest.mark.parametrize(
"size, array_type",
itertools.product(
[1, 5, 7],
[
("ListConstruct", ops.listconstruct),
("TupleConstruct", ops.tupleconstruct),
],
),
)
def test_arrayconstruct_scalars(self, context, size, array_type):
constant_vals = list(range(size))
array_kind = array_type[0]
array_op = array_type[1]
constants, input_list, output_name = self._gen_constants(size, constant_vals)
ac_node = InternalTorchIRNode(
kind=array_kind, inputs=input_list, outputs=[output_name],
)
ssa = self._construct_test_graph(
context, array_op, ac_node, output_name, constants=constants
)
expected_val = np.arange(size)
np.testing.assert_equal(ssa.shape, (size,))
np.testing.assert_array_equal(ssa.val, expected_val)
@pytest.mark.parametrize(
"shape1, shape2, array_type",
itertools.product(
[(1, 2), (3, 4, 5), (2,)],
[(2, 1), (1, 4, 5), (3,)],
[
("ListConstruct", ops.listconstruct),
("TupleConstruct", ops.tupleconstruct),
],
),
)
def test_arrayconstruct_nonscalar(self, context, shape1, shape2, array_type):
tensor1 = torch.rand(shape1)
tensor2 = torch.rand(shape2)
array_kind = array_type[0]
array_op = array_type[1]
constants, input_list, output_name = self._gen_constants(2, [tensor1, tensor2])
ac_node = InternalTorchIRNode(
kind=array_kind, inputs=input_list, outputs=[output_name],
)
ssa = self._construct_test_graph(
context, array_op, ac_node, output_name, constants=constants
)
expected_val = (tensor1.numpy(), tensor2.numpy())
np.testing.assert_equal(len(ssa), 2)
for x, y in zip(ssa, expected_val):
np.testing.assert_allclose(x.val, y)
@pytest.mark.parametrize(
"input_shape, dim0, dim1",
[
x
for x in itertools.product(
[(1, 2, 3), (1, 2, 3, 4), (1, 2, 3, 4, 5)], [0, 1, -1], [0, 2, -2],
)
]
+ [((1, 2), None, None)],
)
def test_transpose(self, context, input_shape, dim0, dim1):
test_input = torch.rand(input_shape)
constant_list = [test_input]
if len(input_shape) > 2:
constant_list += [dim0, dim1]
kind = "transpose"
expected_result = torch.transpose(test_input, dim0, dim1)
else:
kind = "t"
expected_result = test_input.t()
constants, input_list, output_name = self._gen_constants(
len(constant_list), constant_list
)
transpose_node = InternalTorchIRNode(
kind=kind, inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, ops.transpose, transpose_node, output_name, constants=constants,
)
np.testing.assert_array_equal(expected_result.shape, ssa.shape)
np.testing.assert_allclose(expected_result, ssa.val)
@pytest.mark.parametrize(
"dim1, dim2, dim3", itertools.product([1, 2, 5], [2, 5, 10], [1, 2, 5]),
)
def test_matmul(self, context, dim1, dim2, dim3):
mat1 = torch.rand((dim1, dim2))
mat2 = torch.rand((dim2, dim3))
constant_vals = [
mat1,
mat2,
]
constants, input_list, output_name = self._gen_constants(2, constant_vals)
matmul_node = InternalTorchIRNode(
kind="matmul", inputs=input_list, outputs=[output_name],
)
ssa = self._construct_test_graph(
context, ops.matmul, matmul_node, output_name, constants=constants
)
expected_result = torch.matmul(mat1, mat2).detach().numpy()
assert np.allclose(expected_result, ssa.val)
@pytest.mark.parametrize(
"input_shape, axis, expected_shape",
[
((1, 2), None, (2,)),
((1, 2), 0, (2,)),
((1, 2, 1), None, (2,)),
((1, 2, 1, 1), None, (2,)),
((1, 2, 1, 1), 2, (1, 2, 1)),
((1, 2, 1, 1, 1), None, (2,)),
],
)
def test_squeeze(self, context, input_shape, axis, expected_shape):
test_data = torch.rand(input_shape)
if axis is None:
constants, input_list, output_name = self._gen_constants(1, test_data)
else:
constants, input_list, output_name = self._gen_constants(
2, [test_data, axis]
)
squeeze_node = InternalTorchIRNode(
kind="Squeeze", inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, ops.squeeze, squeeze_node, output_name, constants=constants
)
if axis is None:
expected_result = torch.squeeze(test_data)
else:
expected_result = torch.squeeze(test_data, axis)
assert np.allclose(expected_result, ssa.val)
assert expected_result.size() == torch.Size(expected_shape)
@pytest.mark.parametrize(
"input_shape, axis, expected_shape",
[
((2,), 0, (1, 2)),
((2,), 1, (2, 1)),
((2,), -1, (2, 1)),
((2, 3), 1, (2, 1, 3)),
],
)
def test_unsqueeze(self, context, input_shape, axis, expected_shape):
test_data = torch.rand(input_shape)
constants, input_list, output_name = self._gen_constants(2, [test_data, axis])
unsqueeze_node = InternalTorchIRNode(
kind="Unsqueeze", inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, ops.unsqueeze, unsqueeze_node, output_name, constants=constants
)
expected_result = torch.unsqueeze(test_data, axis)
assert np.allclose(expected_result, ssa.val)
assert expected_result.size() == torch.Size(expected_shape)
@pytest.mark.parametrize(
"input_shape, start, end",
[
((2, 1, 1, 2), 1, 3),
((2, 2, 1, 1), 1, -2),
((1, 1, 1), 0, 2),
((1, 2), 0, 1),
((1, 2), 1, 1),
((1, 1), 1, -1),
((1,), 0, 0),
],
)
def test_flatten(self, context, input_shape, start, end):
test_data = torch.rand(input_shape)
constants, input_list, output_name = self._gen_constants(
3, [test_data, start, end]
)
flatten_node = InternalTorchIRNode(
kind="Flatten", inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, ops.flatten, flatten_node, output_name, constants=constants
)
expected_result = torch.flatten(test_data, start, end)
assert np.allclose(expected_result, ssa.val)
@pytest.mark.parametrize(
"start, end", [(0, -5), (100, 2), (2, 100), (-3, -4),],
)
def test_flatten_exception(self, context, start, end):
test_data = torch.rand(1, 1, 1, 1)
constants, input_list, output_name = self._gen_constants(
3, [test_data, start, end]
)
flatten_node = InternalTorchIRNode(
kind="Flatten", inputs=input_list, outputs=[output_name]
)
with pytest.raises(ValueError):
self._construct_test_graph(
context, ops.flatten, flatten_node, output_name, constants=constants,
)
@pytest.mark.parametrize(
"input_shape", [(2, 3), (2, 3, 4), (2, 3, 4, 5), (2, 3, 4, 5, 6),],
)
def test_permute(self, context, input_shape):
test_data = torch.rand(*input_shape)
permutation = list(range(len(input_shape)))
np.random.shuffle(permutation)
constants, input_list, output_name = self._gen_constants(
2, [test_data, permutation]
)
permute_node = InternalTorchIRNode(
kind="Permute", inputs=input_list, outputs=[output_name],
)
ssa = self._construct_test_graph(
context, ops.permute, permute_node, output_name, constants=constants
)
expected_result = test_data.permute(*permutation)
assert expected_result.shape == ssa.shape
@pytest.mark.parametrize(
"in_features, out_features, scaling",
itertools.product([10, 25, 100], [3, 6], [1.0, 0.5]),
)
def test_addmm(self, context, in_features, out_features, scaling):
input_data = torch.rand((1, in_features))
weight_data = torch.rand((in_features, out_features))
bias_data = torch.rand((out_features))
constant_vals = [
scaling,
input_data,
weight_data,
bias_data,
]
constants, _, output_name = self._gen_constants(4, constant_vals)
addmm_node = InternalTorchIRNode(
kind="addmm", inputs=["3", "1", "2", "0", "0"], outputs=[output_name],
)
ssa = self._construct_test_graph(
context, ops.addmm, addmm_node, output_name, constants=constants
)
torch_linear = nn.Linear(in_features=in_features, out_features=out_features,)
expected_shape = tuple(torch_linear(input_data).shape)
assert expected_shape == ssa.shape
@pytest.mark.parametrize(
"height, width, kernel_size, stride, padding, dilation",
itertools.product([5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [1, 3]),
)
def test_convolution2d(
self,
context,
height,
width,
kernel_size,
stride,
padding,
dilation,
groups=1,
in_channels=1,
out_channels=2,
):
test_input = torch.rand(1, in_channels, height, width)
constant_vals = [
1, # None argument
test_input,
np.random.rand(
out_channels, in_channels, kernel_size, kernel_size
), # weights
np.random.rand(out_channels), # bias
np.array([stride, stride]),
np.array([padding, padding]),
np.array([dilation, dilation]),
False, # transposed
np.array([0, 0]), # output_pad
groups,
]
constants, _, output_name = self._gen_constants(
len(constant_vals), constant_vals
)
# For reference, the values for `kind` and `inputs` indices are determined from the definition for Torch's
# `at::_convolution` used for all convolutions. The link below is approximately correct at the time of writing.
# https://github.com/pytorch/pytorch/blob/bd604mb5b7ae4f6388aca461891d620b0d485fbb/aten/src/ATen/native/Convolution.cpp#L544
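        # A rough mapping of the 12 inputs below (assumed from the at::_convolution signature):
        # input, weight, bias, stride, padding, dilation, transposed, output_padding, groups,
        # followed by benchmark/deterministic/cudnn_enabled, which all reuse constant "0" here.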
conv_node = InternalTorchIRNode(
kind="_convolution",
inputs=["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "0", "0"],
outputs=[output_name],
)
ssa = self._construct_test_graph(
context, ops._convolution, conv_node, output_name, constants=constants
)
torch_conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
expected_shape = tuple(torch_conv(test_input).shape)
        assert ssa.val is None
assert expected_shape == ssa.shape
@pytest.mark.parametrize(
"depth, height, width, kernel_size, stride, padding, dilation, groups",
itertools.product(
[5, 5],
[5, 6],
[5, 7],
[1, 3],
[(1, 1, 1), (3, 2, 1)],
[(1, 1, 1), (1, 3, 2)],
[(1, 1, 1), (1, 2, 3)],
[
1,
-1,
], # -1 groups indicates it should be set to the number of input channels for depthwise convolution
),
)
def test_convolution3d(
self,
context,
depth,
height,
width,
kernel_size,
stride,
padding,
dilation,
groups,
in_channels=2,
out_channels=4,
):
if groups == -1:
groups = in_channels
test_input = torch.rand(1, in_channels, depth, height, width)
constant_vals = [
1, # None argument
test_input,
np.random.rand(
out_channels,
in_channels // groups,
kernel_size,
kernel_size,
kernel_size,
), # weights
np.random.rand(out_channels), # bias
# PyTorch's Conv3d accepts either an int (for all dimensions) or a 3-tuple of ints (one per dimension)
np.array([stride[0], stride[1], stride[2]]),
np.array([padding[0], padding[1], padding[2]]),
np.array([dilation[0], dilation[1], dilation[2]]),
False, # transposed
np.array([0, 0, 0]), # out_pad
groups,
]
constants, _, output_name = self._gen_constants(
len(constant_vals), constant_vals
)
# For reference, the values for `kind` and `inputs` indices are determined from the definition for Torch's
# `at::_convolution` used for all convolutions. The link below is approximately correct at the time of writing.
# https://github.com/pytorch/pytorch/blob/bd604mb5b7ae4f6388aca461891d620b0d485fbb/aten/src/ATen/native/Convolution.cpp#L544
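        # The 12 inputs follow the same assumed at::_convolution argument order noted in
        # test_convolution2d above; constant "0" again fills the trailing boolean flags.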
conv_node = InternalTorchIRNode(
kind="_convolution",
inputs=["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "0", "0"],
outputs=[output_name],
)
ssa = self._construct_test_graph(
context, ops._convolution, conv_node, output_name, constants=constants
)
torch_conv = nn.Conv3d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
expected_result = torch_conv(test_input)
expected_shape = tuple(expected_result.shape)
assert ssa.val is None
assert expected_shape == ssa.shape
@pytest.mark.parametrize(
"height, width, kernel_size, stride, padding, dilation",
itertools.product([5, 6], [5, 7], [1, 3], [2, 3], [0, 1], [1, 3]),
)
def test_convolution_transpose2d(
self,
context,
height,
width,
kernel_size,
stride,
padding,
dilation,
groups=1,
in_channels=1,
out_channels=2,
):
test_input = torch.rand(1, in_channels, height, width)
constant_vals = [
np.random.rand(
in_channels, out_channels, kernel_size, kernel_size
), # weights
            np.random.rand(out_channels),  # bias
import torch
from transformers import BertTokenizer
from transformers import BertForSequenceClassification, AdamW, BertConfig
from transformers import get_linear_schedule_with_warmup
from transformers import BertModel
import transformers
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import random
import argparse
parser = argparse.ArgumentParser(description="BERT with Naver Movie Ratings")
parser.add_argument(
"--device", default="auto", help="device type that will be used while training"
)
parser.add_argument("--max_len", default=128, help="max length for BERT sequence input")
parser.add_argument("--batch_size", default=16, help="batch size for training BERT")
parser.add_argument("--lr", default=2e-5, help="learning rate for AdamW")
parser.add_argument("--epochs", default=1, help="training epochs")
args = parser.parse_args()
### Device settings
if args.device == "auto":
if torch.cuda.is_available():
device = "cuda"
print("There are %d GPU(s) available." % torch.cuda.device_count())
print("We will use the GPU:", torch.cuda.get_device_name(0))
else:
device = "cpu"
print("No GPU available, using the CPU instead.")
else:
device = args.device
### Data fetching
print("Fetching the training data")
train = pd.read_csv("nsmc/ratings_train.txt", sep="\t")
sentences = train["document"]
sentences = ["[CLS] " + str(sentence) + " [SEP]" for sentence in sentences]
### Input labels and tokenizers
labels = train["label"].values
print("Load tokenizer")
tokenizer = BertTokenizer.from_pretrained(
"bert-base-multilingual-cased", do_lower_case=False
)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
# make all input sequence lengths to be args.max_len
# maybe there should be a builtin function that supports this feature..
for i in range(len(input_ids)):
input_ids[i] = input_ids[i][: args.max_len]
if len(input_ids[i]) < args.max_len:
input_ids[i] += [0] * (args.max_len - len(input_ids[i]))
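# Note: depending on the installed transformers version, tokenizer(sentences, max_length=args.max_len,
# padding="max_length", truncation=True) can produce padded/truncated ids directly (an alternative,
# not what this script uses).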
input_ids = np.array(input_ids, dtype=np.int64)  # np.long is gone from recent NumPy; int64 is the equivalent here
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import tensorflow as tf
import os
import copy
import pickle
from tqdm import tqdm
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"output_file_train", None,
"Output TF example file for trian (or comma-separated list of files).")
flags.DEFINE_string(
"output_file_valid", None,
"Output TF example file for valid (or comma-separated list of files).")
flags.DEFINE_string(
"output_file_finetune", None,
"Output TF example file for valid (or comma-separated list of files).")
flags.DEFINE_string("data_dir", None,
"The 3D human pose of time series.")
flags.DEFINE_string("data_struc", None,
"choose bone or joint")
flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence.")
flags.DEFINE_integer(
"dupe_factor", 2,
"Number of times to duplicate the input data (with different masks).")
flags.DEFINE_integer(
"sub_seq_num", 64,
"Number of frames for each person.")
flags.DEFINE_integer(
"downsample_factor", 1,
"Number of times to downsampling the input data.")
flags.DEFINE_integer(
"mask_length", 1,
"Number of consecutive masked frames.")
flags.DEFINE_integer(
"length_feature", 75,
"Dimension of the 3d pose vector.")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
def read_data(data_dir, mode, data_struc):
    ### NTU joint indices to SBU
ntu_to_sbu = np.array([3, 20, 1, 4, 5, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 0])
sbu_to_ntu = np.array([[2, 6, 15, 19, 6, 21, 22, 10, 23, 24],
[1, 5, 11, 14, 5, 5, 5, 8, 8, 8]])
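    # ntu_to_sbu: NTU-25 index of each of the 16 SBU joints (15 joints plus the synthesized hip).
    # sbu_to_ntu: row 0 lists the NTU joints absent from SBU; row 1 gives the nearby SBU-derived
    # joints whose coordinates are copied into them (see the SBU branch below).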
data_path = os.path.join(data_dir, '{}_data_{}.npy'.format(mode, data_struc))
label_path = os.path.join(data_dir, '{}_label.pkl'.format(mode))
data = np.load(data_path, mmap_mode='r')
# data.shape: (N,C,V,T,M)
sample_length = None
try:
with open(label_path, 'rb') as f:
sample_names, labels = pickle.load(f)
sample_length = data.shape[3]
except:
# for pickle file from python2
with open(label_path, 'rb') as f:
sample_names, labels, sample_length = pickle.load(f)
output = []
label_output = []
maxT = FLAGS.sub_seq_num ######
def normalize_video(video):
"""Using 2*(x - min)/(max - min) - 1 normalization.
:param video: np array of shape [seq_len, coordinate]
:return:
"""
max_75 = np.amax(video, axis=0)
min_75 = np.amin(video, axis=0)
max_x = np.max([max_75[i] for i in range(0,FLAGS.length_feature,3)])
max_y = np.max([max_75[i] for i in range(1,FLAGS.length_feature,3)])
max_z = np.max([max_75[i] for i in range(2,FLAGS.length_feature,3)])
min_x = np.min([min_75[i] for i in range(0,FLAGS.length_feature,3)])
min_y = np.min([min_75[i] for i in range(1,FLAGS.length_feature,3)])
min_z = np.min([min_75[i] for i in range(2,FLAGS.length_feature,3)])
norm = np.zeros_like(video)
for i in range(0,FLAGS.length_feature,3):
norm[:,i] = 2*(video[:,i]-min_x)/(max_x-min_x)-1
norm[:,i+1] = 2*(video[:,i+1]-min_y)/(max_y-min_y)-1
norm[:,i+2] = 2*(video[:,i+2]-min_z)/(max_z-min_z)-1
# if max_x - min_x > 0:
# norm[:,i] = 2*(video[:,i]-min_x)/(max_x-min_x)-1
# if max_y - min_y > 0:
# norm[:,i+1] = 2*(video[:,i+1]-min_y)/(max_y-min_y)-1
# if max_z - min_z > 0:
# norm[:,i+2] = 2*(video[:,i+2]-min_z)/(max_z-min_z)-1
return norm
if data.shape[-1] == 60: ## for N-UCLA data, (N, 75, 60)
for seq, label in tqdm(zip(data, labels)):
output.append(seq)
label_output.append(label)
elif data.shape[-1] == 45: ## for UWA3D data, (N, 75, 45)
for seq, label in tqdm(zip(data, labels)):
output.append(seq)
label_output.append(label)
elif data.shape[-2] == 15: ## for SBU data, (N, 2, 25, 15, 3)
if FLAGS.length_feature == 75:
## Change to 25 joints, repeating with nearest joints
hip = (data[:, :, :, 9:10, :] + data[:, :, :, 12:13, :]) / 2.0
data = np.concatenate([data, hip], axis=3) # (N, 2, 25, 15 + 1, 3)
data_new = np.zeros((data.shape[0], 2, 25, 25, 3))
data_new[:, :, :, ntu_to_sbu, :] = data
data_new[:, :, :, sbu_to_ntu[0], :] = data[:, :, :, sbu_to_ntu[1], :]
# print (data_new.shape)
data = data_new
elif FLAGS.length_feature == 48: ## appending 'hip' keypoint
hip = (data[:, :, :, 9:10, :] + data[:, :, :, 12:13, :]) / 2.0
data = np.concatenate([data, hip], axis=3) # (N, 2, 25, 15 + 1, 3)
else:
raise Exception("SBU dataset not support this feature length: ", FLAGS.length_feature)
N, M, T, V, C = data.shape
for seq, label in tqdm(zip(data, labels)):
seq = seq.reshape(M, T, -1)
seq[0], seq[1] = normalize_video(seq[0]), normalize_video(seq[1])
            output.append(np.concatenate([seq[0], seq[1]]))  # both persons form the two BERT segments
label_output.append(label)
else: ## for NTU
N, C, T, V, M = data.shape
for seq, label, frame_num in tqdm(zip(data, labels, sample_length)):
seq = seq.transpose((3,1,2,0)).reshape((M, T, -1))[0] # only use first person
# normalize first then downsample
seq = normalize_video(seq)
if frame_num <= maxT:
seq = seq[:maxT]
else:
## sample 'self.maxT' frames
s = frame_num // maxT
seq = seq[::s][:maxT]
## sampling points as SBU format
if FLAGS.length_feature == 48:
seq = np.reshape(seq, (maxT, V, C))[:, ntu_to_sbu, :].reshape(maxT, FLAGS.length_feature)
output.append(seq)
label_output.append(label)
return output, label_output
def generate_masked_sample(input_, mode):
output_ = [e for e in input_.copy()]
masked_lm_position = []
masked_lm_ids = []
masked_lm_weights = []
input_mask = [1 for _ in input_]
segment_ids = [0 for _ in range(len(input_)//2)] + [1 for _ in range(len(input_)//2)]
if 'finetune' not in mode and len(input_)!=0:
factor = FLAGS.mask_length
select_num = int(len(input_)*FLAGS.masked_lm_prob)//factor if int(len(input_)*FLAGS.masked_lm_prob)//factor!=0 else 1
try:
masked_index = random.sample(range( (len(input_)) // factor), select_num)
except:
print(range((len(input_))//factor), select_num)
for i in masked_index:
masked_lm_position += list(range(factor*i, factor*(i+1)))
# output_: list of [2*T, 75]
motion = np.zeros((FLAGS.sub_seq_num, FLAGS.length_feature))
for t in range(1, FLAGS.sub_seq_num):
motion[t - 1] = output_[t] - output_[t - 1]
"""Motion direction prediction data generation.
"""
for pos in masked_lm_position:
flow = motion[pos].reshape(FLAGS.length_feature//3, 3)
x, y, z = flow[:, 0] > 0, flow[:, 1] > 0, flow[:, 2] > 0
label = 1 * x + 2 * y + 4 * z
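            # x, y, z are per-joint sign bits of the frame-to-frame displacement, so label is a
            # 3-bit octant id in [0, 7]; the offset below gives every joint its own block of 8 classes.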
offset = np.arange(FLAGS.length_feature//3) * 8 # every joint has 8 class
# print(label)
masked_lm_ids.append(label + offset)
"""Skeleton inpainting data generation.
"""
# for ele in masked_lm_position:
# masked_frame = None
# if random.random() < 0.8: # 80% of the time, replace with
# masked_frame = np.zeros(FLAGS.length_feature, dtype=np.float32)
# else:
# if random.random() < 0.5: # 10% of the time, keep original
# masked_frame = output_[ele]
# else: # 10% of the time, replace with random word
# masked_frame = output_[random.randint(0, len(output_) - 1)]
# masked_lm_ids.append(output_[ele])
# output_[ele] = masked_frame
masked_lm_weights = [1 for _ in masked_lm_ids]
return output_, segment_ids, masked_lm_position, input_mask, masked_lm_ids, masked_lm_weights
def add_CLS_SEP(sub_seq_, masked_lm_position, input_mask):
"""Add [CLS] and [SEP] tokens into sequence,
e.g. { [CLS], s_10, s_11, ..., s_1T, [SEP], s_20, s_21, ..., s_2T, [SEP] }
where 'T' is the length of each person.
"""
length_init = len(sub_seq_)
sub_seq_.insert(0, -1.0*np.ones(FLAGS.length_feature, dtype = np.float32))
sub_seq_.insert(length_init//2+1, 1.0*np.ones(FLAGS.length_feature, dtype = np.float32))
sub_seq_.append(np.ones(FLAGS.length_feature, dtype = np.float32))
for i in range(len(masked_lm_position)):
if masked_lm_position[i] >= length_init//2:
masked_lm_position[i] += 1
masked_lm_position = [e+1 for e in masked_lm_position]
input_mask += [1, 1, 1]
return sub_seq_, masked_lm_position, input_mask
def padding(data):
#[sub_seq_, input_mask, segment_ids, masked_lm_position, masked_lm_ids, masked_lm_weights, action_labels]
# 0 1 2 3 4 5 6
data[0] += [np.zeros(FLAGS.length_feature, dtype=np.float32) for _ in range(FLAGS.max_seq_length-len(data[0]))]
data[1] += [0 for _ in range(FLAGS.max_seq_length-len(data[1]))]
data[2] += [0 for _ in range(FLAGS.max_seq_length-len(data[2]))]
data[3] += [0 for _ in range(FLAGS.max_predictions_per_seq-len(data[3]))]
data[4] += [np.zeros((FLAGS.length_feature // 3), dtype=np.int64) for _ in range(FLAGS.max_predictions_per_seq-len(data[4]))]
data[5] += [0 for _ in range(FLAGS.max_predictions_per_seq-len(data[5]))]
return data
def save_to_tfrecorder(data, writer):
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
#[sub_seq_, input_mask, segment_ids, masked_lm_position, masked_lm_ids, masked_lm_weights, action_labels]
# 0 1 2 3 4 5 6
data[0] = np.stack(data[0]).astype(np.float32).reshape((-1))
data[1] = np.stack(data[1]).astype(np.int64)
data[2] = np.stack(data[2]).astype(np.int64)
data[3] = np.stack(data[3]).astype(np.int64)
    if np.issubdtype(data[4][0].dtype, np.integer):
        # for motion, it's classification, so labels are 'Int'
        data[4] = np.stack(data[4])
import pandas as pd
import numpy as np
import os
import csv
import pickle
import h5sparse
import scipy.sparse as ss
from itertools import compress
def write_gvm(gvm, output_fname, fmt='h5'):
'''
Writes a gvm to a .csv or .h5 file.
Parameters:
gvm (h5sparse): gvm to write
output_fname (str): file path to save to
fmt: format to save as. either 'csv' or 'h5'
Returns:
None
'''
if fmt == 'csv':
temp = pd.DataFrame(gvm['gvm'].todense())
if ('idx' not in gvm) or np.all(np.array(gvm['idx'] == None)):
gvm['idx'] = np.array(range(gvm['gvm'].shape[0]))
if ('col' not in gvm) or np.all(np.array(gvm['col'] == None)):
gvm['col'] = np.array(range(gvm['gvm'].shape[1]))
temp.index = gvm['idx']
temp.columns = gvm['col']
gvm = temp
gvm = gvm.replace(to_replace=False, value='')
gvm.to_csv(output_fname, sep='\t')
elif fmt == 'h5':
if type(gvm) == pd.DataFrame:
gvm = {'gvm': ss.csc_matrix(gvm.values),
'idx': gvm.index.values,
'col': gvm.columns.values}
if ('idx' not in gvm) or np.all(np.array(gvm['idx'] == None)):
gvm['idx'] = np.array(range(gvm['gvm'].shape[0]))
if ('col' not in gvm) or np.all(np.array(gvm['col'] == None)):
gvm['col'] = np.array(range(gvm['gvm'].shape[1]))
if not ( np.all([isinstance(i, int) for i in gvm['idx']]) or
np.all([isinstance(i, float) for i in gvm['idx']]) ):
try: gvm['idx'] = np.array([i.encode('utf-8','ignore') for i in gvm['idx']], dtype=np.string_)
except AttributeError: gvm['idx'] = np.array(gvm['idx'], dtype=np.string_)
if not ( np.all([isinstance(i, int) for i in gvm['col']]) or
np.all([isinstance(i, float) for i in gvm['col']]) ):
try: gvm['col'] = np.array([i.encode('utf-8','ignore') for i in gvm['col']], dtype=np.string_)
            except AttributeError: gvm['col'] = np.array(gvm['col'], dtype=np.string_)
with h5sparse.File(output_fname, 'w') as h:
h.create_dataset('gvm', data=gvm['gvm'])
h.create_dataset('idx', data=np.array(gvm['idx']))
h.create_dataset('col', data=np.array(gvm['col']))
else: raise ValueError('unrecognized format %s'%fmt)
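# A minimal round trip, as a sketch (file name and toy data are assumed):
#   gvm = {'gvm': ss.csr_matrix(np.eye(3, dtype=bool)),
#          'idx': ['set_a', 'set_b', 'set_c'], 'col': ['GENE1', 'GENE2', 'GENE3']}
#   write_gvm(gvm, 'example_gvm.h5', fmt='h5')
#   gvm_reloaded = open_gvm('example_gvm.h5')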
def open_gvm(fname):
'''
Opens and returns the gvm at `fname`.
Parameters:
fname (str): The name of the file to be obtained as a gvm.
Returns:
gvm (h5sparse): gvm
'''
with h5sparse.File(fname, 'r') as h:
data = h['gvm'][()]
if 'idx' in h:
idx = h['idx'][()]
try: idx = [i.decode('utf-8','ignore') for i in idx]
except: pass
else: idx = None
if 'col' in h:
col = h['col'][()]
try: col = [i.decode('utf-8','ignore') for i in col]
except: pass
else: col = None
return {'gvm': data, 'idx': np.array(idx), 'col': np.array(col)}
def get_gvm_size(fname):
'''
Returns the gvm shape.
Parameters:
fname (str): filepath of gvm
Returns:
gvm_size (tuple): width and height of gvm array
'''
with h5sparse.File(fname, 'r') as h:
return h['gvm'].shape
def file_exists(fname, warning=True):
'''
Checks if a file exists or not. Returns True or False. Prints a statement if False.
Parameters:
fname (str): The name of the file to check for existence.
warning (bool): print statement if file already created?
Returns:
does_file_exist (bool):
'''
    if not isinstance(fname, (str, bytes, os.PathLike)): return False
if os.path.isfile(fname):
if warning: print('\t', fname, 'has already been created.')
return True
else: return False
def format_gene_names(names, pct_commas_threshold=.1, pct_uds_threshold=.1, verbose=True):
'''
For a list-like object of gene names, uppercases and removes whitespace.
Also removes suffixes if >10% names have commas or underscores.
Parameters:
names (list-like): list of gene names (str)
pct_commas_threshold (float): between 0 and 1. all pre-comma suffixes will be removed if more than this proportion
of gene names have commas.
pct_uds_threshold (float): between 0 and 1. all pre-underscore suffixes will be removed if more than this proportion
of gene names have underscores.
Returns:
        names (list-like): list of formatted gene names
'''
# Uppercase and remove whitespace.
names = np.char.strip(np.char.upper(np.array(names, dtype='str')))
# Remove suffixes, if > 10% of gene names have commas.
n_commas = sum([',' in i for i in names])
if n_commas > len(names) * pct_commas_threshold:
if verbose: print('\t %d of %d gene names have commas. Removing the suffixes.'%(n_commas, len(names)))
names = np.array([i.partition(',')[0] for i in names])
names = np.where(names == '', 'EMPTY GENE NAME', names)
# Same for underscores.
n_uds = sum(['_' in i for i in names])
if n_uds > len(names) * pct_uds_threshold:
if verbose: print('\t %d of %d gene names have underscores. Removing the suffixes.'%(n_uds, len(names)))
names = np.array([i.partition('_')[0] for i in names])
names = np.where(names == '', 'EMPTY GENE NAME', names)
return names
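# Example with hypothetical input: format_gene_names([' tp53, exon1 ', 'brca1_v2'])
# uppercases and strips the names, and since more than 10% contain commas/underscores the
# suffixes are dropped, returning array(['TP53', 'BRCA1']).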
def transpose_gvm(gvm):
'''
Transposes and returns a gvm.
'''
gvm['gvm'] = gvm['gvm'].transpose()
gvm['idx'], gvm['col'] = (gvm['col'], gvm['idx'])
return gvm
def merge_updn_gvms(gvm1, gvm2):
'''
Concatenates gvm1 and gvm2, taking the union of the gene sets for repeated terms.
gvm1 and gvm2 should have identical column gene-indices.
Returns:
gvm (h5sparse)
'''
if len(gvm1['col']) != len(gvm2['col']): raise ValueError('gvms must have same column length.')
if np.any(gvm1['col'] != gvm2['col']): raise ValueError('gene column-index must be identical.')
n_genes = gvm1['gvm'].shape[1]
missing_from_gvm2 = gvm1['idx'][~np.isin(gvm1['idx'], gvm2['idx'])]
missing_from_gvm1 = gvm2['idx'][~np.isin(gvm2['idx'], gvm1['idx'])]
# Pad missing terms for gvm1 and gvm2.
gvm1['idx'] = np.append(gvm1['idx'], missing_from_gvm1)
missing_rows = ss.csr_matrix( np.full(fill_value=False, shape=(len(missing_from_gvm1), n_genes)) )
gvm1['gvm'] = ss.csr_matrix( ss.vstack([gvm1['gvm'], missing_rows]) )
idx_sorting = np.argsort(gvm1['idx'])
gvm1['gvm'] = gvm1['gvm'][idx_sorting,:]
gvm1['idx'] = gvm1['idx'][idx_sorting]
    gvm2['idx'] = np.append(gvm2['idx'], missing_from_gvm2)
from sklearn.preprocessing import Normalizer
from pandas import read_csv
from numpy import set_printoptions
import numpy as np
# -- Normalize using sklearn--L1 norm -- #
print('\nNormalize using sklearn--L1 norm')
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data_frame = read_csv(filename, names=names)
array = data_frame.values
# Separate array into input and output components
X = array[:, 0:8]
Y = array[:, 8]
# L1 norm
scaler = Normalizer(norm='l1').fit(X)
normalizedX = scaler.transform(X)
# Summarize transformed data
set_printoptions(precision=3)
print(normalizedX[0:5, :])
# -- Normalize from scratch--L1 norm -- #
# L1 norm
print('\nNormalize from scratch--L1 norm')
norms = np.abs(X).sum(axis=1)
X_normalized = X / norms[:, np.newaxis]
print(X_normalized[0:5, :])
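# Each row of X_normalized above should match the sklearn L1 output: absolute values in a row sum to 1.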
# -- Normalize using sklearn--L2 norm -- #
# L2 norm
print('\nNormalize using sklearn--L2 norm')
scaler = Normalizer(norm='l2').fit(X)
normalizedX = scaler.transform(X)
# Summarize transformed data
set_printoptions(precision=3)
print(normalizedX[0:5, :])
# -- Normalize from scratch--L2 norm -- #
# L2 norm
print('\nNormalize from scratch--L2 norm')
norms = np.einsum('ij,ij->i', X, X)
np.sqrt(norms, norms)  # in-place square root gives each row's L2 norm
X_normalized = X / norms[:, np.newaxis]
print(X_normalized[0:5, :])
import logging
import random
import numpy as np
import torch
from .model.pytorch_pretrained import BertAdam, warmup_linear
def set_random_seed(seed=42, use_cuda=True):
"""Seed all random number generators to enable repeatable runs"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if use_cuda:
torch.cuda.manual_seed_all(seed)
def to_numpy(X):
"""
Convert input to numpy ndarray
"""
if hasattr(X, 'iloc'): # pandas
return X.values
elif isinstance(X, list): # list
        return np.array(X)
import os, glob, logging
import cPickle as pickle
from functools import partial
import random
import math
import multiprocessing as mp
import multiprocessing.sharedctypes as mps
from contextlib import closing
import numpy as n
from scipy.special import erf
import scipy.stats.mstats as mstats
import rtpipe.parsems as pm
import rtpipe.parsecal as pc
import rtpipe.parsesdm as ps
from rtpipe.version import __version__
import rtlib_cython as rtlib
import pyfftw
try:
import casautil
except ImportError:
import pwkit.environments.casa.util as casautil
# setup CASA and logging
qa = casautil.tools.quanta()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.captureWarnings(True)
logger = logging.getLogger('rtpipe')
def pipeline(d, segments):
""" Transient search pipeline running on single node.
Processes one or more segments of data (in which a single bgsub, (u,v,w), etc. can be used).
Can search completely, independently, and saves candidates.
If segments is a list of segments, then it will parallelize read/search processes.
Stages:
0) Take dictionary that defines metadata and search params
-- This defines state of pipeline, including times, uv extent, pipeline search parameters, etc.
1) Read data
-- Overlapping reads needed to maintain sensitivity to all DMs at all times
2) Prepare data
-- Reads/applies telcal/CASA solutions, flags, bg time subtraction
3) Search using all threads
-- Option for plug-and-play detection algorithm and multiple filters
4) Save candidate and noise info, if requested
"""
if type(segments) == int:
segments = [segments]
logger.info('Starting search of %s, scan %d, segments %s' % (d['filename'], d['scan'], str(segments)))
assert os.path.exists(d['gainfile']), 'Calibration file autodetection failed for gainfile {0}'.format(d['gainfile'])
# seed the pseudo-random number generator # TJWL
random.seed()
# set up shared arrays to fill
data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2); data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); u_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); v_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); w_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
# need these if debugging
data = numpyview(data_mem, 'complex64', datashape(d)) # optional
data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional
u = numpyview(u_mem, 'float32', d['nbl'], raw=False)
v = numpyview(v_mem, 'float32', d['nbl'], raw=False)
w = numpyview(w_mem, 'float32', d['nbl'], raw=False)
# plan fft
logger.debug('Planning FFT...')
arr = pyfftw.empty_aligned((d['npixx'], d['npixy']), dtype='complex64', n=16)
arr[:] = n.random.randn(*arr.shape) + 1j*n.random.randn(*arr.shape)
fft_arr = pyfftw.interfaces.numpy_fft.ifft2(arr)
results = {}
# only one needed for parallel read/process. more would overwrite memory space
with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool:
try:
# submit all segments to pool of 1. locking data should keep this from running away.
for segment in segments:
assert segment in range(d['nsegments']), 'Segment %d not in range of %d nsegments' % (segment, d['nsegments'])
candsfile = getcandsfile(d, segment)
if d['savecands'] and os.path.exists(candsfile):
logger.error('candsfile %s already exists. Ending processing...' % candsfile)
else:
results[segment] = readpool.apply_async(pipeline_dataprep, (d, segment)) # no need for segment here? need to think through structure...
# step through pool of jobs and pull data off as ready. this allows pool to continue to next segment.
while results.keys():
for segment in results.keys():
if results[segment].ready():
job = results.pop(segment)
d = job.get()
else:
continue
with data_mem.get_lock():
cands = search(d, data_mem, u_mem, v_mem, w_mem)
# save candidate info
if d['savecands']:
logger.info('Saving %d candidates for segment %d...'
% (len(cands), segment))
savecands(d, cands)
except KeyboardInterrupt:
logger.error('Caught Ctrl-C. Closing processing pool.')
readpool.terminate()
readpool.join()
raise
def pipeline_dataprep(d, segment):
""" Single-threaded pipeline for data prep that can be started in a pool.
"""
logger.debug('dataprep starting for segment %d' % segment)
# dataprep reads for a single segment, so d['segment'] defined here
d['segment'] = segment
# set up numpy arrays, as expected by dataprep functions
data_read = numpyview(data_read_mem, 'complex64', datashape(d), raw=False); data = numpyview(data_mem, 'complex64', datashape(d), raw=False)
u_read = numpyview(u_read_mem, 'float32', d['nbl'], raw=False); u = numpyview(u_mem, 'float32', d['nbl'], raw=False)
v_read = numpyview(v_read_mem, 'float32', d['nbl'], raw=False); v = numpyview(v_mem, 'float32', d['nbl'], raw=False)
w_read = numpyview(w_read_mem, 'float32', d['nbl'], raw=False); w = numpyview(w_mem, 'float32', d['nbl'], raw=False)
#### #### #### ####
# 1) Read data
#### #### #### ####
with data_read_mem.get_lock():
if d['dataformat'] == 'ms': # CASA-based read
segread = pm.readsegment(d, segment)
data_read[:] = segread[0]
(u_read[:], v_read[:], w_read[:]) = (segread[1][d['readints']/2], segread[2][d['readints']/2], segread[3][d['readints']/2]) # mid int good enough for segment. could extend this to save per chunk
del segread
elif d['dataformat'] == 'sdm':
data_read[:] = ps.read_bdf_segment(d, segment)
(u_read[:], v_read[:], w_read[:]) = ps.get_uvw_segment(d, segment)
#### #### #### ####
# 2) Prepare data
#### #### #### ####
# calibrate data
if os.path.exists(d['gainfile']):
try:
radec = (); spwind = []; calname = '' # set defaults
if '.GN' in d['gainfile']: # if telcal file
if d.has_key('calname'):
calname = d['calname']
sols = pc.telcal_sol(d['gainfile']) # parse gainfile
else: # if CASA table
if d.has_key('calradec'):
radec = d['calradec'] # optionally defined cal location
spwind = d['spw']
sols = pc.casa_sol(d['gainfile'], flagants=d['flagantsol']) # parse gainfile
sols.parsebp(d['bpfile']) # parse bpfile
# if gainfile parsed ok, choose best solution for data
sols.set_selection(d['segmenttimes'][segment].mean(), d['freq']*1e9, rtlib.calc_blarr(d), calname=calname, pols=d['pols'], radec=radec, spwind=spwind)
sols.apply(data_read)
except:
logger.warning('Could not parse or apply gainfile %s.' % d['gainfile'])
raise
else:
logger.warn('Calibration file not found. Proceeding with no calibration applied.')
# flag data
if len(d['flaglist']):
logger.info('Flagging with flaglist: %s' % d['flaglist'])
dataflag(d, data_read)
else:
logger.warn('No real-time flagging.')
# mean t vis subtration
if d['timesub'] == 'mean':
logger.info('Subtracting mean visibility in time...')
rtlib.meantsub(data_read, [0, d['nbl']])
else:
logger.warn('No mean time subtraction.')
# save noise pickle
if d['savenoise']:
noisepickle(d, data_read, u_read, v_read, w_read, chunk=200)
# phase to new location if l1,m1 set and nonzero value
try:
if any([d['l1'], d['m1']]):
logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (d['l1'], d['m1']))
rtlib.phaseshift_threaded(data_read, d, d['l1'], d['m1'], u_read, v_read)
d['l0'] = d['l1']
d['m0'] = d['m1']
else:
logger.debug('Not rephasing.')
except KeyError:
pass
if d['mock']: # could be list or int
# assume that std of vis in the middle of the segment is
# characteristic of noise throughout the segment
falsecands = {}
datamid = n.ma.masked_equal(data_read[d['readints']/2].real, 0, copy=True)
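        # 1.4826 converts the median absolute deviation to an estimate of the Gaussian sigma;
        # dividing by sqrt(npol*nbl*nchan) gives the noise expected after averaging that many visibilities.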
madstd = 1.4826 * n.ma.median(n.abs(datamid - n.ma.median(datamid)))/n.sqrt(d['npol']*d['nbl']*d['nchan'])
std = datamid.std()/n.sqrt(d['npol']*d['nbl']*d['nchan'])
logger.debug('Noise per vis in central int: madstd {}, std {}'.format(madstd, std))
dt = 1 # pulse width in integrations
if isinstance(d['mock'], int):
for i in n.random.randint(d['datadelay'][-1], d['readints'], d['mock']): # add nmock transients at random ints
(loff, moff, A, DM) = make_transient(madstd, max(d['dmarr']), Amin=1.2*d['sigma_image1'])
candid = (int(segment), int(i), DM, int(dt), int(0))
falsecands[candid] = [A/madstd, A, loff, moff]
elif isinstance(d['mock'], list):
for mock in d['mock']:
try:
(i, DM, loff, moff, SNR) = mock
candid = (int(segment), int(i), DM, int(dt), int(0))
falsecands[candid] = [SNR, SNR*madstd, loff, moff]
except:
logger.warn('Could not parse mock parameters: {}'.format(mock))
else:
logger.warn('Not a recognized type for mock.')
for candid in falsecands:
(segment, i, DM, dt, beamnum) = candid
(SNR, A, loff, moff) = falsecands[candid]
logger.info('Adding mock transient at int %d, DM %.1f, (l, m) = (%f, %f) at est SNR %.1f' % (i, DM, loff, moff, SNR))
add_transient(d, data_read, u_read, v_read, w_read, loff, moff, i, A, DM, dt)
if d['savecands']:
savecands(d, falsecands, domock=True)
with data_mem.get_lock():
data[:] = data_read[:]
u[:] = u_read[:]; v[:] = v_read[:]; w[:] = w_read[:]
logger.debug('All data unlocked for segment %d' % segment)
# d now has segment keyword defined
return d
def pipeline_reproduce(d, candloc=[], segment=None, lm=None, product='data'):
""" Reproduce data and/or candidates with given candloc or lm coordinate.
d and segment can be given, if only reading data.
candloc is length 5 or 6 with ([scan], segment, candint, dmind, dtind, beamnum).
product can be 'data', 'dataph', 'imdata', 'datacorr'.
lm is tuple of (l,m) coordinates in radians.
"""
# set up shared arrays to fill
data_reproduce_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
u_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
v_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
w_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
# get numpy views of memory spaces
data = numpyview(data_mem, 'complex64', datashape(d)) # optional
data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional
u = numpyview(u_mem, 'float32', d['nbl'], raw=False)
v = numpyview(v_mem, 'float32', d['nbl'], raw=False)
w = numpyview(w_mem, 'float32', d['nbl'], raw=False)
# set up state dict for merge pkl
if len(candloc) == 6:
scan, segment, candint, dmind, dtind, beamnum = candloc
# this is now defined by call to rtpipe.set_pipeline in parsecands.plot_cand
# d['scan'] = scan
# d['starttime_mjd'] = d['starttime_mjddict'][scan]
# d['nsegments'] = len(d['segmenttimesdict'][scan])
# d['segmenttimes'] = d['segmenttimesdict'][scan]
elif len(candloc) == 5: # if not a merge pkl, then d['scan'] is correct
segment, candint, dmind, dtind, beamnum = candloc
elif isinstance(segment, int):
assert product == 'data', 'If only providing segment, then only data product can be produced.'
else:
logger.error('candloc must be length 5 or 6 or segment provided.')
return
with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool:
readpool.apply(pipeline_dataprep, (d, segment))
if product == 'data':
logger.info('Returning prepared data...')
return data
elif product == 'dataph':
logger.info('Reproducing data and phasing...')
assert lm, 'lm must be tuple with (l, m) coords in radians.'
data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind, lm=lm)
return data
elif product == 'datacorr':
logger.info('Reproducing data...')
data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind)
return data
elif product == 'imdata':
logger.info('Reproducing candidate...')
im, data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind, candint=candint)
return im, data
else:
logger.error('product must be data, dataph, or imdata.')
def meantsubpool(d, data_read):
""" Wrapper for mean visibility subtraction in time.
Doesn't work when called from pipeline using multiprocessing pool.
"""
logger.info('Subtracting mean visibility in time...')
data_read = numpyview(data_read_mem, 'complex64', datashape(d))
tsubpart = partial(rtlib.meantsub, data_read)
blranges = [(d['nbl'] * t/d['nthread'], d['nbl']*(t+1)/d['nthread']) for t in range(d['nthread'])]
with closing(mp.Pool(1, initializer=initreadonly, initargs=(data_read_mem,))) as tsubpool:
        tsubpool.map(tsubpart, blranges)
def dataflag(d, data_read):
""" Flagging data in single process
"""
for flag in d['flaglist']:
mode, sig, conv = flag
# resultlist = []
# with closing(mp.Pool(4, initializer=initreadonly, initargs=(data_read_mem,))) as flagpool:
for ss in d['spw']:
chans = n.arange(d['spw_chanr_select'][ss][0], d['spw_chanr_select'][ss][1])
for pol in range(d['npol']):
status = rtlib.dataflag(data_read, chans, pol, d, sig, mode, conv)
logger.info(status)
# hack to get rid of bad spw/pol combos whacked by rfi
if 'badspwpol' in d:
logger.info('Comparing overall power between spw/pol. Removing those with %d times typical value' % d['badspwpol'])
spwpol = {}
for spw in d['spw']:
chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1])
for pol in range(d['npol']):
spwpol[(spw, pol)] = n.abs(data_read[:,:,chans,pol]).std()
meanstd = n.mean(spwpol.values())
for (spw,pol) in spwpol:
if spwpol[(spw, pol)] > d['badspwpol']*meanstd:
logger.info('Flagging all of (spw %d, pol %d) for excess noise.' % (spw, pol))
chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1])
data_read[:,:,chans,pol] = 0j
def dataflagatom(chans, pol, d, sig, mode, conv):
""" Wrapper function to get shared memory as numpy array into pool
Assumes data_mem is global mps.Array
"""
data = numpyview(data_mem, 'complex64', datashape(d))
# data = n.ma.masked_array(data, data==0j) # this causes massive overflagging on 14sep03 data
return rtlib.dataflag(data, chans, pol, d, sig, mode, conv)
def search(d, data_mem, u_mem, v_mem, w_mem):
""" Search function.
Queues all trials with multiprocessing.
Assumes shared memory system with single uvw grid for all images.
"""
data = numpyview(data_mem, 'complex64', datashape(d))
u = numpyview(u_mem, 'float32', d['nbl'])
v = numpyview(v_mem, 'float32', d['nbl'])
w = numpyview(w_mem, 'float32', d['nbl'])
data_resamp_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2)
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
logger.debug('Search of segment %d' % d['segment'])
beamnum = 0 # not yet implemented
cands = {}
candsfile = getcandsfile(d)
if d['savecands'] and os.path.exists(candsfile):
logger.warn('candsfile %s already exists' % candsfile)
return cands
# make wterm kernels
if d['searchtype'] == 'image2w':
wres = 100
npix = max(d['npixx_full'], d['npixy_full'])
bls, uvkers = rtlib.genuvkernels(w, wres, npix, d['uvres'], thresh=0.05)
# SUBMITTING THE LOOPS
    if n.any(data):
"""Strength of Connection functions.
Requirements for the strength matrix C are:
1) Nonzero diagonal whenever A has a nonzero diagonal
2) Non-negative entries (float or bool) in [0,1]
3) Large entries denoting stronger connections
4) C denotes nodal connections, i.e., if A is an nxn BSR matrix with
row block size of m, then C is (n/m) x (n/m)
"""
from warnings import warn
import numpy as np
from scipy import sparse
from . import amg_core
from .relaxation.relaxation import jacobi
from .util.linalg import approximate_spectral_radius
from .util.utils import (scale_rows_by_largest_entry, amalgamate, scale_rows,
get_block_diag, scale_columns)
from .util.params import set_tol
def distance_strength_of_connection(A, V, theta=2.0, relative_drop=True):
"""Distance based strength-of-connection.
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
V : array
Coordinates of the vertices of the graph of A
relative_drop : bool
If false, then a connection must be within a distance of theta
from a point to be strongly connected.
If true, then the closest connection is always strong, and other points
must be within theta times the smallest distance to be strong
Returns
-------
C : csr_matrix
C(i,j) = distance(point_i, point_j)
Strength of connection matrix where strength values are
distances, i.e. the smaller the value, the stronger the connection.
Sparsity pattern of C is copied from A.
Notes
-----
- theta is a drop tolerance that is applied row-wise
- If a BSR matrix given, then the return matrix is still CSR. The strength
is given between super nodes based on the BSR block size.
Examples
--------
>>> from pyamg.gallery import load_example
>>> from pyamg.strength import distance_strength_of_connection
>>> data = load_example('airfoil')
>>> A = data['A'].tocsr()
>>> S = distance_strength_of_connection(data['A'], data['vertices'])
"""
# Amalgamate for the supernode case
if sparse.isspmatrix_bsr(A):
sn = int(A.shape[0] / A.blocksize[0])
u = np.ones((A.data.shape[0],))
A = sparse.csr_matrix((u, A.indices, A.indptr), shape=(sn, sn))
if not sparse.isspmatrix_csr(A):
warn('Implicit conversion of A to csr', sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
dim = V.shape[1]
# Create two arrays for differencing the different coordinates such
# that C(i,j) = distance(point_i, point_j)
cols = A.indices
rows = np.repeat(np.arange(A.shape[0]), A.indptr[1:] - A.indptr[0:-1])
# Insert difference for each coordinate into C
C = (V[rows, 0] - V[cols, 0])**2
for d in range(1, dim):
C += (V[rows, d] - V[cols, d])**2
C = np.sqrt(C)
C[C < 1e-6] = 1e-6
C = sparse.csr_matrix((C, A.indices.copy(), A.indptr.copy()),
shape=A.shape)
# Apply drop tolerance
if relative_drop is True:
if theta != np.inf:
amg_core.apply_distance_filter(C.shape[0], theta, C.indptr,
C.indices, C.data)
else:
amg_core.apply_absolute_distance_filter(C.shape[0], theta, C.indptr,
C.indices, C.data)
C.eliminate_zeros()
C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
# Standardized strength values require small values be weak and large
# values be strong. So, we invert the distances.
C.data = 1.0 / C.data
# Scale C by the largest magnitude entry in each row
C = scale_rows_by_largest_entry(C)
return C
def classical_strength_of_connection(A, theta=0.0, norm='abs'):
"""Classical Strength Measure.
Return a strength of connection matrix using the classical AMG measure
An off-diagonal entry A[i,j] is a strong connection iff::
|A[i,j]| >= theta * max(|A[i,k]|), where k != i (norm='abs')
-A[i,j] >= theta * max(-A[i,k]), where k != i (norm='min')
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
theta : float
Threshold parameter in [0,1].
norm: 'string'
'abs' : to use the absolute value,
'min' : to use the negative value (see above)
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- A symmetric A does not necessarily yield a symmetric strength matrix S
- Calls C++ function classical_strength_of_connection
- The version as implemented is designed form M-matrices. Trottenberg et
al. use max A[i,k] over all negative entries, which is the same. A
positive edge weight never indicates a strong connection.
- See [2000BrHeMc]_ and [2001bTrOoSc]_
References
----------
.. [2000BrHeMc] <NAME>., <NAME>., <NAME>., "A multigrid
tutorial", Second edition. Society for Industrial and Applied
Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
.. [2001bTrOoSc] <NAME>., <NAME>., <NAME>., "Multigrid",
Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import classical_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = classical_strength_of_connection(A, 0.0)
"""
if sparse.isspmatrix_bsr(A):
blocksize = A.blocksize[0]
else:
blocksize = 1
if not sparse.isspmatrix_csr(A):
warn('Implicit conversion of A to csr', sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
if (theta < 0 or theta > 1):
raise ValueError('expected theta in [0,1]')
Sp = np.empty_like(A.indptr)
Sj = np.empty_like(A.indices)
Sx = np.empty_like(A.data)
if norm not in ('abs', 'min'):
raise ValueError('Unknown norm')
if norm == 'abs':
amg_core.classical_strength_of_connection_abs(
A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
elif norm == 'min':
amg_core.classical_strength_of_connection_min(
A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
if blocksize > 1:
S = amalgamate(S, blocksize)
# Strength represents "distance", so take the magnitude
S.data = np.abs(S.data)
# Scale S by the largest magnitude entry in each row
S = scale_rows_by_largest_entry(S)
return S
def symmetric_strength_of_connection(A, theta=0):
"""Symmetric Strength Measure.
Compute strength of connection matrix using the standard symmetric measure
An off-diagonal connection A[i,j] is strong iff::
abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )
Parameters
----------
A : csr_matrix
Matrix graph defined in sparse format. Entry A[i,j] describes the
strength of edge [i,j]
theta : float
Threshold parameter (positive).
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- For vector problems, standard strength measures may produce
undesirable aggregates. A "block approach" from Vanek et al. is used
to replace vertex comparisons with block-type comparisons. A
connection between nodes i and j in the block case is strong if::
||AB[i,j]|| >= theta * sqrt( ||AB[i,i]||*||AB[j,j]|| ) where AB[k,l]
is the matrix block (degrees of freedom) associated with nodes k and
l and ||.|| is a matrix norm, such a Frobenius.
- See [1996bVaMaBr]_ for more details.
References
----------
.. [1996bVaMaBr] <NAME>. and <NAME>. and <NAME>.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import symmetric_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = symmetric_strength_of_connection(A, 0.0)
"""
if theta < 0:
raise ValueError('expected a positive theta')
if not sparse.isspmatrix_csr(A) and not sparse.isspmatrix_bsr(A):
raise TypeError('expected csr_matrix or bsr_matrix')
if sparse.isspmatrix_csr(A):
# if theta == 0:
# return A
Sp = np.empty_like(A.indptr)
Sj = np.empty_like(A.indices)
Sx = np.empty_like(A.data)
fn = amg_core.symmetric_strength_of_connection
fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
elif sparse.isspmatrix_bsr(A):
M, N = A.shape
R, C = A.blocksize
if R != C:
raise ValueError('matrix must have square blocks')
if theta == 0:
data = np.ones(len(A.indices), dtype=A.dtype)
S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
shape=(int(M / R), int(N / C)))
else:
# the strength of connection matrix is based on the
# Frobenius norms of the blocks
data = (np.conjugate(A.data) * A.data).reshape(-1, R * C)
data = data.sum(axis=1)
A = sparse.csr_matrix((data, A.indices, A.indptr),
shape=(int(M / R), int(N / C)))
return symmetric_strength_of_connection(A, theta)
# Strength represents "distance", so take the magnitude
S.data = np.abs(S.data)
# Scale S by the largest magnitude entry in each row
S = scale_rows_by_largest_entry(S)
return S
def energy_based_strength_of_connection(A, theta=0.0, k=2):
"""Energy Strength Measure.
Compute a strength of connection matrix using an energy-based measure.
Parameters
----------
A : sparse-matrix
matrix from which to generate strength of connection information
theta : float
Threshold parameter in [0,1]
k : int
Number of relaxation steps used to generate strength information
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. The sparsity pattern
of S matches that of A. For BSR matrices, S is a reduced strength
of connection matrix that describes connections between supernodes.
Notes
-----
This method relaxes with weighted-Jacobi in order to approximate the
matrix inverse. A normalized change of energy is then used to define
point-wise strength of connection values. Specifically, let v be the
approximation to the i-th column of the inverse, then
(S_ij)^2 = <v_j, v_j>_A / <v, v>_A,
where v_j = v, such that entry j in v has been zeroed out. As is common,
larger values imply a stronger connection.
Current implementation is a very slow pure-python implementation for
experimental purposes, only.
See [2006BrBrMaMaMc]_ for more details.
References
----------
.. [2006BrBrMaMaMc] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
"An Energy-Based AMG Coarsening Strategy",
Numerical Linear Algebra with Applications,
vol. 13, pp. 133-148, 2006.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import energy_based_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = energy_based_strength_of_connection(A, 0.0)
"""
if theta < 0:
raise ValueError('expected a positive theta')
if not sparse.isspmatrix(A):
raise ValueError('expected sparse matrix')
if k < 0:
raise ValueError('expected positive number of steps')
if not isinstance(k, int):
raise ValueError('expected integer')
if sparse.isspmatrix_bsr(A):
bsr_flag = True
numPDEs = A.blocksize[0]
if A.blocksize[0] != A.blocksize[1]:
raise ValueError('expected square blocks in BSR matrix A')
else:
bsr_flag = False
# Convert A to csc and Atilde to csr
if sparse.isspmatrix_csr(A):
Atilde = A.copy()
A = A.tocsc()
else:
A = A.tocsc()
Atilde = A.copy()
Atilde = Atilde.tocsr()
# Calculate the weighted-Jacobi parameter
D = A.diagonal()
Dinv = 1.0 / D
Dinv[D == 0] = 0.0
Dinv = sparse.csc_matrix((Dinv, (np.arange(A.shape[0]),
np.arange(A.shape[1]))), shape=A.shape)
DinvA = Dinv * A
omega = 1.0 / approximate_spectral_radius(DinvA)
del DinvA
# Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
S = sparse.csc_matrix(A.shape, dtype=A.dtype) # empty matrix
Id = sparse.eye(A.shape[0], A.shape[1], format='csc')
for _i in range(k + 1):
S = S + omega * (Dinv * (Id - A * S))
# Calculate the strength entries in S column-wise, but only strength
# values at the sparsity pattern of A
for i in range(Atilde.shape[0]):
v = S[:, i].toarray()
v = v.ravel()
Av = A @ v
denom = np.sqrt(np.inner(v.conj(), Av))
# replace entries in row i with strength values
for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
col = Atilde.indices[j]
vj = v[col].copy()
v[col] = 0.0
# = (||v_j||_A - ||v||_A) / ||v||_A
val = np.sqrt(np.inner(v.conj(), A @ v)) / denom - 1.0
# Negative values generally imply a weak connection
if val > -0.01:
Atilde.data[j] = abs(val)
else:
Atilde.data[j] = 0.0
v[col] = vj
# Apply drop tolerance
Atilde = classical_strength_of_connection(Atilde, theta=theta)
Atilde.eliminate_zeros()
# Put ones on the diagonal
Atilde = Atilde + Id.tocsr()
Atilde.sort_indices()
# Amalgamate Atilde for the BSR case, using ones for all strong connections
if bsr_flag:
Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
nblocks = Atilde.indices.shape[0]
uone = np.ones((nblocks,))
Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
shape=(
int(Atilde.shape[0] / numPDEs),
int(Atilde.shape[1] / numPDEs)))
# Scale C by the largest magnitude entry in each row
Atilde = scale_rows_by_largest_entry(Atilde)
return Atilde
@np.deprecate
def ode_strength_of_connection(A, B=None, epsilon=4.0, k=2, proj_type='l2',
block_flag=False, symmetrize_measure=True):
"""Use evolution_strength_of_connection instead (deprecated)."""
return evolution_strength_of_connection(A, B, epsilon, k, proj_type,
block_flag, symmetrize_measure)
def evolution_strength_of_connection(A, B=None, epsilon=4.0, k=2,
proj_type='l2', block_flag=False,
symmetrize_measure=True):
"""Evolution Strength Measure.
Construct strength of connection matrix using an Evolution-based measure
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
B : string, array
If B=None, then the near nullspace vector used is all ones. If B is
an (NxK) array, then B is taken to be the near nullspace vectors.
epsilon : scalar
Drop tolerance
k : integer
ODE num time steps, step size is assumed to be 1/rho(DinvA)
proj_type : {'l2','D_A'}
Define norm for constrained min prob, i.e. define projection
block_flag : boolean
If True, use a block D inverse as preconditioner for A during
weighted-Jacobi
Returns
-------
Atilde : csr_matrix
Sparse matrix of strength values
See [2008OlScTu]_ for more details.
References
----------
.. [2008OlScTu] Olson, L. N., Schroder, J. B., Tuminaro, R. S.,
"A New Perspective on Strength Measures in Algebraic Multigrid",
submitted, June, 2008.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import evolution_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = evolution_strength_of_connection(A, np.ones((A.shape[0],1)))
"""
# ====================================================================
# Check inputs
if epsilon < 1.0:
raise ValueError('expected epsilon > 1.0')
if k <= 0:
raise ValueError('number of time steps must be > 0')
if proj_type not in ['l2', 'D_A']:
raise ValueError('proj_type must be "l2" or "D_A"')
if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
raise TypeError('expected csr_matrix or bsr_matrix')
# ====================================================================
# Format A and B correctly.
# B must be in mat format, this isn't a deep copy
if B is None:
Bmat = np.ones((A.shape[0], 1), dtype=A.dtype)
else:
Bmat = np.asarray(B)
# Pre-process A. We need A in CSR, to be devoid of explicit 0's and have
# sorted indices
if not sparse.isspmatrix_csr(A):
csrflag = False
numPDEs = A.blocksize[0]
D = A.diagonal()
# Calculate Dinv*A
if block_flag:
Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
np.arange(Dinv.shape[0] + 1)),
shape=A.shape)
Dinv_A = (Dinv * A).tocsr()
else:
Dinv = np.zeros_like(D)
mask = (D != 0.0)
Dinv[mask] = 1.0 / D[mask]
Dinv[D == 0] = 1.0
Dinv_A = scale_rows(A, Dinv, copy=True)
A = A.tocsr()
else:
csrflag = True
numPDEs = 1
D = A.diagonal()
Dinv = np.zeros_like(D)
mask = (D != 0.0)
Dinv[mask] = 1.0 / D[mask]
Dinv[D == 0] = 1.0
Dinv_A = scale_rows(A, Dinv, copy=True)
A.eliminate_zeros()
A.sort_indices()
# Handle preliminaries for the algorithm
dimen = A.shape[1]
NullDim = Bmat.shape[1]
# Get spectral radius of Dinv*A, this will be used to scale the time step
# size for the ODE
rho_DinvA = approximate_spectral_radius(Dinv_A)
# Calculate D_A for later use in the minimization problem
if proj_type == 'D_A':
D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
else:
D_A = sparse.eye(dimen, dimen, format='csr', dtype=A.dtype)
# Calculate (I - delta_t Dinv A)^k
# In order to later access columns, we calculate the transpose in
# CSR format so that columns will be accessed efficiently
# Calculate the number of time steps that can be done by squaring, and
# the number of time steps that must be done incrementally
nsquare = int(np.log2(k))
ninc = k - 2**nsquare
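# e.g. k = 5 gives nsquare = 2 (repeated squaring covers 2**2 = 4 of the
# steps) and ninc = 1 step left to apply incrementally.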
# Calculate one time step
Id = sparse.eye(dimen, dimen, format='csr', dtype=A.dtype)
Atilde = (Id - (1.0 / rho_DinvA) * Dinv_A)
Atilde = Atilde.T.tocsr()
# Construct a sparsity mask for Atilde that will restrict Atilde^T to the
# nonzero pattern of A, with the added constraint that row i of Atilde^T
# retains only the nonzeros that are also in the same PDE as i.
mask = A.copy()
# Restrict to same PDE
if numPDEs > 1:
row_length = np.diff(mask.indptr)
my_pde = np.mod(np.arange(dimen), numPDEs)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
print(text[:50])
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
# TODO: Implement Function
word_counts = Counter(text)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {i: word for i, word in enumerate(sorted_vocab)}
vocab_to_int = {word: i for i, word in enumerate(sorted_vocab)}
return vocab_to_int, int_to_vocab
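# A quick sanity check (illustrative only):
#   v2i, i2v = create_lookup_tables(['to', 'be', 'or', 'not', 'to', 'be'])
#   assert all(i2v[v2i[w]] == w for w in ['to', 'be', 'or', 'not'])
# The most frequent words receive the smallest integer ids.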
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
tokens = {}
tokens['.'] = '||Period||'
tokens[','] = '||Comma||'
tokens['\"'] = '||QuotationMark||'
tokens[';'] = '||Semicolon||'
tokens['!'] = '||ExclamationMark||'
tokens['?'] = '||QuestionMark||'
tokens['('] = '||LeftParentheses||'
tokens[')'] = '||RightParentheses||'
tokens['--'] = '||Dash||'
tokens['\n'] = '||Return||'
# TODO: Implement Function
return tokens
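# These tokens let the preprocessing step split punctuation away from words,
# e.g. "okay, bye!" becomes "okay ||Comma|| bye ||ExclamationMark||", so the
# network never has to treat "bye!" and "bye" as two different words.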
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
input = tf.placeholder(dtype=tf.int32,shape=(None,None),name="input")
targets = tf.placeholder(dtype=tf.int32,shape=(None,None),name="targets")
learning_rate = tf.placeholder(dtype=tf.float32,name="learning_rate")
return input, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
from tensorflow.contrib.rnn import MultiRNNCell
from tensorflow.contrib.rnn import BasicLSTMCell
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
# TODO: Implement Function
lstm_cell = BasicLSTMCell(rnn_size)
multi_rnn_cell = MultiRNNCell([lstm_cell])
init_state = multi_rnn_cell.zero_state(batch_size,tf.float32)
init_state = tf.identity(init_state,"initial_state")
return multi_rnn_cell, init_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
# Trainable embedding matrix of shape (vocab_size, embed_dim); looking the
# word ids in input_data up in it yields (batch, seq_len, embed_dim) vectors.
# (Pre-trained vectors such as GloVe could be loaded into this matrix instead,
# but that needs the vocab-to-id mapping, which this function does not receive.)
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1.0, 1.0))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
# TODO: Implement Function
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(state, "final_state")
return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
# TODO: Implement Function'
embedding = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embedding)
logits = tf.layers.dense(outputs, vocab_size)
return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
# TODO: Implement Function
big_serial_size = batch_size * seq_length
big_serial_count = len(int_text) // big_serial_size
int_text_input = int_text[0:big_serial_count * big_serial_size]
int_text_target = int_text[1:big_serial_count * big_serial_size + 1]
int_text_input = np.asarray(int_text_input, np.int32)
# encoding: utf-8
from __future__ import absolute_import, division, print_function
import warnings
from datetime import timedelta
import numpy as np
import sgp4.io
import sgp4.propagation
from astropy import time
from numpy import arctan, cos, degrees, sin, sqrt
from represent import ReprMixin
from scipy.constants import kilo, pi
from sgp4.earth_gravity import wgs72
from . import utilities as ou
from .maneuver import (
Maneuver, Operation, PropagateAnomalyBy, PropagateAnomalyTo)
from .utilities import *
J2000 = time.Time('J2000', scale='utc')
__all__ = [
'KeplerianElements',
]
class KeplerianElements(ReprMixin, object):
"""Defines an orbit using keplerian elements.
:param a: Semimajor axis [m]
:param e: Eccentricity [-]
:param i: Inclination [rad]
:param raan: Right ascension of ascending node (:math:`\Omega`) [rad]
:param arg_pe: Argument of periapsis (:math:`\omega`) [rad]
:param M0: Mean anomaly at `ref_epoch` (:math:`M_{0}`) [rad]
:param body: Reference body, e.g. earth
:type body: :py:class:`orbital.bodies.Body`
:param ref_epoch: Reference epoch
:type ref_epoch: :py:class:`astropy.time.Time`
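Example (illustrative values; any body from :py:mod:`orbital.bodies`,
e.g. ``earth``, can be used)::

    from orbital.bodies import earth
    orbit = KeplerianElements.with_altitude(400e3, body=earth)
    orbit.propagate_anomaly_by(M=pi)  # advance half an orbit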
"""
def __init__(self, a=None, e=0, i=0, raan=0, arg_pe=0, M0=0,
body=None, ref_epoch=J2000):
self._a = a
self.e = e
self.i = i
self.raan = raan
self.arg_pe = arg_pe
self.M0 = M0
self._M = M0
self.body = body
self.ref_epoch = ref_epoch
self._t = 0 # This is important because M := M0
super(KeplerianElements, self).__init__()
@classmethod
def with_altitude(cls, altitude, body, e=0, i=0, raan=0, arg_pe=0, M0=0,
ref_epoch=J2000):
"""Initialise with orbit for a given altitude.
For eccentric orbits, this is the altitude at the
reference anomaly, M0
"""
r = radius_from_altitude(altitude, body)
a = r * (1 + e * cos(true_anomaly_from_mean(e, M0))) / (1 - e ** 2)
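# This inverts the orbit equation r = a*(1 - e**2) / (1 + e*cos(f)) for the
# semimajor axis, with f evaluated at the reference mean anomaly M0.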
return cls(a=a, e=e, i=i, raan=raan, arg_pe=arg_pe, M0=M0, body=body,
ref_epoch=ref_epoch)
@classmethod
def with_period(cls, period, body, e=0, i=0, raan=0, arg_pe=0, M0=0,
ref_epoch=J2000):
"""Initialise orbit with a given period."""
ke = cls(e=e, i=i, raan=raan, arg_pe=arg_pe, M0=M0, body=body,
ref_epoch=ref_epoch)
ke.T = period
return ke
@classmethod
def with_apside_altitudes(cls, alt1, alt2, i=0, raan=0, arg_pe=0, M0=0,
body=None, ref_epoch=J2000):
"""Initialise orbit with given apside altitudes."""
altitudes = [alt1, alt2]
altitudes.sort()
pericenter_altitude = altitudes[0]
apocenter_altitude = altitudes[1]
apocenter_radius = radius_from_altitude(apocenter_altitude, body)
pericenter_radius = radius_from_altitude(pericenter_altitude, body)
a, e = elements_for_apsides(apocenter_radius, pericenter_radius)
return cls(a=a, e=e, i=i, raan=raan, arg_pe=arg_pe, M0=M0, body=body,
ref_epoch=ref_epoch)
@classmethod
def with_apside_radii(cls, radius1, radius2, i=0, raan=0, arg_pe=0, M0=0,
body=None, ref_epoch=J2000):
"""Initialise orbit with given apside radii."""
radii = [radius1, radius2]
radii.sort()
pericenter_radius = radii[0]
apocenter_radius = radii[1]
a, e = elements_for_apsides(apocenter_radius, pericenter_radius)
return cls(a=a, e=e, i=i, raan=raan, arg_pe=arg_pe, M0=M0, body=body,
ref_epoch=ref_epoch)
@classmethod
def from_state_vector(cls, r, v, body, ref_epoch=J2000):
"""Create orbit from given state vector."""
elements = elements_from_state_vector(r, v, body.mu)
self = cls(
a=elements.a,
e=elements.e,
i=elements.i,
raan=elements.raan,
arg_pe=elements.arg_pe,
M0=mean_anomaly_from_true(elements.e, elements.f),
body=body,
ref_epoch=ref_epoch)
# Fix mean anomaly at epoch for new orbit and position.
oldM0 = self.M0
self.M0 = ou.mod(self.M - self.n * self.t, 2 * pi)
assert self.M0 == oldM0
return self
@classmethod
def from_tle(cls, line1, line2, body):
"""Create object by parsing TLE using SGP4."""
# Get state vector at TLE epoch
sat = sgp4.io.twoline2rv(line1, line2, wgs72)
r, v = sgp4.propagation.sgp4(sat, 0)
ref_epoch = time.Time(sat.epoch, scale='utc')
# Convert km to m
r, v = np.array(r) * kilo, np.array(v) * kilo
return cls.from_state_vector(r, v, body=body, ref_epoch=ref_epoch)
@property
def epoch(self):
"""Current epoch calculated from time since ref_epoch."""
return self.ref_epoch + time.TimeDelta(self.t, format='sec')
@epoch.setter
def epoch(self, value):
"""Set epoch, adjusting current mean anomaly (from which
other anomalies are calculated).
"""
t = (value - self.ref_epoch).sec
self._M = self.M0 + self.n * t
self._M = ou.mod(self._M, 2 * pi)
self._t = t
@property
def t(self):
"""Time since ref_epoch."""
return self._t
@t.setter
def t(self, value):
"""Set time since ref_epoch, adjusting current mean anomaly (from which
other anomalies are calculated).
"""
self._M = self.M0 + self.n * value
self._M = ou.mod(self._M, 2 * pi)
self._t = value
@property
def M(self):
"""Mean anomaly [rad]."""
return self._M
@M.setter
def M(self, value):
warnings.warn('Setting anomaly does not set time, use KeplerianElements'
'.propagate_anomaly_to() instead.', OrbitalWarning)
self._M = ou.mod(value, 2 * pi)
@property
def E(self):
"""Eccentric anomaly [rad]."""
return eccentric_anomaly_from_mean(self.e, self._M)
@E.setter
def E(self, value):
warnings.warn('Setting anomaly does not set time, use KeplerianElements'
'.propagate_anomaly_to() instead.', OrbitalWarning)
self._M = mean_anomaly_from_eccentric(self.e, value)
@property
def f(self):
"""True anomaly [rad]."""
return true_anomaly_from_mean(self.e, self._M)
@f.setter
def f(self, value):
warnings.warn('Setting anomaly does not set time, use KeplerianElements'
'.propagate_anomaly_to() instead.', OrbitalWarning)
self._M = mean_anomaly_from_true(self.e, value)
@property
def a(self):
return self._a
@a.setter
def a(self, value):
"""Set semimajor axis and fix M0.
To fix self.M0, self.n is called. self.n is a function of self.a
This is safe, because the new value for self._a is set first, then
self.M0 is fixed.
"""
self._a = value
self.M0 = ou.mod(self.M - self.n * self.t, 2 * pi)
@property
def r(self):
"""Position vector (:py:class:`orbital.utilities.Position`) [m]."""
pos = orbit_radius(self.a, self.e, self.f) * self.U
return Position(x=pos[0], y=pos[1], z=pos[2])
@property
def v(self):
"""Velocity vector (:py:class:`orbital.utilities.Velocity`) [m/s]."""
r_dot = sqrt(self.body.mu / self.a) * (self.e * sin(self.f)) / sqrt(1 - self.e ** 2)
rf_dot = sqrt(self.body.mu / self.a) * (1 + self.e * cos(self.f)) / sqrt(1 - self.e ** 2)
vel = r_dot * self.U + rf_dot * self.V
return Velocity(x=vel[0], y=vel[1], z=vel[2])
@v.setter
def v(self, value):
"""Set velocity by altering orbital elements.
This method uses 3 position variables, and 3 velocity
variables to set the 6 orbital elements.
"""
r, v = self.r, value
elements = elements_from_state_vector(r, v, self.body.mu)
self._a = elements.a
self.e = elements.e
self.i = elements.i
self.raan = elements.raan
self.arg_pe = elements.arg_pe
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=OrbitalWarning)
self.f = elements.f
# Fix mean anomaly at epoch for new orbit and position.
self.M0 = ou.mod(self.M - self.n * self.t, 2 * pi)
# Now check that the computed properties for position and velocity are
# reasonably close to the inputs.
# 1e-4 is a large uncertainty, but we don't want to throw an error
# within small differences (e.g. 1e-4 m is 0.1 mm)
if (abs(self.v - v) > 1e-4).any() or (abs(self.r - r) > 1e-4).any():
raise RuntimeError(
'Failed to set orbital elements for velocity. Please file a bug'
' report at https://github.com/RazerM/orbital/issues')
@property
def n(self):
"""Mean motion [rad/s]."""
return sqrt(self.body.mu / self.a ** 3)
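# (Kepler's third law: T = 2*pi*sqrt(a**3 / mu), hence n = 2*pi/T = sqrt(mu / a**3).)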
@n.setter
def n(self, value):
"""Set mean motion by adjusting semimajor axis."""
self.a = (self.body.mu / value ** 2) ** (1 / 3)
@property
def T(self):
"""Period [s]."""
return 2 * pi / self.n
@T.setter
def T(self, value):
"""Set period by adjusting semimajor axis."""
self.a = (self.body.mu * value ** 2 / (4 * pi ** 2)) ** (1 / 3)
@property
def fpa(self):
return arctan(self.e * sin(self.f) / (1 + self.e * cos(self.f)))
def propagate_anomaly_to(self, **kwargs):
"""Propagate to time in future where anomaly is equal to value passed in.
:param M: Mean anomaly [rad]
:param E: Eccentric anomaly [rad]
:param f: True anomaly [rad]
This will propagate to a maximum of 1 orbit ahead.
.. note::
Only one parameter should be passed in.
"""
operation = PropagateAnomalyTo(**kwargs)
self.apply_maneuver(operation)
def propagate_anomaly_by(self, **kwargs):
"""Propagate to time in future by an amount equal to the anomaly passed in.
:param M: Mean anomaly [rad]
:param E: Eccentric anomaly [rad]
:param f: True anomaly [rad]
.. note::
Only one parameter should be passed in.
"""
operation = PropagateAnomalyBy(**kwargs)
self.apply_maneuver(operation)
def __getattr__(self, attr):
"""Dynamically respond to correct apsis names for given body."""
if not attr.startswith('__'):
for apoapsis_name in self.body.apoapsis_names:
if attr == '{}_radius'.format(apoapsis_name):
return self.apocenter_radius
elif attr == '{}_altitude'.format(apoapsis_name):
return self.apocenter_altitude
for periapsis_name in self.body.periapsis_names:
if attr == '{}_radius'.format(periapsis_name):
return self.pericenter_radius
elif attr == '{}_altitude'.format(periapsis_name):
return self.pericenter_altitude
raise AttributeError(
"'{name}' object has no attribute '{attr}'"
.format(name=type(self).__name__, attr=attr))
def apply_maneuver(self, maneuver, iter=False, copy=False):
""" Apply maneuver to orbit.
:param maneuver: Maneuver
:type maneuver: :py:class:`maneuver.Maneuver`
:param bool iter: Return an iterator.
:param bool copy: Each orbit yielded by the generator will be a copy.
If :code:`iter=True`, the returned iterator is of each intermediate orbit
and the next operation, as shown in this table:
+-------------------------------------+------------------+
| Orbit | Operation |
+=====================================+==================+
| Original orbit | First operation |
+-------------------------------------+------------------+
| Orbit after first operation applied | Second operation |
+-------------------------------------+------------------+
The final orbit is not returned, as it is accessible after the method has completed.
If each orbit returned must not be altered, use :code:`copy=True`
"""
if isinstance(maneuver, Operation):
maneuver = Maneuver(maneuver)
if iter:
return maneuver.__iapply__(self, copy)
else:
if copy:
raise ValueError('copy can only be True if iter=True')
maneuver.__apply__(self)
@property
def apocenter_radius(self):
"""Return apocenter radius"""
return (1 + self.e) * self.a
@property
def pericenter_radius(self):
"""Return pericenter radius"""
return (1 - self.e) * self.a
@property
def apocenter_altitude(self):
"""Return apocenter altitude"""
return altitude_from_radius(self.apocenter_radius, self.body)
@property
def pericenter_altitude(self):
"""Return pericenter altitude"""
return altitude_from_radius(self.pericenter_radius, self.body)
@property
def U(self):
"""Radial direction unit vector."""
u = self.arg_pe + self.f
sin_u = sin(u)
cos_u = cos(u)
sin_raan = sin(self.raan)
cos_raan = cos(self.raan)
cos_i = cos(self.i)
return np.array(
[cos_u * cos_raan - sin_u * sin_raan * cos_i,
cos_u * sin_raan + sin_u * cos_raan * cos_i,
sin_u * sin(self.i)]
)
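# U is the unit vector from the central body toward the satellite: the
# perifocal x-axis rotated by raan about z, inclined by i, and advanced by
# the argument of latitude u = arg_pe + f.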
@property
def V(self):
"""Transversal in-flight direction unit vector."""
u = self.arg_pe + self.f
sin_u = sin(u)
cos_u = cos(u)
sin_raan = sin(self.raan)
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import numpy as np
from keras.utils import multi_gpu_model
import matplotlib.pyplot as plt
from scipy.signal import medfilt
from model import *
from featureExtraction import *
import glob
class Options(object):
def __init__(self):
self.num_spec = 513
self.input_size = 31#115
self.batch_size = 64#64
self.resolution = 16
self.figureON = False
options = Options()
def main(filepath,output_dir,gpu_index):
if gpu_index is not None:
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_index)
pitch_range = np.arange(38, 83 + 1.0/options.resolution, 1.0/options.resolution)
pitch_range = np.concatenate([np.zeros(1), pitch_range])
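# pitch_range holds the candidate output pitches: MIDI notes 38-83 quantised
# in steps of 1/resolution semitones, plus a leading 0 bin that presumably
# stands for the "no melody / unvoiced" class.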
''' Features extraction'''
X_test, X_spec = spec_extraction(file_name=filepath, win_size=options.input_size)
''' melody predict'''
model = melody_ResNet_joint_add(options)
# model = melody_ResNet_joint_add2(options)
model.load_weights('./weights/ResNet_joint_add_L(CE_G).hdf5')
# model.load_weights('./weights/ResNet_joint_add_L(CE_G)_r16_t3_singleGPU.hdf5')
y_predict = model.predict(X_test, batch_size=options.batch_size, verbose=1)
num_total = y_predict[0].shape[0] * y_predict[0].shape[1]
est_pitch = np.zeros(num_total)
y_predict = np.reshape(y_predict[0], (num_total, y_predict[0].shape[2])) # origin
for i in range(y_predict.shape[0]):
index_predict = np.argmax(y_predict[i, :])
import sys
from typing import List
import numpy as np
import math
#from memory_profiler import profile
#from pypcd import pypcd
from pypcd import pypcd
from config import CAMERA_ICL, PM, MAP_IP
import cv2
import read_office
import evaluate_ate
import evaluate_rpe
from plane_extraction import *
import open3d as o3d
import os
import subprocess as sp
import argparse
from mrob.mrob import FGraph, geometry, registration, LM
def rotation_matrix_to_quaternion(r_matrix):
# First row of the rotation matrix
r00 = r_matrix[0, 0]
r01 = r_matrix[0, 1]
r02 = r_matrix[0, 2]
# Second row of the rotation matrix
r10 = r_matrix[1, 0]
r11 = r_matrix[1, 1]
r12 = r_matrix[1, 2]
# Third row of the rotation matrix
r20 = r_matrix[2, 0]
r21 = r_matrix[2, 1]
r22 = r_matrix[2, 2]
tr = r00 + r11 + r22
if tr > 0:
s = math.sqrt(tr+1.0) * 2
qw = 0.25 * s
qx = (r21 - r12) / s
qy = (r02 - r20) / s
qz = (r10 - r01) / s
elif r00 > r11 and r00 > r22:
s = math.sqrt(1.0 + r00 - r11 - r22) * 2
qw = (r21 - r12) / s
qx = 0.25 * s
qy = (r01 + r10) / s
qz = (r02 + r20) / s
elif r11 > r22:
s = math.sqrt(1.0 + r11 - r00 - r22) * 2
qw = (r02 - r20) / s
qx = (r01 + r10) / s
qy = 0.25 * s
qz = (r12 + r21) / s
else:
s = math.sqrt(1.0 + r22 - r00 - r11) * 2
qw = (r10 - r01) / s
qx = (r02 + r20) / s
qy = (r12 + r21) / s
qz = 0.25 * s
q = [qx, qy, qz, qw]
return q
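# Sanity check (illustrative): the identity rotation maps to the identity
# quaternion, i.e.
#   rotation_matrix_to_quaternion(np.eye(3)) == [0.0, 0.0, 0.0, 1.0]
# (returned in [qx, qy, qz, qw] order).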
def image_processing(function, depth_annot, camera_intrinsics, map_indx_points, planes, func_indx, planes_matcher):
points_of_images = []
colors_of_images = []
matrix_v = None
i = 0
for image, depth in depth_annot:
matrix_color = cv2.imread(image, cv2.IMREAD_COLOR)
matrix_depth = cv2.imread(depth, cv2.IMREAD_ANYDEPTH)
print(image)
if matrix_v is None:
rows, columns, _ = matrix_color.shape
columns_indices = np.arange(columns)
matrix_v = np.tile(columns_indices, (rows, 1))
import torch
import numpy as np
import time
import os
import argparse
import cv2
from LapNet import LAPNet
from loss import DiscriminativeLoss
from shougang_dataset import ShougangDataset
from logger import Logger
from torch.nn import DataParallel
from collections import OrderedDict
from torch.nn.parameter import Parameter
import platform
parser = argparse.ArgumentParser(description="Train model")
parser.add_argument('--dataset-path', default='ShougangDataset/')
parser.add_argument('--lr', type=float, default=3e-4, help='learning rate')
parser.add_argument('--batch-size', type=int, default=24, help='batch size')
parser.add_argument('--img-size', type=int, nargs='+', default=[1024, 512], help='image resolution: [width height]')
parser.add_argument('--epoch', type=int, default=10000)
parser.add_argument('--gpu-idx',type = int,default= 0, help='using gpu(idx)')
parser.add_argument('--optimizer-reset', type=int, default=100)
args = parser.parse_args()
torch.cuda.set_device(args.gpu_idx)
INPUT_CHANNELS = 3
OUTPUT_CHANNELS = 2
LEARNING_RATE = args.lr #1e-5
BATCH_SIZE = args.batch_size #20
NUM_EPOCHS = args.epoch #100
LOG_INTERVAL = 20
INS_CH = 32
SIZE = [args.img_size[0], args.img_size[1]] #[224, 224]
def state_dict(model, destination=None, prefix='', keep_vars=False):
own_state = model.module if isinstance(model, torch.nn.DataParallel) \
else model
if destination is None:
destination = OrderedDict()
for name, param in own_state._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.data
for name, buf in own_state._buffers.items():
if buf is not None:
destination[prefix + name] = buf
for name, module in own_state._modules.items():
if module is not None:
state_dict(module, destination, prefix + name + '.', keep_vars=keep_vars)
return destination
def load_state_dict(model, state_dict, strict=True):
own_state = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) \
else model.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
def train(model):
# refer from : https://github.com/Sayan98/pytorch-segnet/blob/master/src/train.py
is_better = True
prev_loss = float('inf')
print("Found",torch.cuda.device_count(),"GPU(s).","Using GPU(s) form idx:",args.gpu_idx)
#model = DataParallel(cpu_model)
# model = cpu_model.cuda()
#device = torch.device("cuda")
# model = torch.nn.DataParallel(model) #= model.cuda()
model.train()
last_better_epoch = start_epoch
for epoch in range(start_epoch,NUM_EPOCHS):
t_start = time.time()
loss_f = []
for batch_idx, (imgs, sem_labels) in enumerate(train_dataloader):
#os.system("clear")
loss = 0
img_tensor = torch.tensor(imgs).cuda()
sem_tensor = torch.tensor(sem_labels).cuda()
# ins_tensor = torch.tensor(ins_labels).cuda()
# Init gradients
optimizer.zero_grad()
img_inpt = np.array(np.transpose(torch.squeeze(img_tensor[0],0).cpu().detach().numpy(), (1,2,0)) ,dtype=np.uint8)
# Predictions
sem_pred = model(img_tensor)
# sem_pred=torch.floor(sem_pred)
seg_map = torch.squeeze(sem_pred,0).cpu().detach().numpy()
# ins_map = torch.squeeze(ins_pred,0).cpu().detach().numpy()
# Discriminative Loss
# disc_loss = criterion_disc(ins_pred, ins_tensor, [INS_CH] * len(img_tensor))/6400.5
# CrossEntropy Loss
ce_loss = criterion_ce(sem_pred.permute(0,2,3,1).contiguous().view(-1,OUTPUT_CHANNELS),
sem_tensor.view(-1))
# print(
# np.shape(sem_pred.permute(0,2,3,1).contiguous().view(-1,OUTPUT_CHANNELS)[:,1]),
# np.shape(sem_tensor.view(-1).float())
# )
# mse = criterion_mse(sem_pred.permute(0,2,3,1).contiguous().view(-1,OUTPUT_CHANNELS)[:,1],sem_tensor.view(-1).float())/1000
loss = ce_loss #+ disc_loss
loss.backward()
optimizer.step()
loss_f.append(loss.cpu().data.numpy())
print(' Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(imgs), len(train_dataloader.dataset),
100. * batch_idx / len(train_dataloader), loss.item()),end = '\r')
seg_show = seg_map[0][1]
_, seg_show2 = cv2.threshold(seg_show+1, 0, 0,cv2.THRESH_TOZERO)
seg_show = cv2.normalize(seg_show,seg_show,0,1,cv2.NORM_MINMAX)
seg_show2 = cv2.normalize(seg_show2,seg_show2,0,1,cv2.NORM_MINMAX)
seg_show = cv2.convertScaleAbs(seg_show,seg_show,255)
seg_show2 = cv2.convertScaleAbs(seg_show2,seg_show2,255)
# cv2.imshow("seg_pred",cv2.addWeighted(img_inpt,0.5,cv2.applyColorMap(seg_show,cv2.COLORMAP_JET),0.5,0))
# cv2.imshow("colormap",cv2.applyColorMap(seg_show,cv2.COLORMAP_JET))
# cv2.imshow("segthresh",cv2.applyColorMap(seg_show2,cv2.COLORMAP_MAGMA))
# for i in range(32):
# ins_show = ins_map[0][i]
# ins_show = cv2.normalize(ins_show,ins_show,0,1,cv2.NORM_MINMAX)
# ins_show = cv2.convertScaleAbs(ins_show,ins_show,255)
# cv2.imshow("insmap"+str(i),cv2.applyColorMap(ins_show,cv2.COLORMAP_OCEAN))
# cv2.imshow("img_inpt",img_inpt)
if cv2.waitKey(1) == 27:
print("Saving current chkpt...")
state = {'net':state_dict(model), 'optimizer':optimizer.state_dict(), 'epoch':epoch}
torch.save(state, chkpt_filename)
exit()
#Tensorboard
# if batch_idx % LOG_INTERVAL == 0:
# print("log at train idx:",batch_idx,end='\r')
# info = {'loss': loss.item(), 'ce_loss': ce_loss.item(), 'epoch': epoch}
# for tag, value in info.items():
# logger.scalar_summary(tag, value, batch_idx + 1)
# # 2. Log values and gradients of the parameters (histogram summary)
# for tag, value in model.named_parameters():
# tag = tag.replace('.', '/')
# logger.histo_summary(tag, value.data.cpu().numpy(), batch_idx + 1)
# # logger.histo_summary(tag + '/grad', value.grad.data.cpu().numpy(), batch_idx + 1)
# if batch_idx % 100 == 0:
# torch.save(model.state_dict(), "Lap"+str(args.gpu_idx)+".pth")
# print("\t\tModel Saved.")
# # 3. Log training images (image summary)
# info = {'images': img_tensor.view(-1, 3, SIZE[1], SIZE[0])[:BATCH_SIZE].cpu().numpy(),
# 'labels': sem_tensor.view(-1, SIZE[1], SIZE[0])[:BATCH_SIZE].cpu().numpy(),
# 'sem_preds': sem_pred.view(-1, 2, SIZE[1], SIZE[0])[:BATCH_SIZE,1].data.cpu().numpy(),
# 'ins_preds': ins_pred.view(-1, SIZE[1], SIZE[0])[:BATCH_SIZE*5].data.cpu().numpy()}
# for tag, images in info.items():
# logger.image_summary(tag, images, batch_idx + 1)
dt = time.time() - t_start
is_better = np.mean(loss_f) < prev_loss
state = {'net':state_dict(model), 'optimizer':optimizer.state_dict(), 'epoch':epoch}
# torch.save(state, os.getcwd()+"/trained_model/"+"LapNet_chkpt_epoch"+str(epoch)+"_GPU"+str(args.gpu_idx)+".pth")
if is_better:
prev_loss = np.mean(loss_f)
from __future__ import division
from collections import OrderedDict
import numpy as np
import loopy as lp
from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa
from parameterized import parameterized, param
from unittest.case import SkipTest
from pyjac.core.array_creator import array_splitter
from pyjac.core.instruction_creator import get_deep_specializer
from pyjac.loopy_utils.loopy_utils import kernel_call
from pyjac.tests import get_test_langs
from pyjac.tests.test_utils import indexer, get_split_elements, OptionLoopWrapper
VECTOR_WIDTH = 8
def opts_loop(width=[VECTOR_WIDTH, None],
depth=[VECTOR_WIDTH, None],
order=['C', 'F'],
is_simd=True,
skip_non_vec=True,
langs=get_test_langs(),
skip_test=None):
oploop = OrderedDict(
[('width', width),
('depth', depth),
('order', order),
('is_simd', is_simd),
('lang', langs)])
def skip(state):
s = (skip_non_vec and not (state['depth'] or state['width']))
if skip_test is not None:
s = s or skip_test(state)
return s
for opts in OptionLoopWrapper.from_dict(oploop, skip_test=skip):
yield param(opts)
def __get_ref_answer(base, asplit):
vw = asplit.vector_width
side = base.shape[0]
split_shape = asplit.split_shape(base)[0]
order = asplit.data_order
def slicify(slicer, inds):
slicer = slicer[:]
count = 0
for i in range(len(slicer)):
if not slicer[i]:
slicer[i] = index[count]
count += 1
assert count == len(inds), 'Not all indices used!'
return tuple(slicer)
# create answer
ans = np.zeros(split_shape, dtype=int)
# setup counters
count = 0
side_count = 0
if order == 'F':
inds = slice(1, None)
slicer = [slice(None)] + [None] * len(split_shape[inds])
else:
inds = slice(None, -1)
slicer = [None] * len(split_shape[inds]) + [slice(None)]
it = np.nditer(np.zeros(split_shape[inds]),
flags=['multi_index'], order=order)
if (order == 'C' and asplit.depth) or (order == 'F' and asplit.width):
# SIMD - no split
# array populator
while not it.finished:
index = it.multi_index[:]
# create a column or row
offset = np.arange(side_count, side_count + vw)
mask = (offset >= side)
offset[np.where(mask)] = 0
offset[np.where(~mask)] += count
# set
ans[slicify(slicer, index)] = offset[:]
# update counters
side_count = side_count + vw
if side_count >= side:
# reset
side_count = 0
count += side
it.iternext()
else:
# SIMD - split
# array populator
while not it.finished:
index = it.multi_index[:]
# create a column or row
offset = side_count + (np.arange(count, count + vw)) * side ** (
base.ndim - 1)
mask = (offset >= side**base.ndim)
offset[np.where(mask)] = 0
# set row
ans[slicify(slicer, index)] = offset[:]
# update counters
side_count = side_count + 1
if side_count >= side ** (base.ndim - 1):
# reset
side_count = 0
count += vw
it.iternext()
return ans
def __get_ref_shape(asplit, base_shape):
side = base_shape[0]
shape = list(base_shape)
vw = asplit.vector_width
if asplit.data_order == 'F' and asplit.depth:
# put new dim at front
insert_at = 0
# and shrink last dim
change_at = -1
elif asplit.data_order == 'F':
if not asplit.is_simd:
raise SkipTest('No split for non-explicit SIMD F-ordered '
'shallow vectorization')
assert asplit.is_simd
# insert at front
insert_at = 0
# and change old first dim
change_at = 1
elif asplit.data_order == 'C' and asplit.width:
# put new dim at end
insert_at = len(shape)
# and adjust start dim
change_at = 0
else:
if not asplit.is_simd:
raise SkipTest('No split for non-explicit SIMD C-ordered '
'deep vectorization')
# put new dim at end
insert_at = len(shape)
# and adjust old end dim
change_at = len(shape) - 1
# insert
shape.insert(insert_at, vw)
# and adjust end dim
shape[change_at] = int(np.ceil(side / vw))
return shape
def __internal(asplit, shape, order='C', width=None, depth=None):
"""
Assumes shape is square
"""
# create array
base = np.arange(np.prod(shape)).reshape(shape, order=order)
# split
arr, = asplit.split_numpy_arrays(base.copy())
# check shape
assert np.array_equal(arr.shape, __get_ref_shape(asplit, base.shape))
# check answer
assert np.array_equal(__get_ref_answer(base, asplit), arr)
def _split_doc(func, num, params):
test = '_helper_for_'
name = func.__name__
if test in name:
name = name[name.index(test) + len(test):]
p = params[0][0]
width = p.width
depth = p.depth
order = p.order
return "{} with: [width={}, depth={}, order={}]".format(
name, width, depth, order)
@parameterized(opts_loop,
doc_func=_split_doc,
skip_on_empty=True)
def test_npy_array_splitter(opts):
# create array split
asplit = array_splitter(opts)
def _test(shape):
__internal(asplit, shape, order=opts.order, width=opts.width,
depth=opts.depth)
# test with small square
_test((10, 10))
# now test with evenly sized
_test((16, 16))
# finally, try with 3d arrays
_test((10, 10, 10))
_test((16, 16, 16))
@parameterized(lambda: opts_loop(width=[None]),
doc_func=_split_doc,
skip_on_empty=True)
def test_lpy_deep_array_splitter(opts):
from pymbolic.primitives import Subscript, Variable
# create array split
asplit = array_splitter(opts)
# create a test kernel
size = VECTOR_WIDTH * 3
loop_bound = VECTOR_WIDTH * 2
arg1 = lp.GlobalArg('a1', shape=(size, size), order=opts.order)
arg2 = lp.GlobalArg('a2', shape=(16, 16), order=opts.order)
k = lp.make_kernel(
'{{[i]: 0 <= i < {}}}'.format(loop_bound),
"""
a1[0, i] = 1 {id=a1}
a2[0, i] = 1 {id=a2}
""",
[arg1, arg2],
silenced_warnings=['no_device_in_pre_codegen_checks'],
target=lp.OpenCLTarget())
k = lp.split_iname(k, 'i', VECTOR_WIDTH,
inner_tag='l.0' if not opts.is_simd else 'vec')
a1_hold = k.arg_dict['a1'].copy()
a2_hold = k.arg_dict['a2'].copy()
k = asplit.split_loopy_arrays(k)
# ensure there's no loopy errors
lp.generate_code_v2(k).device_code()
def __indexer():
if opts.order == 'C':
return (0, Variable('i_outer'), Variable('i_inner'))
else:
return (Variable('i_inner'), 0, Variable('i_outer'))
# check dim
a1 = k.arg_dict['a1']
assert a1.shape == asplit.split_shape(a1_hold)[0]
# and indexing
assign = next(insn.assignee for insn in k.instructions if insn.id == 'a1')
# construct index
assert isinstance(assign, Subscript) and assign.index == __indexer()
# now test with evenly sized
a2 = k.arg_dict['a2']
assert a2.shape == asplit.split_shape(a2_hold)[0]
assign = next(insn.assignee for insn in k.instructions if insn.id == 'a2')
assert isinstance(assign, Subscript) and assign.index == __indexer()
# currently only have SIMD for wide-vectorizations
@parameterized(lambda: opts_loop(depth=[None]),
doc_func=_split_doc,
skip_on_empty=True)
def test_lpy_wide_array_splitter(opts):
from pymbolic.primitives import Subscript, Variable
# create array split
asplit = array_splitter(opts)
# create a test kernel
arg1 = lp.GlobalArg('a1', shape=(10, 10), order=opts.order)
arg2 = lp.GlobalArg('a2', shape=(16, 16), order=opts.order)
k = lp.make_kernel(
['{[i]: 0 <= i < 10}',
'{{[j_outer]: 0 <= j_outer < {}}}'.format(int(np.ceil(10 / VECTOR_WIDTH))),
'{{[j_inner]: 0 <= j_inner < {}}}'.format(VECTOR_WIDTH)],
"""
for i, j_outer, j_inner
a1[j_outer, i] = 1 {id=a1}
a2[j_outer, i] = 1 {id=a2}
end
""",
[arg1, arg2],
silenced_warnings=['no_device_in_pre_codegen_checks'],
target=lp.OpenCLTarget())
a1_hold = k.arg_dict['a1'].copy()
a2_hold = k.arg_dict['a2'].copy()
k = asplit.split_loopy_arrays(k)
k = lp.tag_inames(k, {'j_inner': 'l.0' if not opts.is_simd else 'vec'})
# ensure there's no loopy errors
lp.generate_code_v2(k).device_code()
def __indexer():
if opts.order == 'C':
return (Variable('j_outer'), Variable('i'), Variable('j_inner'))
else:
return (Variable('j_inner'), Variable('j_outer'), Variable('i'))
# check dim
a1 = k.arg_dict['a1']
assert a1.shape == asplit.split_shape(a1_hold)[0]
# and indexing
assign = next(insn.assignee for insn in k.instructions if insn.id == 'a1')
# construct index
assert isinstance(assign, Subscript) and assign.index == __indexer()
# now test with evenly sized
a2 = k.arg_dict['a2']
assert a2.shape == asplit.split_shape(a2_hold)[0]
assign = next(insn.assignee for insn in k.instructions if insn.id == 'a2')
assert isinstance(assign, Subscript) and assign.index == __indexer()
@parameterized(lambda: opts_loop(depth=[None]),
doc_func=_split_doc,
skip_on_empty=True)
def test_lpy_iname_presplit(opts):
"""
Tests that inames access to pre-split inames in non-split loopy arrays are
correctly handled
"""
from pymbolic.primitives import Subscript, Variable
# create array split
asplit = array_splitter(opts)
# create a test kernel
arg1 = lp.GlobalArg('a1', shape=(20, 10), order=opts.order)
arg2 = lp.GlobalArg('a2', shape=(16, 16), order=opts.order)
k = lp.make_kernel(
['{[i]: 0 <= i < 10}',
'{{[j_outer]: 0 <= j_outer < {}}}'.format(int(np.ceil(10 / VECTOR_WIDTH))),
'{{[j_inner]: 0 <= j_inner < {}}}'.format(VECTOR_WIDTH)],
"""
a1[j_outer, i] = 1 {id=a1}
a2[j_outer, i] = 1 {id=a2}
""",
[arg1, arg2],
silenced_warnings=['no_device_in_pre_codegen_checks'],
target=lp.OpenCLTarget())
k = asplit.split_loopy_arrays(k, dont_split=['a1', 'a2'])
# ensure there's no loopy errors
lp.generate_code_v2(k).device_code()
def __indexer():
return (Variable('j_outer') * VECTOR_WIDTH + Variable('j_inner'),
Variable('i'))
# check indexing
assign = next(insn.assignee for insn in k.instructions if insn.id == 'a1')
# construct index
assert isinstance(assign, Subscript) and assign.index == __indexer()
# now test with evenly sized
assign = next(insn.assignee for insn in k.instructions if insn.id == 'a2')
assert isinstance(assign, Subscript) and assign.index == __indexer()
def test_atomic_deep_vec_with_small_split():
# test that an :class:`atomic_deep_specialization` with split smaller than
# the vector width uses the correct splitting size
def __test(loop_size, vec_width):
knl = lp.make_kernel(
'{{[i]: 0 <= i < {}}}'.format(loop_size),
"""
<> x = 1.0
a1[0] = a1[0] + x {id=set}
... lbarrier {id=wait, dep=set}
for i
a1[0] = a1[0] + 1 {id=a1, dep=set:wait, nosync=set}
end
""",
[lp.GlobalArg('a1', shape=(loop_size,), order='C', dtype=np.float32)],
target=lp.OpenCLTarget(),
silenced_warnings=['no_device_in_pre_codegen_checks'])
loopy_opts = type('', (object,), {'depth': vec_width, 'order': 'C',
'use_atomic_doubles': True})
knl = lp.split_iname(knl, 'i', vec_width, inner_tag='l.0')
# feed through deep specializer
_, ds = get_deep_specializer(loopy_opts, atomic_ids=['a1'],
split_ids=['set'], use_atomics=True,
is_write_race=True, split_size=loop_size)
knl = ds(knl)
val = np.minimum(loop_size, vec_width)
assert 'x / {:.1f}f'.format(val) in lp.generate_code(knl)[0]
# test kernel w/ loop size smaller than split
__test(10, 16)
# test kernel w/ loop size larger than split
__test(16, VECTOR_WIDTH)
@parameterized(opts_loop,
doc_func=_split_doc,
skip_on_empty=True)
def test_get_split_shape(opts):
# create array split
asplit = array_splitter(opts)
def __test(splitter, shape):
# make a dummy array
arr = np.zeros(shape)
# get the split shape
sh, gr, vec, spl = asplit.split_shape(arr)
# first -- test against numpy splitter to ensure we get the right shape
assert sh == asplit.split_numpy_arrays(arr)[0].shape
# next, the "grow" axis is either the first axis ("C") or the second axis
# for "F"
grow = opts.order == 'F'
assert gr == grow
# and the vec_axis is in front if 'F' else in back
vec_axis = len(shape) if opts.order == 'C' else 0
assert vec == vec_axis
# and finally, the split axis
split_axis = 0 if opts.width else len(shape) - 1
assert spl == split_axis
# test with small square
__test(asplit, (10, 10))
# now test with evenly sized
__test(asplit, (16, 16))
# finally, try with 3d arrays
__test(asplit, (10, 10, 10))
__test(asplit, (16, 16, 16))
# and finally test with some randomly sized arrays
for i in range(50):
shape = np.random.randint(1, 12, size=np.random.randint(2, 5))
__test(asplit, shape)
@parameterized(lambda: opts_loop(skip_non_vec=False),
doc_func=_split_doc,
skip_on_empty=True)
def test_indexer(opts):
asplit = array_splitter(opts)
def __test(splitter, shape):
# make a dummy array
arr = np.arange(np.prod(shape)).reshape(shape)
index = indexer(splitter, shape)
# split
split_arr = splitter.split_numpy_arrays(arr)[0]
# loop over every index in the array
check_axes = tuple(range(len(shape)))
it = np.nditer(arr, flags=['multi_index'], order=opts.order)
while not it.finished:
# get indices
check_inds = tuple((x,) for x in it.multi_index)
new_indices = index(check_inds, check_axes)
# check that it matches the old array value
assert split_arr[new_indices] == arr[it.multi_index]
it.iternext()
# test with small square
__test(asplit, (10, 10))
# now test with evenly sized
__test(asplit, (16, 16))
# finally, try with 3d arrays
__test(asplit, (10, 10, 10))
__test(asplit, (16, 16, 16))
@parameterized(lambda: opts_loop(skip_non_vec=False),
doc_func=_split_doc,
skip_on_empty=True)
def test_get_split_elements(opts):
# create opts
asplit = array_splitter(opts)
def __test(shape, check_inds=None, check_axes=None, tiling=True):
# make a dummy array
arr = np.arange(1, np.prod(shape) + 1).reshape(shape)
# split
split_arr = asplit.split_numpy_arrays(arr)[0]
if check_inds is None:
assert tiling
# create the indicies to check
check_inds = tuple(np.arange(x) for x in shape)
check_axes = tuple(range(len(shape)))
ans = arr.flatten(opts.order)
elif tiling:
assert check_axes is not None
assert check_inds is not None
ans = kernel_call('', arr, check_axes, [check_inds])._get_comparable(
arr, 0, True).flatten(opts.order)
else:
slicer = [slice(None)] * arr.ndim
assert all(check_inds[0].size == ci.size for ci in check_inds[1:])
for i, ax in enumerate(check_axes):
slicer[ax] = check_inds[i]
ans = arr[tuple(slicer)].flatten(opts.order)
# and compare to the old (unsplit) matrix
assert np.allclose(
get_split_elements(split_arr, asplit, arr.shape, check_inds, check_axes,
tiling=tiling),
ans)
# test with small square
__test((10, 10))
# now test with evenly sized
__test((16, 16))
# finally, try with 3d arrays
__test((10, 10, 10))
# and some non-full check-inds / axes
__test((10, 10, 10), [np.arange(3, 7), np.arange(2, 4)], (0, 1))
__test((10, 10, 10), [np.arange(3, 7), np.arange(2, 4)], (1, 2))
__test((10, 10, 10), [np.arange(3, 7), np.arange(2, 4)], (0, 2))
__test((10, 10, 10), [np.arange(3, 7), np.arange(2, 4)], (0, 1))
__test((16, 16, 16))
__test((16, 16, 16), [np.arange(3, 7), np.arange(2, 4)], (1, 2))
__test((16, 16, 16), [np.arange(3, 7), np.arange(2, 4)], (0, 2))
import numpy as np
from sklearn import linear_model as lm
from scipy.optimize import fsolve
from scipy.optimize import root
from Seabed.Profile import *
from Utils.SlopeApproximator import *
class Sensor():
"""Acoustic sensor model"""
def __init__(self, X0, Xhat0, U0, accuracy, Gamma, Theta, seabed, estimateslope = True):
self.accuracy = accuracy
self.X_estimate = np.array(Xhat0)
self.X_estimate_history = Xhat0
self.delta_X_estimate = np.array(self.X_estimate.shape)
self.delta_X_estimate_history = np.zeros(Xhat0.shape)
self.Gamma = np.array(Gamma)
self.Theta = np.array(Theta)
self.seabed = seabed
self.estimateslope = estimateslope
if (estimateslope):
self.sa = SlopeApproximator()
self.U_current = U0
self.L_zeros = np.zeros(Gamma.size * Theta.size);
self.L_current = self.L_zeros
_, self.L_current, _, _, _ = self.beamnet(X0, U0)
def e(self, gamma, theta):
return np.array([
np.cos(gamma) * np.cos(theta),
np.cos(gamma) * np.sin(theta),
np.sin(gamma)
])
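# e(gamma, theta) is the unit beam direction for elevation gamma and azimuth
# theta; de() below is its time derivative, obtained by the chain rule with
# the angle rates dgamma and dtheta.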
def de(self, gamma, dgamma, theta, dtheta):
return np.array([
-np.sin(gamma) * dgamma * np.cos(theta) - np.cos(gamma) * np.sin(theta) * dtheta,
'''
List of used function in Main
'''
# numpy
import duden
import numpy as np
# GUI
import PySimpleGUI as sg
# nice table
from tabulate import tabulate
# french conjugation package
from mlconjug3 import Conjugator
# import german duden
from duden import get
def conjugate_fr(verb):
# todo: add color and participe présent
'''
print formated text with conjugation table
:param verb:
:return:
'''
# List of table for each tense
table = []
# Conjugate verb
w = Conjugator(language='fr').conjugate(verb)
# Convert to numpy array and change personal noun
w = w.iterate()
w_arr = np.full((11, 7, 2), None)  # 11 blocks (infinitive + tenses), header row + 6 personal pronouns, (pronoun, form) pairs
# w_arr[k, l, 2]
k = 1 # counter for w_arr
# Infinitif
w_arr[0, 1, 0] = w[0][1]
w_arr[0, 1, 1] = w[0][2]
# Ind -> Sub
for i in range(1, 42, 6):
# tense name
w_arr[k, 0, 0] = "{:-<18}".format('')
w_arr[k, 0, 1] = "{:-<25}".format(w[i][0]+' '+w[i][1])
# personal prenoun
w_arr[k, 1, 0] = "{:<18}".format('je')
w_arr[k, 2, 0] = "{:<18}".format('tu')
w_arr[k, 3, 0] = "{:<18}".format('il/elle/on')
w_arr[k, 4, 0] = "{:<18}".format('nous')
w_arr[k, 5, 0] = "{:<18}".format('vous')
w_arr[k, 6, 0] = "{:<18}".format('ils/elles/ont')
# tense form
for j in range(6):
w_arr[k, j+1, 1] = "{:<25}".format(w[i+j][3])
tmp = np.where(np.equal(w_arr[k, :, :], None), ' ', w_arr[k, :, :])
table.append(tabulate(tmp, tablefmt='plain', numalign="center"))
k += 1
# Imp présent
w_arr[k, 0, 0] = "{:-<18}".format('')
w_arr[k, 0, 1] = "{:-<25}".format(w[43][1])
w_arr[k, 1, 0] = "{:<18}".format("(tu)")
w_arr[k, 2, 0] = "{:<18}".format("(nous)")
w_arr[k, 3, 0] = "{:<18}".format("(vous)")
for j in range(3):
w_arr[k, j+1, 1] = "{:<25}".format(w[43+j][3])
tmp = np.where(np.equal(w_arr[k, :, :], None), ' ', w_arr[k, :, :])
table.append(tabulate(tmp, tablefmt='plain', numalign="center"))
k += 1
# participe présent
w_arr[k, 0, 0] = w[46][1]
w_arr[k, 0, 1] = w[46][2]
part_pres = w_arr[k, 0, 0]+' '+w_arr[k, 0, 1]
k += 1
# participe passé
w_arr[k, 0, 0] = "{:-<18}".format('')
w_arr[k, 0, 1] = "{:-<25}".format(w[47][1])
w_arr[k, 1, 0] = "{:<18}".format("masculin singulier")
w_arr[k, 2, 0] = "{:<18}".format("féminin singulier")
w_arr[k, 3, 0] = "{:<18}".format("masculin pluriel")
w_arr[k, 4, 0] = "{:<18}".format("féminin pluriel")
for j in range(1, 5):
w_arr[k, j, 1] = "{:<25}".format(w[46+j][3])
tmp = np.where(np.equal(w_arr[k, :, :], None), ' ', w_arr[k, :, :])
table.append(tabulate(tmp, tablefmt='plain', numalign="center"))
# formatted sg window layout
# participe présent
col = []
k = 0
for i in range(3):
row = []
for j in range(3):
row.extend([sg.Text(table[k], font='Courier', text_color='white'), sg.VSeparator(color='white')])
k += 1
col.append(row)
col.append([sg.HSeparator(color='white')])
return col
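# Usage sketch (illustrative): the returned rows can be dropped straight into
# a PySimpleGUI layout, e.g.
#   layout = conjugate_fr('parler')
#   window = sg.Window('Conjugation', layout)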
def conjugate_de(verb):
# use duden
w = duden.get(verb)
w_arr = np.full((7, 7, 2), None)  # 7 blocks (infinitive + tenses), header row + 6 personal pronouns, (pronoun, form) pairs
table = []
# Infinitiv
w_arr[0, 1, 0] = 'Infinitiv mit zu'
w_arr[0, 1, 1] = w.grammar(duden.INFINITIV_MIT_ZU)
prasens = w.grammar(duden.PRASENS)
prateritum = w.grammar(duden.PRATERITUM)
# Indikativ prasens
w_arr[1, 0, 0] = "{:-<18}".format('')
w_arr[1, 0, 1] = "{:-<25}".format('INDIKATIV'+' '+'PRASENS')
tmp = prasens[0::3]
for j in range(6):
form = tmp[j].split()
# personal prenoun and form
w_arr[1, j+1, 0] = "{:<18}".format(form[0])
w_arr[1, j+1, 1] = "{:<25}".format(form[1])
tmp = np.where(np.equal(w_arr[1, :, :], None), ' ', w_arr[1, :, :])
import pytest
pytest.importorskip("numpy")
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import dask.array as da
from dask.array.overlap import (
boundaries,
constant,
ensure_minimum_chunksize,
nearest,
overlap,
overlap_internal,
periodic,
reflect,
trim_internal,
)
from dask.array.utils import assert_eq, same_keys
from ..lib.stride_tricks import sliding_window_view
def test_overlap_internal():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
g = overlap_internal(d, {0: 2, 1: 1})
result = g.compute(scheduler="sync")
assert g.chunks == ((6, 6), (5, 5))
expected = np.array(
[
[0, 1, 2, 3, 4, 3, 4, 5, 6, 7],
[8, 9, 10, 11, 12, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
[16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 60, 59, 60, 61, 62, 63],
]
)
assert_eq(result, expected)
assert same_keys(overlap_internal(d, {0: 2, 1: 1}), g)
def test_overlap_internal_asymmetric():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
result = overlap_internal(d, {0: (2, 0), 1: (1, 0)})
assert result.chunks == ((4, 6), (4, 5))
expected = np.array(
[
[0, 1, 2, 3, 3, 4, 5, 6, 7],
[8, 9, 10, 11, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 27, 28, 29, 30, 31],
[16, 17, 18, 19, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 59, 60, 61, 62, 63],
]
)
assert_eq(result, expected)
assert same_keys(overlap_internal(d, {0: (2, 0), 1: (1, 0)}), result)
def test_overlap_internal_asymmetric_small():
x = np.arange(32).reshape((2, 16))
d = da.from_array(x, chunks=(2, 4))
result = overlap_internal(d, {0: (0, 0), 1: (1, 1)})
assert result.chunks == ((2,), (5, 6, 6, 5))
expected = np.array(
[
[0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15],
[
16,
17,
18,
19,
20,
19,
20,
21,
22,
23,
24,
23,
24,
25,
26,
27,
28,
27,
28,
29,
30,
31,
],
]
)
assert_eq(result, expected)
assert same_keys(overlap_internal(d, {0: (0, 0), 1: (1, 1)}), result)
def test_trim_internal():
d = da.ones((40, 60), chunks=(10, 10))
e = trim_internal(d, axes={0: 1, 1: 2})
assert e.chunks == ((8, 8, 8, 8), (6, 6, 6, 6, 6, 6))
def test_periodic():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
e = periodic(d, axis=0, depth=2)
assert e.shape[0] == d.shape[0] + 4
assert e.shape[1] == d.shape[1]
assert_eq(e[1, :], d[-1, :])
assert_eq(e[0, :], d[-2, :])
def test_reflect():
x = np.arange(10)
d = da.from_array(x, chunks=(5, 5))
e = reflect(d, axis=0, depth=2)
expected = np.array([1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8])
assert_eq(e, expected)
e = reflect(d, axis=0, depth=1)
expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])
assert_eq(e, expected)
def test_nearest():
x = np.arange(10)
d = da.from_array(x, chunks=(5, 5))
e = nearest(d, axis=0, depth=2)
expected = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9])
assert_eq(e, expected)
e = nearest(d, axis=0, depth=1)
expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])
assert_eq(e, expected)
def test_constant():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
e = constant(d, axis=0, depth=2, value=10)
assert e.shape[0] == d.shape[0] + 4
assert e.shape[1] == d.shape[1]
assert_eq(e[1, :], np.ones(8, dtype=x.dtype) * 10)
assert_eq(e[-1, :], np.ones(8, dtype=x.dtype) * 10)
def test_boundaries():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
e = boundaries(d, {0: 2, 1: 1}, {0: 0, 1: "periodic"})
expected = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[7, 0, 1, 2, 3, 4, 5, 6, 7, 0],
[15, 8, 9, 10, 11, 12, 13, 14, 15, 8],
[23, 16, 17, 18, 19, 20, 21, 22, 23, 16],
[31, 24, 25, 26, 27, 28, 29, 30, 31, 24],
[39, 32, 33, 34, 35, 36, 37, 38, 39, 32],
[47, 40, 41, 42, 43, 44, 45, 46, 47, 40],
[55, 48, 49, 50, 51, 52, 53, 54, 55, 48],
[63, 56, 57, 58, 59, 60, 61, 62, 63, 56],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
assert_eq(e, expected)
def test_overlap():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
g = overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: "reflect"})
assert g.chunks == ((8, 8), (6, 6))
expected = np.array(
[
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],
[8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],
[16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],
[56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
]
)
assert_eq(g, expected)
assert same_keys(g, overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: "reflect"}))
u_depth = np.uint16([2, 1])
u_depth = {k: v for k, v in enumerate(u_depth)}
g = overlap(d, depth=u_depth, boundary={0: 100, 1: "reflect"})
assert g.chunks == ((8, 8), (6, 6))
assert_eq(g, expected)
assert same_keys(g, overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: "reflect"}))
g = overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: "none"})
expected = np.array(
[
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[0, 1, 2, 3, 4, 3, 4, 5, 6, 7],
[8, 9, 10, 11, 12, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
[16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 60, 59, 60, 61, 62, 63],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
]
)
assert_eq(g, expected)
assert g.chunks == ((8, 8), (5, 5))
u_depth = np.uint16([2, 1])
u_depth = {k: v for k, v in enumerate(u_depth)}
g = overlap(d, depth=u_depth, boundary={0: 100, 1: "none"})
assert_eq(g, expected)
assert g.chunks == ((8, 8), (5, 5))
def test_asymmetric_overlap_boundary_exception():
x = da.arange(10, chunks=5)
with pytest.raises(NotImplementedError):
x.map_overlap(
lambda x: x + len(x), depth={0: (0, 2)}, boundary="reflect", dtype=x.dtype
)
def test_map_overlap():
x = da.arange(10, chunks=5)
y = x.map_overlap(lambda x: x + len(x), depth=2, dtype=x.dtype)
assert_eq(y, np.arange(10) + 5 + 2 + 2)
x = da.arange(10, chunks=5)
y = x.map_overlap(lambda x: x + len(x), depth=np.int64(2), dtype=x.dtype)
import os
import random
import sys
from argparse import ArgumentParser, Namespace
from collections import deque
from datetime import datetime
from pathlib import Path
from pprint import pprint
import numpy as np
import psutil
from flatland.envs.malfunction_generators import (MalfunctionParameters,
malfunction_from_params)
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv, RailEnvActions
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
from torch.utils.tensorboard import SummaryWriter
from utils.agent_action_config import (get_action_size,
get_flatland_full_action_size,
map_action, map_action_policy,
map_actions, set_action_size_full,
set_action_size_reduced)
from utils.fast_tree_obs import FastTreeObs
from utils.observation_utils import normalize_observation
from utils.timer import Timer
# ! Import our policies
from random_policy import RandomPolicy
from go_forward_policy import GoForwardPolicy
from dddqn import DDDQNPolicy
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
try:
import wandb
wandb.init(sync_tensorboard=True)
except ImportError:
print("Install wandb to log to Weights & Biases")
"""
This file shows how to train multiple agents using a reinforcement learning approach.
After training an agent, you can submit it straight away to the NeurIPS 2020 Flatland challenge!
Agent documentation: https://flatland.aicrowd.com/getting-started/rl/multi-agent.html
Submission documentation: https://flatland.aicrowd.com/getting-started/first-submission.html
"""
def create_rail_env(env_params, tree_observation):
n_agents = env_params.n_agents
x_dim = env_params.x_dim
y_dim = env_params.y_dim
n_cities = env_params.n_cities
max_rails_between_cities = env_params.max_rails_between_cities
max_rails_in_city = env_params.max_rails_in_city
seed = env_params.seed
# Break agents from time to time
malfunction_parameters = MalfunctionParameters(
malfunction_rate=env_params.malfunction_rate,
min_duration=20,
max_duration=50
)
return RailEnv(
width=x_dim, height=y_dim,
rail_generator=sparse_rail_generator(
max_num_cities=n_cities,
grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_city
),
schedule_generator=sparse_schedule_generator(),
number_of_agents=n_agents,
malfunction_generator_and_process_data=malfunction_from_params(
malfunction_parameters),
obs_builder_object=tree_observation,
random_seed=seed
)
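# A minimal sketch of exercising create_rail_env on its own (the parameter
# values below are illustrative assumptions, not taken from this file):
#   env_params = Namespace(n_agents=2, x_dim=30, y_dim=30, n_cities=2,
#                          max_rails_between_cities=2, max_rails_in_city=3,
#                          malfunction_rate=1 / 200, seed=0)
#   obs_builder = TreeObsForRailEnv(max_depth=2,
#                                   predictor=ShortestPathPredictorForRailEnv(30))
#   env = create_rail_env(env_params, obs_builder)
#   obs, info = env.reset(regenerate_schedule=True, regenerate_rail=True)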
def train_agent(train_params, train_env_params, eval_env_params, obs_params):
# Environment parameters
n_agents = train_env_params.n_agents
x_dim = train_env_params.x_dim
y_dim = train_env_params.y_dim
n_cities = train_env_params.n_cities
max_rails_between_cities = train_env_params.max_rails_between_cities
max_rails_in_city = train_env_params.max_rails_in_city
seed = train_env_params.seed
# Unique ID for this training
now = datetime.now()
training_id = now.strftime('%y%m%d%H%M%S')
# Observation parameters
observation_tree_depth = obs_params.observation_tree_depth
observation_radius = obs_params.observation_radius
observation_max_path_depth = obs_params.observation_max_path_depth
# Training parameters
eps_start = train_params.eps_start
eps_end = train_params.eps_end
eps_decay = train_params.eps_decay
n_episodes = train_params.n_episodes
checkpoint_interval = train_params.checkpoint_interval
n_eval_episodes = train_params.n_evaluation_episodes
restore_replay_buffer = train_params.restore_replay_buffer
save_replay_buffer = train_params.save_replay_buffer
# Set the seeds
random.seed(seed)
np.random.seed(seed)
# Observation builder
predictor = ShortestPathPredictorForRailEnv(observation_max_path_depth)
if not train_params.use_fast_tree_observation:
print("\nUsing standard TreeObs")
def check_is_observation_valid(observation):
return observation
def get_normalized_observation(observation, tree_depth: int, observation_radius=0):
return normalize_observation(observation, tree_depth, observation_radius)
tree_observation = TreeObsForRailEnv(
max_depth=observation_tree_depth, predictor=predictor)
tree_observation.check_is_observation_valid = check_is_observation_valid
tree_observation.get_normalized_observation = get_normalized_observation
else:
print("\nUsing FastTreeObs")
def check_is_observation_valid(observation):
return True
def get_normalized_observation(observation, tree_depth: int, observation_radius=0):
return observation
tree_observation = FastTreeObs(max_depth=observation_tree_depth)
tree_observation.check_is_observation_valid = check_is_observation_valid
tree_observation.get_normalized_observation = get_normalized_observation
# Setup the environments
train_env = create_rail_env(train_env_params, tree_observation)
train_env.reset(regenerate_schedule=True, regenerate_rail=True)
eval_env = create_rail_env(eval_env_params, tree_observation)
eval_env.reset(regenerate_schedule=True, regenerate_rail=True)
if not train_params.use_fast_tree_observation:
# Calculate the state size given the depth of the tree observation and the number of features
n_features_per_node = train_env.obs_builder.observation_dim
n_nodes = sum([np.power(4, i)
for i in range(observation_tree_depth + 1)])
state_size = n_features_per_node * n_nodes
else:
# Calculate the state size given the depth of the tree observation and the number of features
state_size = tree_observation.observation_dim
action_count = [0] * get_flatland_full_action_size()
action_dict = dict()
agent_obs = [None] * n_agents
agent_prev_obs = [None] * n_agents
agent_prev_action = [2] * n_agents
update_values = [False] * n_agents
# Smoothed values used as target for hyperparameter tuning
smoothed_eval_normalized_score = -1.0
smoothed_eval_completion = 0.0
# todo smooth when rendering instead
scores_window = deque(maxlen=checkpoint_interval)
completion_window = deque(maxlen=checkpoint_interval)
if train_params.action_size == "reduced":
set_action_size_reduced()
else:
set_action_size_full()
# ! Add Policies here
if train_params.policy == "Random":
policy = RandomPolicy(state_size, get_action_size(), train_params)
elif train_params.policy == "GoForward":
policy = GoForwardPolicy(state_size, get_action_size(), train_params)
elif train_params.policy == "dddqn":
policy = DDDQNPolicy(state_size, get_action_size(), train_params)
# Default policy random
if train_params.policy is None:
policy = GoForwardPolicy(state_size, get_action_size(), train_params)
# Load existing policy
if train_params.load_policy != "":
policy.load(train_params.load_policy)
# Loads existing replay buffer
if restore_replay_buffer:
try:
policy.load_replay_buffer(restore_replay_buffer)
policy.test()
except RuntimeError as e:
print(
"\n🛑 Could't load replay buffer, were the experiences generated using the same tree depth?")
print(e)
exit(1)
print("\n💾 Replay buffer status: {}/{} experiences".format(
len(policy.memory.memory), train_params.buffer_size))
hdd = psutil.disk_usage('/')
if save_replay_buffer and (hdd.free / (2 ** 30)) < 500.0:
print(
"⚠️ Careful! Saving replay buffers will quickly consume a lot of disk space. You have {:.2f}gb left.".format(
hdd.free / (2 ** 30)))
# TensorBoard writer
writer = SummaryWriter(
comment="_" + train_params.policy + "_" + train_params.action_size)
training_timer = Timer()
training_timer.start()
print(
"\n🚉 Training {} trains on {}x{} grid for {} episodes, evaluating on {} episodes every {} episodes. Training id '{}'.\n".format(
train_env.get_num_agents(),
x_dim, y_dim,
n_episodes,
n_eval_episodes,
checkpoint_interval,
training_id
))
for episode_idx in range(n_episodes + 1):
step_timer = Timer()
reset_timer = Timer()
learn_timer = Timer()
preproc_timer = Timer()
inference_timer = Timer()
# Reset environment
reset_timer.start()
if train_params.n_agent_fixed:
number_of_agents = n_agents
train_env_params.n_agents = n_agents
else:
number_of_agents = int(
min(n_agents, 1 + np.floor(episode_idx / 5))) # ! Changed from 200
train_env_params.n_agents = episode_idx % number_of_agents + 1
train_env = create_rail_env(train_env_params, tree_observation)
obs, info = train_env.reset(
regenerate_rail=True, regenerate_schedule=True)
policy.reset(train_env)
reset_timer.end()
if train_params.render:
# Setup renderer
env_renderer = RenderTool(train_env, gl="PGL")
env_renderer.set_new_rail()
score = 0
nb_steps = 0
actions_taken = []
# Build initial agent-specific observations
for agent_handle in train_env.get_agent_handles():
if tree_observation.check_is_observation_valid(obs[agent_handle]):
agent_obs[agent_handle] = tree_observation.get_normalized_observation(obs[agent_handle],
observation_tree_depth,
observation_radius=observation_radius)
agent_prev_obs[agent_handle] = agent_obs[agent_handle].copy()
# Max number of steps per episode
# This is the official formula used during evaluations
# See details in flatland.envs.schedule_generators.sparse_schedule_generator
# max_steps = int(4 * 2 * (env.height + env.width + (n_agents / n_cities)))
max_steps = train_env._max_episode_steps
# Run episode
policy.start_episode(train=True)
for step in range(max_steps - 1):
inference_timer.start()
policy.start_step(train=True)
for agent_handle in train_env.get_agent_handles():
agent = train_env.agents[agent_handle]
if info['action_required'][agent_handle]:
update_values[agent_handle] = True
action = policy.act(
agent_handle, agent_obs[agent_handle], eps=eps_start)
action_count[map_action(action)] += 1
actions_taken.append(map_action(action))
else:
# An action is not required if the train hasn't joined the railway network,
# if it already reached its target, or if it is currently malfunctioning.
update_values[agent_handle] = False
action = 0
action_dict.update({agent_handle: action})
policy.end_step(train=True)
inference_timer.end()
# Environment step
step_timer.start()
next_obs, all_rewards, done, info = train_env.step(
map_actions(action_dict))
step_timer.end()
# Render an episode at some interval
if train_params.render:
env_renderer.render_env(
show=True,
frames=False,
show_observations=False,
show_predictions=False
)
# Update replay buffer and train agent
for agent_handle in train_env.get_agent_handles():
if update_values[agent_handle] or done['__all__']:
# Only learn from timesteps where something happened
learn_timer.start()
policy.step(agent_handle,
agent_prev_obs[agent_handle],
map_action_policy(
agent_prev_action[agent_handle]),
all_rewards[agent_handle],
agent_obs[agent_handle],
done[agent_handle])
learn_timer.end()
agent_prev_obs[agent_handle] = agent_obs[agent_handle].copy()
agent_prev_action[agent_handle] = action_dict[agent_handle]
# Preprocess the new observations
if tree_observation.check_is_observation_valid(next_obs[agent_handle]):
preproc_timer.start()
agent_obs[agent_handle] = tree_observation.get_normalized_observation(next_obs[agent_handle],
observation_tree_depth,
observation_radius=observation_radius)
preproc_timer.end()
score += all_rewards[agent_handle]
nb_steps = step
if done['__all__']:
break
policy.end_episode(train=True)
# Epsilon decay
eps_start = max(eps_end, eps_decay * eps_start)
# Collect information about training
tasks_finished = sum(done[idx]
for idx in train_env.get_agent_handles())
completion = tasks_finished / max(1, train_env.get_num_agents())
normalized_score = score / (max_steps * train_env.get_num_agents())
action_probs = action_count / max(1, np.sum(action_count))
scores_window.append(normalized_score)
completion_window.append(completion)
smoothed_normalized_score = np.mean(scores_window)
smoothed_completion = np.mean(completion_window)
'''
Deep Learning for Data Science: Assignment 3
Submitted by: <NAME> (<EMAIL>)
Description - Generalize to k-layer Neural Network
- Implement Batch Normalization
- Implement moving average
- Training using mini batch Gradient Descent
'''
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
from scipy.spatial import distance
from sklearn import preprocessing
import copy
import os
import pickle
class kLayerNN(object):
def __init__(self, filePath, GradCheckParams, GradDescentParams, NNParams):
if NNParams['loadAllBatches']:
# Call LoadBatch function to get training, validation and test set data
X1, Y1, y1 = self.LoadBatch(
filePath + '/Datasets/cifar-10-python/cifar-10-batches-py/data_batch_1')
X2, Y2, y2 = self.LoadBatch(
filePath + '/Datasets/cifar-10-python/cifar-10-batches-py/data_batch_2')
X3, Y3, y3 = self.LoadBatch(
filePath + '/Datasets/cifar-10-python/cifar-10-batches-py/data_batch_3')
X4, Y4, y4 = self.LoadBatch(
filePath + '/Datasets/cifar-10-python/cifar-10-batches-py/data_batch_4')
X5, Y5, y5 = self.LoadBatch(
filePath + '/Datasets/cifar-10-python/cifar-10-batches-py/data_batch_5')
X6, Y6, y6 = self.LoadBatch(
filePath + '/Datasets/cifar-10-python/cifar-10-batches-py/test_batch')
self.Xtrain = np.concatenate((X1, X2, X3, X4, X5[:, 0:9000]), axis=1)
self.Ytrain = np.concatenate((Y1, Y2, Y3, Y4, Y5[:, 0:9000]), axis=1)
self.ytrain = np.concatenate((y1, y2, y3, y4, y5[0:9000]))
self.Xval = X5[:, 9000:10000]
self.Yval = Y5[:, 9000:10000]
self.yval = y5[9000:10000]
self.Xtest = X6
self.Ytest = Y6
self.ytest = y6
else:
# Call LoadBatch function to get training, validation and test set data
self.Xtrain, self.Ytrain, self.ytrain = self.LoadBatch(filePath +
'/Datasets/cifar-10-python/cifar-10-batches-py/data_batch_1')
self.Xval, self.Yval, self.yval = self.LoadBatch(filePath +
'/Datasets/cifar-10-python/cifar-10-batches-py/data_batch_2')
self.Xtest, self.Ytest, self.ytest = self.LoadBatch(filePath +
'/Datasets/cifar-10-python/cifar-10-batches-py/test_batch')
# Normalize Data by subtracting mean
self.ZeroMean()
# Assign all GradCheckParams
self.h = GradCheckParams['h']
self.eps = GradCheckParams['eps']
self.tol1 = GradCheckParams['tol1']
# Assign all GradDescentParams
self.sigma = GradDescentParams['sigma']
self.eta = GradDescentParams['eta']
self.lmbda = GradDescentParams['lmbda']
self.rho = GradDescentParams['rho']
self.nEpoch = GradDescentParams['nEpoch']
self.nBatch = GradDescentParams['nBatch']
#self.BatchSize = round(self.Xtrain.shape[1]/self.nBatch)
self.epsilon = GradDescentParams['epsilon']
self.alpha = GradDescentParams['alpha']
# Assign all NNParams
self.d = NNParams['d']
self.k = NNParams['k']
self.n = NNParams['n']
self.m = NNParams['m']
self.nLayers = len(self.m) + 1
self.batchNorm = NNParams['batchNorm']
# Initialize Weights
self.InitializeWeightAndBias('Gaussian')
# Initialize mu_avg and var_avg for exponential moving average
self.mu_avg = [np.zeros_like(self.b[i]) for i in range(1,self.nLayers)]
self.var_avg = [np.zeros_like(self.b[i]) for i in range(1,self.nLayers)]
def unpickle(self, file):
'''
Function: unpickle
Input: file name
Output: data in form of dictionary
'''
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def LoadBatch(self, fileName):
'''
Function: LoadBatch
Input: path to a file
Output: Images (X), labels (y) and one-hot encoding (Y)
'''
dict = self.unpickle(fileName)
X = np.array(dict[b'data']/255)
y = np.array(dict[b'labels'])
binarizer = preprocessing.LabelBinarizer()
binarizer.fit(range(max(y.astype(int)) + 1))
Y1 = np.array(binarizer.transform(y.astype(int))).T
return np.transpose(X), np.transpose(Y1.T), y
def ZeroMean(self):
mean_Xtrain = np.reshape(np.mean(self.Xtrain, 1), (-1, 1))
self.Xtrain -= mean_Xtrain
self.Xval -= mean_Xtrain
self.Xtest -= mean_Xtrain
def InitializeWeightAndBias(self, type='Gaussian'):
'''
Input: Type of weights. Possible choices: Gaussian, Javier, He
Output: W and b; both are lists
'''
if type == 'Gaussian':
np.random.seed(400)
self.W = []
self.W.append([])
self.b = []
self.b.append([])
self.W.append(np.random.randn(
list(self.m)[0], self.d) * self.sigma)
self.b.append(np.zeros((list(self.m)[0], 1)))
for i in range(self.nLayers - 2):
self.W.append(np.random.randn(
self.m[i+1], self.m[i]) * self.sigma)
self.b.append(np.zeros((self.m[i+1], 1)))
self.W.append(np.random.randn(
self.k, list(self.m)[-1]) * self.sigma)
self.b.append(np.zeros((self.k, 1)))
# FUTURE: Add other initializations
def BatchNormalize(self, s, mu, var):
V = np.array([var + self.epsilon])
Vinv0_5 = V**-0.5
sHat = np.multiply((s-mu), Vinv0_5.T)
return sHat
def BatchNormBackPass(self, dJdsHat, s, mu, var):
'''
Input: g (dJ/dsHat), s, mu, var
Output: g (dJ/ds)
Comments: Refer to last slide of Lec 4
'''
N = dJdsHat.shape[0]
V = np.array([var + self.epsilon])
Vinv1_5 = V**-1.5
dJdvar = -0.5 * np.sum(np.multiply(np.multiply(dJdsHat, Vinv1_5),(s-mu).T), axis = 0)
Vinv0_5 = V**-0.5
dJdmu = - np.sum(np.multiply(dJdsHat, Vinv0_5), axis = 0)
dJds = np.multiply(dJdsHat, Vinv0_5) + 2/N * np.multiply(dJdvar, (s-mu).T) + dJdmu/N
return dJds
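# For reference, BatchNormBackPass implements the usual batch-norm gradients for
# sHat = (s - mu) * (var + eps)^(-1/2) over a batch of size N, matching the code above:
#   dJ/dvar = -1/2 * sum_i dJ/dsHat_i * (s_i - mu) * (var + eps)^(-3/2)
#   dJ/dmu  = -sum_i dJ/dsHat_i * (var + eps)^(-1/2)
#   dJ/ds_i = dJ/dsHat_i * (var + eps)^(-1/2) + (2/N) * dJ/dvar * (s_i - mu) + dJ/dmu / N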
def EvaluateClassifier2(self, x, Wt, bias):
N = x.shape[1]
if x.ndim == 1:
x = np.reshape(x, (-1, 1))
h = []
s = []
sHat = []
mu = []
var = []
mu.append([])
var.append([])
h.append(x)
s.append([])
sHat.append([])
for i in range(self.nLayers-1):
s.append(np.dot(Wt[i+1], h[i]) + bias[i+1])
# calculate mu and variance
mu.append(np.reshape(np.mean(s[i+1], axis = 1), (-1, 1)))
# var.append(np.reshape(np.sum(((s[i+1]-mu[i+1])**2), 1)/N, (-1, 1))) #DIAG OF THIS IS SCALAR!!!
# DIAG OF THIS IS SQUARE MATRIX!!!
var.append(np.sum(((s[i+1]-mu[i+1])**2), 1)/N)
# Exponential Moving Average
# temp_var = 0
# for j in range(self.nLayers):
# if self.mu_avg[j].all() == 0:
# temp_var = temp_var + 1
if self.mu_avg[i].all() == 0:
# all elements are zero, so this is first ever evaluation step
self.mu_avg[i] = mu[i+1]
self.var_avg[i] = var[i+1]
else:
self.mu_avg[i] = self.alpha * self.mu_avg[i] + (1 - self.alpha) * mu[i+1]
self.var_avg[i] = self.alpha * self.var_avg[i] + (1 - self.alpha) * var[i+1]
sHat.append(self.BatchNormalize(s[i+1], mu[i+1], var[i+1]))
if self.batchNorm:
h.append(np.maximum(0, sHat[i+1])) ###CHANGE TO sHat
else:
h.append(np.maximum(0, s[i+1]))
# for final layer:
s.append(np.dot(Wt[self.nLayers],
h[self.nLayers-1]) + bias[self.nLayers])
# compute softmax function of s
p = np.exp(s[self.nLayers])
p = p / np.sum(p, axis=0)
return p, s, sHat, h, mu, var
def ComputeCost2(self, X, Y, Wt, bias):
N = X.shape[1]
p, _, _, _, _, _ = self.EvaluateClassifier2(X, Wt, bias)
A = np.diag(np.dot(Y.T, p))
B = -np.log(A)
import os
import pycqed as pq
import unittest
import numpy as np
from scipy.spatial import ConvexHull
import adaptive
import pycqed.analysis.analysis_toolbox as a_tools
from pycqed.measurement import measurement_control
from pycqed.measurement.sweep_functions import (
None_Sweep,
None_Sweep_idx,
None_Sweep_With_Parameter_Returned,
)
import pycqed.measurement.detector_functions as det
from pycqed.instrument_drivers.physical_instruments.dummy_instruments import (
DummyParHolder,
)
from pycqed.measurement.optimization import nelder_mead, SPSA
from pycqed.utilities.learner1D_minimizer import (Learner1D_Minimizer,
mk_minimization_loss_func, mk_minimization_goal_func)
from pycqed.analysis import measurement_analysis as ma
from pycqed.utilities.get_default_datadir import get_default_datadir
from pycqed.measurement.hdf5_data import read_dict_from_hdf5
from qcodes.instrument.parameter import ManualParameter
from qcodes import station
class Test_MeasurementControl(unittest.TestCase):
@classmethod
def setUpClass(self):
self.station = station.Station()
self.MC = measurement_control.MeasurementControl(
"MC", live_plot_enabled=True, verbose=True
)
self.MC.station = self.station
self.station.add_component(self.MC)
self.mock_parabola = DummyParHolder("mock_parabola")
self.station.add_component(self.mock_parabola)
def setUp(self):
self.MC.soft_avg(1)
def test_soft_sweep_1D(self):
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
dat = self.MC.run("1D_soft")
dset = dat["dset"]
x = dset[:, 0]
xr = np.arange(len(x)) / 15
y = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
y0 = dset[:, 1]
y1 = dset[:, 2]
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y0, y[0, :])
np.testing.assert_array_almost_equal(y1, y[1, :])
# Test that the return dictionary has the right entries
dat_keys = set(
[
"dset",
"opt_res",
"opt_res_dset",
"sweep_parameter_names",
"sweep_parameter_units",
"value_names",
"value_units",
]
)
self.assertEqual(dat_keys, set(dat.keys()))
self.assertEqual(dat["sweep_parameter_names"], ["pts"])
self.assertEqual(dat["sweep_parameter_units"], ["arb. unit"])
self.assertEqual(dat["value_names"], ["I", "Q"])
self.assertEqual(dat["value_units"], ["V", "V"])
def test_soft_sweep_1D_alt_shape(self):
# This is a generalization of a 1D sweep function where instead of
# a shape (2,) it has a shape (2, 1). This is inconsistent with the
# N-D hard sweeps and should be addressed.
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft_diff_shape())
dat = self.MC.run("1D_soft")
dset = dat["dset"]
x = dset[:, 0]
xr = np.arange(len(x)) / 15
y = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
y0 = dset[:, 1]
y1 = dset[:, 2]
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y0, y[0, :])
np.testing.assert_array_almost_equal(y1, y[1, :])
# Test that the return dictionary has the right entries
dat_keys = set(
[
"dset",
"opt_res",
"opt_res_dset",
"sweep_parameter_names",
"sweep_parameter_units",
"value_names",
"value_units",
]
)
self.assertEqual(dat_keys, set(dat.keys()))
self.assertEqual(dat["sweep_parameter_names"], ["pts"])
self.assertEqual(dat["sweep_parameter_units"], ["arb. unit"])
self.assertEqual(dat["value_names"], ["I", "Q"])
self.assertEqual(dat["value_units"], ["V", "V"])
@unittest.skipIf(True, "This test is currently broken")
def test_data_location(self):
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
self.MC.run("datadir_test_file")
# raises an error if the file is not found
ma.MeasurementAnalysis(label="datadir_test_file")
# change the datadir
test_dir2 = os.path.abspath(
os.path.join(os.path.dirname(pq.__file__), os.pardir, "data_test_2")
)
self.MC.datadir(test_dir2)
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
self.MC.run("datadir_test_file_2")
# raises an error if the file is not found
with self.assertRaises(Exception):
ma.MeasurementAnalysis(label="datadir_test_file_2")
ma.a_tools.datadir = test_dir2
# changing the dir makes it find the file now
ma.MeasurementAnalysis(label="datadir_test_file_2")
self.MC.datadir(get_default_datadir())
def test_hard_sweep_1D(self):
sweep_pts = np.linspace(0, 10, 5)
self.MC.set_sweep_function(None_Sweep(sweep_control="hard"))
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Hard())
dat = self.MC.run("1D_hard")
dset = dat["dset"]
x = dset[:, 0]
y = [np.sin(x / np.pi), np.cos(x / np.pi)]
y0 = dset[:, 1]
y1 = dset[:, 2]
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y0, y[0])
np.testing.assert_array_almost_equal(y1, y[1])
d = self.MC.detector_function
self.assertEqual(d.times_called, 1)
def test_soft_sweep_2D(self):
sweep_pts = np.linspace(0, 10, 30)
sweep_pts_2D = np.linspace(0, 10, 5)
self.MC.set_sweep_function(None_Sweep(sweep_control="soft"))
self.MC.set_sweep_function_2D(None_Sweep(sweep_control="soft"))
self.MC.set_sweep_points(sweep_pts)
self.MC.set_sweep_points_2D(sweep_pts_2D)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
dat = self.MC.run("2D_soft", mode="2D")
dset = dat["dset"]
x = dset[:, 0]
y = dset[:, 1]
xr = np.arange(len(sweep_pts) * len(sweep_pts_2D)) / 15
z = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
z0 = dset[:, 2]
z1 = dset[:, 3]
x_tiled = np.tile(sweep_pts, len(sweep_pts_2D))
y_rep = np.repeat(sweep_pts_2D, len(sweep_pts))
np.testing.assert_array_almost_equal(x, x_tiled)
np.testing.assert_array_almost_equal(y, y_rep)
np.testing.assert_array_almost_equal(z0, z[0, :])
np.testing.assert_array_almost_equal(z1, z[1, :])
def test_soft_sweep_2D_with_reading_of_set_parameter(self):
sweep_pts = np.linspace(0, 10, 30)
sweep_pts_2D = np.linspace(0, 10, 5)
self.MC.set_sweep_function(
None_Sweep_With_Parameter_Returned(sweep_control="soft")
)
self.MC.set_sweep_function_2D(
None_Sweep_With_Parameter_Returned(sweep_control="soft")
)
self.MC.set_sweep_points(sweep_pts)
self.MC.set_sweep_points_2D(sweep_pts_2D)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
dat = self.MC.run("2D_soft", mode="2D")
dset = dat["dset"]
x = dset[:, 0]
y = dset[:, 1]
xr = np.arange(len(sweep_pts) * len(sweep_pts_2D)) / 15
z = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
z0 = dset[:, 2]
z1 = dset[:, 3]
# The +0.1 is to test if the return value is matching
x_tiled = np.tile(sweep_pts + 0.1, len(sweep_pts_2D))
y_rep = np.repeat(sweep_pts_2D + 0.1, len(sweep_pts))
np.testing.assert_array_almost_equal(x, x_tiled)
np.testing.assert_array_almost_equal(y, y_rep)
np.testing.assert_array_almost_equal(z0, z[0, :])
np.testing.assert_array_almost_equal(z1, z[1, :])
def test_soft_sweep_2D_function_calls(self):
sweep_pts = np.arange(0, 30, 1)
sweep_pts_2D = np.arange(0, 5, 1)
s1 = None_Sweep_idx(sweep_control="soft")
s2 = None_Sweep_idx(sweep_control="soft")
self.MC.set_sweep_function(s1)
self.MC.set_sweep_function_2D(s2)
self.MC.set_sweep_points(sweep_pts)
self.MC.set_sweep_points_2D(sweep_pts_2D)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
self.assertEqual(s1.num_calls, 0)
self.assertEqual(s2.num_calls, 0)
self.MC.run("2D_soft", mode="2D")
# Test that the 2D scan only gets called 5 times (when it changes)
# The 1D value always changes and as such should always be called
self.assertEqual(s1.num_calls, 30 * 5)
self.assertEqual(s2.num_calls, 5)
def test_hard_sweep_2D(self):
"""
Hard inner loop, soft outer loop
"""
sweep_pts = np.linspace(10, 20, 3)
sweep_pts_2D = np.linspace(0, 10, 5)
self.MC.live_plot_enabled(False)
self.MC.set_sweep_function(None_Sweep(sweep_control="hard"))
self.MC.set_sweep_function_2D(None_Sweep(sweep_control="soft"))
self.MC.set_sweep_points(sweep_pts)
self.MC.set_sweep_points_2D(sweep_pts_2D)
self.MC.set_detector_function(det.Dummy_Detector_Hard())
dat = self.MC.run("2D_hard", mode="2D")
dset = dat["dset"]
x = dset[:, 0]
y = dset[:, 1]
z = self.data = [np.sin(x / np.pi), np.cos(x / np.pi)]
z0 = dset[:, 2]
z1 = dset[:, 3]
x_tiled = np.tile(sweep_pts, len(sweep_pts_2D))
y_rep = np.repeat(sweep_pts_2D, len(sweep_pts))
np.testing.assert_array_almost_equal(x, x_tiled)
np.testing.assert_array_almost_equal(y, y_rep)
np.testing.assert_array_almost_equal(z0, z[0])
np.testing.assert_array_almost_equal(z1, z[1])
d = self.MC.detector_function
self.assertEqual(d.times_called, 5)
self.MC.live_plot_enabled(True)
def test_many_shots_hard_sweep(self):
"""
Tests acquiring more than the maximum number of shots for a hard
detector by setting the number of sweep points high
"""
sweep_pts = np.arange(50)
self.MC.set_sweep_function(None_Sweep(sweep_control="hard"))
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Shots_Detector(max_shots=5))
dat = self.MC.run("man_shots")
dset = dat["dset"]
x = dset[:, 0]
y = dset[:, 1]
self.assertEqual(np.shape(dset), (len(sweep_pts), 2))
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y, sweep_pts)
d = self.MC.detector_function
self.assertEqual(d.times_called, 10)
def test_variable_sized_return_values_hard_sweep(self):
"""
Tests a detector that acquires data in chunks of varying sizes
"""
self.MC.soft_avg(1)
counter_param = ManualParameter("counter", initial_value=0)
def return_variable_size_values():
idx = counter_param() % 3
counter_param(counter_param() + 1)
if idx == 0:
return np.arange(0, 7)
elif idx == 1:
return np.arange(7, 11)
elif idx == 2:
return np.arange(11, 30)
sweep_pts = np.arange(30)
d = det.Function_Detector(
get_function=return_variable_size_values,
value_names=["Variable size counter"],
detector_control="hard",
)
self.MC.set_sweep_function(None_Sweep(sweep_control="hard"))
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(d)
dat = self.MC.run("varying_chunk_size")
dset = dat["dset"]
x = dset[:, 0]
y = dset[:, 1]
self.assertEqual(np.shape(dset), (len(sweep_pts), 2))
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y, sweep_pts)
self.assertEqual(self.MC.total_nr_acquired_values, 1 * 30)
def test_soft_sweep_hard_det_1D(self):
def mock_func():
# to also test if the values are set correctly in the sweep
arr = np.zeros([2, 2])
"""
Author: <NAME>
License: MIT
"""
import numpy as np
from xdgmm import XDGMM
class Empiricist(object):
"""
Worker object that can fit supernova and host galaxy parameters
given noisy inputs using an XDGMM model, and then predict new
supernovae based on this model and a set of new host galaxies.
Parameters
----------
model_file: string (optional)
Name of text file containing model being used (default=None).
fit_method: string (optional)
Name of XD fitting method to use (default='astroML'). Must be
either 'astroML' or 'Bovy'.
Notes
-----
The class can be initialized with a model or one can be loaded or
fit to data.
"""
def __init__(self, model_file=None, fit_method='astroML'):
self.XDGMM = XDGMM(n_components=7, method=fit_method)
self.fit_method = fit_method
if model_file is not None:
    self.read_model(model_file)
else:
    self.model_file = None
def get_SN(self, X, Xerr=None, n_SN=1):
"""
Conditions the XDGMM model based on the data in X and returns
SN parameters sampled from the conditioned model.
Parameters
----------
X: array_like, shape = (n_samples, n_features)
Input data. First 3 entries (SN parameters) should be NaN.
Xerr: array_like, shape = (n_samples, n_features), optional
Error on input data. SN errors should be 0.0. If None,
errors are not used for the conditioning.
n_SN: int (optional)
Number of SNe to sample (default = 1).
Returns
-------
SN_data: array_like, shape = (n_SN, 3)
Sample of SN data taken from the conditioned model.
Notes
-----
Assumes that the first three parameters used when fitting
the model are the SN parameters.
"""
if self.model_file is None:
raise RuntimeError("Model parameters not set.")
if Xerr is None: cond_XDGMM = self.XDGMM.condition(X)
else: cond_XDGMM = self.XDGMM.condition(X, Xerr)
return np.atleast_2d(cond_XDGMM.sample(n_SN))
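# A minimal usage sketch (hypothetical feature values; assumes a model whose
# first three features are the SALT2 SN parameters, as described above):
#   worker = Empiricist(model_file='empiriciSN_model.fit')
#   X = np.array([np.nan, np.nan, np.nan, 0.05, 1.1, 0.8, 0.4, 0.3, 21.5])
#   sn_samples = worker.get_SN(X, n_SN=10)   # shape (10, 3)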
def fit_model(self, X, Xerr, filename='empiriciSN_model.fit',
n_components=6):
"""
Fits the XD model to data.
Parameters
----------
X: array_like, shape = (n_samples, n_features)
Input data.
Xerr: array_like, shape = (n_samples, n_features, n_features)
Error on input data.
filename: string (optional)
Filename for model fit to be saved to (default =
'empiriciSN_model.fit').
n_components: float (optional)
Number of Gaussian components to use (default = 6)
Notes
-----
The specified method and n_components Gaussian components will
be used (typical BIC-optimized numbers of components for ~100s
of training datapoints are 6 or 7).
The fit will be saved in the file with name defined by the
filename variable.
"""
self.XDGMM.n_components = n_components
self.XDGMM = self.XDGMM.fit(X, Xerr)
self.XDGMM.save_model(filename)
self.model_file = filename
return
def fit_from_files(self, filelist, filename='empiriciSN_model.fit',
n_components=7):
"""
Fits the XD model to data contained in the files provided.
Parameters
----------
filelist: array_like
Array of strings containing names of files containing data
to fit.
filename: string (optional)
Filename for model fit (default = 'empiriciSN_model.fit').
n_components: float (optional)
Number of Gaussian components to use (default = 7)
method: string (optional)
XD fitting method to use (default = 'astroML')
Notes
-----
The model is fitted using the data contained in the files
named in the `filelist` variable. This assumes that the data
files are in the same format as those provided with this code
and that only redshift, distance from host nucleus, host colors,
and local host surface brightness are being used for the fit.
"""
X, Xerr = self.get_data(filelist)
self.fit_model(X, Xerr, filename=filename,
n_components=n_components)
return
def read_model(self, filename):
"""
Reads the parameters of a model from a file.
Parameters
----------
filename: string
Name of the file to read from.
Notes
-----
Model parameters are stored in the self.XDGMM model object.
The model filename is stored self.model_file.
"""
self.XDGMM.read_model(filename)
self.model_file = filename
return
def component_test(self, X, Xerr, component_range, no_err=False):
"""
Test the performance of the model for a range of numbers of
Gaussian components.
Parameters
----------
X: array_like, shape = (n_samples, n_features)
Input data.
Xerr: array_like, shape = (n_samples, n_features, n_features)
Error on input data.
component_range: array_like
Range of n_components to test.
no_err: bool (optional)
Flag for whether to calculate the BIC with the errors
included or not. (default = False)
Returns
-------
bics: array_like, shape = (len(component_range),)
BIC for each value of n_components
optimal_n_comp: float
Number of components with lowest BIC score
lowest_bic: float
Lowest BIC from the scores computed.
Notes
-----
Uses the XDGMM.bic_test method to compute the BIC score for
each n_components in the component_range array.
"""
bics, optimal_n_comp, lowest_bic = \
self.XDGMM.bic_test(X, Xerr, component_range, no_err)
return bics, optimal_n_comp, lowest_bic
def get_logR(self,cond_indices, R_index, X, Xerr=None):
"""
Uses a subset of parameters in the given data to condition the
model and return a sample value for log(R/Re).
Parameters
----------
cond_indices: array_like
Array of indices indicating which parameters to use to
condition the model. Cannot contain [0, 1, 2] since these
are SN parameters.
R_index: int
Index of log(R/Re) in the list of parameters that were used
to fit the model.
X: array_like, shape = (n < n_features,)
Input data.
Xerr: array_like, shape = (X.shape,) (optional)
Error on input data. If none, no error used to condition.
Returns
-------
logR: float
Sample value of log(R/Re) taken from the conditioned model.
Notes
-----
The fit_params array specifies a list of indices to use to
condition the model. The model will be conditioned and then
a radius will be drawn from the conditioned model.
This is so that the radius can then be used to calculate local
surface brightness to fully condition the model to sample
likely SN parameters.
This does not make assumptions about what parameters are being
used in the model, but does assume that the model has been
fit already and that the first three parameters in the data
that were used to fit the model are the SN parameters.
"""
if self.model_file is None:
raise RuntimeError("Model parameters not set.")
if 0 in cond_indices or 1 in cond_indices or 2 in cond_indices:
raise ValueError("Cannot condition model on SN parameters.")
if R_index in cond_indices:
raise ValueError("Cannot condition model on log(R/Re).")
cond_data = np.array([])
if Xerr is not None: cond_err = np.array([])
R_cond_idx = R_index
n_features = self.XDGMM.mu.shape[1]
j = 0
for i in range(n_features):
if i in cond_indices:
cond_data = np.append(cond_data,X[j])
if Xerr is not None: cond_err = np.append(cond_err, Xerr[j])
j += 1
if i < R_index: R_cond_idx -= 1
else:
cond_data = np.append(cond_data,np.nan)
if Xerr is not None: cond_err = np.append(cond_err, 0.0)
if Xerr is not None:
cond_XDGMM = self.XDGMM.condition(cond_data, cond_err)
else: cond_XDGMM = self.XDGMM.condition(cond_data)
sample = cond_XDGMM.sample()
logR = sample[0][R_cond_idx]
return logR
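# Sketch of the two-stage conditioning described in the Notes above (indices and
# host values are purely illustrative; they depend on the feature ordering used
# when the model was fit):
#   host_X = np.array([0.05, 0.8, 0.4, 0.3, 21.5])
#   logR = worker.get_logR(cond_indices=np.array([3, 5, 6, 7, 8]), R_index=4, X=host_X)
#   # ...then compute local surface brightness at logR via get_local_SB and
#   # condition the model fully before sampling SN parameters with get_SN.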
def get_local_SB(self, SB_params, R ):
"""
Uses magnitudes, a surface brightness (SB) profile, and
a SN location to fit local surface brightnesses at the location
of the SN.
Parameters
----------
SB_params: array_like, shape = (21,)
Array of parameters needed for the SB fit. First entry
should be a sersic index of 1 or 4, indicating whether to
use an exponential or de Vaucouleurs profile. Following this
should be sets of
(magnitude, mag_unc, effective radius, rad_unc) data for
each of the 5 ugriz filters, giving a total array length of
21. These data are assumed to be known by the user.
R: float
Separation from host nucleus in units of log(R/Re).
It is assumed that the Re used here is the r-band Re, as is
output by the get_logR function.
Returns
-------
SBs: array_list, shape = (5,)
Local surface brightness at the location of the SN for each
of the 5 ugriz filters. Units = mag/arcsec^2
SB_errs: array_like, shape = (5,)
Uncertainties on the local surface brightnesses.
"""
if SB_params[0]!=1 and SB_params[0]!=4:
raise ValueError("Sersic index must be 1 or 4")
sep = (10**R) * SB_params[11] # separation in arcsec
SBs = np.array([])
SB_errs = np.array([])
for j in range(5):
halfmag = SB_params[j*4+1] + 0.75257
magerr = SB_params[j*4+2]
Re = SB_params[j*4+3]
Re_err = SB_params[j*4+4]
r = sep/Re
Ie = halfmag + 2.5 * np.log10(np.pi*Re**2)
Re2_unc = 2 * Re * Re_err * np.pi
log_unc = 2.5 * Re2_unc/(np.log10(np.pi*Re**2) * np.log(10))
Ie_unc = np.sqrt(magerr**2 + log_unc**2)
if SB_params[0] == 1:
Io = Ie-1.824
Io_unc = Ie_unc
sb = Io*np.exp(-1.68*(r))
exp_unc = np.exp(-1.68*(r))*1.68*sep*Re_err/(Re**2)
sb_unc = sb * np.sqrt((Io_unc/Io)**2 +
(exp_unc/np.exp(-1.68*(r)))**2)
if np.isnan(sb_unc): sb_unc = 0.0
if sb_unc < 0: sb_unc = sb_unc*-1.0
SBs = np.append(SBs,sb)
SB_errs = np.append(SB_errs,sb_unc)
if SB_params[0] == 4:
Io = Ie-8.328
Io_unc = Ie_unc
sb = Io*np.exp(-7.67*((r)**0.25))
exp_unc = np.exp(-7.67*((r)**0.25))*7.67*sep \
*Re_err/(4*Re**(1.25))
sb_unc = sb*np.sqrt((Io_unc/Io)**2+(exp_unc \
/np.exp(-7.67*((r)**0.25))))
if np.isnan(sb_unc): sb_unc = 0.0
if sb_unc < 0: sb_unc = sb_unc*-1.0
SBs = np.append(SBs,sb)
SB_errs = np.append(SB_errs,sb_unc)
return SBs, SB_errs
def set_fit_method(self, fit_method):
"""
Sets the XD fitting method to use.
Parameters
----------
fit_method: string
Name of fitting method to use. Must be either 'astroML' or
'Bovy'.
Notes
-----
Changes the fitting method of self.XDGMM to the one specified
in `fit_method`.
"""
if fit_method == 'astroML':
n_iter = 100
elif fit_method == 'Bovy':
n_iter = 10**9
else:
raise ValueError("Method must be either 'astroML' or 'Bovy'")
self.XDGMM.method = fit_method
self.XDGMM.n_iter = n_iter
self.fit_method = fit_method
return
def get_data(self, filelist):
"""
Parses SN and host data from a list of data files.
Parameters
----------
filelist: array_like
Array of strings containing names of files containing data
to fit.
Returns
-------
X: array_like, shape = (n_samples, n_features)
Output data. Contains SALT2 SN parameters, host redshift,
log(R/Re), host colors, and host brightnesses at the
locations of the SN in each filter.
Xerr: array_like, shape = (n_samples, n_features, n_features)
Error on output data.
Notes
-----
Reads in each data file and returns an array of data and a
matrix of errors, which can be used to fit the XDGMM model.
Currently reads the SALT2 SN parameters, host redshift,
log(R/Re), host magnitudes, and host surface brightnesses
at the location of the SN.
This method needs further modularizing, to enable the worker
to calculate host surface brightnesses separately (in a static method).
"""
x0 = np.array([])
x0_err = np.array([])
x1 = np.array([])
x1_err = np.array([])
c = np.array([])
c_err = np.array([])
z = np.array([])
z_err = np.array([])
logr = np.array([])
logr_err = np.array([])
umag = np.array([])
umag_err = np.array([])
gmag = np.array([])
gmag_err = np.array([])
rmag = np.array([])
rmag_err = np.array([])
imag = np.array([])
imag_err = np.array([])
zmag = np.array([])
zmag_err = np.array([])
SB_u = np.array([])
SB_u_err = np.array([])
SB_g = np.array([])
SB_g_err = np.array([])
SB_r = np.array([])
SB_r_err = np.array([])
SB_i = np.array([])
SB_i_err = np.array([])
from math import fabs
import numpy as np
from numba import jit
from numba.extending import overload
@overload(np.clip)
def np_clip(a, a_min, a_max, out=None):
    """
    Numba Overload of np.clip
    :type a: np.ndarray
    :type a_min: int
    :type a_max: int
    :type out: np.ndarray
    :rtype: np.ndarray
    """
    # An @overload function is called with argument types at compile time and
    # must return the implementation that numba will compile.
    def np_clip_impl(a, a_min, a_max, out=None):
        if out is None:
            out = np.empty_like(a)
        for i in range(len(a)):
            if a[i] < a_min:
                out[i] = a_min
            elif a[i] > a_max:
                out[i] = a_max
            else:
                out[i] = a[i]
        return out
    return np_clip_impl
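# With this overload registered, np.clip can be called from the nopython-jitted
# functions below (e.g. rsi); outside jitted code NumPy's own np.clip is used.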
@jit(nopython=True)
def convolve(data, kernel):
"""
Convolution 1D Array
:type data: np.ndarray
:type kernel: np.ndarray
:rtype: np.ndarray
"""
size_data = len(data)
size_kernel = len(kernel)
size_out = size_data - size_kernel + 1
out = np.array([np.nan] * size_out)
kernel = np.flip(kernel)
for i in range(size_out):
window = data[i:i + size_kernel]
out[i] = sum([window[j] * kernel[j] for j in range(size_kernel)])
return out
@jit(nopython=True)
def sma(data, period):
"""
Simple Moving Average
:type data: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(data)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
window = data[i - period + 1:i + 1]
out[i] = np.mean(window)
return out
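# Quick usage sketch (synthetic data, not part of the original module):
#   prices = np.cumsum(np.random.randn(500)) + 100.0
#   print(sma(prices, 20)[-1])   # latest 20-period simple moving average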
@jit(nopython=True)
def wma(data, period):
"""
Weighted Moving Average
:type data: np.ndarray
:type period: int
:rtype: np.ndarray
"""
weights = np.arange(period, 0, -1)
weights = weights / weights.sum()
out = convolve(data, weights)
return np.concatenate((np.array([np.nan] * (len(data) - len(out))), out))
@jit(nopython=True)
def cma(data):
"""
Cumulative Moving Average
:type data: np.ndarray
:rtype: np.ndarray
"""
size = len(data)
out = np.array([np.nan] * size)
last_sum = np.array([np.nan] * size)
last_sum[1] = sum(data[:2])
for i in range(2, size):
last_sum[i] = last_sum[i - 1] + data[i]
out[i] = last_sum[i] / (i + 1)
return out
@jit(nopython=True)
def ema(data, period, smoothing=2.0):
"""
Exponential Moving Average
:type data: np.ndarray
:type period: int
:type smoothing: float
:rtype: np.ndarray
"""
size = len(data)
weight = smoothing / (period + 1)
out = np.array([np.nan] * size)
out[0] = data[0]
for i in range(1, size):
out[i] = (data[i] * weight) + (out[i - 1] * (1 - weight))
out[:period - 1] = np.nan
return out
@jit(nopython=True)
def ewma(data, period, alpha=1.0):
"""
Exponential Weighted Moving Average
:type data: np.ndarray
:type period: int
:type alpha: float
:rtype: np.ndarray
"""
weights = (1 - alpha) ** np.arange(period)
weights /= np.sum(weights)
out = convolve(data, weights)
return np.concatenate((np.array([np.nan] * (len(data) - len(out))), out))
@jit(nopython=True)
def dema(data, period, smoothing=2.0):
"""
Double Exponential Moving Average
:type data: np.ndarray
:type period: int
:type smoothing: float
:rtype: np.ndarray
"""
return (2 * ema(data, period, smoothing)) - ema(ema(data, period, smoothing), period, smoothing)
@jit(nopython=True)
def trix(data, period, smoothing=2.0):
"""
Triple Exponential Moving Average
:type data: np.ndarray
:type period: int
:type smoothing: float
:rtype: np.ndarray
"""
return ((3 * ema(data, period, smoothing) - (3 * ema(ema(data, period, smoothing), period, smoothing))) +
ema(ema(ema(data, period, smoothing), period, smoothing), period, smoothing))
@jit(nopython=True)
def macd(data, fast, slow, smoothing=2.0):
"""
Moving Average Convergence Divergence
:type data: np.ndarray
:type fast: int
:type slow: int
:type smoothing: float
:rtype: np.ndarray
"""
return ema(data, fast, smoothing) - ema(data, slow, smoothing)
@jit(nopython=True)
def stoch(c_close, c_high, c_low, period_k, period_d):
"""
Stochastic
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period_k: int
:type period_d: int
:rtype: (np.ndarray, np.ndarray)
"""
size = len(c_close)
k = np.array([np.nan] * size)
for i in range(period_k - 1, size):
e = i + 1
s = e - period_k
ml = np.min(c_low[s:e])
k[i] = ((c_close[i] - ml) / (np.max(c_high[s:e]) - ml)) * 100
return k, sma(k, period_d)
@jit(nopython=True)
def kdj(c_close, c_high, c_low, period_rsv=9, period_k=3, period_d=3, weight_k=3, weight_d=2):
"""
KDJ
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period_rsv: int
:type period_k: int
:type period_d: int
:type weight_k: int
:type weight_d: int
:rtype: (np.ndarray, np.ndarray, np.ndarray)
"""
size = len(c_close)
rsv = np.array([np.nan] * size)
for i in range(period_k - 1, size):
e = i + 1
s = e - period_k
ml = np.min(c_low[s:e])
rsv[i] = ((c_close[i] - ml) / (np.max(c_high[s:e]) - ml)) * 100
k = sma(rsv, period_rsv)
d = sma(k, period_d)
return k, d, (weight_k * k) - (weight_d * d)
@jit(nopython=True)
def wpr(c_close, c_high, c_low, period):
"""
William %R
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: (np.ndarray, np.ndarray)
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
mh = np.max(c_high[s:e])
out[i] = ((mh - c_close[i]) / (mh - np.min(c_low[s:e]))) * -100
return out
@jit(nopython=True)
def rsi(data, period, smoothing=2.0, f_sma=True, f_clip=True, f_abs=True):
"""
Relative Strengh Index
:type data: np.ndarray
:type period: int
:type smoothing: float
:type f_sma: bool
:type f_clip: bool
:type f_abs: bool
:rtype: np.ndarray
"""
size = len(data)
delta = np.array([np.nan] * size)
up = np.array([np.nan] * size)
down = np.array([np.nan] * size)
delta = np.diff(data)
if f_clip:
up, down = np.clip(delta, a_min=0, a_max=np.max(delta)), np.clip(delta, a_min=np.min(delta), a_max=0)
else:
up, down = delta.copy(), delta.copy()
up[delta < 0] = 0.0
down[delta > 0] = 0.0
if f_abs:
for i, x in enumerate(down):
down[i] = fabs(x)
else:
down = np.abs(down)
rs = sma(up, period) / sma(down, period) if f_sma else ema(up, period - 1, smoothing) / ema(
down, period - 1, smoothing)
out = np.array([np.nan] * size)
out[1:] = (100 - 100 / (1 + rs))
return out
@jit(nopython=True)
def srsi(data, period, smoothing=2.0, f_sma=True, f_clip=True, f_abs=True):
"""
Stochastic Relative Strengh Index
:type data: np.ndarray
:type period: int
:type smoothing: float
:type f_sma: bool
:type f_clip: bool
:type f_abs: bool
:rtype: np.ndarray
"""
r = rsi(data, period, smoothing, f_sma, f_clip, f_abs)[period:]
s = np.array([np.nan] * len(r))
for i in range(period - 1, len(r)):
window = r[i + 1 - period:i + 1]
mw = np.min(window)
s[i] = ((r[i] - mw) / (np.max(window) - mw)) * 100
return np.concatenate((np.array([np.nan] * (len(data) - len(s))), s))
@jit(nopython=True)
def bollinger_bands(data, period, dev_up=2.0, dev_down=2.0):
"""
Bollinger Bands
:type data: np.ndarray
:type period: int
:type dev_up: float
:type dev_down: float
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: middle, up, down, width
"""
size = len(data)
bb_up = np.array([np.nan] * size)
bb_down = np.array([np.nan] * size)
bb_width = np.array([np.nan] * size)
bb_mid = sma(data, period)
for i in range(period - 1, size):
std_dev = np.std(data[i - period + 1:i + 1])
mid = bb_mid[i]
bb_up[i] = mid + (std_dev * dev_up)
bb_down[i] = mid - (std_dev * dev_down)
bb_width[i] = bb_up[i] - bb_down[i]
return bb_mid, bb_up, bb_down, bb_width
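# Illustrative call (assumes a synthetic `prices` series such as the one sketched
# after sma above): entries before index period-1 are NaN, afterwards up - down == width.
#   mid, up, down, width = bollinger_bands(prices, period=20, dev_up=2.0, dev_down=2.0)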
@jit(nopython=True)
def keltner_channel(c_close, c_open, c_high, c_low, period, smoothing=2.0):
"""
Keltner Channel
:type c_close: np.ndarray
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:type smoothing: float
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: middle, up, down, width
"""
e = ema(c_close, period, smoothing)
aa = 2 * atr(c_open, c_high, c_low, period)
up = e + aa
down = e - aa
return e, up, down, up - down
@jit(nopython=True)
def donchian_channel(c_high, c_low, period):
"""
Donchian Channel
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: middle, up, down, width
"""
size = len(c_high)
out_up = np.array([np.nan] * size)
out_down = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
out_up[i] = np.max(c_high[s:e])
out_down[i] = np.min(c_low[s:e])
return (out_up + out_down) / 2, out_up, out_down, out_up - out_down
@jit(nopython=True)
def heiken_ashi(c_open, c_high, c_low, c_close):
"""
Heiken Ashi
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type c_close: np.ndarray
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: open, high, low, close
"""
ha_close = (c_open + c_high + c_low + c_close) / 4
ha_open = np.empty_like(ha_close)
ha_open[0] = (c_open[0] + c_close[0]) / 2
for i in range(1, len(c_close)):
ha_open[i] = (c_open[i - 1] + c_close[i - 1]) / 2
ha_high = np.maximum(np.maximum(ha_open, ha_close), c_high)
ha_low = np.minimum(np.minimum(ha_open, ha_close), c_low)
return ha_open, ha_high, ha_low, ha_close
@jit(nopython=True)
def ichimoku(data, tenkansen=9, kinjunsen=26, senkou_b=52, shift=26):
"""
Ichimoku
:type data: np.ndarray
:type tenkansen: int
:type kinjunsen: int
:type senkou_b: int
:type shift: int
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: tenkansen, kinjunsen, chikou, senkou a, senkou b
"""
size = len(data)
n_tenkansen = np.array([np.nan] * size)
n_kinjunsen = np.array([np.nan] * size)
n_senkou_b = np.array([np.nan] * (size + shift))
for i in range(tenkansen - 1, size):
window = data[i + 1 - tenkansen:i + 1]
n_tenkansen[i] = (np.max(window) + np.min(window)) / 2
for i in range(kinjunsen - 1, size):
window = data[i + 1 - kinjunsen:i + 1]
n_kinjunsen[i] = (np.max(window) + np.min(window)) / 2
for i in range(senkou_b - 1, size):
window = data[i + 1 - senkou_b:i + 1]
n_senkou_b[i + shift] = (np.max(window) + np.min(window)) / 2
return \
n_tenkansen, n_kinjunsen, np.concatenate(((data[shift:]), (np.array([np.nan] * (size - shift))))), \
np.concatenate((np.array([np.nan] * shift), ((n_tenkansen + n_kinjunsen) / 2))), n_senkou_b
@jit(nopython=True)
def volume_profile(c_close, c_volume, bins=10):
"""
Volume Profile
:type c_close: np.ndarray
:type c_volume: np.ndarray
:type bins: int
:rtype: (np.ndarray, np.ndarray)
:return: count, price
"""
min_close = np.min(c_close)
max_close = np.max(c_close)
norm = 1.0 / (max_close - min_close)
sum_h = np.array([0.0] * bins)
for i in range(len(c_close)):
sum_h[int((c_close[i] - min_close) * bins * norm)] += c_volume[i] ** 2
sq = np.sqrt(sum_h)
return sq / sum(sq), np.linspace(min_close, max_close, bins)
@jit(nopython=True)
def tr(c_open, c_high, c_low):
"""
True Range
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:rtype: np.ndarray
"""
return np.maximum(np.maximum(c_open - c_low, np.abs(c_high - c_open)), np.abs(c_low - c_open))
@jit(nopython=True)
def atr(c_open, c_high, c_low, period):
"""
Average True Range
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: np.ndarray
"""
return sma(tr(c_open, c_high, c_low), period)
@jit(nopython=True)
def adx(c_open, c_high, c_low, period_adx, period_dm, smoothing=2.0):
"""
Average Directionnal Index
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period_adx: int
:type period_dm: int
:type smoothing: float
:rtype: np.ndarray
"""
up = np.concatenate((np.array([np.nan]), c_high[1:] - c_high[:-1]))
down = np.concatenate((np.array([np.nan]), c_low[:-1] - c_low[1:]))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Relational graph modules"""
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
import networkx as nx
import numpy as np
from torch.nn.modules.utils import _pair
from torch.nn.modules.conv import _ConvNd
from torch.autograd import Function
from itertools import repeat
from networkx.utils import py_random_state
from pycls.datasets.load_graph import load_graph
import pdb
import time
import random
def compute_count(channel, group):
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
return out
@py_random_state(3)
def ws_graph(n, k, p, seed=1):
"""Returns a ws-flex graph, k can be real number in [2,n]
"""
assert k >= 2 and k <= n
# compute number of edges:
edge_num = int(round(k * n / 2))
count = compute_count(edge_num, n)
# print(count)
G = nx.Graph()
for i in range(n):
source = [i] * count[i]
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
# print(source, target)
G.add_edges_from(zip(source, target))
# rewire edges from each node
nodes = list(G.nodes())
for i in range(n):
u = i
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
for v in target:
if seed.random() < p:
w = seed.choice(nodes)
# Enforce no self-loops or multiple edges
while w == u or G.has_edge(u, w):
w = seed.choice(nodes)
if G.degree(u) >= n - 1:
break # skip this rewiring
else:
G.remove_edge(u, v)
G.add_edge(u, w)
return G
@py_random_state(4)
def connected_ws_graph(n, k, p, tries=100, seed=1):
"""Returns a connected ws-flex graph.
"""
for i in range(tries):
# seed is an RNG so should change sequence each call
G = ws_graph(n, k, p, seed)
if nx.is_connected(G):
return G
raise nx.NetworkXError('Maximum number of tries exceeded')
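# Minimal usage sketch (illustrative; the n/k/p values are arbitrary assumptions): build a
# connected ws-flex graph, where k may be a non-integer average degree, and inspect it.
def _example_ws_flex_graph():
    G = connected_ws_graph(n=16, k=3.5, p=0.2, seed=1)
    avg_degree = 2.0 * G.number_of_edges() / G.number_of_nodes()
    return G, avg_degree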
def nx_to_edge(graph, directed=False, add_self_loops=True,
shuffle_id=False, seed=1):
'''nx graph to edge index'''
graph.remove_edges_from(graph.selfloop_edges())
# relabel graphs
keys = list(graph.nodes)
vals = list(range(graph.number_of_nodes()))
# shuffle node id assignment
if shuffle_id:
random.seed(seed)
random.shuffle(vals)
mapping = dict(zip(keys, vals))
graph = nx.relabel_nodes(graph, mapping, copy=True)
# get edges
edge_index = np.array(list(graph.edges))
if not directed:
edge_index = np.concatenate((edge_index, edge_index[:, ::-1]), axis=0)
if add_self_loops:
edge_self = np.arange(graph.number_of_nodes())[:, np.newaxis]
edge_self = np.tile(edge_self, (1, 2))
edge_index = np.concatenate((edge_index, edge_self), axis=0)
# sort edges
idx = np.argsort(edge_index[:, 0])
edge_index = edge_index[idx, :]
return edge_index
# edge index generator
def generate_index(message_type='ba', n=16, sparsity=0.5, p=0.2,
directed=False, seed=123):
degree = n * sparsity
known_names = ['mcwhole', 'mcwholeraw', 'mcvisual', 'mcvisualraw', 'cat', 'catraw']
if message_type == 'er':
graph = nx.gnm_random_graph(n=n, m=n * degree // 2, seed=seed)
elif message_type == 'random':
edge_num = int(n * n * sparsity)
edge_id = np.random.choice(n * n, edge_num, replace=False)
edge_index = np.zeros((edge_num, 2), dtype=int)
for i in range(edge_num):
edge_index[i, 0] = edge_id[i] // n
edge_index[i, 1] = edge_id[i] % n
elif message_type == 'ws':
graph = connected_ws_graph(n=n, k=degree, p=p, seed=seed)
elif message_type == 'ba':
graph = nx.barabasi_albert_graph(n=n, m=degree // 2, seed=seed)
elif message_type == 'hypercube':
graph = nx.hypercube_graph(n=int(np.log2(n)))
elif message_type == 'grid':
m = degree
n = n // degree
graph = nx.grid_2d_graph(m=m, n=n)
elif message_type == 'cycle':
graph = nx.cycle_graph(n=n)
elif message_type == 'tree':
graph = nx.random_tree(n=n, seed=seed)
elif message_type == 'regular':
graph = nx.connected_watts_strogatz_graph(n=n, k=degree, p=0, seed=seed)
elif message_type in known_names:
graph = load_graph(message_type)
edge_index = nx_to_edge(graph, directed=True, seed=seed)
else:
raise NotImplementedError
if message_type != 'random' and message_type not in known_names:
edge_index = nx_to_edge(graph, directed=directed, seed=seed)
return edge_index
def compute_size(channel, group, seed=1):
np.random.seed(seed)
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
out = np.random.permutation(out)
return out
def compute_densemask(in_channels, out_channels, group_num, edge_index):
repeat_in = compute_size(in_channels, group_num)
repeat_out = compute_size(out_channels, group_num)
mask = np.zeros((group_num, group_num))
mask[edge_index[:, 0], edge_index[:, 1]] = 1
mask = np.repeat(mask, repeat_out, axis=0)
mask = np.repeat(mask, repeat_in, axis=1)
return mask
def get_mask(in_channels, out_channels, group_num,
message_type='ba', directed=False, sparsity=0.5, p=0.2, talk_mode='dense', seed=123):
assert group_num <= in_channels and group_num <= out_channels
# high-level graph edge index
edge_index_high = generate_index(message_type=message_type,
n=group_num, sparsity=sparsity, p=p, directed=directed, seed=seed)
# get in/out size for each high-level node
in_sizes = compute_size(in_channels, group_num)
out_sizes = compute_size(out_channels, group_num)
# decide low-level node num
group_num_low = int(min(np.min(in_sizes), np.min(out_sizes)))
# decide how to fill each node
mask_high = compute_densemask(in_channels, out_channels, group_num, edge_index_high)
return mask_high
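# Minimal usage sketch (illustrative; the channel counts and graph settings are arbitrary
# assumptions): build a relational-graph mask and measure the fraction of retained weights.
def _example_get_mask():
    mask = get_mask(in_channels=16, out_channels=16, group_num=4,
                    message_type='ws', directed=False, sparsity=0.5, p=0.2, seed=123)
    density = mask.sum() / mask.size
    return mask, density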
############## Linear model
class TalkLinear(nn.Linear):
'''Relational graph version of Linear. Neurons "talk" according to the graph structure'''
def __init__(self, in_channels, out_channels, group_num, bias=False,
message_type='ba', directed=False,
sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
group_num_max = min(in_channels, out_channels)
if group_num > group_num_max:
group_num = group_num_max
# print(group_num, in_channels, out_channels, kernel_size, stride)
super(TalkLinear, self).__init__(
in_channels, out_channels, bias)
self.mask = get_mask(in_channels, out_channels, group_num,
message_type, directed, sparsity, p, talk_mode, seed)
nonzero = np.sum(self.mask)
self.mask = torch.from_numpy(self.mask).float().cuda()
self.flops_scale = nonzero / (in_channels * out_channels)
self.params_scale = self.flops_scale
self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
def forward(self, x):
weight = self.weight * self.mask
# pdb.set_trace()
return F.linear(x, weight, self.bias)
class SymLinear(nn.Module):
'''Linear with symmetric weight matrices'''
def __init__(self, in_features, out_features, bias=True):
super(SymLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
weight = self.weight + self.weight.permute(1, 0)
return F.linear(input, weight, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
############## Conv model
class TalkConv2d(_ConvNd):
'''Relational graph version of Conv2d. Neurons "talk" according to the graph structure'''
def __init__(self, in_channels, out_channels, group_num, kernel_size, stride=1,
padding=0, dilation=1, bias=False, message_type='ba', directed=False, agg='sum',
sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
group_num_max = min(in_channels, out_channels)
if group_num > group_num_max:
group_num = group_num_max
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(TalkConv2d, self).__init__(
in_channels, out_channels,
kernel_size, stride, padding, dilation,
False, _pair(0), 1, bias, 'zeros')
self.mask = get_mask(in_channels, out_channels, group_num,
message_type, directed, sparsity, p, talk_mode, seed)
nonzero = np.sum(self.mask)
self.mask = torch.from_numpy(self.mask[:, :, np.newaxis, np.newaxis]).float().cuda()
self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
self.flops_scale = nonzero / (in_channels * out_channels)
self.params_scale = self.flops_scale
def forward(self, input):
weight = self.weight * self.mask
return F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, 1)
class SymConv2d(_ConvNd):
'''Conv2d with symmetric weight matrices'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros'):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(SymConv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
def forward(self, input):
weight = self.weight + self.weight.permute(1, 0, 2, 3)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
return F.conv2d(F.pad(input, expanded_padding, mode='circular'),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
########### Other OPs
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)"""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish activation fun."""
def __init__(self, in_w, se_w, act_fun):
super(SE, self).__init__()
self._construct_class(in_w, se_w, act_fun)
def _construct_class(self, in_w, se_w, act_fun):
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# FC, Swish, FC, Sigmoid
self.f_ex = nn.Sequential(
nn.Conv2d(in_w, se_w, kernel_size=1, bias=True),
act_fun(),
nn.Conv2d(se_w, in_w, kernel_size=1, bias=True),
nn.Sigmoid()
)
def forward(self, x):
return x * self.f_ex(self.avg_pool(x))
class SparseLinear(nn.Linear):
'''Sparse Linear layer'''
def __init__(self, group_num, in_scale, out_scale, bias=False,
edge_index=None, flops_scale=0.5, params_scale=0.5):
# mask is used for reset to zero
mask_one = np.ones((out_scale, in_scale), dtype=bool)
mask_zero = np.zeros((out_scale, in_scale), dtype=bool)
mask_list = [[mask_one for i in range(group_num)] for j in range(group_num)]
for i in range(edge_index.shape[0]):
mask_list[edge_index[i, 0]][edge_index[i, 1]] = mask_zero
self.mask = np.block(mask_list)
self.edge_index = edge_index
# todo: update to pytorch 1.2.0, then use bool() dtype
self.mask = torch.from_numpy(self.mask).byte().cuda()
self.flops_scale = flops_scale
self.params_scale = params_scale
super(SparseLinear, self).__init__(
group_num * in_scale, group_num * out_scale, bias)
def forward(self, x):
weight = self.weight.clone().masked_fill_(self.mask, 0)
# pdb.set_trace()
return F.linear(x, weight, self.bias)
class GroupLinear(nn.Module):
'''Group conv style linear layer'''
def __init__(self, in_channels, out_channels, bias=False, group_size=1):
super(GroupLinear, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.group_size = group_size
self.group_num = in_channels // group_size
self.in_scale = in_channels // self.group_num
self.out_scale = out_channels // self.group_num
assert in_channels % self.group_num == 0
assert out_channels % self.group_num == 0
assert in_channels % self.group_size == 0
# Note: agg_fun is always sum
self.edge_index = | np.arange(self.group_num) | numpy.arange |
import numpy as np
#import matplotlib.pyplot as plt
from skimage.measure import label,find_contours
#from PIL import Image
#from scipy.ndimage.morphology import distance_transform_edt
import csv
import sys
#from scipy.interpolate import Rbf,interp2d
from skimage.morphology import binary_opening
#from scipy import ndimage
from sklearn.linear_model import RANSACRegressor
def get_contour(rad,thresh):
"""
Find the edge in the input radiograph.
Parameters:
rad (numpy.ndarray): Radiograph of a sharp edge sample
thresh (float): The value at which a iso-valued contour (contour is the edge) is drawn
Returns:
numpy.ndarray: Coordinates along the longest detected contour
"""
contours = find_contours(rad,thresh)
best_contour = contours[0]
for contour in contours:
if(len(contour)>len(best_contour)):
best_contour = contour
return(best_contour)
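# Minimal usage sketch (illustrative; the array values and the 0.5 threshold are assumptions):
# build a synthetic two-level radiograph with a vertical step edge and recover the edge contour.
def _example_get_contour():
    rad = np.full((64, 64), 0.9)
    rad[:, 32:] = 0.1  # right half of the field of view sits behind the sample
    return get_contour(rad, 0.5)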
def get_trans(rad,best_contour,trans_min,trans_max,thresh):
"""
Compute the ideal transmission image.
Parameters:
rad (numpy.ndarray): Radiograph of a sharp edge sample
best_contour (numpy.ndarray): Coordinates of the longest contour that is assumed to be the edge
trans_min (float): Minimum transmission value
trans_max (float): Maximum transmission value
thresh (float): Transmission value for the edge
Returns:
numpy.ndarray: Ideal transmission image
"""
window_interp = 5 #for interpolation. must be odd
edge_thick = np.ones(rad.shape) #edge pixel will be labeled as 0
for row,col in best_contour:
row_floor,col_floor = int(np.floor(row)),int(np.floor(col))
row_ceil,col_ceil = int(np.ceil(row)),int(np.ceil(col))
edge_thick[row_floor,col_floor],edge_thick[row_floor,col_ceil] = 0,0
edge_thick[row_ceil,col_floor],edge_thick[row_ceil,col_ceil] = 0,0
edge_thick = binary_opening(edge_thick) #erosion followed by dilation. Rids of bright pixels in edge voxels
rows_edge,cols_edge = np.nonzero(edge_thick==0) #Get edge pixel locations
labels,num = label(edge_thick,background=0,return_num=True)
if(num != 2):
raise ValueError("ERROR: Number of regions detected is {}. Two types of regions must be present in radiographs.".format(num))
val1 = np.mean(rad[labels==1])
val2 = np.mean(rad[labels==2])
trans = np.zeros(rad.shape) #Sample's pixel locations will be labeled as 1
trans[labels==0] = np.nan
trans[labels==1] = trans_min if val1<=val2 else trans_max
trans[labels==2] = trans_max if val1<=val2 else trans_min
for row,col in best_contour:
trans[int(round(row)),int(round(col))] = thresh
ideal_trans = trans.copy()
for row,col in zip(rows_edge,cols_edge):
if(np.isnan(trans[row,col])):
norm,ival = 0,0
for i in range(-int((window_interp-1)/2),int((window_interp-1)/2)+1):
for j in range(-int((window_interp-1)/2),int((window_interp-1)/2)+1):
row_new = row+i
col_new = col+j
                    # exclude only the centre pixel (i == j == 0); axis-aligned neighbours should contribute
                    if((i != 0 or j != 0) and row_new >= 0 and row_new < trans.shape[0] and col_new >= 0 and col_new < trans.shape[1]):
if(np.isnan(trans[row_new,col_new]) == False):
weight = 1.0/np.sqrt(i*i+j*j)
ival += weight*trans[row_new,col_new]
norm += weight
ideal_trans[row,col] = ival/norm if norm != 0 else thresh
if(norm == 0):
print("WARNING: No valid value within window for interpolation")
return(ideal_trans)
def get_padded_trans(ideal_trans,bdary_mask_perc,pad_factor,rad_mask):
"""
Appropriately pad the ideal transmission image and the masks.
Parameters:
ideal_trans (numpy.ndarray): Ideal transmission image
bdary_mask_perc (float): Percentage of image region that must be masked, i.e., excluded from blur estimation, close to the radiograph edges on each side (left, right, top, and bottom). Expressed as a percentage of the radiograph size.
pad_factor (list [float,float]): Pad factor as expressed in multiples of input radiograph size
rad_mask (numpy.ndarray): Boolean mask array over the radiograph where blur estimation is done.
"""
bdary_mask_perc /= 100
    #Solves hw-(h-2*delta)(w-2*delta)=phw where h,w are ideal_trans shape, p is bdary_mask_perc, and delta is delta_mask
a = 4
b = -2*(ideal_trans.shape[0]+ideal_trans.shape[1])
c = bdary_mask_perc*ideal_trans.shape[0]*ideal_trans.shape[1]
delta_mask = (-b-np.sqrt(b*b-4*a*c))/(2*a)
if delta_mask < 0:
raise ValueError("ERROR: delta_mask is negative. This should not occur. Contact the author of this python package.")
# print("Delta mask is ",delta_mask)
mask = np.zeros(ideal_trans.shape).astype(bool)
row_min = int(round(delta_mask))
row_max = int(round(mask.shape[0]-delta_mask))
col_min = int(round(delta_mask))
col_max = int(round(mask.shape[1]-delta_mask))
mask[row_min:row_max,col_min:col_max] = True
if rad_mask is not None:
mask = np.bitwise_and(mask,rad_mask)
norm_rad_mask = mask.copy()
#pad_width0 = int(ideal_trans.shape[0]*(pad_factor[0]-1)/2.0-bdary_mask_perc*mask.shape[0])
#pad_width1 = int(ideal_trans.shape[1]*(pad_factor[1]-1)/2.0-bdary_mask_perc*mask.shape[1])
pad_width0 = int(ideal_trans.shape[0]*(pad_factor[0]-1)/2.0)
pad_width1 = int(ideal_trans.shape[1]*(pad_factor[1]-1)/2.0)
colidx,rowidx = np.meshgrid(np.arange(-pad_width1,ideal_trans.shape[1]+pad_width1),np.arange(-pad_width0,ideal_trans.shape[0]+pad_width0))
ideal_trans = np.pad(ideal_trans,((pad_width0,pad_width0),(pad_width1,pad_width1)),mode='constant',constant_values=-1)
ideal_trans_mask = np.pad(mask,((pad_width0,pad_width0),(pad_width1,pad_width1)),mode='constant',constant_values=False)
return(ideal_trans,ideal_trans_mask,norm_rad_mask,colidx,rowidx)
def ideal_trans_sharp_edge(norm_rad,bdary_mask_perc=5,pad_factor=[2,2],mask=None):
"""Estimate parameters of transmission model for a radiograph with a single straight sharp edge.
    This function takes as input a normalized radiograph and estimates the ideal transmission
    image that would have resulted if there were no blur and no noise. It also pads the
    transmission image to prevent aliasing during convolution, and produces masks for the
    region well within the image edges where blur model estimation is done. The transmission
    image and masks are packaged into a python dictionary, which is called the transmission model.
Parameters:
norm_rad (numpy.ndarray): Normalized measured radiograph.
bdary_mask_perc (float): Percentage of image region that must be masked, i.e., excluded from blur estimation, close to radiograph edges
pad_factor (list [float,float]): Pad factor as expressed in multiples of input radiograph size
mask (numpy.ndarray): Boolean mask array over the radiograph where blur estimation is done.
Returns:
trans_model (dict): Python dictionary containing the ideal transmission function, masks, and gradient functions.
params_init (list): Initial parameters for ideal transmission function
params_bounds (list of lists): Bounds on transmission function parameters.
"""
#pad_factor: pad to multiple of the input image size
trans_min = max(0.0,np.min(norm_rad))
trans_max = min(1.0,np.max(norm_rad))
renorm_rad = (norm_rad-trans_min)/(trans_max-trans_min)
best_contour = get_contour(renorm_rad,0.5)
ideal_trans = get_trans(renorm_rad,best_contour,0.0,1.0,0.5)
x,y = [],[]
for row,col in best_contour:
x.append(col)
y.append(row)
coeff = np.polyfit(x,y,1)
line1 = np.poly1d(coeff)
ideal_trans,ideal_trans_mask,norm_rad_mask,colidx,rowidx = get_padded_trans(ideal_trans,bdary_mask_perc,pad_factor,mask)
linediff1 = rowidx - line1(colidx)
for reg in [linediff1>0,linediff1<=0]:
val = np.mean(ideal_trans[np.bitwise_and(ideal_trans>=0,reg)])
ideal_trans[np.bitwise_and(ideal_trans==-1,reg)] = 0.0 if val<0.5 else 1.0
if(np.any(np.isnan(ideal_trans))):
raise ValueError("ERROR: Nan detected in the ideal radiograph image")
X_rows = ideal_trans[ideal_trans_mask].size
X0 = 1-ideal_trans[ideal_trans_mask].reshape(X_rows,1)
X1 = ideal_trans[ideal_trans_mask].reshape(X_rows,1)
X = np.hstack((X0,X1))
y = norm_rad[norm_rad_mask].reshape(X_rows,1)
reg = RANSACRegressor(min_samples=10,residual_threshold=0.1)
reg.fit(X,y)
params_init = [float(reg.predict(np.array([[1,0]]))),float(reg.predict(np.array([[0,1]])))]
z = ideal_trans[ideal_trans_mask]
z_sq = ideal_trans[ideal_trans_mask]**2
A = np.zeros((2,2))
A[0,0] = np.sum(1-2*z+z_sq)
A[0,1] = np.sum(z-z_sq)
A[1,0] = A[0,1]
A[1,1] = np.sum(z_sq)
b = np.zeros(2)
b[0] = | np.sum(norm_rad[norm_rad_mask]*(1-z)) | numpy.sum |
import tensorflow as tf
import numpy as np
class deep_cross_network(object):
def __init__(self, feature_dim_dict,
embedding_size=4, seed=1024, l2_reg_lamda=0.0002,
keep_prob=0.5, use_batch_norm=True, init_std=0.001,
cross_layer_num=3, hidden_size=[32,32]):
self.seed = seed
self.field_dim = len(feature_dim_dict["sparse"])
self.sample_weight = tf.placeholder(tf.float32, [None,], name="sample_weight")
self.input_x = tf.placeholder(tf.int32, [None, self.field_dim], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, 1], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
self.train_flag = tf.constant(True, dtype=tf.bool)
self.embedding_size = embedding_size
self.l2_reg_lamda = tf.constant(l2_reg_lamda, dtype=tf.float32)
self.init_std = init_std
self.use_batchnorm = use_batch_norm
self.feature_dic_dict = feature_dim_dict
self.feature_dim = len(feature_dim_dict["sparse"])
self.cross_layer_num = cross_layer_num
self.hidden_size = hidden_size
# create variable
self.b = tf.Variable(tf.constant(0.0), name="bias")
        self.total_size = self.field_dim * self.embedding_size
        self.sparse_embeddings = [tf.get_variable(name='embed_cate' + str(i) + '-' + feat,
                                                  initializer=tf.random_normal(
                                                      [self.feature_dic_dict["sparse"][feat],
                                                       int(min(self.embedding_size,
                                                               6 * pow(self.feature_dic_dict["sparse"][feat], 0.25)))],
                                                      stddev=self.init_std)) for i, feat in
                                                  enumerate(self.feature_dic_dict["sparse"])]
self.cross_layer_weight = [
tf.Variable(tf.random_normal([self.total_size, 1], stddev=self.init_std, seed=self.seed)) for i in
range(self.cross_layer_num)]
self.cross_layer_bias = [
tf.Variable(tf.random_normal([self.total_size, 1], stddev=self.init_std, seed=self.seed)) for i in
range(self.cross_layer_num)]
self.weights = self._initialize_weights()
with tf.name_scope("cross_network"):
            embed_list = [tf.nn.embedding_lookup(self.sparse_embeddings[i], self.input_x[:, i]) for i in
                          range(self.field_dim)]
embeds = tf.concat(embed_list, axis=-1)
self._x_0 = tf.reshape(embeds, (-1, self.total_size, 1))
x_l = self._x_0
for l in range(self.cross_layer_num):
x_l = self.f_cross_l(
x_l, self.cross_layer_weight[l], self.cross_layer_bias[l]) + x_l
cross_network_out = tf.reshape(x_l, (-1, self.total_size))
with tf.name_scope("deep_network"):
if len(self.hidden_size) > 0:
fc_input = tf.reshape(
embeds, (-1, self.field_dim * self.embedding_size))
for l in range(len(self.hidden_size)):
if self.use_batchnorm:
weight = tf.get_variable(name='deep_weight' + str(l),
shape=[fc_input.get_shape().as_list()[1], self.hidden_size[l]],
initializer=tf.random_normal_initializer(stddev=self.init_std,
seed=self.seed))
# bias = tf.Variable(0.0,name='bias'+str(l))
H = tf.matmul(fc_input, weight) # ,bias
H_hat = tf.layers.batch_normalization(H, training=self.train_flag)
fc = tf.nn.relu(H_hat)
if l < len(self.hidden_size) - 1:
                            fc = tf.cond(self.train_flag, lambda: self.inverted_dropout(fc, self.dropout_keep_prob), lambda: fc)
fc_input = fc
deep_network_out = fc_input
with tf.name_scope("combination_output_layer"):
x_stack = cross_network_out
if len(self.hidden_size) > 0:
x_stack = tf.concat([x_stack, deep_network_out], axis=1)
self.logit = tf.add(tf.matmul(x_stack, self.weights['concat_projection']), self.weights['concat_bias'])
with tf.name_scope("loss"):
self.out = tf.nn.sigmoid(self.logit, name="out")
self.loss = tf.losses.log_loss(self.input_y, self.out)
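    # f_cross_l below computes the cross interaction x_0 * (x_l^T w_l) + b_l over the stacked
    # embedding x_0 of size total_size; the residual "+ x_l" is added by the caller in the
    # cross_network loop above.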
def f_cross_l(self, x_l, w_l, b_l):
dot = tf.matmul(self._x_0, x_l, transpose_b=True)
return tf.tensordot(dot, w_l, 1) + b_l
def inverted_dropout(self, fc, keep_pron):
return tf.divide(tf.nn.dropout(fc, keep_pron), keep_pron)
def _initialize_weights(self):
weights = dict()
# embeddings
weights['feature_embeddings'] = tf.Variable(
tf.random_normal([self.cate_feature_size,self.embedding_size],0.0,0.01),
name='feature_embeddings')
weights['feature_bias'] = tf.Variable(tf.random_normal([self.feature_dim,1],0.0,1.0),name='feature_bias')
#deep layers
num_layer = len(self.hidden_size)
glorot = np.sqrt(2.0/(self.total_size + self.hidden_size[0]))
weights['deep_layer_0'] = tf.Variable(
np.random.normal(loc=0,scale=glorot,size=(self.total_size,self.hidden_size[0])),dtype=np.float32
)
weights['deep_bias_0'] = tf.Variable(
np.random.normal(loc=0,scale=glorot,size=(1,self.hidden_size[0])),dtype=np.float32
)
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.hidden_size[i - 1] + self.hidden_size[i]))
weights["deep_layer_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.hidden_size[i - 1], self.hidden_size[i])),
dtype=np.float32) # layers[i-1] * layers[i]
weights["deep_bias_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(1, self.hidden_size[i])),
dtype=np.float32) # 1 * layer[i]
for i in range(self.cross_layer_num):
weights["cross_layer_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.total_size,1)),
dtype=np.float32)
weights["cross_bias_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.total_size,1)),
dtype=np.float32) # 1 * layer[i]
# final concat projection layer
input_size = self.total_size + self.hidden_size[-1]
glorot = | np.sqrt(2.0/(input_size + 1)) | numpy.sqrt |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""dataset
Custom dataset.
"""
import numpy as np
from mindspore import dataset as ds
def get_data(num, img_size=(1, 32, 32), num_classes=10, is_onehot=True):
for _ in range(num):
img = np.random.randn(*img_size)
target = np.random.randint(0, num_classes)
target_ret = np.array([target]).astype(np.float32)
if is_onehot:
target_onehot = | np.zeros(shape=(num_classes,)) | numpy.zeros |
import os
import numpy as np
import scipy.optimize as optimize
import PyAstronomy.pyasl as pyasl
import astropy.units as units
import astropy.time as atime
import pydl.pydlutils.yanny as yanny
from .moonphase import moonphase2
from .sunpos2 import sunpos2
"""Copied from roboscheduler product for python 2 usage
Dependencies:
numpy
scipy
PyAstronomy
astropy
pydl
"""
def dateandtime2mjd(date=None, time='12:00', to_tai=7):
"""Utility to calculate an MJD"""
if((type(date) is list) | (type(date) is np.ndarray)):
isotimes = ["{date} {time}".format(date=cdate, time=ctime)
for cdate, ctime in zip(date, time)]
else:
isotimes = "{date} {time}".format(date=date, time=time)
times = atime.Time(isotimes, format='iso', scale='tai')
times = times + np.int32(to_tai) * units.hour
return(times.mjd)
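# Minimal usage sketch (illustrative; the date and time strings are arbitrary examples):
# convert local date/time strings into MJD values with the default TAI offset.
def _example_dateandtime2mjd():
    single = dateandtime2mjd(date='2021-03-01', time='19:30')
    several = dateandtime2mjd(date=['2021-03-01', '2021-03-02'], time=['19:30', '20:00'])
    return single, several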
class SchedulerBase(object):
"""Scheduler base class with generic utilities.
Parameters:
----------
Attributes:
----------
Methods:
-------
ralst2ha(ra=, lst=) : convert RA and LST to hour angle
hadec2altaz(ha=, dec=, lat=) : convert HA, Dec, latitude to alt, az
alt2airmass(alt=) : convert altitude to airmass
"""
def __init__(self):
return
def _arrayify(self, quantity=None):
"""Cast quantity as ndarray of numpy.float64"""
try:
length = len(quantity)
except TypeError:
length = 1
return np.zeros(length, dtype=np.float64) + quantity
def _mjd2jd(self, mjd=None):
"""Convert MJD to JD"""
return (self._arrayify(mjd) + np.float64(2400000.5))
def ralst2ha(self, ra=None, lst=None):
"""Return HA (degrees) given RA and LST
Parameters:
----------
ra : np.float64
right ascension (deg)
lst : np.float64
local sidereal time (deg)
Returns:
-------
ha : np.float64
hour angle (deg)
"""
ha = (((self._arrayify(lst) - self._arrayify(ra) + 360. + 180.)
% 360.) - 180.)
return(ha)
def hadec2altaz(self, ha=None, dec=None, lat=None):
"""Return (alt, az) (degrees) of given HA and Dec and latitude
Parameters:
----------
ha : np.float64
hour angle (deg)
dec : np.float64
declination (deg)
lat : np.float64
latitude (deg)
Returns:
-------
alt : np.float64
altitude (deg)
az : np.float64
azimuth (deg E of N)
"""
aha = self._arrayify(ha)
adec = self._arrayify(dec)
(alt, az) = pyasl.hadec2altaz(aha, adec,
np.float64(lat) + np.zeros(len(aha)))
return (alt, az)
def alt2airmass(self, alt):
"""Return airmass given altitude
Parameters:
----------
alt : np.float64
altitude (deg)
Returns:
-------
airmass : np.float64
airmass (1/sin(altitude))
"""
airmass = 1. / np.sin(np.pi / 180. * self._arrayify(alt))
return(airmass)
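# Minimal usage sketch (illustrative; the RA/LST/Dec/latitude values are arbitrary):
# chain the SchedulerBase helpers from (RA, LST) to hour angle, alt/az, and airmass.
def _example_schedulerbase():
    base = SchedulerBase()
    ha = base.ralst2ha(ra=150.0, lst=165.0)
    alt, az = base.hadec2altaz(ha=ha, dec=-10.0, lat=32.78)
    return base.alt2airmass(alt)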
class Observer(SchedulerBase):
"""Observer class to define different observatories.
Parameters:
----------
observatory : str
Name of observatory to use (must be in observatory file)
(default 'apo')
observatoryfile : str
Name of Yanny-format observatory file to read
(default $ROBOSCHEDULER_DIR/data/observatories.par)
Attributes:
----------
observatory : str
Name of observatory
latitude : numpy.float64
Latitude of observatory
longitude : numpy.float64
Longitude (E of Greenwich) of observatory
Methods:
-------
ralst2ha(ra=, lst=) : convert RA and LST to hour angle
hadec2altaz(ha=, dec=, lat=) : convert HA, Dec, latitude to alt, az
alt2airmass(alt=) : convert altitude to airmass
lst(mjd=) : return LST in degrees for observer at given MJD (days)
radec2altaz(mjd=, ra=, dec=) : return alt/az for ra/dec at given MJD
sun_radec(mjd=) : return position of Sun in Ra/Dec
sun_altaz(mjd=) : return position of Sun in Alt/AZ
moon_radec(mjd=) : return position of Moon in Ra/Dec
moon_altaz(mjd=) : return position of Moon in Alt/AZ
moon_illumination(mjd=) : return illumination of Moon at given MJD
evening_twilight(mjd=): return evening twilight on MJD
morning_twilight(mjd=): return morning twilight on MJD
"""
def __init__(self, observatory='apo', observatoryfile=None,
dark_twilight=-15., bright_twilight=-8.):
"""Create Observer object"""
# super().__init__()
# self.observatory = observatory
# if(observatoryfile is None):
# observatoryfile = os.path.join(os.getenv('ROBOSCHEDULER_DIR'),
# 'data', 'observatories.par')
# self._file = observatoryfile
# self._data = yanny.yanny(self._file)
# observatories = np.array([obs.decode()
# for obs in
# self._data['OBSERVATORY']['observatory']])
# indx = np.where(observatories == self.observatory)[0]
self.latitude = 32.7797556
self.longitude = -105.82027778
self.dark_twilight = np.float32(dark_twilight)
self.bright_twilight = np.float32(bright_twilight)
return
def lst(self, mjd=None):
"""Return LST (degrees) given MJD for observer
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
Returns:
-------
lst : np.float64
local sidereal time (deg)
"""
mjds = self._arrayify(mjd)
lst = (np.float64(15.) *
pyasl.ct2lst(self._mjd2jd(mjds),
np.zeros(len(mjds)) + self.longitude))
return (lst)
def sun_radec(self, mjd=None):
"""Return (ra, dec) in deg J2000 for Sun at MJD (days)
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
Returns:
-------
ra : np.float64
right ascension, J2000 (deg)
dec : np.float64
declination, J2000 (deg)
"""
jd = self._mjd2jd(mjd=self._arrayify(mjd))
(tmp_jd, ra, dec) = sunpos2(jd)
return (ra, dec)
def moon_radec(self, mjd=None):
"""Return (ra, dec) in deg J2000 for Moon at MJD (days)
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
Returns:
-------
ra : np.float64
right ascension, J2000 (deg)
dec : np.float64
declination, J2000 (deg)
"""
jd = self._mjd2jd(mjd=self._arrayify(mjd))
ra, dec, dist, geolon, geolat = pyasl.moonpos(jd)
return (ra, dec)
def radec2altaz(self, mjd=None, ra=None, dec=None):
"""Return (alt, az) for (ra, dec) in deg J2000 at MJD (days)
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
ra : np.float64
right ascension, J2000 (deg)
dec : np.float64
declination, J2000 (deg)
Returns:
-------
alt : np.float64
altitude (deg)
az : np.float64
azimuth (deg E of N)
"""
lst = self.lst(mjd=mjd)
ha = self.ralst2ha(ra=ra, lst=lst)
(alt, az) = self.hadec2altaz(ha=ha, dec=dec, lat=self.latitude)
return (alt, az)
def moon_illumination(self, mjd=None):
"""Return Moon illumination at MJD (days)
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
Returns:
-------
illumination : np.float64
fraction of Moon illuminated
"""
jd = self._mjd2jd(mjd=self._arrayify(mjd))
return (moonphase2(jd))
def sun_altaz(self, mjd=None):
"""Return (alt, az) for Sun at MJD (days)
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
Returns:
-------
alt : np.float64
altitude (deg)
az : np.float64
azimuth (deg E of N)
"""
(ra, dec) = self.sun_radec(mjd=mjd)
(alt, az) = self.radec2altaz(mjd=mjd, ra=ra, dec=dec)
return (alt, az)
def moon_altaz(self, mjd=None):
"""Return (alt, az) for Moon at MJD (days)
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
Returns:
-------
alt : np.float64
altitude (deg)
az : np.float64
azimuth (deg E of N)
"""
(ra, dec) = self.moon_radec(mjd=mjd)
(alt, az) = self.radec2altaz(mjd=mjd, ra=ra, dec=dec)
return (alt, az)
def lunation(self, mjd=None):
"""Return Moon illumination, or zero if Moon at alt<0"""
(moon_alt, moon_az) = self.moon_altaz(mjd=mjd)
if(moon_alt < 0):
return(0.)
else:
return(self.moon_illumination(mjd=mjd))
def skybrightness(self, mjd=None):
"""Return a sky brightness related number"
Parameters:
----------
mjd : np.float64
Modified Julian Day (days)
Returns:
-------
skybrightness : np.float32
sky brightness related number between 0 and 1
Notes:
-----
If the Sun is above Scheduler.dark_twilight, then the
skybright is one. Otherwise the skybrightness is equal to the
lunation, which if the Moon is above the horizon, is its
fractional illumination, and if the Moon is below the horizon,
is zero.
"""
(moon_alt, moon_az) = self.moon_altaz(mjd=mjd)
(sun_alt, sun_az) = self.sun_altaz(mjd=mjd)
if(sun_alt > self.dark_twilight):
return(1.)
else:
return(self.lunation(mjd=mjd))
def _twilight_function(self, mjd=None, twilight=-8.):
"""Utility function for root-finding to get twilight times"""
(alt, az) = self.sun_altaz(mjd=mjd)
return (alt - twilight)
def evening_twilight(self, mjd=None, twilight=None):
"""Return MJD (days) of evening twilight for MJD
Parameters:
----------
mjd : np.int32, int
Modified Julian Day (days)
Returns:
-------
evening_twilight : np.float64
time of twilight in MJD (days)
"""
if twilight is None:
twilight = self.bright_twilight
if(np.floor(np.float64(mjd)) != np.float64(mjd)):
raise ValueError("MJD should be an integer")
noon_ish = (np.float64(mjd) -
self.longitude / 15. / 24. - 0.5)
midnight_ish = noon_ish + 0.5
twi = optimize.brenth(self._twilight_function,
noon_ish, midnight_ish,
args=twilight)
return(np.float64(twi))
def morning_twilight(self, mjd=None, twilight=None):
"""Return MJD (days) of morning twilight for MJD
Parameters:
----------
mjd : np.int32, int
Modified Julian Day (days)
Returns:
-------
morning_twilight : np.float64
time of twilight in MJD (days)
"""
if twilight is None:
twilight = self.bright_twilight
if(np.floor(np.float64(mjd)) != np.float64(mjd)):
raise ValueError("MJD should be an integer")
midnight_ish = (np.float64(mjd) -
self.longitude / 15. / 24.)
nextnoon_ish = midnight_ish + 0.5
twi = optimize.brenth(self._twilight_function,
midnight_ish, nextnoon_ish,
args=twilight)
return(np.float64(twi))
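# Minimal usage sketch (illustrative; the MJD values are arbitrary assumptions): query the
# default APO observer for local sidereal time, sky brightness, and evening twilight.
def _example_observer():
    obs = Observer(observatory='apo')
    lst = obs.lst(mjd=59345.2)
    brightness = obs.skybrightness(mjd=59345.2)
    evening = obs.evening_twilight(mjd=59345)
    return lst, brightness, evening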
class Master(Observer):
"""Master class to interpret master schedule as an observer
Parameters:
----------
schedulefile : str
schedule file to use; default $ROBOSCHEDULER_DIR/data/master_schedule.par
Attributes:
----------
start : np.int32
MJD (days) of first night of survey
end : np.int32
MJD (days) of last night of survey
mjds : ndarray of np.int32
MJDs (days) when survey is potentially active
events : ndarray of numpy.str_
names of events of note
event_dates : ndarray of numpy.str_
list of dates in ISO format for events of note
event_times : ndarray of numpy.str_
list of times in ISO format for events of note
event_mjd : ndarray of numpy.float64
MJDs (days) of events of note
Methods:
-------
on() : is the survey on
"""
def __init__(self, schedule='normal', observatory='apo',
observatoryfile=None):
"""Create Master object for schedule"""
super().__init__(observatory=observatory,
observatoryfile=observatoryfile)
masterfile = 'master_schedule_{o}_{s}.par'.format(o=observatory,
s=schedule)
schedulefile = os.path.join(os.getenv('ROBOSCHEDULER_DIR'),
'data', masterfile)
print(schedulefile)
self._schedulefile = schedulefile
self.schedule = yanny.yanny(self._schedulefile)
self._validate()
self.event_dates = np.array([date.decode() for date
in self.schedule['SCHEDULE']['date']])
self.event_times = np.array([time.decode() for time
in self.schedule['SCHEDULE']['time']])
self.event_mjds = self._dateandtime2mjd()
self.events = np.array([event.decode() for event
in self.schedule['SCHEDULE']['event']])
self.start = self._start()
self.end = self._end()
self.mjds = self._mjds()
self.dark_twilight = np.float32(self.schedule['dark_twilight'])
self.bright_twilight = np.float32(self.schedule['bright_twilight'])
return
def _dateandtime2mjd(self):
return(dateandtime2mjd(date=self.event_dates,
time=self.event_times,
to_tai=self.schedule['to_tai']))
def _validate(self):
# should make sure:
# one start (first event)
# one end (last event)
# start MJD is a daytime time
# START_SURVEY is "on"
# END_SURVEY is "off"
return
def on(self, mjd=None):
if(mjd < self.event_mjds[0]):
return('off', self.event_mjds[0])
if(mjd >= self.event_mjds[-1]):
return('off', mjd + 1.)
# Assumes there is only one
indx = np.where((mjd >= self.event_mjds[0:-1]) &
(mjd < self.event_mjds[1:]))[0][0]
return(self.schedule[self.events[indx]],
self.event_mjds[indx + 1])
def end_mjd(self):
"""Return end MJD
Returns:
end_mjd : np.float64
MJD of last event (end of survey)
"""
return(self.event_mjds[-1])
def _start(self):
# Assumes there is only one
indx = np.where(self.events == 'START_SURVEY')[0][0]
# Assumes START_SURVEY turns state on
return(np.int32(np.floor(self.event_mjds[indx])))
def _end(self):
# Assumes there is only one
indx = np.where(self.events == 'END_SURVEY')[0][0]
# Assumes END_SURVEY turns state off
return(np.int32(np.ceil(self.event_mjds[indx])))
def _mjds(self):
nmjd = self.end - self.start + 1
mjds = self.start + np.arange(nmjd, dtype=np.int32)
keep = np.zeros(nmjd, dtype=np.int32)
for indx in np.arange(len(self.events) - 1):
this_event = self.events[indx]
if(self.schedule[this_event] == 'on'):
keep_start = np.int32(np.floor(self.event_mjds[indx]))
keep_end = np.int32(np.ceil(self.event_mjds[indx + 1]))
ikeep = np.where((mjds >= keep_start) &
(mjds <= keep_end))[0]
keep[ikeep] = 1
ikeep = | np.where(keep) | numpy.where |
import numpy as np
import pytest
import tifffile
from lxml import etree # nosec
from PartSegImage.image import Image
from PartSegImage.image_reader import TiffImageReader
from PartSegImage.image_writer import IMAGEJImageWriter, ImageWriter
@pytest.fixture(scope="module")
def ome_xml(bundle_test_dir):
return etree.XMLSchema(file=str(bundle_test_dir / "ome.xsd.xml"))
def test_scaling(tmp_path):
image = Image(np.zeros((10, 50, 50), dtype=np.uint8), (30, 0.1, 0.1), axes_order="ZYX")
ImageWriter.save(image, tmp_path / "image.tif")
read_image = TiffImageReader.read_image(tmp_path / "image.tif")
assert np.all(np.isclose(image.spacing, read_image.spacing))
def test_save_mask(tmp_path):
data = np.zeros((10, 40, 40), dtype=np.uint8)
data[1:-1, 1:-1, 1:-1] = 1
data[2:-3, 4:-4, 4:-4] = 2
mask = np.array(data > 0).astype(np.uint8)
image = Image(data, (0.4, 0.1, 0.1), mask=mask, axes_order="ZYX")
ImageWriter.save_mask(image, tmp_path / "mask.tif")
read_mask = TiffImageReader.read_image(tmp_path / "mask.tif")
assert np.all(np.isclose(read_mask.spacing, image.spacing))
@pytest.mark.parametrize("z_size", (1, 10))
def test_ome_save(tmp_path, bundle_test_dir, ome_xml, z_size):
data = np.zeros((z_size, 20, 20, 2), dtype=np.uint8)
image = Image(
data,
image_spacing=(27 * 10 ** -6, 6 * 10 ** -6, 6 * 10 ** -6),
axes_order="ZYXC",
channel_names=["a", "b"],
shift=(10, 9, 8),
name="Test",
)
ImageWriter.save(image, tmp_path / "test.tif")
with tifffile.TiffFile(tmp_path / "test.tif") as tiff:
assert tiff.is_ome
assert isinstance(tiff.ome_metadata, str)
meta_data = tifffile.xml2dict(tiff.ome_metadata)["OME"]["Image"]
assert "PhysicalSizeX" in meta_data["Pixels"]
assert meta_data["Pixels"]["PhysicalSizeX"] == 6
assert "PhysicalSizeXUnit" in meta_data["Pixels"]
assert meta_data["Pixels"]["PhysicalSizeXUnit"] == "µm"
assert len(meta_data["Pixels"]["Channel"]) == 2
assert meta_data["Pixels"]["Channel"][0]["Name"] == "a"
assert meta_data["Pixels"]["Channel"][1]["Name"] == "b"
assert meta_data["Name"] == "Test"
xml_file = etree.fromstring(tiff.ome_metadata.encode("utf8")) # nosec
ome_xml.assert_(xml_file)
read_image = TiffImageReader.read_image(tmp_path / "test.tif")
assert np.allclose(read_image.spacing, image.spacing)
assert np.allclose(read_image.shift, image.shift)
assert read_image.channel_names == ["a", "b"]
assert read_image.name == "Test"
def test_scaling_imagej(tmp_path):
image = Image(np.zeros((10, 50, 50), dtype=np.uint8), (30, 0.1, 0.1), axes_order="ZYX")
IMAGEJImageWriter.save(image, tmp_path / "image.tif")
read_image = TiffImageReader.read_image(tmp_path / "image.tif")
assert np.all(np.isclose(image.spacing, read_image.spacing))
def test_save_mask_imagej(tmp_path):
data = np.zeros((10, 40, 40), dtype=np.uint8)
data[1:-1, 1:-1, 1:-1] = 1
data[2:-3, 4:-4, 4:-4] = 2
mask = | np.array(data > 0) | numpy.array |
from __future__ import print_function, division, absolute_import
import array
import sys
import numpy as np
from numba import unittest_support as unittest
from numba import jit
from .support import TestCase, compile_function, MemoryLeakMixin
@jit(nopython=True)
def len_usecase(buf):
return len(buf)
@jit(nopython=True)
def getitem_usecase(buf, i):
return buf[i]
@jit(nopython=True)
def getslice_usecase(buf, i, j):
s = buf[i:j]
return s[0] + 2 * s[-1]
@jit(nopython=True)
def setitem_usecase(buf, i, v):
buf[i] = v
@jit(nopython=True)
def iter_usecase(buf):
res = 0.0
for i, x in enumerate(buf):
res += x
res *= i + 1
return res
def attrgetter(attr):
code = """def func(x):
return x.%(attr)s
""" % locals()
pyfunc = compile_function("func", code, globals())
return jit(nopython=True)(pyfunc)
contiguous_usecase = attrgetter("contiguous")
c_contiguous_usecase = attrgetter("c_contiguous")
f_contiguous_usecase = attrgetter("f_contiguous")
itemsize_usecase = attrgetter("itemsize")
nbytes_usecase = attrgetter("nbytes")
ndim_usecase = attrgetter("ndim")
readonly_usecase = attrgetter("readonly")
shape_usecase = attrgetter("shape")
strides_usecase = attrgetter("strides")
# On Python 2, array.array doesn't support the PEP 3118 buffer API
array_supported = sys.version_info >= (3,)
# On Python 2, bytes is really the str object
bytes_supported = sys.version_info >= (3,)
# On Python 2, indexing a memoryview returns bytes
memoryview_structured_indexing = sys.version_info >= (3,)
@unittest.skipIf(sys.version_info < (2, 7),
"buffer protocol not supported on Python 2.6")
class TestBufferProtocol(MemoryLeakMixin, TestCase):
"""
Test operations on buffer-providing objects.
"""
def _arrays(self):
n = 10
for letter, offset in [
('b', -3),
('B', 0),
('h', -5000),
('H', 40000),
('i', -100000),
('I', 1000000),
('l', -100000),
('L', 1000000),
('q', -2**60),
('Q', 2**63 + 1),
('f', 1.5),
('d', -1.5),
]:
yield array.array(letter, [i + offset for i in range(n)])
def _memoryviews(self):
n = 10
yield memoryview(bytearray(b"abcdefghi"))
yield memoryview(b"abcdefghi")
# Different item types
for dtype, start, stop in [
('int8', -10, 10),
('uint8', 0, 10),
('int16', -5000, 1000),
('uint16', 40000, 50000),
('int32', -100000, 100000),
('uint32', 0, 1000000),
('int64', -2**60, 10),
('uint64', 0, 2**64 - 10),
('float32', 1.5, 3.5),
('float64', 1.5, 3.5),
('complex64', -8j, 12 + 5j),
('complex128', -8j, 12 + 5j),
]:
yield memoryview(np.linspace(start, stop, n).astype(dtype))
# Different layouts
arr = np.arange(12).reshape((3, 4))
assert arr.flags.c_contiguous and not arr.flags.f_contiguous
yield memoryview(arr)
arr = arr.T
assert arr.flags.f_contiguous and not arr.flags.c_contiguous
yield memoryview(arr)
arr = arr[::2]
assert not arr.flags.f_contiguous and not arr.flags.c_contiguous
yield memoryview(arr)
def _readonlies(self):
if bytes_supported:
yield b"xyz"
if memoryview_structured_indexing:
yield memoryview(b"abcdefghi")
arr = np.arange(5)
arr.setflags(write=False)
yield memoryview(arr)
def _check_unary(self, jitfunc, *args):
pyfunc = jitfunc.py_func
self.assertPreciseEqual(jitfunc(*args), pyfunc(*args))
def check_len(self, obj):
self._check_unary(len_usecase, obj)
def check_iter(self, obj):
self._check_unary(iter_usecase, obj)
def check_getitem(self, obj):
# Be careful to index all dimensions, since we don't support
# partial indexing yet.
def yield_indices(obj):
try:
shape = obj.shape
except AttributeError:
shape = len(obj),
for tup in np.ndindex(shape):
# Simple 1d buffer-providing objects usually don't support
# tuple indexing.
if len(tup) == 1:
yield tup[0]
else:
yield tup
for i in yield_indices(obj):
try:
expected = obj[i]
except (NotImplementedError, TypeError):
if isinstance(obj, memoryview):
# The memoryview object doesn't support all codes yet,
# fall back on the underlying object.
expected = obj.obj[i]
else:
raise
self.assertPreciseEqual(getitem_usecase(obj, i), expected)
def check_setitem(self, obj):
for i in range(len(obj)):
orig = list(obj)
val = obj[i] // 2 + 1
setitem_usecase(obj, i, val)
self.assertEqual(obj[i], val)
for j, val in enumerate(orig):
if j != i:
self.assertEqual(obj[j], val)
def check_getslice(self, obj):
self._check_unary(getslice_usecase, obj, 1, len(obj) - 1)
def test_len(self):
self.check_len(bytearray(5))
if bytes_supported:
self.check_len(b"xyz")
for mem in self._memoryviews():
self.check_len(mem)
if array_supported:
for arr in self._arrays():
self.check_len(arr)
for buf in self._readonlies():
self.check_getitem(buf)
def test_getitem(self):
self.check_getitem(bytearray(b"abc"))
if bytes_supported:
self.check_getitem(b"xyz")
if memoryview_structured_indexing:
for mem in self._memoryviews():
self.check_getitem(mem)
if array_supported:
for arr in self._arrays():
self.check_getitem(arr)
for buf in self._readonlies():
self.check_getitem(buf)
def test_getslice(self):
with self.assertTypingError():
self.check_getslice(bytearray(b"abcde"))
if bytes_supported:
self.check_getslice(b"xyzuvw")
if memoryview_structured_indexing:
self.check_getslice(memoryview(b"xyzuvw"))
if array_supported:
with self.assertTypingError():
self.check_getslice(array.array('i', range(10)))
for buf in self._readonlies():
self.check_getitem(buf)
def test_setitem(self):
self.check_setitem(bytearray(b"abcdefghi"))
if array_supported:
for arr in self._arrays():
self.check_setitem(arr)
if memoryview_structured_indexing:
for mem in self._memoryviews():
self.check_getitem(mem)
# Read-only buffers
for buf in self._readonlies():
with self.assertTypingError():
self.check_setitem(buf)
def test_iter(self):
self.check_iter(bytearray(b"abc"))
if bytes_supported:
self.check_iter(b"xyz")
if memoryview_structured_indexing:
self.check_iter(memoryview(b"xyz"))
if array_supported:
for arr in self._arrays():
self.check_iter(arr)
for buf in self._readonlies():
self.check_getitem(buf)
@unittest.skipUnless(sys.version_info >= (2, 7),
"memoryview doesn't exist on 2.6")
class TestMemoryView(MemoryLeakMixin, TestCase):
"""
Test memoryview-specific attributes and operations.
"""
def _arrays(self):
arr = | np.arange(12) | numpy.arange |
from dnc.envs.base import KMeansEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from rllab.misc import logger
import os.path as osp
raise NotImplementedError('This is taken from DNC repo and needs to be made to work with this repo')
class LobberEnv(KMeansEnv, Serializable):
FILE = osp.join(osp.abspath(osp.dirname(__file__)), 'assets/lob.xml')
def __init__(self, box_center=(0,0), box_noise=0.4, frame_skip=5, *args, **kwargs):
self.box_center = box_center
self.box_noise = box_noise
super(LobberEnv, self).__init__(frame_skip=frame_skip, *args, **kwargs)
Serializable.__init__(self, box_center, box_noise, frame_skip, *args, **kwargs)
def get_current_obs(self):
finger_com = self.get_body_com("jaco_link_finger_1") + self.get_body_com("jaco_link_finger_2") + self.get_body_com("jaco_link_finger_3")
finger_com = finger_com / 3.
return np.concatenate([
self.model.data.qpos.flat[:],
self.model.data.qvel.flat[:],
finger_com,
self.relativeBoxPosition,
]).reshape(-1)
def step(self,action):
self.model.data.ctrl = action
# Taking Steps in the Environment
reward = 0
for _ in range(self.frame_skip):
self.model.step()
step_reward = self.timestep_reward()
reward += step_reward
# Reached the End of Trajectory
done = False
onGround = self.touching_group("geom_object", ["ground", "goal_wall1", "goal_wall2", "goal_wall3", "goal_wall4"])
if onGround and self.numClose > 10:
reward += self.final_reward()
done = True
ob = self.get_current_obs()
new_com = self.model.data.com_subtree[0]
self.dcom = new_com - self.current_com
self.current_com = new_com
# Recording Metrics
obj_position = self.get_body_com("object")
goal_position = self.get_body_com("goal")
distance = np.linalg.norm((goal_position - obj_position)[:2])
normalizedDistance = distance / self.init_block_goal_dist
return Step(ob, float(reward), done, distance=distance, norm_distance=normalizedDistance)
@overrides
def reset(self):
self.numClose = 0
qpos = self.init_qpos.copy().reshape(-1)
qvel = self.init_qvel.copy().reshape(-1) + np.random.uniform(low=-0.005,
high=0.005, size=self.model.nv)
qpos[1] = -1
qpos[9:12] = np.array((0.6, 0.2,0.03))
qvel[9:12] = 0
self.relativeBoxPosition = self.propose() # Proposal
qpos[-2:] += self.relativeBoxPosition
self.set_state(qpos.reshape(-1), qvel)
# Save initial distance between object and goal
obj_position = self.get_body_com("object")
goal_position = self.get_body_com("goal")
self.init_block_goal_dist = np.linalg.norm(obj_position - goal_position)
self.current_com = self.model.data.com_subtree[0]
self.dcom = | np.zeros_like(self.current_com) | numpy.zeros_like |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
tf.set_random_seed(777) # reproducibility
timesteps = seq_length = 7
data_dim = 5
output_dim = 3
# Open,High,Low,Close,Volume
xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
xy = xy[::-1] # reverse order (chronically ordered)
# very important. It does not work without it.
scaler = MinMaxScaler(feature_range=(0, 1))
xy = scaler.fit_transform(xy)
x = xy
y = xy[:, [-1]] # Close as label
dataX = []
dataY = []
for i in range(0, len(y) - seq_length):
_x = x[i:i + seq_length]
    _y = y[i + seq_length]  # close price of the day after the window as target
print(_x, "->", _y)
dataX.append(_x)
dataY.append(_y)
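# dataX now holds overlapping windows of shape (seq_length, data_dim) = (7, 5);
# dataY holds one scalar target (the last, label column) per window.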
# split to train and testing
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
trainX, testX = | np.array(dataX[0:train_size]) | numpy.array |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from vectormath import Vector2, Vector2Array, Vector3, Vector3Array
class TestVMathVector2(unittest.TestCase):
def test_init_exceptions(self):
self.assertRaises(TypeError, Vector2Array, np.r_[1], 1.0)
self.assertRaises(ValueError, Vector2Array, np.r_[1, 2], np.r_[1])
self.assertRaises(ValueError, Vector2Array, np.array([0, 0, 0]))
self.assertRaises(ValueError, Vector2Array, 'Make', ' me a ')
self.assertRaises(ValueError, Vector2Array, ([0, 0, 0], [0, 0, 0]))
def test_init(self):
v1 = Vector2Array()
v2 = Vector2Array(0, 0)
self.assertTrue(np.array_equal(v1, v2))
v3 = Vector2Array(v1)
self.assertTrue(np.array_equal(v1, v3))
self.assertTrue(v1 is not v3)
v4 = Vector2Array(np.r_[0, 0])
self.assertTrue(np.array_equal(v1, v4))
v5 = Vector2Array(np.c_[np.r_[1, 0], np.r_[0, 1]])
self.assertTrue(np.array_equal(v5.length, np.r_[1, 1]))
v6 = Vector2Array(np.r_[1, 0], np.r_[0, 1])
self.assertTrue(np.array_equal(v6.length, np.r_[1, 1]))
v7 = Vector2Array([0, 0])
self.assertTrue(np.array_equal(v1, v7))
v8 = Vector2Array(x=0, y=0)
self.assertTrue(np.array_equal(v1, v8))
v9 = Vector2Array(
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
)
v10 = Vector2Array([
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]
])
self.assertTrue(np.array_equal(v9, v10))
v11 = Vector2Array([[[[[0]], [[0]], ]]])
self.assertTrue(np.array_equal(v1, v11))
v12 = Vector2Array([0]*5, [0]*5)
self.assertTrue(np.array_equal(v10, v12))
v13 = Vector2Array((0, 0))
self.assertTrue(np.array_equal(v1, v13))
v14 = Vector2Array(([0, 0], [0, 0]))
self.assertTrue(np.array_equal(v14, Vector2Array([0]*2, [0]*2)))
def test_indexing(self):
v2 = Vector2Array(1, 2)
self.assertTrue(v2[0, 0] == 1)
self.assertTrue(v2[0, 1] == 2)
self.assertTrue(len(v2[0]) == 2)
def f(): v2[3]
self.assertRaises(IndexError, f)
def f(): v2[0, 3]
self.assertRaises(IndexError, f)
l = []
for x in v2[0]:
l.append(x)
self.assertTrue(np.array_equal(np.array(l), np.r_[1, 2]))
self.assertTrue(np.array_equal(v2, Vector2Array(l)))
l = []
v3 = Vector2Array([[1, 2],
[2, 3]])
for v in v3:
l.append(v)
self.assertTrue(np.array_equal(
np.array(l),
np.array([[1, 2], [2, 3]]))
)
self.assertTrue(np.array_equal(Vector2Array(l), v3))
v4 = Vector2Array()
v4[0, 0] = 1
v4[0, 1] = 2
self.assertTrue(np.array_equal(v2, v4))
def test_copy(self):
vOrig = Vector2Array()
vCopy = vOrig.copy()
self.assertTrue(np.array_equal(vOrig, vCopy))
self.assertTrue(vOrig is not vCopy)
def test_size(self):
v1 = Vector2Array()
self.assertTrue(v1.nV == 1)
v2 = Vector2Array(np.c_[np.r_[1, 0, 0], np.r_[0, 1, 0]])
self.assertTrue(v2.nV == 3)
v3 = Vector2Array(
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
)
self.assertTrue(v3.nV == 5)
v4 = Vector2Array(0, 0)
self.assertTrue(v4.nV == 1)
def test_setget(self):
v1 = Vector2Array(1, 1)
self.assertTrue(v1.x == 1)
v1.x = 2
self.assertTrue(v1.x == 2)
self.assertTrue(v1.y == 1)
v1.y = 2
self.assertTrue(v1.y == 2)
v2 = Vector2Array([[0, 1],
[1, 2]])
self.assertTrue(np.array_equal(v2.x, [0, 1]))
v2.x = [0, -1]
self.assertTrue(np.array_equal(v2.x, [0, -1]))
self.assertTrue(np.array_equal(v2.y, [1, 2]))
v2.y = [-1, -2]
self.assertTrue(np.array_equal(v2.y, [-1, -2]))
def test_length(self):
v1 = Vector2Array(1, 1)
self.assertTrue(v1.length == np.sqrt(2))
v2 = Vector2Array(np.r_[1, 2], np.r_[1, 2])
self.assertTrue(np.array_equal(v2.length, np.sqrt(np.r_[2, 8])))
v3 = Vector2Array(1, 0)
v3.length = 5
assert v3.x == 5
self.assertTrue(v3.length == 5)
v4 = Vector2Array(np.r_[1, 1], np.r_[0, 0])
def f(): v4.length = 5
self.assertRaises(ValueError, f)
v5 = Vector2Array(np.r_[1, 0], np.r_[0, 1])
self.assertTrue(np.array_equal(v5.length, [1, 1]))
v5.length = [-1, 3]
self.assertTrue(np.array_equal(v5, [[-1., -0.], [0., 3.]]))
self.assertTrue(np.array_equal(v5.length, [1, 3]))
v6 = Vector2Array()
self.assertTrue(v6.length == 0)
def f(): v6.length = 5
self.assertRaises(ZeroDivisionError, f)
v6.length = 0
self.assertTrue(v6.length == 0)
v7 = Vector2Array(
[0, 0, 1, 0, 0],
[1, 1, 0, 0, 0]
)
length = [5, 5, 5, 5, 5]
def f(): v7.length = length
self.assertRaises(ZeroDivisionError, f)
length = [5, 5, 5, 0, 0]
v7.length = length
self.assertTrue(np.array_equal(length, v7.length))
def test_ops(self):
v1 = Vector2Array(1, 1)
v2 = Vector2Array(2, 2)
self.assertTrue(np.array_equal(v2-v1, v1))
self.assertTrue(np.array_equal(v1-v2, -v1))
self.assertTrue(np.array_equal(v1+v1, v2))
self.assertTrue(np.array_equal(v1*v2, v2))
self.assertTrue(np.array_equal(v2/v1, v2))
self.assertTrue(np.array_equal(2*v1, v2))
self.assertTrue(np.array_equal(v2/2, v1))
self.assertTrue(np.array_equal(v1+1, v2))
self.assertTrue(np.array_equal(v2-1, v1))
v1 = Vector2Array(np.r_[1, 1.], np.r_[1, 1.])
v2 = Vector2Array(np.r_[2, 2.], np.r_[2, 2.])
self.assertTrue(np.array_equal(v2-v1, v1))
self.assertTrue(np.array_equal(v1-v2, -v1))
self.assertTrue(np.array_equal(v1+v1, v2))
self.assertTrue(np.array_equal(v1*v2, v2))
self.assertTrue(np.array_equal(v2/v1, v2))
self.assertTrue(np.array_equal(2*v1, v2))
self.assertTrue(np.array_equal(v2/2, v1))
self.assertTrue(np.array_equal(v1+1, v2))
self.assertTrue(np.array_equal(v2-1, v1))
def test_dot(self):
v1 = Vector2Array(1, 1)
v2 = Vector2Array(2, 2)
self.assertTrue(v1.dot(v2) == 4)
v1l = Vector2Array(np.r_[1, 1.], np.r_[1, 1.])
v2l = Vector2Array(np.r_[2, 2.], np.r_[2, 2.])
self.assertTrue(np.array_equal(v1l.dot(v2l), np.r_[4, 4]))
self.assertTrue(np.array_equal(v1.dot(v2l), np.r_[4, 4]))
self.assertTrue(np.array_equal(v1l.dot(v2), np.r_[4, 4]))
v3 = Vector2Array([3]*4, [3]*4)
def f(): v3.dot(v2l)
self.assertRaises(ValueError, f)
def f(): v3.dot(5)
self.assertRaises(TypeError, f)
def test_as_percent(self):
v1 = Vector2Array(10, 0)
v2 = Vector2Array(20, 0)
self.assertTrue(np.array_equal(v1.as_percent(2), v2))
self.assertTrue(np.array_equal(v1, Vector2Array(10, 0))) # not copied
v3 = Vector2Array(
[0, 0, 2, 0, 0],
[0, 2, 0, 0, 0]
)
v4 = v3 * .5
self.assertTrue(np.array_equal(v3.as_percent(.5), v4))
v5 = Vector2Array()
self.assertTrue(np.array_equal(v5.as_percent(100), v5))
v6 = Vector2Array(5, 5)
self.assertTrue(np.array_equal(v6.as_percent(0), v5))
def f(): v6.as_percent('One Hundred Percent')
self.assertRaises(TypeError, f)
def test_normalize(self):
v1 = Vector2Array(5, 0)
self.assertTrue(v1.length == 5)
self.assertTrue(v1.normalize() is v1)
self.assertTrue(v1.length == 1)
v2 = Vector2Array()
def f(): v2.normalize()
self.assertRaises(ZeroDivisionError, f)
v3 = Vector2Array(
[0, 2],
[2, 0]
)
self.assertTrue( | np.array_equal(v3.length, [2, 2]) | numpy.array_equal |
#!/usr/bin/env python
import sys
import yaml
import rospy
import numpy as np
import time
import tf_conversions
from std_srvs.srv import Empty
import std_msgs
from crazyflie_driver.srv import *
from crazyflie_driver.msg import TrajectoryPolynomialPiece, FullState
from tf import TransformListener
def arrayToGeometryPoint(a):
return geometry_msgs.msg.Point(a[0], a[1], a[2])
class TimeHelper:
def __init__(self):
rospy.wait_for_service("/next_phase")
        self.nextPhaseService = rospy.ServiceProxy("/next_phase", Empty)
def time(self):
return time.time()
def sleep(self, duration):
time.sleep(duration)
    def nextPhase(self):
        self.nextPhaseService()
class Crazyflie:
def __init__(self, id, initialPosition, tf):
self.id = id
prefix = "/cf" + str(id)
self.prefix = prefix
self.initialPosition = np.array(initialPosition)
self.tf = tf
rospy.wait_for_service(prefix + "/set_group_mask")
self.setGroupMaskService = rospy.ServiceProxy(prefix + "/set_group_mask", SetGroupMask)
rospy.wait_for_service(prefix + "/takeoff")
self.takeoffService = rospy.ServiceProxy(prefix + "/takeoff", Takeoff)
rospy.wait_for_service(prefix + "/land")
self.landService = rospy.ServiceProxy(prefix + "/land", Land)
# rospy.wait_for_service(prefix + "/stop")
# self.stopService = rospy.ServiceProxy(prefix + "/stop", Stop)
rospy.wait_for_service(prefix + "/go_to")
self.goToService = rospy.ServiceProxy(prefix + "/go_to", GoTo)
rospy.wait_for_service(prefix + "/upload_trajectory")
self.uploadTrajectoryService = rospy.ServiceProxy(prefix + "/upload_trajectory", UploadTrajectory)
rospy.wait_for_service(prefix + "/start_trajectory")
self.startTrajectoryService = rospy.ServiceProxy(prefix + "/start_trajectory", StartTrajectory)
rospy.wait_for_service(prefix + "/update_params")
self.updateParamsService = rospy.ServiceProxy(prefix + "/update_params", UpdateParams)
self.cmdFullStatePublisher = rospy.Publisher(prefix + "/cmd_full_state", FullState, queue_size=1)
self.cmdFullStateMsg = FullState()
self.cmdFullStateMsg.header.seq = 0
self.cmdFullStateMsg.header.frame_id = "/world"
self.cmdStopPublisher = rospy.Publisher(prefix + "/cmd_stop", std_msgs.msg.Empty, queue_size=1)
def setGroupMask(self, groupMask):
self.setGroupMaskService(groupMask)
def takeoff(self, targetHeight, duration, groupMask = 0):
self.takeoffService(groupMask, targetHeight, rospy.Duration.from_sec(duration))
def land(self, targetHeight, duration, groupMask = 0):
self.landService(groupMask, targetHeight, rospy.Duration.from_sec(duration))
def stop(self, groupMask = 0):
self.stopService(groupMask)
def goTo(self, goal, yaw, duration, relative = False, groupMask = 0):
gp = arrayToGeometryPoint(goal)
self.goToService(groupMask, relative, gp, yaw, rospy.Duration.from_sec(duration))
def uploadTrajectory(self, trajectoryId, pieceOffset, trajectory):
pieces = []
for poly in trajectory.polynomials:
piece = TrajectoryPolynomialPiece()
piece.duration = rospy.Duration.from_sec(poly.duration)
piece.poly_x = poly.px.p
piece.poly_y = poly.py.p
piece.poly_z = poly.pz.p
piece.poly_yaw = poly.pyaw.p
pieces.append(piece)
self.uploadTrajectoryService(trajectoryId, pieceOffset, pieces)
def startTrajectory(self, trajectoryId, timescale = 1.0, reverse = False, relative = True, groupMask = 0):
self.startTrajectoryService(groupMask, trajectoryId, timescale, reverse, relative)
def position(self):
self.tf.waitForTransform("/world", "/cf" + str(self.id), rospy.Time(0), rospy.Duration(10))
position, quaternion = self.tf.lookupTransform("/world", "/cf" + str(self.id), rospy.Time(0))
return | np.array(position) | numpy.array |
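# Editor's illustrative sketch (not part of the original node): a minimal,
# hypothetical flight sequence using the Crazyflie wrapper above. The id,
# initial position and setpoints are made-up values; tf_listener is assumed
# to be a tf.TransformListener created by the caller.
def _example_takeoff_and_land(tf_listener):
    cf = Crazyflie(id=1, initialPosition=[0.0, 0.0, 0.0], tf=tf_listener)
    cf.takeoff(targetHeight=0.5, duration=2.0)
    cf.goTo([0.5, 0.0, 0.5], yaw=0.0, duration=3.0)
    cf.land(targetHeight=0.0, duration=2.0)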
import time
import sys
import lasagne
import theano
import os
import argparse
from collections import defaultdict
from lasagne_utils import save_model, store_in_log, load_model, load_log, \
ExponentialUniformInit, non_flattening_dense, get_layer_output_fn
import theano.tensor as T
import numpy as np
from plstm import PLSTMLayer, PLSTMTimeGate
from bnlstm import LSTMWBNLayer
from lasagne.layers.recurrent import Gate
def get_train_and_val_fn(inputs, target_var, network):
# Get network output
prediction = lasagne.layers.get_output(network)
# Calculate training accuracy
train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Calculate crossentropy between predictions and targets
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# Fetch trainable parameters
params = lasagne.layers.get_all_params(network, trainable=True)
# Calculate updates for the parameters given the loss
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
# Fetch network output, using deterministic methods
test_prediction = lasagne.layers.get_output(network, deterministic=True)
    # Again calculate crossentropy, this time using the (test-time) deterministic pass
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
test_loss = test_loss.mean()
# Also, create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Get the raw output activations, for every layer
out_fn = get_layer_output_fn(inputs, network)
# Add in the targets to the function inputs
fn_inputs = inputs + [target_var]
# Compile a train function with the updates, returning loss and accuracy
train_fn = theano.function(fn_inputs, [loss, train_acc], updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function(fn_inputs, [test_loss, test_acc])
return train_fn, val_fn, out_fn
def get_rnn(input_var, mask_var, time_var, arch_size, GRAD_CLIP=100, bn=False, model_type='plstm'):
# (batch size, max sequence length, number of features)
l_in = lasagne.layers.InputLayer(shape=(None, None, 1), input_var=input_var) #L0?
# Mask as matrices of dimensionality (N_BATCH, MAX_LENGTH)
l_mask = lasagne.layers.InputLayer(shape=(None, None), input_var=mask_var) #l6
# Time as matrices of dimensionality (N_BATCH, MAX_LENGTH)
l_t = lasagne.layers.InputLayer(shape=(None, None), input_var=time_var) #l5
# Allows arbitrary sizes
batch_size, seq_len, _ = input_var.shape
if model_type=='plstm':
print('Using PLSTM.')
# RNN layer 1
l_forward = PLSTMLayer(
l_in, time_input=l_t,
num_units=arch_size[1],
mask_input=l_mask,
ingate=Gate(b=lasagne.init.Constant(-0.1)),
forgetgate=Gate(b=lasagne.init.Constant(0), nonlinearity=lasagne.nonlinearities.sigmoid),
cell=Gate(W_cell=None, nonlinearity=lasagne.nonlinearities.tanh),
outgate=Gate(),
nonlinearity=lasagne.nonlinearities.tanh,
grad_clipping=GRAD_CLIP,
bn=bn,
learn_time_params=[True, True, True],
timegate=PLSTMTimeGate(
Period=ExponentialUniformInit((1,3)),
Shift=lasagne.init.Uniform( (0., 100)),
On_End=lasagne.init.Constant(0.05))
)
else:
print('Using LSTM, with BN: {}'.format(bn))
# RNN layers
l_forward = LSTMWBNLayer(lasagne.layers.ConcatLayer([l_in, lasagne.layers.ReshapeLayer(l_t,[batch_size, seq_len, 1])], axis=2),
num_units=arch_size[1],
mask_input=l_mask, grad_clipping=GRAD_CLIP,
ingate=Gate(b=lasagne.init.Constant(-0.1)),
forgetgate=Gate(b=lasagne.init.Constant(0), nonlinearity=lasagne.nonlinearities.sigmoid),
cell=Gate(W_cell=None, nonlinearity=lasagne.nonlinearities.tanh),
outgate=Gate(),
nonlinearity=lasagne.nonlinearities.tanh,
bn=bn)
# Need to slice off the last layer now
l_slice = lasagne.layers.SliceLayer(l_forward, -1, axis=1) #l11
# Softmax
l_dense = lasagne.layers.DenseLayer(l_slice, num_units=arch_size[2],nonlinearity=lasagne.nonlinearities.leaky_rectify)
l_out = lasagne.layers.NonlinearityLayer(l_dense, nonlinearity=lasagne.nonlinearities.softmax)
return l_out
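# Editor's illustrative sketch (not part of the original script): one possible
# way to wire up the symbolic variables expected by get_rnn and compile the
# train/validation functions. The arch_size triple and variable names are
# assumptions for illustration only.
def _example_build_and_compile_plstm():
    input_var = T.tensor3('input')     # (batch, time, features)
    mask_var = T.matrix('mask')        # (batch, time)
    time_var = T.matrix('time')        # (batch, time)
    target_var = T.ivector('targets')  # (batch,)
    network = get_rnn(input_var, mask_var, time_var,
                      arch_size=[1, 110, 10], model_type='plstm')
    return get_train_and_val_fn([input_var, mask_var, time_var],
                                target_var, network)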
# Special Data Iterator
# ----------------------------------------------------
class SinWaveIterator(object):
"""
"""
def flow(self, sample_regularly, sample_res, min_period=1, max_period=100, min_spec_period=5, max_spec_period=6,
batch_size=32, num_examples=10000, min_duration=15, max_duration=125,
min_num_points=15, max_num_points=125):
# Calculate constants
num_batches = int(np.ceil(float(num_examples)/batch_size))
min_log_period, max_log_period = np.log(min_period), np.log(max_period)
b = 0
while b < num_batches:
# Choose curve and sampling parameters
num_points = | np.random.uniform(low=min_num_points,high=max_num_points,size=(batch_size)) | numpy.random.uniform |
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import sys
from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Layer
import numpy as np
import tensorflow as tf
import itertools
def initializeWeightsBiases(prev_layer_size,
size,
weights=None,
biases=None,
name=None):
"""Initializes weights and biases to be used in a fully-connected layer.
Parameters
----------
prev_layer_size: int
Number of features in previous layer.
size: int
Number of nodes in this layer.
weights: tf.Tensor, optional (Default None)
Weight tensor.
biases: tf.Tensor, optional (Default None)
Bias tensor.
name: str
Name for this op, optional (Defaults to 'fully_connected' if None)
Returns
-------
weights: tf.Variable
Initialized weights.
biases: tf.Variable
Initialized biases.
"""
if weights is None:
weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01)
if biases is None:
biases = tf.zeros([size])
w = tf.Variable(weights, name='w')
b = tf.Variable(biases, name='b')
return w, b
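# Editor's sketch (hypothetical sizes, not part of the original file): using the
# helper above to build a single fully-connected layer with a ReLU activation.
# `inputs` is assumed to be a float32 tensor of shape (batch, prev_layer_size).
def _example_dense_layer(inputs, prev_layer_size=75, size=32):
  w, b = initializeWeightsBiases(prev_layer_size=prev_layer_size, size=size)
  return tf.nn.relu(tf.nn.bias_add(tf.matmul(inputs, w), b))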
class AtomicConvScore(Layer):
"""The scoring function used by the atomic convolution models."""
def __init__(self, atom_types, layer_sizes, **kwargs):
super(AtomicConvScore, self).__init__(**kwargs)
self.atom_types = atom_types
self.layer_sizes = layer_sizes
def build(self, input_shape):
self.type_weights = []
self.type_biases = []
self.output_weights = []
self.output_biases = []
n_features = int(input_shape[0][-1])
layer_sizes = self.layer_sizes
num_layers = len(layer_sizes)
weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
bias_init_consts = [0.0] * num_layers
for ind, atomtype in enumerate(self.atom_types):
prev_layer_size = n_features
self.type_weights.append([])
self.type_biases.append([])
self.output_weights.append([])
self.output_biases.append([])
for i in range(num_layers):
weight, bias = initializeWeightsBiases(
prev_layer_size=prev_layer_size,
size=layer_sizes[i],
weights=tf.random.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
biases=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]]))
self.type_weights[ind].append(weight)
self.type_biases[ind].append(bias)
prev_layer_size = layer_sizes[i]
weight, bias = initializeWeightsBiases(prev_layer_size, 1)
self.output_weights[ind].append(weight)
self.output_biases[ind].append(bias)
def call(self, inputs):
frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs
atom_types = self.atom_types
num_layers = len(self.layer_sizes)
def atomnet(current_input, atomtype):
prev_layer = current_input
for i in range(num_layers):
layer = tf.nn.bias_add(
tf.matmul(prev_layer, self.type_weights[atomtype][i]),
self.type_biases[atomtype][i])
layer = tf.nn.relu(layer)
prev_layer = layer
output_layer = tf.squeeze(
tf.nn.bias_add(
tf.matmul(prev_layer, self.output_weights[atomtype][0]),
self.output_biases[atomtype][0]))
return output_layer
frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32)
frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)
complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32)
frag1_atomtype_energy = []
frag2_atomtype_energy = []
complex_atomtype_energy = []
for ind, atomtype in enumerate(atom_types):
frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)
cond = tf.equal(frag1_z, atomtype)
frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
cond = tf.equal(frag2_z, atomtype)
frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
cond = tf.equal(complex_z, atomtype)
complex_atomtype_energy.append(
tf.where(cond, complex_outputs, complex_zeros))
frag1_outputs = tf.add_n(frag1_atomtype_energy)
frag2_outputs = tf.add_n(frag2_atomtype_energy)
complex_outputs = tf.add_n(complex_atomtype_energy)
frag1_energy = tf.reduce_sum(frag1_outputs, 1)
frag2_energy = tf.reduce_sum(frag2_outputs, 1)
complex_energy = tf.reduce_sum(complex_outputs, 1)
binding_energy = complex_energy - (frag1_energy + frag2_energy)
return tf.expand_dims(binding_energy, axis=1)
class AtomicConvModel(KerasModel):
"""Implements an Atomic Convolution Model.
Implements the atomic convolutional networks as introduced in
  <NAME> et al. "Atomic convolutional networks for predicting protein-ligand binding affinity." arXiv preprint arXiv:1703.10603 (2017).
The atomic convolutional networks function as a variant of
graph convolutions. The difference is that the "graph" here is
the nearest neighbors graph in 3D space. The AtomicConvModel
leverages these connections in 3D space to train models that
learn to predict energetic state starting from the spatial
geometry of the model.
"""
def __init__(self,
frag1_num_atoms=70,
frag2_num_atoms=634,
complex_num_atoms=701,
max_num_neighbors=12,
batch_size=24,
atom_types=[
6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,
53., -1.
],
radial=[[
1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,
7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
], [0.0, 4.0, 8.0], [0.4]],
layer_sizes=[32, 32, 16],
learning_rate=0.001,
**kwargs):
"""
Parameters
----------
frag1_num_atoms: int
Number of atoms in first fragment
frag2_num_atoms: int
      Number of atoms in the second fragment.
max_num_neighbors: int
Maximum number of neighbors possible for an atom. Recall neighbors
are spatial neighbors.
atom_types: list
List of atoms recognized by model. Atoms are indicated by their
nuclear numbers.
radial: list
TODO: add description
layer_sizes: list
TODO: add description
learning_rate: float
Learning rate for the model.
"""
# TODO: Turning off queue for now. Safe to re-activate?
self.complex_num_atoms = complex_num_atoms
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.max_num_neighbors = max_num_neighbors
self.batch_size = batch_size
self.atom_types = atom_types
rp = [x for x in itertools.product(*radial)]
frag1_X = Input(shape=(frag1_num_atoms, 3))
frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_z = Input(shape=(frag1_num_atoms,))
frag2_X = Input(shape=(frag2_num_atoms, 3))
frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_z = Input(shape=(frag2_num_atoms,))
complex_X = Input(shape=(complex_num_atoms, 3))
complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_z = Input(shape=(complex_num_atoms,))
self._frag1_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z])
self._frag2_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z])
self._complex_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z])
score = AtomicConvScore(self.atom_types, layer_sizes)([
self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z,
frag2_z, complex_z
])
model = tf.keras.Model(
inputs=[
frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs,
frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z,
complex_z
],
outputs=score)
super(AtomicConvModel, self).__init__(
model, L2Loss(), batch_size=batch_size, **kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
batch_size = self.batch_size
def replace_atom_types(z):
def place_holder(i):
if i in self.atom_types:
return i
return -1
return np.array([place_holder(x) for x in z])
for epoch in range(epochs):
for ind, (F_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(
batch_size, deterministic=True, pad_batches=pad_batches)):
N = self.complex_num_atoms
N_1 = self.frag1_num_atoms
N_2 = self.frag2_num_atoms
M = self.max_num_neighbors
batch_size = F_b.shape[0]
num_features = F_b[0][0].shape[1]
frag1_X_b = np.zeros((batch_size, N_1, num_features))
for i in range(batch_size):
frag1_X_b[i] = F_b[i][0]
frag2_X_b = np.zeros((batch_size, N_2, num_features))
for i in range(batch_size):
frag2_X_b[i] = F_b[i][3]
complex_X_b = np.zeros((batch_size, N, num_features))
for i in range(batch_size):
complex_X_b[i] = F_b[i][6]
frag1_Nbrs = np.zeros((batch_size, N_1, M))
frag1_Z_b = | np.zeros((batch_size, N_1)) | numpy.zeros |
"""
Transfer functions with more complex dependencies.
$Id: basic.py 10790 2009-11-21 17:51:33Z antolikjan $
"""
import copy
import numpy as np
import param
import imagen
from dataviews import SheetView
import topo
import topo.base.functionfamily
from topo.base.arrayutil import clip_lower,array_argmax
from topo.base.boundingregion import BoundingBox
from topo.base.sheetcoords import SheetCoordinateSystem
from topo.transferfn import TransferFn, TransferFnWithState
# Not suitable for basic.py due to its dependence on patterns.
class PatternCombine(TransferFn):
"""
Combine the supplied pattern with one generated using a
PatternGenerator.
Useful for operations like adding noise or masking out lesioned
items or around the edges of non-rectangular shapes.
"""
generator = param.ClassSelector(imagen.PatternGenerator,
default=imagen.Constant(), doc="""
Pattern to combine with the supplied matrix.""")
operator = param.Parameter(np.multiply,precedence=0.98,doc="""
Binary Numeric function used to combine the two patterns.
Any binary Numeric array "ufunc" returning the same type of
array as the operands and supporting the reduce operator is
allowed here. See topo.pattern.Composite.operator for more
details.
""")
def __call__(self,x):
###JABHACKALERT: Need to set it up to be independent of
#density; right now only things like random numbers work
#reasonably
rows,cols = x.shape
bb = BoundingBox(points=((0,0), (rows,cols)))
generated_pattern = self.generator(bounds=bb,xdensity=1,ydensity=1).transpose()
new_pattern = self.operator(x, generated_pattern)
x *= 0.0
x += new_pattern
# Not suitable for basic.py due to its dependence on patterns.
class KernelMax(TransferFn):
"""
Replaces the given matrix with a kernel function centered around the maximum value.
This operation is usually part of the Kohonen SOM algorithm, and
approximates a series of lateral interactions resulting in a
single activity bubble.
The radius of the kernel (i.e. the surround) is specified by the
parameter 'radius', which should be set before using __call__.
The shape of the surround is determined by the
neighborhood_kernel_generator, and can be any PatternGenerator
instance, or any function accepting bounds, density, radius, and
height to return a kernel matrix.
"""
kernel_radius = param.Number(default=0.0,bounds=(0,None),doc="""
Kernel radius in Sheet coordinates.""")
neighborhood_kernel_generator = param.ClassSelector(imagen.PatternGenerator,
default=imagen.Gaussian(x=0.0,y=0.0,aspect_ratio=1.0),
doc="Neighborhood function")
crop_radius_multiplier = param.Number(default=3.0,doc="""
Factor by which the radius should be multiplied, when deciding
how far from the winner to keep evaluating the kernel.""")
density=param.Number(1.0,bounds=(0,None),doc="""
Density of the Sheet whose matrix we act on, for use
in converting from matrix to Sheet coordinates.""")
def __call__(self,x):
rows,cols = x.shape
radius = self.density*self.kernel_radius
crop_radius = int(max(1.25,radius*self.crop_radius_multiplier))
# find out the matrix coordinates of the winner
wr,wc = array_argmax(x)
# convert to sheet coordinates
wy = rows-wr-1
# Optimization: Calculate the bounding box around the winner
# in which weights will be changed
cmin = max(wc-crop_radius, 0)
cmax = min(wc+crop_radius+1,cols)
rmin = max(wr-crop_radius, 0)
rmax = min(wr+crop_radius+1,rows)
ymin = max(wy-crop_radius, 0)
ymax = min(wy+crop_radius+1,rows)
bb = BoundingBox(points=((cmin,ymin), (cmax,ymax)))
# generate the kernel matrix and insert it into the correct
# part of the output array
kernel = self.neighborhood_kernel_generator(bounds=bb,xdensity=1,ydensity=1,
size=2*radius,x=wc+0.5,y=wy+0.5)
x *= 0.0
x[rmin:rmax,cmin:cmax] = kernel
class HalfRectify(TransferFn):
"""
Transfer function that applies a half-wave rectification (clips at zero)
"""
t_init = param.Number(default=0.0,doc="""
        The initial value of the threshold at which output becomes non-zero.""")
gain = param.Number(default=1.0,doc="""
The neuronal gain""")
randomized_init = param.Boolean(False,doc="""
Whether to randomize the initial t parameter.""")
noise_magnitude = param.Number(default=0.1,doc="""
The magnitude of the additive noise to apply to the t_init
parameter at initialization.""")
def __init__(self,**params):
super(TransferFn,self).__init__(**params)
self.first_call = True
def __call__(self,x):
if self.first_call:
self.first_call = False
if self.randomized_init:
self.t = np.ones(x.shape, x.dtype.char) * self.t_init + \
(imagen.random.UniformRandom() \
(xdensity=x.shape[0],ydensity=x.shape[1])-0.5) * \
self.noise_magnitude*2
else:
self.t = np.ones(x.shape, x.dtype.char) * self.t_init
x -= self.t
clip_lower(x,0)
x *= self.gain
class TemporalScatter(TransferFnWithState):
"""
Scatter values across time using a specified distribution,
discretized into a symmetric interval around zero. This class is
still work in progress as part of the TCAL model.
As no notion of time exists at the level of transfer functions
(state is changes according to call count), this class assumes a
fixed, 'clocked' timestep exists between subsequent calls.
Internally, an activity buffer is computed with a depth
corresponding to the number of timestep intervals with the stated
span value.
Note that the transfer function only has the power to delay
output. Therefore the central peak of a unimodal, zero-mean
distribution will occur *after* the time 'span' has elapsed.
In addition it is very *important* to view the depth map using the
view_depth_map method: if the majority of the values generated by
the distribution are outside the chosen span, values smaller and
larger than the span will be lumped into the first and last bins
respectively, distorting the shape of the intended distribution.
"""
timestep = param.Number(default=5, doc="""
The timestep interval in milliseconds. This value is used to
compute the depth and sample the supplied distribution.
        Note that the value must be specified from some external source and
there is no way to ensure that subsequent calls are regular
with the stated interval.
""")
distribution = param.ClassSelector(imagen.PatternGenerator,
default=imagen.random.GaussianRandom(offset=0.0, scale=30),
doc="""
The pattern generator that defines the scatter distribution in
milliseconds. Any random distribution may be used
e.g. UniformRandom or GaussianRandom. Note that the discretized
binning with the given 'span' is zero-centered.
        In other words, even if a distribution is not symmetric
(i.e. skewed), binning will occur around a symmetric interval
around zero with a total extent given by the span.
""")
span = param.Number(default=120, allow_None=True, bounds=(0,None), doc="""
The input distribution is expected to be continuous and may be
unbounded. For instance, a Gaussian distribution may generate
sample values that are unbounded in both the positive and
negative direction.
The span parameter determines the size of the (zero-centered)
interval which is binned.""")
def __init__(self, **params):
super(TemporalScatter,self).__init__(**params)
self.limits = (-self.span/2.0, self.span/2.0)
self._depth = None
self.first_call = True
self.raw_depth_map = None # The raw depth map (float values)
self.depth_map = None # The discretized depth map (ints)
self._buffer = None # The activity buffer
self.__current_state_stack=[]
def view_depth_map(self, mode='both'):
"""
Visualize the depth map using dataviews, including
distribution histograms.
Mode may be one of 'discrete', 'raw' or 'both':
* The 'discrete' mode presents the depth map used by
TemporalScatter, showing the latency at which
TemporalScatter will propagate the input i.e. discrete,
positive latencies in milliseconds.
* The 'raw' mode shows the continuous distribution before
discretization. This is typically a zero-mean, zero-centered
distribution i.e a continuous, zero-centered latency
distribution.
* Both presents both of the above types together (default).
"""
views = []
if mode in ['raw', 'both']:
views.append(SheetView(self.raw_depth_map,
label = 'Pattern',
name='Raw Depth map').hist())
if mode in ['discrete', 'both']:
scaled_map = (self.depth_map * self.timestep)
discrete_sv = SheetView(scaled_map,
label = 'Pattern', name='Depth map')
views.append(discrete_sv.hist(num_bins=self.depth,
bin_range=(0, self.span)))
return views[0] if len(views)==1 else views[0]+views[1]
@property
def depth(self):
"""
The depth of the activity buffer.
"""
if self._depth:
return self._depth
if not (self.span // self.timestep) or (self.span % self.timestep):
raise Exception("The span of the specified limits must be"
" an exact, *positive*, multiple of timestep")
self._depth = self.span // self.timestep
return self._depth
def _compute_depth_map(self, shape):
(d1,d2) = shape
(min_lim, max_lim) = self.limits
self.raw_depth_map = self.distribution(name='ScatterDepth',
xdensity=d1, ydensity=d2,
bounds=BoundingBox(radius=0.5))
bin_edges = list(np.linspace(min_lim, max_lim, self.depth))
discretized = np.digitize(self.raw_depth_map.flatten(), bin_edges)
# Out of bounds bins (to +inf) need to be pulled back in.
discretized[discretized==len(bin_edges)]=len(bin_edges)-1
return discretized.reshape(*shape)
def __call__(self,x):
(d1,d2) = x.shape
if self.first_call is True:
# The buffer is a 3D array containing a stack of activities
self._buffer = np.zeros((d1,d2,self.depth))
self.depth_map = self._compute_depth_map(x.shape)
self.first_call = False
# Roll the buffer and copy x to the top of the stack
        self._buffer = np.roll(self._buffer, 1, axis=2)
self._buffer[...,0] = x
x.fill(0.0)
x += self._buffer[np.arange(d1)[:, None],
| np.arange(d2) | numpy.arange |
import numpy as np
x = np.array([
[19, 9],
[15, 7],
[7, 2],
[17, 6]
])
y = np.array([1, 1, 2, 2])
x1 = np.array([
x[0],
x[1],
x[2],
x[3],
])
x2 = np.array([
x[1],
x[0],
x[3],
x[2],
])
x1x2 = x1 - x2
normx1x2 = np.linalg.norm(x1x2, axis=1)
print('x1x2\n%s\nnormx1x2\n%s\n' % (x1x2, normx1x2))
x1x3 = np.array([
x1[0] - x1[2],
x1[0] - x1[3],
x1[1] - x1[2],
x1[1] - x1[3],
x1[2] - x1[0],
x1[2] - x1[1],
x1[3] - x1[0],
x1[3] - x1[1],
])
normx1x3 = np.linalg.norm(x1x3, axis=1)
expx1x3 = | np.exp(1 - normx1x3) | numpy.exp |
import cv2
import numpy as np
from collections import OrderedDict
import colour
from colour_checker_detection import detect_colour_checkers_segmentation
class CreateSpyderCheck:
name = "SpyderChecker 24"
# Color checker reference values are in xyY color space
data = OrderedDict()
data["Aqua"] = np.array([0.29131, 0.39533, 0.4102])
data["Lavender"] = np.array([0.29860, 0.28411, 0.22334])
data["Evergreen"] = np.array([0.36528, 0.46063, 0.12519])
data["Steel Blue"] = np.array([0.27138, 0.29748, 0.17448])
data["Classic Light Skin"] = np.array([0.42207, 0.37609, 0.34173])
data["Classic Dark Skin"] = np.array([0.44194, 0.38161, 0.09076])
data["Primary Orange"] = np.array([0.54238, 0.40556, 0.2918])
data["Blueprint"] = np.array([0.22769, 0.21517, 0.09976])
data["Pink"] = np.array([0.50346, 0.32519, 0.1826])
data["Violet"] = np.array([0.30813, 0.24004, 0.05791])
data["Apple Green"] = np.array([0.40262, 0.50567, 0.44332])
data["Sunflower"] = np.array([0.50890, 0.43959, 0.4314])
data["Primary Cyan"] = np.array([0.19792, 0.30072, 0.16111])
data["Primary Magenta"] = np.array([0.38429, 0.23929, 0.18286])
data["Primary Yellow"] = np.array([0.47315, 0.47936, 0.63319])
data["Primary Red"] = np.array([0.59685, 0.31919, 0.11896])
data["Primary Green"] = np.array([0.32471, 0.51999, 0.22107])
data["Primary Blue"] = np.array([0.19215, 0.15888, 0.04335])
data["Card White"] = np.array([0.35284, 0.36107, 0.90104])
data["20% Gray"] = np.array([0.35137, 0.36134, 0.57464])
data["40% Gray"] = np.array([0.35106, 0.36195, 0.34707])
data["60% Gray"] = | np.array([0.35129, 0.36209, 0.18102]) | numpy.array |
# -*- coding: utf-8 -*-
#~ from __future__ import (unicode_literals, print_function, division, absolute_import)
import numpy as np
import scipy.fftpack
import scipy.signal
import matplotlib.cm
import matplotlib.colors
from .myqt import QT
import pyqtgraph as pg
from .base import BaseMultiChannelViewer, Base_MultiChannel_ParamController, MyViewBox
from .datasource import InMemoryAnalogSignalSource
from .tools import create_plot_grid
#todo remove this
import time
import threading
default_params = [
{'name': 'xsize', 'type': 'float', 'value': 3., 'step': 0.1},
{'name': 'nb_column', 'type': 'int', 'value': 4},
{'name': 'background_color', 'type': 'color', 'value': 'k'},
{'name': 'vline_color', 'type': 'color', 'value': '#FFFFFFAA'},
{'name': 'colormap', 'type': 'list', 'value': 'viridis', 'values' : ['viridis', 'jet', 'gray', 'hot', ] },
{'name': 'display_labels', 'type': 'bool', 'value': True},
{'name': 'show_axis', 'type': 'bool', 'value': True},
{'name': 'scale_mode', 'type': 'list', 'value': 'same_for_all', 'values' : ['by_channel', 'same_for_all', ] },
{'name': 'timefreq', 'type': 'group', 'children': [
{'name': 'f_start', 'type': 'float', 'value': 3., 'step': 1.},
{'name': 'f_stop', 'type': 'float', 'value': 90., 'step': 1.},
{'name': 'deltafreq', 'type': 'float', 'value': 3., 'step': 1., 'limits': [0.1, 1.e6]},
{'name': 'f0', 'type': 'float', 'value': 2.5, 'step': 0.1},
{'name': 'normalisation', 'type': 'float', 'value': 0., 'step': 0.1},]}
]
default_by_channel_params = [
{'name': 'visible', 'type': 'bool', 'value': True},
{'name': 'clim', 'type': 'float', 'value': .1},
]
def generate_wavelet_fourier(len_wavelet, f_start, f_stop, deltafreq, sample_rate, f0, normalisation):
"""
Compute the wavelet coefficients at all scales and compute its Fourier transform.
Parameters
----------
len_wavelet : int
length in samples of the wavelet window
f_start: float
First frequency in Hz
f_stop: float
Last frequency in Hz
deltafreq : float
Frequency interval in Hz
sample_rate : float
Sample rate in Hz
f0 : float
normalisation : float
    Returns
-------
wf : array
Fourier transform of the wavelet coefficients (after weighting).
Axis 0 is time; axis 1 is frequency.
"""
# compute final map scales
scales = f0/np.arange(f_start,f_stop,deltafreq)*sample_rate
# compute wavelet coeffs at all scales
xi=np.arange(-len_wavelet/2.,len_wavelet/2.)
xsd = xi[:,np.newaxis] / scales
wavelet_coefs=np.exp(complex(1j)*2.*np.pi*f0*xsd)*np.exp(-np.power(xsd,2)/2.)
weighting_function = lambda x: x**(-(1.0+normalisation))
wavelet_coefs = wavelet_coefs*weighting_function(scales[np.newaxis,:])
# Transform the wavelet into the Fourier domain
wf=scipy.fftpack.fft(wavelet_coefs,axis=0)
wf=wf.conj()
return wf
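# Editor's illustrative sketch (not part of the original module): building the
# Fourier-domain wavelet bank for a 1024-sample window at 1 kHz, covering
# 3-90 Hz in 3 Hz steps; all numbers are made-up example values.
def _example_wavelet_bank():
    return generate_wavelet_fourier(len_wavelet=1024, f_start=3., f_stop=90.,
                                    deltafreq=3., sample_rate=1000., f0=2.5,
                                    normalisation=0.)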
class TimeFreqViewer_ParamController(Base_MultiChannel_ParamController):
some_clim_changed = QT.pyqtSignal()
def on_channel_visibility_changed(self):
print('TimeFreqViewer_ParamController.on_channel_visibility_changed')
self.viewer.create_grid()
self.viewer.initialize_time_freq()
self.viewer.refresh()
def clim_zoom(self, factor):
#~ print('clim_zoom factor', factor)
self.viewer.by_channel_params.blockSignals(True)
for i, p in enumerate(self.viewer.by_channel_params.children()):
p.param('clim').setValue(p.param('clim').value()*factor)
self.viewer.by_channel_params.blockSignals(False)
self.some_clim_changed.emit()
def compute_auto_clim(self):
print('compute_auto_clim')
print(self.visible_channels)
self.viewer.by_channel_params.blockSignals(True)
maxs = []
visibles, = np.nonzero(self.visible_channels)
for chan in visibles:
if chan in self.viewer.last_wt_maps.keys():
m = np.max(self.viewer.last_wt_maps[chan])
if self.viewer.params['scale_mode'] == 'by_channel':
self.viewer.by_channel_params['ch'+str(chan), 'clim'] = m
else:
maxs.append(m)
if self.viewer.params['scale_mode'] == 'same_for_all' and len(maxs)>0:
for chan in visibles:
self.viewer.by_channel_params['ch'+str(chan), 'clim'] = max(maxs)
self.viewer.by_channel_params.blockSignals(False)
self.some_clim_changed.emit()
class TimeFreqWorker(QT.QObject):
data_ready = QT.pyqtSignal(int, float, float, float, float, float, object)
def __init__(self, source,viewer, chan, parent=None):
QT.QObject.__init__(self, parent)
self.source = source
self.viewer = viewer
self.chan = chan
def on_request_data(self, chan, t, t_start, t_stop, visible_channels, worker_params):
if chan != self.chan:
return
if not visible_channels[chan]:
return
if self.viewer.t != t:
print('viewer has moved already', chan, self.viewer.t, t)
# viewer has moved already
return
ds_ratio = worker_params['downsample_ratio']
sig_chunk_size = worker_params['sig_chunk_size']
filter_sos = worker_params['filter_sos']
wavelet_fourrier = worker_params['wavelet_fourrier']
plot_length = worker_params['plot_length']
i_start = self.source.time_to_index(t_start)
#~ print('ds_ratio', ds_ratio)
#~ print('start', t_start, i_start)
if ds_ratio>1:
i_start = i_start - (i_start%ds_ratio)
#~ print('start', t_start, i_start)
#clip it
i_start = max(0, i_start)
i_start = min(i_start, self.source.get_length())
if ds_ratio>1:
#after clip
i_start = i_start - (i_start%ds_ratio)
#~ print('start', t_start, i_start)
i_stop = i_start + sig_chunk_size
i_stop = min(i_stop, self.source.get_length())
sigs_chunk = self.source.get_chunk(i_start=i_start, i_stop=i_stop)
sig = sigs_chunk[:, chan]
if ds_ratio>1:
small_sig = scipy.signal.sosfiltfilt(filter_sos, sig)
            small_sig = small_sig[::ds_ratio].copy() # to ensure continuity
else:
small_sig = sig.copy() # to ensure continuity
small_sig = small_sig.astype('float64')
left_pad = 0
if small_sig.shape[0] != wavelet_fourrier.shape[0]:
#Pad it
z = np.zeros(wavelet_fourrier.shape[0], dtype=small_sig.dtype)
left_pad = wavelet_fourrier.shape[0] - small_sig.shape[0]
z[:small_sig.shape[0]] = small_sig
small_sig = z
#avoid border effect
small_sig -= small_sig.mean()
#~ print('sig', sig.shape, 'small_sig', small_sig.shape)
small_sig_f = scipy.fftpack.fft(small_sig)
if small_sig_f.shape[0] != wavelet_fourrier.shape[0]:
print('oulala', small_sig_f.shape, wavelet_fourrier.shape)
#TODO pad with zeros somewhere
return
wt_tmp=scipy.fftpack.ifft(small_sig_f[:,np.newaxis]*wavelet_fourrier,axis=0)
wt = scipy.fftpack.fftshift(wt_tmp,axes=[0])
wt = np.abs(wt).astype('float32')
if left_pad>0:
wt = wt[:-left_pad]
wt_map = wt[:plot_length]
#~ wt_map =wt
#~ print('wt_map', wt_map.shape)
#~ print('sleep', chan)
#~ time.sleep(2.)
#TODO t_start and t_stop wrong
#~ print('sub_sample_rate', worker_params['sub_sample_rate'])
#~ print('wanted_size', worker_params['wanted_size'])
#~ print('plot_length', plot_length)
#~ print(i_start, i_stop)
t1 = self.source.index_to_time(i_start)
t2 = self.source.index_to_time(i_start+wt_map.shape[0]*ds_ratio)
#~ t2 = self.source.index_to_time(i_stop)
self.data_ready.emit(chan, t, t_start, t_stop, t1, t2, wt_map)
class TimeFreqViewer(BaseMultiChannelViewer):
_default_params = default_params
_default_by_channel_params = default_by_channel_params
_ControllerClass = TimeFreqViewer_ParamController
request_data = QT.pyqtSignal(int, float, float, float, object, object)
def __init__(self, **kargs):
BaseMultiChannelViewer.__init__(self, **kargs)
self.make_params()
# make all not visible
self.by_channel_params.blockSignals(True)
for c in range(self.source.nb_channel):
self.by_channel_params['ch'+str(c), 'visible'] = c==0
self.by_channel_params.blockSignals(False)
self.make_param_controller()
self.params_controller.some_clim_changed.connect(self.refresh)
self.set_layout()
self.change_color_scale()
self.create_grid()
self.initialize_time_freq()
self._xratio = 0.3
self.last_wt_maps = {}
self.threads = []
self.timefreq_makers = []
for c in range(self.source.nb_channel):
thread = QT.QThread(parent=self)
self.threads.append(thread)
worker = TimeFreqWorker(self.source, self, c)
self.timefreq_makers.append(worker)
worker.moveToThread(thread)
thread.start()
worker.data_ready.connect(self.on_data_ready)
self.request_data.connect(worker.on_request_data)
self.params.param('xsize').setLimits((0, np.inf))
@classmethod
def from_numpy(cls, sigs, sample_rate, t_start, name, channel_names=None):
source = InMemoryAnalogSignalSource(sigs, sample_rate, t_start, channel_names=channel_names)
view = cls(source=source, name=name)
return view
def closeEvent(self, event):
for i, thread in enumerate(self.threads):
thread.quit()
thread.wait()
event.accept()
def set_layout(self):
self.mainlayout = QT.QVBoxLayout()
self.setLayout(self.mainlayout)
self.graphiclayout = pg.GraphicsLayoutWidget()
self.mainlayout.addWidget(self.graphiclayout)
def on_param_change(self, params=None, changes=None):
#~ print('on_param_change')
#track if new scale mode
#~ for param, change, data in changes:
#~ if change != 'value': continue
#~ if param.name()=='scale_mode':
#~ self.params_controller.compute_rescale()
#for simplification everything is recompute
self.change_color_scale()
self.create_grid()
self.initialize_time_freq()
self.refresh()
def create_grid(self):
visible_channels = self.params_controller.visible_channels
self.plots = create_plot_grid(
self.graphiclayout, self.params['nb_column'], visible_channels,
ViewBoxClass=MyViewBox, vb_params={})
for plot in self.plots:
if plot is not None:
plot.vb.doubleclicked.connect(self.show_params_controller)
plot.vb.ygain_zoom.connect(self.params_controller.clim_zoom)
# plot.vb.xsize_zoom.connect(self.params_controller.apply_xsize_zoom)
self.images = []
self.vlines = []
for c in range(self.source.nb_channel):
if visible_channels[c]:
image = pg.ImageItem()
self.plots[c].addItem(image)
self.images.append(image)
vline = pg.InfiniteLine(angle = 90, movable = False, pen = self.params['vline_color'])
vline.setZValue(1) # ensure vline is above plot elements
self.plots[c].addItem(vline)
self.vlines.append(vline)
else:
self.images.append(None)
self.vlines.append(None)
def initialize_time_freq(self):
tfr_params = self.params.param('timefreq')
sample_rate = self.source.sample_rate
# we take sample_rate = f_stop*4 or (original sample_rate)
if tfr_params['f_stop']*4 < sample_rate:
wanted_sub_sample_rate = tfr_params['f_stop']*4
else:
wanted_sub_sample_rate = sample_rate
        # this tries to find the best size to get a timefreq of 2**N by changing
# the sub_sample_rate and the sig_chunk_size
d = self.worker_params = {}
d['wanted_size'] = self.params['xsize']
l = d['len_wavelet'] = int(2**np.ceil(np.log(d['wanted_size']*wanted_sub_sample_rate)/ | np.log(2) | numpy.log |
from manimlib.imports import *
from math import *
import numpy
class timestable(Scene):
CONFIG = {
"colors" : [DARK_BLUE, YELLOW, RED],
"times" : 500.01,
}
def ll(self, circ, x, y):
lines = VGroup()
for i in range(y):
lines.add(Line(circ.point_from_proportion((i%y) / y), circ.point_from_proportion(((i * x) % y) / y), stroke_width=0.9))
lines.set_color_by_gradient(*self.colors)
return lines
def construct(self):
mod_val = 500
r = FRAME_HEIGHT*0.9 / 2
x_ = []
y_ = []
circle = VGroup()
lines = VGroup()
group = VGroup()
circ = Circle().set_height(FRAME_HEIGHT * 0.9)
texo = TextMobject("$n\\times$").scale(0.5)
tx = TextMobject("$\\mod$").scale(0.5)
for m in numpy.arange(10, mod_val + 2, 3):
x_.clear()
y_.clear()
self.remove(lines, circle, group)
lines = VGroup()
circle = VGroup()
for x in numpy.arange(1, m + 1, 1):
x_.append(r * cos(x * 2 * PI / m))
y_.append(r * sin(x * 2 * PI / m))
for x, y in zip(x_, y_):
circle.add(Dot(np.array([x,y,0]), color=RED, radius=(0.025 - m*0.01/500.0)))
for i in | numpy.arange(0, m, 1) | numpy.arange |
# reads voxels
from __future__ import division, print_function, absolute_import
import numpy as np
import sys
import math
import os
import timeit
import argparse
import pickle as pkl
import sklearn.utils
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# one chunk
class SingleChunk:
def __init__(self, path='', batch_size=32):
self.path = path
self.batch_size = batch_size
if path.endswith('.npy'):
self.allData = np.load(self.path)
else:
self.allData = np.loadtxt(self.path, dtype="float")
sklearn.utils.check_array(self.allData, ensure_2d=False)
# if the dataset is 1D, like dosage features, make it 2D
if len(self.allData.shape) == 1:
self.allData = np.reshape(self.allData, (-1, 1))
self.numSamples = self.allData.shape[0]
self.numFeatures = self.allData.shape[1]
self.currentIndex = 0
# will try to return the amount. might be < amount
def get_amount(self, amount):
loop = False
if self.currentIndex+amount >= self.numSamples:
result = self.allData[self.currentIndex:]
else:
result = self.allData[self.currentIndex: \
self.currentIndex+amount]
self.currentIndex += amount
return result
def reset(self):
self.currentIndex = 0
def randomize(self):
np.random.shuffle(self.allData)
def all(self):
return self.allData
def set_all(self, mat):
self.allData = mat
def is_empty(self):
return self.currentIndex >= self.numSamples
# one label chunk
class SingleChunkLabel(SingleChunk):
def __init__(self, path='', batch_size=32):
self.path = path
self.batch_size = batch_size
if path.endswith('.npy'):
self.allData = np.load(self.path)
else:
self.allData = np.loadtxt(self.path, dtype="float")
sklearn.utils.check_array(self.allData, ensure_2d=False)
self.numSamples = self.allData.shape[0]
# regression labels
self.numClasses = 1
self.currentIndex = 0
# one label chunk
class KeyedSingleChunkLabel(SingleChunk):
def __init__(self, path='', batch_size=32):
self.path = path
self.batch_size = batch_size
if path.endswith('.npy'):
self.allData = np.load(self.path)
else:
self.allData = np.loadtxt(self.path, dtype="float")
sklearn.utils.check_array(self.allData, ensure_2d=False)
self.numSamples = self.allData.shape[0]
self.keys = self.allData[:,0]
self.allData = self.allData[:,1]
# regression labels
self.numClasses = 1
self.currentIndex = 0
def randomize(self):
rng_state = np.random.get_state()
np.random.shuffle(self.allData)
np.random.set_state(rng_state)
np.random.shuffle(self.keys)
# takes in multiple chunks. iterates through them all. min size 1
class ChunkData(object):
def __init__(self, paths=[], batch_size=32, start_chunk=0, preprocessor=None):
self.paths = paths
self.batch_size = batch_size
self.chunk_index = start_chunk - 1
self.iterate_file()
self.numFeatures = self.current_set.numFeatures
self.num_chunks = len(paths)
self.load_preprocessor(preprocessor)
#idempotent, wont iterate beyond the end
def iterate_file(self):
if self.chunk_index+1 >= len(self.paths):
return True
self.chunk_index += 1
self.current_set = SingleChunk(self.paths[self.chunk_index], \
batch_size=self.batch_size)
self.allData = self.current_set.allData
return False
# must return batch_size samples. will repeat across chunks until done
def get_next_batch(self):
# must always return some data. can't be empty
data = self.current_set.get_amount(self.batch_size)
looped = False
while data.shape[0] < self.batch_size:
# current_set must be empty. either reset or iterate
if self.is_last_chunk():
self.reset()
looped = True
else:
self.iterate_file()
new_data = self.current_set.get_amount(self.batch_size-data.shape[0])
data = np.concatenate([data, new_data], axis=0)
if self.current_set.is_empty():
if self.is_last_chunk():
self.reset()
looped = True
else:
# just need to iterate to next file
self.iterate_file()
return data, looped
def get_onetime_batch(self):
if self.current_set.is_empty() and self.is_last_chunk():
return np.array([]), True
data = None
empty = False
while data is None or data.shape[0] < self.batch_size:
if data is None:
data = self.current_set.get_amount(self.batch_size)
else:
new_data = self.current_set.get_amount(self.batch_size-data.shape[0])
data = | np.concatenate([data, new_data], axis=0) | numpy.concatenate |
"""""" #
"""
Copyright (c) 2020-2022, <NAME>
All rights reserved.
This work is licensed under BSD 3-Clause "New" or "Revised" License.
License available at https://github.com/dcajasn/Riskfolio-Lib/blob/master/LICENSE.txt
"""
import numpy as np
import pandas as pd
import statsmodels.api as sm
import sklearn.covariance as skcov
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from numpy.linalg import inv
import riskfolio.AuxFunctions as af
import arch.bootstrap as bs
import riskfolio.DBHT as db
def mean_vector(X, method="hist", d=0.94):
r"""
Calculate the expected returns vector using the selected method.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
    method : str, optional
The method used to estimate the expected returns.
        The default value is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
d : scalar
The smoothing factor of ewma methods.
The default is 0.94.
Returns
-------
mu : 1d-array
The estimation of expected returns.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
assets = X.columns.tolist()
if method == "hist":
mu = np.array(X.mean(), ndmin=2)
elif method == "ewma1":
mu = np.array(X.ewm(alpha=1 - d).mean().iloc[-1, :], ndmin=2)
elif method == "ewma2":
mu = np.array(X.ewm(alpha=1 - d, adjust=False).mean().iloc[-1, :], ndmin=2)
mu = pd.DataFrame(np.array(mu, ndmin=2), columns=assets)
return mu
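# Editor's sketch (hypothetical data, not part of the original library): calling
# mean_vector on a small synthetic returns DataFrame with the EWMA estimator;
# the asset names and smoothing factor are illustrative assumptions.
def _example_mean_vector_usage():
    rets = pd.DataFrame(
        np.random.default_rng(0).normal(0.001, 0.02, size=(60, 2)),
        columns=["Asset_A", "Asset_B"],
    )
    return mean_vector(rets, method="ewma1", d=0.94)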
def covar_matrix(X, method="hist", d=0.94, **kwargs):
r"""
Calculate the covariance matrix using the selected method.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
method : str, optional
The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
        - 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
d : scalar
The smoothing factor of ewma methods.
The default is 0.94.
**kwargs:
Other variables related to covariance estimation. See
`Scikit Learn <https://scikit-learn.org/stable/modules/covariance.html>`_
and chapter 2 of :cite:`b-MLforAM` for more details.
Returns
-------
cov : nd-array
The estimation of covariance matrix.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
assets = X.columns.tolist()
if method == "hist":
cov = np.cov(X.T)
elif method == "ewma1":
cov = X.ewm(alpha=1 - d).cov()
item = cov.iloc[-1, :].name[0]
cov = cov.loc[(item, slice(None)), :]
elif method == "ewma2":
cov = X.ewm(alpha=1 - d, adjust=False).cov()
item = cov.iloc[-1, :].name[0]
cov = cov.loc[(item, slice(None)), :]
elif method == "ledoit":
lw = skcov.LedoitWolf(**kwargs)
lw.fit(X)
cov = lw.covariance_
elif method == "oas":
oas = skcov.OAS(**kwargs)
oas.fit(X)
cov = oas.covariance_
elif method == "shrunk":
sc = skcov.ShrunkCovariance(**kwargs)
sc.fit(X)
cov = sc.covariance_
elif method == "gl":
gl = skcov.GraphicalLassoCV(**kwargs)
gl.fit(X)
cov = gl.covariance_
elif method == "jlogo":
S = np.cov(X.T)
R = np.corrcoef(X.T)
D = np.sqrt(np.clip((1 - R) / 2, a_min=0.0, a_max=1.0))
(_, _, separators, cliques, _) = db.PMFG_T2s(1 - D ** 2, nargout=4)
cov = db.j_LoGo(S, separators, cliques)
cov = np.linalg.inv(cov)
elif method in ["fixed", "spectral", "shrink"]:
cov = np.cov(X.T)
T, N = X.shape
q = T / N
cov = af.denoiseCov(cov, q, kind=method, **kwargs)
cov = pd.DataFrame(np.array(cov, ndmin=2), columns=assets, index=assets)
return cov
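# Editor's sketch (hypothetical data, not part of the original library):
# estimating a shrunk covariance matrix on synthetic returns; the Ledoit-Wolf
# choice is only one of the supported methods.
def _example_covar_matrix_usage():
    rets = pd.DataFrame(
        np.random.default_rng(1).normal(0.0, 0.01, size=(250, 3)),
        columns=["A", "B", "C"],
    )
    return covar_matrix(rets, method="ledoit")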
def forward_regression(X, y, criterion="pvalue", threshold=0.05, verbose=False):
r"""
Select the variables that estimate the best model using stepwise
forward regression. In case none of the variables has a p-value lower
than threshold, the algorithm will select the variable with lowest p-value.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
y : Series of shape (n_samples, 1)
        Target vector, where n_samples is the number of samples.
criterion : str, optional
        The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
    threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
verbose : bool, optional
Enable verbose output. The default is False.
Returns
-------
value : list
A list of the variables that produce the best model.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(y, pd.DataFrame) and not isinstance(y, pd.Series):
raise ValueError("y must be a column DataFrame")
if isinstance(y, pd.DataFrame):
if y.shape[0] > 1 and y.shape[1] > 1:
raise ValueError("y must be a column DataFrame")
included = []
aic = 1e10
sic = 1e10
r2 = -1e10
r2_a = -1e10
pvalues = None
if criterion == "pvalue":
value = 0
while value <= threshold:
excluded = list(set(X.columns) - set(included))
best_pvalue = 999999
new_feature = None
for i in excluded:
factors = included + [i]
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
new_pvalues = results.pvalues
new_pvalues = new_pvalues[new_pvalues.index != "const"]
cond_1 = new_pvalues.max()
if best_pvalue > new_pvalues[i] and cond_1 <= threshold:
best_pvalue = results.pvalues[i]
new_feature = i
pvalues = new_pvalues.copy()
if pvalues is not None:
value = pvalues[pvalues.index != "const"].max()
if new_feature is None:
break
else:
included.append(new_feature)
if verbose:
print("Add {} with p-value {:.6}".format(new_feature, best_pvalue))
        # This part handles the case where no asset has a p-value lower than the threshold
if len(included) == 0:
excluded = list(set(X.columns) - set(included))
best_pvalue = 999999
new_feature = None
for i in excluded:
factors = included + [i]
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
new_pvalues = results.pvalues
new_pvalues = new_pvalues[new_pvalues.index != "const"]
if best_pvalue > new_pvalues[i]:
best_pvalue = results.pvalues[i]
new_feature = i
pvalues = new_pvalues.copy()
value = pvalues[pvalues.index != "const"].max()
included.append(new_feature)
if verbose:
print(
"Add {} with p-value {:.6}".format(pvalues.idxmax(), pvalues.max())
)
else:
excluded = X.columns.tolist()
for i in range(X.shape[1]):
j = 0
value = None
for i in excluded:
factors = included.copy()
factors.append(i)
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
if criterion == "AIC":
if results.aic < aic:
value = i
aic = results.aic
if criterion == "SIC":
if results.bic < sic:
value = i
sic = results.bic
if criterion == "R2":
if results.rsquared > r2:
value = i
r2 = results.rsquared
if criterion == "R2_A":
if results.rsquared_adj > r2_a:
value = i
r2_a = results.rsquared_adj
j += 1
if j == len(excluded):
if value is None:
break
else:
excluded.remove(value)
included.append(value)
if verbose:
if criterion == "AIC":
print(
"Add {} with AIC {:.6}".format(value, results.aic)
)
elif criterion == "SIC":
print(
"Add {} with SIC {:.6}".format(value, results.bic)
)
elif criterion == "R2":
print(
"Add {} with R2 {:.6}".format(
value, results.rsquared
)
)
elif criterion == "R2_A":
print(
"Add {} with Adjusted R2 {:.6}".format(
value, results.rsquared_adj
)
)
return included
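# Editor's sketch (hypothetical factors, not part of the original library):
# selecting regressors with the forward stepwise routine above; the factor
# names and coefficients are made up for illustration.
def _example_forward_regression_usage():
    rng = np.random.default_rng(2)
    X = pd.DataFrame(rng.normal(size=(120, 4)), columns=["F1", "F2", "F3", "F4"])
    y = pd.Series(0.5 * X["F1"] - 0.3 * X["F3"] + rng.normal(scale=0.1, size=120))
    return forward_regression(X, y, criterion="pvalue", threshold=0.05)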
def backward_regression(X, y, criterion="pvalue", threshold=0.05, verbose=False):
r"""
Select the variables that estimate the best model using stepwise
backward regression. In case none of the variables has a p-value lower
than threshold, the algorithm will select the variable with lowest p-value.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
y : Series of shape (n_samples, 1)
        Target vector, where n_samples is the number of samples.
criterion : str, optional
        The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
verbose : bool, optional
Enable verbose output. The default is False.
Returns
-------
value : list
A list of the variables that produce the best model.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(y, pd.DataFrame) and not isinstance(y, pd.Series):
raise ValueError("y must be a column DataFrame")
if isinstance(y, pd.DataFrame):
if y.shape[0] > 1 and y.shape[1] > 1:
raise ValueError("y must be a column DataFrame")
X1 = sm.add_constant(X)
results = sm.OLS(y, X1).fit()
pvalues = results.pvalues
aic = results.aic
sic = results.bic
r2 = results.rsquared
r2_a = results.rsquared_adj
included = pvalues.index.tolist()
excluded = ["const"]
if criterion == "pvalue":
while pvalues[pvalues.index != "const"].max() > threshold:
factors = pvalues[~pvalues.index.isin(excluded)].index.tolist()
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
pvalues = results.pvalues
pvalues = pvalues[pvalues.index != "const"]
if pvalues.shape[0] == 0:
break
excluded = ["const", pvalues.idxmax()]
if verbose and pvalues.max() > threshold:
print(
"Drop {} with p-value {:.6}".format(pvalues.idxmax(), pvalues.max())
)
included = pvalues.index.tolist()
        # This part handles the case where no asset has a p-value lower than the threshold
if len(included) == 0:
excluded = list(set(X.columns) - set(included))
best_pvalue = 999999
new_feature = None
for i in excluded:
factors = included + [i]
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
                new_pvalues = results.pvalues
new_pvalues = new_pvalues[new_pvalues.index != "const"]
if best_pvalue > new_pvalues[i]:
best_pvalue = results.pvalues[i]
new_feature = i
pvalues = new_pvalues.copy()
value = pvalues[pvalues.index != "const"].max()
included.append(new_feature)
if verbose:
print(
"Add {} with p-value {:.6}".format(pvalues.idxmax(), pvalues.max())
)
else:
included.remove("const")
for i in range(X.shape[1]):
j = 0
value = None
for i in included:
factors = included.copy()
factors.remove(i)
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
if criterion == "AIC":
if results.aic < aic:
value = i
aic = results.aic
elif criterion == "SIC":
if results.bic < sic:
value = i
sic = results.bic
elif criterion == "R2":
if results.rsquared > r2:
value = i
r2 = results.rsquared
elif criterion == "R2_A":
if results.rsquared_adj > r2_a:
value = i
r2_a = results.rsquared_adj
j += 1
if j == len(included):
if value is None:
break
else:
included.remove(value)
if verbose:
if criterion == "AIC":
print(
"Drop {} with AIC {:.6}".format(value, results.aic)
)
elif criterion == "SIC":
print(
"Drop {} with SIC {:.6}".format(value, results.bic)
)
elif criterion == "R2":
print(
"Drop {} with R2 {:.6}".format(
value, results.rsquared
)
)
elif criterion == "R2_A":
print(
"Drop {} with Adjusted R2 {:.6}".format(
value, results.rsquared_adj
)
)
return included
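# Editor's sketch (hypothetical factors, not part of the original library): the
# same kind of synthetic setup run through backward elimination, here using the
# Akaike criterion instead of p-values.
def _example_backward_regression_usage():
    rng = np.random.default_rng(3)
    X = pd.DataFrame(rng.normal(size=(120, 4)), columns=["F1", "F2", "F3", "F4"])
    y = pd.Series(0.5 * X["F1"] - 0.3 * X["F3"] + rng.normal(scale=0.1, size=120))
    return backward_regression(X, y, criterion="AIC")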
def PCR(X, y, n_components=0.95):
r"""
    Estimate the coefficients using Principal Components Regression (PCR).
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
y : Series of shape (n_samples, 1)
Target vector, where n_samples is the number of samples.
n_components : int, float, None or str, optional
if 1 < n_components (int), it represents the number of components that
will be kept. if 0 < n_components < 1 (float), it represents the
percentage of variance that is explained by the kept components.
See `PCA <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_
for more details. The default is 0.95.
Returns
-------
value : nd-array
An array with the coefficients of the model calculated using PCR.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(y, pd.DataFrame) and not isinstance(y, pd.Series):
raise ValueError("y must be a column DataFrame")
if isinstance(y, pd.DataFrame):
if y.shape[0] > 1 and y.shape[1] > 1:
raise ValueError("y must be a column DataFrame")
scaler = StandardScaler()
scaler.fit(X)
X_std = scaler.transform(X)
pca = PCA(n_components=n_components)
pca.fit(X_std)
Z_p = pca.transform(X_std)
V_p = pca.components_.T
results = sm.OLS(y, sm.add_constant(Z_p)).fit()
beta_pc = results.params[1:]
beta_pc = np.array(beta_pc, ndmin=2)
std = np.array(np.std(X, axis=0, ddof=1), ndmin=2)
mean = np.array(np.mean(X, axis=0), ndmin=2)
beta = V_p @ beta_pc.T / std.T
beta_0 = np.array(y.mean(), ndmin=2) - np.sum(beta * mean.T)
beta = np.insert(beta, 0, beta_0)
beta = np.array(beta, ndmin=2)
return beta
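# --- Illustrative usage sketch (not part of the library) ---------------------------
# PCR, as implemented above, standardizes X, regresses y on the principal components
# and maps the component coefficients back to the original feature space; beta stores
# the intercept in position 0. The toy data below is an assumption for this sketch.
def _example_pcr():
    np.random.seed(1)
    X_demo = pd.DataFrame(np.random.randn(200, 5), columns=["F0", "F1", "F2", "F3", "F4"])
    true_coefs = np.array([0.5, -0.2, 0.0, 0.1, 0.3])
    y_demo = pd.Series(X_demo.to_numpy() @ true_coefs + 0.05 * np.random.randn(200))
    beta = PCR(X_demo, y_demo, n_components=0.95)
    # beta has shape (1, n_features + 1): intercept first, then one coefficient per feature.
    return beta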
def loadings_matrix(
X,
Y,
feature_selection="stepwise",
stepwise="Forward",
criterion="pvalue",
threshold=0.05,
n_components=0.95,
verbose=False,
):
r"""
Estimate the loadings matrix using stepwise regression.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
Y : DataFrame of shape (n_samples, n_assets)
Target matrix, where n_samples is the number of samples and
n_assets is the number of assets.
feature_selection: str 'stepwise' or 'PCR', optional
Indicate the method used to estimate the loadings matrix.
The default is 'stepwise'.
stepwise: str 'Forward' or 'Backward', optional
Indicate the method used for stepwise regression.
The default is 'Forward'.
criterion : str, optional
The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
n_components : int, float, None or str, optional
if 1 < n_components (int), it represents the number of components that
will be kept. if 0 < n_components < 1 (float), it represents the
percentage of variance that is explained by the kept components.
See `PCA <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_
for more details. The default is 0.95.
verbose : bool, optional
Enable verbose output. The default is False.
Returns
-------
loadings : DataFrame
A DataFrame with the loadings matrix.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(Y, pd.DataFrame):
raise ValueError("Y must be a DataFrame")
rows = Y.columns.tolist()
cols = X.columns.tolist()
cols.insert(0, "const")
loadings = np.zeros((len(rows), len(cols)))
loadings = pd.DataFrame(loadings, index=rows, columns=cols)
for i in rows:
if feature_selection == "stepwise":
if stepwise == "Forward":
included = forward_regression(
X, Y[i], criterion=criterion, threshold=threshold, verbose=verbose
)
elif stepwise == "Backward":
included = backward_regression(
X, Y[i], criterion=criterion, threshold=threshold, verbose=verbose
)
else:
raise ValueError("Choose and adecuate stepwise method")
results = sm.OLS(Y[i], sm.add_constant(X[included])).fit()
params = results.params
loadings.loc[i, params.index.tolist()] = params.T
elif feature_selection == "PCR":
beta = PCR(X, Y[i], n_components=n_components)
beta = pd.Series(np.ravel(beta), index=cols)
loadings.loc[i, cols] = beta.T
return loadings
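# --- Illustrative usage sketch (not part of the library) ---------------------------
# Estimating a loadings matrix for two hypothetical assets driven by three hypothetical
# factors; asset and factor names are assumptions made only for this sketch.
def _example_loadings_matrix():
    np.random.seed(2)
    factors = pd.DataFrame(0.01 * np.random.randn(300, 3), columns=["MKT", "SMB", "HML"])
    assets = pd.DataFrame({
        "AAA": 1.0 * factors["MKT"] + 0.2 * factors["SMB"] + 0.002 * np.random.randn(300),
        "BBB": 0.7 * factors["MKT"] - 0.4 * factors["HML"] + 0.002 * np.random.randn(300),
    })
    # Rows are assets, columns are 'const' plus the factors; factors that are not
    # selected by the stepwise procedure keep a zero loading.
    return loadings_matrix(factors, assets, feature_selection="stepwise",
                           stepwise="Forward", criterion="pvalue", threshold=0.05)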
def risk_factors(
X,
Y,
B=None,
const=False,
method_mu="hist",
method_cov="hist",
feature_selection="stepwise",
stepwise="Forward",
criterion="pvalue",
threshold=0.05,
n_components=0.95,
error=True,
**kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based on risk
factors models :cite:`b-Ross` :cite:`b-Fan`.
.. math::
\begin{aligned}
R & = \alpha + B F + \epsilon \\
\mu_{f} & = \alpha +BE(F) \\
\Sigma_{f} & = B \Sigma_{F} B^{T} + \Sigma_{\epsilon} \\
\end{aligned}
where:
:math:`R` is the series returns.
:math:`\alpha` is the intercept.
:math:`B` is the loadings matrix.
:math:`F` is the expected returns vector of the risk factors.
:math:`\Sigma_{F}` is the covariance matrix of the risk factors.
:math:`\Sigma_{\epsilon}` is the covariance matrix of error terms.
:math:`\mu_{f}` is the expected returns vector obtained with the
risk factor model.
:math:`\Sigma_{f}` is the covariance matrix obtained with the risk
factor model.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
Y : DataFrame of shape (n_samples, n_assets)
Target matrix, where n_samples is the number of samples and
n_assets is the number of assets.
B : DataFrame of shape (n_assets, n_features), optional
Loadings matrix. If it is not specified, it is estimated using
stepwise regression. The default is None.
const : bool, optional
Indicate if the loadings matrix has a constant.
The default is False.
feature_selection: str, 'stepwise' or 'PCR', optional
Indicate the method used to estimate the loadings matrix.
The default is 'stepwise'.
stepwise: str, 'Forward' or 'Backward'
Indicate the method used for stepwise regression.
The default is 'Forward'.
criterion : str, optional
The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
n_components : int, float, None or str, optional
if 1 < n_components (int), it represents the number of components that
will be kept. if 0 < n_components < 1 (float), it represents the
percentage of variance that is explained by the kept components.
See `PCA <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_
for more details. The default is 0.95.
error : bool
Indicate if diagonal covariance matrix of errors is included (only
when B is estimated through a regression).
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of risk factors model.
cov : DataFrame
The covariance matrix of risk factors model.
returns : DataFrame
The returns based on a risk factor model.
nav : DataFrame
The cumulative uncompounded returns based on a risk factor model.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame) and not isinstance(Y, pd.DataFrame):
raise ValueError("X and Y must be DataFrames")
if B is None:
B = loadings_matrix(
X,
Y,
feature_selection=feature_selection,
stepwise=stepwise,
criterion=criterion,
threshold=threshold,
n_components=n_components,
verbose=False,
)
elif not isinstance(B, pd.DataFrame):
raise ValueError("B must be a DataFrame")
X1 = X.copy()
if const == True or "const" in B.columns.tolist():
X1 = sm.add_constant(X)
assets = Y.columns.tolist()
dates = X.index.tolist()
mu_f = np.array(mean_vector(X1, method=method_mu, **kwargs), ndmin=2)
S_f = np.array(covar_matrix(X1, method=method_cov, **kwargs), ndmin=2)
B = np.array(B, ndmin=2)
returns = np.array(X1, ndmin=2) @ B.T
mu = B @ mu_f.T
if error == True:
e = np.array(Y, ndmin=2) - returns
S_e = np.diag(np.var(np.array(e), ddof=1, axis=0))
S = B @ S_f @ B.T + S_e
elif error == False:
S = B @ S_f @ B.T
mu = pd.DataFrame(mu.T, columns=assets)
cov = pd.DataFrame(S, index=assets, columns=assets)
returns = pd.DataFrame(returns, index=dates, columns=assets)
nav = returns.cumsum()
return mu, cov, returns, nav
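# --- Illustrative usage sketch (not part of the library) ---------------------------
# A typical call lets risk_factors estimate B itself (B=None) via stepwise regression
# and then assembles mu and cov from the factor moments; the argument values below are
# assumptions made only for this sketch.
def _example_risk_factors(factor_returns, asset_returns):
    # factor_returns: DataFrame (n_samples, n_features); asset_returns: DataFrame (n_samples, n_assets)
    mu, cov, returns, nav = risk_factors(factor_returns, asset_returns, B=None,
                                         method_mu="hist", method_cov="hist",
                                         feature_selection="stepwise", error=True)
    return mu, cov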
def black_litterman(
X, w, P, Q, delta=1, rf=0, eq=True, method_mu="hist", method_cov="hist", **kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based
on the Black Litterman model :cite:`b-BlackLitterman` :cite:`b-Black1`.
.. math::
\begin{aligned}
\Pi & = \delta \Sigma w \\
\Pi_{BL} & = \left [ (\tau\Sigma)^{-1}+ P^{T} \Omega^{-1}P \right]^{-1}
\left[(\tau\Sigma)^{-1} \Pi + P^{T} \Omega^{-1} Q \right] \\
M & = \left((\tau\Sigma)^{-1} + P^{T}\Omega^{-1} P \right)^{-1} \\
\mu_{BL} & = \Pi_{BL} + r_{f} \\
\Sigma_{BL} & = \Sigma + M \\
\end{aligned}
where:
:math:`r_{f}` is the risk free rate.
:math:`\delta` is the risk aversion factor.
:math:`\Pi` is the equilibrium excess returns.
:math:`\Sigma` is the covariance matrix.
:math:`P` is the views matrix.
:math:`Q` is the views returns matrix.
:math:`\Omega` is the covariance matrix of the error views.
:math:`\mu_{BL}` is the mean vector obtained with the black
litterman model.
:math:`\Sigma_{BL}` is the covariance matrix obtained with the black
litterman model.
Parameters
----------
X : DataFrame of shape (n_samples, n_assets)
Assets matrix, where n_samples is the number of samples and
n_assets is the number of assets.
w : DataFrame of shape (n_assets, 1)
Weights matrix, where n_assets is the number of assets.
P : DataFrame of shape (n_views, n_assets)
Analyst's views matrix, can be relative or absolute.
Q : DataFrame of shape (n_views, 1)
Expected returns of analyst's views.
delta : float, optional
Risk aversion factor. The default value is 1.
rf : scalar, optional
Risk free rate. The default is 0.
eq : bool, optional
Indicate whether to use equilibrium or historical excess returns.
The default is True.
method_mu : str, can be {'hist', 'ewma1' or 'ewma2'}
The method used to estimate the expected returns.
The default value is 'hist'.
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
method_cov : str, optional
The method used to estimate the covariance matrix:
The default is 'hist'. Possible values are:
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of Black Litterman model.
cov : DataFrame
The covariance matrix of Black Litterman model.
w : DataFrame
The equilibrium weights of Black Litterman model, without constraints.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame) and not isinstance(w, pd.DataFrame):
raise ValueError("X and w must be DataFrames")
if w.shape[0] > 1 and w.shape[1] > 1:
raise ValueError("w must be a column DataFrame")
assets = X.columns.tolist()
w = np.array(w, ndmin=2)
if w.shape[0] == 1:
w = w.T
mu = np.array(mean_vector(X, method=method_mu, **kwargs), ndmin=2)
S = np.array(covar_matrix(X, method=method_cov, **kwargs), ndmin=2)
P = np.array(P, ndmin=2)
Q = np.array(Q, ndmin=2)
tau = 1 / X.shape[0]
Omega = np.array(np.diag(np.diag(P @ (tau * S) @ P.T)), ndmin=2)
if eq == True:
PI = delta * (S @ w)
elif eq == False:
PI = mu.T - rf
PI_ = inv(inv(tau * S) + P.T @ inv(Omega) @ P) @ (
inv(tau * S) @ PI + P.T @ inv(Omega) @ Q
)
M = inv(inv(tau * S) + P.T @ inv(Omega) @ P)
# PI_1 = PI + (tau * S* P.T) * inv(P * tau * S * P.T + Omega) * (Q - P * PI)
# M = tau * S - (tau * S * P.T) * inv(P * tau * S * P.T + Omega) * P * tau * S
mu = PI_ + rf
mu = mu.T
cov = S + M
w = inv(delta * cov) @ PI_
mu = pd.DataFrame(mu, columns=assets)
cov = pd.DataFrame(cov, index=assets, columns=assets)
w = pd.DataFrame(w, index=assets)
return mu, cov, w
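# --- Illustrative usage sketch (not part of the library) ---------------------------
# A single relative view ("the first asset outperforms the second by 2% per period")
# expressed through P and Q for a hypothetical three-asset universe; all numbers are
# assumptions made only for this sketch.
def _example_black_litterman(asset_returns, benchmark_weights):
    # asset_returns: DataFrame (n_samples, 3); benchmark_weights: column DataFrame (3, 1)
    P_views = pd.DataFrame([[1.0, -1.0, 0.0]], columns=asset_returns.columns)
    Q_views = pd.DataFrame([[0.02]])
    return black_litterman(asset_returns, benchmark_weights, P=P_views, Q=Q_views,
                           delta=2.5, rf=0.0, eq=True)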
def augmented_black_litterman(
X,
w,
F=None,
B=None,
P=None,
Q=None,
P_f=None,
Q_f=None,
delta=1,
rf=0,
eq=True,
const=True,
method_mu="hist",
method_cov="hist",
**kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based
on the Augmented Black Litterman model :cite:`b-WCheung`.
.. math::
\begin{aligned}
\Pi^{a} & = \delta \left [ \begin{array}{c} \Sigma \\ \Sigma_{F} B^{T} \\ \end{array} \right ] w \\
P^{a} & = \left [ \begin{array}{cc} P & 0 \\ 0 & P_{F} \\ \end{array} \right ] \\
Q^{a} & = \left [ \begin{array}{c} Q \\ Q_{F} \\ \end{array} \right ] \\
\Sigma^{a} & = \left [ \begin{array}{cc} \Sigma & B \Sigma_{F}\\ \Sigma_{F} B^{T} & \Sigma_{F} \\ \end{array} \right ] \\
\Omega^{a} & = \left [ \begin{array}{cc} \Omega & 0 \\ 0 & \Omega_{F} \\ \end{array} \right ] \\
\Pi^{a}_{BL} & = \left [ (\tau \Sigma^{a})^{-1} + (P^{a})^{T} (\Omega^{a})^{-1} P^{a} \right ]^{-1}
\left [ (\tau\Sigma^{a})^{-1} \Pi^{a} + (P^{a})^{T} (\Omega^{a})^{-1} Q^{a} \right ] \\
M^{a} & = \left ( (\tau\Sigma^{a})^{-1} + (P^{a})^{T} (\Omega^{a})^{-1} P^{a} \right )^{-1} \\
\mu^{a}_{BL} & = \Pi^{a}_{BL} + r_{f} \\
\Sigma^{a}_{BL} & = \Sigma^{a} + M^{a} \\
\end{aligned}
where:
:math:`r_{f}` is the risk free rate.
:math:`\delta` is the risk aversion factor.
:math:`B` is the loadings matrix.
:math:`\Sigma` is the covariance matrix of assets.
:math:`\Sigma_{F}` is the covariance matrix of factors.
:math:`\Sigma^{a}` is the augmented covariance matrix.
:math:`P` is the assets views matrix.
:math:`Q` is the assets views returns matrix.
:math:`P_{F}` is the factors views matrix.
:math:`Q_{F}` is the factors views returns matrix.
:math:`P^{a}` is the augmented views matrix.
:math:`Q^{a}` is the augmented views returns matrix.
:math:`\Pi^{a}` is the augmented equilibrium excess returns.
:math:`\Omega` is the covariance matrix of errors of assets views.
:math:`\Omega_{F}` is the covariance matrix of errors of factors views.
:math:`\Omega^{a}` is the covariance matrix of errors of augmented views.
:math:`\mu^{a}_{BL}` is the mean vector obtained with the Augmented Black
Litterman model.
:math:`\Sigma^{a}_{BL}` is the covariance matrix obtained with the Augmented
Black Litterman model.
Parameters
----------
X : DataFrame of shape (n_samples, n_assets)
Assets matrix, where n_samples is the number of samples and
n_assets is the number of assets.
w : DataFrame of shape (n_assets, 1)
Weights matrix, where n_assets is the number of assets.
F : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
B : DataFrame of shape (n_assets, n_features), optional
Loadings matrix. The default is None.
P : DataFrame of shape (n_views, n_assets)
Analyst's views matrix, can be relative or absolute.
Q : DataFrame of shape (n_views, 1)
Expected returns of analyst's views.
P_f : DataFrame of shape (n_views, n_features)
Analyst's factors views matrix, can be relative or absolute.
Q_f : DataFrame of shape (n_views, 1)
Expected returns of analyst's factors views.
delta : float, optional
Risk aversion factor. The default value is 1.
rf : scalar, optional
Risk free rate. The default is 0.
eq : bool, optional
Indicate whether to use equilibrium or historical excess returns.
The default is True.
const : bool, optional
Indicate if the loadings matrix has a constant.
The default is True.
method_mu : str, can be {'hist', 'ewma1' or 'ewma2'}
The method used to estimate the expected returns.
The default value is 'hist'.
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
method_cov : str, optional
The method used to estimate the covariance matrix:
The default is 'hist'. Possible values are:
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of Augmented Black Litterman model.
cov : DataFrame
The covariance matrix of Augmented Black Litterman model.
w : DataFrame
The equilibrium weights of Augmented Black Litterman model, without constraints.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame) and not isinstance(w, pd.DataFrame):
raise ValueError("X and w must be DataFrames")
if not isinstance(F, pd.DataFrame) and not isinstance(B, pd.DataFrame):
raise ValueError("F and B must be DataFrames")
if w.shape[0] > 1 and w.shape[1] > 1:
raise ValueError("w must be a column DataFrame")
assets = X.columns.tolist()
N = len(assets)
w = np.array(w, ndmin=2)
if w.shape[0] == 1:
w = w.T
if B is not None:
B = np.array(B, ndmin=2)
if const == True:
alpha = B[:, :1]
B = B[:, 1:]
mu = np.array(mean_vector(X, method=method_mu, **kwargs), ndmin=2)
S = np.array(covar_matrix(X, method=method_cov, **kwargs), ndmin=2)
tau = 1 / X.shape[0]
if F is not None:
mu_f = np.array(mean_vector(F, method=method_mu, **kwargs), ndmin=2)
S_f = np.array(covar_matrix(F, method=method_cov, **kwargs), ndmin=2)
if P is not None and Q is not None and P_f is None and Q_f is None:
S_a = S
P_a = P
Q_a = Q
Omega = np.array(np.diag(np.diag(P @ (tau * S) @ P.T)), ndmin=2)
Omega_a = Omega
if eq == True:
PI_a_ = delta * S_a @ w
elif eq == False:
PI_a_ = mu.T - rf
elif P is None and Q is None and P_f is not None and Q_f is not None:
S_a = S_f
P_a = P_f
Q_a = Q_f
Omega_f = np.array(np.diag(np.diag(P_f @ (tau * S_f) @ P_f.T)), ndmin=2)
Omega_a = Omega_f
if eq == True:
PI_a_ = delta * (S_f @ B.T) @ w
elif eq == False:
PI_a_ = mu_f.T - rf
elif P is not None and Q is not None and P_f is not None and Q_f is not None:
S_a = np.hstack((np.vstack((S, S_f @ B.T)), np.vstack((B @ S_f, S_f))))
P = np.array(P, ndmin=2)
Q = np.array(Q, ndmin=2)
P_f = np.array(P_f, ndmin=2)
Q_f = np.array(Q_f, ndmin=2)
zeros_1 = np.zeros((P_f.shape[0], P.shape[1]))
zeros_2 = np.zeros((P.shape[0], P_f.shape[1]))
P_a = np.hstack((np.vstack((P, zeros_1)), np.vstack((zeros_2, P_f))))
Q_a = np.vstack((Q, Q_f))
Omega = np.array(np.diag(np.diag(P @ (tau * S) @ P.T)), ndmin=2)
Omega_f = np.array(np.diag(np.diag(P_f @ (tau * S_f) @ P_f.T)), ndmin=2)
zeros = np.zeros((Omega.shape[0], Omega_f.shape[0]))
Omega_a = np.hstack((np.vstack((Omega, zeros.T)), np.vstack((zeros, Omega_f))))
if eq == True:
PI_a_ = delta * (np.vstack((S, S_f @ B.T)) @ w)
elif eq == False:
PI_a_ = np.vstack((mu.T, mu_f.T)) - rf
PI_a = inv(inv(tau * S_a) + P_a.T @ inv(Omega_a) @ P_a) @ (
inv(tau * S_a) @ PI_a_ + P_a.T @ inv(Omega_a) @ Q_a
)
M_a = inv(inv(tau * S_a) + P_a.T @ inv(Omega_a) @ P_a)
# PI_a = PI_a_ + (tau * S_a @ P_a.T) * inv(P_a @ tau * S_a @ P_a.T + Omega) * (Q_a - P_a @ PI_a_)
# M = tau * S_a - (tau * S_a @ P_a.T) * inv(P_a @ tau * S_a @ P_a.T + Omega_a) @ P_a @ tau * S_a
mu_a = PI_a + rf
mu_a = mu_a.T
cov_a = S_a + M_a
w_a = inv(delta * cov_a) @ PI_a
if P is None and Q is None and P_f is not None and Q_f is not None:
mu_a = mu_a @ B.T
cov_a = B @ cov_a @ B.T
w_a = inv(delta * cov_a) @ B @ PI_a
if const == True:
mu_a = mu_a[:, :N] + alpha.T
mu_a = pd.DataFrame(mu_a[:, :N], columns=assets)
cov_a = pd.DataFrame(cov_a[:N, :N], index=assets, columns=assets)
w_a = pd.DataFrame(w_a[:N, 0], index=assets)
return mu_a, cov_a, w_a
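# --- Illustrative usage sketch (not part of the library) ---------------------------
# Combining one asset view and one factor view in the augmented model; shapes follow
# the docstring above and all values are assumptions made only for this sketch. With
# const=True the first column of the loadings matrix B is treated as the intercept.
def _example_augmented_black_litterman(asset_returns, factor_returns, weights, loadings):
    P_assets = pd.DataFrame([[1.0] + [0.0] * (asset_returns.shape[1] - 1)],
                            columns=asset_returns.columns)
    Q_assets = pd.DataFrame([[0.01]])
    P_factors = pd.DataFrame([[1.0] + [0.0] * (factor_returns.shape[1] - 1)],
                             columns=factor_returns.columns)
    Q_factors = pd.DataFrame([[0.005]])
    return augmented_black_litterman(asset_returns, weights, F=factor_returns, B=loadings,
                                     P=P_assets, Q=Q_assets, P_f=P_factors, Q_f=Q_factors,
                                     delta=2.5, rf=0.0, eq=True, const=True)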
def black_litterman_bayesian(
X,
F,
B,
P_f,
Q_f,
delta=1,
rf=0,
eq=True,
const=True,
diag=True,
method_mu="hist",
method_cov="hist",
**kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based
on the Black Litterman Bayesian model :cite:`b-BLB`.
.. math::
\begin{aligned}
\Sigma & = B \Sigma_{F} B^{T} + D \\
\overline{\Pi}_{F} & = \left ( \Sigma_{F}^{-1} + P_{F}^{T}\Omega_{F}^{-1}P_{F} \right )^{-1} \left ( \Sigma_{F}^{-1}\Pi_{F} + P_{F}^{T}\Omega_{F}^{-1}Q_{F} \right) \\
\overline{\Sigma}_{F} & = \left ( \Sigma_{F}^{-1} + P_{F}^{T}\Omega_{F}^{-1}P_{F} \right )^{-1} \\
\Sigma_{BLB} & = \left( \Sigma^{-1} - \Sigma^{-1} B \left( \overline{\Sigma}_{F}^{-1} + B^{T}\Sigma^{-1}B \right)^{-1} B^{T}\Sigma^{-1} \right )^{-1} \\
\mu_{BLB} & = \Sigma_{BLB} \left ( \Sigma^{-1} B \left( \overline{\Sigma}_{F}^{-1} +B^{T}\Sigma^{-1}B \right)^{-1} \overline{\Sigma}_{F}^{-1} \overline{\Pi}_{F} \right ) + r_{f} \\
\end{aligned}
where:
:math:`r_{f}` is the risk free rate.
:math:`B` is the loadings matrix.
:math:`D` is a diagonal matrix of variance of errors of a factor model.
:math:`\Sigma` is the covariance matrix obtained with a factor model.
:math:`\Pi_{F}` is the equilibrium excess returns of factors.
:math:`\overline{\Pi}_{F}` is the posterior excess returns of factors.
:math:`\Sigma_{F}` is the covariance matrix of factors.
:math:`\overline{\Sigma}_{F}` is the posterior covariance matrix of factors.
:math:`P_{F}` is the factors views matrix.
:math:`Q_{F}` is the factors views returns matrix.
:math:`\Omega_{F}` is the covariance matrix of errors of factors views.
:math:`\mu_{BLB}` is the mean vector obtained with the Black
Litterman Bayesian model or posterior predictive mean.
:math:`\Sigma_{BLB}` is the covariance matrix obtained with the Black
Litterman Bayesian model or posterior predictive covariance.
Parameters
----------
X : DataFrame of shape (n_samples, n_assets)
Assets matrix, where n_samples is the number of samples and
n_assets is the number of assets.
F : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
B : DataFrame of shape (n_assets, n_features), optional
Loadings matrix. The default is None.
P_f : DataFrame of shape (n_views, n_features)
Analyst's factors views matrix, can be relative or absolute.
Q_f : DataFrame of shape (n_views, 1)
Expected returns of analyst's factors views.
delta : float, optional
Risk aversion factor. The default value is 1.
rf : scalar, optional
Risk free rate. The default is 0.
eq : bool, optional
Indicate whether to use equilibrium or historical excess returns.
The default is True.
const : bool, optional
Indicate if the loadings matrix has a constant.
The default is True.
diag : bool, optional
Indicate if we use the diagonal matrix to calculate covariance matrix
of factor model, only useful when we work with a factor model based on
a regression model (only equity portfolios).
The default is True.
method_mu : str, can be {'hist', 'ewma1' or 'ewma2'}
The method used to estimate the expected returns.
The default value is 'hist'.
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
method_cov : str, optional
The method used to estimate the covariance matrix:
The default is 'hist'. Possible values are:
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of Black Litterman model.
cov : DataFrame
The covariance matrix of Black Litterman model.
w : DataFrame
The equilibrium weights of Black Litterman model, without constraints.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be DataFrames")
if not isinstance(F, pd.DataFrame) and not isinstance(B, pd.DataFrame):
raise ValueError("F and B must be DataFrames")
assets = X.columns.tolist()
if B is not None:
B = np.array(B, ndmin=2)
if const == True:
alpha = B[:, :1]
B = B[:, 1:]
mu_f = np.array(mean_vector(F, method=method_mu, **kwargs), ndmin=2)
mu_f = (mu_f - rf).T
tau = 1 / X.shape[0]
S_f = np.array(covar_matrix(F, method=method_cov, **kwargs), ndmin=2)
S = B @ S_f @ B.T
if diag == True:
D = X.to_numpy() - F @ B.T
D = np.diag(D.var())
S = S + D
Omega_f = np.array(np.diag(np.diag(P_f @ (tau * S_f) @ P_f.T)), ndmin=2)
S_hat = inv(inv(S_f) + P_f.T @ inv(Omega_f) @ P_f)
Pi_hat = S_hat @ (inv(S_f) @ mu_f + P_f.T @ inv(Omega_f) @ Q_f)
S_blb = inv(inv(S) - inv(S) @ B @ inv(inv(S_hat) + B.T @ inv(S) @ B) @ B.T @ inv(S))
Pi_blb = (
S_blb @ inv(S) @ B @ inv( | inv(S_hat) | numpy.linalg.inv |
import numpy as np
from scipy.optimize import minimize
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def cost_function(theta, X, y, sample_weight, lambda_=0):
m = len(y)
theta = theta.ravel()
bias, weights = theta[0], theta[1:] # TODO 1D
weights = weights.reshape(X.shape[1], y.shape[1])
h = sigmoid(np.dot(X, weights) + bias)
J = (-np.dot((y * sample_weight).T, np.log(h))
- np.dot(((1 - y) * sample_weight).T, np.log(1 - h))) / m
weights_grad = np.dot(X.T, h - y) / m
bias_grad = np.dot(np.ones((1, X.shape[0])), h - y) / m
grad = | np.concatenate([bias_grad, weights_grad]) | numpy.concatenate |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for implied_vol_approx."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
bs = tff.black_scholes
@test_util.run_all_in_graph_and_eager_modes
class ApproxImpliedVolTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for methods in implied_vol module."""
def test_approx_implied_vol(self):
"""Basic test of the implied vol calculation."""
| np.random.seed(6589) | numpy.random.seed |
# Copyright 2017 <NAME>. All rights reserved.
#
# Licensed under the MIT license
"""
Commonly used helper functions
"""
import tensorflow as tf
import os
from warnings import warn
import numpy as np
import h5py
import matplotlib.pyplot as pl
import seaborn as sns
from global_defs import GlobalDefs
# Functions
def ca_convolve(trace, ca_timeconstant, frame_rate, kernel=None):
"""
Convolves a trace with a decaying calcium kernel
:param trace: The activity trace to convolve
:param ca_timeconstant: The timeconstant of the calcium indicator
:param frame_rate: The original frame-rate to relate samples to the time constant
:param kernel: Optionally a pre-computed kernel in which case ca_timeconstant and frame_rate will be ignored
:return: The convolved trace
"""
def ca_kernel(tau, framerate):
"""
Creates a calcium decay kernel for the given frameRate
with the given half-life in seconds
"""
fold_length = 5 # make kernel length equal to 5 half-times (decay to 3%)
klen = int(fold_length * tau * framerate)
tk = np.linspace(0, fold_length * tau, klen, endpoint=False)
k = 2 ** (-1 * tk / tau)
k = k / k.sum()
return k
if ca_timeconstant == 0 and kernel is None:
return trace
if kernel is None:
kernel = ca_kernel(ca_timeconstant, frame_rate)
return np.convolve(trace, kernel)[:trace.size]
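# --- Illustrative sketch (not part of the original module) -------------------------
# The kernel built inside ca_convolve is 2**(-t / tau), i.e. it decays with half-life
# tau and spans five half-lives. Convolving a unit impulse therefore returns the
# (normalized) kernel itself, so the response falls to ~50% after one time constant.
# The frame rate and time constant below are assumptions made only for this sketch.
def _example_ca_convolve():
    frame_rate = 100   # Hz, assumed
    tau = 1.8          # seconds, assumed indicator half-life
    impulse = np.zeros(1000)
    impulse[0] = 1.0
    trace = ca_convolve(impulse, tau, frame_rate)
    half_life_index = int(tau * frame_rate)
    # trace[half_life_index] is close to 0.5 * trace[0]
    return trace[0], trace[half_life_index]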
def create_weight_var(name, shape, w_decay=None, loss_collection="losses", dale=False):
"""
Creates a weight variable with optional weight decay initialized with sd = 1/size
:param name: The name of the variable
:param shape: The desired shape
:param w_decay: None or L2 loss term if weight decay is desired
:param loss_collection: The name of the collection to which loss should be added
:param dale: If set to true enforce L1 loss on those weights going out of a unit whose sign is opposite
of the average sign of weights from that unit
:return: The weight variable
"""
var = tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
if dale:
# compute a loss that penalizes weights coming from one unit which have a sign
# opposite of the average
av_w = tf.sign(tf.reduce_mean(var, 1, keepdims=True))
# positive value for weights whose sign is opposite of sign of the mean
dale_loss = tf.reduce_sum(tf.maximum(-av_w * var, 0), name="dale_w_loss_"+name)
tf.add_to_collection(loss_collection, 1e-4*dale_loss)
if w_decay is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), w_decay, name="l2_w_loss_"+name)
tf.add_to_collection(loss_collection, weight_decay)
return var
def create_bias_var(name, shape, init):
"""
Creates a bias variable
:param name: The name of the variable
:param shape: The desired shape
:param init: Initial value
:return: The bias variable
"""
initial = tf.constant(init, shape=shape)
return tf.Variable(initial, name=name)
def create_conv2d(name, x, W, mode="VALID"):
"""
Create 2D convolution with stride 1
:param name: The name of the operation output
:param x: The input tensor
:param W: The convolution weights of desired shape
:param mode: The convolution mode 'VALID' or 'SAME'
:return: The convolution operation
"""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=mode, name=name)
def create_meanpool2d(name, x, ax1, ax2):
"""
Create average 2D pooling operation (i.e. binning operation leaving batch and channel axis untouched)
:param name: The name of the operation output
:param x: The input tensor
:param ax1: The amount of pooling along the first 2D axis
:param ax2: The amount of pooling along the second 2D axis
:return: The pooling operation
"""
return tf.nn.avg_pool(x, ksize=[1, ax1, ax2, 1], strides=[1, ax1, ax2, 1], padding='SAME', name=name)
def get_loss(labels, predictions, loss_collection="losses"):
"""
Computes the total loss as the mean squared error loss of the current prediction and
all the weight decay losses in the model
:param labels: The real output values
:param predictions: The output predictions
:param loss_collection: The name of the collection containing all losses
:return: The total loss tensor
"""
# Calculate batch average mean squared loss
sq_loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
joint_loss = tf.reduce_sum(sq_loss)
tf.add_to_collection(loss_collection, joint_loss)
return tf.add_n(tf.get_collection(loss_collection), name='total_loss'), joint_loss
def create_train_step(total_loss):
"""
Creates a training step of the model given the labels and predictions tensor
:param total_loss: The total loss of the model
:return: The train step
"""
return tf.train.AdamOptimizer(1e-4).minimize(total_loss)
def indexing_matrix(triggers: np.ndarray, past: int, future: int, input_length: int):
"""
Builds an indexing matrix from an vector of triggers
:param triggers: The elements on which to trigger (timepoint 0)
:param past: The number of elements into the past to include
:param future: The number of elements into the future to include
:param input_length: The total length of the array to eventually index to determine valid triggers
:return:
[0]: The n_valid_triggers x (past+1+future) trigger matrix
[1]: The number of triggers that have been cut out because they would have included indices < 0
[2]: The number of triggers that have been cut out because they would have included indices >= input_length
"""
if triggers.ndim > 1:
raise ValueError("Triggers has to be 1D vector")
to_take = np.r_[-past:future + 1][None, :]
t = triggers[:, None]
# construct trigger matrix
index_mat = np.repeat(t, to_take.size, 1) + np.repeat(to_take, t.size, 0)
# identify front and back rows that need to be removed
cut_front = np.sum(np.sum(index_mat < 0, 1) > 0, 0)
cut_back = np.sum(np.sum(index_mat >= input_length, 1) > 0, 0)
# remove out-of-bounds rows
if cut_back > 0:
return index_mat[cut_front:-cut_back, :].astype(int), cut_front, cut_back
else:
return index_mat[cut_front:, :].astype(int), cut_front, 0
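# --- Illustrative sketch (not part of the original module) -------------------------
# For triggers at frames 2 and 8 with past=2, future=1 and an input of length 10, each
# valid trigger becomes one row of column indices [t-2, t-1, t, t+1]; triggers whose
# window would leave [0, input_length) are counted and cut. Values are assumptions
# made only for this sketch.
def _example_indexing_matrix():
    triggers = np.array([2, 8])
    mat, cut_front, cut_back = indexing_matrix(triggers, past=2, future=1, input_length=10)
    # mat == [[0, 1, 2, 3], [6, 7, 8, 9]], cut_front == 0, cut_back == 0
    return mat, cut_front, cut_back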
# Classes
class GradientStandards:
"""
Lightweight wrapper of only standardizations used in a gradient data object
"""
def __init__(self, temp_mean, temp_std, disp_mean, disp_std, ang_mean, ang_std):
"""
Creates a new GradientStandards object
:param temp_mean: The temperature average
:param temp_std: The temperature standard deviation
:param disp_mean: The displacement average
:param disp_std: The displacement standard deviation
:param ang_mean: The angle average
:param ang_std: The angle standard
"""
self.temp_mean = temp_mean
self.temp_std = temp_std
self.disp_mean = disp_mean
self.disp_std = disp_std
self.ang_mean = ang_mean
self.ang_std = ang_std
class NotInitialized(Exception):
def __init__(self, message):
super().__init__(message)
class NetworkModel:
"""
Base class for neuronal network models. Offers very basic shared functionality
"""
def __init__(self, use_dale_constraint, use_tanh):
"""
Creates a new NetworkModel
:param use_dale_constraint: If set to true, 1/2 of network units can only provide inhibition and 1/2 only excitation
:param use_tanh: If set to true we use tanh instead of ReLu as the activation function
"""
self.initialized = False
if use_dale_constraint:
warn("Current implementation of dale constraint hinders network training")
self.use_dale_constraint = use_dale_constraint
self.use_tanh = use_tanh
self.bias_init = 0.0 if use_tanh else 0.1
# set training defaults
self.w_decay = 1e-6 if self.use_tanh else 1e-4 # weight decay needs to be reduced for tanh activation
self.keep_train = 0.5
assert GlobalDefs.frame_rate % GlobalDefs.model_rate == 0
self.t_bin = GlobalDefs.frame_rate // GlobalDefs.model_rate # bin input down to 5Hz
self.binned_size = GlobalDefs.frame_rate * GlobalDefs.hist_seconds // self.t_bin
self._x_in = None # network inputs
# our branches
self._branches = None
# the number of our convolution layers
self.n_conv_layers = None
# deterministic removal units
self._det_remove = {}
# our graph object
self._graph = None # type: tf.Graph
# our session object
self._session = None # type: tf.Session
# saver object to save progress
self._saver = None # type: tf.train.Saver
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
return False # re-raise any previous exceptions
# Protected API
def _check_init(self):
"""
Checks if network is initialized and raises exception otherwise
"""
if not self.initialized:
raise NotInitialized("Can't perform operation before performing setup of graph.")
def _create_convolution_layer(self, branch, prev_out) -> tf.Tensor:
"""
Creates a convolution layer
:param branch: The branch on which to create the layer
:param prev_out: The previous output tensor (= input to the convolution)
:return: The flattened output of the convolution operation
"""
if 't' in self._branches:
len_0 = 1 # branched network, only one input to our convolution layer
else:
len_0 = 3 # fully mixed network, all three inputs are convolved together
w_conv1 = create_weight_var(self.cvn("WEIGHT", branch, -1), [len_0, self.binned_size, 1, self.n_conv_layers])
b_conv1 = create_bias_var(self.cvn("BIAS", branch, -1), [self.n_conv_layers], self.bias_init)
conv1 = create_conv2d(self.cvn("CONV", branch, -1), prev_out, w_conv1)
cname = self.cvn("HIDDEN", branch, -1)
if self.use_tanh:
h_conv1 = tf.nn.tanh(conv1 + b_conv1, cname)
else:
h_conv1 = tf.nn.relu(conv1 + b_conv1, cname)
h_conv1_flat = tf.reshape(h_conv1, [-1, self.n_conv_layers], cname+"_flat")
return h_conv1_flat
def _create_hidden_layer(self, branch: str, index: int, prev_out: tf.Tensor, n_units: int) -> tf.Tensor:
"""
Creates a hidden layer in the network
:param branch: The branch that layer belongs to
:param index: The 0-based index of the hidden layer within the branch
:param prev_out: The output tensor of the previous layer
:param n_units: The number of units in this layer
:return: The hidden layer activations
"""
if branch not in self._branches:
raise ValueError("branch {0} is not valid. Has to be one of {1}".format(branch, self._branches))
w = create_weight_var(self.cvn("WEIGHT", branch, index), [prev_out.shape[1].value, n_units], self.w_decay,
dale=self.use_dale_constraint)
b = create_bias_var(self.cvn("BIAS", branch, index), [n_units], self.bias_init)
dr = self._det_remove[branch][index]
scale = n_units / tf.reduce_sum(dr)
if self.use_tanh:
h = tf.nn.tanh((tf.matmul(prev_out, w) + b) * dr * scale, self.cvn("HIDDEN", branch, index))
else:
h = tf.nn.relu((tf.matmul(prev_out, w) + b) * dr * scale, self.cvn("HIDDEN", branch, index))
return h
# Public API
def load(self, meta_file: str, checkpoint_file: str):
"""
Loads model definitions from model description file and populates data from given checkpoint
:param meta_file: The model definition file
:param checkpoint_file: The saved model checkpoint (weights, etc.)
"""
self.clear()
self._graph = tf.Graph()
def clear(self):
"""
Clears the network graph
"""
if not self.initialized:
return
# close session object if it exists
if self._session is not None:
self._session.close()
self._session = None
self._graph = None
self._saver = None
# mark network as not initialized
self.initialized = False
def init_variables(self):
"""
Runs global variable initializer, resetting all values
"""
self._check_init()
with self._graph.as_default():
self._session.run(tf.global_variables_initializer())
def save_state(self, chkpoint_file, index, save_meta=True) -> str:
"""
Saves the current state of the network to a checkpoint
:param chkpoint_file: The base chkpoint file name
:param index: The index of the current chkpoint
:param save_meta: Whether to save the meta-graph as well or not
:return: The full chkpoint filename and path
"""
self._check_init()
with self._graph.as_default():
if self._saver is None:
self._saver = tf.train.Saver(max_to_keep=None) # never delete chkpoint files
return self._saver.save(self._session, chkpoint_file, global_step=index, write_meta_graph=save_meta)
@property
def input_dims(self):
"""
The network's input dimensions
"""
self._check_init()
with self._graph.as_default():
return self._x_in.shape.as_list()
@staticmethod
def cvn(vartype: str, branch: str, index: int) -> str:
"""
Creates a reproducible variable name for layer specific variables
:param vartype: The variable type (WEIGHT, BIAS, HIDDEN, DROP, REMOVE, CONV)
:param branch: The branch on which to create variable
:param index: The layer index of the variable
:return: The variable name
"""
vartype = vartype.upper()
vartypes = ["WEIGHT", "BIAS", "HIDDEN", "DROP", "REMOVE", "CONV", "OUTPUT"]
if vartype not in vartypes:
raise ValueError("Unknown vartype {0}. Has to be in {1}".format(vartype, vartypes))
return "{0}_{1}_{2}".format(vartype, branch, index)
class GpNetworkModel(NetworkModel):
"""
Base class of branched gradient prediction network models
"""
def __init__(self, use_dale_constraint, use_tanh):
"""
Creates a new GpNetworkModel
"""
super().__init__(use_dale_constraint, use_tanh)
# initialize fields that will be populated later
self.n_units = None
self.n_layers_branch = None
self.n_layers_mixed = None
self._n_mixed_dense = None
self._n_branch_dense = None
# model fields that are later needed for parameter feeding
self._keep_prob = None # drop-out keep probability
self._y_ = None # true responses (for training)
# network output
self._m_out = None # type: tf.Tensor
# the square loss (loss w.o. weight decay)
self._sq_loss = None # type: tf.Tensor
# total loss across the network
self._total_loss = None # type: tf.Tensor
# the training step to train the network
self._train_step = None # type: tf.Operation
# Protected API
def _create_unit_lists(self):
"""
Creates lists of hidden unit counts and branch list according to network configuration
"""
self._det_remove = {}
self._n_mixed_dense = [self.n_units[1]] * self.n_layers_mixed
if self.n_layers_branch == 0:
self._branches = ['m', 'o'] # mixed network
else:
self._branches = ['t', 's', 'a', 'm', 'o'] # single input network
self._n_branch_dense = [self.n_units[0]] * self.n_layers_branch
def _create_real_out_placeholder(self) -> tf.Tensor:
"""
Creates the 2D placeholder for the true labels
Abstract method in this baseclass
"""
raise NotImplementedError("ABSTRACT")
def _create_output(self, prev_out: tf.Tensor) -> tf.Tensor:
"""
Creates the output layer for reporting predicted temperature of all behaviors
Abstract method in this baseclass
"""
raise NotImplementedError("ABSTRACT")
def _create_branch(self, branch: str, prev_out: tf.Tensor) -> tf.Tensor:
"""
Creates a branch of the network
:param branch: The name of the branch
:param prev_out: The output of the previous layer
:return: The output of the branch
"""
if branch not in self._branches:
raise ValueError("branch {0} is not valid. Has to be one of {1}".format(branch, self._branches))
n_layers = self.n_layers_mixed if branch == 'm' else self.n_layers_branch
last = prev_out
for i in range(n_layers):
last = self._create_hidden_layer(branch, i, last,
self._n_mixed_dense[i] if branch == 'm' else self._n_branch_dense[i])
last = tf.nn.dropout(last, self._keep_prob, name=self.cvn("DROP", branch, i))
return last
def _create_feed_dict(self, x_vals, y_vals=None, keep=1.0, removal=None) -> dict:
"""
Create network feed dict
:param x_vals: The network input values
:param y_vals: True output values for training (optional)
:param keep: The dropout probability for keeping all units
:param removal: Deterministic keep/removal vectors
:return: The feeding dict to pass to the network
"""
f_dict = {self._x_in: x_vals, self._keep_prob: keep}
if y_vals is not None:
f_dict[self._y_] = y_vals
# Fill deterministic removal part of feed dict
for b in self._branches:
for i, dr in enumerate(self._det_remove[b]):
s = dr.shape[0].value
if removal is None or b not in removal:
f_dict[dr] = np.ones(s, dtype=np.float32)
else:
if removal[b][i].size != s:
raise ValueError("removal in branch {0} layer {1} does not have required size of {2}".format(b,
i,
s))
f_dict[dr] = removal[b][i]
return f_dict
# Public API
def setup(self, n_conv_layers: int, n_units, n_layers_branch: int, n_layers_mixed: int):
"""
Creates the network graph from scratch according to the given specifications
:param n_conv_layers: The number of convolutional layers per input branch
:param n_units: The number of units in each hidden layer or 2 element list for units in branch and mix
:param n_layers_branch: The number of hidden layers in each branch (can be 0 for full mixing)
:param n_layers_mixed: The number of hidden layers in the mixed part of the model
"""
self.clear()
# ingest parameters
if n_layers_mixed < 1:
raise ValueError("Network needs at least on mixed hidden layer")
if n_layers_branch < 0:
raise ValueError("Number of branch layers can't be negative")
self.n_conv_layers = n_conv_layers
if type(n_units) is not list:
self.n_units = [n_units] * 2
else:
if len(n_units) != 2:
raise ValueError("n_units should either be scalar or a 2-element list")
self.n_units = n_units
self.n_layers_branch = n_layers_branch
self.n_layers_mixed = n_layers_mixed
self._create_unit_lists()
self._graph = tf.Graph()
with self._graph.as_default():
# create deterministic removal units
for b in self._branches:
if b == 'm':
self._det_remove[b] = [tf.placeholder(tf.float32, shape=[self._n_mixed_dense[i]],
name=self.cvn("REMOVE", b, i))
for i in range(self.n_layers_mixed)]
else:
self._det_remove[b] = [tf.placeholder(tf.float32, shape=[self._n_branch_dense[i]],
name=self.cvn("REMOVE", b, i))
for i in range(self.n_layers_branch)]
# dropout probability placeholder
self._keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# model input: BATCHSIZE x (Temp,Move,Turn) x HISTORYSIZE x 1 CHANNEL
self._x_in = tf.placeholder(tf.float32, [None, 3, GlobalDefs.frame_rate*GlobalDefs.hist_seconds, 1], "x_in")
# real outputs: BATCHSIZE x (dT(Stay), dT(Straight), dT(Left), dT(Right))
self._y_ = self._create_real_out_placeholder()
# data binning layer
xin_pool = create_meanpool2d("xin_pool", self._x_in, 1, self.t_bin)
# the input convolution depends on the network structure: branched or fully mixed
if 't' in self._branches:
# branched network - split input into temperature, speed and angle
x_1, x_2, x_3 = tf.split(xin_pool, num_or_size_splits=3, axis=1, name="input_split")
# create convolution and deep layer for each branch
time = self._create_convolution_layer('t', x_1)
time = self._create_branch('t', time)
speed = self._create_convolution_layer('s', x_2)
speed = self._create_branch('s', speed)
angle = self._create_convolution_layer('a', x_3)
angle = self._create_branch('a', angle)
# combine branch outputs and create mix branch
mix = tf.concat([time, speed, angle], 1, self.cvn("HIDDEN", 'm', -1))
mix = self._create_branch('m', mix)
else:
# fully mixed network
mix = self._create_convolution_layer('m', xin_pool)
mix = self._create_branch('m', mix)
self._m_out = self._create_output(mix)
# create and store losses and training step
self._total_loss, self._sq_loss = get_loss(self._y_, self._m_out)
self._train_step = create_train_step(self._total_loss)
# store our training operation
tf.add_to_collection('train_op', self._train_step)
# create session
self._session = tf.Session()
# mark network as initialized
self.initialized = True
# initialize all variables
self.init_variables()
def load(self, meta_file: str, checkpoint_file: str):
"""
Loads model definitions from model description file and populates data from given checkpoint
:param meta_file: The model definition file
:param checkpoint_file: The saved model checkpoint (weights, etc.)
"""
super().load(meta_file, checkpoint_file)
with self._graph.as_default():
# restore graph and variables
self._session = tf.Session()
saver = tf.train.import_meta_graph(meta_file)
saver.restore(self._session, checkpoint_file)
graph = self._session.graph
self._m_out = graph.get_tensor_by_name(self.cvn("OUTPUT", 'o', 0)+":0")
self._x_in = graph.get_tensor_by_name("x_in:0")
self._keep_prob = graph.get_tensor_by_name("keep_prob:0")
self._y_ = graph.get_tensor_by_name("y_:0")
# collect deterministic removal units and use these
# to determine which branches exist and how many layers they have
possible_branches = ['t', 's', 'a', 'm', 'o']
self._branches = []
self._det_remove = {}
for b in possible_branches:
try:
graph.get_tensor_by_name(self.cvn("REMOVE", b, 0)+":0")
# we found layer 0 in this branch so it exists
self._branches.append(b)
self._det_remove[b] = []
i = 0
try:
while True:
self._det_remove[b].append(graph.get_tensor_by_name(self.cvn("REMOVE", b, i)+":0"))
i += 1
except KeyError:
pass
except KeyError:
continue
if 't' in self._branches:
self.n_layers_branch = len(self._det_remove['t'])
else:
self.n_layers_branch = 0
self.n_units = [0, 0]
self.n_layers_mixed = len(self._det_remove['m'])
self._n_mixed_dense = [self._det_remove['m'][i].shape[0].value for i in range(self.n_layers_mixed)]
self.n_units[1] = self._n_mixed_dense[0]
if self.n_layers_branch > 0:
self._n_branch_dense = [self._det_remove['t'][i].shape[0].value for i in range(self.n_layers_branch)]
self.n_units[0] = self._n_branch_dense[0]
else:
self._n_branch_dense = []
# retrieve training step
self._train_step = graph.get_collection("train_op")[0]
# retrieve total loss tensor
self._total_loss = graph.get_tensor_by_name("total_loss:0")
# set up squared loss calculation
self._sq_loss = tf.losses.mean_squared_error(labels=self._y_, predictions=self._m_out)
self.initialized = True
# use convolution data biases to get number of convolution layers
conv_biases = self.convolution_data[1]
self.n_conv_layers = conv_biases.popitem()[1].shape[0]
def clear(self):
"""
Clears the network graph
"""
if not self.initialized:
return
super().clear()
self.n_conv_layers = None
self.n_units = None
self.n_layers_branch = None
self.n_layers_mixed = None
# mark network as not initialized
self.initialized = False
def train(self, xbatch, ybatch, keep=0.5, removal=None):
"""
Runs a training step on the given batches
:param xbatch: The input of the training batch
:param ybatch: The true labels of the training batch
:param removal: Optional dictionary of which units in the network should be kept or dropped
:param keep: The keep probability of each unit
"""
self._check_init()
with self._graph.as_default():
self._train_step.run(self._create_feed_dict(xbatch, ybatch, keep, removal), self._session)
def get_filtered_train(self, filter_fun: callable):
"""
Creates a training procedure for this network using simple gradient descent only on variables
that pass filtering
:param filter_fun: A function that given a variable name returns true if this variable should be trained
:return: A closure with the same signature as the train method of the object but operating on filtered variables
"""
def train_op(xbatch, ybatch, keep=0.5, removal=None):
nonlocal self
nonlocal train
self._check_init()
with self._graph.as_default():
train.run(self._create_feed_dict(xbatch, ybatch, keep, removal), self._session)
self._check_init()
with self._graph.as_default():
optimizer = tf.train.AdamOptimizer(1e-4, name="Addam")
all_vars = self._session.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
train = optimizer.minimize(self._total_loss, var_list=[v for v in all_vars if filter_fun(v.name)])
# initialize variables newly created for Adam
ini = tf.variables_initializer(
[v for v in tf.global_variables() if
v.name.split(':')[0].encode() in set(self._session.run(tf.report_uninitialized_variables()))
])
self._session.run(ini)
return train_op
def get_squared_loss(self, xbatch, ybatch, keep=1) -> float:
"""
Computes the square loss over the given batch
:param xbatch: The batch input
:param ybatch: The true labels of the batch
:param keep: The keep probability of each unit
:return: The square loss
"""
self._check_init()
with self._graph.as_default():
return self._sq_loss.eval(self._create_feed_dict(xbatch, ybatch, keep), self._session)
def predict(self, xbatch, keep=1.0, det_drop=None) -> np.ndarray:
"""
Uses the network to predict output given the input
:param xbatch: The network input
:param keep: The keep probability of each unit
:param det_drop: The deterministic keep/drop of each unit
:return: The network output
"""
self._check_init()
with self._graph.as_default():
return self._m_out.eval(self._create_feed_dict(xbatch, keep=keep, removal=det_drop), session=self._session)
def test_error_distributions(self, test_data):
"""
For the network returns the distribution of squared losses and rank errors across test data
:param test_data: The test data to evaluate the model
:return:
[0]: For each datapoint in test_data the squared error loss
[1]: For each datapoint in test_data the rank error
"""
self._check_init()
with self._graph.as_default():
sq_errors = np.full(test_data.data_size, -1)
rank_errors = np.full(test_data.data_size, -1)
for i in range(test_data.data_size):
xbatch, ybatch = test_data.training_batch(1)
pred = self.predict(xbatch, 1.0)
sq_errors[i] = (np.sum((ybatch - pred) ** 2))
rank_real = np.unique(ybatch, return_inverse=True)[1]
rank_pred = np.unique(pred, return_inverse=True)[1]
rank_errors[i] = np.sum(np.abs(rank_real - rank_pred))
return sq_errors, rank_errors
def unit_stimulus_responses(self, temperature, speed, angle, standardizations: GradientStandards,
det_drop=None) -> dict:
"""
Computes and returns the responses of each unit in the network in response to a stimulus
:param temperature: The temperature stimulus in C (can be None for clamping to 0)
:param speed: The speed input in pixels per timestep (can be None for clamping to 0)
:param angle: The angle input in degrees per timestep (can be None for clamping to 0)
:param standardizations: Object that provides mean and standard deviation for each input
:param det_drop: The deterministic keep/drop of each unit
:return: Branch-wise dictionary of lists with n_hidden elements, each an array of time x n_units activations
"""
self._check_init()
with self._graph.as_default():
# ensure that at least one stimulus was provided that all have same size and standardize them
if any((temperature is not None, speed is not None, angle is not None)):
sizes = [x.size for x in (temperature, speed, angle) if x is not None]
if any([s != sizes[0] for s in sizes]):
raise ValueError("All given inputs must have same length")
if temperature is None:
temperature = np.zeros(sizes[0], np.float32)
else:
temperature = (temperature-standardizations.temp_mean) / standardizations.temp_std
if speed is None:
speed = np.zeros(sizes[0], np.float32)
else:
speed = (speed - standardizations.disp_mean) / standardizations.disp_std
if angle is None:
angle = np.zeros(sizes[0], np.float32)
else:
angle = (angle - standardizations.ang_mean) / standardizations.ang_std
else:
raise ValueError("At least one input needs to be given")
history = self.input_dims[2]
activity = {}
ix = indexing_matrix(np.arange(temperature.size), history - 1, 0, temperature.size)[0]
model_in = np.zeros((ix.shape[0], 3, history, 1))
model_in[:, 0, :, 0] = temperature[ix]
model_in[:, 1, :, 0] = speed[ix]
model_in[:, 2, :, 0] = angle[ix]
for b in self._branches:
if b == 'o':
activity[b] = [self.predict(model_in, det_drop=det_drop)]
continue
n_layers = self.n_layers_mixed if b == 'm' else self.n_layers_branch
for i in range(n_layers):
h = self._session.graph.get_tensor_by_name(self.cvn("HIDDEN", b, i)+":0")
fd = self._create_feed_dict(model_in, keep=1.0, removal=det_drop)
if b in activity:
activity[b].append(h.eval(feed_dict=fd, session=self._session))
else:
activity[b] = [h.eval(feed_dict=fd, session=self._session)]
return activity
def branch_output(self, branch_name, xbatch, det_drop=None) -> np.ndarray:
"""
Computes the activations of all units in the last hidden layer of the given branch
:param branch_name: The name of the branch ('t', 's', 'a', 'm')
:param xbatch: The network input
:param det_drop: The deterministic keep/drop of each unit
:return: The activations of the last layer of reach branch for the given inputs
"""
self._check_init()
if branch_name not in self._branches:
raise ValueError("Branch '{0}' is not present in this network. Has to be one of {1}".format(branch_name,
self._branches))
# obtain the name of the last hidden layer of the given branch
tensor_name = self.cvn("HIDDEN", branch_name,
self.n_layers_branch-1 if branch_name != 'm' else self.n_layers_mixed-1)
tensor_name = tensor_name + ":0"
with self._graph.as_default():
fd = self._create_feed_dict(xbatch, removal=det_drop)
tensor = self._graph.get_tensor_by_name(tensor_name)
layer_out = tensor.eval(fd, session=self._session)
return layer_out
def parse_layer_input_by_cluster(self, branch: str, layer_index: int, clid_upstream, clid_layer, id_exlude=-1):
"""
Computes input to given unit clusters in a network layer as outputs of upstream layer clusters
:param branch: The branch of the layer
:param layer_index: The index of the layer
:param clid_upstream: The cluster ids of neurons feeding into this layer
:param clid_layer: The cluster ids of neurons in this layer
:param id_exlude: This cluster id will be excluded from calculations (usually -1 i.e. unclustered units)
:return: Connectivity matrix with clid_upstream ids as rows and clid_layer ids as columns with values
representing the average input contribution
"""
if branch not in self._branches:
raise ValueError("Invalid branch. For this network has to be one of: {0}".format(self._branches))
if layer_index < 0:
raise ValueError("{0} is not a valid layer index".format(layer_index))
if branch == 'o' or layer_index == 0:
raise NotImplementedError("Connectivity analysis across branches is currently not implemented")
n_layers = self.n_units[0] if branch != 'm' else self.n_units[1]
if layer_index >= n_layers:
raise ValueError("layer_index {0} is not valid since branch {1} only has {2} layers.".format(layer_index,
branch,
n_layers))
# get weight matrix of the requested layer
with self._graph.as_default():
w_name = self.cvn("WEIGHT", branch, layer_index)
weight_mat = self._graph.get_tensor_by_name(w_name+":0").eval(session=self._session)
if clid_upstream.size != weight_mat.shape[0]:
raise ValueError("clid_upstream needs to have one element for each input unit")
if clid_layer.size != weight_mat.shape[1]:
raise ValueError("clid_layer needs to have one element for each unit in the layer of interest")
unq_up = np.unique(clid_upstream)
unq_this = np.unique(clid_layer)
if id_exlude is not None:
unq_up = unq_up[unq_up != id_exlude]
unq_this = unq_this[unq_this != id_exlude]
conn_mat = np.zeros((unq_up.size, unq_this.size))
for j, t in enumerate(unq_this):
cols = clid_layer == t
w = weight_mat[:, cols] / cols.sum()
for i, u in enumerate(unq_up):
rows = clid_upstream == u
conn_mat[i, j] = np.sum(w[rows, :]) / rows.sum()
return conn_mat
@property
def convolution_data(self):
"""
The weights and biases of the convolution layer(s)
"""
self._check_init()
with self._graph.as_default():
if 't' in self._branches:
to_get = ['t', 's', 'a']
else:
to_get = ['m']
g = self._session.graph
w = {tg: g.get_tensor_by_name(self.cvn("WEIGHT", tg, -1)+":0").eval(session=self._session) for tg in to_get}
b = {tg: g.get_tensor_by_name(self.cvn("BIAS", tg, -1) + ":0").eval(session=self._session) for tg in to_get}
return w, b
class ZfGpNetworkModel(GpNetworkModel):
"""
Class representing zebrafish gradient prediction network model
"""
def __init__(self, use_dale_constraint=False, use_tanh=False):
"""
Creates a new ZfGpNetworkModel
"""
super().__init__(use_dale_constraint, use_tanh)
# Private API
def _create_output(self, prev_out: tf.Tensor) -> tf.Tensor:
"""
Creates the output layer for reporting predicted temperature of all four behaviors
:param prev_out: The output of the previous layer
:return: output
"""
w = create_weight_var(self.cvn("WEIGHT", 'o', 0), [prev_out.shape[1].value, 4], self.w_decay,
dale=self.use_dale_constraint)
b = create_bias_var(self.cvn("BIAS", 'o', 0), [4], self.bias_init)
out = tf.add(tf.matmul(prev_out, w), b, name=self.cvn("OUTPUT", 'o', 0))
return out
def _create_real_out_placeholder(self):
"""
Creates placeholder variable for labels
"""
# real outputs: BATCHSIZE x (dT(Stay), dT(Straight), dT(Left), dT(Right))
return tf.placeholder(tf.float32, [None, 4], "y_")
# Public API
@staticmethod
def plot_network(activations: dict, index: int):
"""
Plots network structure with node darkness corresponding to its activation
:param activations: Dictionary of branches with layer activation lists
:param index: The frame in activations at which the network should be visualized
:return: figure and axes object
"""
def circle_pos(rownum, colnum, n_rows, n_cols):
"""
Compute the relative position of one circle within a layer
:param rownum: The row of the circle
:param colnum: The column of the circle
:param n_rows: The total number of rows in the layer
:param n_cols: The total number of columns in the layer
:return: The x,y position of the center
"""
if rownum >= n_rows or colnum >= n_cols:
raise ValueError("Row and column numbers can't be larger than totals")
y_spread = (n_rows-1) * c_c_dist
y_pos = - y_spread / 2 + rownum * c_c_dist
x_spread = (n_cols-1) * c_c_dist
x_pos = - x_spread / 2 + colnum * c_c_dist
return x_pos, y_pos
def layer_dim(values: np.ndarray):
"""
Computes the width and height of the layer bounding box
"""
l_size = values.size
n_rows = (l_size - 1) // max_width + 1
n_cols = max_width if l_size >= max_width else l_size
boundx = 0 - (n_cols / 2) * (circle_dist + 2 * circle_rad) - circle_rad / 2
boundy = 0 - (n_rows / 2) * (circle_dist + 2 * circle_rad) - circle_rad / 2
boundw = (0 - boundx) * 2
boundh = (0 - boundy) * 2
return boundw, boundh
def draw_layer(x_center, y_center, values: np.ndarray):
"""
Creates artists for one whole layer of the network
:param x_center: The x center coordinate of the layer
:param y_center: They y center coordinate of the layer
:param values: For each unit in the layer its normalized activation
:return:
[0] List of artists that draw this layer
[1] (xmin, xmax, ymin, ymax) tuple of rectangle containing this layer
"""
if np.any(values > 1) or np.any(values < 0):
raise ValueError("values can't be smaller 0 or larger 1")
arts = []
l_size = values.size
n_rows = (l_size-1) // max_width + 1
n_cols = max_width if l_size >= max_width else l_size
# compute bounding rectangle
boundx = x_center - (n_cols / 2) * (circle_dist + 2*circle_rad) - circle_rad/2
boundy = y_center - (n_rows / 2) * (circle_dist + 2*circle_rad) - circle_rad/2
boundw = (x_center - boundx) * 2
boundh = (y_center - boundy) * 2
# draw units according to their activations
for i, v in enumerate(values):
x, y = circle_pos(i // max_width, i % max_width, n_rows, n_cols)
x += x_center
y += y_center
arts.append(pl.Circle((x, y), circle_rad, color=(1-v, 1-v, 1-v)))
return arts, (boundx, boundx+boundw, boundy, boundy+boundh)
# compute normalization across whole timeseries
max_width = 32 # maximum number of units in a row
circle_rad = 10 # radius of each given circle
circle_dist = 7 # the edge-to-edge distance of circles
c_c_dist = circle_dist + 2*circle_rad # the center-to-center distance btw. neighboring circles
xcents = {'o': 0, 'm': 0, 't': -0.85*max_width*(c_c_dist+circle_rad), 's': 0,
'a': 0.85*max_width*(c_c_dist+circle_rad)}
# the branch order from bottom to top
order = ['o', 'm', 't', 's', 'a']
# for each branch compute the y-center of its first layer
thickness = {}
for b in order:
if b not in activations:
thickness[b] = 0
else:
thickness[b] = 0
for l in activations[b]:
thickness[b] += (layer_dim(l[index, :])[1] + c_c_dist * 2)
ystarts = {}
for b in order:
if b == 'o':
ystarts[b] = 0
elif b == 'm':
ystarts[b] = thickness[b] / 4
else:
ystarts[b] = ystarts['m'] + thickness['m'] + thickness[b] / 4
all_artists = []
fig_bounds = np.zeros(4)
for b in order:
if b not in activations:
continue
xc = xcents[b]
prev_offset = 0
for i, l in enumerate(activations[b]):
yc = ystarts[b] + prev_offset
prev_offset += layer_dim(l[index, :])[1] + c_c_dist
minval = np.min(l, 0)
diff = np.max(l, 0) - minval
diff[diff == 0] = 0.1
layer_arts, bounds = draw_layer(xc, yc, (l[index, :]-minval) / diff)
all_artists += layer_arts
# update figure bounds
if fig_bounds[0] > bounds[0]:
fig_bounds[0] = bounds[0]
if fig_bounds[1] < bounds[1]:
fig_bounds[1] = bounds[1]
if fig_bounds[2] > bounds[2]:
fig_bounds[2] = bounds[2]
if fig_bounds[3] < bounds[3]:
fig_bounds[3] = bounds[3]
# create actual figure
fig, ax = pl.subplots()
for a in all_artists:
ax.add_artist(a)
ax.axis('square')
# update limits
ax.set_xlim(fig_bounds[0], fig_bounds[1])
ax.set_ylim(fig_bounds[2], fig_bounds[3])
sns.despine(fig, ax, True, True, True, True)
ax.tick_params(
axis='both',
which='both',
bottom='off',
top='off',
right='off',
left='off',
labelbottom='off',
labelleft='off')
return fig, ax
class CeGpNetworkModel(GpNetworkModel):
"""
Class representing C elegans gradient prediction network model
"""
def __init__(self, use_dale_constraint=False, use_tanh=False):
"""
Creates a new CeGpNetworkModel
"""
super().__init__(use_dale_constraint, use_tanh)
# Private API
def _create_real_out_placeholder(self):
"""
Creates placeholder variable for labels
"""
# real outputs: BATCHSIZE x (dT(Continue), dT(StrongTurn), dt(Pirouette), dT(LeftTurn), dT(RightTurn))
return tf.placeholder(tf.float32, [None, 5], "y_")
def _create_output(self, prev_out: tf.Tensor):
"""
Creates the output layer for reporting predicted temperature of all five behaviors
:param prev_out: The output of the previous layer
:return: output
"""
w = create_weight_var(self.cvn("WEIGHT", 'o', 0), [prev_out.shape[1].value, 5], self.w_decay,
dale=self.use_dale_constraint)
b = create_bias_var(self.cvn("BIAS", 'o', 0), [5], self.bias_init)
out = tf.add(tf.matmul(prev_out, w), b, name=self.cvn("OUTPUT", 'o', 0))
return out
class SimpleRLNetwork(NetworkModel):
"""
Simple reinforcement learning network with only temperature input
"""
def __init__(self, use_dale_constraint=False, use_tanh=False):
"""
        Create a new simple reinforcement learning network model
"""
super().__init__(use_dale_constraint, use_tanh)
# initialize fields that will be populated later
self.n_units = None
self.n_layers = None
self._n_dense = None
# model fields that are later needed for parameter feeding
self._keep_prob = None # drop-out keep probability
self._reward = None # reward delivered (for training)
        self._pick = None  # the behavior that was chosen and led to the reward above
self._responsible_out = None # the output responsible for the current reward
# network output
self._value_out = None # type: tf.Tensor
self._log_value_out = None # type: tf.Tensor
self._action = None # type: tf.Tensor
# the reward based loss (loss w.o. weight decay)
self._loss = None # type: tf.Tensor
# total loss across the network
self._total_loss = None # type: tf.Tensor
# the training step to train the network
self._train_step = None # type: tf.Operation
        # create a cache of uniform random numbers
self._uni_cash = RandCash(1000, lambda s: np.random.rand(s))
# Private API
def _create_unit_lists(self):
"""
Creates lists of hidden unit counts and branch list according to network configuration
"""
self._det_remove = {}
self._n_dense = [self.n_units] * self.n_layers
self._branches = ['t', 'o']
def _create_branch(self, branch: str, prev_out: tf.Tensor) -> tf.Tensor:
"""
Creates a branch of the network
:param branch: The name of the branch
:param prev_out: The output of the previous layer
:return: The output of the branch
"""
if branch not in self._branches:
raise ValueError("branch {0} is not valid. Has to be one of {1}".format(branch, self._branches))
last = prev_out
for i in range(self.n_layers):
last = self._create_hidden_layer(branch, i, last, self._n_dense[i])
last = tf.nn.dropout(last, self._keep_prob, name=self.cvn("DROP", branch, i))
return last
def _create_values(self, prev_out: tf.Tensor):
"""
Creates the output layer for our policy
:param prev_out: The output of the previous layer
:return: output probabilities, log_output probabilities
"""
w = create_weight_var(self.cvn("WEIGHT", 'o', 0), [prev_out.shape[1].value, 4], self.w_decay)
b = create_bias_var(self.cvn("BIAS", 'o', 0), [4], self.bias_init)
out = tf.nn.softmax((tf.matmul(prev_out, w) + b), name=self.cvn("OUTPUT", 'o', 0))
log_out = tf.nn.log_softmax((tf.matmul(prev_out, w) + b), name=self.cvn("OUTPUT", 'o', -1))
return out, log_out
def _create_feed_dict(self, x_in, rewards=None, picks=None, keep=1.0, removal=None) -> dict:
"""
Create network feed dict
:param x_in: The network input value
:param rewards: The delivered rewards
:param picks: The chosen units (optional but needs to be present if reward != None)
:param keep: The dropout probability for keeping all units
:param removal: Deterministic keep/removal vectors
:return: The feeding dict to pass to the network
"""
f_dict = {self._x_in: x_in, self._keep_prob: keep}
if rewards is not None:
# augment rewards to 2D if necessary
if rewards.ndim == 1:
rewards = rewards[:, None]
f_dict[self._reward] = rewards
if picks is None or picks.size != rewards.size:
raise ValueError("If rewards are provided, picks need to be provided with the same number of samples!")
picks = np.c_[np.arange(picks.size)[:, None], picks[:, None]]
f_dict[self._pick] = picks
# Fill deterministic removal part of feed dict
for b in self._branches:
if b == 'o':
continue
for i, dr in enumerate(self._det_remove[b]):
s = dr.shape[0].value
if removal is None or b not in removal:
f_dict[dr] = np.ones(s, dtype=np.float32)
else:
if removal[b][i].size != s:
raise ValueError("removal in branch {0} layer {1} does not have required size of {2}".format(b,
i,
s))
f_dict[dr] = removal[b][i]
return f_dict
# Public API
def setup(self, n_conv_layers: int, n_units, n_layers: int):
"""
Creates the network graph
:param n_conv_layers: The number of convolutional layers operating on the input
:param n_units: The number of units in each dense layer
:param n_layers: The number of dense layers
"""
self.clear()
# ingest parameters
if n_layers < 1:
raise ValueError("Network needs at least one hidden layer")
self.n_conv_layers = n_conv_layers
if n_units < 1:
raise ValueError("Each hidden layer needs at least one unit")
self.n_units = n_units
self.n_layers = n_layers
self._create_unit_lists()
self._graph = tf.Graph()
with self._graph.as_default():
# create deterministic removal units
for b in self._branches:
if b == 't':
self._det_remove[b] = [tf.placeholder(tf.float32, shape=[self._n_dense[i]],
name=self.cvn("REMOVE", b, i))
for i in range(self.n_layers)]
# dropout probability placeholder
self._keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# model input: NSAMPLES x Temp x HISTORYSIZE x 1 CHANNEL
self._x_in = tf.placeholder(tf.float32, [None, 1, GlobalDefs.frame_rate*GlobalDefs.hist_seconds, 1], "x_in")
# reward values: NSAMPLES x 1 (Note: Only picked behavior will get rewarded!)
self._reward = tf.placeholder(tf.float32, [None, 1], name="reward")
# sample index and index of the picked behavior: NSAMPLES x 2 (either 0, 1, 2 or 3, i.e. stay, s, l or r)
self._pick = tf.placeholder(tf.int32, [None, 2], name="pick")
# data binning layer
xin_pool = create_meanpool2d("xin_pool", self._x_in, 1, self.t_bin)
# create convolution layer and deep layers
conv = self._create_convolution_layer('t', xin_pool)
deep_out = self._create_branch('t', conv)
self._value_out, self._log_value_out = self._create_values(deep_out)
self._action = tf.multinomial(self._log_value_out, 1, name="action")
self._responsible_out = tf.gather_nd(self._log_value_out, self._pick, "responsible_out")
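            # Policy-gradient (REINFORCE-style) loss: negative log-probability of the picked actions
            # weighted by the delivered reward.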
self._loss = -tf.reduce_sum(self._responsible_out * self._reward)
tf.add_to_collection("losses", self._loss)
# compute the total loss which includes our weight-decay
self._total_loss = tf.add_n(tf.get_collection("losses"), name="total_loss")
# create training step
optimizer = tf.train.AdamOptimizer(1e-5)
gradients, variables = zip(*optimizer.compute_gradients(self._total_loss))
self._train_step = optimizer.apply_gradients(zip(gradients, variables))
# self._train_step = tf.train.AdamOptimizer(1e-6).minimize(self._total_loss)
# self._train_step = create_train_step(self._total_loss)
# store our training operation
tf.add_to_collection('train_op', self._train_step)
# create session
self._session = tf.Session()
# mark network as initialized
self.initialized = True
            # initialize all variables
self.init_variables()
def load(self, meta_file: str, checkpoint_file: str):
"""
Loads model definitions from model description file and populates data from given checkpoint
:param meta_file: The model definition file
:param checkpoint_file: The saved model checkpoint (weights, etc.)
"""
super().load(meta_file, checkpoint_file)
with self._graph.as_default():
# restore graph and variables
self._session = tf.Session()
saver = tf.train.import_meta_graph(meta_file)
saver.restore(self._session, checkpoint_file)
graph = self._session.graph
self._value_out = graph.get_tensor_by_name(self.cvn("OUTPUT", 'o', 0)+":0")
self._log_value_out = graph.get_tensor_by_name(self.cvn("OUTPUT", 'o', -1) + ":0")
self._action = tf.multinomial(self._log_value_out, 1, name="action")
self._responsible_out = graph.get_tensor_by_name("responsible_out:0")
self._x_in = graph.get_tensor_by_name("x_in:0")
self._keep_prob = graph.get_tensor_by_name("keep_prob:0")
self._reward = graph.get_tensor_by_name("reward:0")
self._pick = graph.get_tensor_by_name("pick:0")
self._branches = ['t', 'o']
# collect deterministic removal units
for b in self._branches:
try:
graph.get_tensor_by_name(self.cvn("REMOVE", b, 0)+":0")
self._det_remove[b] = []
i = 0
try:
while True:
self._det_remove[b].append(graph.get_tensor_by_name(self.cvn("REMOVE", b, i)+":0"))
i += 1
except KeyError:
pass
except KeyError:
continue
self.n_layers = len(self._det_remove['t'])
self._n_dense = [self._det_remove['t'][i].shape[0].value for i in range(self.n_layers)]
self.n_units = self._n_dense[0]
# retrieve training step
self._train_step = graph.get_collection("train_op")[0]
# set up loss calculation
self._loss = -(tf.log(self._responsible_out) * self._reward)[0]
self.initialized = True
# use convolution data biases to get number of convolution layers
conv_biases = self.convolution_data[1]
self.n_conv_layers = conv_biases.popitem()[1].shape[0]
def clear(self):
"""
Clears the network graph
"""
if not self.initialized:
return
super().clear()
self.n_conv_layers = None
self.n_units = None
self.n_layers = None
# mark network as not initialized
self.initialized = False
def train(self, x_in, reward: np.ndarray, pick: np.ndarray, keep=0.5):
"""
Runs a single move training step
:param x_in: The network input
:param reward: The delivered reward
:param pick: The chosen action
:param keep: The keep probability of each unit
"""
self._check_init()
if reward.size > 1:
warn("Providing more than one concurrent training sample is discouraged since credit assignment unclear.")
with self._graph.as_default():
fd = self._create_feed_dict(x_in, reward, pick, keep)
self._train_step.run(fd, self._session)
def get_values(self, x_in, keep=1.0, det_drop=None) -> np.ndarray:
"""
        Compute the predicted value of each of the four behaviors given the input
:param x_in: The temperature history as network input
:param keep: The keep probability of each unit
:param det_drop: The deterministic keep/drop of each unit
:return: 4-element vector of values for stay, straight, left and right
"""
self._check_init()
with self._graph.as_default():
v = self._value_out.eval(self._create_feed_dict(x_in, keep=keep, removal=det_drop), session=self._session)
return v
def choose_action(self, x_in, p_explore=0.01, keep=1.0, det_drop=None):
"""
Our policy. Calculate value of each action given input. Choose random action with p_explore probability
and higher-valued action otherwise
:param x_in: The temperature history as network input
:param p_explore: Probability of choosing random action instead of following policy
:param keep: The keep probability of each unit
:param det_drop: The deterministic keep/drop of each unit
:return:
[0]: The index of the chosen action (stay=0, straight=1, left=2, right=3)
[1]: True if the action was exploratory
"""
if self._uni_cash.next_rand() < p_explore:
return np.random.randint(4), True
with self._graph.as_default():
a = self._action.eval(self._create_feed_dict(x_in, keep=keep, removal=det_drop), session=self._session)
return np.asscalar(a), False
def final_hidden_output(self, xbatch, det_drop=None) -> np.ndarray:
"""
Computes the activations of all units in the last hidden layer of the network
:param xbatch: The network input
:param det_drop: The deterministic keep/drop of each unit
:return: The activations of the last hidden network layer
"""
self._check_init()
# obtain the name of the last hidden layer
tensor_name = self.cvn("HIDDEN", 't', self.n_layers-1)
tensor_name = tensor_name + ":0"
with self._graph.as_default():
fd = self._create_feed_dict(xbatch, removal=det_drop)
tensor = self._graph.get_tensor_by_name(tensor_name)
layer_out = tensor.eval(fd, session=self._session)
return layer_out
def unit_stimulus_responses(self, temperature, temp_mean, temp_std) -> dict:
"""
Computes and returns the responses of each unit in the network in response to a stimulus
:param temperature: The temperature stimulus in C
:param temp_mean: The temperature average when the network was trained
:param temp_std: The temperature standard deviation when the network was trained
:return: Branch-wise dictionary of lists with n_hidden elements, each an array of time x n_units activations
"""
self._check_init()
temperature = (temperature - temp_mean) / temp_std
with self._graph.as_default():
history = self.input_dims[2]
activity = {}
            ix = indexing_matrix(np.arange(temperature.size), history - 1, 0, temperature.size)[0]
import numpy as np
from scipy import signal
import scipy.ndimage as ndimage
import re
import collections
from scipy import interpolate
import qcodes
# from . import data_array
from qcodes.data import data_array
## supporting functions for data processing
def create_kernel(x_dev, y_dev, cutoff, distr):
distributions = {
'gaussian': lambda r: np.exp(-(r**2) / 2.0),
'exponential': lambda r: np.exp(-abs(r) * np.sqrt(2.0)),
'lorentzian': lambda r: 1.0 / (r**2+1.0),
'thermal': lambda r: np.exp(r) / (1 * (1+np.exp(r))**2)
}
func = distributions[distr]
hx = np.floor((x_dev * cutoff) / 2.0)
hy = np.floor((y_dev * cutoff) / 2.0)
x = np.linspace(-hx, hx, int(hx * 2) + 1) / x_dev
y = np.linspace(-hy, hy, int(hy * 2) + 1) / y_dev
if x.size == 1: x = np.zeros(1)
if y.size == 1: y = np.zeros(1)
xv, yv = np.meshgrid(x, y)
kernel = func(np.sqrt(xv**2+yv**2))
kernel /= np.sum(kernel)
# print(kernel)
return kernel
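# Illustrative usage: create_kernel(3, 3, 7, 'gaussian') builds a normalized (unit-sum) 2-D smoothing
# kernel spanning roughly cutoff*deviation points per axis; f_lowpass/f_highpass below use it as their
# convolution window.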
def get_limits(x,y,z=[]):
    xmin, xmax = np.nanmin(x), np.nanmax(x)
    ymin, ymax = np.nanmin(y), np.nanmax(y)
    zmin, zmax = None, None  # defaults so the return below does not fail when no z data is given
    if z!=[]:
        zmin, zmax = np.nanmin(z), np.nanmax(z)
# Thickness for 1d scans, should we do this here or
# in the drawing code?
if xmin == xmax:
xmin, xmax = -1, 1
if ymin == ymax:
ymin, ymax = -1, 1
return xmin, xmax, ymin, ymax, zmin, zmax
##
def f_identity(w):
return w
def f_abs(w):
"""Take the absolute value of every datapoint."""
w['name']='abs'
wout = np.abs(w['ydata'])
w['ydata'] = wout
w['label']='abs('+w['processpar']+')'
w['unit']='nA'
return w
def f_log(w):
"""The base-10 logarithm of every datapoint."""
w['name']='logarithm'
# wout = np.log10(np.abs(w['ydata']))
wout = np.log10(np.abs(w['ydata']),out=np.zeros_like(w['ydata']),where=(np.abs(w['ydata'])!=0))
w['ydata'] = wout
w['label']='log'+r'$_{10}$'+'(abs('+w['processpar']+'))'
w['unit']='nA'
return w
def f_xderiv(w,method,sigma):
"""Partial derivative along x axis"""
try:
# sigma=2
if method=='numerical':
wout= np.diff(w['ydata'],axis=0)#,prepend=w['ydata'][0][0])
wout=np.insert(wout,0,wout[0][0],axis=0)
# wout.append(wout
elif method=='smooth':
wout = diffSmooth(w['ydata'], dy='y', sigma=sigma) # dy (str): direction to differentiate in; sigma (float): parameter for gaussian filter kernel
w['ydata'] = wout
w['label']='d'+w['processpar']+'/dVx'
w['unit']=r'$\mu$'+'Siemens'
except:
print('partial x: Cannot differentiate')
return w
def f_yderiv(w,method,sigma):
"""Partial derivative along y axis"""
try:
# sigma=2
if method=='numerical':
wout= np.diff(w['ydata'],axis=1)#,prepend=w['ydata'][0][0])
wout=np.insert(wout,0,wout[0][0],axis=1)
elif method=='smooth':
wout = diffSmooth(w['ydata'], dy='x', sigma=sigma) # dy (str): direction to differentiate in; sigma (float): parameter for gaussian filter kernel
w['ydata'] = wout
w['label']='d'+w['processpar']+'/dVy'
w['unit']=r'$\mu$'+'Siemens'
except:
print('partial y: Cannot differentiate')
return w
def f_xintegrate(w):
"""Numerical integration - x axis."""
if w['ydata'].ndim == 1: #if 1D
w['ydata'] = np.cumsum(w['ydata'])
wout = w['ydata'] / abs(w['xdata'][0][0]-w['xdata'][0][1]) * 0.0129064037
else:
if w['xdata'][1][0][0]!=w['xdata'][1][1][0]:
sweepback=True
else:
sweepback=False
wout=np.cumsum(w['ydata'],axis=0)
if sweepback:
for wcol in range(np.shape(w['ydata'])[0]):
if wcol%2!=0:
wout[wcol]=np.array(list(reversed(wout[wcol])))
wout = wout / abs(w['xdata'][1][0][0]-w['xdata'][1][0][1]) * 0.0129064037
w['label']='I.dV'
w['unit']= r'$\mathrm{e}^2/\mathrm{h}$'
w['ydata'] = wout
return w
def f_yintegrate(w):
"""Numerical integration - y axis."""
if w['ydata'].ndim == 1: #if 1D
print('Function not valid.')
return
else:
if w['xdata'][1][0][0]!=w['xdata'][1][1][0]:
sweepback=True
else:
sweepback=False
wout=np.cumsum(w['ydata'],axis=1)
if sweepback:
for wcol in range(np.shape(w['ydata'])[0]):
if wcol%2!=0:
wout[wcol]=np.array(list(reversed(wout[wcol])))
wout = wout / abs(w['xdata'][1][0][0]-w['xdata'][1][0][1]) * 0.0129064037
w['label']='I.dV'
w['unit']= r'$\mathrm{e}^2/\mathrm{h}$'
w['ydata'] = wout
return w
def f_lowpass(w, x_width=3, y_height=3, method='gaussian'):
"""Perform a low-pass filter."""
kernel = create_kernel(x_width, y_height, 7, method)
w['ydata'] = ndimage.filters.convolve(w['ydata'], kernel)
w['ydata'] = np.ma.masked_invalid(w['ydata'])
return w
def f_highpass(w, x_width=3, y_height=3, method='gaussian'):
"""Perform a high-pass filter."""
kernel = create_kernel(x_width, y_height, 7, method)
w['ydata'] = w['ydata'] - ndimage.filters.convolve(w['ydata'], kernel)
# kernel = create_kernel(x_width, y_height, 7, method)
# self.z = self.z - ndimage.filters.convolve(self.z, kernel)
return w
def f_deriv(w,sigma):
"""Calculate the length of every gradient vector."""
try:
# sigma=2
wout = diffSmooth(w['ydata'], dy='xy', sigma=sigma) # dy (str): direction to differentiate in; sigma (float): parameter for gaussian filter kernel
w['ydata'] = wout
w['label']='d'+w['processpar']+'/dV'
w['unit']=r'$\mu$'+'Siemens'
except:
print('xy: Cannot differentiate')
return w
def f_movavg(w,m,n):
"""Moving average filter."""
# print('moving average')
# (m, n) = (int(w['avg_m']), int(w['avg_n']))
datac=w['ydata']
if datac.ndim==1:
win=np.ones((m,))
win/=win.sum()
wout=signal.convolve(w['ydata'], win, mode='same')
# wout=moving_average_1d(w['ydata'],win)
else:
win=np.ones((m,n))
win/=win.sum()
wout=signal.convolve2d(w['ydata'], win, mode='same', boundary='symm')
# wout=moving_average_2d(w['ydata'],win)
w['ydata'] = wout
return w
def f_savgol(w,samples,order,deriv):
"""Savitsky-Golay filter."""
# print('savgol')
nanvalues= np.isnan(w['ydata'])
w['ydata'][nanvalues]=0
print(nanvalues)
deltay = abs(w['ydata'][0][0]-w['ydata'][0][1]) / 0.0129064037
print(deltay)
# try:
wout = signal.savgol_filter(w['ydata'], int(samples), int(order), int(deriv), delta = deltay)
# except:
# print('Error smoothing. Check: samples must be odd and smaller than array')
# wout=w['ydata']
w['ydata'] = wout
return w
def offset(w,value):
w['ydata'] = w['ydata'] - value*np.ones(np.shape(w['ydata']))
w['label']=w['processpar']+'-'+str(value)
# w['unit']='nA'
return w
def remove_bg_line(w,axis):
# add smoothing
data=w['ydata']
# print('_____in')
# print(w['ydata'][0])
if axis=='y':
line_avg=np.zeros(np.shape(data)[1])
count_avg=0
for data_line in data:
if not any(np.isnan(data_line)):
count_avg+=1
line_avg+=data_line #/np.shape(w['ydata'])[0]
line_avg=line_avg/count_avg
data_sub = data - np.array(line_avg)
w['ydata'] = data_sub
#w['ydata']-=line_avg
elif axis=='x':
x_line_avg=np.zeros(np.shape(data)[0])
count_avg=np.zeros(np.shape(data)[0])
element_avg=np.zeros(np.shape(data)[0])
for row in list(zip(*data)): # [b[row] for b in a]
            for element_ind, element in enumerate(row):  # enumerate so the last entry of each column is included
if not np.isnan(element):
count_avg[element_ind]+=1
element_avg[element_ind]+=element
x_line_avg=element_avg/count_avg
data_sub=[]
for line_ind in range(np.shape(data)[0]):
dataadd = data[line_ind] - np.ones(np.shape(data)[1])*x_line_avg[line_ind]
data_sub.append(dataadd)
w['ydata'] = data_sub
# print('_____out')
# print(w['ydata'][0])
return w
def f_deinterlace(w,indices):
"""Deinterlace."""
z=[]
if w['xdata'][1][0][0]!=w['xdata'][1][1][0]:
sweepback=True
else:
sweepback=False
if indices=='odd':
for ii in range(0,np.shape(w['ydata'])[0]):
zarray=[]
if ii%2!=0:
for jj in range(0,np.shape(w['ydata'])[1]):
zarray.append(w['ydata'][ii,jj])
else:
try:
zarray.append(w['ydata'][ii+1,0])
for jj in range(1,np.shape(w['ydata'])[1]):
zarray.append(w['ydata'][ii+1,jj])
except:
for jj in range(0,np.shape(w['ydata'])[1]):
zarray.append(w['ydata'][ii-1,0])
if sweepback:
zarray=list(reversed(zarray))
z.append(np.array(zarray))
wout=np.array(z)
elif indices=='even':
        for ii in range(0,np.shape(w['ydata'])[0]):
from ..solution import Solution
from collections import deque
import numpy as np
from numpy.typing import NDArray
def make_seq_boards(input: list[str]) -> tuple[deque, NDArray]:
seq = deque([int(digit) for digit in input[0].split(",")])
boards = []
for i in range(2, len(input), 6):
board = [input[j].split() for j in range(i, i+5)]
boards.append(np.array(board, int))
return seq, np.array(boards, np.ndarray)
def is_finished(board: NDArray) -> bool:
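    # Marked cells are set to -1 by play_bingo, so a fully marked row or column sums to -5.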
for i in range(5):
if np.sum(board[i, :]) == -5 or np.sum(board[:, i]) == -5:
return True
return False
def play_bingo(boards: NDArray, numbers: deque[int]) -> list[tuple[NDArray, int]]:
playing = [i for i in range(0, boards.shape[0])]
finished = []
while len(playing) >= 1:
number = numbers.popleft()
still_playing = []
for idx in playing:
boards[idx] = np.where(boards[idx] == number, -1, boards[idx])
if is_finished(boards[idx]):
finished.append((boards[idx], number))
else:
still_playing.append(idx)
playing = still_playing
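    # Boards are appended to 'finished' in the order they complete, so finished[0] is the first
    # winner and finished[-1] the last board to finish.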
return finished
def score(nstate: tuple[NDArray, int]) -> int:
board, number = nstate
    board = np.where(board == -1, 0, board)
"""
======================
RTL IOfile module
======================
Provides verilog- file-io related attributes and methods
for TheSyDeKick RTL intereface.
Initially written by <NAME>, <EMAIL>,
<NAME>, 2018
"""
import os
import sys
from abc import *
from thesdk import *
from thesdk.iofile import iofile
import numpy as np
import pandas as pd
import sortedcontainers as sc
from rtl.connector import intend
class rtl_iofile(iofile):
'''
Class to provide file IO for verilog simulations. When created,
adds a rtl_iofile object to the parents iofile_bundle attribute.
Accessible as self.iofile_bundle.Members['name'].
    Provides methods and attributes that can be used to construct sections
    in Verilog testbenches, like file open and close routines, file IO routines,
    file IO format strings and read/write conditions.
Example
-------
Initiated in parent as:
_=rtl_iofile(self,name='foobar')
'''
def __init__(self,parent=None,**kwargs):
'''Parameters
-----------
parent : object
The parent object initializing the
rtl_iofile instance. Default None
**kwargs :
name : str
Name of the file. Appended with
random string during the simulation.
param : str, -g `'g_file_'`
                    The string defining the testbench parameter to be
passed to the simulator at command line. Sets the paramname attribute.
ioformat : str, %d
sets the ioformat attribute.
'''
        #This is a redundant check, but it does not hurt to have it here too.
if parent==None:
self.print_log(type='F', msg="Parent of Verilog input file not given")
try:
super(rtl_iofile,self).__init__(parent=parent,**kwargs)
self.paramname=kwargs.get('param','-g g_file_')
self._ioformat=kwargs.get('ioformat','%d') #by default, the io values are decimal integer numbers
except:
self.print_log(type='F', msg="Verilog IO file definition failed")
self._DictData = None # data structure for event-based IO data
#Overload from iofile package
@property
def file(self):
''' Name of the IO file to be read or written.
'''
if not hasattr(self,'_file'):
self._file=self.parent.rtlsimpath +'/' + self.name \
+ '_' + self.rndpart +'.txt'
return self._file
@property
def ioformat(self):
'''Formatting string for verilog file reading
Default %d, i.e. content of the file is single column of
integers.
'''
if hasattr(self,'_ioformat'):
return self._ioformat
else:
self._ioformat='%d'
return self._ioformat
@ioformat.setter
    def ioformat(self,value):
self._ioformat=value
@property
def simparam(self):
''' String definition for parameter to be passed to the simulator
as a command line argument
'''
self._simparam=self.paramname \
+ self.name + '=' + self.file
return self._simparam
@property
def rtlparam(self):
'''Extracts the parameter name and value from simparam attribute.
Used to construct the parameter definitions for Verilog testbench.
Default {'g_file_<self.name>', self.file }
'''
if not hasattr(self,'_rtlparam'):
key=re.sub(r"-g ",'',self.simparam).split('=')[0]
val=re.sub(r"-g ",'',self.simparam).split('=')[1]
self._rtlparam={key:'\"%s\"'%(val)}
return self._rtlparam
# Status parameter
@property
def verilog_stat(self):
'''Status variable name to be used in verilog testbench.
'''
if not hasattr(self,'_verilog_stat'):
self._verilog_stat='status_%s' %(self.name)
return self._verilog_stat
@verilog_stat.setter
def verilog_stat(self,value):
self._verilog_stat=value
#Timestamp integers for control files
@property
def verilog_ctstamp(self):
'''Current time stamp variable name to be used in verilog testbench.
Used in event type file IO.
'''
if not hasattr(self,'_verilog_ctstamp'):
self._verilog_ctstamp='ctstamp_%s' %(self.name)
return self._verilog_ctstamp
@property
def verilog_ptstamp(self):
'''Past time stamp variable for verilog testbench. Used in event type file IO.
'''
if not hasattr(self,'_verilog_ptstamp'):
self._verilog_ptstamp='ptstamp_%s' %(self.name)
return self._verilog_ptstamp
@property
def verilog_tdiff(self):
        '''Verilog time difference variable. Used in event based file IO.
        '''
        if not hasattr(self,'_verilog_tdiff'):
self._verilog_tdiff='tdiff_%s' %(self.name)
return self._verilog_tdiff
# Status integer verilog definitions
@property
def verilog_statdef(self):
'''Verilog file read status integer variable definitions and initializations strings.
'''
if self.iotype=='sample':
self._verilog_statdef='integer %s, %s;\n' %(self.verilog_stat, self.verilog_fptr)
elif self.iotype=='event':
self._verilog_statdef='integer %s, %s, %s, %s, %s;\n' %(self.verilog_stat,
self.verilog_fptr, self.verilog_ctstamp, self.verilog_ptstamp,
self.verilog_tdiff)
self._verilog_statdef+='initial %s=0;\n' %(self.verilog_ctstamp)
self._verilog_statdef+='initial %s=0;\n' %(self.verilog_ptstamp)
for connector in self.verilog_connectors:
self._verilog_statdef+='integer buffer_%s;\n' %(connector.name)
return self._verilog_statdef
# File pointer
@property
def verilog_fptr(self):
'''Verilog file pointer name.
'''
self._verilog_fptr='f_%s' %(self.name)
return self._verilog_fptr
@verilog_fptr.setter
def verilog_fptr(self,value):
self._verilog_fptr=value
# File opening, direction dependent
@property
def verilog_fopen(self):
'''Verilog file open routine string.
'''
if self.dir=='in':
self._verilog_fopen='initial %s = $fopen(%s,\"r\");\n' %(self.verilog_fptr,next(iter(self.rtlparam)))
if self.dir=='out':
self._verilog_fopen='initial %s = $fopen(%s,\"w\");\n' %(self.verilog_fptr,next(iter(self.rtlparam)))
return self._verilog_fopen
# File close
@property
def verilog_fclose(self):
        '''Verilog file close routine string.
'''
self._verilog_fclose='$fclose(%s);\n' %(self.verilog_fptr)
return self._verilog_fclose
@property
def verilog_connectors(self):
''' List for verilog connectors.
These are the verilog signals/regs associated with this file
'''
if not hasattr(self,'_verilog_connectors'):
self._verilog_connectors=[]
return self._verilog_connectors
@verilog_connectors.setter
def verilog_connectors(self,value):
#Ordered list.
self._verilog_connectors=value
def connector_datamap(self,**kwargs):
'''Verilog_connectors is an ordered list. Order defines the assumed order of columns in the
file to be read or written.
        This datamap provides a {'name' : index} dictionary to assign data to the
        correct columns. Less useful for data files, more for control files.
'''
name=kwargs.get('name')
if not self._verilog_connectors:
self.print_log(type='F', msg='Connector undefined, can\'t access.')
else:
if self.iotype=='sample':
self._verilog_connector_datamap=dict()
elif self.iotype=='event':
self._verilog_connector_datamap={'time':0}
index=0
for val in self.verilog_connectors:
index+=1
self._verilog_connector_datamap.update({'%s' %(val.name): index})
return self._verilog_connector_datamap[name]
def set_control_data(self,**kwargs):
'''Method to define event based data value with name, time, and value.
Uses a python dictionary instead of a numpy array for more efficient insertions.
The 'time' column acts as the dictionary key, the remaining columns are stored as the value.
Parameters
----------
**kwargs :
time: int, 0
name: str
val: type undefined
init: int, 0
                vector of values to initialize the data. Length should correspond to `len(self.verilog_connectors)+1`
'''
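        # Illustrative usage (assuming connectors 'a' and 'b' were added in that order):
        #   _.set_control_data(time=0, init=0)            # all signals start at 0
        #   _.set_control_data(time=10, name='a', val=1)  # 'a' rises to 1 at t=10, 'b' unchanged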
time=kwargs.get('time',int(0))
name=kwargs.get('name')
val=kwargs.get('val')
init=kwargs.get('init',int(0))
# sanity checks
assert isinstance(time, int), "Argument 'time' should have the type 'int'"
# Init Data and add first element
if self.DictData is None:
self.DictData = sc.SortedDict()
if np.isscalar(init):
self.DictData[0] = (np.ones(len(self._verilog_connectors))*init).astype(int)
elif init.shape[1] == len(self._verilog_connectors)+1:
init_array = init.astype(int)
for row in init_array:
self.DictData[row[0]] = row[1:]
# Add subsequent elements as diffs as follows:
# None -- no change
# int -- change signal to the given value
else:
# add a new row if the time is not yet in the dictionary
if time not in self.DictData:
# init diff as no change
self.DictData[time] = [None for _ in range(len(self._verilog_connectors))]
# change corresponding value
self.DictData[time][self.connector_datamap(name=name)-1] = val
# Overload self.Data accessors to keep them consistent with the assumption of using numpy arrays
# To hold IO data. These methods convert to and from the diff-based data structure used in this
# module. I.e. the self.Data property will look like an numpy array as seen from external modules
# while in reality it's using the more efficient SortedDict implementation internally.
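    # Example of the conversion (illustrative): a DictData of {0: [0, 0], 10: [1, None]} is returned
    # by the Data getter as the dense array [[0, 0, 0], [10, 1, 0]]; the None entry is filled in
    # from the previous timestamp.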
# Getter - This takes the difference based format stored in DictData and converts it to a numpy array
@property
def Data(self):
if not hasattr(self, '_Data'):
self._Data=None
else:
if self.iotype=='event' and hasattr(self, '_DictData'):
diff_array = np.array([np.insert(signals, 0, time) for (time, signals) in self.DictData.items()])
# populate None values from previous timestamps
transposed = np.transpose(diff_array)
for i in range(1, transposed.shape[0]):
for j in range(1, transposed.shape[1]):
if transposed[i,j] is None:
transposed[i,j] = transposed[i, j-1]
self._Data = np.transpose(transposed).astype(int)
return self._Data
# Setter - Takes a numpy array and converts it to the diff-based SortedDict
@Data.setter
def Data(self, value):
# convert value to equivalent SortedDict representation
if self.iotype=='event':
for row in value:
self.DictData[row[0]] = row[1:]
# build a numpy array from the dict and sort it by time column
diff_array = np.array([np.insert(signals, 0, time) for (time, signals) in self.DictData.items()])
# populate None values from previous timestamps
            transposed = np.transpose(diff_array)
#!/usr/bin/env python
#FIXME: Separate the tests for mesh and general_mesh
#FIXME (Ole): Make this test independent of anything that inherits from General_mesh (namely shallow_water)
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import unittest
from math import sqrt
from anuga.abstract_2d_finite_volumes.neighbour_mesh import *
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_periodic
from anuga.config import epsilon
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.geometry.polygon import is_inside_polygon
from anuga.utilities.numerical_tools import ensure_numeric
import numpy as num
def distance(x, y):
return sqrt(num.sum((num.array(x)-num.array(y))**2))
class Test_Mesh(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_triangle_inputs(self):
points = [[0.0, 0.0], [4.0, 0.0], [0.0, 3.0]]
vertices = [0,1,2] #Wrong
try:
mesh = Mesh(points, vertices)
except:
pass
else:
msg = 'Should have raised exception'
raise Exception(msg)
def test_basic_triangle(self):
a = [0.0, 0.0]
b = [4.0, 0.0]
c = [0.0, 3.0]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices)
#Centroid
centroid = mesh.centroid_coordinates[0]
assert centroid[0] == 4.0/3
assert centroid[1] == 1.0
#Area
assert mesh.areas[0] == 6.0,\
'Area was %f, should have been 6.0' %mesh.areas[0]
#Normals
normals = mesh.get_normals()
assert num.allclose(normals[0, 0:2], [3.0/5, 4.0/5])
assert num.allclose(normals[0, 2:4], [-1.0, 0.0])
assert num.allclose(normals[0, 4:6], [0.0, -1.0])
assert num.allclose(mesh.get_normal(0,0), [3.0/5, 4.0/5])
assert num.allclose(mesh.get_normal(0,1), [-1.0, 0.0])
assert num.allclose(mesh.get_normal(0,2), [0.0, -1.0])
#Edge lengths
assert num.allclose(mesh.edgelengths[0], [5.0, 3.0, 4.0])
#Vertex coordinates
#V = mesh.get_vertex_coordinates()
#assert allclose(V[0], [0.0, 0.0, 4.0, 0.0, 0.0, 3.0])
V = mesh.get_vertex_coordinates()
assert num.allclose(V, [ [0.0, 0.0],
[4.0, 0.0],
[0.0, 3.0] ])
V0 = mesh.get_vertex_coordinate(0, 0)
assert num.allclose(V0, [0.0, 0.0])
V1 = mesh.get_vertex_coordinate(0, 1)
assert num.allclose(V1, [4.0, 0.0])
V2 = mesh.get_vertex_coordinate(0, 2)
assert num.allclose(V2, [0.0, 3.0])
#General tests:
#Test that points are arranged in a counter clock wise order etc
mesh.check_integrity()
#Test that the centroid is located 2/3 of the way
#from each vertex to the midpoint of the opposite side
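        # (A triangle's centroid divides each median 2:1, lying two thirds of the way from the vertex
        #  to the midpoint of the opposite side, hence the d0 == 2*d1/3 checks below.)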
V = mesh.get_vertex_coordinates()
x0 = V[0, 0]; y0 = V[0, 1]
x1 = V[1, 0]; y1 = V[1, 1]
x2 = V[2, 0]; y2 = V[2, 1]
#x0 = V[0,0]
#y0 = V[0,1]
#x1 = V[0,2]
#y1 = V[0,3]
#x2 = V[0,4]
#y2 = V[0,5]
m0 = [old_div((x1 + x2),2), old_div((y1 + y2),2)]
m1 = [old_div((x0 + x2),2), old_div((y0 + y2),2)]
m2 = [old_div((x1 + x0),2), old_div((y1 + y0),2)]
d0 = distance(centroid, [x0, y0])
d1 = distance(m0, [x0, y0])
assert d0 == old_div(2*d1,3)
#
d0 = distance(centroid, [x1, y1])
d1 = distance(m1, [x1, y1])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
d0 = distance(centroid, [x2, y2])
d1 = distance(m2, [x2, y2])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
#Radius
d0 = distance(centroid, m0)
assert d0 == 5.0/6
d1 = distance(centroid, m1)
assert d1 == sqrt(73.0/36)
d2 = distance(centroid, m2)
assert d2 == sqrt(13.0/9)
assert mesh.radii[0] == min(d0, d1, d2)
assert mesh.radii[0] == 5.0/6
#Let x be the centroid of triangle abc.
#Test that areas of the three triangles axc, cxb, and bxa are equal.
points = [a, b, c, centroid]
vertices = [[0,3,2], [2,3,1], [1,3,0]]
new_mesh = Mesh(points, vertices)
assert new_mesh.areas[0] == new_mesh.areas[1]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == old_div(mesh.areas[0],3)
def test_general_triangle(self):
a = [2.0, 1.0]
b = [6.0, 2.0]
c = [1.0, 3.0]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices)
centroid = mesh.centroid_coordinates[0]
#Test that the centroid is located 2/3 of the way
#from each vertex to the midpoint of the opposite side
V = mesh.get_vertex_coordinates()
x0 = V[0, 0]; y0 = V[0, 1]
x1 = V[1, 0]; y1 = V[1, 1]
x2 = V[2, 0]; y2 = V[2, 1]
#x0 = V[0,0]
#y0 = V[0,1]
#x1 = V[0,2]
#y1 = V[0,3]
#x2 = V[0,4]
#y2 = V[0,5]
m0 = [old_div((x1 + x2),2), old_div((y1 + y2),2)]
m1 = [old_div((x0 + x2),2), old_div((y0 + y2),2)]
m2 = [old_div((x1 + x0),2), old_div((y1 + y0),2)]
d0 = distance(centroid, [x0, y0])
d1 = distance(m0, [x0, y0])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
#
d0 = distance(centroid, [x1, y1])
d1 = distance(m1, [x1, y1])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
d0 = distance(centroid, [x2, y2])
d1 = distance(m2, [x2, y2])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
#Radius
d0 = distance(centroid, m0)
d1 = distance(centroid, m1)
d2 = distance(centroid, m2)
assert mesh.radii[0] == min(d0, d1, d2)
#Let x be the centroid of triangle abc.
#Test that areas of the three triangles axc, cxb, and bxa are equal.
points = [a, b, c, centroid]
vertices = [[0,3,2], [2,3,1], [1,3,0]]
new_mesh = Mesh(points, vertices)
assert new_mesh.areas[0] == new_mesh.areas[1]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == old_div(mesh.areas[0],3)
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
def test_inscribed_circle_equilateral(self):
"""test that the radius is calculated correctly by mesh in the case of an equilateral triangle"""
a = [0.0, 0.0]
b = [2.0, 0.0]
c = [1.0, sqrt(3.0)]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices,use_inscribed_circle=False)
        assert num.allclose(mesh.radii[0],old_div(sqrt(3.0),3)), "Steve's doesn't work"
        mesh = Mesh(points, vertices,use_inscribed_circle=True)
        assert num.allclose(mesh.radii[0],old_div(sqrt(3.0),3)), "inscribed circle doesn't work"
def test_inscribed_circle_rightangle_triangle(self):
"""test that the radius is calculated correctly by mesh in the case of a right-angled triangle"""
a = [0.0, 0.0]
b = [4.0, 0.0]
c = [0.0, 3.0]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices,use_inscribed_circle=False)
        assert num.allclose(mesh.radii[0],5.0/6), "Steve's doesn't work"
        mesh = Mesh(points, vertices,use_inscribed_circle=True)
        assert num.allclose(mesh.radii[0],1.0), "inscribed circle doesn't work"
def test_two_triangles(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
e = [2.0, 2.0]
points = [a, b, c, e]
vertices = [ [1,0,2], [1,2,3] ] #bac, bce
mesh = Mesh(points, vertices)
assert mesh.areas[0] == 2.0
assert num.allclose(mesh.centroid_coordinates[0], [2.0/3, 2.0/3])
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
def test_more_triangles(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0, 0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0, 0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe, daf, dae
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4]]
mesh = Mesh(points, vertices)
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
assert mesh.areas[0] == 2.0
assert mesh.areas[1] == 2.0
assert mesh.areas[2] == 2.0
assert mesh.areas[3] == 2.0
assert mesh.edgelengths[1,0] == 2.0
assert mesh.edgelengths[1,1] == 2.0
assert mesh.edgelengths[1,2] == sqrt(8.0)
assert num.allclose(mesh.centroid_coordinates[0], [2.0/3, 2.0/3])
assert num.allclose(mesh.centroid_coordinates[1], [4.0/3, 4.0/3])
assert num.allclose(mesh.centroid_coordinates[2], [8.0/3, 2.0/3])
assert num.allclose(mesh.centroid_coordinates[3], [2.0/3, 8.0/3])
def test_mesh_and_neighbours(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0,0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4] ]
mesh = Mesh(points, vertices)
mesh.check_integrity()
T = mesh
tid = 0
assert T.number_of_boundaries[tid] == 2
assert T.neighbours[tid, 0] < 0 #Opposite point b (0,2)
assert T.neighbours[tid, 1] == 1 #Opposite point a (0,0)
assert T.neighbours[tid, 2] < 0 #Opposite point c (2,0)
tid = 1
assert T.number_of_boundaries[tid] == 0
assert T.neighbours[tid, 0] == 2 #Opposite point b (0,2)
assert T.neighbours[tid, 1] == 3 #Opposite point c (2,0)
assert T.neighbours[tid, 2] == 0 #Opposite point e (2,2)
tid = 2
assert T.number_of_boundaries[tid] == 2
assert T.neighbours[tid, 0] < 0 #Opposite point e (2,2)
assert T.neighbours[tid, 1] < 0 #Opposite point c (2,0)
assert T.neighbours[tid, 2] == 1 #Opposite point f (4,0)
tid = 3
assert T.number_of_boundaries[tid] == 2
assert T.neighbours[tid, 0] == 1 #Opposite point d (0,4)
        assert T.neighbours[tid, 1] < 0 #Opposite point b (0,2)
assert T.neighbours[tid, 2] < 0 #Opposite point e (2,2)
#Neighbouring edges
tid = 0
assert T.neighbour_edges[tid, 0] < 0 #Opposite point b (0,2)
assert T.neighbour_edges[tid, 1] == 2 #Opposite point a (0,0)
assert T.neighbour_edges[tid, 2] < 0 #Opposite point c (2,0)
tid = 1
assert T.neighbour_edges[tid, 0] == 2 #Opposite point b (0,2)
assert T.neighbour_edges[tid, 1] == 0 #Opposite point c (2,0)
assert T.neighbour_edges[tid, 2] == 1 #Opposite point e (2,2)
tid = 2
assert T.neighbour_edges[tid, 0] < 0 #Opposite point e (2,2)
assert T.neighbour_edges[tid, 1] < 0 #Opposite point c (2,0)
assert T.neighbour_edges[tid, 2] == 0 #Opposite point f (4,0)
tid = 3
assert T.neighbour_edges[tid, 0] == 1 #Opposite point d (0,4)
        assert T.neighbour_edges[tid, 1] < 0 #Opposite point b (0,2)
assert T.neighbour_edges[tid, 2] < 0 #Opposite point e (2,2)
def test_build_neighbour_structure_duplicates(self):
p0 = [-66.0, 14.0]
p1 = [14.0, -66.0]
p2 = [14.0, 14.0]
p3 = [60.0, 20.0]
p4 = [10.0, 60.0]
p5 = [60.0, 60.0]
points = [p0, p1, p2, p3, p4, p5]
triangles = [ [0, 1, 2],
[3, 2, 1],
[0, 2, 4],
[0, 2, 4],
[4, 2, 5],
[5, 2, 3]]
try:
mesh = Mesh(points, triangles)
except:
pass
else:
raise Exception("triangle edge duplicates not caught")
def test_rectangular_mesh_basic(self):
M=1
N=1
points, vertices, boundary = rectangular(M, N)
mesh = Mesh(points, vertices, boundary)
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
M=2
N=2
points, vertices, boundary = rectangular(M, N)
mesh = Mesh(points, vertices, boundary)
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
#assert mesh.boundary[(7,1)] == 2 # top
assert mesh.boundary[(7,1)] == 'top' # top
assert mesh.boundary[(3,1)] == 'top' # top
def test_boundary_tags(self):
points, vertices, boundary = rectangular(4, 4)
mesh = Mesh(points, vertices, boundary)
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
#print mesh.get_boundary_tags()
#print mesh.boundary
for k in [1, 3, 5, 7]:
assert mesh.boundary[(k,2)] == 'left'
for k in [24, 26, 28, 30]:
assert mesh.boundary[(k,2)] == 'right'
for k in [7, 15, 23, 31]:
assert mesh.boundary[(k,1)] == 'top'
for k in [0, 8, 16, 24]:
assert mesh.boundary[(k,1)] == 'bottom'
def test_rectangular_mesh(self):
M=4
N=16
len1 = 100.0
len2 = 17.0
points, vertices, boundary = rectangular(M, N, len1, len2)
mesh = Mesh(points, vertices, boundary)
assert len(mesh) == 2*M*N
for i in range(len(mesh)):
assert mesh.areas[i] == old_div(len1*len2,(2*M*N))
            hypo = sqrt((old_div(len1,M))**2 + (old_div(len2,N))**2) #hypotenuse
assert mesh.edgelengths[i, 0] == hypo
assert mesh.edgelengths[i, 1] == old_div(len1,M) #x direction
assert mesh.edgelengths[i, 2] == old_div(len2,N) #y direction
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
def test_rectangular_mesh2(self):
#Check that integers don't cause trouble
N = 16
points, vertices, boundary = rectangular(2*N, N, len1=10, len2=10)
mesh = Mesh(points, vertices, boundary)
def test_surrogate_neighbours(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0,0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4] ]
mesh = Mesh(points, vertices)
mesh.check_integrity()
T = mesh
tid = 0
assert T.number_of_boundaries[tid] == 2
assert T.surrogate_neighbours[tid, 0] == tid
assert T.surrogate_neighbours[tid, 1] == 1
assert T.surrogate_neighbours[tid, 2] == tid
tid = 1
assert T.number_of_boundaries[tid] == 0
assert T.surrogate_neighbours[tid, 0] == 2
assert T.surrogate_neighbours[tid, 1] == 3
assert T.surrogate_neighbours[tid, 2] == 0
tid = 2
assert T.number_of_boundaries[tid] == 2
assert T.surrogate_neighbours[tid, 0] == tid
assert T.surrogate_neighbours[tid, 1] == tid
assert T.surrogate_neighbours[tid, 2] == 1
tid = 3
assert T.number_of_boundaries[tid] == 2
assert T.surrogate_neighbours[tid, 0] == 1
assert T.surrogate_neighbours[tid, 1] == tid
assert T.surrogate_neighbours[tid, 2] == tid
def test_boundary_inputs(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0,0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4] ]
boundary = { (0, 0): 'First',
(0, 2): 'Second',
(2, 0): 'Third',
(2, 1): 'Fourth',
(3, 1): 'Fifth',
(3, 2): 'Sixth'}
mesh = Mesh(points, vertices, boundary)
mesh.check_integrity()
#Check enumeration
#for k, (vol_id, edge_id) in enumerate(mesh.boundary_segments):
# b = -k-1
# assert mesh.neighbours[vol_id, edge_id] == b
def test_boundary_inputs_using_one_default(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0,0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4] ]
boundary = { (0, 0): 'First',
(0, 2): 'Second',
(2, 0): 'Third',
(2, 1): 'Fourth',
#(3, 1): 'Fifth', #Skip this
(3, 2): 'Sixth'}
mesh = Mesh(points, vertices, boundary)
mesh.check_integrity()
from anuga.config import default_boundary_tag
assert mesh.boundary[ (3, 1) ] == default_boundary_tag
#Check enumeration
#for k, (vol_id, edge_id) in enumerate(mesh.boundary_segments):
# b = -k-1
# assert mesh.neighbours[vol_id, edge_id] == b
def test_boundary_inputs_using_all_defaults(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0,0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4] ]
boundary = { (0, 0): 'First',
(0, 2): 'Second',
(2, 0): 'Third',
(2, 1): 'Fourth',
#(3, 1): 'Fifth', #Skip this
(3, 2): 'Sixth'}
mesh = Mesh(points, vertices) #, boundary)
mesh.check_integrity()
from anuga.config import default_boundary_tag
assert mesh.boundary[ (0, 0) ] == default_boundary_tag
assert mesh.boundary[ (0, 2) ] == default_boundary_tag
assert mesh.boundary[ (2, 0) ] == default_boundary_tag
assert mesh.boundary[ (2, 1) ] == default_boundary_tag
assert mesh.boundary[ (3, 1) ] == default_boundary_tag
assert mesh.boundary[ (3, 2) ] == default_boundary_tag
#Check enumeration
#for k, (vol_id, edge_id) in enumerate(mesh.boundary_segments):
# b = -k-1
# assert mesh.neighbours[vol_id, edge_id] == b
def test_inputs(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0,0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4] ]
#Too few points
try:
mesh = Mesh([points[0]], vertices)
except AssertionError:
pass
else:
raise Exception('Should have raised an exception')
#Too few points - 1 element
try:
mesh = Mesh([points[0]], [vertices[0]])
except AssertionError:
pass
else:
raise Exception('Should have raised an exception')
#Wrong dimension of vertices
try:
mesh = Mesh(points, vertices[0])
except AssertionError:
pass
else:
raise Exception('Should have raised an exception')
#Unsubscriptable coordinates object raises exception
try:
mesh = Mesh(points[0], [vertices[0]])
except AssertionError:
pass
else:
raise Exception('Should have raised an exception')
#FIXME: This has been commented out pending a decision
#whether to allow partial boundary tags or not
#
#Not specifying all boundary tags
#try:
# mesh = Mesh(points, vertices, {(3,0): 'x'})
#except AssertionError:
# pass
#else:
# raise Exception('Should have raised an exception')
#Specifying wrong non existing segment
try:
mesh = Mesh(points, vertices, {(5,0): 'x'})
except AssertionError:
pass
except RuntimeError:
pass
else:
raise Exception('Should have raised an exception')
def test_internal_boundaries(self):
"""
        Test that a mesh accepts internal boundary tags and that tagged
        element lists can be built on it.
"""
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
#Create basic mesh
points, vertices, boundary = rectangular(1, 3)
# Add an internal boundary
boundary[(2,0)] = 'internal'
boundary[(1,0)] = 'internal'
#Create shallow water domain
domain = Mesh(points, vertices, boundary)
domain.build_tagged_elements_dictionary({'bottom':[0,1],
'top':[4,5],
'all':[0,1,2,3,4,5]})
def test_boundary_polygon(self):
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
#from mesh import Mesh
#Create basic mesh
points, vertices, boundary = rectangular(2, 2)
mesh = Mesh(points, vertices, boundary)
P = mesh.get_boundary_polygon()
assert len(P) == 8
assert num.allclose(P, [[0.0, 0.0], [0.5, 0.0], [1.0, 0.0],
[1.0, 0.5], [1.0, 1.0], [0.5, 1.0],
[0.0, 1.0], [0.0, 0.5]])
for p in points:
#print p, P
assert is_inside_polygon(p, P)
def test_boundary_polygon_II(self):
#Points
a = [0.0, 0.0] #0
b = [0.0, 0.5] #1
c = [0.0, 1.0] #2
d = [0.5, 0.0] #3
e = [0.5, 0.5] #4
f = [1.0, 0.0] #5
g = [1.0, 0.5] #6
h = [1.0, 1.0] #7
i = [1.5, 0.5] #8
points = [a, b, c, d, e, f, g, h, i]
#dea, bae, bec, fgd,
#edg, ghe, gfi, gih
vertices = [ [3,4,0], [1,0,4], [1,4,2], [5,6,3],
[4,3,6], [6,7,4], [6,5,8], [6,8,7]]
mesh = Mesh(points, vertices)
mesh.check_integrity()
P = mesh.get_boundary_polygon()
assert len(P) == 8
assert num.allclose(P, [a, d, f, i, h, e, c, b])
for p in points:
#print p, P
assert is_inside_polygon(p, P)
def test_boundary_polygon_III(self):
"""Same as II but vertices ordered differently
"""
#Points
a = [0.0, 0.0] #0
b = [0.0, 0.5] #1
c = [0.0, 1.0] #2
d = [0.5, 0.0] #3
e = [0.5, 0.5] #4
f = [1.0, 0.0] #5
g = [1.0, 0.5] #6
h = [1.0, 1.0] #7
i = [1.5, 0.5] #8
points = [a, b, c, d, e, f, g, h, i]
#edg, ghe, gfi, gih
#dea, bae, bec, fgd,
vertices = [[4,3,6], [6,7,4], [6,5,8], [6,8,7],
[3,4,0], [1,0,4], [1,4,2], [5,6,3]]
mesh = Mesh(points, vertices)
mesh.check_integrity()
P = mesh.get_boundary_polygon()
assert len(P) == 8
assert num.allclose(P, [a, d, f, i, h, e, c, b])
for p in points:
assert is_inside_polygon(p, P)
def test_boundary_polygon_IIIa(self):
"""
test_boundary_polygon_IIIa - Check pathological situation where
one triangle has no neighbours. This may be the case if a mesh
is partitioned using pymetis.
"""
#Points
a = [0.0, 0.0] #0
b = [0.0, 0.5] #1
c = [0.0, 1.0] #2
d = [0.5, 0.0] #3
e = [0.5, 0.5] #4
f = [1.0, 0.0] #5
g = [1.0, 0.5] #6
h = [1.0, 1.0] #7
# Add pathological triangle with no neighbours to an otherwise
# trivial mesh
points = [a, b, c, d, e, f, g, h]
#cbe, aeb, dea, fed, ghe (pathological triangle)
vertices = [[2,1,4], [0,4,1], [3,4,0], [5,4,3],
[6,7,4]]
mesh = Mesh(points, vertices)
mesh.check_integrity()
P = mesh.get_boundary_polygon(verbose=False)
assert len(P) == 9
# Note that point e appears twice!
assert num.allclose(P, [a, d, f, e, g, h, e, c, b])
for p in points:
msg = 'Point %s is not inside polygon %s'\
%(p, P)
assert is_inside_polygon(p, P), msg
def test_boundary_polygon_IV(self):
"""Reproduce test test_spatio_temporal_file_function_time
from test_util.py that looked as if it produced the wrong boundary
"""
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
#Create a domain to hold test grid
#(0:15, -20:10)
points, vertices, boundary =\
rectangular(4, 4, 15, 30, origin = (0, -20))
#####
mesh = Mesh(points, vertices)
mesh.check_integrity()
P = mesh.get_boundary_polygon()
#print P
assert len(P) == 16
for p in points:
assert is_inside_polygon(p, P)
#####
mesh = Mesh(points, vertices, boundary)
mesh.check_integrity()
P = mesh.get_boundary_polygon()
#print P, len(P)
assert len(P) == 16
for p in points:
assert is_inside_polygon(p, P)
#print mesh.statistics()
def test_boundary_polygon_V(self):
"""Create a discontinuous mesh (duplicate vertices)
and check that boundary is as expected
"""
#Points
a = [0.0, 0.0] #0
b = [0.0, 0.5] #1
c = [0.0, 1.0] #2
d = [0.5, 0.0] #3
e = [0.5, 0.5] #4
f = [1.0, 0.0] #5
g = [1.0, 0.5] #6
h = [1.0, 1.0] #7
i = [1.5, 0.5] #8
#Duplicate points for triangles edg [4,3,6] (central) and
        #gih [6,8,7] (top right boundary) to make them disconnected
#from the others
e0 = [0.5, 0.5] #9
d0 = [0.5, 0.0] #10
g0 = [1.0, 0.5] #11
i0 = [1.5, 0.5] #12
points = [a, b, c, d, e, f, g, h, i, e0, d0, g0, i0]
#dea, bae, bec, fgd,
#edg, ghe, gfi, gih
#vertices = [ [3,4,0], [1,0,4], [1,4,2], [5,6,3],
# [4,3,6], [6,7,4], [6,5,8], [6,8,7]]
#dea, bae, bec, fgd,
#e0d0g0, ghe, gfi, g0i0h
vertices = [[3,4,0], [1,0,4], [1,4,2], [5,6,3],
[9,10,11], [6,7,4], [6,5,8], [11,12,7]]
mesh = Mesh(points, vertices)
mesh.check_integrity()
P = mesh.get_boundary_polygon()
#print P
assert len(P) == 8
assert num.allclose(P, [a, d, f, i, h, e, c, b])
assert num.allclose(P, [(0.0, 0.0), (0.5, 0.0), (1.0, 0.0), (1.5, 0.5), (1.0, 1.0), (0.5, 0.5), (0.0, 1.0), (0.0, 0.5)])
for p in points:
#print p, P
assert is_inside_polygon(p, P)
def test_boundary_polygon_VI(self):
"""test_boundary_polygon_VI(self)
Create a discontinuous mesh (duplicate vertices) from a real situation that failed
and check that boundary is as expected
"""
# First do the continuous version of mesh
points = [[ 6626.85400391, 0. ],
[ 0. , 38246.4140625 ],
[ 9656.2734375 , 68351.265625 ],
[ 20827.25585938, 77818.203125 ],
[ 32755.59375 , 58126.9765625 ],
[ 35406.3359375 , 79332.9140625 ],
[ 31998.23828125, 88799.84375 ],
[ 23288.65820313, 104704.296875 ],
[ 32187.57617188, 109816.4375 ],
[ 50364.08984375, 110763.1328125 ],
[ 80468.9453125 , 96184.0546875 ],
[ 86149.1015625 , 129886.34375 ],
[ 118715.359375 , 129886.34375 ],
[ 117768.6640625 , 85770.4296875 ],
[ 101485.5390625 , 45251.9453125 ],
[ 49985.4140625 , 2272.06396484],
[ 51737.94140625, 90559.2109375 ],
[ 56659.0703125 , 65907.6796875 ],
[ 75735.4765625 , 23762.00585938],
[ 52341.70703125, 38563.39453125]]
triangles = [[19, 0,15],
[ 2, 4, 3],
[ 4, 2, 1],
[ 1,19, 4],
[15,18,19],
[18,14,17],
[19, 1, 0],
[ 6, 8, 7],
[ 8, 6,16],
[10, 9,16],
[17, 5, 4],
[16,17,10],
[17,19,18],
[ 5,17,16],
[10,14,13],
[10,17,14],
[ 8,16, 9],
[12,11,10],
[10,13,12],
[19,17, 4],
[16, 6, 5]]
triangles = num.array(triangles,num.int)
points = num.array(points,num.float)
mesh = Mesh(points, triangles)
mesh.check_integrity()
Pref = mesh.get_boundary_polygon()
#plot_polygons([ensure_numeric(Pref)], 'goodP')
for p in points:
assert is_inside_polygon(p, Pref)
# Then do the discontinuous version
import warnings
warnings.filterwarnings('ignore')
points = [[ 52341.70703125, 38563.39453125],
[ 6626.85400391, 0. ],
[ 49985.4140625 , 2272.06396484],
[ 9656.2734375 , 68351.265625 ],
[ 32755.59375 , 58126.9765625 ],
[ 20827.25585938, 77818.203125 ],
[ 32755.59375 , 58126.9765625 ],
[ 9656.2734375 , 68351.265625 ],
[ 0. , 38246.4140625 ],
[ 0. , 38246.4140625 ],
[ 52341.70703125, 38563.39453125],
[ 32755.59375 , 58126.9765625 ],
[ 49985.4140625 , 2272.06396484],
[ 75735.4765625 , 23762.00585938],
[ 52341.70703125, 38563.39453125],
[ 75735.4765625 , 23762.00585938],
[ 101485.5390625 , 45251.9453125 ],
[ 56659.0703125 , 65907.6796875 ],
[ 52341.70703125, 38563.39453125],
[ 0. , 38246.4140625 ],
[ 6626.85400391, 0. ],
[ 31998.23828125, 88799.84375 ],
[ 32187.57617188, 109816.4375 ],
[ 23288.65820313, 104704.296875 ],
[ 32187.57617188, 109816.4375 ],
[ 31998.23828125, 88799.84375 ],
[ 51737.94140625, 90559.2109375 ],
[ 80468.9453125 , 96184.0546875 ],
[ 50364.08984375, 110763.1328125 ],
[ 51737.94140625, 90559.2109375 ],
[ 56659.0703125 , 65907.6796875 ],
[ 35406.3359375 , 79332.9140625 ],
[ 32755.59375 , 58126.9765625 ],
[ 51737.94140625, 90559.2109375 ],
[ 56659.0703125 , 65907.6796875 ],
[ 80468.9453125 , 96184.0546875 ],
[ 56659.0703125 , 65907.6796875 ],
[ 52341.70703125, 38563.39453125],
[ 75735.4765625 , 23762.00585938],
[ 35406.3359375 , 79332.9140625 ],
[ 56659.0703125 , 65907.6796875 ],
[ 51737.94140625, 90559.2109375 ],
[ 80468.9453125 , 96184.0546875 ],
[ 101485.5390625 , 45251.9453125 ],
[ 117768.6640625 , 85770.4296875 ],
[ 80468.9453125 , 96184.0546875 ],
[ 56659.0703125 , 65907.6796875 ],
[ 101485.5390625 , 45251.9453125 ],
[ 32187.57617188, 109816.4375 ],
[ 51737.94140625, 90559.2109375 ],
[ 50364.08984375, 110763.1328125 ],
[ 118715.359375 , 129886.34375 ],
[ 86149.1015625 , 129886.34375 ],
[ 80468.9453125 , 96184.0546875 ],
[ 80468.9453125 , 96184.0546875 ],
[ 117768.6640625 , 85770.4296875 ],
[ 118715.359375 , 129886.34375 ],
[ 52341.70703125, 38563.39453125],
[ 56659.0703125 , 65907.6796875 ],
[ 32755.59375 , 58126.9765625 ],
[ 51737.94140625, 90559.2109375 ],
[ 31998.23828125, 88799.84375 ],
[ 35406.3359375 , 79332.9140625 ]]
scaled_points = old_div(ensure_numeric(points, num.int),1000) # Simplify for ease of interpretation
triangles = [[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9,10,11],
[12,13,14],
[15,16,17],
[18,19,20],
[21,22,23],
[24,25,26],
[27,28,29],
[30,31,32],
[33,34,35],
[36,37,38],
[39,40,41],
[42,43,44],
[45,46,47],
[48,49,50],
[51,52,53],
[54,55,56],
[57,58,59],
[60,61,62]]
# First use scaled points for ease of debugging
mesh = Mesh(scaled_points, triangles)
mesh.check_integrity()
P = mesh.get_boundary_polygon()
for p in scaled_points:
assert is_inside_polygon(p, P)
# Then use original points and test
mesh = Mesh(points, triangles)
mesh.check_integrity()
P = mesh.get_boundary_polygon()
for p in points:
assert is_inside_polygon(p, P)
assert num.allclose(P, Pref)
def test_lone_vertices(self):
a = [2.0, 1.0]
b = [6.0, 2.0]
c = [1.0, 3.0]
d = [2.0, 4.0]
e = [4.0, 3.0]
points = [a, b, d, c, e]
vertices = [[0,1,3]]
mesh = Mesh(points, vertices)
mesh.check_integrity()
loners = mesh.get_lone_vertices()
#print loners
self.assertTrue(loners==[2,4],
'FAILED!')
a = [2.0, 1.0]
b = [6.0, 2.0]
c = [1.0, 3.0]
d = [2.0, 4.0]
points = [d, a, b, c]
vertices = [[3,1,2]]
mesh = Mesh(points, vertices)
mesh.check_integrity()
loners = mesh.get_lone_vertices()
self.assertTrue(loners==[0],
'FAILED!')
def test_mesh_get_boundary_polygon_with_georeferencing(self):
"""test_mesh_get_boundary_polygon_with_georeferencing
Test that get_boundary_polygon returns absolute coordinates
"""
# test
a = [0.0, 0.0]
b = [4.0, 0.0]
c = [0.0, 4.0]
absolute_points = [a, b, c]
vertices = [[0, 1, 2]]
geo = Geo_reference(56, 67, -56)
relative_points = geo.change_points_geo_ref(absolute_points)
mesh = Mesh(relative_points, vertices, geo_reference=geo)
boundary_polygon = mesh.get_boundary_polygon()
assert num.allclose(absolute_points, boundary_polygon)
def test_get_triangle_containing_point(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0, 0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0, 0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4]]
mesh = Mesh(points, vertices)
mesh.check_integrity()
try:
id = mesh.get_triangle_containing_point([3.0, 5.0])
except:
pass
else:
msg = 'Should have caught point outside polygon (Non)'
raise Exception(msg)
id = mesh.get_triangle_containing_point([0.5, 1.0])
assert id == 0
id = mesh.get_triangle_containing_point([1.0, 3.0])
assert id == 3
for i, point in enumerate(mesh.get_centroid_coordinates()):
id = mesh.get_triangle_containing_point(point)
assert id == i
def test_get_triangle_neighbours(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
e = [2.0, 2.0]
points = [a, b, c, e]
vertices = [ [1,0,2], [1,2,3] ] #bac, bce
mesh = Mesh(points, vertices)
neighbours = mesh.get_triangle_neighbours(0)
assert num.allclose(neighbours, [-1,1,-2])
neighbours = mesh.get_triangle_neighbours(-10)
assert neighbours == []
neighbours = mesh.get_triangle_neighbours(2)
assert neighbours == []
def test_get_intersecting_segments1(self):
"""test_get_intersecting_segments(self):
Very simple test (horizontal lines)
"""
# Build test mesh
# Create basic mesh
# 9 points at (0,0), (0, 1), ..., (2,2)
# 8 triangles enumerated from left bottom to right top.
points, vertices, boundary = rectangular(2, 2, 2, 2)
mesh = Mesh(points, vertices, boundary)
# Very simple horizontal line intersecting
#
for y_line in [0.1, 0.2, 0.314159, 0.41, 0.6, 0.99, 1.01, 1.5, 1.77, 1.9]:
if y_line < 1:
ceiling = 1
floor = 0
intersected_triangles = [0,1,4,5]
elif y_line > 1:
ceiling = 2
floor = 1
intersected_triangles = [2,3,6,7]
else:
raise Exception('this test is not for parallel lines')
line = [[-1,y_line], [3,y_line]]
L = mesh.get_intersecting_segments(line)
assert len(L) == 4
# Check all normals point straight down etc
total_length = 0
for x in L:
if x.triangle_id % 2 == 0:
assert num.allclose(x.length, ceiling-y_line)
else:
assert num.allclose(x.length, y_line-floor)
assert num.allclose(x.normal, [0,-1])
assert num.allclose(x.segment[1][0], x.segment[0][0] + x.length)
assert num.allclose(x.segment[0][1], y_line)
assert num.allclose(x.segment[1][1], y_line)
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 2), msg
def test_get_intersecting_segments_coinciding(self):
"""test_get_intersecting_segments_coinciding(self):
Test that lines coinciding with triangle edges work.
"""
# Build test mesh
# Create basic mesh
# 9 points at (0,0), (0, 1), ..., (2,2)
# 8 triangles enumerated from left bottom to right top.
points, vertices, boundary = rectangular(2, 2, 2, 2)
mesh = Mesh(points, vertices, boundary)
intersected_triangles = [1,5]
# Very simple horizontal line intersecting
#
y_line = 1.0
line = [[-1,y_line], [3,y_line]]
L = mesh.get_intersecting_segments(line)
msg = 'Only two triangles should be returned'
assert len(L) == 2, msg
# Check all
total_length = 0
for x in L:
assert num.allclose(x.length, 1.0)
assert num.allclose(x.normal, [0,-1])
assert num.allclose(x.segment[1][0], x.segment[0][0] + x.length)
assert num.allclose(x.segment[0][1], y_line)
assert num.allclose(x.segment[1][1], y_line)
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 2), msg
def test_get_intersecting_segments_partially_coinciding(self):
"""test_get_intersecting_segments_partially_coinciding(self):
Test that line coinciding with triangle edges work.
But this ones only coincide with parts of the edge.
"""
# Build test mesh
# Create basic mesh
# 9 points at (0,0), (0, 1), ..., (2,2)
# 8 triangles enumerated from left bottom to right top.
points, vertices, boundary = rectangular(2, 2, 2, 2)
mesh = Mesh(points, vertices, boundary)
intersected_triangles = [1,5]
# Horizontal line intersecting along center but stopping
        # partway through the second triangle's edge
#
y_line = 1.0
#line = [[0, y_line], [2, y_line]]
line = [[0, y_line], [1.5, y_line]]
L = mesh.get_intersecting_segments(line)
#for x in L:
# print x
msg = 'Two triangles should be returned'
assert len(L) == 2, msg
# Check all
total_length = 0
for x in L:
if x.triangle_id == 1:
assert num.allclose(x.length, 1)
assert num.allclose(x.normal, [0, -1])
if x.triangle_id == 5:
assert num.allclose(x.length, 0.5)
assert num.allclose(x.normal, [0, -1])
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 1.5), msg
def test_get_intersecting_segments2(self):
"""test_get_intersecting_segments(self):
Lines with a slope
"""
s2 = old_div(sqrt(2.0),2)
# Build test mesh
# Create basic mesh
# 9 points at (0,0), (0, 1), ..., (2,2)
# 8 triangles enumerated from left bottom to right top.
points, vertices, boundary = rectangular(2, 2, 2, 2)
mesh = Mesh(points, vertices, boundary)
        # Diagonal cutting through a vertex and hypotenuses
line = [[0, 2], [2, 0]]
intersected_triangles = [3,2,5,4]
L = mesh.get_intersecting_segments(line)
assert len(L) == 4
#print L
# Check all segments
total_length = 0
for i, x in enumerate(L):
assert num.allclose(x.length, s2)
assert num.allclose(x.normal, [-s2, -s2])
assert num.allclose(sum(x.normal**2), 1)
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 4*s2), msg
        # Diagonal cutting through a vertex and hypotenuses (reversed)
line = [[2, 0], [0, 2]]
intersected_triangles = [3,2,5,4]
L = mesh.get_intersecting_segments(line)
assert len(L) == 4
#print L
# Check all segments
total_length = 0
for i, x in enumerate(L):
assert num.allclose(x.length, s2)
assert num.allclose(x.normal, [s2, s2])
assert num.allclose(sum(x.normal**2), 1)
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 4*s2), msg
        # Diagonal coinciding with hypotenuses
line = [[2, 2], [0, 0]]
intersected_triangles = [6,0]
L = mesh.get_intersecting_segments(line)
assert len(L) == 2
#print L
# Check all segments
total_length = 0
for i, x in enumerate(L):
assert num.allclose(x.length, 2*s2)
assert num.allclose(x.normal, [-s2, s2])
assert num.allclose(sum(x.normal**2), 1)
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 4*s2), msg
        # Diagonal coinciding with hypotenuses (reversed)
line = [[0, 0], [2, 2]]
intersected_triangles = [6,0]
L = mesh.get_intersecting_segments(line)
assert len(L) == 2
#print L
# Check all segments
total_length = 0
for i, x in enumerate(L):
assert num.allclose(x.length, 2*s2)
assert num.allclose(x.normal, [s2, -s2])
assert num.allclose(sum(x.normal**2), 1)
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 4*s2), msg
# line with slope [1, -1] cutting through vertices of tri 7 and 6
line = [[1, 2], [2, 1]]
intersected_triangles = [7,6]
L = mesh.get_intersecting_segments(line)
assert len(L) == 2
#print L
# Check all segments
total_length = 0
for i, x in enumerate(L):
assert num.allclose(x.length, s2)
assert num.allclose(x.normal, [-s2, -s2])
assert num.allclose(sum(x.normal**2), 1)
assert x.triangle_id in intersected_triangles
total_length += x.length
msg = 'Segments do not add up'
assert num.allclose(total_length, 2*s2), msg
# Arbitrary line with slope [1, -1] cutting through tri 7 and 6
line = [[1.1, 2], [2.1, 1]]
intersected_triangles = [7,6]
L = mesh.get_intersecting_segments(line)
assert len(L) == 2
# Check all segments
total_length = 0
for i, x in enumerate(L):
assert num.allclose(x.normal, [-s2, -s2])
assert num.allclose(sum(x.normal**2), 1)
msg = 'Triangle %d' %x.triangle_id + ' is not in %s' %(intersected_triangles)
assert x.triangle_id in intersected_triangles, msg
def test_get_intersecting_segments3(self):
"""test_get_intersecting_segments(self):
Check that line can stop inside a triangle
"""
s2 = old_div(sqrt(2.0),2)
# Build test mesh
# Create basic mesh
# 9 points at (0,0), (0, 1), ..., (2,2)
# 8 triangles enumerated from left bottom to right top.
points, vertices, boundary = rectangular(2, 2, 2, 2)
mesh = Mesh(points, vertices, boundary)
# Line cutting through one triangle and ending on its edge
line = [[0.5, 3], [0.5, 1.5]]
intersected_triangles = [3]
L = mesh.get_intersecting_segments(line)
assert len(L) == 1
assert L[0].triangle_id == 3
assert num.allclose(L[0].length, 0.5)
assert num.allclose(L[0].normal, [-1,0])
# Now try to shorten it so that its endpoint falls short of the far edge
line = [[0.5, 3], [0.5, 1.6]]
intersected_triangles = [3]
L = mesh.get_intersecting_segments(line)
assert len(L) == 1
assert L[0].triangle_id == 3
assert num.allclose(L[0].length, 0.4)
        assert num.allclose(L[0].normal, [-1,0])
"""
This script finds the best-fit hybrid/halo quenching model parameters for the ECO
data (in h=1.0) so that they can be applied to the mocks when measuring the
error in the data.
"""
from cosmo_utils.utils import work_paths as cwpaths
from matplotlib.ticker import MaxNLocator
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd
import numpy as np
import math
import os
__author__ = '{<NAME>}'
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']},size=20)
rc('text', usetex=True)
plt.rcParams['legend.title_fontsize'] = 'xx-small'
plt.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
def reading_catls(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
    >>> mock_pd = reading_catls(filename, catl_format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def read_data_catl(path_to_file, survey):
"""
Reads survey catalog from file
Parameters
----------
path_to_file: `string`
Path to survey catalog file
survey: `string`
Name of survey
Returns
---------
catl: `pandas.DataFrame`
Survey catalog with grpcz, abs rmag and stellar mass limits
volume: `float`
Volume of survey
z_median: `float`
Median redshift of survey
"""
if survey == 'eco':
# columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
# 'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s',
# 'fc', 'grpmb', 'grpms','modelu_rcorr']
# 13878 galaxies
# eco_buff = pd.read_csv(path_to_file,delimiter=",", header=0, \
# usecols=columns)
eco_buff = reading_catls(path_to_file)
if mf_type == 'smf':
# 6456 galaxies
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.125
z_median = np.median(catl.grpcz.values) / (3 * 10**5)
elif survey == 'resolvea' or survey == 'resolveb':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh',
'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']
# 2286 galaxies
resolve_live18 = pd.read_csv(path_to_file, delimiter=",", header=0, \
usecols=columns)
if survey == 'resolvea':
if mf_type == 'smf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.30
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
elif survey == 'resolveb':
if mf_type == 'smf':
# 487 - cz, 369 - grpcz
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3
# cvar = 0.58
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
return catl, volume, z_median
def std_func(bins, mass_arr, vel_arr):
## Calculate std from mean=0
last_index = len(bins)-1
i = 0
std_arr = []
for index1, bin_edge in enumerate(bins):
if index1 == last_index:
break
cen_deltav_arr = []
for index2, stellar_mass in enumerate(mass_arr):
if stellar_mass >= bin_edge and stellar_mass < bins[index1+1]:
cen_deltav_arr.append(vel_arr[index2])
N = len(cen_deltav_arr)
mean = 0
diff_sqrd_arr = []
for value in cen_deltav_arr:
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
std_arr.append(std)
return std_arr
def std_func_mod(bins, mass_arr, vel_arr):
mass_arr_bin_idxs = np.digitize(mass_arr, bins)
# Put all galaxies that would have been in the bin after the last in the
# bin as well i.e galaxies with bin number 5 and 6 from previous line all
# go in one bin
for idx, value in enumerate(mass_arr_bin_idxs):
if value == 6:
mass_arr_bin_idxs[idx] = 5
mean = 0
std_arr = []
for idx in range(1, len(bins)):
cen_deltav_arr = []
current_bin_idxs = np.argwhere(mass_arr_bin_idxs == idx)
cen_deltav_arr.append(np.array(vel_arr)[current_bin_idxs])
diff_sqrd_arr = []
# mean = np.mean(cen_deltav_arr)
for value in cen_deltav_arr:
# print(mean)
# print(np.mean(cen_deltav_arr))
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
# print(std)
# print(np.std(cen_deltav_arr))
std_arr.append(std)
return std_arr
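# Illustrative sketch, not part of the original pipeline: std_func_mod returns, for
# each stellar-mass bin, the RMS of the velocity offsets about zero rather than about
# the bin mean. The helper below is a hypothetical single-bin check with made-up
# numbers and is never called by this script.
def _demo_rms_about_zero(seed=0):
    rng = np.random.RandomState(seed)
    deltav = rng.normal(loc=50.0, scale=100.0, size=1000)  # made-up velocity offsets
    rms_about_zero = np.sqrt(np.mean(deltav**2))  # what std_func_mod computes per bin
    std_about_mean = np.std(deltav)               # ordinary standard deviation
    # rms_about_zero exceeds std_about_mean whenever the mean offset is non-zero
    return rms_about_zero, std_about_mean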
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False):
"""
Calculates differential stellar mass function in units of h=1.0
Parameters
----------
mstar_arr: numpy array
Array of stellar masses
volume: float
Volume of survey or simulation
    h1_bool: boolean
        True if units of masses are h=1, False if units of masses are not h=1
    colour_flag: string or boolean, optional
        'R' or 'B' when binning red or blue galaxies separately; False (default)
        when binning all galaxies
Returns
---------
maxis: array
Array of x-axis mass values
phi: array
Array of y-axis values
err_tot: array
Array of error values per bin
    bins: array
        Array of bin edge values
    counts: array
        Array of raw galaxy counts per bin
"""
if not h1_bool:
# changing from h=0.7 to h=1 assuming h^-2 dependence
logmstar_arr = np.log10((10**mstar_arr) / 2.041)
else:
logmstar_arr = np.log10(mstar_arr)
if survey == 'eco' or survey == 'resolvea':
bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
if survey == 'eco' and colour_flag == 'R':
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 6
elif survey == 'eco' and colour_flag == 'B':
bin_max = np.round(np.log10((10**11) / 2.041), 1)
bin_num = 6
elif survey == 'resolvea':
# different to avoid nan in inverse corr mat
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
else:
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
bins = np.linspace(bin_min, bin_max, bin_num)
elif survey == 'resolveb':
bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
# Unnormalized histogram and bin edges
counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
dm = edg[1] - edg[0] # Bin width
maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
# Normalized to volume and bin width
err_poiss = np.sqrt(counts) / (volume * dm)
err_tot = err_poiss
phi = counts / (volume * dm) # not a log quantity
phi = np.log10(phi)
return maxis, phi, err_tot, bins, counts
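# Illustrative sketch, not part of the original pipeline: the SMF normalisation in
# diff_smf is simply counts per unit volume per dex of stellar mass, reported in
# log10. The hypothetical helper below reproduces that normalisation for arbitrary
# log-masses and bin edges; it is never called by this script.
def _demo_smf_normalisation(logmstar, bins, volume):
    counts, edges = np.histogram(logmstar, bins=bins)
    dm = edges[1] - edges[0]                    # bin width in dex
    centers = 0.5 * (edges[1:] + edges[:-1])    # bin centers
    phi = np.log10(counts / (volume * dm))      # log number density per dex
    return centers, phi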
def measure_all_smf(table, volume, data_bool):
"""
Calculates differential stellar mass function for all, red and blue galaxies
from mock/data
Parameters
----------
table: pandas Dataframe
Dataframe of either mock or data
volume: float
Volume of simulation/survey
    data_bool: Boolean
        True if `table` is the data catalog, False if it is a mock
Returns
---------
3 multidimensional arrays of stellar mass, phi, total error in SMF and
counts per bin for all, red and blue galaxies
"""
colour_col = 'colour_label'
if data_bool:
logmstar_col = 'logmstar'
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, False, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, False, 'B')
else:
logmstar_col = 'stellar_mass'
# logmstar_col = '{0}'.format(randint_logmstar)
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, True)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, True, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, True, 'B')
return [max_total, phi_total, err_total, counts_total] , \
[max_red, phi_red, err_red, counts_red] , \
[max_blue, phi_blue, err_blue, counts_blue]
def get_deltav_sigma_data(df):
"""
Measure spread in velocity dispersion separately for red and blue galaxies
by binning up central stellar mass (changes logmstar units from h=0.7 to h=1)
Parameters
----------
df: pandas Dataframe
Data catalog
Returns
---------
std_red: numpy array
Spread in velocity dispersion of red galaxies
centers_red: numpy array
Bin centers of central stellar mass for red galaxies
std_blue: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue: numpy array
Bin centers of central stellar mass for blue galaxies
"""
catl = df.copy()
if survey == 'eco' or survey == 'resolvea':
catl = catl.loc[catl.logmstar >= 8.9]
elif survey == 'resolveb':
catl = catl.loc[catl.logmstar >= 8.7]
catl.logmstar = np.log10((10**catl.logmstar) / 2.041)
red_subset_grpids = np.unique(catl.grp.loc[(catl.\
colour_label == 'R') & (catl.fc == 1)].values)
blue_subset_grpids = np.unique(catl.grp.loc[(catl.\
colour_label == 'B') & (catl.fc == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups with a
# red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = catl.loc[catl.grp == key]
cen_stellar_mass = group.logmstar.loc[group.fc.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func_mod(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups with a
# blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = catl.loc[catl.grp == key]
cen_stellar_mass = group.logmstar.loc[group.fc\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func_mod(blue_stellar_mass_bins, blue_cen_stellar_mass_arr,
blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
return std_red, centers_red, std_blue, centers_blue
def chi_squared(data, model, err_data, inv_corr_mat):
"""
Calculates chi squared
Parameters
----------
data: array
Array of data values
model: array
Array of model values
err_data: array
Array of error in data values
Returns
---------
chi_squared: float
Value of chi-squared given a model
"""
# chi_squared_arr = (data - model)**2 / (err_data**2)
# chi_squared = np.sum(chi_squared_arr)
    data = data.flatten() # from shape (4,5) to (20,)
model = model.flatten() # same as above
print("data: " , data , "\n")
print("model: " , model , "\n")
print("data error: " , err_data , "\n")
first_term = ((data - model) / (err_data)).reshape(1,data.size)
third_term = np.transpose(first_term)
# chi_squared is saved as [[value]]
chi_squared = np.dot(np.dot(first_term,inv_corr_mat),third_term)
return chi_squared[0][0]
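# Illustrative sketch, not part of the original pipeline: chi_squared evaluates the
# quadratic form r C^-1 r^T with the row vector r = (data - model) / err. With an
# identity matrix in place of the inverse correlation matrix it reduces to the usual
# sum of squared normalised residuals. Toy numbers below are made up; never called.
def _demo_chi_squared_identity():
    data = np.array([1.0, 2.0, 3.0])
    model = np.array([1.1, 1.8, 3.3])
    err = np.array([0.2, 0.2, 0.3])
    full = chi_squared(data, model, err, np.eye(data.size))
    simple = np.sum(((data - model) / err)**2)
    return full, simple  # the two values agree for an identity matrix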
def lnprob(theta, phi_red_data, phi_blue_data, std_red_data, std_blue_data,
err, corr_mat_inv, gals_df):
"""
    Calculates chi-squared between data and model; despite the emcee-style name, it
    is used here as the objective function for scipy.optimize.minimize
Parameters
----------
theta: array
Array of parameter values
phi: array
Array of y-axis values of mass function
err: numpy.array
Array of error values of red and blue mass function
corr_mat: array
Array of inverse of correlation matrix
Returns
---------
chi2: float
Value of chi-squared given a model
"""
if quenching == 'hybrid':
f_red_cen, f_red_sat, cen_mstar, sat_hosthalom, sat_mstar = \
hybrid_quenching_model(theta, gals_df)
elif quenching == 'halo':
f_red_cen, f_red_sat = halo_quenching_model(theta, gals_df)
gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, gals_df)
v_survey = volume
total_model, red_model, blue_model = measure_all_smf(gals_df, v_survey
, True)
std_red_model, centers_red_model, std_blue_model, centers_blue_model = \
get_deltav_sigma_data(gals_df)
data_arr = []
data_arr.append(phi_red_data)
data_arr.append(phi_blue_data)
data_arr.append(std_red_data)
data_arr.append(std_blue_data)
model_arr = []
model_arr.append(red_model[1])
model_arr.append(blue_model[1])
model_arr.append(std_red_model)
model_arr.append(std_blue_model)
err_arr = err
data_arr, model_arr = np.array(data_arr), np.array(model_arr)
chi2 = chi_squared(data_arr, model_arr, err_arr, corr_mat_inv)
return chi2
def hybrid_quenching_model(theta, gals_df):
"""
Apply hybrid quenching model from Zu and Mandelbaum 2015
    Parameters
    ----------
    theta: array
        Array of hybrid quenching parameter values [Mstar_q, Mh_q, mu, nu]
    gals_df: pandas dataframe
        Mock catalog
Returns
---------
f_red_cen: array
Array of central red fractions
f_red_sat: array
        Array of satellite red fractions
    cen_stellar_mass_arr: array
        Array of central stellar masses
    sat_hosthalo_mass_arr: array
        Array of satellite host halo masses
    sat_stellar_mass_arr: array
        Array of satellite stellar masses
"""
# parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
Mstar_q = theta[0] # Msun/h
Mh_q = theta[1] # Msun/h
mu = theta[2]
nu = theta[3]
cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(gals_df)
cen_stellar_mass_arr, sat_stellar_mass_arr = get_stellar_mock(gals_df)
f_red_cen = 1 - np.exp(-((10**cen_stellar_mass_arr/(10**Mstar_q))**mu))
g_Mstar = np.exp(-((10**sat_stellar_mass_arr/(10**Mstar_q))**mu))
h_Mh = np.exp(-((10**sat_hosthalo_mass_arr/(10**Mh_q))**nu))
f_red_sat = 1 - (g_Mstar * h_Mh)
return f_red_cen, f_red_sat, cen_stellar_mass_arr, sat_hosthalo_mass_arr, sat_stellar_mass_arr
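# Illustrative sketch, not part of the original pipeline: for a single central the
# hybrid model gives f_red = 1 - exp(-(M*/M*_q)^mu), and for a satellite the stellar-
# and halo-mass suppression factors multiply. All input values below are made up and
# the function is never called by this script.
def _demo_hybrid_fred_single_galaxy(logmstar=10.5, logmh=12.5,
                                    Mstar_q=10.5, Mh_q=13.76, mu=0.69, nu=0.15):
    f_red_cen = 1 - np.exp(-((10**logmstar / 10**Mstar_q)**mu))
    g_mstar = np.exp(-((10**logmstar / 10**Mstar_q)**mu))   # stellar-mass factor
    h_mh = np.exp(-((10**logmh / 10**Mh_q)**nu))            # host-halo-mass factor
    f_red_sat = 1 - g_mstar * h_mh
    return f_red_cen, f_red_sat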
def halo_quenching_model(theta, gals_df):
"""
Apply halo quenching model from Zu and Mandelbaum 2015
    Parameters
    ----------
    theta: array
        Array of halo quenching parameter values [Mh_qc, Mh_qs, mu_c, mu_s]
    gals_df: pandas dataframe
        Mock catalog
Returns
---------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
"""
# parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
Mh_qc = theta[0] # Msun/h
Mh_qs = theta[1] # Msun/h
mu_c = theta[2]
mu_s = theta[3]
cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(gals_df)
f_red_cen = 1 - np.exp(-(((10**cen_hosthalo_mass_arr)/(10**Mh_qc))**mu_c))
f_red_sat = 1 - np.exp(-(((10**sat_hosthalo_mass_arr)/(10**Mh_qs))**mu_s))
return f_red_cen, f_red_sat
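# Illustrative sketch, not part of the original pipeline: in the halo model the red
# fraction depends only on host halo mass, with separate characteristic masses and
# slopes for centrals and satellites. Made-up single-halo evaluation; never called.
def _demo_halo_fred_single_halo(logmh=12.5, Mh_qc=12.20, Mh_qs=12.17,
                                mu_c=0.38, mu_s=0.15):
    f_red_cen = 1 - np.exp(-((10**logmh / 10**Mh_qc)**mu_c))
    f_red_sat = 1 - np.exp(-((10**logmh / 10**Mh_qs)**mu_s))
    return f_red_cen, f_red_sat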
def get_deltav_sigma_mocks_urcolour(survey, mock_df):
"""
Calculate spread in velocity dispersion from survey mocks (logmstar converted
to h=1 units before analysis)
Parameters
----------
survey: string
Name of survey
mock_df: Pandas DataFrame
Mock catalog
Returns
---------
    std_red: numpy array
        Spread in velocity dispersion of red galaxies
    std_blue: numpy array
        Spread in velocity dispersion of blue galaxies
    centers_red: numpy array
        Bin centers of central stellar mass for red galaxies
    centers_blue: numpy array
        Bin centers of central stellar mass for blue galaxies
"""
mock_pd = mock_df.copy()
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func_mod(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func_mod(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red = np.array(centers_red)
centers_blue = np.array(centers_blue)
return std_red, std_blue, centers_red, centers_blue
def get_err_data_urcolour(survey, path):
"""
Calculate error in data SMF from mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
    err_colour: array
        Standard deviation, across all mocks, of the combined red/blue SMF and
        red/blue velocity-dispersion measurements
    corr_mat_inv_colour: array
        Inverse of the correlation matrix of those combined measurements
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
phi_arr_total = []
phi_arr_red = []
phi_arr_blue = []
deltav_sig_arr_red = []
deltav_sig_arr_blue = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
(mock_pd.logmstar.values >= mstar_limit)]
logmstar_arr = mock_pd.logmstar.values
u_r_arr = mock_pd.u_r.values
colour_label_arr = np.empty(len(mock_pd), dtype='str')
for idx, value in enumerate(logmstar_arr):
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
elif value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
elif value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
mock_pd['colour_label'] = colour_label_arr
#Measure SMF of mock using diff_smf function
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(logmstar_arr, volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
volume, False, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
volume, False, 'B')
phi_arr_total.append(phi_total)
phi_arr_red.append(phi_red)
phi_arr_blue.append(phi_blue)
deltav_sig_red, deltav_sig_blue, deltav_sig_cen_red, \
deltav_sig_cen_blue = get_deltav_sigma_mocks_urcolour(survey,
mock_pd)
deltav_sig_arr_red.append(deltav_sig_red)
deltav_sig_arr_blue.append(deltav_sig_blue)
phi_arr_total = np.array(phi_arr_total)
phi_arr_red = np.array(phi_arr_red)
phi_arr_blue = np.array(phi_arr_blue)
deltav_sig_arr_red = np.array(deltav_sig_arr_red)
deltav_sig_arr_blue = np.array(deltav_sig_arr_blue)
phi_red_0 = phi_arr_red[:,0]
phi_red_1 = phi_arr_red[:,1]
phi_red_2 = phi_arr_red[:,2]
phi_red_3 = phi_arr_red[:,3]
phi_red_4 = phi_arr_red[:,4]
phi_blue_0 = phi_arr_blue[:,0]
phi_blue_1 = phi_arr_blue[:,1]
phi_blue_2 = phi_arr_blue[:,2]
phi_blue_3 = phi_arr_blue[:,3]
phi_blue_4 = phi_arr_blue[:,4]
dv_red_0 = deltav_sig_arr_red[:,0]
dv_red_1 = deltav_sig_arr_red[:,1]
dv_red_2 = deltav_sig_arr_red[:,2]
dv_red_3 = deltav_sig_arr_red[:,3]
dv_red_4 = deltav_sig_arr_red[:,4]
dv_blue_0 = deltav_sig_arr_blue[:,0]
dv_blue_1 = deltav_sig_arr_blue[:,1]
dv_blue_2 = deltav_sig_arr_blue[:,2]
dv_blue_3 = deltav_sig_arr_blue[:,3]
dv_blue_4 = deltav_sig_arr_blue[:,4]
combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
'phi_blue_4':phi_blue_4, \
'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4})
# Correlation matrix of phi and deltav colour measurements combined
corr_mat_colour = combined_df.corr()
corr_mat_inv_colour = np.linalg.inv(corr_mat_colour.values)
err_colour = np.sqrt(np.diag(combined_df.cov()))
return err_colour, corr_mat_inv_colour
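# Illustrative sketch, not part of the original pipeline: the error vector and inverse
# correlation matrix above come from the scatter of the 20 combined measurements
# (5 red SMF + 5 blue SMF + 5 red sigma + 5 blue sigma bins) across the mocks. The
# hypothetical helper below shows the same pandas/numpy recipe on fake measurements
# with placeholder column names; it is never called by this script.
def _demo_mock_error_matrix(n_mocks=64, n_bins=20, seed=1):
    rng = np.random.RandomState(seed)
    fake = pd.DataFrame(rng.normal(size=(n_mocks, n_bins)),
                        columns=['bin_{0}'.format(i) for i in range(n_bins)])
    err = np.sqrt(np.diag(fake.cov()))             # per-bin standard deviation
    corr_inv = np.linalg.inv(fake.corr().values)   # inverse correlation matrix
    return err, corr_inv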
def assign_colour_label_data(catl):
"""
Assign colour label to data
Parameters
----------
catl: pandas Dataframe
Data catalog
Returns
---------
catl: pandas Dataframe
Data catalog with colour label assigned as new column
"""
logmstar_arr = catl.logmstar.values
u_r_arr = catl.modelu_rcorr.values
colour_label_arr = np.empty(len(catl), dtype='str')
for idx, value in enumerate(logmstar_arr):
# Divisions taken from Moffett et al. 2015 equation 1
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
if value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
if value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
catl['colour_label'] = colour_label_arr
return catl
def assign_colour_label_mock(f_red_cen, f_red_sat, gals_df, drop_fred=False):
"""
Assign colour label to mock catalog
Parameters
----------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
gals_df: pandas Dataframe
Mock catalog
drop_fred: boolean
Whether or not to keep red fraction column after colour has been
assigned
Returns
---------
df: pandas Dataframe
Dataframe with colour label and random number assigned as
new columns
"""
# Copy of dataframe
df = gals_df.copy()
# Saving labels
color_label_arr = [[] for x in range(len(df))]
rng_arr = [[] for x in range(len(df))]
# Adding columns for f_red to df
df.loc[:, 'f_red'] = np.zeros(len(df))
df.loc[df['fc'] == 1, 'f_red'] = f_red_cen
df.loc[df['fc'] == 0, 'f_red'] = f_red_sat
# Converting to array
f_red_arr = df['f_red'].values
# Looping over galaxies
for ii, cs_ii in enumerate(df['fc']):
# Draw a random number
rng = np.random.uniform()
# Comparing against f_red
if (rng >= f_red_arr[ii]):
color_label = 'B'
else:
color_label = 'R'
# Saving to list
color_label_arr[ii] = color_label
rng_arr[ii] = rng
## Assigning to DataFrame
df.loc[:, 'colour_label'] = color_label_arr
df.loc[:, 'rng'] = rng_arr
# Dropping 'f_red` column
if drop_fred:
df.drop('f_red', axis=1, inplace=True)
return df
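# Illustrative sketch, not part of the original pipeline: colours are assigned with a
# Bernoulli draw against the red fraction, so over many galaxies the realised red
# fraction converges to f_red. Made-up f_red below; the function is never called.
def _demo_bernoulli_colour(f_red=0.7, n_gal=100000, seed=2):
    rng = np.random.RandomState(seed)
    draws = rng.uniform(size=n_gal)
    labels = np.where(draws < f_red, 'R', 'B')   # rng < f_red means red, as above
    return (labels == 'R').mean()                # close to f_red for large n_gal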
def get_host_halo_mock(catl):
"""
Get host halo mass from mock catalog
Parameters
----------
catl: pandas dataframe
Data catalog
Returns
---------
cen_halos: array
Array of central host halo masses
sat_halos: array
Array of satellite host halo masses
"""
df = catl.copy()
cen_halos = []
sat_halos = []
for index, value in enumerate(df.fc):
if value == 1:
cen_halos.append(df.logmh_s.values[index])
else:
sat_halos.append(df.logmh_s.values[index])
cen_halos = np.array(cen_halos)
sat_halos = np.array(sat_halos)
return cen_halos, sat_halos
def get_stellar_mock(catl):
"""
Get stellar mass from mock catalog
Parameters
----------
catl: pandas dataframe
Data catalog
Returns
---------
cen_gals: array
Array of central stellar masses
sat_gals: array
Array of satellite stellar masses
"""
df = catl.copy()
cen_gals = []
sat_gals = []
for idx,value in enumerate(df.fc):
if value == 1:
cen_gals.append(df.logmstar.values[idx])
elif value == 0:
sat_gals.append(df.logmstar.values[idx])
cen_gals = np.array(cen_gals)
sat_gals = np.array(sat_gals)
return cen_gals, sat_gals
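# Illustrative sketch, not part of the original pipeline: assuming fc only takes the
# values 0 and 1, the central/satellite split done with python loops in the two
# functions above can equivalently be written with boolean masks. This hypothetical
# helper is a design note only and is never called by this script.
def _demo_split_cen_sat(df):
    cen_mask = df.fc.values == 1
    cen_logmstar = df.logmstar.values[cen_mask]
    sat_logmstar = df.logmstar.values[~cen_mask]
    return cen_logmstar, sat_logmstar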
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_interim = dict_of_paths['int_dir']
path_to_figures = dict_of_paths['plot_dir']
path_to_data = dict_of_paths['data_dir']
global volume
global quenching
survey = 'eco'
mf_type = 'smf'
quenching = 'halo'
catl_file = path_to_proc + "gal_group_eco_data.hdf5"
path_to_mocks = path_to_data + 'mocks/m200b/eco/'
catl, volume, z_median = read_data_catl(catl_file, survey)
catl = assign_colour_label_data(catl)
# Measurements in h=1.0
total_data, red_data, blue_data = measure_all_smf(catl, volume, True)
# Masses in h=1.0
sigma_red, cen_red, sigma_blue, cen_blue = get_deltav_sigma_data(catl)
# SMF measurements and masses in h=1.0 before matrix and error calculations
err_data_colour, corr_mat_colour_inv = get_err_data_urcolour(survey,
path_to_mocks)
# Starting values for the hybrid quenching model (Zu & Mandelbaum 2015)
Mstar_q = 10.5 # Msun/h
Mh_q = 13.76 # Msun/h
mu = 0.69
nu = 0.15
x0 = [Mstar_q, Mh_q, mu, nu]
# Starting values for the halo quenching model; since quenching = 'halo' above,
# this second assignment of x0 is the one actually passed to the minimizer
Mh_qc = 12.20 # Msun/h
Mh_qs = 12.17 # Msun/h
mu_c = 0.38
mu_s = 0.15
x0 = [Mh_qc, Mh_qs, mu_c, mu_s]
catl, volume, z_median = read_data_catl(catl_file, survey)
res = minimize(lnprob, x0, args=(red_data[1], blue_data[1], sigma_red,
sigma_blue, err_data_colour, corr_mat_colour_inv, catl),
method='nelder-mead', options={'maxiter':20, 'disp': True})
best_fit = res.x
## After running minimize for 10, 20, 40 and 80 iterations. The plots below assume
## the results of those separate runs were kept (interactively) as res10, res20,
## res40 and res80, with res80 corresponding to best_fit.
catl = catl.loc[catl.logmstar.values >= 8.9]
# Hybrid-model calls (also used to grab the stellar/halo mass arrays plotted below)
f_red_cen_fid, f_red_sat_fid, mstar_cen_fid, sat_hosthalo_fid, mstar_sat_fid = \
    hybrid_quenching_model(x0, catl)
f_red_cen, f_red_sat, mstar_cen, sat_hosthalo, mstar_sat = \
    hybrid_quenching_model(best_fit, catl)
# Halo-model calls; since quenching = 'halo' these red fractions overwrite the
# hybrid ones just computed
f_red_cen_fid, f_red_sat_fid = halo_quenching_model(x0, catl)
f_red_cen, f_red_sat = halo_quenching_model(best_fit, catl)
gals_df_fid = assign_colour_label_mock(f_red_cen_fid, f_red_sat_fid, catl)
v_survey = volume
total_model_fid, red_model_fid, blue_model_fid = measure_all_smf(gals_df_fid,
v_survey, True)
std_red_model_fid, centers_red_model_fid, std_blue_model_fid, \
centers_blue_model_fid = get_deltav_sigma_data(gals_df_fid)
gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, catl)
v_survey = volume
total_model, red_model, blue_model = measure_all_smf(gals_df, v_survey, True)
std_red_model, centers_red_model, std_blue_model, centers_blue_model = \
get_deltav_sigma_data(gals_df)
## Red fraction of centrals
plt.scatter(mstar_cen, f_red_cen, c='mediumorchid', label='ECO best fit')
plt.scatter(mstar_cen_fid, f_red_cen_fid, c='cornflowerblue', label='fiducial')
plt.ylabel(r'$f_{red}$', fontsize=30)
plt.xlabel(r'$M_{*, cen}$',fontsize=20)
plt.legend(loc='best')
plt.show()
## Red fraction of satellites
plt.scatter(mstar_sat, f_red_sat, c=10**sat_hosthalo, label='ECO best fit')
plt.ylabel(r'$f_{red}$', fontsize=30)
plt.xlabel(r'$M_{*, sat}$',fontsize=20)
plt.colorbar()
plt.legend(loc='best')
plt.show()
plt.scatter(mstar_sat_fid, f_red_sat_fid, c=10**sat_hosthalo_fid,
label='fiducial')
plt.ylabel(r'$f_{red}$', fontsize=30)
plt.xlabel(r'$M_{*, sat}$',fontsize=20)
plt.legend(loc='best')
plt.show()
## SMF plot - data vs best fit
# plt.plot(total_model[0], total_model[1], c='k', linestyle='--', linewidth=5, label='model')
plt.plot(red_model[0], red_model[1], c='maroon', linestyle='--', linewidth=5,
label='model')
plt.plot(blue_model[0], blue_model[1], c='mediumblue', linestyle='--',
linewidth=5)
# plt.plot(total_data[0], total_data[1], c='k', linestyle='-', linewidth=5, label='data')
plt.plot(red_data[0], red_data[1], c='indianred', linestyle='-', linewidth=5,
label='data')
plt.plot(blue_data[0], blue_data[1], c='cornflowerblue', linestyle='-',
linewidth=5)
yerr_red = err_data_colour[0:5]
yerr_blue = err_data_colour[5:10]
plt.fill_between(x=red_data[0], y1=red_data[1]+yerr_red,
y2=red_data[1]-yerr_red, color='r', alpha=0.3)
plt.fill_between(x=blue_data[0], y1=blue_data[1]+yerr_blue,
y2=blue_data[1]-yerr_blue, color='b', alpha=0.3)
plt.legend(loc='best')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, '
r'\mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,'
r'\mathrm{h}^{3} \right]$', fontsize=20)
plt.title(r'ECO SMF ($\chi^2$\ = {0}) (best-fit vs data)'.\
format(np.round(res80.fun, 2)))
plt.show()
## Spread in velocity difference plot - data vs best fit
plt.scatter(centers_red_model, std_red_model, c='maroon', s=50, label='model')
plt.scatter(centers_blue_model, std_blue_model, c='mediumblue', s=50,
label='model')
plt.scatter(cen_red, sigma_red, c='indianred', s=50, label='data')
plt.scatter(cen_blue, sigma_blue, c='cornflowerblue', s=50, label='data')
yerr_red = err_data_colour[10:15]
yerr_blue = err_data_colour[15:20]
plt.fill_between(x=cen_red, y1=sigma_red+yerr_red,
y2=sigma_red-yerr_red, color='r', alpha=0.3)
plt.fill_between(x=cen_blue, y1=sigma_blue+yerr_blue,
y2=sigma_blue-yerr_blue, color='b', alpha=0.3)
plt.legend(loc='best')
plt.xlabel(r'\boldmath$\log_{10}\ M_{\star,cen} \left[\mathrm{M_\odot}\, '
r'\mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\sigma$', fontsize=30)
plt.title(r'ECO spread in $\delta v$\ (best-fit vs data)')
plt.show()
## SMF plot - fiducial vs best fit
# plt.plot(total_model[0], total_model[1], c='k', linestyle='--', linewidth=5, label='model')
plt.plot(red_model_fid[0], red_model_fid[1], c='maroon', linestyle='--',
linewidth=5, label='fiducial')
plt.plot(blue_model_fid[0], blue_model_fid[1], c='mediumblue', linestyle='--',
linewidth=5)
# plt.plot(total_data[0], total_data[1], c='k', linestyle='-', linewidth=5, label='data')
plt.plot(red_model[0], red_model[1], c='indianred', linestyle='-', linewidth=5,
label='best fit')
plt.plot(blue_model[0], blue_model[1], c='cornflowerblue', linestyle='-',
linewidth=5)
yerr_red = err_data_colour[0:5]
yerr_blue = err_data_colour[5:10]
plt.fill_between(x=red_data[0], y1=red_data[1]+yerr_red,
y2=red_data[1]-yerr_red, color='r', alpha=0.3)
plt.fill_between(x=blue_data[0], y1=blue_data[1]+yerr_blue,
y2=blue_data[1]-yerr_blue, color='b', alpha=0.3)
plt.legend(loc='best')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, '
r'\mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,'
r'\mathrm{h}^{3} \right]$', fontsize=20)
plt.title(r'ECO SMF ($\chi^2$\ = {0})'.format(np.round(res80.fun, 2)))
plt.show()
## Spread in velocity difference plot - fiducial vs best fit
plt.scatter(centers_red_model_fid, std_red_model_fid, c='maroon', s=50,
label='fiducial')
plt.scatter(centers_blue_model_fid, std_blue_model_fid, c='mediumblue', s=50,
label='fiducial')
plt.scatter(centers_red_model, std_red_model, c='indianred', s=50,
label='best fit')
plt.scatter(centers_blue_model, std_blue_model, c='cornflowerblue', s=50,
label='best fit')
yerr_red = err_data_colour[10:15]
yerr_blue = err_data_colour[15:20]
plt.fill_between(x=cen_red, y1=sigma_red+yerr_red,
y2=sigma_red-yerr_red, color='r', alpha=0.3)
plt.fill_between(x=cen_blue, y1=sigma_blue+yerr_blue,
y2=sigma_blue-yerr_blue, color='b', alpha=0.3)
plt.legend(loc='best')
plt.xlabel(r'\boldmath$\log_{10}\ M_{\star,cen} \left[\mathrm{M_\odot}\, '
r'\mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\sigma$', fontsize=30)
plt.title(r'ECO spread in $\delta v$')
plt.show()
## Histogram of M* of galaxies labeled red and blue in data and model
plt.hist(catl.logmstar.loc[catl.colour_label == 'R'],
bins=np.linspace(8.9, 12, 10), label='data',
histtype='step', color='r', lw=5)
plt.hist(gals_df.logmstar.loc[gals_df.colour_label == 'R'],
bins=np.linspace(8.9, 12, 10), label='best-fit',
histtype='step', color='indianred', lw=5)
plt.hist(catl.logmstar.loc[catl.colour_label == 'B'],
bins=np.linspace(8.9, 11.5, 8), label='data',
histtype='step', color='b', lw=5)
plt.hist(gals_df.logmstar.loc[gals_df.colour_label == 'B'],
bins=np.linspace(8.9, 11.5, 8), label='best-fit',
histtype='step', color='cornflowerblue', lw=5)
plt.legend(loc='best')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, '
r'\mathrm{h}^{-1} \right]$', fontsize=20)
plt.show()
## Histogram of u-r colours of galaxies labeled red and blue in data and model
plt.hist(catl.modelu_rcorr.loc[catl.colour_label == 'R'],
bins=np.linspace(1.4, 3.4, 7), label='data',
histtype='step', color='r', lw=5)
plt.hist(gals_df.modelu_rcorr.loc[gals_df.colour_label == 'R'],
bins=np.linspace(1.4, 3.4, 7), label='best-fit',
histtype='step', color='indianred', lw=5)
plt.hist(catl.modelu_rcorr.loc[catl.colour_label == 'B'],
bins=np.linspace(0.5, 2.0, 7), label='data',
histtype='step', color='b', lw=5)
plt.hist(gals_df.modelu_rcorr.loc[gals_df.colour_label == 'B'],
bins=np.linspace(0.5, 2.0, 7), label='best-fit',
histtype='step', color='cornflowerblue', lw=5)
plt.legend(loc='best')
plt.xlabel(r'\boldmath(u-r)', fontsize=20)
plt.show()
## Plot of change of parameter values and chi-squared at each iteration
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, sharex=True)
labels=['10', '20', '40', '80']
colours=['indianred', 'yellowgreen', 'cornflowerblue', 'orchid']
for idx in range(len(labels)):
idx2 = 0
res = vars()['res{0}'.format(labels[idx])]
ax1.scatter(idx+1, res.x[idx2], c=colours[idx], s=100, label=labels[idx])
ax1.axhline(x0[0], ls='--', color='k')
ax1.minorticks_on()
ax1.set_axisbelow(True)
ax1.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax1.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
ax1.set_ylabel(r'$\mathbf{M^{q}_{*}}$')
ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
ax1.legend(loc='best', prop={'size': 8}, title='Iterations')
ax2.scatter([1,2,3,4], [res10.x[1], res20.x[1], res40.x[1], best_fit[1]],
c=['indianred', 'yellowgreen', 'cornflowerblue', 'orchid'], s=100)
ax2.axhline(x0[1], ls='--', color='k')
ax2.minorticks_on()
ax2.set_axisbelow(True)
ax2.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax2.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
ax2.set_ylabel(r'$\mathbf{M^{q}_{h}}$')
ax2.xaxis.set_major_locator(MaxNLocator(integer=True))
ax3.scatter([1,2,3,4], [res10.x[2], res20.x[2], res40.x[2], best_fit[2]],
c=['indianred', 'yellowgreen', 'cornflowerblue', 'orchid'], s=100)
ax3.axhline(x0[2], ls='--', color='k')
ax3.minorticks_on()
ax3.set_axisbelow(True)
ax3.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax3.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
ax3.set_ylabel(r'$\boldsymbol{\mu}$')
ax3.xaxis.set_major_locator(MaxNLocator(integer=True))
ax4.scatter([1,2,3,4], [res10.x[3], res20.x[3], res40.x[3], best_fit[3]],
c=['indianred', 'yellowgreen', 'cornflowerblue', 'orchid'], s=100)
ax4.axhline(x0[3], ls='--', color='k')
ax4.minorticks_on()
ax4.set_axisbelow(True)
ax4.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax4.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
ax4.set_ylabel(r'$\boldsymbol{\nu}$')
ax4.xaxis.set_major_locator(MaxNLocator(integer=True))
ax5.scatter([1,2,3,4], [res10.fun, res20.fun, res40.fun, res80.fun],
c=['indianred', 'yellowgreen', 'cornflowerblue', 'orchid'], s=100)
ax5.minorticks_on()
ax5.set_axisbelow(True)
ax5.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax5.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
ax5.set_ylabel(r'$\boldsymbol{{\ \chi}^2}$')
ax5.xaxis.set_major_locator(MaxNLocator(integer=True))
fig.suptitle('Hybrid quenching model parameters')
plt.xlabel('Set')
plt.show()
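# The five axes above repeat the same tick/grid/locator styling. A small
# helper such as the sketch below (not used by the plot above) would remove
# that repetition; it assumes MaxNLocator is already imported, as in the
# calls above.
def style_axis(ax, ylabel):
    """Apply the shared minor-tick, grid and integer-locator styling."""
    ax.minorticks_on()
    ax.set_axisbelow(True)
    ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
    ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
    ax.set_ylabel(ylabel)
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))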
################################################################################
## Extra check
from halotools.empirical_models import PrebuiltSubhaloModelFactory
from cosmo_utils.utils.stats_funcs import Stats_one_arr
from halotools.sim_manager import CachedHaloCatalog
from cosmo_utils.utils import work_paths as cwpaths
from collections import OrderedDict
from multiprocessing import Pool
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd
import numpy as np
import argparse
import random
import math
import time
import os
__author__ = '{<NAME>}'
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)
rc('text', usetex=True)
rc('text.latex', preamble=[r"\usepackage{amsmath}"])
def read_chi2(path_to_file):
"""
Reads chi-squared values from file
Parameters
----------
path_to_file: string
Path to chi-squared values file
Returns
---------
chi2: array
Array of reshaped chi^2 values to match chain values
"""
ver = 2.0
chi2_df = pd.read_csv(path_to_file,header=None,names=['chisquared'])
# Applies to runs prior to run 5?
if mf_type == 'smf' and survey == 'eco' and ver==1.0:
# Needed to reshape since flattened along wrong axis,
# didn't correspond to chain
test_reshape = chi2_df.chisquared.values.reshape((1000,250))
chi2 = np.ndarray.flatten(np.array(test_reshape),'F')
else:
chi2 = chi2_df.chisquared.values
return chi2
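# Toy check of the reshape/flatten repair above (the real run is assumed to be
# 1000 steps x 250 walkers): flattening in column-major ('F') order interleaves
# the columns, undoing a dump that was flattened along the wrong axis.
_toy = np.arange(6).reshape((2, 3))
assert list(np.ndarray.flatten(_toy, 'F')) == [0, 3, 1, 4, 2, 5]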
def read_mcmc(path_to_file):
"""
Reads mcmc chain from file
Parameters
----------
path_to_file: string
Path to mcmc chain file
Returns
---------
emcee_table: pandas dataframe
Dataframe of mcmc chain values with NANs removed
"""
ver = 2.0
colnames = ['mhalo_c','mstellar_c','lowmass_slope','highmass_slope',\
'scatter']
if mf_type == 'smf' and survey == 'eco' and ver==1.0:
emcee_table = pd.read_csv(path_to_file,names=colnames,sep='\s+',\
dtype=np.float64)
else:
emcee_table = pd.read_csv(path_to_file, names=colnames,
delim_whitespace=True, header=None)
emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)
# Cases where last parameter was a NaN and its value was being written to
# the first element of the next line followed by 4 NaNs for the other
# parameters
for idx,row in enumerate(emcee_table.values):
if np.isnan(row[4]) and not np.isnan(row[3]):
scatter_val = emcee_table.values[idx+1][0]
row[4] = scatter_val
# Cases where rows of NANs appear
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
return emcee_table
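# Hedged, self-contained illustration of the NaN repair in read_mcmc above:
# when the fifth parameter of a row spills onto the first element of the next
# line, it is copied back and the leftover all-NaN row is dropped.
_demo = np.array([[12.3, 10.5, 0.4, 0.6, np.nan],
                  [0.3, np.nan, np.nan, np.nan, np.nan]])
for _idx, _row in enumerate(_demo):
    if np.isnan(_row[4]) and not np.isnan(_row[3]):
        _row[4] = _demo[_idx + 1][0]
_demo = _demo[~np.isnan(_demo).any(axis=1)]
# _demo is now a single repaired row: [[12.3, 10.5, 0.4, 0.6, 0.3]]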
def read_mock_catl(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
>>> mock_pd = read_mock_catl(filename, catl_format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def get_paramvals_percentile(mcmc_table, pctl, chi2):
"""
Isolates the 68th percentile of lowest chi^2 values and takes a random sample of 100
Parameters
----------
mcmc_table: pandas dataframe
Mcmc chain dataframe
pctl: int
Percentile to use
chi2: array
Array of chi^2 values
Returns
---------
mcmc_table_pctl: pandas dataframe
Sample of 100 68th percentile lowest chi^2 values
"""
pctl = pctl/100
mcmc_table['chi2'] = chi2
mcmc_table = mcmc_table.sort_values('chi2').reset_index(drop=True)
slice_end = int(pctl*len(mcmc_table))
mcmc_table_pctl = mcmc_table[:slice_end]
# Best fit params are the parameters that correspond to the smallest chi2
bf_params = mcmc_table_pctl.drop_duplicates().reset_index(drop=True).\
values[0][:5]
bf_chi2 = mcmc_table_pctl.drop_duplicates().reset_index(drop=True).\
values[0][5]
# Randomly sample 100 lowest chi2
mcmc_table_pctl = mcmc_table_pctl.drop_duplicates().sample(100)
return mcmc_table_pctl, bf_params, bf_chi2
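# Hedged usage sketch for get_paramvals_percentile; the file paths below are
# placeholders, so the calls are left commented out:
# chi2 = read_chi2('path/to/chi2_file.txt')            # placeholder path
# mcmc_table = read_mcmc('path/to/mcmc_chain.txt')     # placeholder path
# mcmc_table_pctl, bf_params, bf_chi2 = \
#     get_paramvals_percentile(mcmc_table, 68, chi2)
# bf_params then holds the five parameters of the lowest-chi2 sample and
# mcmc_table_pctl holds 100 random draws from the 68% lowest-chi2 samples.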
def get_centrals_mock(gals_df):
"""
Get centrals from mock catalog
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
cen_gals: array
Array of central galaxy masses
cen_halos: array
Array of central halo masses
"""
C_S = []
for idx in range(len(gals_df)):
if gals_df['halo_hostid'][idx] == gals_df['halo_id'][idx]:
C_S.append(1)
else:
C_S.append(0)
C_S = np.array(C_S)
gals_df['C_S'] = C_S
cen_gals = []
cen_halos = []
for idx,value in enumerate(gals_df['C_S']):
if value == 1:
cen_gals.append(gals_df['stellar_mass'][idx])
cen_halos.append(gals_df['halo_mvir'][idx])
cen_gals = np.log10(np.array(cen_gals))
cen_halos = np.log10(np.array(cen_halos))
return cen_gals, cen_halos
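# The central-selection loops above can also be written with pandas boolean
# masks; a hedged, equivalent sketch (same column names assumed, without
# adding the C_S column) is:
# cen_mask = gals_df['halo_hostid'] == gals_df['halo_id']
# cen_gals = np.log10(gals_df.loc[cen_mask, 'stellar_mass'].values)
# cen_halos = np.log10(gals_df.loc[cen_mask, 'halo_mvir'].values)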
def halocat_init(halo_catalog,z_median):
"""
Initial population of halo catalog using populate_mock function
Parameters
----------
halo_catalog: string
Path to halo catalog
z_median: float
Median redshift of survey
Returns
---------
model: halotools model instance
Model based on behroozi 2010 SMHM
"""
halocat = CachedHaloCatalog(fname=halo_catalog, update_cached_fname=True)
model = PrebuiltSubhaloModelFactory('behroozi10', redshift=z_median, \
prim_haloprop_key='halo_macc')
model.populate_mock(halocat,seed=5)
return model
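# Hedged usage sketch for halocat_init; the catalog path and redshift below
# are placeholders, and a cached halotools halo catalog must exist on disk:
# halo_catalog = '/path/to/halo_catalog.hdf5'   # placeholder path
# z_median = 0.023                              # placeholder survey redshift
# model_init = halocat_init(halo_catalog, z_median)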
def populate_mock(theta):
"""
Populate mock based on five parameter values
Parameters
----------
theta: array
Array of parameter values
Returns
---------
gals_df: pandas dataframe
Dataframe of mock catalog
"""
mhalo_characteristic, mstellar_characteristic, mlow_slope, mhigh_slope,\
mstellar_scatter = theta
model_init.param_dict['smhm_m1_0'] = mhalo_characteristic
model_init.param_dict['smhm_m0_0'] = mstellar_characteristic
model_init.param_dict['smhm_beta_0'] = mlow_slope
model_init.param_dict['smhm_delta_0'] = mhigh_slope
model_init.param_dict['scatter_model_param1'] = mstellar_scatter
model_init.mock.populate()
if survey == 'eco' or survey == 'resolvea':
if mf_type == 'smf':
limit = np.round(np.log10((10**8.9) / 2.041), 1)
elif mf_type == 'bmf':
limit = np.round(np.log10((10**9.4) / 2.041), 1)
elif survey == 'resolveb':
if mf_type == 'smf':
limit = np.round(np.log10((10**8.7) / 2.041), 1)
elif mf_type == 'bmf':
limit = np.round(np.log10((10**9.1) / 2.041), 1)
sample_mask = model_init.mock.galaxy_table['stellar_mass'] >= 10**limit
gals = model_init.mock.galaxy_table[sample_mask]
gals_df = gals.to_pandas()
return gals_df
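# Worked check of the completeness limit above for the ECO stellar-mass case:
# log10(10**8.9 / 2.041) = 8.9 - log10(2.041) ~ 8.9 - 0.31 = 8.59, which
# np.round(..., 1) turns into 8.6, the h-corrected limit applied to
# model_init.mock.galaxy_table['stellar_mass'] before converting to pandas.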
def get_best_fit_model(best_fit_params):
"""
Get SMF and SMHM information of best fit model given a survey
Parameters
----------
best_fit_params: array
Array of best-fit SMHM parameter values
Returns
---------
max_model: array
Array of x-axis mass values
phi_model: array
Array of y-axis values
err_tot_model: array
Array of error values per bin
cen_gals: array
Array of central galaxy masses
cen_halos: array
Array of central halo masses
"""
v_sim = 130**3
gals_df = populate_mock(best_fit_params)
mstellar_mock = gals_df.stellar_mass.values # Read stellar masses
if mf_type == 'smf':
max_model, phi_model, err_tot_model, bins_model, counts_model =\
diff_smf(mstellar_mock, v_sim, True)
elif mf_type == 'bmf':
max_model, phi_model, err_tot_model, bins_model, counts_model =\
diff_bmf(mstellar_mock, v_sim, True)
cen_gals, cen_halos = get_centrals_mock(gals_df)
return max_model, phi_model, err_tot_model, counts_model, cen_gals, \
cen_halos
def get_xmhm_mocks(survey, path, mf_type):
"""
Calculate stellar/baryonic mass - halo mass relation of centrals from mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
mf_type: string
Type of mass function ('smf' or 'bmf')
Returns
---------
x_arr, y_arr, y_std_err_arr: arrays
Binned central halo mass, central galaxy mass and error per bin for each mock
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
x_arr = []
y_arr = []
y_std_err_arr = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = read_mock_catl(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
if mf_type == 'smf':
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit) & \
(mock_pd.logmstar.values >= mstar_limit)]
cen_gals = np.log10(10**(mock_pd.logmstar.loc
[mock_pd.cs_flag == 1])/2.041)
# cen_halos = mock_pd.M_group.loc[mock_pd.cs_flag == 1]
cen_halos = mock_pd.loghalom.loc[mock_pd.cs_flag == 1]
x,y,y_std,y_std_err = Stats_one_arr(cen_halos, cen_gals, base=0.4,
bin_statval='center')
elif mf_type == 'bmf':
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit)]
cen_gals_stellar = np.log10(10**(mock_pd.logmstar.loc
[mock_pd.cs_flag == 1])/2.041)
cen_gals_gas = mock_pd.mhi.loc[mock_pd.cs_flag == 1]
cen_gals_gas = np.log10((1.4 * cen_gals_gas)/2.041)
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
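# Note on the EXP_TAB keys below: each key has the form
# '<instance>&<class index>&<discretisation index>'. With 7 petal-length bin
# edges and 6 petal-width bin edges, the nested loops above build
# C(7, 2) * C(6, 2) = 21 * 15 = 315 discretisations, so every
# (instance, class) pair contributes indices 0 through 314.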
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
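    # Entries for the 'versicolor' class follow. Each key appears to follow the
    # pattern '<class>&<i>&<j>' (the exact meaning of i and j is an assumption
    # inferred from the key layout), and each value is a 2-element np.array.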
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&192': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]),
'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]),
'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]),
'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]),
'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]),
'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]),
'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]),
'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]),
'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'versicolor&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]),
'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]),
'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]),
'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]),
'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&285': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&286': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&290': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&291': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&292': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&293': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&294': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&295': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&296': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&306': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&307': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&309': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&310': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&2': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&3': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&6': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&7': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&9': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&10': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&12': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&13': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&14': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&17': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&18': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&21': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&22': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&24': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&25': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&27': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&28': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&29': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&32': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&33': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&36': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&37': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&39': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&40': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&42': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&43': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&44': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&45': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&46': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&50': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&51': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&52': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&53': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&54': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&55': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&56': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&66': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&67': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&69': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&70': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&75': np.array([0.0, 0.47562425924289314]),
'versicolor&2&76': np.array([0.0, 0.4854368956593117]),
'versicolor&2&77': np.array([0.0, -0.7348263896003956]),
'versicolor&2&78': np.array([0.0, -0.7920887571493729]),
'versicolor&2&79': np.array([0.0, -0.507614207038711]),
'versicolor&2&80': np.array([0.0, 0.4854368956593117]),
'versicolor&2&81': np.array([0.0, -0.3982542883933272]),
'versicolor&2&82': np.array([0.0, -0.08633733326458487]),
'versicolor&2&83': np.array([0.0, 0.4039238345412103]),
'versicolor&2&84': np.array([0.0, -0.38897705551367706]),
'versicolor&2&85': np.array([0.0, -0.06915310813754129]),
'versicolor&2&86': np.array([0.0, 0.41580041887839214]),
'versicolor&2&87': np.array([0.0, -0.7348263896003956]),
'versicolor&2&88': np.array([0.0, -0.7348263896003956]),
'versicolor&2&89': np.array([0.0, -0.7920887571493729]),
'versicolor&2&90': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&91': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&92': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&93': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&94': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&95': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&96': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&97': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&98': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&99': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&100': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&101': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&102': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&103': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&104': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&105': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&106': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&107': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&108': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&109': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&110': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&111': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&112': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&113': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&114': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&115': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&116': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&117': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&118': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&119': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&120': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&121': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&122': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&123': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&124': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&125': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&126': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&127': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&128': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&129': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&130': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&131': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&132': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&133': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&134': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&135': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&136': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&137': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&138': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&139': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&140': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&141': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&142': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&143': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&144': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&145': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&146': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&147': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&148': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&149': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&150': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&151': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&152': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&153': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&154': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&155': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&156': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&157': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&158': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&159': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&160': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&161': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&162': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&163': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&164': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&165': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&166': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&167': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&168': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&169': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&170': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&171': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&172': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&173': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&174': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&175': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&176': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&177': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&178': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&179': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&180': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&181': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&182': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&183': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&184': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&185': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&186': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&187': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&188': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&189': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&190': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&191': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&192': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&193': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&194': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&195': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&196': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&197': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&198': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&199': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&200': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&201': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&202': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&203': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&204': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&205': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&206': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&207': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&208': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&209': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&210': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&211': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&212': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&213': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&214': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&215': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&216': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&217': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&218': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&219': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&220': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&221': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&222': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&223': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&224': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&225': np.array([-0.5775629083348267, 0.7118687782288384]),
'versicolor&2&226': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&227': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&228': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&229': np.array([-0.25603689955471853, -0.451727980232351]),
'versicolor&2&230': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&231': np.array([-0.1269405801024398, -0.34161216844748166]),
'versicolor&2&232': np.array([-0.33176333807327857, 0.09538228407203546]),
'versicolor&2&233': np.array([-0.564696311454556, 0.6421194512020755]),
'versicolor&2&234': np.array([-0.12669523681593967, -0.32786313310034665]),
'versicolor&2&235': np.array([-0.35960845047491363, 0.1335988694092619]),
'versicolor&2&236': np.array([-0.589572650064144, 0.6697478899606418]),
'versicolor&2&237': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&238': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&239': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&240': np.array([0.05667262840030629, 0.4335746514880877]),
'versicolor&2&241': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&242': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&243': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&244': np.array([0.3463149754241171, -0.5568366400939154]),
'versicolor&2&245': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&246': np.array([0.4022739113634462, -0.4700171786183992]),
'versicolor&2&247': np.array([0.5046771347249378, -0.33609610934748635]),
'versicolor&2&248': np.array([0.1370187510624256, 0.30303755274337163]),
'versicolor&2&249': np.array([0.41683021879255133, -0.4812793747667524]),
'versicolor&2&250': np.array([0.5150371666265885, -0.33852139184639396]),
'versicolor&2&251': np.array([0.10611499646955676, 0.33589829339460586]),
'versicolor&2&252': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&253': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&254': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&255': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&256': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&257': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&258': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&259': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&260': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&261': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&262': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&263': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&264': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&265': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&266': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&267': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&268': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&269': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&270': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&271': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&275': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&276': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&277': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&278': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&279': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&280': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&281': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&285': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&286': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&290': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&291': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&292': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&293': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&294': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&295': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&296': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&300': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&301': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&305': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&306': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&307': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&308': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&309': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&310': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&311': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'virginica&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&2': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&3': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&6': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&7': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&9': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&10': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&12': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&13': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&14': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&17': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&18': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&21': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&22': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&24': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&25': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&27': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&28': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&29': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&32': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&33': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&36': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&37': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&39': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&40': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&42': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&43': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&44': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&45': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&46': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&47': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&48': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&49': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&50': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&51': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&52': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&53': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&54': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&55': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&56': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&57': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&58': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&59': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&62': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&63': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&66': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&67': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&69': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&70': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&72': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&73': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&74': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&0&75': np.array([0.0, -0.95124502153736]),
'virginica&0&76': np.array([0.0, -0.9708703761803881]),
'virginica&0&77': np.array([0.0, -0.5659706098422994]),
'virginica&0&78': np.array([0.0, -0.3962828716108186]),
'virginica&0&79': np.array([0.0, 0.2538069363248767]),
'virginica&0&80': np.array([0.0, -0.9708703761803881]),
'virginica&0&81': np.array([0.0, -0.5659706098422994]),
'virginica&0&82': np.array([0.0, -0.3962828716108186]),
'virginica&0&83': np.array([0.0, -0.8943993997517804]),
'virginica&0&84': np.array([0.0, -0.5659706098422994]),
'virginica&0&85': np.array([0.0, -0.3962828716108186]),
'virginica&0&86': np.array([0.0, -0.9166476163222441]),
'virginica&0&87': np.array([0.0, -0.3962828716108186]),
'virginica&0&88': np.array([0.0, -0.5466925844560601]),
'virginica&0&89': np.array([0.0, -0.38529908946531777]),
'virginica&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&92': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&93': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&96': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&97': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&99': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&100': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&102': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&103': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&104': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&107': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&108': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&111': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&112': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&114': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&115': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&117': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&118': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&119': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&120': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&121': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&122': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&123': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&124': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&125': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&126': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&127': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&128': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&129': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&130': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&131': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&132': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&133': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&134': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&137': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&138': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&141': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&142': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&144': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&145': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&147': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&148': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&149': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&152': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&153': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&156': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&157': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&159': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&160': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&162': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&163': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&164': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&167': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&168': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&171': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&172': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&174': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&175': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&177': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&178': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&179': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&180': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&181': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&182': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&183': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&184': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&185': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&186': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&187': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&188': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&189': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&190': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&191': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&192': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&193': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&194': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&197': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&198': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&201': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&202': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&204': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&205': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&207': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&208': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&209': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&212': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&213': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&216': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&217': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&219': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&220': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&222': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&223': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&224': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&225': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&226': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&227': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&228': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&229': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&230': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&231': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&232': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&233': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&234': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&235': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&236': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&237': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&238': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&239': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'virginica&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&242': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&243': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'virginica&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&246': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&247': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'virginica&0&249': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&250': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'virginica&0&252': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&253': np.array([-0.544626974647221, -0.24972982107967573]),
'virginica&0&254': np.array([-0.6426355680762406, -0.20016519137103667]),
'virginica&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&257': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&258': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&261': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&262': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&264': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&265': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&267': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&268': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&269': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&270': np.array([-0.04201361383207032, -0.9372571358382161]),
'virginica&0&271': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&272': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&273': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&274': np.array([-0.4167677904879879, 0.22207334821665425]),
'virginica&0&275': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&276': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&277': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&278': np.array([-0.07857689135903215, -0.8696882596532965]),
'virginica&0&279': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&280': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&281': np.array([-0.05160969201296555, -0.9000166344885441]),
'virginica&0&282': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&283': np.array([-0.0766197045034485, -0.5080325256323984]),
'virginica&0&284': np.array([-0.32767091750230254, -0.19689316772421933]),
'virginica&0&285': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&286': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&287': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&288': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&289': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&290': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&291': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&292': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&293': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&294': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&295': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&296': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&297': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&298': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&299': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&302': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&303': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&306': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&307': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&309': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&310': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&312': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&313': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&314': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&2': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&3': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&6': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&7': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&9': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&10': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&12': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&13': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&14': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&17': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&18': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&21': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&22': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&24': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&25': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&27': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&28': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&29': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&32': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&33': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&36': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&37': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&39': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&40': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&42': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&43': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&44': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&45': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&46': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&47': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&48': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&49': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&50': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&51': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&52': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&53': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&54': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&55': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&56': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&57': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&58': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&59': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&62': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&63': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&66': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&67': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&69': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&70': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&72': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&73': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&74': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&1&75': np.array([0.0, 0.4756207622944677]),
'virginica&1&76': np.array([0.0, 0.4854334805210761]),
'virginica&1&77': np.array([0.0, -0.16885577975809632]),
'virginica&1&78': np.array([0.0, -0.39580588553855395]),
'virginica&1&79': np.array([0.0, 0.2538072707138344]),
'virginica&1&80': np.array([0.0, 0.4854334805210761]),
'virginica&1&81': np.array([0.0, -0.16885577975809632]),
'virginica&1&82': np.array([0.0, -0.39580588553855395]),
'virginica&1&83': np.array([0.0, 0.4904755652105692]),
'virginica&1&84': np.array([0.0, -0.16885577975809632]),
'virginica&1&85': np.array([0.0, -0.39580588553855395]),
'virginica&1&86': np.array([0.0, 0.5008471974438506]),
'virginica&1&87': np.array([0.0, -0.39580588553855395]),
'virginica&1&88': np.array([0.0, -0.14423919730424817]),
'virginica&1&89': np.array([0.0, -0.3847817540585927]),
'virginica&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&92': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&93': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&96': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&97': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&99': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&100': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&102': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&103': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&104': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&107': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&108': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&111': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&112': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&114': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&115': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&117': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&118': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&119': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&120': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&121': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&122': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&123': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&124': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&125': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&126': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&127': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&128': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&129': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&130': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&131': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&132': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&133': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&134': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&137': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&138': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&141': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&142': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&144': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&145': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&147': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&148': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&149': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&152': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&153': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&156': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&157': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&159': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&160': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&162': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&163': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&164': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&167': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&168': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&171': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&172': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&174': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&175': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&177': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&178': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&179': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&180': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&181': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&182': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&183': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&184': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&185': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&186': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&187': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&188': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&189': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&190': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&191': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&192': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&193': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&194': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&197': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&198': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&201': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&202': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&204': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&205': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&207': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&208': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&209': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&212': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&213': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&216': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&217': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&219': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&220': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&222': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&223': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&224': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&225': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&226': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&227': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&228': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&229': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&230': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&231': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&232': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&233': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&234': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&235': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&236': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&237': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&238': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&239': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'virginica&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&242': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&243': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'virginica&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&246': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&247': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'virginica&1&249': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&250': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'virginica&1&252': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&253': np.array([0.581569928198426, -0.46134543884925855]),
'virginica&1&254': np.array([0.42361197252581306, -0.5068181610814407]),
'virginica&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&257': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&258': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&261': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&262': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&264': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&265': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&267': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&268': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&269': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&270': np.array([-0.6288817118959938, 0.6849987400957501]),
'virginica&1&271': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&272': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&273': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&274': np.array([-0.3507937472799825, 0.22709708691079003]),
'virginica&1&275': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&276': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&277': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&278': np.array([-0.6219129029345898, 0.6860569455333333]),
'virginica&1&279': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&280': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&281': np.array([-0.6423063482710314, 0.7078274136226649]),
'virginica&1&282': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&283': np.array([-0.38798262782075055, 0.05152547330256509]),
'virginica&1&284': np.array([-0.23804537254556749, -0.24790919248823104]),
'virginica&1&285': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&286': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&287': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&288': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&289': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&290': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&291': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&292': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&293': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&294': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&295': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&296': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&297': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&298': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&299': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&302': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&303': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&306': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&307': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&309': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&310': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&312': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&313': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&314': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&2': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&3': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&6': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&7': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&9': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&10': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&12': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&13': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&14': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&17': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&18': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&21': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&22': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&24': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&25': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&27': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&28': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&29': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'virginica&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&32': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&33': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'virginica&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&36': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&37': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'virginica&2&39': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&40': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'virginica&2&42': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&43': np.array([0.15466782862660866, 0.5877736906472755]),
'virginica&2&44': np.array([0.37833006296225374, 0.5922410451071548]),
'virginica&2&45': np.array([0.8252668830593566, 0.11450866713130668]),
'virginica&2&46': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&47': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&48': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&49': np.array([0.8735738195653328, -0.046438180466149094]),
'virginica&2&50': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&51': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&52': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&53': np.array([0.8388485924434891, 0.09800790238640067]),
'virginica&2&54': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&55': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&56': np.array([0.835455914569297, 0.10189258327760495]),
'virginica&2&57': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&58': np.array([0.6958244586699014, 0.2551528503043789]),
'virginica&2&59': np.array([0.7857855057542923, 0.17526869720012267]),
'virginica&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'virginica&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'virginica&2&62': np.array([-0.2661726847443776, 0.6902916602462779]),
'virginica&2&63': np.array([-0.2741128763380603, 0.7260889090887469]),
'virginica&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'virginica&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'virginica&2&66': np.array([-0.2661726847443776, 0.6902916602462779]),
'virginica&2&67': np.array([-0.2741128763380603, 0.7260889090887469]),
'virginica&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'virginica&2&69': np.array([-0.2661726847443776, 0.6902916602462779]),
'virginica&2&70': np.array([-0.2741128763380603, 0.7260889090887469]),
'virginica&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
'virginica&2&72': np.array([-0.2741128763380603, 0.7260889090887469]),
'virginica&2&73': np.array([-0.3951343262323671, 0.6428414057947632]),
'virginica&2&74': np.array([-0.4001176958439725, 0.6972674869002595]),
'virginica&2&75': np.array([0.0, 0.47562425924289314]),
'virginica&2&76': np.array([0.0, 0.4854368956593117]),
'virginica&2&77': np.array([0.0, 0.7348263896003954]),
'virginica&2&78': np.array([0.0, 0.7920887571493729]),
'virginica&2&79': np.array([0.0, -0.507614207038711]),
'virginica&2&80': np.array([0.0, 0.4854368956593117]),
'virginica&2&81': np.array([0.0, 0.7348263896003954]),
'virginica&2&82': np.array([0.0, 0.7920887571493729]),
'virginica&2&83': np.array([0.0, 0.4039238345412103]),
'virginica&2&84': np.array([0.0, 0.7348263896003954]),
'virginica&2&85': np.array([0.0, 0.7920887571493729]),
'virginica&2&86': np.array([0.0, 0.41580041887839214]),
'virginica&2&87': np.array([0.0, 0.7920887571493729]),
'virginica&2&88': np.array([0.0, 0.6909317817603084]),
'virginica&2&89': np.array([0.0, 0.7700808435239105]),
'virginica&2&90': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&91': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&92': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&93': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&94': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&95': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&96': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&97': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&98': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&99': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&100': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&101': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&102': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&103': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&104': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&105': np.array([0.5188517506916897, 0.036358567813067386]),
'virginica&2&106': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&107': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&108': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&109': np.array([0.5354807894355184, -0.3418054346754283]),
'virginica&2&110': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&111': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&112': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&113': np.array([0.5917672401610737, -0.061499563231173816]),
'virginica&2&114': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&115': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&116': np.array([0.5967658480721675, -0.06546963852548916]),
'virginica&2&117': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&118': np.array([0.15466782862660866, 0.5877736906472755]),
'virginica&2&119': np.array([0.37833006296225374, 0.5922410451071548]),
'virginica&2&120': np.array([0.8252668830593566, 0.11450866713130668]),
'virginica&2&121': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&122': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&123': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&124': np.array([0.8735738195653328, -0.046438180466149094]),
'virginica&2&125': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&126': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&127': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&128': np.array([0.8388485924434891, 0.09800790238640067]),
'virginica&2&129': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&130': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&131': np.array([0.835455914569297, 0.10189258327760495]),
'virginica&2&132': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&133': np.array([0.6958244586699014, 0.2551528503043789]),
'virginica&2&134': np.array([0.7857855057542923, 0.17526869720012267]),
'virginica&2&135': np.array([-0.3219660907491514, 0.7482043503408669]),
'virginica&2&136': np.array([-0.43839553940476644, 0.8642446918440131]),
'virginica&2&137': np.array([-0.05474251929945989, 0.756649813459784]),
'virginica&2&138': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&139': np.array([0.2626914501948546, -0.5596191134224637]),
'virginica&2&140': np.array([-0.43839553940476644, 0.8642446918440131]),
'virginica&2&141': np.array([-0.05474251929945989, 0.756649813459784]),
'virginica&2&142': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&143': np.array([-0.19892251970509112, 0.5718543863753405]),
'virginica&2&144': np.array([-0.05474251929945989, 0.756649813459784]),
'virginica&2&145': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&146': np.array([-0.3047657227470458, 0.6788631774846587]),
'virginica&2&147': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&148': np.array([-0.05208936793838525, 0.7130690628827158]),
'virginica&2&149': np.array([0.16027227650394366, 0.7374943640687518]),
'virginica&2&150': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&151': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&152': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&153': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&154': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&155': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&156': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&157': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&158': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&159': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&160': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&161': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&162': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&163': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&164': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&165': np.array([0.5188517506916897, 0.036358567813067386]),
'virginica&2&166': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&167': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&168': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&169': np.array([0.5354807894355184, -0.3418054346754283]),
'virginica&2&170': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&171': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&172': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&173': np.array([0.5917672401610737, -0.061499563231173816]),
'virginica&2&174': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&175': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&176': np.array([0.5967658480721675, -0.06546963852548916]),
'virginica&2&177': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&178': np.array([0.15466782862660866, 0.5877736906472755]),
'virginica&2&179': np.array([0.37833006296225374, 0.5922410451071548]),
'virginica&2&180': np.array([0.8252668830593566, 0.11450866713130668]),
'virginica&2&181': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&182': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&183': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&184': np.array([0.8735738195653328, -0.046438180466149094]),
'virginica&2&185': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&186': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&187': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&188': np.array([0.8388485924434891, 0.09800790238640067]),
'virginica&2&189': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&190': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&191': np.array([0.835455914569297, 0.10189258327760495]),
'virginica&2&192': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&193': np.array([0.6958244586699014, 0.2551528503043789]),
'virginica&2&194': np.array([0.7857855057542923, 0.17526869720012267]),
'virginica&2&195': np.array([-0.3219660907491514, 0.7482043503408669]),
'virginica&2&196': np.array([-0.43839553940476644, 0.8642446918440131]),
'virginica&2&197': np.array([-0.05474251929945989, 0.756649813459784]),
'virginica&2&198': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&199': np.array([0.2626914501948546, -0.5596191134224637]),
'virginica&2&200': np.array([-0.43839553940476644, 0.8642446918440131]),
'virginica&2&201': np.array([-0.05474251929945989, 0.756649813459784]),
'virginica&2&202': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&203': np.array([-0.19892251970509112, 0.5718543863753405]),
'virginica&2&204': np.array([-0.05474251929945989, 0.756649813459784]),
'virginica&2&205': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&206': np.array([-0.3047657227470458, 0.6788631774846587]),
'virginica&2&207': np.array([0.17291299562995102, 0.7651995812779756]),
'virginica&2&208': np.array([-0.05208936793838525, 0.7130690628827158]),
'virginica&2&209': np.array([0.16027227650394366, 0.7374943640687518]),
'virginica&2&210': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&211': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&212': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&213': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&214': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&215': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&216': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&217': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&218': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&219': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&220': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&221': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&222': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&223': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&224': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&225': np.array([0.8252668830593566, 0.11450866713130668]),
'virginica&2&226': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&227': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&228': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&229': np.array([0.8735738195653328, -0.046438180466149094]),
'virginica&2&230': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&231': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&232': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&233': np.array([0.8388485924434891, 0.09800790238640067]),
'virginica&2&234': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&235': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&236': np.array([0.835455914569297, 0.10189258327760495]),
'virginica&2&237': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&238': np.array([0.6958244586699014, 0.2551528503043789]),
'virginica&2&239': np.array([0.7857855057542923, 0.17526869720012267]),
'virginica&2&240': np.array([0.05667262840030629, 0.4335746514880877]),
'virginica&2&241': np.array([0.0202211257171063, 0.470123810164804]),
'virginica&2&242': np.array([-0.052990507284891915, 0.7625494034929867]),
'virginica&2&243': np.array([0.22461127196921116, 0.7375780139111495]),
'virginica&2&244': np.array([0.3463149754241171, -0.5568366400939154]),
'virginica&2&245': np.array([0.0202211257171063, 0.470123810164804]),
'virginica&2&246': np.array([-0.052990507284891915, 0.7625494034929867]),
'virginica&2&247': np.array([0.22461127196921116, 0.7375780139111495]),
'virginica&2&248': np.array([0.1370187510624256, 0.30303755274337163]),
'virginica&2&249': np.array([-0.052990507284891915, 0.7625494034929867]),
'virginica&2&250': np.array([0.22461127196921116, 0.7375780139111495]),
'virginica&2&251': np.array([0.10611499646955676, 0.33589829339460586]),
'virginica&2&252': np.array([0.22461127196921116, 0.7375780139111495]),
'virginica&2&253': np.array([-0.036942953551205526, 0.7110752599289349]),
'virginica&2&254': np.array([0.21902359555042725, 0.7069833524524777]),
'virginica&2&255': np.array([0.5188517506916897, 0.036358567813067386]),
'virginica&2&256': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&257': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&258': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&259': np.array([0.5354807894355184, -0.3418054346754283]),
'virginica&2&260': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&261': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&262': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&263': np.array([0.5917672401610737, -0.061499563231173816]),
'virginica&2&264': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&265': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&266': np.array([0.5967658480721675, -0.06546963852548916]),
'virginica&2&267': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&268': np.array([0.15466782862660866, 0.5877736906472755]),
'virginica&2&269': np.array([0.37833006296225374, 0.5922410451071548]),
'virginica&2&270': np.array([0.6708953257280641, 0.25225839574246695]),
'virginica&2&271': np.array([0.6634195777991901, 0.2600030585430812]),
'virginica&2&272': np.array([0.4116759784429769, 0.5265856222946096]),
'virginica&2&273': np.array([0.5333797278655124, 0.5027313480722909]),
'virginica&2&274': np.array([0.7675615377679706, -0.4491704351274441]),
'virginica&2&275': np.array([0.6634195777991901, 0.2600030585430812]),
'virginica&2&276': np.array([0.4116759784429769, 0.5265856222946096]),
'virginica&2&277': np.array([0.5333797278655124, 0.5027313480722909]),
'virginica&2&278': np.array([0.7004897942936227, 0.18363131411996206]),
'virginica&2&279': np.array([0.4116759784429769, 0.5265856222946096]),
'virginica&2&280': np.array([0.5333797278655124, 0.5027313480722909]),
'virginica&2&281': np.array([0.693916040283998, 0.19218922086587742]),
'virginica&2&282': np.array([0.5333797278655124, 0.5027313480722909]),
'virginica&2&283': np.array([0.46460233232419923, 0.45650705232983346]),
'virginica&2&284': np.array([0.5657162900478705, 0.4448023602124502]),
'virginica&2&285': np.array([0.8252668830593566, 0.11450866713130668]),
'virginica&2&286': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&287': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&288': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&289': np.array([0.8735738195653328, -0.046438180466149094]),
'virginica&2&290': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&291': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&292': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&293': np.array([0.8388485924434891, 0.09800790238640067]),
'virginica&2&294': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&295': np.array([0.7640280271176497, 0.19364537761420375]),
    'virginica&2&296': np.array([0.835455914569297, 0.10189258327760495]),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 28 21:10:21 2020
@author: pengning
does the Green's function Arnoldi iteration over a shell domain for spherical waves
nice analytical properties of polynomial representation lost when using shell domain leaving out origin
try going back to spatial discretization idea instead
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
from .shell_domain import shell_rho_M, shell_rho_N
import mpmath
from mpmath import mp
def grid_integrate_trap(integrandgrid,diffgrid):
#integrate a spatial grid representation of the integrand using trapezoid rule
return np.sum((integrandgrid[:-1]+integrandgrid[1:])*diffgrid/2.0)
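
# Hedged usage sketch (not part of the original module): a quick check of
# grid_integrate_trap against a known integral, int_0^1 r^2 dr = 1/3. The grid
# size of 101 points is an arbitrary illustrative choice.
def _demo_grid_integrate_trap():
    rgrid = np.linspace(0.0, 1.0, 101)
    integrand = rgrid**2
    approx = grid_integrate_trap(integrand, np.diff(rgrid))
    print('trapezoid estimate of int_0^1 r^2 dr:', approx, '(exact: 1/3)')
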
def rgrid_Mmn_normsqr(vecMgrid, rsqrgrid, rdiffgrid):
return np.real(grid_integrate_trap(np.conj(vecMgrid)*vecMgrid*rsqrgrid, rdiffgrid))
def rgrid_Mmn_dot(vecM1grid, vecM2grid, rsqrgrid, rdiffgrid):
return grid_integrate_trap(vecM1grid*vecM2grid*rsqrgrid, rdiffgrid)
def rgrid_Mmn_vdot(vecM1grid, vecM2grid, rsqrgrid, rdiffgrid):
return grid_integrate_trap(np.conj(vecM1grid)*vecM2grid*rsqrgrid, rdiffgrid)
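
# Hedged illustration (added; not in the original code): the helpers above
# implement the radial inner product <A,B> = int conj(A(r)) B(r) r^2 dr on the
# grid, so rgrid_Mmn_normsqr(v, ...) should match rgrid_Mmn_vdot(v, v, ...) up
# to rounding. A tiny self-check with an arbitrary smooth test vector:
def _demo_Mmn_inner_product(gridpts=200):
    rgrid = np.linspace(1.0, 2.0, gridpts)
    rsqrgrid = rgrid**2
    rdiffgrid = np.diff(rgrid)
    v = np.exp(1j*rgrid) / rgrid  # arbitrary complex test vector
    print('normsqr:', rgrid_Mmn_normsqr(v, rsqrgrid, rdiffgrid))
    print('vdot(v,v):', rgrid_Mmn_vdot(v, v, rsqrgrid, rdiffgrid))
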
def rgrid_Mmn_plot(vecMgrid, rgrid):
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
ax1.plot(rgrid,np.real(vecMgrid))
ax2.plot(rgrid,np.imag(vecMgrid))
plt.show()
def shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, vecMgrid):
"""
evaluates G(r,r')*vecM(r') over a shell region from R1 to R2
    the shell coordinates are passed in as rsqrgrid, a grid of r^2 values, and rdiffgrid, the spacings between neighboring grid points; these are supplied instead of the original rgrid so that they only need to be computed once in the main Arnoldi method
"""
#rsqrgrid = rgrid**2
#rdiffgrid = np.diff(rgrid)
RgMvecMrsqr_grid = RgMgrid*vecMgrid*rsqrgrid
Im_newvecMgrid = k**3 * grid_integrate_trap(RgMvecMrsqr_grid, rdiffgrid) * RgMgrid
    Re_ImMfactgrid = np.zeros_like(rsqrgrid, dtype=complex)
Re_ImMfactgrid[1:] = k**3 * np.cumsum((RgMvecMrsqr_grid[:-1]+RgMvecMrsqr_grid[1:])*rdiffgrid/2.0)
    rev_ImMvecMrsqr_grid = np.flip(ImMgrid*vecMgrid*rsqrgrid) #reverse the grid direction to evaluate the integrals running from kr' up to kR2
    Re_RgMfactgrid = np.zeros_like(rsqrgrid, dtype=complex)
Re_RgMfactgrid[:-1] = k**3 * np.flip(np.cumsum( (rev_ImMvecMrsqr_grid[:-1]+rev_ImMvecMrsqr_grid[1:])*np.flip(rdiffgrid)/2.0 ))
Re_newvecMgrid = -ImMgrid*Re_ImMfactgrid - RgMgrid*Re_RgMfactgrid
return Re_newvecMgrid + 1j*Im_newvecMgrid
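
# Hedged usage sketch (illustrative parameters, not from the original code):
# apply the shell Green's function action above to the regular wave RgM itself
# on a radial grid from R1 to R2; this mirrors the setup of the Arnoldi routines
# further below. n, k, R1, R2 and gridpts are arbitrary example values.
def _demo_shell_Green_Mmn_action(n=1, k=1.0, R1=1.0, R2=2.0, gridpts=500):
    rgrid = np.linspace(R1, R2, gridpts)
    rsqrgrid = rgrid**2
    rdiffgrid = np.diff(rgrid)
    RgMgrid = sp.spherical_jn(n, k*rgrid).astype(complex)
    ImMgrid = sp.spherical_yn(n, k*rgrid).astype(complex)
    GRgM = shell_Green_grid_Mmn_vec(n, k, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, RgMgrid)
    rgrid_Mmn_plot(GRgM, rgrid)  # plots real and imaginary parts of G*RgM
    return GRgM
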
def shell_Green_grid_Arnoldi_Mmn_oneshot(n,k,R1,R2, invchi, vecnum, gridpts=200):
rgrid = np.linspace(R1,R2,gridpts)
rsqrgrid = rgrid**2
rdiffgrid = np.diff(rgrid)
RgMgrid = sp.spherical_jn(n, k*rgrid) #the argument for radial part of spherical waves is kr
ImMgrid = sp.spherical_yn(n, k*rgrid)
    RgMgrid = RgMgrid.astype(complex)
    ImMgrid = ImMgrid.astype(complex)
vecMgrid = RgMgrid / np.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid,rdiffgrid))
rgrid_Mmn_plot(vecMgrid, rgrid)
unitMvecs = [vecMgrid]
for i in range(1,vecnum):
newvecMgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs[-1])
        newvecMgrid[:] = np.real(newvecMgrid) #Im(G*M) is proportional to RgM (see shell_Green_grid_Mmn_vec) and is removed by the orthogonalization below, so keep only the real part
print('before orthogonalization and normalization:')
rgrid_Mmn_plot(newvecMgrid, rgrid)
for j in range(len(unitMvecs)):
unitMvec = unitMvecs[j]
coeff = rgrid_Mmn_vdot(unitMvec, newvecMgrid, rsqrgrid,rdiffgrid)
newvecMgrid -= coeff*unitMvec
newvecMgrid /= np.sqrt(rgrid_Mmn_normsqr(newvecMgrid, rsqrgrid,rdiffgrid))
rgrid_Mmn_plot(newvecMgrid, rgrid)
        print(rgrid_Mmn_vdot(RgMgrid, newvecMgrid, rsqrgrid,rdiffgrid)) #overlap with RgM; should be ~0 after orthogonalization
unitMvecs.append(newvecMgrid)
    Green = np.zeros((vecnum,vecnum), dtype=complex)
for i in range(vecnum):
for j in range(vecnum):
GMjgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs[j])
Green[i,j] = rgrid_Mmn_vdot(unitMvecs[i],GMjgrid, rsqrgrid,rdiffgrid)
print(Green)
Umat = np.eye(vecnum)*invchi - Green
return Green, Umat
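# Example call (sketch): project the M-wave Green's function onto a few Arnoldi vectors.
# chi below is an assumed material susceptibility (invchi = 1/chi); the call is left commented
# out because the routine plots every intermediate vector with rgrid_Mmn_plot.
# Green_M, Umat_M = shell_Green_grid_Arnoldi_Mmn_oneshot(1, 1.0, 0.5, 1.0, 1.0/(3.0+1e-2j), 4)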
def shell_Green_grid_Arnoldi_Mmn_step(n,k, invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=False):
"""
this method does one more Arnoldi step, given existing Arnoldi vectors in unitMvecs
the last entry in unitMvecs is G*unitMvecs[-2] without orthogonalization and normalization
so len(unitMvecs) = len(Gmat)+1 going in and going out of the method
this is setup for most efficient iteration since G*unitMvec is only computed once
the unitMvecs list is modified on spot; a new enlarged Gmat nparray is returned at the end
"""
#first, begin by orthogonalizing and normalizing unitMvecs[-1]
#use relation U = V^{-1} - G
"""
see comment for analogous method for N waves, shell_Green_grid_Arnoldi_Nmn_step
coef1 = Gmat[-1,-1]
unitMvecs[-1] -= coef1*unitMvecs[-2]
if Gmat.shape[0]>1: #since G has symmetric Arnoldi representation (so tridiagonal), G*M_j has non-zero overlap with M_j and M_{j-1}
coef2 = Gmat[-2,-1]
unitMvecs[-1] -= coef2*unitMvecs[-3]
unitMvecs[-1][:] = np.real(unitMvecs[-1][:])
"""
vecnum = Gmat.shape[0]
for i in range(vecnum):
coef = rgrid_Mmn_vdot(unitMvecs[i], unitMvecs[-1], rsqrgrid,rdiffgrid)
unitMvecs[-1] -= coef*unitMvecs[i]
unitMvecs[-1][:] = np.real(unitMvecs[-1][:])
norm = np.sqrt(rgrid_Mmn_normsqr(unitMvecs[-1], rsqrgrid,rdiffgrid))
unitMvecs[-1] /= norm
if plotVectors:
rgrid_Mmn_plot(unitMvecs[-1], rgrid)
#get new vector
newvecM = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, unitMvecs[-1])
newvecM[:] = np.real(newvecM)
newGmat = np.zeros((Gmat.shape[0]+1,Gmat.shape[1]+1), dtype=complex)
newGmat[:-1,:-1] = Gmat[:,:]
newGmat[-1,-1] = rgrid_Mmn_vdot(unitMvecs[-1], newvecM, rsqrgrid,rdiffgrid)
newGmat[-2,-1] = rgrid_Mmn_vdot(unitMvecs[-2], newvecM, rsqrgrid,rdiffgrid)
newGmat[-1,-2] = newGmat[-2,-1]
unitMvecs.append(newvecM) #append to end of unitMvecs for next round of iteration
return newGmat
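# Usage sketch (this is exactly what shell_Green_grid_Arnoldi_Mmn_Uconverge below automates):
# starting from a j x j Gmat and j+1 vectors in unitMvecs, each call orthogonalizes the trailing
# vector, grows Gmat by one row/column, and appends the next un-orthogonalized G*vec, so the
# invariant len(unitMvecs) == Gmat.shape[0] + 1 holds before and after every call, e.g.
# Gmat = shell_Green_grid_Arnoldi_Mmn_step(n, k, invchi, rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat)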
def shell_Green_grid_Arnoldi_Mmn_Uconverge(n,k,R1,R2, invchi, gridpts=1000, Unormtol=1e-10, veclim=3, delveclim=2, plotVectors=False):
rgrid = np.linspace(R1,R2,gridpts)
rsqrgrid = rgrid**2
rdiffgrid = np.diff(rgrid)
RgMgrid = sp.spherical_jn(n, k*rgrid) #the argument for radial part of spherical waves is kr
ImMgrid = sp.spherical_yn(n, k*rgrid)
RgMgrid = RgMgrid.astype(complex)
ImMgrid = ImMgrid.astype(complex)
vecMgrid = RgMgrid / np.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid,rdiffgrid))
unitMvecs = [vecMgrid]
if plotVectors:
rgrid_Mmn_plot(vecMgrid, rgrid)
GvecMgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, vecMgrid)
Gmat = np.array([[rgrid_Mmn_vdot(vecMgrid, GvecMgrid, rsqrgrid,rdiffgrid)]], dtype=complex)
Uinv = invchi*np.eye(1)-Gmat
unitMvecs.append(GvecMgrid) #append unorthogonalized, unnormalized Arnoldi vector for further iterations
prevUnorm = 1.0/Uinv[0,0]
i=1
while i<veclim:
Gmat = shell_Green_grid_Arnoldi_Mmn_step(n,k,invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=plotVectors)
i += 1
if i==veclim:
#solve for first column of U and see if its norm has converged
Uinv = invchi*np.eye(Gmat.shape[0])-Gmat
b = np.zeros((Uinv.shape[0],1))
b[0] = 1.0
x = np.linalg.solve(Uinv,b)
Unorm = np.linalg.norm(x)
print('Unorm:', Unorm)
if np.abs(Unorm-prevUnorm) > np.abs(Unorm)*Unormtol:
veclim += delveclim
prevUnorm = Unorm
return RgMgrid, ImMgrid, unitMvecs, Uinv, Gmat
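# Example call (sketch): chi is an assumed susceptibility, invchi = 1/chi. Arnoldi vectors keep
# being added until the norm of x solving (invchi*I - Gmat) x = e1 changes by less than a
# relative tolerance Unormtol between rounds of delveclim added vectors.
# RgM, ImM, Mvecs, Uinv, Gmat = shell_Green_grid_Arnoldi_Mmn_Uconverge(1, 1.0, 0.5, 1.0, 1.0/(3.0+1e-2j))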
def rgrid_Nmn_dot(vecB1grid,vecP1grid, vecB2grid,vecP2grid, rsqrgrid,rdiffgrid):
return grid_integrate_trap((vecB1grid*vecB2grid+vecP1grid*vecP2grid)*rsqrgrid, rdiffgrid)
def rgrid_Nmn_vdot(vecB1grid,vecP1grid, vecB2grid,vecP2grid, rsqrgrid,rdiffgrid):
return grid_integrate_trap((np.conj(vecB1grid)*vecB2grid+np.conj(vecP1grid)*vecP2grid)*rsqrgrid, rdiffgrid)
def rgrid_Nmn_normsqr(vecBgrid,vecPgrid, rsqrgrid,rdiffgrid):
return np.real(rgrid_Nmn_vdot(vecBgrid,vecPgrid, vecBgrid,vecPgrid, rsqrgrid,rdiffgrid))
def rgrid_Nmn_plot(vecBgrid,vecPgrid, rgrid):
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4,figsize=(10,4))
ax1.plot(rgrid,np.real(vecBgrid))
ax2.plot(rgrid,np.real(vecPgrid))
ax3.plot(rgrid,np.imag(vecBgrid))
ax4.plot(rgrid,np.imag(vecPgrid))
ax1.set_title('B real'); ax2.set_title('P real'); ax3.set_title('B imag'); ax4.set_title('P imag')
plt.show()
def shell_Green_grid_Nmn_vec(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, vecBgrid,vecPgrid):
"""
evaluates G(r,r')*vecN(r') over a shell region from R1 to R2
the region coordinates are contained in rsqrgrid, a grid of r^2, and rdiffgrid, the distances between neighboring grid points; these instead of the original rgrid are given so that they only need to be computed once in main Arnoldi method
"""
#rsqrgrid = rgrid**2
#rdiffgrid = np.diff(rgrid)
RgNvecNrsqr_grid = (RgBgrid*vecBgrid+RgPgrid*vecPgrid)*rsqrgrid
imfac = k**3 * grid_integrate_trap(RgNvecNrsqr_grid, rdiffgrid)
Im_newvecBgrid = imfac * RgBgrid
Im_newvecPgrid = imfac * RgPgrid
Re_ImNfactgrid = np.zeros_like(rsqrgrid, dtype=complex)
Re_ImNfactgrid[1:] = k**3 * np.cumsum((RgNvecNrsqr_grid[:-1]+RgNvecNrsqr_grid[1:])*rdiffgrid/2.0)
rev_ImNvecNrsqr_grid = np.flip((ImBgrid*vecBgrid + ImPgrid*vecPgrid) * rsqrgrid) #reverse the grid direction to evaluate integrands of the form kr' to kR2
Re_RgNfactgrid = np.zeros_like(rsqrgrid, dtype=complex)
Re_RgNfactgrid[:-1] = k**3 * np.flip(np.cumsum( (rev_ImNvecNrsqr_grid[:-1]+rev_ImNvecNrsqr_grid[1:])*np.flip(rdiffgrid)/2.0 ))
Re_newvecBgrid = -ImBgrid*Re_ImNfactgrid - RgBgrid*Re_RgNfactgrid
Re_newvecPgrid = -ImPgrid*Re_ImNfactgrid - RgPgrid*Re_RgNfactgrid - vecPgrid #last term is delta contribution
return Re_newvecBgrid + 1j*Im_newvecBgrid, Re_newvecPgrid + 1j*Im_newvecPgrid
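# Example (sketch): apply the shell Green's operator once to the normalized regular N wave.
# Parameter values are illustrative; the B/P radial components follow the construction used in
# shell_Green_grid_Arnoldi_Nmn_oneshot below.
_demoN_n, _demoN_k = 1, 1.0
_demoN_rgrid = np.linspace(0.5, 1.0, 200)
_demoN_rsqr = _demoN_rgrid**2
_demoN_rdiff = np.diff(_demoN_rgrid)
_demoN_kr = _demoN_k*_demoN_rgrid
_demoN_RgB = (sp.spherical_jn(_demoN_n, _demoN_kr)/_demoN_kr + sp.spherical_jn(_demoN_n, _demoN_kr, derivative=True)).astype(complex)
_demoN_RgP = (np.sqrt(_demoN_n*(_demoN_n+1))*sp.spherical_jn(_demoN_n, _demoN_kr)/_demoN_kr).astype(complex)
_demoN_ImB = (sp.spherical_yn(_demoN_n, _demoN_kr)/_demoN_kr + sp.spherical_yn(_demoN_n, _demoN_kr, derivative=True)).astype(complex)
_demoN_ImP = (np.sqrt(_demoN_n*(_demoN_n+1))*sp.spherical_yn(_demoN_n, _demoN_kr)/_demoN_kr).astype(complex)
_demoN_norm = np.sqrt(rgrid_Nmn_normsqr(_demoN_RgB, _demoN_RgP, _demoN_rsqr, _demoN_rdiff))
_demoN_GB, _demoN_GP = shell_Green_grid_Nmn_vec(_demoN_n, _demoN_k, _demoN_rsqr, _demoN_rdiff, _demoN_RgB, _demoN_RgP, _demoN_ImB, _demoN_ImP, _demoN_RgB/_demoN_norm, _demoN_RgP/_demoN_norm)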
def shell_Green_grid_Arnoldi_Nmn_oneshot(n,k,R1,R2, invchi, vecnum, gridpts=200):
rgrid = np.linspace(R1,R2,gridpts)
rsqrgrid = rgrid**2
rdiffgrid = np.diff(rgrid)
RgBgrid = sp.spherical_jn(n, k*rgrid)/(k*rgrid) + sp.spherical_jn(n,k*rgrid,derivative=True) #the argument for radial part of spherical waves is kr
RgPgrid = np.sqrt(n*(n+1))*sp.spherical_jn(n, k*rgrid)/(k*rgrid)
ImBgrid = sp.spherical_yn(n, k*rgrid)/(k*rgrid) + sp.spherical_yn(n,k*rgrid,derivative=True)
ImPgrid = np.sqrt(n*(n+1))*sp.spherical_yn(n, k*rgrid)/(k*rgrid)
RgBgrid = RgBgrid.astype(complex)
RgPgrid = RgPgrid.astype(complex)
ImBgrid = ImBgrid.astype(complex)
ImPgrid = ImPgrid.astype(complex)
normvec = np.sqrt(rgrid_Nmn_normsqr(RgBgrid,RgPgrid, rsqrgrid,rdiffgrid))
vecBgrid = RgBgrid / normvec
vecPgrid = RgPgrid / normvec
rgrid_Nmn_plot(vecBgrid,vecPgrid, rgrid)
unitBvecs = [vecBgrid]; unitPvecs = [vecPgrid]
for i in range(1,vecnum):
newvecBgrid, newvecPgrid = shell_Green_grid_Nmn_vec(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs[-1],unitPvecs[-1])
newvecBgrid[:] = np.real(newvecBgrid)
newvecPgrid[:] = np.real(newvecPgrid)
print('before orthogonalization and normalization:')
rgrid_Nmn_plot(newvecBgrid,newvecPgrid, rgrid)
for j in range(len(unitBvecs)):
unitBvec = unitBvecs[j]; unitPvec = unitPvecs[j]
coeff = rgrid_Nmn_vdot(unitBvec,unitPvec, newvecBgrid,newvecPgrid, rsqrgrid,rdiffgrid)
newvecBgrid -= coeff*unitBvec; newvecPgrid -= coeff*unitPvec
normvec = np.sqrt(rgrid_Nmn_normsqr(newvecBgrid,newvecPgrid, rsqrgrid,rdiffgrid))
newvecBgrid /= normvec; newvecPgrid /= normvec
rgrid_Nmn_plot(newvecBgrid,newvecPgrid, rgrid)
#print(rgrid_Mmn_vdot(RgMgrid, newvecMgrid, rsqrgrid,rdiffgrid))
unitBvecs.append(newvecBgrid); unitPvecs.append(newvecPgrid)
Green = np.zeros((vecnum,vecnum), dtype=complex)
for i in range(vecnum):
for j in range(vecnum):
GNj_Bgrid, GNj_Pgrid = shell_Green_grid_Nmn_vec(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs[j],unitPvecs[j])
Green[i,j] = rgrid_Nmn_vdot(unitBvecs[i],unitPvecs[i], GNj_Bgrid,GNj_Pgrid, rsqrgrid,rdiffgrid)
#print(Green)
Umat = np.eye(vecnum)*invchi - Green
return Green, Umat
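# Example call (sketch; chi is an assumed susceptibility, invchi = 1/chi). Left commented out
# because the routine plots every Arnoldi vector as it is built.
# Green_N, Umat_N = shell_Green_grid_Arnoldi_Nmn_oneshot(1, 1.0, 0.5, 1.0, 1.0/(3.0+1e-2j), 4)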
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import matplotlib.pyplot as plt
pio.templates.default = "simple_white"
from sklearn import linear_model
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
full_data = pd.read_csv(filename).dropna().drop_duplicates()
features = full_data
bad_rows_if_negative = ["bathrooms", "floors", "sqft_basement",
"yr_renovated"]
bad_rows_if_not_positive = ["price", "sqft_living", "sqft_lot",
"sqft_above", "yr_built", "sqft_living15",
"sqft_lot15"]
for col in bad_rows_if_negative:
features = features[features[col] >= 0]
for col in bad_rows_if_not_positive:
features = features[features[col] > 0]
start_date = pd.to_datetime(features["date"])
reference_date = pd.to_datetime('2014-01-01 00:00')
features["days"] = (start_date - reference_date).dt.days
features = features[features["view"].isin(range(0, 5)) & features[
"condition"].isin(range(3, 6)) & features["grade"].isin(range(5, 12))]
features = features[features["bedrooms"] < 7]
features = features[features["bathrooms"] < 4]
features = features[features["sqft_living"] < 5000]
features = features[features["floors"] < 4]
features = features[features["sqft_lot"] < 84000]
features = features[features["sqft_lot15"] < 1000000]
features = features[features["sqft_above"] < 3050]
# features = features[features["sqft_above"] < 1000]
# para_list = ["zipcode"]
features.loc[features.yr_renovated == 0, "yr_renovated"] = features["yr_built"]
# features = categorical_var(features, para_list)
features = features.dropna()
labels = features["price"]
features = features.drop(columns=["date", "price", "id", "lat", "long", "zipcode"])
return features, labels
def categorical_var(features, str_var_list):
for str_var in str_var_list:
var = features[str_var].to_list()
pd_var = pd.Series(var)
df_var = pd.get_dummies(pd_var)
features = pd.concat([features, df_var], axis='columns')
features = features.drop(columns=str_var_list)
return features
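# Example (sketch): one-hot encode the zipcode column, matching the commented-out
# para_list = ["zipcode"] usage in load_data above; the column name is taken from that comment.
# features = categorical_var(features, ["zipcode"])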
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
Xy = pd.concat([X, y], axis='columns')
sigma = Xy.std()
covariance = Xy.cov()
sigma_df = pd.DataFrame(sigma)
sigma_outer = sigma_df.dot(sigma_df.T)  # outer product sigma_i * sigma_j
pearson = covariance / sigma_outer  # Pearson correlation matrix
for i in X.columns:
fig = px.scatter(Xy, x=i, y="price", title="Pearson Correlation: " + str(pearson[i]["price"]))
fig.write_image(output_path + str(i) + "_vs_price.png")
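# Cross-check (sketch): the Pearson value for a single feature can also be computed directly;
# "sqft_living" is just an illustrative column name from this dataset.
# xcol = X["sqft_living"].astype(float)
# r = xcol.cov(y) / (xcol.std() * y.std())  # should match pearson["sqft_living"]["price"]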
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
df, prices = load_data("../datasets/house_prices.csv")
# Question 2 - Feature evaluation with respect to response
feature_evaluation(df, prices, "../figs/")
# Question 3 - Split samples into training- and testing sets.
train_X, train_y, test_X, test_y = split_train_test(df, prices)
# Question 4 - Fit model over increasing percentages of the overall training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
present = np.arange(10, 101)
reps = np.arange(10)
mue = []
std = []
confidence_up = []
confidence_down = []
# lin_reg = linear_model.LinearRegression()
for p in present:
lin_reg = LinearRegression()
sample_loss = np.empty([0])
for r in reps:
train_X_change, train_y_change, test_X_change, test_y_change = split_train_test(train_X, train_y, p/100)
lin_reg.fit(train_X_change, train_y_change)
sample_loss = np.append(sample_loss, lin_reg.loss(test_X, test_y))
mue.append(np.mean(sample_loss))
std.append(np.std(sample_loss))
confidence_up.append(mue[-1] + 2*std[-1])
confidence_down.append(mue[-1] - 2 * std[-1])
# collect the per-percentage statistics in their own frame; df has a different length and index
results = pd.DataFrame({"mue": mue, "confidence_up": confidence_up, "confidence_down": confidence_down}, index=present)
fig = go.Figure([
go.Scatter(
name='MeanError',
x=present,
y=results['mue'],
mode='lines',
line=dict(color='rgb(31, 119, 180)'),
),
go.Scatter(
name='Upper Bound',
x=present,
y=results['confidence_up'],
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
showlegend=False
),
go.Scatter(
name='Lower Bound',
x=present,
y=results['confidence_down'],
marker=dict(color="#444"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty',
showlegend=False
)
])
fig.update_layout(
yaxis_title='loss in test set',
xaxis_title='percent of training set used',
title='Mean loss as a function of training-set percentage, with confidence interval',
hovermode="x"
)
# fig.update_yaxes(range=[-40000000000, 40000000000])
# fig.update_xaxes(range=[10, 100])
# fig.update()
fig.write_image("../images/ex2q14graph.png")
# fig.show()
# fig = px.line(df, x=present, y=mue)
# fig.write_image()
plt.plot(present, mue)
plt.plot(present, confidence_up)
plt.plot(present, confidence_down)
print(prices.mean())
print(prices.var())
print(np.sqrt(mue[-1]))