| prompt (string, lengths 19 to 879k) | completion (string, lengths 3 to 53.8k) | api (string, lengths 8 to 59) |
|---|---|---|
'''
DESCRIPTION
----------
An assortment of code written for sanity checks on our 2017 TESS GI proposal
about difference imaging of clusters.
Most of this involves parsing the Kharchenko et al. (2013) table, hence the name
`parse_MWSC.py`.
The tools here do things like:
* Find how many open clusters we could observe
* Find how many member stars within those we could observe
* Compute TESS mags for everything (mostly via `ticgen`)
* Estimate blending effects, mainly through the dilution (computed just by
summing magnitudes appropriately)
* Using K+13's King profile fits, estimate the surface density of member stars.
It turns out that this radically underestimates the actual surface density
of stars (because of all the background blends). Moreover, for purposes of
motivating our difference imaging, "the number of stars in your aperture"
is more relevant than "a surface density", and even more relevant than both
of those is dilution.
So I settled on the dilution calculation.
The plotting scripts here also make the skymap figure of the proposal. (Where
are the clusters on the sky?)
USAGE
----------
From /src/, select desired functions from __main__ below. Then:
>>> python parse_MWSC.py > output.log
'''
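# A minimal sketch of the "dilution" figure of merit described above (the real
# per-aperture calculation lives in get_dilutions_and_distances below):
#
#   dilution = 10**(-0.4 * Tmag_target) / sum_i 10**(-0.4 * Tmag_i),
#
# where the sum runs over every star falling in the aperture, target included.
# E.g., a Tmag=10 target with one equally bright neighbor in the aperture has
# dilution = 0.5.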
import matplotlib.pyplot as plt, seaborn as sns
import pandas as pd, numpy as np
from astropy.table import Table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
import astropy.units as u
from math import pi
import pickle, os
from scipy.interpolate import interp1d
global COLORS
COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# cite:
#
# <NAME>. & <NAME>. 2017, ticgen: A tool for calculating a TESS
# magnitude, and an expected noise level for stars to be observed by TESS.,
# v1.0.0, Zenodo, doi:10.5281/zenodo.888217
#
# and Stassun & friends (2017).
import ticgen  # needed by get_stellar_data_too / get_dilutions_and_distances below
# # These two, from the website
# # http://dc.zah.uni-heidelberg.de/mwsc/q/clu/form
# # are apparently outdated; they returned too few results.
# close_certain = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
# close_junk = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
def get_cluster_data():
# Downloaded the MWSC from
# http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=J%2FA%2BA%2F558%2FA53&target=http&
tab = Table.read('../data/Kharchenko_2013_MWSC.vot', format='votable')
df = tab.to_pandas()
for colname in ['Type', 'Name', 'n_Type', 'SType']:
df[colname] = [e.decode('utf-8') for e in list(df[colname])]
# From erratum:
# For the Sun-like star, a 4 Re planet produces a transit depth of 0.13%. The
# limiting magnitude for transits to be detectable is about I_C = 11.4 . This
# also corresponds to K_s ~= 10.6 and a maximum distance of 290 pc, assuming no
# extinction.
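    # Quick sanity check of the quoted 0.13% depth (assuming a Sun-like host):
    # depth = (Rp/Rstar)^2 = (4 * R_Earth / R_Sun)^2 ~= (4 * 0.00917)^2 ~= 1.3e-3.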
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
N_c_r0 = int(np.sum(close['N1sr0']))
N_c_r1 = int(np.sum(close['N1sr1']))
N_c_r2 = int(np.sum(close['N1sr2']))
N_f_r0 = int(np.sum(far['N1sr0']))
N_f_r1 = int(np.sum(far['N1sr1']))
N_f_r2 = int(np.sum(far['N1sr2']))
type_d = {'a':'association', 'g':'globular cluster', 'm':'moving group',
'n':'nebulosity/presence of nebulosity', 'r':'remnant cluster',
's':'asterism', '': 'no label'}
ntype_d = {'o':'object','c':'candidate','':'no label'}
print('*'*50)
print('\nMilky Way Star Clusters (close := <500pc)'
'\nN_clusters: {:d}'.format(len(close))+\
'\nN_stars (in core): {:d}'.format(N_c_r0)+\
'\nN_stars (in central part): {:d}'.format(N_c_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_c_r2))
print('\n'+'*'*50)
print('\nMilky Way Star Clusters (far := <1000pc)'
'\nN_clusters: {:d}'.format(len(far))+\
'\nN_stars (in core): {:d}'.format(N_f_r0)+\
'\nN_stars (in central part): {:d}'.format(N_f_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_f_r2))
print('\n'+'*'*50)
####################
# Post-processing. #
####################
# Compute mean density
mean_N_star_per_sqdeg = df['N1sr2'] / (pi * df['r2']**2)
df['mean_N_star_per_sqdeg'] = mean_N_star_per_sqdeg
# Compute King profiles
king_profiles, theta_profiles = [], []
for rt, rc, k, d in zip(np.array(df['rt']),
np.array(df['rc']),
np.array(df['k']),
np.array(df['d'])):
sigma, theta = get_king_proj_density_profile(rt, rc, k, d)
king_profiles.append(sigma)
theta_profiles.append(theta)
df['king_profile'] = king_profiles
df['theta'] = theta_profiles
ra = np.array(df['RAJ2000'])
dec = np.array(df['DEJ2000'])
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
galactic_long = np.array(c.galactic.l)
galactic_lat = np.array(c.galactic.b)
ecliptic_long = np.array(c.barycentrictrueecliptic.lon)
ecliptic_lat = np.array(c.barycentrictrueecliptic.lat)
df['galactic_long'] = galactic_long
df['galactic_lat'] = galactic_lat
df['ecliptic_long'] = ecliptic_long
df['ecliptic_lat'] = ecliptic_lat
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
return close, far, df
def distance_histogram(df):
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
    hist, bin_edges = np.histogram(
            df['d'],
            bins=np.append(np.logspace(1, 6, int(1e3)), 1e7),
            density=False)
ax.step(bin_edges[:-1], np.cumsum(hist), 'k-', where='post')
ax.set_xlabel('distance [pc]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xlim([5e1,1e4])
ax.set_xscale('log')
ax.set_yscale('log')
f.tight_layout()
f.savefig('d_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_cumdist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
            hist, bin_edges = np.histogram(
                    dat[k],
                    bins=np.append(np.logspace(-2, 1, int(1e3)), 1e7),
                    density=False)
ax.step(bin_edges[:-1], np.cumsum(hist),
where='post', label=t+' '+scale_d[k])
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='upper left', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
            hist, bin_edges = np.histogram(
                    dat[k],
                    bins=np.append(np.logspace(-2, 1, 7), 1e7),
                    density=False)
ax.step(bin_edges[:-1], hist, where='post', label=t+' '+scale_d[k],
alpha=0.7)
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def mean_density_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
        hist, bin_edges = np.histogram(
                dat['mean_N_star_per_sqdeg'],
                bins=np.append(np.logspace(0, 4, 9), 1e7),
                density=False)
ax.step(bin_edges[:-1], hist, where='post', label=t,
alpha=0.7)
ix += 1
def tick_function(N_star_per_sqdeg):
tess_px = 21*u.arcsec
tess_px_area = tess_px**2
deg_per_tess_px = tess_px_area.to(u.deg**2).value
vals = N_star_per_sqdeg * deg_per_tess_px
outstrs = ['%.1E'%z for z in vals]
outstrs = ['$'+o[0] + r'\! \cdot \! 10^{\mathrm{-}' + o[-1] + r'}$' \
for o in outstrs]
return outstrs
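    # One TESS pixel is 21 arcsec on a side, i.e. (21/3600)^2 ~= 3.4e-5 deg^2 per
    # pixel, so the top axis is just the bottom axis rescaled by that factor.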
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('mean areal density [stars/$\mathrm{deg}^{2}$]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.logspace(0,4,5)
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('mean areal density [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout()
f.savefig('mean_density_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def plot_king_profiles(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f, axs = plt.subplots(figsize=(4,7), nrows=2, ncols=1, sharex=True)
for theta, profile in zip(close['theta'], close['king_profile']):
axs[0].plot(theta, profile, alpha=0.2, c=colors[0])
for theta, profile in zip(far['theta'], far['king_profile']):
axs[1].plot(theta, profile, alpha=0.1, c=colors[1])
# Add text in top right.
axs[0].text(0.95, 0.95, '$d < 500\ \mathrm{pc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[0].transAxes,
fontsize='large')
axs[1].text(0.95, 0.95, '$d < 1\ \mathrm{kpc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[1].transAxes,
fontsize='large')
xmin, xmax = 1, 1e3
for ax in axs:
ax.set_xscale('log')
ax.set_xlim([xmin, xmax])
if ax == axs[1]:
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('angular distance [TESS px]')
ax.tick_params(which='both', direction='in', zorder=0)
ax.set_ylabel(r'$\Sigma(r)$ [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout(h_pad=0)
f.savefig('king_density_profiles_close_MWSC.pdf', dpi=300,
bbox_inches='tight')
def get_king_proj_density_profile(r_t, r_c, k, d):
'''
r_t: King's tidal radius [pc]
r_c: King's core radius [pc]
k: normalization [pc^{-2}]
d: distance [pc]
returns density profile in number per sq tess pixel
'''
# Eq 4 of Ernst et al, 2010 https://arxiv.org/pdf/1009.0710.pdf
# citing King (1962).
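    # Explicitly, the projected King profile implemented below is
    #   Sigma(r) = k * [ (1 + (r/r_c)^2)^(-1/2) - (1 + (r_t/r_c)^2)^(-1/2) ]^2
    # for r < r_t, and zero outside the tidal radius.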
r = np.logspace(-2, 2.4, num=int(2e4))
X = 1 + (r/r_c)**2
C = 1 + (r_t/r_c)**2
vals = k * (X**(-1/2) - C**(-1/2))**2
#NOTE: this fails when r_t does not exist. This might be important...
vals[r>r_t] = 0
# vals currently in number per square parsec. want in number per TESS px.
# first convert to number per square arcsec
# N per sq arcsec. First term converts to 1/AU^2. Then the angular surface
# density scales as the square of the distance (same number of things,
# smaller angle)
sigma = vals * 206265**(-2) * d**2
tess_px = 21*u.arcsec
arcsec_per_px = 21
sigma_per_sq_px = sigma * arcsec_per_px**2 # N per px^2
# r is in pc. we want the profile vs angular distance.
AU_per_pc = 206265
r *= AU_per_pc # r now in AU
theta = r / d # angular distance in arcsec
tess_px = 21 # arcsec per px
theta *= (1/tess_px) # angular distance in px
return sigma_per_sq_px, theta
def make_wget_script(df):
'''
to download stellar data for each cluster, need to run a script of wgets.
this function makes the script.
'''
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
names = np.array(df['Name'])
f = open('../data/MWSC_stellar_data/get_stellar_data.sh', 'w')
outstrs = []
for mwsc_id, name in zip(mwsc_ids, names):
startstr = 'wget '+\
'ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/558/A53/stars/2m_'
middlestr = str(mwsc_id) + '_' + str(name)
endstr = '.dat.bz2 ;\n'
outstr = startstr + middlestr + endstr
outstrs.append(outstr)
f.writelines(outstrs)
f.close()
print('made wget script!')
def get_stellar_data_too(df, savstr, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster, it computes surface density vs angular distance from
cluster center.
%%%Method 1 (outdated):
%%%Interpolating these results over the King profiles, it associates a surface
%%% density with each star.
%%%(WARNING: how many clusters do not have King profiles?)
Method 2 (used):
Associate a surface density with each star by counting stars in annuli.
This is also not very useful.
It then returns "close", "far", and the entire dataframe
'''
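    # Concretely, the Method 2 surface density in an annulus [r_in, r_out] (in
    # TESS px) is N_in_annulus / (pi * (r_out^2 - r_in^2)); see the histogram
    # and division further below.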
names = np.array(df['Name'])
r2s = np.array(df['r2']) # cluster radius (deg)
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s)):
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name != 'Melotte_20':
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
member_T_mags = np.array(temp['Tmag'])
noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = noise
#########################################################################
## METHOD #1 to assign surface densities:
## The King profile for the cluster is already known. Assign each member
## star a surface density from the King profile evaluated at the member
## star's angular position.
#king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
#king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
## theta is saved in units of TESS px. Get each star's distance from the
## center in TESS pixels.
#arcsec_per_tesspx = 21
#Rcl = np.array(mdf['Rcl'])*u.deg
#dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
## interpolate over the King profile
#func = interp1d(theta, king_profile, fill_value='extrapolate')
#try:
# density_per_sq_px = func(dists_from_center)
#except:
# print('SAVED OUTPUT TO ../data/Kharachenko_full.p')
# pickle.dump(outd, open('../data/Kharachenko_full.p', 'wb'))
# print('interpolation failed. check!')
# import IPython; IPython.embed()
#mdf['density_per_sq_px'] = density_per_sq_px
#########################################################################
#########################################################################
# METHOD #2 for surface densities (because Method #1 only counts
# member stars!).
# Just count stars in annuli.
king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
inds = (tab['Rcl'] < r2)
stars_in_annulus = tab[inds]
sia = stars_in_annulus.to_pandas()
arcsec_per_tesspx = 21
Rcl = np.array(sia['Rcl'])*u.deg
dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
maxdist = ((r2*u.deg).to(u.arcsec).value/arcsec_per_tesspx)
n_pts = np.min((50, int(len(sia)/2)))
angsep_grid = np.linspace(0, maxdist, num=n_pts)
# Attempt to compute Tmags for everything. Only count stars with
# T<limiting magnitude as "contaminants" (anything else is probably too
# faint to really matter!)
mags = sia[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
T_mags = np.array(temp['Tmag'])
all_dists = dists_from_center[(T_mags > 0) & (T_mags < 17) & \
(np.isfinite(T_mags))]
        N_in_bin, edges = np.histogram(
                all_dists,
                bins=angsep_grid,
                density=False)
# compute empirical surface density, defined on the midpoints
outer, inner = angsep_grid[1:], angsep_grid[:-1]
sigma = N_in_bin / (pi * (outer**2 - inner**2))
midpoints = angsep_grid[:-1] + np.diff(angsep_grid)/2
# interpolate over the empirical surface density as a function of
# angular separation to assign surface densities to member stars.
func = interp1d(midpoints, sigma, fill_value='extrapolate')
member_Rcl = np.array(mdf['Rcl'])*u.deg
member_dists_from_center = np.array(member_Rcl.to(u.arcsec).value/\
arcsec_per_tesspx)
try:
member_density_per_sq_px = func(member_dists_from_center)
except:
print('SAVED OUTPUT TO ../data/Kharachenko_full_{:s}.p'.format(savstr))
pickle.dump(outd, open(
'../data/Kharachenko_full_{:s}.p'.format(savstr), 'wb'))
print('interpolation failed. check!')
import IPython; IPython.embed()
mdf['density_per_sq_px'] = member_density_per_sq_px
#########################################################################
N_catalogd = int(df.loc[df['Name']==name, 'N1sr2'])
N_my_onesigma = int(len(mdf))
got_Tmag = (np.array(mdf['Tmag']) > 0)
N_with_Tmag = len(mdf[got_Tmag])
print('N catalogued as in cluster: {:d}'.format(N_catalogd))
print('N I got as in cluster: {:d}'.format(N_my_onesigma))
print('N of them with Tmag: {:d}'.format(N_with_Tmag))
diff = abs(N_catalogd - N_with_Tmag)
if diff > 5:
print('\nWARNING: my cuts different from Kharachenko+ 2013!!')
lens = np.array([len(member_T_mags),
len(noise),
len(member_dists_from_center),
len(member_density_per_sq_px)])
np.testing.assert_equal(lens, lens[0]*np.ones_like(lens))
# for members
outd[name]['Tmag'] = np.array(mdf['Tmag'])
outd[name]['noise_1hr'] = np.array(mdf['noise_1hr'])
outd[name]['Rcl'] = member_dists_from_center
outd[name]['density_per_sq_px'] = member_density_per_sq_px
        # Occasionally, do some output plots to compare profiles
if ix%50 == 0:
plt.close('all')
f, ax=plt.subplots()
ax.scatter(member_dists_from_center, member_density_per_sq_px)
ax.plot(king_theta, king_profile)
ax.set_ylim([0,np.max((np.max(member_density_per_sq_px),
np.max(king_profile) ) )])
ax.set_xlim([0, 1.02*np.max(member_dists_from_center)])
ax.set_xlabel('angular sep [TESS px]')
ax.set_ylabel('surface density (line: King model, dots: empirical'
' [per tess px area]', fontsize='xx-small')
f.savefig('king_v_empirical/{:s}_{:d}.pdf'.format(name, ix),
bbox_inches='tight')
del mdf
ix += 1
print(50*'*')
print('SAVED OUTPUT TO ../data/Kharchenko_full_{:s}.p'.format(savstr))
pickle.dump(outd, open(
'../data/Kharchenko_full_{:s}.p'.format(savstr), 'wb'))
print(50*'*')
close = df[df['d'] < 500]
far = df[df['d'] < 1000]
return close, far, df
def get_dilutions_and_distances(df, savstr, faintest_Tmag=16, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster member, it then finds all cataloged stars (not necessarily
cluster members) within 2, 3, 4, 5, 6 TESS pixels.
It sums the fluxes, and computes a dilution.
It saves (for each cluster member):
* number of stars in various apertures
* dilution for various apertures
* distance of cluster member
* Tmag of cluster member
* noise_1hr for cluster member
* ra,dec for cluster member
'''
names = np.array(df['Name'])
r2s = np.array(df['r2'])
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
start, step = 3, 7
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s))[start::step]:
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
outpath = '../data/MWSC_dilution_calc/{:s}.csv'.format(str(name))
if os.path.exists(outpath):
print('found {:s}, continue'.format(outpath))
continue
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name not in ['Melotte_20', 'Sco_OB4']:
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp{:s}.csv'.format(name), index=False)
ticgen.ticgen_csv({'input_fn':'temp{:s}.csv'.format(name)})
temp = pd.read_csv('temp{:s}.csv-ticgen.csv'.format(name))
member_T_mags = np.array(temp['Tmag'])
member_noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = member_noise
desired_Tmag_inds = ((member_T_mags > 0) & (member_T_mags < faintest_Tmag) & \
(np.isfinite(member_T_mags)) )
sel_members = mdf[desired_Tmag_inds]
# Compute T mag for everything in this cluster field. NOTE this
# consistently seems to fail for ~10% of the stars. This is not
# precision science (we are getting coarse estimates), so ignore this
# likely bug.
mags = tab[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_pandas().to_csv('temp{:s}.csv'.format(name), index=False)
ticgen.ticgen_csv({'input_fn':'temp{:s}.csv'.format(name)})
temp = pd.read_csv('temp{:s}.csv-ticgen.csv'.format(name))
all_Tmag = np.array(temp['Tmag'])
tab['Tmag'] = all_Tmag
Tmag_inds = ((all_Tmag>0) & (all_Tmag<28) & (np.isfinite(all_Tmag)))
sel_in_field = tab[Tmag_inds]
# Want, for all cluster members with T<faintest_Tmag
# * distance of cluster member
# * Tmag of cluster member
# * noise_1hr for cluster member
# * ra,dec for cluster member
# * number of stars in various apertures
# * dilution for various apertures
sel_members['dist'] = np.ones_like(np.array(sel_members['RAhour']))*\
float(df.loc[df['Name']==name, 'd'])
Nstar_dict, dil_dict = {}, {}
arcsec_per_px = 21
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
Nstar_dict[Nstar_str] = []
dil_dict[dil_str] = []
# Iterate over members, then over apertures.
print('finding all neighbors and computing dilutions')
for sm_ra, sm_dec, sm_Tmag in zip(sel_members['RAhour'],
sel_members['DEdeg'],
sel_members['Tmag']):
member_c = SkyCoord(ra=sm_ra*u.hourangle, dec=sm_dec*u.degree)
nbhr_RAs = np.array(sel_in_field['RAhour'])*u.hourangle
nbhr_DECs = np.array(sel_in_field['DEdeg'])*u.degree
c = SkyCoord(ra=nbhr_RAs, dec=nbhr_DECs)
seps = c.separation(member_c)
# Find neighboring stars in aperture.
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
aper_radius_in_as = aper_radius * arcsec_per_px * u.arcsecond
in_aperture = (seps < aper_radius_in_as)
stars_in_aperture = sel_in_field[in_aperture]
Nstar_in_aperture = len(stars_in_aperture)
# NB this list includes the target star.
Tmags_in_aperture = np.array(stars_in_aperture['Tmag'])
# Compute dilution.
numerator = 10**(-0.4 * sm_Tmag)
denominator = np.sum( 10**(-0.4 * Tmags_in_aperture) )
dilution = numerator/denominator
Nstar_dict[Nstar_str].append(Nstar_in_aperture)
dil_dict[dil_str].append(dilution)
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
sel_members[Nstar_str] = Nstar_dict[Nstar_str]
sel_members[dil_str] = dil_dict[dil_str]
print('done computing dilutions')
out = sel_members[
['dist','Tmag','noise_1hr','RAhour','DEdeg',
'Nstar_2px','Nstar_3px','Nstar_4px','Nstar_5px','Nstar_6px',
'dil_2px','dil_3px','dil_4px','dil_5px','dil_6px'
]
]
#########################################################################
N_catalogd = int(df.loc[df['Name']==name, 'N1sr2'])
N_my_onesigma = len(mdf)
N_with_Tmag = len(out)
print('N catalogued as in cluster: {:d}'.format(N_catalogd))
print('N I got as in cluster: {:d}'.format(N_my_onesigma))
print('N of them with Tmag: {:d}'.format(N_with_Tmag))
diff = abs(N_catalogd - N_with_Tmag)
if diff > 5:
print('\nWARNING: my cuts different from Kharachenko+ 2013!!')
#########################################################################
fpath = '../data/MWSC_dilution_calc/{:s}.csv'.format(str(name))
print('saving to {:s}'.format(fpath))
out.to_csv(fpath, index=False)
print('done with dilution calculation')
def plot_King_density_vs_Tmag_scatter(close, far):
c_names = np.sort(close['Name'])
f_names = np.sort(far['Name'])
obj = pickle.load(open('../data/Kharachenko_full.p','rb'))
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# Close clusters
Tmags, densities = np.array([]), np.array([])
for c_name in c_names:
c = obj[c_name]
#XXX FIXME THIS IS WRONG!!!!!!!!
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='hex',
color=colors[0],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-6,0])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{King}}\ [\mathrm{member\ stars/TESS\ px}^2]$)')
g.savefig('king_density_vs_Tmag_scatter_close.pdf', dpi=300,
bbox_inches='tight')
# Far clusters
Tmags, densities = np.array([]), np.array([])
for f_name in f_names:
c = obj[f_name]
#XXX FIXME THIS IS WRONG
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='hex',
color=colors[1],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-6,0])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{King}}\ [\mathrm{member\ stars/TESS\ px}^2]$)')
g.savefig('king_density_vs_Tmag_scatter_far.pdf', dpi=300,
bbox_inches='tight')
def plot_empirical_density_vs_Tmag_scatter(close, far):
c_names = np.sort(close['Name'])
f_names = np.sort(far['Name'])
obj = pickle.load(open('../data/Kharchenko_full_Tmag_lt_18.p','rb'))
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# Close clusters
Tmags, densities = np.array([]), np.array([])
for c_name in c_names:
c = obj[c_name]
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='kde',
color=colors[0],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-1.5,0.5])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{empirical}}\ [\mathrm{obsd\ stars/TESS\ px}^2]$)')
g.savefig('empirical_density_vs_Tmag_scatter_close.pdf', dpi=300,
bbox_inches='tight')
# Far clusters
Tmags, densities = np.array([]), np.array([])
for f_name in f_names:
c = obj[f_name]
#XXX FIXME THIS IS WRONG!!
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='kde',
color=colors[1],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-1.5,0.5])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{empirical}}\ [\mathrm{obsd\ stars/TESS\ px}^2]$)')
g.savefig('empirical_density_vs_Tmag_scatter_far.pdf', dpi=300,
bbox_inches='tight')
def plot_cluster_positions(close, far):
'''
Show the positions on Kavrayskiy VII, a global projection similar to
Robinson, used widely in the former Soviet Union.
'''
import matplotlib as mpl
from mpl_toolkits.basemap import Basemap
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
for coord in ['galactic','ecliptic']:
plt.close('all')
f, ax = plt.subplots(figsize=(4,4))
m = Basemap(projection='kav7',lon_0=0, resolution='c', ax=ax)
lats = np.array(close[coord+'_lat'])
lons = np.array(close[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,3,marker='o',color=colors[0], label='$d<0.5$kpc',
zorder=4)
lats = np.array(far[coord+'_lat'])
lons = np.array(far[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,3,marker='o',color=colors[1], label='$0.5<d<1$kpc',
zorder=3)
parallels = np.arange(-90.,120.,30.)
meridians = np.arange(0.,420.,60.)
# labels = [left,right,top,bottom]
m.drawparallels(parallels, labels=[1,0,0,0], zorder=2,
fontsize='small')
ms = m.drawmeridians(meridians, labels=[0,0,0,1], zorder=2,
fontsize='small')
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(loc='upper center', bbox_to_anchor=(0.91, -0.07),
fancybox=True, ncol=1, fontsize='x-small')
for _m in ms:
try:
ms[_m][1][0].set_rotation(45)
except:
pass
ax.set_xlabel(coord+' long', labelpad=25, fontsize='small')
ax.set_ylabel(coord+' lat', labelpad=25, fontsize='small')
####################
# add TESS footprint
dat = np.genfromtxt('../data/fig4_bundle/nhemi_shemi.csv', delimiter=',')
dat = pd.DataFrame(np.transpose(dat), columns=['icSys', 'tSys', 'teff',
'logg', 'r', 'm', 'eLat', 'eLon', 'micSys', 'mvSys', 'mic', 'mv',
'stat', 'nPntg'])
eLon, eLat = np.array(dat.eLon), np.array(dat.eLat)
nPntg = np.array(dat.nPntg)
if coord=='galactic':
c = SkyCoord(lat=eLat*u.degree, lon=eLon*u.degree,
frame='barycentrictrueecliptic')
lon = np.array(c.galactic.l)
lat = np.array(c.galactic.b)
elif coord=='ecliptic':
lon, lat = eLon, eLat
nPntg[nPntg >= 4] = 4
ncolor = 4
cmap1 = mpl.colors.ListedColormap(
sns.color_palette("Greys", n_colors=ncolor, desat=1))
bounds= list(np.arange(0.5,ncolor+1,1))
norm1 = mpl.colors.BoundaryNorm(bounds, cmap1.N)
x, y = m(lon, lat)
out = m.scatter(x,y,s=0.2,marker='s',c=nPntg, zorder=1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=0.5)
out = m.scatter(x,y,s=0, marker='s',c=nPntg, zorder=-1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=1)
m.drawmapboundary()
cbar = f.colorbar(out, cmap=cmap1, norm=norm1, boundaries=bounds,
fraction=0.025, pad=0.05, ticks=np.arange(ncolor)+1,
orientation='vertical')
ylabels = np.arange(1,ncolor+1,1)
cbarlabels = list(map(str, ylabels))[:-1]
cbarlabels.append('$\geq 4$')
cbar.ax.set_yticklabels(cbarlabels)
cbar.set_label('N pointings', rotation=270, labelpad=5)
####################
f.savefig('cluster_positions_'+coord+'.pdf', bbox_inches='tight')
def plot_cluster_positions_scicase(df):
'''
Show the positions of d<2kpc clusters, and highlight those with rotation
period measurements & transiting planets.
'''
rotn_clusters = ['NGC_1976', # AKA the orion nebula cluster
'NGC_6530',
'NGC_2264',
'Cep_OB3',
'NGC_2362',
'NGC_869', # h Per, one of the double cluster
'NGC_2547',
'IC_2391',
'Melotte_20', # alpha Persei cluster, alpha Per
'Melotte_22', # AKA Pleiades
'NGC_2323', # M 50
'NGC_2168', #M 35
'NGC_2516',
'NGC_1039', #M 34
'NGC_2099', # M 37
#'NGC_2632', #Praesepe, comment out to avoid overlap
#'NGC_6811', #comment out to avoid overlap
'NGC_2682' ] #M 67
transiting_planet_clusters = [
'NGC_6811',
'NGC_2632' #Praesepe
]
df = df[df['d'] < 2000]
df_rotn = df.loc[df['Name'].isin(rotn_clusters)]
df_rotn = df_rotn[
['ecliptic_lat','ecliptic_long','galactic_lat','galactic_long',
'Name']
]
df_tra = df.loc[df['Name'].isin(transiting_planet_clusters)]
# Above rotation lists were from Table 1 of Gallet & Bouvier 2015,
# including M67 which was observed by K2. Transiting planets from the few
    # papers that have them. These names are cross-matched to MWSC's naming scheme. I
# could not find the Hyades or ScoCen OB. They both have transiting
# planets, and the former has rotation studies done.
c_Hyades = SkyCoord(ra='4h27m', dec=15*u.degree + 52*u.arcminute)
df_hyades = pd.DataFrame({
'Name':'Hyades',
'ecliptic_long':float(c_Hyades.barycentrictrueecliptic.lon.value),
'ecliptic_lat':float(c_Hyades.barycentrictrueecliptic.lat.value),
'galactic_long':float(c_Hyades.galactic.l.value),
'galactic_lat':float(c_Hyades.galactic.b.value)}, index=[0])
c_ScoOB2 = SkyCoord(ra='16h10m14.73s', dec='-19d19m09.38s') # Mann+2016's position
df_ScoOB2 = pd.DataFrame({
'Name':'Sco_OB2',
'ecliptic_long':float(c_ScoOB2.barycentrictrueecliptic.lon.value),
'ecliptic_lat':float(c_ScoOB2.barycentrictrueecliptic.lat.value),
'galactic_long':float(c_ScoOB2.galactic.l.value),
'galactic_lat':float(c_ScoOB2.galactic.b.value)}, index=[0])
df_tra = df_tra.append(df_hyades, ignore_index=True)
df_tra = df_tra.append(df_ScoOB2, ignore_index=True)
#df_rotn = df_rotn.append(df_hyades, ignore_index=True) #avoid overlap
# End of data wrangling.
import matplotlib as mpl
from mpl_toolkits.basemap import Basemap
for coord in ['galactic','ecliptic']:
plt.close('all')
#f, ax = plt.subplots(figsize=(4,4))
f = plt.figure(figsize=(0.7*5,0.7*4))
ax = plt.gca()
m = Basemap(projection='kav7',lon_0=0, resolution='c', ax=ax)
lats = np.array(df[coord+'_lat'])
lons = np.array(df[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,2,marker='o',facecolor=COLORS[0], zorder=4,
alpha=0.9,edgecolors=COLORS[0], lw=0)
lats = np.array(df_rotn[coord+'_lat'])
lons = np.array(df_rotn[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,42,marker='*',color=COLORS[1],edgecolors='k',
label='have rotation studies', zorder=5,lw=0.4)
lats = np.array(df_tra[coord+'_lat'])
lons = np.array(df_tra[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,13,marker='s',color=COLORS[1],edgecolors='k',
label='also have transiting planets', zorder=6, lw=0.45)
parallels = np.arange(-90.,120.,30.)
meridians = np.arange(0.,420.,60.)
# labels = [left,right,top,bottom]
ps = m.drawparallels(parallels, labels=[1,0,0,0], zorder=2,
fontsize='x-small')
ms = m.drawmeridians(meridians, labels=[0,0,0,1], zorder=2,
fontsize='x-small')
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
#ax.legend(loc='upper center', bbox_to_anchor=(0.01, 0.02),
# fancybox=True, ncol=1, fontsize='x-small')
for _m in ms:
try:
#ms[_m][1][0].set_rotation(45)
if '60' in ms[_m][1][0].get_text():
ms[_m][1][0].set_text('')
except:
pass
for _p in ps:
try:
if '30' in ps[_p][1][0].get_text():
ps[_p][1][0].set_text('')
except:
pass
ax.set_xlabel(coord+' long', labelpad=13, fontsize='x-small')
ax.set_ylabel(coord+' lat', labelpad=13, fontsize='x-small')
######################
# add TESS footprint #
######################
dat = np.genfromtxt('../data/fig4_bundle/nhemi_shemi.csv', delimiter=',')
dat = pd.DataFrame(np.transpose(dat), columns=['icSys', 'tSys', 'teff',
'logg', 'r', 'm', 'eLat', 'eLon', 'micSys', 'mvSys', 'mic', 'mv',
'stat', 'nPntg'])
eLon, eLat = np.array(dat.eLon), np.array(dat.eLat)
nPntg = np.array(dat.nPntg)
if coord=='galactic':
c = SkyCoord(lat=eLat*u.degree, lon=eLon*u.degree,
frame='barycentrictrueecliptic')
lon = np.array(c.galactic.l)
lat = np.array(c.galactic.b)
elif coord=='ecliptic':
lon, lat = eLon, eLat
nPntg[nPntg >= 4] = 4
ncolor = 4
cmap1 = mpl.colors.ListedColormap(
sns.color_palette("Greys", n_colors=ncolor, desat=1))
bounds= list(np.arange(0.5,ncolor+1,1))
norm1 = mpl.colors.BoundaryNorm(bounds, cmap1.N)
x, y = m(lon, lat)
out = m.scatter(x,y,s=0.2,marker='s',c=nPntg, zorder=1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=0.5)
out = m.scatter(x,y,s=0, marker='s',c=nPntg, zorder=-1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=1)
m.drawmapboundary()
#cbar = f.colorbar(out, cmap=cmap1, norm=norm1, boundaries=bounds,
# fraction=0.025, pad=0.05, ticks=np.arange(ncolor)+1,
# orientation='vertical')
#ylabels = np.arange(1,ncolor+1,1)
#cbarlabels = list(map(str, ylabels))[:-1]
#cbarlabels.append('$\geq\! 4$')
#cbar.ax.set_yticklabels(cbarlabels, fontsize='x-small')
#cbar.set_label('N pointings', rotation=270, labelpad=5, fontsize='x-small')
####################
f.tight_layout()
f.savefig('cluster_positions_'+coord+'_scicase.pdf', bbox_inches='tight')
def plot_HATS_field_positions():
'''
Show the positions on Kavrayskiy VII, a global projection similar to
Robinson, used widely in the former Soviet Union.
    N.B. we're just marking the HATS field centers (13x13 deg each).
'''
import matplotlib as mpl
from mpl_toolkits.basemap import Basemap
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
df = pd.read_csv('../data/HATPI_field_ids.txt', delimiter='|')
ra = df['ra']
dec = df['decl']
fieldnums = df['field_num']
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
lons = np.array(c.barycentrictrueecliptic.lon)
    lats = np.array(c.barycentrictrueecliptic.lat)
import copy
from typing import Iterable
import numpy as np
from IPython.core.display import clear_output
from rdkit import DataStructs
from tqdm import tqdm
from synergetic_molecule_generator import molecules
def _relativize_vector(vector: np.ndarray):
std = vector.std()
if std == 0:
return np.ones(len(vector))
standard = (vector - vector.mean()) / std
standard[standard > 0] = np.log(1 + standard[standard > 0]) + 1
standard[standard <= 0] = np.exp(standard[standard <= 0])
return standard
def relativize_vector(vector: np.ndarray):
std = vector.std()
if std == 0:
return np.ones(len(vector))
standard = (vector - vector.mean()) / std
return np.exp(standard)
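# Illustrative example (values approximate): relativizing maps a reward vector
# onto a positive, order-preserving scale, e.g.
#   relativize_vector(np.array([0., 1., 2.]))   -> [0.29, 1.00, 3.40]
#   _relativize_vector(np.array([0., 1., 2.]))  -> [0.29, 1.00, 1.80]
# i.e. the underscored variant compresses positive outliers logarithmically.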
class DataStorage:
"""This is a class for storing the states and the observations of a Swarm.
This way is slower than storing it in a numpy array, but it allows to store
any kind of states and observations."""
def __init__(self):
self.states = {}
self.actions = {}
self.walker_ids = []
def __getitem__(self, item):
states = self.get_states(item)
actions = self.get_actions(item)
return states, actions
def reset(self):
self.states = {}
self.actions = {}
self.walker_ids = []
def get_states(self, labels: Iterable) -> list:
return [self.states[label] for label in labels]
def get_actions(self, labels: Iterable) -> list:
return [self.actions[label] for label in labels]
def append(self, walker_ids: [list, np.ndarray], states: Iterable, actions):
actions = actions if actions is not None else [None] * len(walker_ids)
for w_id, state, action in zip(walker_ids, states, actions):
if w_id not in self.walker_ids:
self.states[w_id] = copy.deepcopy(state)
if actions is not None:
self.actions[w_id] = copy.deepcopy(action)
self.walker_ids = list(set(self.walker_ids))
self.walker_ids += list(set(walker_ids))
def update_values(self, walker_ids):
# This is not optimal, but ensures no memory leak
new_states = {}
new_actions = {}
walker_ids = list(walker_ids)
walker_ids.append(0)
for w_id in walker_ids:
new_states[w_id] = self.states[w_id]
new_actions[w_id] = self.actions[w_id]
self.states = new_states
self.actions = new_actions
self.walker_ids = walker_ids
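# Minimal usage sketch of DataStorage (illustrative; real states are molecule
# objects rather than strings):
#   storage = DataStorage()
#   storage.append(walker_ids=[0, 1], states=["s0", "s1"], actions=["a0", "a1"])
#   states, actions = storage[[0, 1]]
#   storage.update_values([1])   # keep only walker 1 (and 0, always retained)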
class Swarm:
"""This is the most basic mathematical entity that can be derived from Fractal AI theory.
It represents a cloud of points that propagates through an state space. Each walker of the
swarm evolves by either cloning to another walker or perturbing the environment.
"""
def __init__(
self,
model,
scoring_function=None,
neural_network=None,
n_walkers: int = 100,
balance: float = 1.0,
render_every: int = 1e10,
accumulate_rewards: bool = True,
):
"""
:param env: Environment that will be sampled.
:param model: Model used for sampling actions from observations.
:param n_walkers: Number of walkers that the swarm will use
:param balance: Balance coefficient for the virtual reward formula.
:param render_every: Number of iterations that will be performed before printing the Swarm
status.
:param accumulate_rewards: Use the accumulated reward when scoring the walkers.
False to use instantaneous reward.
"""
# Parameters of the algorithm
self._model = model
self.n_walkers = n_walkers
self.balance = balance
self.render_every = render_every
self.accumulate_rewards = accumulate_rewards
# Environment information sources
self.observations = None
        self.rewards = np.ones(self.n_walkers)
import numpy as np
import math
from rtree import index
from pyneval.model import euclidean_point as euc_p
from pyneval.metric.utils import config_utils
from pyneval.model import swc_node
def get_match_edges(gold_swc_tree=None, test_swc_tree=None,
radius_threshold=-1.0, length_threshold=0.2,
debug=False):
"""
get matched edge set
Args:
gold_swc_tree(SwcTree):
test_swc_tree(SwcTree):
radius_threshold(float): threshold of key point radius
length_threshold(float): threshold of length of the matching edges
debug(bool): list debug info ot not
Returns:
match_edge(set): include all edge that are matched in gold swc tree
edge(tuple): every edge is a tuple contains two side nodes of a edge
test_match_length(float): it's the total length of matched area on test swc tree
Raises:
[Error: ] Max id in test swc tree too large
the size of vis list is depend on the maximum id of the tree.
this number couldn't be too large
"""
match_edge = set()
edge_use_dict = {}
id_rootdis_dict = {}
test_match_length = 0.0
test_rtree = get_edge_rtree(test_swc_tree)
id_edge_dict = get_idedge_dict(test_swc_tree)
gold_node_list = gold_swc_tree.get_node_list()
test_node_list = test_swc_tree.get_node_list()
# node num need to be larger than the max id
test_maxum = 0
for node in test_node_list:
id_rootdis_dict[node.get_id()] = node.root_length
test_maxum = max(test_maxum, node.get_id())
try:
vis_list = np.zeros(test_maxum+5, dtype='int8')
test_swc_tree.get_lca_preprocess(node_num=test_maxum+5)
except:
raise Exception("[Error: ] Max id in test swc tree too large")
for node in gold_node_list:
if node.is_virtual() or node.parent.is_virtual():
continue
rad_threshold1, rad_threshold2 = cal_rad_threshold(radius_threshold, node.radius(), node.parent.radius())
line_tuple_a_set = get_nearby_edges(rtree=test_rtree, point=node, id_edge_dict=id_edge_dict,
threshold=rad_threshold1, not_self=False, debug=debug)
line_tuple_b_set = get_nearby_edges(rtree=test_rtree, point=node.parent, id_edge_dict=id_edge_dict,
threshold=rad_threshold2, not_self=False, debug=debug)
done = False
for line_tuple_a_dis in line_tuple_a_set:
if done:
break
for line_tuple_b_dis in line_tuple_b_set:
line_tuple_a = line_tuple_a_dis[0]
dis_a = line_tuple_a_dis[1]
line_tuple_b = line_tuple_b_dis[0]
dis_b = line_tuple_b_dis[1]
test_length = get_lca_length(test_swc_tree, \
line_tuple_a, \
line_tuple_b, \
euc_p.Line(e_node_1=node.get_center(),
e_node_2=node.parent.get_center()),
id_rootdis_dict)
gold_length = node.parent_distance()
if test_length == config_utils.DINF:
continue
len_threshold1 = cal_len_threshold(length_threshold, gold_length)
if not (dis_a <= rad_threshold1 and dis_b <= rad_threshold2):
if debug:
print(node.get_id(), dis_a, rad_threshold1, dis_b, rad_threshold2, "error1")
continue
if not (math.fabs(test_length - gold_length) < len_threshold1):
if debug:
print(node.get_id(), "error2")
continue
if not is_route_clean(gold_swc_tree=test_swc_tree,
gold_line_tuple_a=line_tuple_a, gold_line_tuple_b=line_tuple_b,
node1=node, node2=node.parent,
edge_use_dict=edge_use_dict, vis_list= vis_list, debug=debug):
if debug:
print(node.get_id(), "error3")
continue
match_edge.add(tuple([node, node.parent]))
test_match_length += test_length
done = True
break
if not done:
node._type = 9
if debug:
print("{} not done".format(node.get_id()))
return match_edge, test_match_length
def get_edge_rtree(swc_tree=None):
"""
build a rtree based on the swc tree
Args:
swc_tree(Swc Tree): Swc_Tree, to build rtree
Return:
rtree(rtree index)
"""
swc_tree_list = swc_tree.get_node_list()
p = index.Property()
p.dimension = 3
rtree = index.Index(properties=p)
for node in swc_tree_list:
if node.is_virtual() or node.parent.is_virtual():
continue
rtree.insert(node.get_id(), get_bounds(node, node.parent, extra=node.radius()))
return rtree
def get_bounds(point_a, point_b, extra=0):
"""
get bounding box of a segment
Args:
point_a: two points to identify the square
point_b:
extra: float, a threshold
Return:
res(tuple):
"""
point_a = np.array(point_a.get_center()._pos)
point_b = np.array(point_b.get_center()._pos)
    res = (np.where(point_a > point_b, point_b, point_a) - extra).tolist() + \
          (np.where(point_a > point_b, point_a, point_b) + extra).tolist()
    return tuple(res)
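# A 3-D rtree built from these bounds can then be queried for candidate edges
# near a point, e.g. (sketch; get_nearby_edges is defined elsewhere):
#   hits = list(rtree.intersection((x - r, y - r, z - r, x + r, y + r, z + r)))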
import os
import numpy as np
from pygeo import DVGeometry, DVGeometryAxi
from pyspline import Curve
##################
# DVGeometry Tests
##################
def setupDVGeo(base_path, rotType=None):
# create the Parent FFD
FFDFile = os.path.join(base_path, "../inputFiles/outerBoxFFD.xyz")
DVGeo = DVGeometry(FFDFile)
# create a reference axis for the parent
axisPoints = [[-1.0, 0.0, 0.0], [1.5, 0.0, 0.0]]
c1 = Curve(X=axisPoints, k=2)
if rotType is not None:
DVGeo.addRefAxis("mainAxis", curve=c1, axis="y", rotType=rotType)
else:
DVGeo.addRefAxis("mainAxis", curve=c1, axis="y")
# create the child FFD
FFDFile = os.path.join(base_path, "../inputFiles/simpleInnerFFD.xyz")
DVGeoChild = DVGeometry(FFDFile, child=True)
# create a reference axis for the child
axisPoints = [[-0.5, 0.0, 0.0], [0.5, 0.0, 0.0]]
c1 = Curve(X=axisPoints, k=2)
DVGeoChild.addRefAxis("nestedAxis", curve=c1, axis="y")
return DVGeo, DVGeoChild
def setupDVGeoD8(base_path, isComplex):
# create the Parent FFD
FFDFile = os.path.join(base_path, "../inputFiles/bodyFFD.xyz")
DVGeo = DVGeometry(FFDFile, isComplex=isComplex)
# create a reference axis for the parent
axisPoints = [[0.0, 0.0, 0.0], [26.0, 0.0, 0.0], [30.5, 0.0, 0.9], [32.5, 0.0, 1.01], [34.0, 0.0, 0.95]]
c1 = Curve(X=axisPoints, k=2)
DVGeo.addRefAxis("mainAxis", curve=c1, axis="y")
# create the child FFD
FFDFile = os.path.join(base_path, "../inputFiles/nozzleFFD.xyz")
DVGeoChild = DVGeometry(FFDFile, child=True, isComplex=isComplex)
# create a reference axis for the child
axisPoints = [[32.4, 1.0, 1.0], [34, 1.0, 0.9]]
c1 = Curve(X=axisPoints, k=2)
DVGeoChild.addRefAxis("nestedAxis", curve=c1, axis="y")
return DVGeo, DVGeoChild
def setupDVGeoAxi(base_path):
FFDFile = os.path.join(base_path, "../inputFiles/axiTestFFD.xyz")
DVGeo = DVGeometryAxi(FFDFile, center=(0.0, 0.0, 0.0), collapse_into=("x", "z"))
axisPoints = [[0, 0.0, 0.0], [0, 0.0, 1.0]]
c1 = Curve(X=axisPoints, k=2)
DVGeo.addRefAxis("stretch", curve=c1, axis="z")
return DVGeo
# define a nested global design variable
def childAxisPoints(val, geo):
C = geo.extractCoef("nestedAxis")
# Set the coefficients
C[0, 0] = val[0]
geo.restoreCoef(C, "nestedAxis")
return
# define a nested global design variable
def mainAxisPoints(val, geo):
C = geo.extractCoef("mainAxis")
# Set the coefficients
C[0, 0] = val[0]
geo.restoreCoef(C, "mainAxis")
return
# define a nested global design variable
def childAxisPointsD8(val, geo):
C = geo.extractCoef("nestedAxis")
# Set the coefficients
for i in range(len(val)):
C[i, 0] = val[i]
geo.restoreCoef(C, "nestedAxis")
return
# define a nested global design variable
def mainAxisPointsD8(val, geo):
C = geo.extractCoef("mainAxis")
# Set the coefficients
for i in range(len(val)):
C[i, 0] = val[i]
geo.restoreCoef(C, "mainAxis")
return
def mainAxisPointAxi(val, DVgeo):
C = DVgeo.extractCoef("stretch")
C[0, 2] = val[0]
DVgeo.restoreCoef(C, "stretch")
return
def totalSensitivityFD(DVGeo, nPt, ptName, step=1e-1):
xDV = DVGeo.getValues()
refPoints = DVGeo.update(ptName)
# now get FD Sensitivity
dIdxFD = {}
# step = 1e-1#8
for key in xDV:
baseVar = xDV[key].copy()
nDV = len(baseVar)
dIdxFD[key] = np.zeros([nPt, nDV])
for i in range(nDV):
# print('perturbing',key)
xDV[key][i] = baseVar[i] + step
# print('setting design vars')
DVGeo.setDesignVars(xDV)
# print('calling top level update')
newPoints = DVGeo.update(ptName)
deriv = (newPoints - refPoints) / step
dIdxFD[key][:, i] = deriv.flatten()
# print('Deriv',key, i,deriv)
xDV[key][i] = baseVar[i]
return dIdxFD
def totalSensitivityCS(DVGeo, nPt, ptName):
xDV = DVGeo.getValues()
# now get CS Sensitivity
dIdxCS = {}
step = 1e-40j
for key in xDV:
baseVar = xDV[key].copy()
dIdxCS[key] = np.zeros([nPt, len(baseVar)])
for i in range(len(baseVar)):
xDV[key][i] = baseVar[i] + step
DVGeo.setDesignVars(xDV)
newPoints = DVGeo.update(ptName)
deriv = np.imag(newPoints) / np.imag(step)
dIdxCS[key][:, i] = deriv.flatten()
# print 'Deriv',key, i,deriv
xDV[key][i] = baseVar[i]
# Before we exit make sure we have reset the DVs
DVGeo.setDesignVars(xDV)
return dIdxCS
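# Minimal standalone illustration of the complex-step trick used above: for an
# analytic function, df/dx ~= Im(f(x + i*h)) / h, with no subtractive
# cancellation even for tiny h. (Illustrative sketch, with an assumed test point.)
def _complex_step_demo(x=1.3, h=1e-40):
    cs_deriv = np.imag(np.sin(x + 1j * h)) / h  # complex-step estimate of d/dx sin(x)
    return cs_deriv, np.cos(x)  # the two should agree to machine precision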
def testSensitivities(DVGeo, refDeriv, handler, pointset=1):
# create test points
points = np.zeros([2, 3])
if pointset == 1:
points[0, :] = [0.25, 0, 0]
points[1, :] = [-0.25, 0, 0]
elif pointset == 2:
points[0, :] = [0.25, 0.4, 4]
points[1, :] = [-0.8, 0.2, 7]
else:
raise Warning("Enter a valid pointset")
# add points to the geometry object
ptName = "testPoints"
DVGeo.addPointSet(points, ptName)
# generate dIdPt
nPt = 6
    dIdPt = np.zeros([nPt, 2, 3])
"""
Utility functions
Copyright 2021 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from hyfed_client.util.data_type import DataType
import logging
logger = logging.getLogger(__name__)
# noise values are generated in the range [0, largest_prime) for integers >= 0
largest_prime_non_negative_int54 = 18014398509481951 # largest prime number that can fit in 54 bits
# for integers < 0 and floating-point parameters,
# the noise is generated by Gaussian distribution with mean=0 and std=gaussian_std
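# Sketch of the additive-masking arithmetic described above (illustration only):
# a non-negative integer x is hidden as (x + r) % p with r drawn uniformly from
# [0, p); the aggregator can later remove the summed noise because
#   ((x + r) - r) % p == x   whenever 0 <= x < p = largest_prime_non_negative_int54.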
def make_noisy(original_value, data_type, gaussian_std):
""" Generate noise value with the same shape as the original value,
add it to the original value, and return both noise and noisy value
integers >= 0 are masked with integer noise
integers < 0 or real-valued parameters are masked with Gaussian noise
with mean of zero and standard deviation of gaussian_std
"""
try:
if data_type == DataType.NON_NEGATIVE_INTEGER :
noise = np.random.randint(low=0, high=largest_prime_non_negative_int54)
noisy_value = (original_value + noise) % largest_prime_non_negative_int54 # modular arithmetic
return noisy_value, noise
if data_type == DataType.NEGATIVE_INTEGER or data_type == DataType.FLOAT:
noise = np.random.normal(loc=0, scale=gaussian_std)
noisy_value = original_value + noise
return noisy_value, noise
if data_type == DataType.NUMPY_ARRAY_NON_NEGATIVE_INTEGER:
noise = np.random.randint(low=0, high=largest_prime_non_negative_int54, size=original_value.shape)
noisy_value = (original_value + noise) % largest_prime_non_negative_int54 # modular arithmetic
return noisy_value, noise
if data_type == DataType.NUMPY_ARRAY_NEGATIVE_INTEGER or data_type == DataType.NUMPY_ARRAY_FLOAT:
            noise = np.random.normal(loc=0, scale=gaussian_std, size=original_value.shape)
            noisy_value = original_value + noise
            return noisy_value, noise
import numpy
import numpy.matlib
import copy
import pandas
import wave
import struct
import os
import math
import ctypes
import multiprocessing
import warnings
import scipy
from scipy import ndimage
import scipy.stats as stats
from scipy.fftpack import fft
from scipy.signal import decimate
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
def read_sph(input_file_name, mode='p'):
"""
Read a SPHERE audio file
:param input_file_name: name of the file to read
:param mode: specifies the following (\* =default)
.. note::
- Scaling:
- 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
- 'r' Raw unscaled data (integer values)
- 'p' Scaled to make +-1 equal full scale
- 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values,
can be combined with n+p,r,s modes)
- 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values,
can be combined with o+p,r,s modes)
- Format
- 'l' Little endian data (Intel,DEC) (overrides indication in file)
- 'b' Big endian data (non Intel/DEC) (overrides indication in file)
- File I/O
- 'f' Do not close file on exit
- 'd' Look in data directory: voicebox('dir_data')
- 'w' Also read the annotation file \*.wrd if present (as in TIMIT)
- 't' Also read the phonetic transcription file \*.phn if present (as in TIMIT)
- NMAX maximum number of samples to read (or -1 for unlimited [default])
- NSKIP number of samples to skip from start of file (or -1 to continue from previous read when FFX
is given instead of FILENAME [default])
    :return: a tuple such that (Y, FS)
.. note::
- Y data matrix of dimension (samples,channels)
- FS sample frequency in Hz
- WRD{\*,2} cell array with word annotations: WRD{\*,:)={[t_start t_end],'text'} where times are in seconds
only present if 'w' option is given
- PHN{\*,2} cell array with phoneme annotations: PHN{\*,:)={[t_start t_end],'phoneme'} where times
are in seconds only present if 't' option is present
- FFX Cell array containing
1. filename
2. header information
1. first header field name
2. first header field value
3. format string (e.g. NIST_1A)
4.
1. file id
2. current position in file
3. dataoff byte offset in file to start of data
4. order byte order (l or b)
5. nsamp number of samples
6. number of channels
7. nbytes bytes per data value
8. bits number of bits of precision
9. fs sample frequency
10. min value
11. max value
12. coding 0=PCM,1=uLAW + 0=no compression, 0=shorten,20=wavpack,30=shortpack
13. file not yet decompressed
5. temporary filename
If no output parameters are specified,
header information will be printed.
The code to decode shorten-encoded files, is
not yet released with this toolkit.
"""
codings = dict([('pcm', 1), ('ulaw', 2)])
compressions = dict([(',embedded-shorten-', 1),
(',embedded-wavpack-', 2),
(',embedded-shortpack-', 3)])
byteorder = 'l'
endianess = dict([('l', '<'), ('b', '>')])
if not mode == 'p':
mode = [mode, 'p']
k = list((m >= 'p') & (m <= 's') for m in mode)
# scale to input limits not output limits
mno = all([m != 'o' for m in mode])
sc = ''
if k[0]:
sc = mode[0]
# Get byte order (little/big endian)
if any([m == 'l' for m in mode]):
byteorder = 'l'
elif any([m == 'b' for m in mode]):
byteorder = 'b'
ffx = ['', '', '', '', '']
if isinstance(input_file_name, str):
if os.path.exists(input_file_name):
fid = open(input_file_name, 'rb')
elif os.path.exists("".join((input_file_name, '.sph'))):
input_file_name = "".join((input_file_name, '.sph'))
fid = open(input_file_name, 'rb')
else:
raise Exception('Cannot find file {}'.format(input_file_name))
ffx[0] = input_file_name
elif not isinstance(input_file_name, str):
ffx = input_file_name
else:
fid = input_file_name
# Read the header
if ffx[3] == '':
fid.seek(0, 0) # go to the begining of the file
l1 = fid.readline().decode("utf-8")
l2 = fid.readline().decode("utf-8")
if not (l1 == 'NIST_1A\n') & (l2 == ' 1024\n'):
logging.warning('File does not begin with a SPHERE header')
ffx[2] = l1.rstrip()
hlen = int(l2[3:7])
hdr = {}
while True: # Read the header and fill a dictionary
st = fid.readline().decode("utf-8").rstrip()
if st[0] != ';':
elt = st.split(' ')
if elt[0] == 'end_head':
break
if elt[1][0] != '-':
logging.warning('Missing ''-'' in SPHERE header')
break
if elt[1][1] == 's':
hdr[elt[0]] = elt[2]
elif elt[1][1] == 'i':
hdr[elt[0]] = int(elt[2])
else:
hdr[elt[0]] = float(elt[2])
if 'sample_byte_format' in list(hdr.keys()):
if hdr['sample_byte_format'][0] == '0':
bord = 'l'
else:
bord = 'b'
if (bord != byteorder) & all([m != 'b' for m in mode]) \
& all([m != 'l' for m in mode]):
byteorder = bord
icode = 0 # Get encoding, default is PCM
if 'sample_coding' in list(hdr.keys()):
icode = -1 # unknown code
for coding in list(codings.keys()):
if hdr['sample_coding'].startswith(coding):
# is the signal compressed
# if len(hdr['sample_coding']) > codings[coding]:
if len(hdr['sample_coding']) > len(coding):
for compression in list(compressions.keys()):
if hdr['sample_coding'].endswith(compression):
icode = 10 * compressions[compression] \
+ codings[coding] - 1
break
else: # if the signal is not compressed
icode = codings[coding] - 1
break
# initialize info of the files with default values
info = [fid, 0, hlen, ord(byteorder), 0, 1, 2, 16, 1, 1, -1, icode]
# Get existing info from the header
if 'sample_count' in list(hdr.keys()):
info[4] = hdr['sample_count']
if not info[4]: # if no info sample_count or zero
# go to the end of the file
fid.seek(0, 2) # Go to te end of the file
# get the sample count
info[4] = int(math.floor((fid.tell() - info[2]) / (info[5] * info[6]))) # get the sample_count
if 'channel_count' in list(hdr.keys()):
info[5] = hdr['channel_count']
if 'sample_n_bytes' in list(hdr.keys()):
info[6] = hdr['sample_n_bytes']
if 'sample_sig_bits' in list(hdr.keys()):
info[7] = hdr['sample_sig_bits']
if 'sample_rate' in list(hdr.keys()):
info[8] = hdr['sample_rate']
if 'sample_min' in list(hdr.keys()):
info[9] = hdr['sample_min']
if 'sample_max' in list(hdr.keys()):
info[10] = hdr['sample_max']
ffx[1] = hdr
ffx[3] = info
info = ffx[3]
ksamples = info[4]
if ksamples > 0:
fid = info[0]
if (icode >= 10) & (ffx[4] == ''): # read compressed signal
# need to use a script with SHORTEN
raise Exception('compressed signal, need to unpack in a script with SHORTEN')
info[1] = ksamples
# use modes o and n to determine effective peak
pk = 2 ** (8 * info[6] - 1) * (1 + (float(mno) / 2 - int(all([m != 'b'
for m in
mode]))) / 2 **
info[7])
fid.seek(1024) # jump after the header
nsamples = info[5] * ksamples
if info[6] < 3:
if info[6] < 2:
logging.debug('Sphere i1 PCM')
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
if info[11] % 10 == 1:
if y.shape[0] % 2:
y = numpy.frombuffer(audioop.ulaw2lin(
numpy.concatenate((y, numpy.zeros(1, 'int8'))), 2),
numpy.int16)[:-1]/32768.
else:
y = numpy.frombuffer(audioop.ulaw2lin(y, 2), numpy.int16)/32768.
pk = 1.
else:
y = y - 128
else:
logging.debug('Sphere i2')
y = numpy.fromfile(fid, endianess[byteorder]+"i2", -1)
else: # non verifie
if info[6] < 4:
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
y = y.reshape(nsamples, 3).transpose()
y = (numpy.dot(numpy.array([1, 256, 65536]), y) - (numpy.dot(y[2, :], 2 ** (-7)).astype(int) * 2 ** 24))
else:
y = numpy.fromfile(fid, endianess[byteorder]+"i4", -1)
if sc != 'r':
if sc == 's':
if info[9] > info[10]:
info[9] = numpy.min(y)
info[10] = numpy.max(y)
sf = 1 / numpy.max(list(list(map(abs, info[9:11]))), axis=0)
else:
sf = 1 / pk
y = sf * y
if info[5] > 1:
y = y.reshape(ksamples, info[5])
else:
y = numpy.array([])
    if all(m != 'f' for m in mode):  # close the file unless mode 'f' was requested
fid.close()
info[0] = -1
if not ffx[4] == '':
pass # VERIFY SCRIPT, WHICH CASE IS HANDLED HERE
return y.astype(numpy.float32), int(info[8]), int(info[6])
def read_wav(input_file_name):
"""
:param input_file_name:
:return:
"""
wfh = wave.open(input_file_name, "r")
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wfh.getparams()
raw = wfh.readframes(nframes * nchannels)
out = struct.unpack_from("%dh" % nframes * nchannels, raw)
sig = numpy.reshape(numpy.array(out), (-1, nchannels)).squeeze()
wfh.close()
return sig.astype(numpy.float32), framerate, sampwidth
def read_pcm(input_file_name):
"""Read signal from single channel PCM 16 bits
:param input_file_name: name of the PCM file to read.
:return: the audio signal read from the file in a ndarray encoded on 16 bits, None and 2 (depth of the encoding in bytes)
"""
with open(input_file_name, 'rb') as f:
f.seek(0, 2) # Go to te end of the file
# get the sample count
sample_count = int(f.tell() / 2)
f.seek(0, 0) # got to the begining of the file
data = numpy.asarray(struct.unpack('<' + 'h' * sample_count, f.read()))
return data.astype(numpy.float32), None, 2
def read_audio(input_file_name, framerate=None):
""" Read a 1 or 2-channel audio file in SPHERE, WAVE or RAW PCM format.
The format is determined from the file extension.
If the sample rate read from the file is a multiple of the one given
as parameter, we apply a decimation function to subsample the signal.
:param input_file_name: name of the file to read from
:param framerate: frame rate, optional, if lower than the one read from the file, subsampling is applied
:return: the signal as a numpy array and the sampling frequency
"""
if framerate is None:
raise TypeError("Expected sampling frequency required in sidekit.frontend.io.read_audio")
ext = os.path.splitext(input_file_name)[-1]
if ext.lower() == '.sph':
sig, read_framerate, sampwidth = read_sph(input_file_name, 'p')
elif ext.lower() == '.wav' or ext.lower() == '.wave':
sig, read_framerate, sampwidth = read_wav(input_file_name)
elif ext.lower() == '.pcm' or ext.lower() == '.raw':
sig, read_framerate, sampwidth = read_pcm(input_file_name)
read_framerate = framerate
else:
raise TypeError("Unknown extension of audio file")
# Convert to 16 bit encoding if needed
sig *= (2**(15-sampwidth))
if framerate > read_framerate:
print("Warning in read_audio, up-sampling function is not implemented yet!")
elif read_framerate % float(framerate) == 0 and not framerate == read_framerate:
print("downsample")
sig = decimate(sig, int(read_framerate / float(framerate)), n=None, ftype='iir', axis=0)
return sig.astype(numpy.float32), framerate
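# Usage sketch (hypothetical file name): the target sampling rate is required,
# and the signal is decimated when the rate read from the file is an integer
# multiple of it.
#   sig, fs = read_audio('utterance.sph', framerate=8000)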
def rasta_filt(x):
"""Apply RASTA filtering to the input signal.
:param x: the input audio signal to filter.
cols of x = critical bands, rows of x = frame
same for y but after filtering
default filter is single pole at 0.94
"""
x = x.T
numerator = numpy.arange(.2, -.3, -.1)
denominator = numpy.array([1, -0.94])
# Initialize the state. This avoids a big spike at the beginning
# resulting from the dc offset level in each band.
# (this is effectively what rasta/rasta_filt.c does).
# Because Matlab uses a DF2Trans implementation, we have to
# specify the FIR part to get the state right (but not the IIR part)
y = numpy.zeros(x.shape)
zf = numpy.zeros((x.shape[0], 4))
for i in range(y.shape[0]):
y[i, :4], zf[i, :4] = lfilter(numerator, 1, x[i, :4], axis=-1, zi=[0, 0, 0, 0])
# .. but don't keep any of these values, just output zero at the beginning
y = numpy.zeros(x.shape)
# Apply the full filter to the rest of the signal, append it
for i in range(y.shape[0]):
y[i, 4:] = lfilter(numerator, denominator, x[i, 4:], axis=-1, zi=zf[i, :])[0]
return y.T
def cms(features, label=None, global_mean=None):
"""Performs cepstral mean subtraction
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: a logical vector
:param global_mean: pre-computed mean to use for feature normalization if given
:return: a feature stream
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if label.sum() == 0:
mu = numpy.zeros((features.shape[1]))
if global_mean is not None:
mu = global_mean
else:
mu = numpy.mean(features[label, :], axis=0)
features -= mu
def cmvn(features, label=None, global_mean=None, global_std=None):
"""Performs mean and variance normalization
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param global_mean: pre-computed mean to use for feature normalization if given
:param global_std: pre-computed standard deviation to use for feature normalization if given
:param label: a logical verctor
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if global_mean is not None and global_std is not None:
mu = global_mean
stdev = global_std
features -= mu
features /= stdev
elif not label.sum() == 0:
mu = numpy.mean(features[label, :], axis=0)
stdev = numpy.std(features[label, :], axis=0)
features -= mu
features /= stdev
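# Note: cms() and cmvn() normalise the feature matrix in place and return None:
#   cmvn(features)                 # zero mean, unit variance per dimension
#   cms(features, label=vad_mask)  # mean subtraction on selected frames only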
def stg(features, label=None, win=301):
"""Performs feature warping on a sliding window
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: label of selected frames to compute the Short Term Gaussianization, by default, al frames are used
:param win: size of the frame window to consider, must be an odd number to get a symetric context on left and right
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
speech_features = features[label, :]
add_a_feature = False
if win % 2 == 1:
# one feature per line
nframes, dim = numpy.shape(speech_features)
# If the number of frames is not enough for one window
if nframes < win:
# if the number of frames is not odd, duplicate the last frame
# if nframes % 2 == 1:
if not nframes % 2 == 1:
nframes += 1
add_a_feature = True
speech_features = numpy.concatenate((speech_features, [speech_features[-1, ]]))
win = nframes
# create the output feature stream
stg_features = numpy.zeros(numpy.shape(speech_features))
# Process first window
r = numpy.argsort(speech_features[:win, ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[: (win - 1) // 2] + 0.5) / win
        stg_features[: (win - 1) // 2, :] = stats.norm.ppf(arg, 0, 1)
# process all following windows except the last one
for m in range(int((win - 1) / 2), int(nframes - (win - 1) / 2)):
idx = list(range(int(m - (win - 1) / 2), int(m + (win - 1) / 2 + 1)))
foo = speech_features[idx, :]
            r = numpy.sum(foo < foo[(win - 1) // 2], axis=0) + 1
arg = (r - 0.5) / win
stg_features[m, :] = stats.norm.ppf(arg, 0, 1)
# Process the last window
r = numpy.argsort(speech_features[list(range(nframes - win, nframes)), ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[(win + 1) // 2: win, :] + 0.5) / win
stg_features[list(range(int(nframes - (win - 1) / 2), nframes)), ] = stats.norm.ppf(arg, 0, 1)
else:
# Raise an exception
raise Exception('Sliding window should have an odd length')
# wrapFeatures = np.copy(features)
if add_a_feature:
stg_features = stg_features[:-1]
features[label, :] = stg_features
def cep_sliding_norm(features, win=301, label=None, center=True, reduce=False):
"""
Performs a cepstal mean substitution and standard deviation normalization
in a sliding windows. MFCC is modified.
:param features: the MFCC, a numpy array
:param win: the size of the sliding windows
:param label: vad label if available
:param center: performs mean subtraction
:param reduce: performs standard deviation division
"""
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if numpy.sum(label) <= win:
if reduce:
cmvn(features, label)
else:
cms(features, label)
else:
d_win = win // 2
df = pandas.DataFrame(features[label, :])
r = df.rolling(window=win, center=True)
mean = r.mean().values
std = r.std().values
mean[0:d_win, :] = mean[d_win, :]
mean[-d_win:, :] = mean[-d_win-1, :]
std[0:d_win, :] = std[d_win, :]
std[-d_win:, :] = std[-d_win-1, :]
if center:
features[label, :] -= mean
if reduce:
features[label, :] /= std
def pre_emphasis(input_sig, pre):
"""Pre-emphasis of an audio signal.
:param input_sig: the input vector of signal to pre emphasize
:param pre: value that defines the pre-emphasis filter.
"""
if input_sig.ndim == 1:
return (input_sig - numpy.c_[input_sig[numpy.newaxis, :][..., :1],
input_sig[numpy.newaxis, :][..., :-1]].squeeze() * pre)
else:
return input_sig - numpy.c_[input_sig[..., :1], input_sig[..., :-1]] * pre
"""Generate a new array that chops the given array along the given axis
into overlapping frames.
This method has been implemented by <NAME>,
as part of the talk box toolkit
example::
segment_axis(arange(10), 4, 2)
array([[0, 1, 2, 3],
               [2, 3, 4, 5],
[4, 5, 6, 7],
[6, 7, 8, 9]])
:param a: the array to segment
:param length: the length of each frame
:param overlap: the number of array elements by which the frames should overlap
:param axis: the axis to operate on; if None, act on the flattened array
:param end: what to do with the last frame, if the array is not evenly
divisible into pieces. Options are:
- 'cut' Simply discard the extra values
- 'wrap' Copy values from the beginning of the array
- 'pad' Pad with a constant value
:param endvalue: the value to use for end='pad'
:return: a ndarray
The array is not copied unless necessary (either because it is unevenly
strided and being flattened or because end is set to 'pad' or 'wrap').
"""
if axis is None:
a = numpy.ravel(a) # may copy
axis = 0
l = a.shape[axis]
if overlap >= length:
raise ValueError("frames cannot overlap by more than 100%")
if overlap < 0 or length <= 0:
raise ValueError("overlap must be nonnegative and length must" +
"be positive")
if l < length or (l - length) % (length - overlap):
if l > length:
roundup = length + (1 + (l - length) // (length - overlap)) * (length - overlap)
rounddown = length + ((l - length) // (length - overlap)) * (length - overlap)
else:
roundup = length
rounddown = 0
assert rounddown < l < roundup
assert roundup == rounddown + (length - overlap) or (roundup == length and rounddown == 0)
a = a.swapaxes(-1, axis)
if end == 'cut':
a = a[..., :rounddown]
l = a.shape[0]
elif end in ['pad', 'wrap']: # copying will be necessary
s = list(a.shape)
s[-1] = roundup
b = numpy.empty(s, dtype=a.dtype)
b[..., :l] = a
if end == 'pad':
b[..., l:] = endvalue
elif end == 'wrap':
b[..., l:] = a[..., :roundup - l]
a = b
        a = a.swapaxes(-1, axis)
        l = a.shape[axis]
if l == 0:
raise ValueError("Not enough data points to segment array " +
"in 'cut' mode; try 'pad' or 'wrap'")
assert l >= length
assert (l - length) % (length - overlap) == 0
n = 1 + (l - length) // (length - overlap)
s = a.strides[axis]
new_shape = a.shape[:axis] + (n, length) + a.shape[axis + 1:]
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
try:
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
except TypeError:
a = a.copy()
# Shape doesn't change but strides does
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
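# Example, as in the docstring above:
#   segment_axis(numpy.arange(10), 4, 2)
#   -> array([[0, 1, 2, 3],
#             [2, 3, 4, 5],
#             [4, 5, 6, 7],
#             [6, 7, 8, 9]])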
def speech_enhancement(X, Gain, NN):
"""This program is only to process the single file seperated by the silence
section if the silence section is detected, then a counter to number of
buffer is set and pre-processing is required.
Usage: SpeechENhance(wavefilename, Gain, Noise_floor)
:param X: input audio signal
:param Gain: default value is 0.9, suggestion range 0.6 to 1.4,
higher value means more subtraction or noise redcution
:param NN:
:return: a 1-dimensional array of boolean that
is True for high energy frames.
Copyright 2014 <NAME> and <NAME>
"""
    if X.shape[0] < 512:  # should raise an exception here
return X
    num1 = 40  # disable buffer number
Alpha = 0.75 # original value is 0.9
FrameSize = 32 * 2 # 256*2
FrameShift = int(FrameSize / NN) # FrameSize/2=128
nfft = FrameSize # = FrameSize
Fmax = int(numpy.floor(nfft / 2) + 1) # 128+1 = 129
# arising hamming windows
Hamm = 1.08 * (0.54 - 0.46 * numpy.cos(2 * numpy.pi * numpy.arange(FrameSize) / (FrameSize - 1)))
y0 = numpy.zeros(FrameSize - FrameShift) # 128 zeros
Eabsn = numpy.zeros(Fmax)
Eta1 = Eabsn
###################################################################
# initial parameter for noise min
mb = numpy.ones((1 + FrameSize // 2, 4)) * FrameSize / 2 # 129x4 set four buffer * FrameSize/2
im = 0
Beta1 = 0.9024 # seems that small value is better;
pxn = numpy.zeros(1 + FrameSize // 2) # 1+FrameSize/2=129 zeros vector
###################################################################
old_absx = Eabsn
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[
numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
###################################################################
# add the pre-noise estimates
for i in range(200):
Frame += 1
fftn = fft(x * Hamm) # get its spectrum
absn = numpy.abs(fftn[0:Fmax]) # get its amplitude
# add the following part from noise estimation algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # Beta=0.9231 recursive pxn
im = (im + 1) % 40 # noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
# 0-2 vector shifted to 1 to 3
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
# over_sub_noise= oversubtraction factor
        # end of noise detection algorithm
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
if In_data.shape[0] < FrameShift: # to check file is out
EOF = 1
break
else:
x[FrameSize - FrameShift:FrameSize] = In_data # shift new 128 to position 129 to FrameSize location
# end of for loop for noise estimation
# end of prenoise estimation ************************
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
X1 = numpy.zeros(X.shape)
Frame = 0
while EOF == 0:
Frame += 1
xwin = x * Hamm
fftx = fft(xwin, nfft) # FrameSize FFT
absx = numpy.abs(fftx[0:Fmax]) # Fmax=129,get amplitude of x
argx = fftx[:Fmax] / (absx + numpy.spacing(1)) # normalize x spectrum phase
absn = absx
# add the following part from rainer algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # s Beta=0.9231 recursive pxn
im = int((im + 1) % (num1 * NN / 2)) # original =40 noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
Eabsn = pn
Gaina = Gain
temp1 = Eabsn * Gaina
Eta1 = Alpha * old_absx + (1 - Alpha) * numpy.maximum(absx - temp1, 0)
new_absx = (absx * Eta1) / (Eta1 + temp1) # wiener filter
old_absx = new_absx
ffty = new_absx * argx # multiply amplitude with its normalized spectrum
        y = numpy.real(numpy.fft.ifft(numpy.concatenate((ffty,
numpy.conj(ffty[numpy.arange(Fmax - 2, 0, -1)])))))
y[:FrameSize - FrameShift] = y[:FrameSize - FrameShift] + y0
y0 = y[FrameShift:FrameSize] # keep 129 to FrameSize point samples
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
z = 2 / NN * y[:FrameShift] # left channel is the original signal
z /= 1.15
z = numpy.minimum(z, 32767)
z = numpy.maximum(z, -32768)
index0 = numpy.arange(FrameShift * (Frame - 1), FrameShift * Frame)
if not all(index0 < X1.shape[0]):
idx = 0
while (index0[idx] < X1.shape[0]) & (idx < index0.shape[0]):
X1[index0[idx]] = z[idx]
idx += 1
else:
X1[index0] = z
if In_data.shape[0] == 0:
EOF = 1
else:
x[numpy.arange(FrameSize - FrameShift, FrameSize + In_data.shape[0] - FrameShift)] = In_data
X1 = X1[X1.shape[0] - X.shape[0]:]
# }
# catch{
# }
return X1
def vad_percentil(log_energy, percent):
"""
:param log_energy:
:param percent:
:return:
"""
thr = numpy.percentile(log_energy, percent)
return log_energy > thr, thr
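# Example: keep the frames whose log-energy lies above the 10th percentile:
#   label, thr = vad_percentil(log_energy, 10)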
def vad_energy(log_energy,
distrib_nb=3,
nb_train_it=8,
flooring=0.0001, ceiling=1.0,
alpha=2):
# center and normalize the energy
log_energy = (log_energy - numpy.mean(log_energy)) / numpy.std(log_energy)
# Initialize a Mixture with 2 or 3 distributions
world = Mixture()
# set the covariance of each component to 1.0 and the mean to mu + meanIncrement
world.cst = numpy.ones(distrib_nb) / (numpy.pi / 2.0)
world.det = numpy.ones(distrib_nb)
world.mu = -2 + 4.0 * numpy.arange(distrib_nb) / (distrib_nb - 1)
world.mu = world.mu[:, numpy.newaxis]
world.invcov = numpy.ones((distrib_nb, 1))
# set equal weights for each component
world.w = numpy.ones(distrib_nb) / distrib_nb
world.cov_var_ctl = copy.deepcopy(world.invcov)
# Initialize the accumulator
accum = copy.deepcopy(world)
# Perform nbTrainIt iterations of EM
for it in range(nb_train_it):
accum._reset()
# E-step
world._expectation(accum, log_energy)
# M-step
world._maximization(accum, ceiling, flooring)
# Compute threshold
threshold = world.mu.max() - alpha * numpy.sqrt(1.0 / world.invcov[world.mu.argmax(), 0])
# Apply frame selection with the current threshold
label = log_energy > threshold
return label, threshold
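# Note: vad_energy relies on a Mixture class (an EM-trained Gaussian mixture)
# that is defined elsewhere in the toolkit. A typical call, given per-frame
# log-energy values:
#   label, threshold = vad_energy(log_energy, distrib_nb=3, alpha=2)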
def vad_snr(sig, snr, fs=16000, shift=0.01, nwin=256):
"""Select high energy frames based on the Signal to Noise Ratio
of the signal.
Input signal is expected encoded on 16 bits
:param sig: the input audio signal
:param snr: Signal to noise ratio to consider
:param fs: sampling frequency of the input signal in Hz. Default is 16000.
:param shift: shift between two frames in seconds. Default is 0.01
:param nwin: number of samples of the sliding window. Default is 256.
"""
overlap = nwin - int(shift * fs)
sig /= 32768.
sig = speech_enhancement(numpy.squeeze(sig), 1.2, 2)
# Compute Standard deviation
sig += 0.1 * numpy.random.randn(sig.shape[0])
std2 = segment_axis(sig, nwin, overlap, axis=None, end='cut', endvalue=0).T
    std2 = numpy.std(std2, axis=0)
from os import path
import numpy as np
from numpy.testing import *
import datetime
class TestDateTime(TestCase):
def test_creation(self):
for unit in ['Y', 'M', 'W', 'B', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert dt1 == np.dtype('datetime64[750%s]' % unit)
dt2 = np.dtype('m8[%s]' % unit)
assert dt2 == np.dtype('timedelta64[%s]' % unit)
def test_divisor_conversion_year(self):
assert np.dtype('M8[Y/4]') == np.dtype('M8[3M]')
assert np.dtype('M8[Y/13]') == np.dtype('M8[4W]')
assert np.dtype('M8[3Y/73]') == np.dtype('M8[15D]')
def test_divisor_conversion_month(self):
assert np.dtype('M8[M/2]') == np.dtype('M8[2W]')
assert np.dtype('M8[M/15]') == np.dtype('M8[2D]')
assert np.dtype('M8[3M/40]') == np.dtype('M8[54h]')
def test_divisor_conversion_week(self):
assert np.dtype('m8[W/5]') == np.dtype('m8[B]')
assert np.dtype('m8[W/7]') == np.dtype('m8[D]')
assert np.dtype('m8[3W/14]') == np.dtype('m8[36h]')
assert np.dtype('m8[5W/140]') == np.dtype('m8[360m]')
def test_divisor_conversion_bday(self):
assert np.dtype('M8[B/12]') == np.dtype('M8[2h]')
assert np.dtype('M8[B/120]') == np.dtype('M8[12m]')
assert np.dtype('M8[3B/960]') == np.dtype('M8[270s]')
def test_divisor_conversion_day(self):
assert np.dtype('M8[D/12]') == np.dtype('M8[2h]')
assert np.dtype('M8[D/120]') == np.dtype('M8[12m]')
assert np.dtype('M8[3D/960]') == np.dtype('M8[270s]')
def test_divisor_conversion_hour(self):
assert np.dtype('m8[h/30]') == np.dtype('m8[2m]')
assert np.dtype('m8[3h/300]') == np.dtype('m8[36s]')
def test_divisor_conversion_minute(self):
assert np.dtype('m8[m/30]') == np.dtype('m8[2s]')
assert np.dtype('m8[3m/300]') == np.dtype('m8[600ms]')
def test_divisor_conversion_second(self):
assert np.dtype('m8[s/100]') == np.dtype('m8[10ms]')
assert np.dtype('m8[3s/10000]') == np.dtype('m8[300us]')
def test_divisor_conversion_fs(self):
assert np.dtype('M8[fs/100]') == np.dtype('M8[10as]')
self.assertRaises(ValueError, lambda : np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
self.assertRaises(ValueError, lambda : np.dtype('M8[as/10]'))
def test_creation_overflow(self):
date = '1980-03-23 20:00:00'
timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
for unit in ['ms', 'us', 'ns']:
timesteps *= 1000
x = np.array([date], dtype='datetime64[%s]' % unit)
assert_equal(timesteps, x[0].astype(np.int64),
err_msg='Datetime conversion error for unit %s' % unit)
assert_equal(x[0].astype(np.int64), 322689600000000000)
class TestDateTimeModulo(TestCase):
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_years(self):
timesteps = np.array([0,1,2], dtype='datetime64[Y]//10')
assert timesteps[0] == np.datetime64('1970')
assert timesteps[1] == np.datetime64('1980')
assert timesteps[2] == np.datetime64('1990')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_months(self):
timesteps = np.array([0,1,2], dtype='datetime64[M]//10')
assert timesteps[0] == np.datetime64('1970-01')
assert timesteps[1] == np.datetime64('1970-11')
assert timesteps[2] == np.datetime64('1971-09')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_weeks(self):
timesteps = np.array([0,1,2], dtype='datetime64[W]//3')
assert timesteps[0] == np.datetime64('1970-01-01')
assert timesteps[1] == np.datetime64('1970-01-22')
assert timesteps[2] == np.datetime64('1971-02-12')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_business_days(self):
timesteps = np.array([0,1,2], dtype='datetime64[B]//4')
assert timesteps[0] == np.datetime64('1970-01-01')
assert timesteps[1] == np.datetime64('1970-01-07')
assert timesteps[2] == np.datetime64('1971-01-13')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_days(self):
timesteps = np.array([0,1,2], dtype='datetime64[D]//17')
assert timesteps[0] == np.datetime64('1970-01-01')
assert timesteps[1] == np.datetime64('1970-01-18')
assert timesteps[2] == np.datetime64('1971-02-04')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_hours(self):
timesteps = np.array([0,1,2], dtype='datetime64[h]//17')
assert timesteps[0] == np.datetime64('1970-01-01 00')
assert timesteps[1] == np.datetime64('1970-01-01 17')
assert timesteps[2] == np.datetime64('1970-01-02 10')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_minutes(self):
timesteps = np.array([0,1,2], dtype='datetime64[m]//42')
assert timesteps[0] == np.datetime64('1970-01-01 00:00')
assert timesteps[1] == np.datetime64('1970-01-01 00:42')
assert timesteps[2] == np.datetime64('1970-01-01 01:24')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_seconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[s]//42')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:42')
assert timesteps[1] == np.datetime64('1970-01-01 00:01:24')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_milliseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[ms]//42')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.042')
assert timesteps[1] == np.datetime64('1970-01-01 00:01:00.084')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_microseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[us]//42')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000042')
assert timesteps[1] == np.datetime64('1970-01-01 00:01:00.000084')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_nanoseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[ns]//42')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000042')
assert timesteps[1] == np.datetime64('1970-01-01 00:01:00.000000084')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_picoseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[ps]//42')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000000000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000000042')
assert timesteps[1] == np.datetime64('1970-01-01 00:01:00.000000000084')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_femtoseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[fs]//42')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000000000000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000000000042')
assert timesteps[1] == np.datetime64('1970-01-01 00:01:00.000000000000084')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_attoseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[as]//42')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000000000000000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000000000000000042')
assert timesteps[1] == np.datetime64('1970-01-01 00:01:00.000000000000000084')
class TestTimeDeltaSetters(TestCase):
def setUp(self):
self.timedeltas = np.ones(3, dtype='m8[ms]')
@dec.skipif(True, "timedelta64 takes only 1 arg.")
def test_set_timedelta64_from_int(self):
self.timedeltas[0] = 12
assert self.timedeltas[0] == np.timedelta64(12, 'ms')
@dec.skipif(True, "timedelta64 takes only 1 arg.")
def test_set_timedelta64_from_datetime_timedelta(self):
self.timedeltas[1] = datetime.timedelta(0, 0, 13000)
assert self.timedeltas[1] == np.timedelta64(13, 'ms')
@dec.skipif(True, "timedelta64 takes only 1 arg.")
def test_set_timedelta64_from_string(self):
self.timedeltas[2] = '0:00:00.014'
assert self.timedeltas[2] == np.timedelta64(14, 'ms')
class TestTimeDeltaGetters(TestCase):
def setUp(self):
self.timedeltas = np.array([12, 13, 14], 'm8[ms]')
@dec.knownfailureif(True, "Fails")
def test_get_str_from_timedelta64(self):
assert str(self.timedeltas[0]) == '0:00:00.012'
assert str(self.timedeltas[1]) == '0:00:00.013'
assert str(self.timedeltas[2]) == '0:00:00.014'
@dec.knownfailureif(True, "Fails")
def test_get_repr_from_timedelta64(self):
assert repr(self.timedeltas[0]) == "timedelta64(12, 'ms')"
assert repr(self.timedeltas[1]) == "timedelta64(13, 'ms')"
assert repr(self.timedeltas[2]) == "timedelta64(14, 'ms')"
def test_get_str_from_timedelta64_item(self):
assert str(self.timedeltas[0].item()) == '0:00:00.012000'
assert str(self.timedeltas[1].item()) == '0:00:00.013000'
assert str(self.timedeltas[2].item()) == '0:00:00.014000'
def test_get_repr_from_timedelta64_item(self):
assert repr(self.timedeltas[0].item()) == 'datetime.timedelta(0, 0, 12000)'
assert repr(self.timedeltas[1].item()) == 'datetime.timedelta(0, 0, 13000)'
assert repr(self.timedeltas[2].item()) == 'datetime.timedelta(0, 0, 14000)'
@dec.knownfailureif(True, "Fails")
def test_get_str_from_timedelta64_array(self):
assert str(self.timedeltas) == '[0:00:00.012 0:00:00.014 0:00:00.014]'
@dec.knownfailureif(True, "Fails")
def test_get_repr_from_timedelta64_array(self):
assert repr(self.timedeltas) == 'array([12, 13, 14], dtype="timedelta64[ms]")'
class TestTimeDeltaComparisons(TestCase):
def setUp(self):
self.timedeltas = np.array([12, 13, 14], 'm8[ms]')
def test_compare_timedelta64_to_timedelta64_array(self):
comparison = (self.timedeltas == np.array([12, 13, 13], 'm8[ms]'))
assert_equal(comparison, [True, True, False])
@dec.skipif(True, "timedelta64 takes only 1 arg.")
def test_compare_timedelta64_to_timedelta64_broadcast(self):
comparison = (self.timedeltas == np.timedelta64(13, 'ms'))
assert_equal(comparison, [False, True, True])
@dec.knownfailureif(True, "Returns FALSE")
def test_compare_timedelta64_to_string_broadcast(self):
comparison = (self.timedeltas == '0:00:00.012')
assert_equal(comparison, [True, False, True])
class TestDateTimeAstype(TestCase):
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_years(self):
datetimes = np.array([0, 40, 15], dtype="datetime64[M]")
assert_equal(datetimes.astype('datetime64[Y]'), np.array([0, 3, 2], dtype="datetime64[Y]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_months(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[Y]")
assert_equal(datetimes.astype('datetime64[M]'), np.array([0, 36, 24], dtype="datetime64[M]"))
datetimes = np.array([0, 100, 70], dtype="datetime64[D]")
assert_equal(datetimes.astype('datetime64[M]'), np.array([0, 3, 2], dtype="datetime64[M]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_weeks(self):
datetimes = np.array([0, 22, 15], dtype="datetime64[D]")
assert_equal(datetimes.astype('datetime64[W]'), np.array([0, 3, 2], dtype="datetime64[W]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_business_days(self):
# XXX: There will probably be a more direct way to check for
# *Not a Time* values.
datetimes = np.arange(5, dtype='datetime64[D]')
expected_array_str = '[1970-01-01 1970-01-02 NaT NaT 1970-01-05]'
assert_equal(datetimes.astype('datetime64[B]'), expected_array_str)
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_days(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[W]")
assert_equal(datetimes.astype('datetime64[D]'), np.array([0, 21, 7], dtype="datetime64[D]"))
datetimes = np.array([0, 37, 24], dtype="datetime64[h]")
assert_equal(datetimes.astype('datetime64[D]'), np.array([0, 3, 2], dtype="datetime64[D]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_hours(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[D]")
assert_equal(datetimes.astype('datetime64[h]'), np.array([0, 36, 24], dtype="datetime64[D]"))
datetimes = np.array([0, 190, 153], dtype="datetime64[m]")
assert_equal(datetimes.astype('datetime64[h]'), np.array([0, 3, 2], dtype="datetime64[h]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_minutes(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[h]")
assert_equal(datetimes.astype('datetime64[m]'), np.array([0, 180, 120], dtype="datetime64[m]"))
datetimes = np.array([0, 190, 153], dtype="datetime64[s]")
assert_equal(datetimes.astype('datetime64[m]'), np.array([0, 3, 2], dtype="datetime64[m]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_seconds(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[m]")
assert_equal(datetimes.astype('datetime64[s]'), np.array([0, 180, 120], dtype="datetime64[s]"))
datetimes = np.array([0, 3200, 2430], dtype="datetime64[ms]")
assert_equal(datetimes.astype('datetime64[s]'), np.array([0, 3, 2], dtype="datetime64[s]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_milliseconds(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[s]")
assert_equal(datetimes.astype('datetime64[ms]'), np.array([0, 3000, 2000], dtype="datetime64[ms]"))
datetimes = np.array([0, 3200, 2430], dtype="datetime64[us]")
assert_equal(datetimes.astype('datetime64[ms]'), np.array([0, 3, 2], dtype="datetime64[ms]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_microseconds(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[ms]")
assert_equal(datetimes.astype('datetime64[us]'), np.array([0, 3000, 2000], dtype="datetime64[us]"))
datetimes = np.array([0, 3200, 2430], dtype="datetime64[ns]")
assert_equal(datetimes.astype('datetime64[us]'), np.array([0, 3, 2], dtype="datetime64[us]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_nanoseconds(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[us]")
assert_equal(datetimes.astype('datetime64[ns]'), np.array([0, 3000, 2000], dtype="datetime64[ns]"))
datetimes = np.array([0, 3200, 2430], dtype="datetime64[ps]")
assert_equal(datetimes.astype('datetime64[ns]'), np.array([0, 3, 2], dtype="datetime64[ns]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_picoseconds(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[ns]")
assert_equal(datetimes.astype('datetime64[ps]'), np.array([0, 3000, 2000], dtype="datetime64[ps]"))
datetimes = np.array([0, 3200, 2430], dtype="datetime64[ns]")
assert_equal(datetimes.astype('datetime64[ps]'), np.array([0, 3, 2], dtype="datetime64[ps]"))
@dec.knownfailureif(True, "datetime converions fail.")
def test_datetime_astype_femtoseconds(self):
datetimes = np.array([0, 3, 2], dtype="datetime64[ps]")
assert_equal(datetimes.astype('datetime64[fs]'), np.array([0, 3000, 2000], dtype="datetime64[fs]"))
        datetimes = np.array([0, 3200, 2430], dtype="datetime64[ps]")
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from numpy.testing import assert_array_equal
import skbeam.core.mask as mask
logger = logging.getLogger(__name__)
def test_threshold_mask():
xdim = 10
ydim = 10
stack_size = 10
img_stack = np.random.randint(1, 3, (stack_size, xdim, ydim))
img_stack[0][0, 1] = 100
img_stack[0][9, 1] = 98
img_stack[6][8, 8] = 75
img_stack[7][6, 6] = 80
th = mask.threshold(img_stack, 75)
for final in th:
pass
y = np.ones_like(img_stack[0])
y[0, 1] = 0
y[9, 1] = 0
y[8, 8] = 0
y[6, 6] = 0
assert_array_equal(final, y)
def test_bad_to_nan_gen():
xdim = 2
ydim = 2
stack_size = 5
img_stack = np.random.randint(1, 3, (stack_size, xdim, ydim))
bad_list = [1, 3]
img = mask.bad_to_nan_gen(img_stack, bad_list)
y = []
for im in img:
y.append(im)
assert np.isnan(np.asarray(y)[1]).all()
assert np.isnan(np.asarray(y)[3]).all()
assert not np.isnan(np.asarray(y)[4]).all()
def test_margin():
size = (10, 10)
edge = 1
mask1 = mask.margin(size, edge)
mask2 = np.zeros(size)
mask2[:, :edge] = 1
mask2[:, -edge:] = 1
mask2[:edge, :] = 1
mask2[-edge:, :] = 1
mask2 = mask2.astype(bool)
assert_array_equal(mask1, ~mask2)
def test_ring_blur_mask():
from skbeam.core import recip
g = recip.geo.Geometry(
detector='Perkin', pixel1=.0002, pixel2=.0002,
dist=.23,
poni1=.209, poni2=.207,
# rot1=.0128, rot2=-.015, rot3=-5.2e-8,
wavelength=1.43e-11
)
r = g.rArray((2048, 2048))
# make some sample data
Z = 100 * np.cos(50 * r) ** 2 + 150
np.random.seed(10)
pixels = []
for i in range(0, 100):
a, b = np.random.randint(low=0, high=2048), \
np.random.randint(low=0, high=2048)
        if np.random.random()
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.cluster.kmeans import KMeansClusterer
from scipy.spatial import distance
from stemming.porter2 import stem
import pandas as pd
import numpy as np
import re
import os
import io
import math
import functools
def get_max_number_keywords(list_of_keywords):
n_keywords = []
for keywords in list_of_keywords:
n_keywords.append(len(keywords.split(',')))
return max(n_keywords)
def get_words_frequency(full_text):
# Counting words
words_no_symbols = clean_text_and_tokenize(full_text)
final_words, count = np.unique(words_no_symbols, return_counts=True)
count = map(lambda n: float(n)/len(final_words), count)
return zip(final_words, count)
def clean_text_and_tokenize(text):
words = word_tokenize(text)
words_lower = map(lambda w: w.lower(), words)
words_no_stop = filter(lambda w: w not in stopwords.words('english'),
words_lower)
words_no_symbols = filter(re.compile(r'[a-z1-9].*').search, words_no_stop)
return map(stem, words_no_symbols)
def sum_word_freq(words_in_sentence, word_freq):
# Sum the frequency of words in a sentence
n_words = len(words_in_sentence)
sum_freq = sum([word_freq[w]/n_words for w in words_in_sentence
if w in word_freq])
return sum_freq
def get_keywords_similarity(words_in_sentence, keywords):
keywords_match = []
for words in keywords:
matches = map(lambda w: 1 if w in words_in_sentence else 0, words)
keywords_match.append(2.0 * sum(matches) / (
len(words) + len(words_in_sentence)))
return keywords_match
def get_section_similarity(words_in_sentence, words_in_section):
matches = map(lambda w: 1 if w in words_in_section else 0,
words_in_sentence)
if len(matches) <= 0:
return 0
return 2.0 * sum(matches)/(len(words_in_sentence) + len(words_in_section))
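# Both similarity scores above are Dice coefficients, 2*|A & B| / (|A| + |B|).
# For example, words_in_sentence = ['cell', 'growth', 'rate'] against
# words_in_section = ['growth', 'rate', 'model'] gives 2*2 / (3+3) ~= 0.67.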
def get_title(text):
return text.split('\n')[0]
def get_highlights(file_path):
""" Read the txt file with the research highlights of the respective files
"""
text_file = io.open(file_path, mode='r', encoding='utf-8')
highlights = text_file.read().split('\n')
# highlights = '^~_'.join(text_file.read().split('\n'))
text_file.close()
return highlights
def get_session_lines(text, session):
lines = text.split('\n')
if session == 'a': # abstract
r_start = re.compile("^Abstract$")
r_end = re.compile("Keywords|Abbreviations")
elif session == 'i': # introduction
r_start = re.compile(r'1.\s+Introduction\s*')
r_end = re.compile(r'2.\s+[A-Z0-9][a-zA-Z0-9]+.*')
else: # conclusion
r_start = re.compile(r'[1-9][0-9]?.\s+(Conclu.*|Discussion.*|Summary'
'*|.*conclu.*|.*future.*.|Results.*|Final.*)')
r_end = re.compile(r'(Append.*|^1$)')
session_lines = []
candidate_sessions = []
found_session = False
for i in range(len(lines)):
if r_start.match(lines[i]):
candidate_sessions.append(i)
found_session = True
if found_session:
session_lines.append(candidate_sessions[-1])
i = session_lines[0] + 1
while i < len(lines) and not(r_end.match(lines[i])):
session_lines.append(i)
i += 1
return session_lines
def extract_keywords(text):
""" After finding the string "Keywords", each line
is a keyword until an empty line is found """
keywords = list()
reading_keywords = False
all_lines = text.split('\n')
for line in all_lines:
if 'Keywords' in line:
reading_keywords = True
# nothing in line
elif not line and reading_keywords:
return ','.join(keywords)
elif reading_keywords:
keywords.append(line)
return ','.join(keywords)
def extract_content(path):
"""
Extracts the keywords, highlights and the text in a article
'path': name of the file
"""
article = io.open(path, mode="r", encoding="utf-8")
abstract, introduction, conclusion, final_text = '', '', '', ''
full_text = article.read()
full_text_split = np.array(full_text.split('\n'))
abstract_lines = get_session_lines(full_text, 'a')
abstract = '\n'.join(full_text_split[abstract_lines])
# get the lines containing the introduction
intro_lines = get_session_lines(full_text, 'i')
introduction = '\n'.join(full_text_split[intro_lines])
text_without_intro = '\n'.join(full_text_split[(intro_lines[-1]+1):])
text_without_intro_split = np.array(text_without_intro.split('\n'))
conclu_lines = get_session_lines(text_without_intro, 'c')
if conclu_lines:
conclusion = '\n'.join(text_without_intro_split[conclu_lines])
text_without_conclu_1 = '\n'.join(text_without_intro_split[
0:conclu_lines[0]])
text_without_conclu_2 = '' if(conclu_lines[-1]+1) >= \
len(text_without_intro_split) else \
'\n'.join(text_without_intro_split[(conclu_lines[-1]+1):])
final_text = text_without_conclu_1 + text_without_conclu_2
else:
final_text = text_without_intro
return get_title(full_text), extract_keywords(full_text), abstract, \
introduction, conclusion, final_text
def create_sentences_table(list_of_files, highlights=False):
if highlights:
cols = ['title', 'keywords', 'abstract', 'introduction', 'conclusion',
'text', 'highlights']
df = pd.DataFrame([list(extract_content(f)) + [get_highlights(f)]
for f in list_of_files], columns=cols)
else:
cols = ['title', 'keywords', 'abstract', 'introduction', 'conclusion',
'text']
df = pd.DataFrame([list(extract_content(f)
) for f in list_of_files], columns=cols)
df.to_csv("articles_highlights.csv", sep='\t', encoding='utf-8',
index=False)
def calc_df(word, sentences):
n_sents_with_word = 0
for sent in sentences:
n_sents_with_word += 1 if word in sent else 0
return n_sents_with_word
def calc_tf_idf_word(word, sentences):
df = calc_df(word, sentences)
N = len(sentences)
tfidf_vals = []
for sent in sentences:
tf = float(sent.count(word)) / len(sent)
idf = math.log(float(N) / df)
tfidf_vals.append(tf * idf)
    return np.array(tfidf_vals)
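# Worked example: with sentences = [['cell', 'growth'], ['growth', 'rate']],
# the word 'cell' appears in 1 of N=2 sentences, so idf = log(2/1); in the
# first sentence tf = 1/2, giving tf-idf = 0.5 * log(2) ~= 0.35, and 0.0 in
# the second sentence.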
"""DataArray provides a thin wrapper around multidimensional arrays and metadata
"""
from logging import getLogger
import numpy as np # type: ignore
from smif.exception import SmifDataError, SmifDataMismatchError
from smif.metadata.spec import Spec
# Import pandas, xarray if available (optional dependencies)
try:
import pandas # type: ignore
import xarray # type: ignore
except ImportError:
pass
INSTALL_WARNING = """\
Please install pandas and xarray to access smif.DataArray
data as pandas.DataFrame or xarray.DataArray. Try running:
pip install smif[data]
or:
conda install pandas xarray
"""
class DataArray():
"""DataArray provides access to input/parameter/results data, with conversions to common
python data libraries (for example: numpy, pandas, xarray).
Attributes
----------
spec: smif.metadata.spec.Spec
data: numpy.ndarray
"""
def __init__(self, spec: Spec, data: np.ndarray):
self.logger = getLogger(__name__)
if not hasattr(data, 'shape'):
self.logger.debug("Data is not an numpy.ndarray")
            data = np.array(data)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Any, Iterable
import numpy
__author__ = "<NAME>"
__all__ = ["PrioritisedBuffer"]
__doc__ = r"""
"""
import random
from neodroidagent.common.memory.data_structures.sum_tree import SumTree
class PrioritisedBuffer:
"""
"""
def __init__(
self,
capacity: int,
per_epsilon: float = 0.01,
per_alpha: float = 0.6,
per_beta: float = 0.4,
per_beta_increment_per_sampling: float = 0.000,
max_abs_dist: float = 1.0,
):
"""
@param capacity:
@param per_epsilon:
@param per_alpha:
@param per_beta:
@param per_beta_increment_per_sampling:
@param max_abs_dist:
"""
self._epsilon = per_epsilon
self._alpha = per_alpha
self._beta = per_beta
self._beta_increment_per_sampling = per_beta_increment_per_sampling
self._max_abs_dist = max_abs_dist
self._tree = SumTree(capacity)
def _get_priority(self, dist: float) -> float:
"""
@param dist:
@return:
"""
abs_dist = numpy.abs(dist) + self._epsilon
if self._max_abs_dist:
abs_dist = min(abs_dist, self._max_abs_dist)
return abs_dist ** self._alpha
def add(self, sample: Any, dist: float) -> None:
"""
@param sample:
        @param dist:
@return:
"""
self._tree.push(sample, self._get_priority(dist))
def sample(self, num: int) -> Iterable:
"""
@param num:
@return:
"""
segment = self._tree.total / num
data = []
leaf_indices = []
# priorities = []
self._beta = numpy.min([1.0, self._beta + self._beta_increment_per_sampling])
for i in range(num):
(leaf_index, _, _, data_) = self._tree.get(
random.uniform(segment * i, segment * (i + 1)), normalised_sum=False
)
# priorities.append(priority)
data.append(data_)
leaf_indices.append(leaf_index)
"""
sampling_probabilities = priorities / self._tree.total
weights = numpy.power(self._tree._num_entries * sampling_probabilities, -self._beta)
weights /= (weights.max() + 1e-10) # Normalize for stability
"""
self._last_leaf_indices = leaf_indices
return data
def update_last_batch(self, errors: Iterable) -> None:
"""
@param errors:
@return:
"""
for leaf_index, error in zip(self._last_leaf_indices, errors):
self.update(leaf_index, error)
def update(self, leaf_index: int, error: float) -> None:
"""
@param leaf_index:
@param error:
@return:
"""
self._tree.update_leaf(leaf_index, self._get_priority(error))
def __len__(self) -> int:
"""
@return:
"""
return len(self._tree)
@property
def capacity(self) -> int:
"""
@return:
@rtype:
"""
return self._tree.capacity
if __name__ == "__main__":
def stest_experience_buffer():
"""
"""
capacity = 2 ** 8
batch_size = 4
rb = PrioritisedBuffer(capacity)
for i in range(capacity):
a = (i, i, i, i, i)
rb.add(a, sum(a))
print(rb.sample(batch_size))
        rb.update_last_batch(numpy.random.rand(capacity))
from utils.image_utils import concatenate_images_by_width
import numpy as np
import cv2
class Image:
def __init__(self, config, input_imgs, label=None, logit=None):
self.config = config
self.baseline_img = input_imgs[:, :, :3]
self.current_img = input_imgs[:, :, 3:]
self.label = label
self.logit = logit
def apply_box(self, image, bounding_box, color):
"""
:param image:
:param bounding_box: (left_x, top_y, right_x, bottom_y)
:param color:
:return:
"""
line_width = self.config.figure.line_width
left_x, top_y, right_x, bottom_y = bounding_box
try:
mid_x = int((left_x + right_x) / 2)
mid_y = int((top_y + bottom_y) / 2)
            image[mid_y - line_width:mid_y + line_width, mid_x - line_width:mid_x + line_width] = color
image[top_y - line_width:top_y, left_x:right_x] = color
image[top_y:bottom_y, left_x:left_x + line_width] = color
image[bottom_y - line_width:bottom_y, left_x:right_x] = color
image[top_y:bottom_y, right_x:right_x + line_width] = color
except Exception as exception:
pass
return image
def get_bounding_box(self, y_idx, x_idx, yxhw):
num_grid_cells_width = self.config.model.model_output_size[1]
num_grid_cells_height = self.config.model.model_output_size[0]
image_width = self.config.model.input_shape[1]
image_height = self.config.model.input_shape[0]
grid_cell_width = image_width / num_grid_cells_width
grid_cell_height = image_height / num_grid_cells_height
my, mx, h, w = yxhw[0], yxhw[1], yxhw[2], yxhw[3]
# print(yxhw)
mx = (mx * grid_cell_width) + (x_idx * grid_cell_width)
my = (my * grid_cell_height) + (y_idx * grid_cell_height)
h = h * grid_cell_height
w = w * grid_cell_width
# print(mx, my, h, w)
lx = mx - (w / 2)
rx = mx + (w / 2)
ty = my - (h / 2)
by = my + (h / 2)
# print(lx, rx, ty, by)
ret = int(lx), int(ty), int(rx), int(by)
return ret
def get_color(self, categories):
category = np.argmax(categories)
return self.config.figure.color_map[category]
def apply_label(self, image, label):
for _y in range(label.shape[0]):
for _x in range(label.shape[1]):
has_defect = label[_y, _x, 0]
if has_defect > 0.5:
bounding_box = self.get_bounding_box(_y, _x, label[_y, _x, 5:])
color = self.get_color(label[_y, _x, 1:5])
image = self.apply_box(image, bounding_box, color)
return image
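    # Label layout assumed by apply_label: per grid cell, index 0 is the
    # objectness flag, indices 1:5 are one-hot category scores, and indices
    # 5:9 are (y, x, h, w) relative to that grid cell.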
def get_has_defect_graph(self, grid):
white_space = np.transpose([Image.sigmoid(grid[:, :, 0]) * 255] * 3, [1, 2, 0])
new_size = (self.current_img.shape[1], self.current_img.shape[0])
return cv2.resize(white_space, new_size, interpolation=cv2.INTER_NEAREST)
def get_log_image(self):
first_image = self.baseline_img
second_image = self.apply_label(np.copy(self.current_img), self.label) if self.label is not None else None
        third_image = self.apply_label(np.copy(self.current_img), self.logit) if self.logit is not None else None
# Copyright 2021 Jij Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
import openjij
import openjij.model
from openjij.sampler import BaseSampler
from openjij.utils.decorator import deprecated_alias
from openjij.utils.graph_utils import qubo_to_ising
import cxxjij
import dimod
import cimod
"""
This module contains Simulated Annealing sampler.
"""
class SASampler(BaseSampler):
"""Sampler with Simulated Annealing (SA).
Args:
beta_min (float):
Minmum beta (inverse temperature).
You can overwrite in methods .sample_*.
beta_max (float):
Maximum beta (inverse temperature).
You can overwrite in methods .sample_*.
num_reads (int):
number of sampling (algorithm) runs. defaults None.
You can overwrite in methods .sample_*.
num_sweeps (int):
number of MonteCarlo steps during SA. defaults None.
You can overwrite in methods .sample_*.
schedule_info (dict):
Information about an annealing schedule.
Raises:
        ValueError: If the schedule is invalid, i.e. it is
            - not a list or numpy.ndarray,
            - not a list of tuples (beta : float, step_length : int), or
            - contains a beta less than zero.
"""
@property
def parameters(self):
return {
'beta_min': ['parameters'],
'beta_max': ['parameters'],
}
def __init__(self,
beta_min=None, beta_max=None,
num_sweeps=1000, schedule=None,
num_reads=1):
self.default_params = {
'beta_min': beta_min,
'beta_max': beta_max,
'num_sweeps': num_sweeps,
'schedule': schedule,
'num_reads': num_reads
}
self.params = {
'beta_min': beta_min,
'beta_max': beta_max,
'num_sweeps': num_sweeps,
'schedule': schedule,
'num_reads': num_reads
}
self._make_system = {
'singlespinflip': cxxjij.system.make_classical_ising,
'singlespinflippolynomial': cxxjij.system.make_classical_ising_polynomial,
'swendsenwang': cxxjij.system.make_classical_ising
}
self._algorithm = {
'singlespinflip': cxxjij.algorithm.Algorithm_SingleSpinFlip_run,
'singlespinflippolynomial': cxxjij.algorithm.Algorithm_SingleSpinFlip_run,
'swendsenwang': cxxjij.algorithm.Algorithm_SwendsenWang_run
}
def _convert_validation_schedule(self, schedule):
"""Checks if the schedule is valid and returns cxxjij schedule
"""
if not isinstance(schedule, (list, np.ndarray)):
    raise ValueError("schedule should be a list or numpy.ndarray")
if isinstance(schedule[0], cxxjij.utility.ClassicalSchedule):
return schedule
if len(schedule[0]) != 2:
raise ValueError(
    "schedule must be a list of (beta: float, step_length: int) tuples")
# schedule validation 0 <= beta
beta = np.array(schedule).T[0]
if not np.all(0 <= beta):
raise ValueError("all betas in the schedule must satisfy 0 <= beta.")
# convert to cxxjij.utility.ClassicalSchedule
cxxjij_schedule = []
for beta, step_length in schedule:
_schedule = cxxjij.utility.ClassicalSchedule()
_schedule.one_mc_step = step_length
_schedule.updater_parameter.beta = beta
cxxjij_schedule.append(_schedule)
return cxxjij_schedule
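# Illustrative example (values chosen only for demonstration): a user-supplied schedule
# is a list of (beta, step_length) pairs, e.g.
#   schedule = [(0.1, 10), (1.0, 10), (10.0, 20)]
#   res = sampler.sample_ising(h, J, schedule=schedule)
# which runs 10 Monte Carlo steps at beta=0.1, 10 at beta=1.0 and 20 at beta=10.0
# (assuming the usual sample_ising wrapper forwards keyword arguments to sample()).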
def sample(self, bqm, beta_min=None, beta_max=None,
num_sweeps=None, num_reads=None, schedule=None,
initial_state=None, updater=None,
sparse=False,
reinitialize_state=True, seed=None,
):
"""sample Ising model.
Args:
bqm (oj.BinaryQuadraticModel): binary quadratic model
beta_min (float): minimal value of inverse temperature
beta_max (float): maximum value of inverse temperature
num_sweeps (int): number of sweeps
num_reads (int): number of reads
schedule (list): annealing schedule as a list of (beta, step_length) tuples
initial_state (dict): initial state
updater(str): updater algorithm
reinitialize_state (bool): if true reinitialize state for each run
seed (int): seed for Monte Carlo algorithm
Returns:
:class:`openjij.sampler.response.Response`: results
Examples:
for Ising case::
>>> h = {0: -1, 1: -1, 2: 1, 3: 1}
>>> J = {(0, 1): -1, (3, 4): -1}
>>> sampler = oj.SASampler()
>>> res = sampler.sample_ising(h, J)
for QUBO case::
>>> Q = {(0, 0): -1, (1, 1): -1, (2, 2): 1, (3, 3): 1, (4, 4): 1, (0, 1): -1, (3, 4): 1}
>>> sampler = oj.SASampler()
>>> res = sampler.sample_qubo(Q)
"""
#Set default updater
if updater is None:
updater='single spin flip'
_updater_name = updater.lower().replace('_', '').replace(' ', '')
# swendsen wang algorithm runs only on sparse ising graphs.
if _updater_name == 'swendsenwang' or sparse:
sparse = True
else:
sparse = False
if type(bqm) == dimod.BinaryQuadraticModel:
bqm = openjij.BinaryQuadraticModel(dict(bqm.linear), dict(bqm.quadratic), bqm.offset, bqm.vartype, sparse=sparse)
if sparse and bqm.sparse == False:
# convert to sparse bqm
bqm = openjij.BinaryQuadraticModel(bqm.linear, bqm.quadratic, bqm.offset, bqm.vartype, sparse=True)
# alias
model = bqm
ising_graph, offset = model.get_cxxjij_ising_graph()
self._set_params(
beta_min=beta_min, beta_max=beta_max,
num_sweeps=num_sweeps, num_reads=num_reads,
schedule=schedule
)
# set annealing schedule -------------------------------
if self.params['schedule'] is None:
self.params['schedule'], beta_range = geometric_ising_beta_schedule(
model=model,
beta_max=self.params['beta_max'],
beta_min=self.params['beta_min'],
num_sweeps=self.params['num_sweeps']
)
self.schedule_info = {
'beta_max': beta_range[0],
'beta_min': beta_range[1],
'num_sweeps': self.params['num_sweeps']
}
else:
self.params['schedule'] = self._convert_validation_schedule(self.params['schedule'])
self.schedule_info = {'schedule': 'custom schedule'}
# ------------------------------- set annealing schedule
# make init state generator --------------------------------
if initial_state is None:
def _generate_init_state(): return ising_graph.gen_spin(seed) if seed != None else ising_graph.gen_spin()
else:
if isinstance(initial_state, dict):
initial_state = [initial_state[k] for k in model.variables]
_init_state = np.array(initial_state)
# validate initial_state size
if len(initial_state) != ising_graph.size():
raise ValueError(
"the size of the initial state should be {}"
.format(ising_graph.size()))
def _generate_init_state(): return np.array(_init_state)
# -------------------------------- make init state generator
# choose updater -------------------------------------------
_updater_name = updater.lower().replace('_', '').replace(' ', '')
if _updater_name not in self._make_system:
raise ValueError('updater must be one of "single spin flip" or "swendsen wang"')
algorithm = self._algorithm[_updater_name]
sa_system = self._make_system[_updater_name](_generate_init_state(), ising_graph)
# ------------------------------------------- choose updater
response = self._cxxjij_sampling(
model, _generate_init_state,
algorithm, sa_system,
reinitialize_state, seed
)
response.info['schedule'] = self.schedule_info
return response
def sample_hubo(self, J, vartype=None,
beta_min=None, beta_max=None,
num_sweeps=None, num_reads=None, schedule=None,
initial_state=None, updater=None,
reinitialize_state=True, seed=None):
    """sampling from a higher-order unconstrained binary optimization (HUBO) problem.
Args:
J (dict): Interactions.
vartype (str, openjij.VarType): "SPIN" or "BINARY".
beta_min (float, optional): Minimum beta (initial inverse temperature). Defaults to None.
beta_max (float, optional): Maximum beta (final inverse temperature). Defaults to None.
schedule (list, optional): schedule list. Defaults to None.
num_sweeps (int, optional): number of sweeps. Defaults to None.
num_reads (int, optional): number of reads. Defaults to 1.
initial_state (list, optional): initial state. Defaults to None.
reinitialize_state (bool): if true reinitialize state for each run
seed (int, optional): seed for Monte Carlo algorithm. Defaults to None.
Returns:
:class:`openjij.sampler.response.Response`: results
Examples::
for Ising case::
>>> sampler = oj.SASampler()
>>> J = {(0,): -1, (0, 1): -1, (0, 1, 2): 1}
>>> response = sampler.sample_hubo(J, "SPIN")
for Binary case::
>>> sampler = oj.SASampler()
>>> J = {(0,): -1, (0, 1): -1, (0, 1, 2): 1}
>>> response = sampler.sample_hubo(J, "BINARY")
"""
if str(type(J)) == str(type(openjij.BinaryPolynomialModel({}, "SPIN"))):
if vartype is not None:
raise ValueError("vartype must not be specified")
model = J
elif str(type(J)) == str(type(cimod.BinaryPolynomialModel({}, "SPIN"))):
if vartype is not None:
raise ValueError("vartype must not be specified")
model = J
else:
model = openjij.BinaryPolynomialModel(J, vartype)
# make init state generator --------------------------------
if initial_state is None:
if model.vartype == openjij.SPIN:
def _generate_init_state(): return cxxjij.graph.Polynomial(model.num_variables).gen_spin(seed) if seed != None else cxxjij.graph.Polynomial(model.num_variables).gen_spin()
elif model.vartype == openjij.BINARY:
def _generate_init_state(): return cxxjij.graph.Polynomial(model.num_variables).gen_binary(seed) if seed != None else cxxjij.graph.Polynomial(model.num_variables).gen_binary()
else:
raise ValueError("Unknown vartype detected")
else:
if isinstance(initial_state, dict):
initial_state = [initial_state[k] for k in model.indices]
def _generate_init_state(): return np.array(initial_state)
# -------------------------------- make init state generator
# determine system class and algorithm --------------------------------
if model.vartype == openjij.SPIN:
if updater is None or updater == "single spin flip":
sa_system = cxxjij.system.make_classical_ising_polynomial(_generate_init_state(), model.to_serializable())
algorithm = cxxjij.algorithm.Algorithm_SingleSpinFlip_run
elif updater == "k-local":
raise ValueError("k-local update is only supported for binary variables")
else:
raise ValueError("Unknown updater name")
elif model.vartype == openjij.BINARY:
if updater == "k-local" or updater is None:
sa_system = cxxjij.system.make_k_local_polynomial(_generate_init_state(), model.to_serializable())
algorithm = cxxjij.algorithm.Algorithm_KLocal_run
elif updater == "single spin flip":
sa_system = cxxjij.system.make_classical_ising_polynomial(_generate_init_state(), model.to_serializable())
algorithm = cxxjij.algorithm.Algorithm_SingleSpinFlip_run
else:
raise ValueError("Unknown updater name")
else:
raise ValueError("Unknown vartype detected")
# -------------------------------- determine system class and algorithm
self._set_params(
beta_min=beta_min, beta_max=beta_max,
num_sweeps=num_sweeps, num_reads=num_reads,
schedule=schedule
)
# set annealing schedule -------------------------------
if self.params['schedule'] is None:
self.params['schedule'], beta_range = geometric_hubo_beta_schedule(
sa_system, self.params['beta_max'],
self.params['beta_min'], self.params['num_sweeps']
)
self.schedule_info = {
'beta_max': beta_range[0],
'beta_min': beta_range[1],
'num_sweeps': self.params['num_sweeps']
}
else:
self.schedule_info = {'schedule': 'custom schedule'}
# ------------------------------- set annealing schedule
response = self._cxxjij_sampling(
model, _generate_init_state,
algorithm, sa_system,
reinitialize_state, seed
)
response.info['schedule'] = self.schedule_info
return response
def geometric_ising_beta_schedule(model: openjij.model.BinaryQuadraticModel,
beta_max=None, beta_min=None,
num_sweeps=1000):
"""make geometric cooling beta schedule
Args:
model (openjij.BinaryQuadraticModel)
beta_max (float, optional): maximum (final) inverse temperature; estimated from the model if None. Defaults to None.
beta_min (float, optional): minimum (initial) inverse temperature; estimated from the model if None. Defaults to None.
num_sweeps (int, optional): number of Monte Carlo sweeps. Defaults to 1000.
Returns:
list of cxxjij.utility.ClassicalSchedule, list of beta range [max, min]
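Note (illustrative sketch, not the exact code below): a geometric schedule interpolates
log-uniformly between beta_min and beta_max, e.g.
    betas = beta_min * (beta_max / beta_min) ** np.linspace(0, 1, num_sweeps)
and when beta_min/beta_max are not supplied they are estimated from the smallest and
largest absolute couplings of the model.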
"""
if beta_min is None or beta_max is None:
# generate Ising matrix
ising_interaction = model.interaction_matrix()
# set the right-bottom element zero (see issue #209)
mat_size = ising_interaction.shape[0]
ising_interaction[mat_size-1, mat_size-1] = 0
if (model.vartype == openjij.BINARY):
# convert to ising matrix
qubo_to_ising(ising_interaction)
abs_ising_interaction = np.abs(ising_interaction)
max_abs_ising_interaction = np.max(abs_ising_interaction)
# automatic setting of min/max delta energy
abs_bias = np.sum(abs_ising_interaction, axis=1)
#apply threshold to avoid extremely large beta_max
THRESHOLD = 1e-8
min_delta_energy = | np.min(abs_ising_interaction[abs_ising_interaction > max_abs_ising_interaction*THRESHOLD]) | numpy.min |
""" A module containing LCPrimitive and its subclasses. They implement
components of a pulsar light curve. Includes primitives (Gaussian,
Lorentzian), etc. as well as more sophisticated holistic templates that
provide single-parameter (location) representations of the light curve.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pulsar/lcprimitives.py,v 1.35 2017/03/17 21:37:52 kerrm Exp $
author: <NAME> <<EMAIL>>
"""
# NB -- big TODO -- I don't think wrapped primitives quite correctly return
# Monte Carlo variables because they don't account for the uniform approx.
# perhaps this isn't a big deal
import numpy as np
from scipy.special import erf,i0,i1
from scipy.integrate import simps,quad
from scipy.interpolate import interp1d
from scipy.stats import norm,cauchy
from math import sin,cos,sinh,cosh,atan,tan
ROOT2PI = (2*np.pi)**0.5
R2DI = (2/np.pi)**0.5
ROOT2 = 2**0.5
TWOPI = (2*np.pi)
PI = np.pi*1
MAXWRAPS = 15
MINWRAPS = 3
WRAPEPS = 1e-8
# TODO -- possible "LCBase" class with certain method common to LCPrimitive and LCTemplate
def two_comp_mc(n,w1,w2,loc,func):
""" Helper function to generate MC photons from a two-sided
distribution.
NB -- this should work as is if w1,w2,loc are vectors.
n -- total number of photons
w1 -- scale parameter for func, lefthand peak
w2 -- scale parameter for func, righthand peak
loc -- position of peak
func -- an 'rvs' function from scipy
"""
frac1 = w1/(w1+w2)
# number of photons required from left side
n1 = (np.random.rand(n) < frac1).sum()
r1 = func(loc=0,scale=w1,size=n1)
# reflect and relocate photons to the right or left side
r1 = loc + np.where(r1<=0,r1,-r1)
r2 = func(loc=0,scale=w2,size=n-n1)
r2 = loc + np.where(r2>0,r2,-r2)
return np.mod(np.append(r1,r2),1)
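# Example usage (illustrative values): draw 1000 phases from an asymmetric peak at
# phase 0.5 with left/right widths 0.02/0.05, using the norm distribution imported above:
#   phases = two_comp_mc(1000, 0.02, 0.05, 0.5, norm.rvs)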
def approx_gradient(func,phases,log10_ens,eps=1e-6):
""" Return a numerical gradient. This works for both LCPrimitive and
LCTemplate objects. HOW AWESOME!
"""
orig_p = func.get_parameters(free=False).copy()
g = np.zeros([len(orig_p),len(phases)])
weights = np.asarray([-1,8,-8,1])/(12*eps)
def do_step(which,eps):
p0 = orig_p.copy()
p0[which] += eps
func.set_parameters(p0,free=False)
return func(phases,log10_ens)
for i in range(len(orig_p)):
# use a 4th-order central difference scheme
for j,w in zip([2,1,-1,-2],weights):
g[i,:] += w*do_step(i,j*eps)
func.set_parameters(orig_p,free=False)
return g
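# The weights [-1, 8, -8, 1]/(12*eps) with steps [2, 1, -1, -2] above implement the
# standard 4th-order central difference for each parameter p:
#   df/dp ~= (-f(p+2*eps) + 8*f(p+eps) - 8*f(p-eps) + f(p-2*eps)) / (12*eps)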
def check_gradient(func,atol=1e-8,rtol=1e-5,quiet=False):
""" Test gradient function with a set of MC photons.
This works with either LCPrimitive or LCTemplate objects.
TODO -- there is trouble with the numerical gradient for the
location-related parameters when the finite step
causes the peak to shift from one side of an evaluation phase
to the other."""
en = np.random.rand(1000)*2 + 1 # 100 MeV to 10 GeV
ph = func.random(en)
if hasattr(func,'closest_to_peak'):
eps = min(1e-6,0.2*func.closest_to_peak(ph))
else:
eps = 1e-6
g1 = func.gradient(ph,en,free=False)
g2 = func.approx_gradient(ph,en,eps=eps)
anyfail = False
for i in range(g1.shape[0]):
d1 = np.abs(g1[i]-g2[i])
a = np.argmax(d1)
fail = np.any(d1 > (atol + rtol*np.abs(g2)))
if not quiet:
pass_string = 'FAILED' if fail else 'passed'
print ('%02d (%s) %.3g (abs)'%(i,pass_string,d1.max()))
anyfail = anyfail or fail
return not anyfail
class Fittable(object):
# TODO
""" Base class for any object with fittable parameters.
Handle parameter names, etc. here?"""
def get_parameters(self): pass
def set_parameters(self): pass
def get_bounds(self): pass
def __call__(self,*args):
return self._call(*args)[self.free]
def _call(self,*args):
raise NotImplementedError('Child class instantiates.')
def grad(self,*args):
return self._grad(*args)[self.free]
def _grad(self,*args):
raise NotImplementedError('Child class instantiates.')
class LCPrimitive(object):
""" Base class for various components of a light curve. All "analytic"
light curve models must inherit and must implement the three
'virtual' functions below."""
def is_energy_dependent(self):
return False
def is_two_sided(self):
""" True if primitive is asymmetric. Default is False, two-sided
child classes should override."""
return False
def copy(self):
from copy import deepcopy
return deepcopy(self)
def __call__(self,phases):
raise NotImplementedError('Virtual function must be implemented by child class.')
def integrate(self,x1=0,x2=1,log10_ens=3):
""" Base implemention with scipy quad."""
f = lambda ph: self(ph,log10_ens)
return quad(f,x1,x2)[0]
def cdf(self,x,log10_ens=3):
return self.integrate(x1=0,x2=x,log10_ens=log10_ens)
def fwhm(self):
"""Return the full-width at half-maximum of the light curve model."""
return self.hwhm(0)+self.hwhm(1)
def hwhm(self,right=False):
"""Return the half-width at half-maximum of the light curve model."""
raise NotImplementedError('Virtual function must be implemented by child class.')
def init(self):
self.p = np.asarray([1])
self.pnames = []
self.name = 'Default'
self.shortname = 'None'
def _asarrays(self):
for key in ['p','free','bounds','errors','slope','slope_free']:
if hasattr(self,key):
v = self.__dict__[key]
if v is not None:
self.__dict__[key] = np.asarray(v,dtype=bool if 'free' in key else float)
def _default_bounds(self):
bounds = [[]] *len(self.p)
# this order works for LCHarmonic, too
bounds[0] = [0.005,0.5] # width
bounds[-1] = [-1,1] # position
if len(bounds) > 2:
bounds[1] = [0.005,0.5] # width
return bounds
def _default_priors(self):
loc = self.p.copy()
width = np.asarray([0.1]*len(self.p))
enable = np.asarray([False]*len(self.p))
return loc,width,enable
def __init__(self,**kwargs):
""" Generally, class-specific setup work is performed in init.
Here, init is called and certain guaranteed default members
are established."""
self.init()
if not hasattr(self,'bounds'):
self.bounds = self._default_bounds() # default
self.errors = np.zeros_like(self.p)
self.free = np.asarray([True]*len(self.p))
self.__dict__.update(kwargs)
self._asarrays()
self.gauss_prior_loc, self.gauss_prior_width, self.gauss_prior_enable = self._default_priors()
self.shift_mode = False
def _make_p(self,log10_ens=3):
""" Internal method to return parameters appropriate for use
in functional form."""
return [None] + list(self.p)
def set_parameters(self,p,free=True):
if free:
self.p[self.free] = p
else:
self.p[:] = p
# adjust position to be between 0 and 1
self.p[-1] = self.p[-1] % 1
return np.all(self.p >= 0)
def get_parameters(self,free=True):
if free:
return self.p[self.free]
return self.p
def get_parameter_names(self,free=True):
return [p for (p,b) in zip(self.pnames,self.free) if b]
def set_errors(self,errs):
n = self.free.sum()
self.errors[:] = 0.
self.errors[self.free] = errs[:n]
return n
def get_errors(self,free=True):
return self.errors[self.free]
def get_bounds(self): return self.bounds[self.free]
def get_gauss_prior_parameters(self):
mod_array = [False]*(len(self.p)-1)+[True]
return (
self.gauss_prior_loc[self.free],
self.gauss_prior_width[self.free],
np.asarray(mod_array)[self.free],
self.gauss_prior_enable[self.free],
)
def enable_gauss_prior(self,enable=True):
""" [Convenience] Turn on gaussian prior."""
self.gauss_prior_enable[:] = enable
def center_gauss_prior(self,enable=False):
""" [Convenience] Set gauss mode to current params."""
self.gauss_prior_loc[:] = self.p[:]
if enable: self.enable_gauss_prior()
def get_location(self,error=False):
if error: return np.asarray([self.p[-1],self.errors[-1]])
return self.p[-1]
def set_location(self,loc):
self.p[-1] = loc
def get_norm(self,error=False):
#if error: return np.asarray([self.p[0],self.errors[0]])
#return self.p[0]
return 1
def get_width(self,error=False,hwhm=False,right=False):
""" Return the width of the distribution.
Keyword arguments:
-----------------
error [False] if True, return tuple with value and error
hwhm [False] if True, scale width to be HWHM
right [False] if True, return "right" component, else "left".
There is no distinction for symmetric dists.
"""
scale = self.hwhm(right=right)/self.p[int(right)] if hwhm else 1
if error: return np.asarray([self.p[int(right)],self.errors[int(right)]])*scale
return self.p[int(right)]*scale
def get_gradient(self,phases,log10_ens=3):
raise DeprecationWarning()
return self.gradient(phases,log10_ens,free=True)
def gradient(self,phases,log10_ens=3,free=False):
raise NotImplementedError('No gradient function found for this object.')
def random(self,n):
""" Default is accept/reject."""
if n < 1: return 0
M = self(np.asarray([self.p[-1]])) # peak amplitude
rvals = np.empty(n)
position = 0
rfunc = np.random.rand
while True:
cand_phases = rfunc(n)
cand_phases = cand_phases[rfunc(n) < self(cand_phases)/M]
ncands = len(cand_phases)
if ncands == 0: continue
rvals[position:position + ncands] = cand_phases[:n-position]
position += ncands
if position >= n: break
return rvals
def __str__(self):
m=max([len(n) for n in self.pnames])
l = []
errors = self.errors if hasattr(self,'errors') else [0]*len(self.pnames)
for i in range(len(self.pnames)):
fstring = '' if self.free[i] else ' [FIXED]'
n=self.pnames[i][:m]
t_n = n+(m-len(n))*' '
l += [t_n + ': %.4f +/- %.4f%s'%(self.p[i],errors[i],fstring)]
l = [self.name+'\n------------------'] + l
return '\n'.join(l)
def approx_gradient(self,phases,log10_ens=3,eps=1e-5):
return approx_gradient(self,phases,log10_ens,eps=eps)
def check_gradient(self,atol=1e-8,rtol=1e-5,quiet=False):
return check_gradient(self,atol=atol,rtol=rtol,quiet=quiet)
def sanity_checks(self,eps=1e-6):
""" A few checks on normalization, integration, etc. """
errfac = 1
# Normalization test
y,ye = quad(self,0,1)
#t1 = abs(self.p[0]-y)<(ye*errfac)
t1 = abs(1-y)<(ye*errfac)
# integrate method test
#t2 = abs(self.p[0]-self.integrate(0,1))<eps
t2 = abs(1-self.integrate(0,1))<eps
# FWHM test
t3 = abs(self(self.p[-1])*0.5-self(self.p[-1]-self.fwhm()/2))<eps
# gradient test
try:
t4 = self.check_gradient(quiet=True)
except: t4 = False
# boundary conditions
t5 = abs(self(0)-self(1-eps))<eps
if not t1: print ('Failed Normalization test')
if not t2: print ('Failed integrate method test')
if not t3: print ('Failed FWHM test')
if not t4: print ('Failed gradient test')
if not t5: print ('Did not pass boundary conditions')
return np.all([t1,t2,t3,t4,t5])
def eval_string(self):
""" Return a string that can be evaled to instantiate a nearly-
identical object."""
return '%s(p=%s,free=%s,slope=%s,slope_free=%s)'%(
self.__class__.__name__,str(list(self.p)),str(list(self.free)),
str(list(self.slope)) if hasattr(self,'slope') else None,
str(list(self.slope_free)) if hasattr(self,'slope_free') else None)
def dict_string(self):
""" Return a string to express the object as a dictionary that can
be easily instantiated using its keys."""
def pretty_list(l,places=5):
fmt = '%.'+'%d'%places+'f'
s = ', '.join([fmt%x for x in l])
return '['+s+']'
t = ['name = %s'%self.__class__.__name__,
'p = %s'%(pretty_list(self.p)),
'free = %s'%(str(list(self.free))),
'slope = %s'%(pretty_list(self.slope) if hasattr(self,'slope') else None),
'slope_free = %s'%(str(list(self.slope_free)) if hasattr(self,'slope_free') else None),
]
#return 'dict(\n'+'\n '.join(t)+'\n
return t
def closest_to_peak(self,phases):
""" Return the minimum distance between a member of the array of
phases and the position of the mode of the primitive."""
return np.abs(phases-self.get_location()).min()
def get_fixed_energy_version(self,log10_en=3):
return self
class LCWrappedFunction(LCPrimitive):
""" Super-class for profiles derived from wrapped functions.
While some distributions (e.g. the wrapped normal) converge
quickly, others (e.g. the wrapped Lorentzian) converge very slowly
and must be truncated before machine precision is reached.
In order to preserve normalization, the pdf is slightly adjusted:
f(phi) = sum_{i=-N}^{N} g(phi+i) + (1 - int_{-N}^{N+1} g(x) dx).
This introduces an additional parametric dependence which must
be accounted for by computation of the gradient.
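Worked illustration (numbers are illustrative only): if the wrapped sum over
i = -N..N captures 99.7% of the mass of g, the remaining 0.3% is added back as a
flat DC level so the template still integrates to exactly 1 over one period;
_norm() computes that residual, 1 - int_{-N}^{N+1} g(x) dx.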
"""
def _norm(self,nwraps,log10_ens=3):
""" Compute the truncated portion of the template."""
#return self.p[0]-self.base_int(-nwraps,nwraps+1)
return 1-self.base_int(-nwraps,nwraps+1,log10_ens)
def _grad_norm(self,nwraps,log10_ens=3):
""" Compute the gradient terms due to truncated portion. That is,
since we add on a uniform component beyond nwraps, the
amplitude of this component depends on the CDF and hence on
the parameters.
Default implementation is to ignore these terms, applicable
for rapidly-converging distributions (e.g. wrapped normal with
small width parameter). On the other hand, it is not
negligible for long-tailed distributions, e.g. Lorentzians."""
return None
def __call__(self,phases,log10_ens=3):
""" Return wrapped template + DC component corresponding to truncation."""
results = self.base_func(phases,log10_ens)
for i in range(1,MAXWRAPS+1):
t = self.base_func(phases,log10_ens,index= i)
t += self.base_func(phases,log10_ens,index=-i)
results += t
if (i>=MINWRAPS) and (np.all(t < WRAPEPS)): break
return results+self._norm(i,log10_ens)
def gradient(self,phases,log10_ens=3,free=False):
""" Return the gradient evaluated at a vector of phases.
output : a num_parameter x len(phases) ndarray,
the num_parameter-dim gradient at each phase
"""
results = self.base_grad(phases,log10_ens)
for i in range(1,MAXWRAPS+1):
t = self.base_grad(phases,log10_ens,index=i)
t += self.base_grad(phases,log10_ens,index=-i)
results += t
if (i >= MINWRAPS) and (np.all(t < WRAPEPS)): break
gn = self._grad_norm(i,log10_ens)
if gn is not None:
for i in range(len(gn)):
results[i,:] += gn[i]
if free:
return results[self.free]
return results
def integrate(self,x1,x2,log10_ens=3):
#if(x1==0) and (x2==0): return 1.
# NB -- this method is probably overkill now.
results = self.base_int(x1,x2,log10_ens,index=0)
for i in range(1,MAXWRAPS+1):
t = self.base_int(x1,x2,log10_ens,index=i)
t += self.base_int(x1,x2,log10_ens,index=-i)
results += t
if np.all(t < WRAPEPS):
break
return results+(x2-x1)*self._norm(i,log10_ens)
def base_func(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_func function found for this object.')
def base_grad(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_grad function found for this object.')
def base_int(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_int function found for this object.')
class LCGaussian(LCWrappedFunction):
""" Represent a (wrapped) Gaussian peak.
Parameters
Width the standard deviation parameter of the norm dist.
Location the mode of the Gaussian distribution
"""
def init(self):
self.p = np.asarray([0.03,0.5])
self.pnames = ['Width','Location']
self.name = 'Gaussian'
self.shortname = 'G'
def hwhm(self,right=False):
return self.p[0]*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
return (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
def base_grad(self,phases,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
f = (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
return np.asarray([f/width*(z**2 - 1.),f/width*z])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z1 = (x1 + index - x0)/width
z2 = (x2 + index - x0)/width
return 0.5*(erf(z2/ROOT2)-erf(z1/ROOT2))
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(norm.rvs(loc=self.p[-1],scale=self.p[0],size=n),1)
class LCGaussian2(LCWrappedFunction):
""" Represent a (wrapped) two-sided Gaussian peak.
Parameters
Width1 the standard deviation parameter of the norm dist.
Width2 the standard deviation parameter of the norm dist.
Location the mode of the distribution
"""
def init(self):
self.p = np.asarray([0.03,0.03,0.5])
self.pnames = ['Width1','Width2','Location']
self.name = 'Gaussian2'
self.shortname = 'G2'
def is_two_sided(self):
return True
def hwhm(self,right=False):
return (self.p[int(right)])*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
z *= np.where(z <= 0, 1./width1, 1./width2)
return (R2DI/(width1+width2)) * np.exp(-0.5*z**2 )
def base_grad(self,phases,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
m = (z <= 0)
w = np.where(m, width1, width2)
z /= w
f = (R2DI/(width1+width2)) * np.exp(-0.5*z**2 )
k = 1./(width1+width2)
z2w = z**2/w
t = f*(z2w-k)
g1 = f*(z2w*( m)-k)
g2 = f*(z2w*(~m)-k)
g3 = f*z/w
return np.asarray([g1,g2,g3])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
if index==0 and (x1 < x0) and (x2 > x0):
z1 = (x1 + index - x0)/width1
z2 = (x2 + index - x0)/width2
k1 = 2*width1/(width1+width2)
k2 = 2*width2/(width1+width2)
return 0.5*(k2*erf(z2/ROOT2)-k1*erf(z1/ROOT2))
w = width1 if ((x1+index) < x0) else width2
z1 = (x1 + index - x0)/w
z2 = (x2 + index - x0)/w
k = 2*w/(width1+width2)
return 0.5*k*(erf(z2/ROOT2)-erf(z1/ROOT2))
def random(self,n):
""" Use multinomial technique to return random photons from
both components."""
if hasattr(n,'__len__'):
n = len(n)
return two_comp_mc(n,self.p[0],self.p[1],self.p[-1],norm.rvs)
class LCLorentzian(LCPrimitive):
""" Represent a (wrapped) Lorentzian peak.
Parameters
Width the width parameter of the wrapped Cauchy distribution,
namely HWHM*2PI for narrow distributions
Location the center of the peak in phase
"""
def init(self):
self.p = np.asarray([0.1,0.5])
self.pnames = ['Width','Location']
self.name = 'Lorentzian'
self.shortname = 'L'
def hwhm(self,right=False):
# NB -- bounds on p[1] set such that this is well-defined
return np.arccos( 2-cosh(self.p[0]) )/TWOPI
def __call__(self,phases,log10_ens=3):
e,gamma,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
# NB -- numpy call not as efficient as math.sinh etc.
# but this allows easy inheritance for the energy-dependence
return np.sinh(gamma)/(np.cosh(gamma)-np.cos(z))
def gradient(self,phases,log10_ens=3,free=False):
e,gamma,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
s1 = np.sinh(gamma); c1 = np.cosh(gamma)
c = np.cos(z); s = np.sin(z)
f = s1/(c1-c)
f2 = f**2
g1 = f*(c1/s1) - f2
g2 = f2*(TWOPI/s1)*s
if free:
return np.asarray([g1,g2])[self.free]
return np.asarray([g1,g2])
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(cauchy.rvs(loc=self.p[-1],scale=self.p[0]/TWOPI,size=n),1)
def integrate(self,x1,x2,log10_ens=3):
# NB -- due to the use of tans below, must be careful to use an angle
# range of -pi/2 to pi/2 rather than 0 to pi as one would want
# I haven't carefully tested this solution
e,gamma,loc = self._make_p(log10_ens)
x1 = PI*(x1-loc)
x2 = PI*(x2-loc)
t = 1./np.tanh(0.5*gamma) # coth(gamma/2)
v2 = np.arctan(t*tan(x2))/PI
v1 = np.arctan(t*tan(x1))/PI
return (v2<=v1) + v2 - v1 # correction for tan wrapping
class LCLorentzian2(LCWrappedFunction):
""" Represent a (wrapped) two-sided Lorentzian peak.
Parameters
Width1 the HWHM of the distribution (left)
Width2 the HWHM of the distribution (right)
Location the mode of the distribution
"""
def init(self):
self.p = np.asarray([0.03,0.03,0.5])
self.pnames = ['Width1','Width2','Location']
self.name = 'Lorentzian2'
self.shortname = 'L2'
def is_two_sided(self):
return True
def hwhm(self,right=False):
return self.p[int(right)]
def _grad_norm(self,nwraps,log10_ens=3):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z1 = (-nwraps-x0)/gamma1
z2 = (nwraps+1-x0)/gamma2
t = gamma2*np.arctan(z2)-gamma1*np.arctan(z1)
t1 = 1./(1+z1**2)
t2 = 1./(1+z2**2)
k = 2/(gamma1+gamma2)/PI
f = k*t
g1 = -1./(gamma1+gamma2)-(np.arctan(z1)-z1*t1)/t
g2 = -1./(gamma1+gamma2)+(np.arctan(z2)-z2*t2)/t
g3 = (t1-t2)/t
return [-f*g1,-f*g2,-f*g3]
def base_func(self,phases,log10_ens=3,index=0):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
z *= np.where(z<=0, 1./gamma1, 1./gamma2)
k = 2/(gamma1+gamma2)/PI
return k/(1+z**2)
def base_grad(self,phases,log10_ens=3,index=0):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
m = z < 0
g = np.where(m,1./gamma1,1./gamma2)
t1 = 1+(z*g)**2
t2 = 2*(z*g)/t1
g1 = -1/(gamma1+gamma2)+t2*((m*z)/gamma1**2)
g2 = -1/(gamma1+gamma2)+t2*((~m*z)/gamma2**2)
g3 = t2*g
f = (2./(gamma1+gamma2)/PI)/t1
return np.asarray([f*g1,f*g2,f*g3])
def base_int(self,x1,x2,log10_ens=3,index=0):
gamma1,gamma2,x0 = self.p
# the only case where g1 and g2 can be different is if we're on the
# 0th wrap, i.e. index=0; this also includes the case when we want
# to use base_int to do a "full" integral
if index==0 and (x1 < x0) and (x2 > x0):
g1,g2 = gamma1,gamma2
else:
g1,g2 = [gamma1]*2 if ((x1+index) < x0) else [gamma2]*2
z1 = (x1 + index - x0)/g1
z2 = (x2 + index - x0)/g2
k = (2./(gamma1+gamma2)/PI)
return k*(g2*atan(z2)-g1*atan(z1))
def random(self,n):
""" Use multinomial technique to return random photons from
both components."""
return two_comp_mc(n,self.p[0],self.p[1],self.p[-1],cauchy.rvs)
class LCVonMises(LCPrimitive):
""" Represent a peak from the von Mises distribution. This function is
used in directional statistics and is naturally wrapped.
Parameters:
Width inverse of the 'kappa' parameter in the std. def.
Location the center of the peak in phase
"""
def init(self):
self.p = np.asarray([0.05,0.5])
self.pnames = ['Width','Location']
self.name = 'VonMises'
self.shortname = 'VM'
def hwhm(self,right=False):
return 0.5*np.arccos(self.p[0]*np.log(0.5)+1)/TWOPI
def __call__(self,phases,log10_ens=3):
e,width,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
return np.exp(np.cos(z)/width)/i0(1./width)
def gradient(self,phases,log10_ens=3,free=False):
e,width,loc = self._make_p(log10_ens)
my_i0 = i0(1./width)
my_i1 = i1(1./width)
z = TWOPI*(phases-loc)
cz = np.cos(z)
sz = np.sin(z)
f = np.exp(cz/width)/my_i0
# derivatives of exp(cos(z)/width)/I0(1/width); the I1/I0 term belongs to the width
# derivative (chain rule through the 1/width argument of I0)
return np.asarray([f*(my_i1/my_i0 - cz)/width**2, f*TWOPI*sz/width])
class LCKing(LCWrappedFunction):
""" Represent a (wrapped) King function peak.
Parameters
Sigma the width parameter
Gamma the tail parameter
Location the mode of the distribution
"""
# NOTES -- because we don't integrate over solid angle, the norm
# integral / Jacobian for the usual King function isn't trivial;
# need to see if this is a show stopper
def init(self):
self.p = np.asarray([0.03,5.,0.5]) # sigma, gamma, location; the gamma default of 5 is an assumed placeholder (the original listed only two values for three parameters)
self.pnames = ['Sigma','Gamma','Location']
self.name = 'King'
self.shortname = 'K'
def hwhm(self,right=False):
raise NotImplementedError()
return self.p[0]*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,s,g,x0 = self._make_p(log10_ens)
z = phases+index-x0
u = 0.5*(z/s)**2
return (g-1)/g*(1.+u/g)**-g
def base_grad(self,phases,log10_ens=3,index=0):
raise NotImplementedError()
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
f = (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
return np.asarray([f/width*(z**2 - 1.),f/width*z])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,s,g,x0 = self._make_p(log10_ens)
z1 = x1 + index - x0
z2 = x2 + index - x0
u1 = 0.5*((x1 + index - x0)/s)**2
u2 = 0.5*((x2 + index - x0)/s)**2
f1 = 1-(1.+u1/g)**(1-g)
f2 = 1-(1.+u2/g)**(1-g)
if (z1*z2<0): # span the peak
return 0.5*(f1+f2)
if z1 < 0:
return 0.5*(f1-f2)
return 0.5*(f2-f1)
def random(self,n):
raise NotImplementedError()
if hasattr(n,'__len__'):
n = len(n)
return np.mod(norm.rvs(loc=self.p[-1],scale=self.p[0],size=n),1)
class LCTopHat(LCPrimitive):
""" Represent a top hat function.
Parameters:
Width right edge minus left edge
Location center of top hat
"""
def init(self):
self.p = np.asarray([0.03,0.5])
self.pnames = ['Width','Location']
self.name = 'TopHat'
self.shortname = 'TH'
self.fwhm_scale = 1
def hwhm(self,right=False):
return self.p[0]/2
def __call__(self,phases,wrap=True):
width,x0 = self.p
return np.where(np.mod(phases - x0 + width/2,1) < width,1./width,0)
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(
np.random.rand(n)*self.p[0]+self.p[-1]-self.p[0]/2,1)
class LCHarmonic(LCPrimitive):
"""Represent a sinusoidal shape corresponding to a harmonic in a Fourier expansion.
Parameters:
Location the phase of maximum
"""
def init(self):
self.p = np.asarray([0.])
self.order = 1
self.pnames = ['Location']
self.name = 'Harmonic'
self.shortname = 'H'
def __call__(self,phases,log10_ens=3):
e,x0 = self._make_p(log10_ens)
return 1+np.cos( (TWOPI*self.order) * (phases - x0 ) )
def integrate(self,x1,x2,log10_ens=3):
e,x0 = self._make_p(log10_ens)
t = self.order*TWOPI
return (x2-x1)+(np.sin(t*(x2-x0))-np.sin(t*(x1-x0)))/t
class LCEmpiricalFourier(LCPrimitive):
""" Calculate a Fourier representation of the light curve.
The only parameter is an overall shift.
Cannot be used with other LCPrimitive objects!
Parameters:
Shift : overall shift from original template phase
"""
def init(self):
self.nharm = 20
self.p = | np.asarray([0.]) | numpy.asarray |
# -*- coding: utf-8 -*-
# Project: Linear Regression
import pandas as pd
import numpy as np
import math
import pylab
import statsmodels.api as sm
from statsmodels.formula.api import ols
from sklearn.model_selection import train_test_split
import scipy.stats as spstats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error as MSE
house = pd.read_csv('HousePrices.csv')
# check the dimension
house.shape
# check columns name
house.columns
# Check Datatypes
d_types = house.dtypes
# summarised the data set
desc = house.describe()
# NULL Check
d_null = house.isnull().sum()
# check 0
d_zeros = house[house==0].count()
# split the dataset into numeric to factor columns
numColumn = house.select_dtypes(include=['int32','int64','float32','float64']).columns.values
factorColumn = house.select_dtypes(include=['object']).columns.values
# LotFrontage: set null values to 0 (use .loc to avoid chained-assignment warnings)
house.loc[house.LotFrontage.isnull(), 'LotFrontage'] = 0
# set the garage-related columns from null to "No",
# since there is no garage for the given rows
cols = ['GarageType','GarageYrBlt','GarageFinish','GarageCars','GarageArea','GarageQual','GarageCond']
for i in cols:
    house.loc[house[i].isnull(), i] = "No"
# Alley column: NA converts to "No"
house.loc[house.Alley.isnull(), 'Alley'] = "No"
# MasVnrType object column: null converts to "No"
house.loc[house.MasVnrType.isnull(), 'MasVnrType'] = "No"
# MasVnrArea column: null converts to 0
house.loc[house.MasVnrArea.isnull(), 'MasVnrArea'] = 0
# Basement
basement_cols = ['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2']
for i in basement_cols:
    house.loc[house[i].isnull(), i] = "No"
# FireplaceQu
house.loc[house.FireplaceQu.isnull(), 'FireplaceQu'] = "No"
# Electrical
house.loc[house.Electrical.isnull(), 'Electrical'] = "Mix"
# PoolQC
house.loc[house.PoolQC.isnull(), 'PoolQC'] = "No"
# Fence
house.loc[house.Fence.isnull(), 'Fence'] = "No"
# MiscFeature
house.loc[house.MiscFeature.isnull(), 'MiscFeature'] = "No"
# Drop ID column
house = house.drop('Id', axis=1)
# Rename 3 columns
house = house.rename(columns = {'1stFlrSF' : 'FirststFlrSF', '2ndFlrSF' : 'SecondndFlrSF', '3SsnPorch' : 'ThirdSsnPorch'}, inplace = False)
# remove 9 numeric columns whose most frequent value dominates ("singularity" = share of the dominant value)
# BsmtFinSF2: singularity 0.8822
house.BsmtFinSF2.value_counts()
house = house.drop('BsmtFinSF2', axis=1)
# LowQualFinSF: singularity 0.9811
house.LowQualFinSF.value_counts()
house = house.drop('LowQualFinSF', axis=1)
# BsmtHalfBath: singularity 0.9450
house.BsmtHalfBath.value_counts()
house = house.drop('BsmtHalfBath', axis=1)
# KitchenAbvGr: singularity 0.9479
house.KitchenAbvGr.value_counts()
house = house.drop('KitchenAbvGr', axis=1)
# EnclosedPorch: singularity 0.8634
house.EnclosedPorch.value_counts()
house = house.drop('EnclosedPorch', axis=1)
# ScreenPorch: singularity 0.9223
house.ScreenPorch.value_counts()
house = house.drop('ScreenPorch', axis=1)
# PoolArea: singularity 0.9961
house.PoolArea.value_counts()
house = house.drop('PoolArea', axis=1)
# MiscVal: singularity 0.9628
house.MiscVal.value_counts()
house = house.drop('MiscVal', axis=1)
# ThirdSsnPorch: singularity 0.9845
house.ThirdSsnPorch.value_counts()
house = house.drop('ThirdSsnPorch', axis=1)
# remove 21 factor columns whose most frequent value dominates
# Road_Type: singularity 0.9951
house.Road_Type.value_counts()
house = house.drop('Road_Type', axis=1)
# Alley: singularity 0.9377
house.Alley.value_counts()
house = house.drop('Alley', axis=1)
# LandContour: singularity 0.8943
house.LandContour.value_counts()
house = house.drop('LandContour', axis=1)
# Utilities: singularity 0.9985
house.Utilities.value_counts()
house = house.drop('Utilities', axis=1)
# LandSlope: singularity 0.9445
house.LandSlope.value_counts()
house = house.drop('LandSlope', axis=1)
# Condition1: singularity 0.8654
house.Condition1.value_counts()
house = house.drop('Condition1', axis=1)
# Condition2: singularity 0.9908
house.Condition2.value_counts()
house = house.drop('Condition2', axis=1)
# RoofMatl: singularity 0.9821
house.RoofMatl.value_counts()
house = house.drop('RoofMatl', axis=1)
# ExterCond: singularity 0.8755
house.ExterCond.value_counts()
house = house.drop('ExterCond', axis=1)
# BsmtCond: singularity 0.8929
house.BsmtCond.value_counts()
house = house.drop('BsmtCond', axis=1)
# BsmtFinType2: singularity 0.8538
house.BsmtFinType2.value_counts()
house = house.drop('BsmtFinType2', axis=1)
# Heating: singularity 0.9768
house.Heating.value_counts()
house = house.drop('Heating', axis=1)
# CentralAir: singularity 0.9334
house.CentralAir.value_counts()
house = house.drop('CentralAir', axis=1)
# Electrical: singularity 0.9170
house.Electrical.value_counts()
house = house.drop('Electrical', axis=1)
# Functional: singularity 0.9343
house.Functional.value_counts()
house = house.drop('Functional', axis=1)
# GarageQual: singularity 0.9020
house.GarageQual.value_counts()
house = house.drop('GarageQual', axis=1)
# GarageCond: singularity 0.9093
house.GarageCond.value_counts()
house = house.drop('GarageCond', axis=1)
# PavedDrive: singularity 0.9228
house.PavedDrive.value_counts()
house = house.drop('PavedDrive', axis=1)
# PoolQC: singularity 0.9961
house.PoolQC.value_counts()
house = house.drop('PoolQC', axis=1)
# MiscFeature: singularity 0.9614
house.MiscFeature.value_counts()
house = house.drop('MiscFeature', axis=1)
# SaleType: singularity 0.8668
house.SaleType.value_counts()
house = house.drop('SaleType', axis=1)
# ANOVA Test
colsObject = list(house.select_dtypes(include=['object']).columns.values)
for i in colsObject:
model = ols('Property_Sale_Price ~ '+i+' ', data=house).fit()
anovaValue = sm.stats.anova_lm(model, typ=2)
pcritical = 0.05
pvalue = anovaValue["PR(>F)"][0]
if pvalue > pcritical:
print(i)
house = house.drop(i, axis=1)
# find correlation
numCols = house.select_dtypes(include=['int32','int64','float32','float64']).columns.values
# drop the y variable by name rather than by a hard-coded index
numCols = numCols[numCols != 'Property_Sale_Price']
# find correlation
cor = house[numCols].corr()
# trill the correlation dataframe
cor = np.tril(cor)
# heatmap for correlation
sns.heatmap(cor,xticklabels=numCols,yticklabels=numCols,vmin=-1,vmax=1,annot=True)
# copy for original dataset
housePrices_d = house.copy()
# create for dummy columns for factor columns
for c in colsObject:
dummy = pd.get_dummies(housePrices_d[c],drop_first=True, prefix=c)
housePrices_d = housePrices_d.join(dummy)
# original columns are drop in dataset
housePrices_d = housePrices_d.drop(colsObject, axis=1)
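# Illustrative example (hypothetical column, not from this dataset): with drop_first=True a
# three-level factor Colour with levels ('Red', 'Green', 'Blue') becomes two indicator
# columns Colour_Green and Colour_Blue, with 'Red' absorbed into the intercept; this
# avoids the dummy-variable trap in the OLS fits below.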
# split the data train and test
train,test = train_test_split(housePrices_d, test_size=0.3)
# train split in trainx and trainy
# first drop the y column
trainx = train.drop('Property_Sale_Price',axis=1)
# add y column in trainy
trainy = train['Property_Sale_Price']
# test split in testx and testy
# first drop the y column
testx = test.drop('Property_Sale_Price', axis=1)
# add y column in testy
testy = test['Property_Sale_Price']
# add a constant term to the trainx and testx
trainx = sm.add_constant(trainx)
testx = sm.add_constant(testx)
# 1 OLS MODEL build
mo = sm.OLS(trainy,trainx).fit()
# summary of OLS model
# mo.summary()
# Predict value trainx value
pre = mo.predict(trainx)
err1 = trainy - pre
np.mean(err1)
# check the normal distrubution
sns.distplot(err1)
# predict value testx value
pre = mo.predict(testx)
# create Dataframe for testx
results = pd.DataFrame({'Property_Sale_Price':testy,
'Predict_Property_Sale_Price':np.round(pre,2)})
# error find for prediction value and actual value
results['err'] = results['Property_Sale_Price'] - results['Predict_Property_Sale_Price']
# square error
results['sqerr'] = results['err']**2
# find Sum Of Square Error
sse = np.sum(results.sqerr)
# find Mean Of Squared Error
mse = sse/len(testy)
# Model 2
# transform y to boxcox y
bcy = spstats.boxcox(housePrices_d.Property_Sale_Price)
# lambda value
lamda = bcy[1]
# transform array include in dataframe
housePrices_d['Property_Sale_Price_BCT'] = bcy[0]
# split the data in train2 and test2
train2, test2 = train_test_split(housePrices_d, test_size=0.3)
# drop the old y variable
train2 = train2.drop('Property_Sale_Price', axis=1)
test2 = test2.drop('Property_Sale_Price', axis=1)
# train2 data split in 2 variables train2x and train2y
train2x = train2.drop('Property_Sale_Price_BCT', axis=1)
train2y = train2['Property_Sale_Price_BCT']
# test2 data split in 2 variables test2x and test2y
test2x = test2.drop('Property_Sale_Price_BCT', axis=1)
test2y = test2['Property_Sale_Price_BCT']
# add to constant value train2x and test2x
train2x = sm.add_constant(train2x)
test2x = sm.add_constant(test2x)
# 2 OLS MODEL build
mo2 = sm.OLS(train2y,train2x).fit()
# predict train2x value
pre1 = mo2.predict(train2x)
err1 = train2y - pre1
np.mean(err1)
# predict test2x value
pre1 = mo2.predict(test2x)
# convert the predicted values to the actual format
acty2 = np.exp(np.log(test2y*lamda+1)/lamda)
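# Sketch of the back-transform used here: scipy's boxcox gives y' = (y**lamda - 1)/lamda,
# so the inverse is y = (lamda*y' + 1)**(1/lamda) = exp(log(lamda*y' + 1)/lamda).
# Worked example (illustrative): with lamda = 0.5, a transformed value y' = 18 maps back
# to (0.5*18 + 1)**2 = 100.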
predy2 = np.round(np.exp( | np.log(pre1*lamda+1) | numpy.log |
from __future__ import print_function
import sys
sys.path.append('..')
from Game import Game
from .KindoLogic import Board
import numpy as np
'''
Author: <NAME> (https://github.com/scalettar/)
Date: March, 2020
'''
class KindoGame(Game):
'''
This class specifies the Kindo game.
It interacts with the Board class which controls the game state.
'''
# For displaying board in text format
display_owner = {
1: "x",
0: " ",
-1: "o"
}
display_wallDirection = {
1: ".",
2: ":",
3: ".",
4: ":"
}
display_hasDot = {
True: "-",
False: " "
}
display_isKing = {
True: "K",
False: " "
}
display_isUnwallable = {
True: ".",
False: " "
}
def __init__(self, n=5):
# Board dimensions n x n
self.n = n
# Number of different types of tiles a player can have
# 0: No wall
# 1: Wall facing up
# 2: Wall facing right
# 3: Wall facing down
# 4: Wall facing left
self.tileTypes = 5
def getInitBoard(self):
"""
Returns:
startBoard: a representation of the board (ideally this is the form
that will be the input to your neural network)
"""
b = Board(self.n)
return np.array(b.tiles)
def getBoardSize(self):
"""
Returns:
(x,y): a tuple of board dimensions
"""
return (self.n, self.n)
def getActionSize(self):
"""
Returns:
actionSize: number of all possible actions
"""
# 5 different tile placement options, 4 walls or a neutral placement
# Add 1 action to total; This final index represents no other legal actions
# e.g. for n = 5 (5 x 5 board):
# STARTING INDICES FOR TILE PLACEMENT ACTIONS FOR EACH TILE (5 actions per tile)
# 0 1 2 3 4
# -----------------------
# 0 | 0 5 10 15 20
# 1 | 25 30 35 40 45
# 2 | 50 55 60 65 70
# 3 | 75 80 85 90 95
# 4 | 100 105 110 115 120
# Actions 0 - 124 represent a move of form (x, y, w) flattened into a single index
# Action 125 is only true if actions 0 - 124 are all invalid
# EXAMPLE 1.
# A move (x, y, w) = (3, 2, 4) would mean placing a left facing wall
# in space with x-coordinate = 3 and y-coordinate = 2
# This move cooresponds with action 85 + 4 = 89
# EXAMPLE 2.
# Action 90 would coorespond with move (3, 3, 0)
# This means capturing tile with x-coordinate = 3 and y-coordinate = 3
# without placing any walls
return self.n * self.n * self.tileTypes + 1
def getNextState(self, board, player, action):
"""
Input:
board: current board
player: current player (1 or -1)
action: action taken by current player
Returns:
nextBoard: board after applying action
nextPlayer: player who plays in the next turn (should be -player)
"""
# Check if no valid actions
if action == self.getActionSize() - 1:
# No valid actions, in Kindo this means the game must be over so make no changes
return (board, player)
# Create a copy of the current board
b = Board(self.n)
b.tiles = np.copy(board)
# Get move (x, y, w) from flattened action index
x = int(action / (self.n * self.tileTypes))
y = int((action % (self.n * self.tileTypes)) / self.tileTypes)
w = (action % (self.n * self.tileTypes)) % self.tileTypes
move = (x, y, w)
# Execute move
currentPlayerID = b.execute_move(move, player)
# Return updated state and current player
return (b.tiles, currentPlayerID)
def getValidMoves(self, board, player):
"""
Input:
board: current board
player: current player
Returns:
validMoves: a binary vector of length self.getActionSize(), 1 for
moves that are valid from the current board and player,
0 for invalid moves
"""
# Array of valid moves to return, initialize to all false (0)
validMoves = [0] * self.getActionSize()
# Create a copy of the current board
b = Board(self.n)
b.tiles = np.copy(board)
# Find the legal (valid) moves
legalMoves = b.get_legal_moves(player)
# No legal moves found (in Kindo this should only occur when the game is over)
if len(legalMoves) == 0:
validMoves[-1] = 1 # last move (action) index means no other moves are valid
return np.array(validMoves)
# Create flattened array of valid moves (actions) from legalMoves list
for x, y, w in legalMoves:
validMoves[(x * self.n * self.tileTypes) + (y * self.tileTypes) + w] = 1
# Convert to np array for performance
return np.array(validMoves)
def getGameEnded(self, board, player):
"""
Input:
board: current board
player: current player (1 or -1)
Returns:
r: 0 if game has not ended. 1 if player won, -1 if player lost,
small non-zero value for draw.
"""
# Create a copy of the current board
b = Board(self.n)
b.tiles = | np.copy(board) | numpy.copy |
"""Functions for manipulating the skeleton object (These could be moved to wkskel repo if not already there)"""
import os
from typing import Sequence
import numpy as np
import pandas as pd
from tqdm import tqdm
import wkskel
from wkskel import Skeleton
from genEM3.data.wkwdata import WkwData
from genEM3.util.path import get_data_dir
def getAllTreeCoordinates(skel):
"""Get the coordinates of all the nodes in trees within a skeleton object"""
# Get the coordinate of the nodes as numpy array
coordsList = [skel.nodes[x].position.values for x in range(skel.num_trees())]
# Concatenate into a single array of coordinates
return np.vstack(coordsList)
def get_volume_df(skeletons: Sequence[Skeleton]):
"""Function to return a table containing the information from test box annotations
Arguments:
skeletons: a list of skeletons of the annotated test boxes
Returns:
volume_df: pandas data frame which contains the skeleton id, tree_idx and id, coordinate
of the center of the patches and their relative location (xi, yi). Finally, it contains
the class of the annotated patch(debris(0, False) vs. clean(1, True))
"""
volume_df = pd.DataFrame(columns=['skel_idx', 'tree_idx', 'tree_id',
'x', 'y', 'z', 'xi', 'yi', 'class', 'explicit'])
for skel_idx, skel in enumerate(skeletons):
group_ids = np.array(skel.group_ids)
for plane_group in skel.groups:
plane_group_id = plane_group.id
plane_group_class = bool(int(plane_group.name[-1]))
plane_tree_inds = np.where(group_ids == plane_group_id)[0]
plane_matrix = np.zeros((5, 5), dtype=bool)  # np.bool is removed in recent numpy; use the builtin bool
plane_df = pd.DataFrame(columns=['skel_idx', 'tree_idx', 'tree_id',
'x', 'y', 'z', 'xi', 'yi', 'class', 'explicit'])
for tree_idx in plane_tree_inds:
patch_class = skel.names[tree_idx][-1]
if patch_class.isnumeric():
patch_class = bool(int(patch_class))
explicit = True
else:
patch_class = plane_group_class
explicit = False
patch_xi = int(skel.names[tree_idx][5:7])
patch_yi = int(skel.names[tree_idx][8:10])
plane_matrix[patch_xi, patch_yi] = patch_class
c_id = np.argmax(np.bincount(skel.edges[tree_idx].flatten()))
c_abs = skel.nodes[tree_idx].loc[skel.nodes[tree_idx]['id'] == c_id, 'position'].values[0].astype(int)
plane_df = plane_df.append({
'skel_idx': skel_idx,
'tree_idx': tree_idx,
'tree_id': skel.tree_ids[tree_idx],
'x': c_abs[0],
'y': c_abs[1],
'z': c_abs[2],
'xi': patch_xi,
'yi': patch_yi,
'class': patch_class,
'explicit': explicit
}, ignore_index=True)
volume_df = volume_df.append(plane_df)
return volume_df
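# Illustrative note (assumption inferred from the slicing above, not stated in the code):
# name[5:7] -> xi, name[8:10] -> yi and name[-1] -> class imply fixed-width tree names
# such as "patch03_04_1", i.e. zero-padded patch indices followed by a trailing 0/1
# class digit.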
def add_bbox_tree_from_center(coord_center, input_shape, tree_name, skel):
"""Adds a bbox skeleton at specified coordinate to skeleton"""
cx, cy, cz = coord_center
positions = np.array([
[cx, cy, cz],
[cx - input_shape[0]/2, cy - input_shape[1]/2, cz],
[cx - input_shape[0]/2, cy + input_shape[1]/2, cz],
[cx + input_shape[0]/2, cy - input_shape[1]/2, cz],
[cx + input_shape[0]/2, cy + input_shape[1]/2, cz],
[cx + input_shape[0]/2, cy + input_shape[1]/2 - 1, cz]
])
min_id = skel.max_node_id() + 1
max_id = min_id + positions.shape[0] - 1
nodes = skel.define_nodes(
position_x=positions[:, 0].tolist(),
position_y=positions[:, 1].tolist(),
position_z=positions[:, 2].tolist(),
id=list(range(min_id, max_id + 1))
)
edges = np.array([
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[2, 3],
[2, 4],
[3, 5],
[4, 6]
]) + skel.max_node_id()
skel.add_tree(
nodes=nodes,
edges=edges,
name=tree_name)
def make_skel_from_json(json_path: str):
"""
Creates a skeleton object from the binary targets of the data sources in json
Args:
json_path: the path of the data source json file
Returns:
skel: the skeleton object
"""
data_sources_dict = WkwData.convert_ds_to_dict(WkwData.read_short_ds_json(json_path=json_path))
# Init with empty skeleton
empty_skel_name = os.path.join(get_data_dir(), 'NML', 'empty_skel.nml')
skel = wkskel.Skeleton(nml_path=empty_skel_name)
# Loop over each bbox
keys = list(data_sources_dict.keys())
num_nodes_perTree = 5
for idx, key in tqdm(enumerate(keys), desc='Making bbox nml', total=len(keys)):
# Get minimum and maximum node id
min_id = (num_nodes_perTree * idx) + 1
max_id = num_nodes_perTree * (idx + 1)
# Encode the target in the tree name
cur_target = data_sources_dict[key]['target_class']
cur_name = f'{key}, Debris: {cur_target[0]}, Myelin: {cur_target[1]}'
# add current tree
add_bbox_tree(skel=skel,
bbox=data_sources_dict[key]['input_bbox'],
tree_name=cur_name,
node_id_min_max=[min_id, max_id])
return skel
def add_bbox_tree(skel, bbox: list, tree_name: str, node_id_min_max: list):
"""
Get the nodes and edges of a bounding box tree
"""
corners = corners_from_bbox(bbox=bbox)
min_id, max_id = node_id_min_max
# Nodes
nodes = skel.define_nodes(
position_x=corners[:, 0].tolist(),
position_y=corners[:, 1].tolist(),
position_z=corners[:, 2].tolist(),
id=list(range(min_id, max_id + 1))
)
# Edges
edges = np.array([
[1, 2],
[2, 4],
[4, 3],
[3, 5]
]) + min_id - 1
# Add tree
# Note: There's no need to return the object since the change is not limited to the scope of the function
skel.add_tree(nodes=nodes, edges=edges, name=tree_name)
def corners_from_bbox(bbox: list):
"""
Get the coordinates of the corners given a webknossos style bounding box
Args:
bbox: 1 x 6 vector of webknossos bbox: upper left corner + bbox shape
Returns:
corners: 5 x 3 coordinates of the corners of the 2D image patch.
Upper left corner is repeated to close the loop
"""
# Get upper left corner and shape of bbox
upper_left = | np.asarray(bbox[0:3]) | numpy.asarray |
"""Test keras.layers.core.Layer.__call__"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras.layers.core import Dense
from keras.models import Sequential, Graph
def test_layer_call():
"""Test keras.layers.core.Layer.__call__"""
nb_samples, input_dim, output_dim = 3, 10, 5
layer = Dense(output_dim, input_dim=input_dim)
W = np.asarray(K.eval(layer.W)).astype(K.floatx())
X = K.placeholder(ndim=2)
Y = layer(X)
f = K.function([X], [Y])
x = np.ones((nb_samples, input_dim)).astype(K.floatx())
y = f([x])[0].astype(K.floatx())
t = np.dot(x, W).astype(K.floatx())
assert_allclose(t, y, rtol=.2)
def test_sequential_call():
"""Test keras.models.Sequential.__call__"""
nb_samples, input_dim, output_dim = 3, 10, 5
model = Sequential()
model.add(Dense(output_dim=output_dim, input_dim=input_dim))
model.compile('sgd', 'mse')
# test flat model
X = K.placeholder(ndim=2)
Y = model(X)
f = K.function([X], [Y])
x = np.ones((nb_samples, input_dim)).astype(K.floatx())
y1 = f([x])[0].astype(K.floatx())
y2 = model.predict(x)
# results of __call__ should match model.predict
assert_allclose(y1, y2)
# test nested model
model2 = Sequential()
model2.add(model)
model2.compile('sgd', 'mse')
Y2 = model2(X)
f = K.function([X], [Y2])
y1 = f([x])[0].astype(K.floatx())
y2 = model2.predict(x)
# results of __call__ should match model.predict
assert_allclose(y1, y2)
def test_graph_call():
"""Test keras.models.Graph.__call__"""
nb_samples, input_dim, output_dim = 3, 10, 5
model = Graph()
model.add_input('input', input_shape=(input_dim, ))
model.add_node(Dense(output_dim=output_dim, input_dim=input_dim),
input='input', name='output', create_output=True)
model.compile('sgd', {'output': 'mse'})
# test flat model
X = K.placeholder(ndim=2)
Y = model(X)
f = K.function([X], [Y])
x = np.ones((nb_samples, input_dim)).astype(K.floatx())
y1 = f([x])[0].astype(K.floatx())
y2 = model.predict({'input': x})['output']
# results of __call__ should match model.predict
assert_allclose(y1, y2)
# test nested Graph models
model2 = Graph()
model2.add_input('input', input_shape=(input_dim, ))
model2.add_node(model, input='input', name='output', create_output=True)
# need to turn off cache because we're reusing model
model2.cache_enabled = False
model2.compile('sgd', {'output': 'mse'})
Y2 = model2(X)
f = K.function([X], [Y2])
y1 = f([x])[0].astype(K.floatx())
y2 = model2.predict({'input': x})['output']
# results of __call__ should match model.predict
| assert_allclose(y1, y2) | numpy.testing.assert_allclose |
import numpy as np
import math
import random
class Ai():
def __init__(self, resolution, all_pos: list, all_ids, options: list):
self.board_resolution = resolution
self.all_pos = all_pos
self.player_option, self.ai_option = options
self.available_tiles = self.all_pos[:]
self.ai_move = []
self.id_array = np.array(all_ids)
self.id_array = | np.reshape(self.id_array, (3, 3)) | numpy.reshape |
"""
Handles huge multidimensional data.
Since this requires a large amount of memory:
- we use the mean across different cell types instead of individual samples.
- we use PCA to reduce the number of cell types.
There are two approaches:
1. Discrete - discretize each channel into words, then combine them into joint words per position
2. Continuous - use the real channel values, with a multidimensional Gaussian (mean and covariance matrix) per state
As a rough size estimate: 242 cell types x 2,492,506 bins for chromosome 1 (bin size 100)
requires about 4.5 GB.
see also:
multichannel_classify - script for multichannel classifications
"""
import numpy as np
from models.ClassifierStrategy import ClassifierStrategy
from models.PcaTransformer import PcaTransformer
from hmm.HMMModel import GaussianHMM, DiscreteHMM
from hmm.bwiter import bw_iter, IteratorCondition, DiffCondition
__author__ = 'eranroz'
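# Rough arithmetic behind the size estimate in the module docstring:
# 242 cell types x 2,492,506 bins x 8 bytes (float64) ~= 4.8e9 bytes ~= 4.5 GiB,
# which is why means across cell types and PCA reduction are used instead of
# keeping raw samples in memory.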
def continuous_state_selection(data, num_states):
"""
    Heuristic creation of emissions for states / selection of the number of states.
    Instead of selecting the emission matrix at random, we find clusters of co-occurring values
    and use those clusters as state means, with the nearby values as an estimate of the covariance matrix.
    The number of clusters/states is subject to pruning if not pre-selected.
    @param num_states: number of states in the model
    @param data: dense data for a specific chromosome
    @return: initial emission for a Gaussian mixture model HMM (list of (mean, covariance) pairs)
"""
def soft_k_means_step(clustered_data, clusters_means):
"""
Soft k means
@param clustered_data: data to cluster
        @param clusters_means: current cluster means (centroids)
@return: new clusters means
"""
w = np.array([np.sum(np.power(clustered_data - c, 2), axis=1) for c in clusters_means])
w /= ((np.max(w) + np.mean(w)) / 1000) # scale w
w = np.minimum(w, 500) # 500 is enough (to eliminate underflow)
w = np.exp(-w)
w = w / np.sum(w, 0) # normalize for each point
w = w / np.sum(w, 1)[:, None] # normalize for all cluster
return np.dot(w, clustered_data)
data = data.T
num_sub_samples = 2
sub_indics = np.random.permutation(np.arange(data.shape[0] - data.shape[0] % num_sub_samples))
n_clusters = num_states or data.shape[1] * 2 # number of clustering will be subject to pruning
clusters = np.random.random((n_clusters, data.shape[1])) * np.max(data, 0)
# once we have assumption for clusters work with real sub batches of the data
sub_indics = sub_indics.reshape(num_sub_samples, -1)
different_clusters = False
step = 0
while not different_clusters:
diff = np.ones(1)
iter_count = 0
while np.any(diff > 1e-1) and iter_count < 10:
sub_data = data[sub_indics[step % num_sub_samples], :]
new_clusters = soft_k_means_step(sub_data, clusters)
diff = np.sum((new_clusters - clusters) ** 2, axis=1)
clusters = new_clusters
iter_count += 1
step += 1
if num_states:
different_clusters = True
else:
dist_matrix = np.array([np.sum(np.power(clusters - c, 2), axis=1) for c in clusters])
np.fill_diagonal(dist_matrix, 1000)
closest_cluster = np.min(dist_matrix)
threshold = 2 * np.mean(dist_matrix) / np.var(dist_matrix) # or to just assign 0.1?
if closest_cluster < threshold:
# pruning the closest point and add random to close points
subject_to_next_prune = list(set(np.where(dist_matrix < threshold)[0]))
clusters[subject_to_next_prune, :] += 0.5 * clusters[subject_to_next_prune, :] * np.random.random(
(len(subject_to_next_prune), data.shape[1]))
clusters = clusters[np.arange(n_clusters) != np.where(dist_matrix == closest_cluster)[0][0], :]
n_clusters -= 1
else:
different_clusters = True
# now assign points to clusters
# and add some random
clusters += clusters * np.random.random(clusters.shape) * 0.1
clusters = clusters[np.argsort(np.sum(clusters ** 2, 1))] # to give some meaning
weight = np.array([np.sum(np.power(data - c, 2), axis=1) for c in clusters])
weight /= (np.mean(weight) / 500) # scale w
weight = np.minimum(weight, 500)
weight = np.exp(-weight)
weight /= np.sum(weight, 0) # normalize for each point
weight /= np.sum(weight, 1)[:, None] # normalize for all cluster
means = np.dot(weight, data)
covs = []
min_std = 10 * np.finfo(float).tiny
for mu, p in zip(means, weight):
seq_min_mean = data - mu
new_cov = np.dot((seq_min_mean.T * p), seq_min_mean)
new_cov = np.maximum(new_cov, min_std)
covs.append(new_cov)
means_covs = list(zip(means, covs))
return means_covs
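# Illustrative sketch of the expected call pattern for the helper above.
# The (tracks x bins) layout and the magnitudes below are assumptions made for
# the demo only; the function transposes internally and returns one
# (mean, covariance) pair per state.
def _example_continuous_state_selection():
    rng = np.random.RandomState(0)
    demo_data = rng.rand(3, 1000) * 10  # 3 tracks, 1000 genomic bins (assumed layout)
    means_covs = continuous_state_selection(demo_data, num_states=4)
    for mean, cov in means_covs:
        print(mean.shape, cov.shape)  # (3,) and (3, 3) per state
    return means_covs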
class GMMClassifier(ClassifierStrategy):
"""
multivariate version of HMMClassifier for multichannel data
* It uses PCA to reduce number of learned channels
* It adds some functions for smart selection of the initial state
"""
def __init__(self, model=None, pca_reduction=None, train_chromosome='chr1', study_diff=True):
"""
@type model: GaussianHMM
@param model: GaussianHMM to model the multichannel data
"""
self.model = model
self.pca_reduction = pca_reduction
self.train_chromosome = train_chromosome
self.study_diff = study_diff # whether we should reduce the mean from each location before PCA
def pca_ndims(self):
"""
number of dimensions
@return: number of dimensions
"""
return self.pca_reduction.w.shape
def training_chr(self, chromosome):
"""
Specifies on which chromosome we want to train or fit the model
@param chromosome: chromosome name for training
@return: None
"""
self.train_chromosome = chromosome
def fit(self, data, iterations=None, energy=0.9, pca_components=None):
"""
fits the classifiers to training sequences and returns the log likelihood after fitting
@param pca_components: number of dimensions to use for PCA (set energy to None)
@param energy: cumulative energy to use for pca (set pca_components to None)
@param data: data to use for PCA reduction matrix selection
@param iterations: number of iterations number of iteration
@return: likelihood for the model based on the model
"""
old_model = self.model
print("Starting fitting")
training_seqs = data[self.train_chromosome]
if self.pca_reduction is None:
print('Fitting PCA')
self.pca_reduction = PcaTransformer()
self.pca_reduction.fit(training_seqs[0], min_energy=energy, ndim=pca_components)
transformer = self.pca_reduction
training_seqs = transformer(training_seqs)
# TODO: use different sequences?
bw_stop_condition = IteratorCondition(iterations) if iterations is not None else DiffCondition()
self.model, p = bw_iter(training_seqs, self.model, bw_stop_condition)
print("Model fitting finished. likelihood", p)
print("Old model")
print(old_model)
print("New model")
print(self.model)
fit_params = {
'likelihoods': bw_stop_condition.prev_likelihoods
}
return p, fit_params
def classify(self, sequence_dict):
"""
Classifies chromosomes across samples (such as different tissues)
@param sequence_dict: dict like object with keys as chromosomes and values as matrix
@return: viterbi state assignment for the genome
"""
classified = dict()
transformer = self.pca_reduction
for chromosome, sequence in sequence_dict.items():
print('Classifying chromosome', chromosome)
# reduce dimensions
sequence = transformer(sequence)
# fit
classified[chromosome] = self.model.viterbi(sequence)
return classified
def data_transform(self):
"""
get associated data transformation pre-processing
@return: log(x+1)
"""
def log_diff(data):
log_data = np.log(np.array(data) + 1)
return log_data - np.mean(log_data, 0)
if self.study_diff:
return log_diff
else:
return lambda x: np.log(np.array(x) + 1)
def init_pca_clustering(self, data, train_chromosome='chr8', num_states=10, pca_energy=None):
"""
Default initialization for GMM classifier with PCA and then clustering (before actual training)
* "training" for PCA (based on train chromosome covar)
* heuristic selection of number of state and their emission (soft k means)
* state transition - random initialization with some prior assumptions
@param pca_energy: minimum energy for PCA (to select number of dimensions).
@type train_chromosome: str
@type num_states: int
@param data: data (or partial data) to use for selection of pca transformation, and k-means for states
(initial guess). dictionary like object
@param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
@param num_states: number of states in HMM
"""
chrom_data = data[train_chromosome]
transformer = PcaTransformer()
transformer.fit(chrom_data, min_energy=pca_energy)
chrom_data = transformer(chrom_data)
self.init_by_clustering({train_chromosome: chrom_data}, train_chromosome, num_states)
self.pca_reduction = transformer # override if PCA reduction with the trained PCA
def init_by_clustering(self, data, train_chromosome='chr8', num_states=10):
"""
Default initialization for GMM classifier with clustering (before actual training)
@param data: data (or partial data) to use for selection of pca transformation, and k-means for states
(initial guess). dictionary like object
@param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
@param num_states: number of states in HMM
"""
chrom_data = data[train_chromosome]
emission = continuous_state_selection(chrom_data, num_states=num_states)
n_states = len(emission) + 1 # number of states plus begin state
print('Number of states selected %i' % (n_states - 1))
state_transition = np.random.random((n_states, n_states))
# fill diagonal with higher values
np.fill_diagonal(state_transition, np.sum(state_transition, 1))
state_transition[:, 0] = 0 # set transition to begin state to zero
# normalize
state_transition /= np.sum(state_transition, 1)[:, np.newaxis]
# initial guess
initial_model = GaussianHMM(state_transition, emission)
self.model = initial_model
self.pca_reduction = PcaTransformer.empty()
self.train_chromosome = train_chromosome
@staticmethod
def default_strategy(data, train_chromosome='chr8', num_states=10):
"""
Creates a default GMM classifier with heuristic guess (see default)
@type train_chromosome: str
@type num_states: int
@param data: data (or partial data) to use for selection of pca transformation, and k-means for states
(initial guess). dictionary like object
@param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
@param num_states: number of states in HMM
@return: a GMM classifier
"""
classifier = GMMClassifier()
classifier.init_pca_clustering(data, train_chromosome, num_states)
return classifier
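    # Illustrative usage sketch (comments only; `data` is assumed to be a
    # dict-like object mapping chromosome names to dense matrices, as in
    # fit/classify above):
    #
    #   classifier = GMMClassifier.default_strategy(data, 'chr8', num_states=10)
    #   likelihood, fit_info = classifier.fit(data, energy=0.9)
    #   segmentation = classifier.classify(data)  # chromosome -> Viterbi path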
def __str__(self):
return str(self.model)
def states_html(self, input_labels=None, column_title='Data/State'):
"""
Creates a nice html table with some description/meaning for the states
@param column_title: title for the columns
@param input_labels: labels for the input (original dimensions before PCA)
@return: table with html representation of the states
"""
import matplotlib as mpl
import matplotlib.cm as cm
mean_vars_states = [state[0] for state in self.model.emission.mean_vars]
mean_states = np.array([mean[0] for mean, var in mean_vars_states])
mean_states = self.pca_reduction.recover(mean_states)
n_states = mean_states.shape[0]
norm = mpl.colors.Normalize(vmin=0, vmax=n_states + 1)
        cmap = cm.nipy_spectral  # 'spectral' was renamed/removed in newer matplotlib
m = cm.ScalarMappable(norm=norm, cmap=cmap)
color_schema = dict()
for i in range(0, n_states + 1):
rgb = list(m.to_rgba(i)[:3])
for j in range(0, 3):
rgb[j] = str("%i" % (255 * rgb[j]))
color_schema[i] = ','.join(rgb)
states_ths = ''.join(
['<th style=\"color:rgb(%s)\">%i</th>' % (color_schema[i], i) for i in np.arange(1, n_states + 1)])
states_trs = []
"""
max_v = np.max(mean_states)
backgrounds = cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=np.min(mean_states), vmax=np.max(mean_states)), cmap=cm.Blues)
mean_to_color = lambda x: 'rgb(%i, %i, %i)' % backgrounds.to_rgba(x, bytes=True)[:3]
for cell_i, cell_means in enumerate(mean_states.T):
cell_description = "<td>%s</td>" % (str(cell_i+1) if input_labels is None else input_labels[cell_i])
# add mean values
cell_description += ''.join(['<td style="font-size: %i%%;color:#fff;background:%s">%.2f</td>' % (mean/max_v * 100, mean_to_color(mean), mean) for mean in cell_means])
# wrap in tr
cell_description = '<tr>%s</tr>' % cell_description
states_trs.append(cell_description)
"""
template = """
<table style="font-size:85%;text-align:center;border-collapse:collapse;border:1px solid #aaa;" cellpadding="5" border="1">
<tr style="font-size:larger; font-weight: bold;">
<th>{column_title}</th>
{states_ths}
</tr>
{states_trs}
</table>
"""
# rewrite
backgrounds = [
cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=np.min(data_type),
vmax=np.max(data_type)), cmap=cm.Blues)
for data_type in mean_states.T]
mean_to_color = lambda x, y: 'rgb(%i, %i, %i)' % backgrounds[y].to_rgba(x, bytes=True)[:3]
for cell_i, data_type_means in enumerate(mean_states.T):
cell_description = "<td>%s</td>" % (str(cell_i + 1) if input_labels is None else input_labels[cell_i])
# add mean values
cell_description += ''.join(['<td style="font-size: 85%%;color:#fff;background:%s">%.2f</td>' %
(mean_to_color(mean, cell_i), mean)
for mean in data_type_means])
# wrap in tr
cell_description = '<tr>%s</tr>' % cell_description
states_trs.append(cell_description)
template = """
<table style="font-size:85%;text-align:center;border-collapse:collapse;border:1px solid #aaa;" cellpadding="5" border="1">
<tr style="font-size:larger; font-weight: bold;">
<th>{column_title}</th>
{states_ths}
</tr>
{states_trs}
</table>
"""
return template.format(**({'states_ths': states_ths,
'states_trs': '\n'.join(states_trs),
'column_title': column_title
}))
class DiscreteMultichannelHMM(ClassifierStrategy):
"""
A model for discrete multichannel HMM:
data [position x tissue] =(PCA)> data [position x tissue combination] => discretization => word encoding => HMM
"""
def __init__(self):
self.model = None
self.pca_reduction = None
def classify(self, sequence):
raise NotImplementedError
def fit(self, data):
# TODO: only partially implemented here not tested...
raise NotImplementedError
from scipy.stats import norm as gaussian
min_alpha = 0
n_words = np.max(data)
# init hmm model
n_states = 5
        state_transition = np.zeros((n_states + 1, n_states + 1))  # begin state + real states
# begin state
state_transition[0, 1:] = np.random.rand(n_states)
# real states - random with some constraints. state 1 is most closed, and n is most opened
        real_states = np.random.random((n_states, n_states))  # np.random.rand takes separate dims, not a tuple
# set strong diagonal
diagonal_selector = np.eye(n_states, dtype='bool')
real_states[diagonal_selector] = np.sum(real_states, 1) * 9
real_states /= np.sum(real_states, 1)[:, None]
state_transition[1:, 1:] = real_states
# normalize
# emission
        emission = np.zeros((n_states + 1, n_words + 1))  # extra row/column for the begin state and word 0
real_emission = np.random.random((n_states, n_words))
for i in np.arange(0, n_states):
mean = i * (n_words / n_states)
variance = (n_words / n_states)
real_emission[i, :] = gaussian(mean, variance).pdf(np.arange(n_words))
real_emission /= np.sum(real_emission, 1)[:, None]
emission[1:, 1:] = real_emission
# init hmm
print('Creating model')
self.model = DiscreteHMM(state_transition, emission, min_alpha=min_alpha)
print('Training model')
def data_transform(self):
"""
        get associated data transformation preprocessing
"""
if self.pca_reduction is None:
return lambda x: x
else:
return lambda x: DiscreteMultichannelHMM.preprocess(self.pca_reduction(x))
@staticmethod
def preprocess(data):
discrete = DiscreteMultichannelHMM.multichannel_discrete_transform(data)
multichannel_data = DiscreteMultichannelHMM.encode_discrete_words(discrete)
return multichannel_data
@staticmethod
def encode_discrete_words(data):
"""
Transforms a discrete matrix to one dimensional words
@param data: discrete matrix
@return: words array
"""
        new_data = np.zeros(data.shape[1], dtype=int)  # one word per position; bitwise OR needs ints
        n_values = int(np.max(data)) + 1
        # one distinct power of two per (value, channel) pair
        alphbet = np.power(2, np.arange(data.shape[0] * n_values))
        alphbet_assign = iter(alphbet)
        # transform to powers of 2 (into a separate array, so later value tests
        # are not confused by entries that were already encoded)
        encoded = np.zeros_like(data, dtype=int)
        for i in np.arange(0, n_values):
            for j in np.arange(0, data.shape[0]):  # iterate over channels, not positions
                selector = (data[j, :] == i)
                encoded[j, selector] = next(alphbet_assign)
        for cell in encoded:
            # bitwise or
            new_data |= cell
        return new_data
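    # Worked illustration of the intended encoding (comments only, an
    # interpretation of the docstring): with 2 channels and values {0, 1},
    # the (value, channel) pairs are given the bits 1, 2, 4, 8 in order, so a
    # position where channel 0 reads 1 and channel 1 reads 0 encodes to
    # 4 | 2 = 6, while the swapped position encodes to 1 | 8 = 9.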
@staticmethod
def multichannel_discrete_transform(data, percentiles=[60, 75, 90]):
"""
Transforms a matrix from continuous values to discrete values
@param percentiles: percentiles used for discretization
@param data: continuous values matrix
@return: discrete values
"""
data = np.log(data + 1)
prec_values = np.percentile(data, q=percentiles)
max_val = np.max(data) + 1
        min_val = np.min(data)
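        # Illustrative sketch (an assumption, not necessarily the original
        # logic): a common way to finish such a percentile discretization is
        #
        #   bins = np.append(prec_values, max_val)
        #   return np.digitize(data.ravel(), bins).reshape(data.shape)
        #
        # i.e. each log-value is mapped to the index of the percentile bin it
        # falls into.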
# Copyright (c) 2011-2016 by California Institute of Technology
# Copyright (c) 2016 by The Regents of the University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Algorithms related to discretization of continuous dynamics.
See Also
========
L{find_controller}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
import os
import warnings
import pprint
from copy import deepcopy
import multiprocessing as mp
import numpy as np
from scipy import sparse as sp
import polytope as pc
from polytope.plot import plot_partition, plot_transition_arrow
from tulip import transys as trs
from tulip.hybrid import LtiSysDyn, PwaSysDyn
from .prop2partition import (PropPreservingPartition,
pwa_partition, part2convex)
from .feasible import is_feasible, solve_feasible
from .plot import plot_ts_on_partition
# inline imports:
#
# inline: import matplotlib.pyplot as plt
debug = False
class AbstractSwitched(object):
"""Abstraction of SwitchedSysDyn, with mode-specific and common info.
Attributes:
- ppp: merged partition, if any
Preserves both propositions and dynamics
- ts: common TS, if any
- ppp2ts: map from C{ppp.regions} to C{ts.states}
- modes: dict of {mode: AbstractPwa}
- ppp2modes: map from C{ppp.regions} to C{modes[mode].ppp.regions}
of the form:
{mode: list}
where C{list} has same indices as C{ppp.regions} and
elements in each C{list} are indices of regions in
each C{modes[mode].ppp.regions}.
type: dict
Each partition corresponds to some mode.
(for switched systems)
In each mode a L{PwaSysDyn} is active.
"""
def __init__(
self, ppp=None, ts=None, ppp2ts=None,
modes=None, ppp2modes=None
):
if modes is None:
modes = dict()
self.ppp = ppp
self.ts = ts
self.ppp2ts = ppp2ts
self.modes = modes
self.ppp2modes = ppp2modes
def __str__(self):
s = 'Abstraction of switched system\n'
s += str('common PPP:\n') + str(self.ppp)
s += str('common ts:\n') + str(self.ts)
for mode, ab in self.modes.items():
s += 'mode: ' + str(mode)
s += ', with abstraction:\n' + str(ab)
return s
def ppp2pwa(self, mode, i):
"""Return original C{Region} containing C{Region} C{i} in C{mode}.
@param mode: key of C{modes}
@param i: Region index in common partition C{ppp.regions}.
@return: tuple C{(j, region)} of:
- index C{j} of C{Region} and
- C{Region} object
in C{modes[mode].ppp.regions}
"""
region_idx = self.ppp2modes[mode][i]
ab = self.modes[mode]
return ab.ppp2pwa(region_idx)
def ppp2sys(self, mode, i):
"""Return index of active PWA subsystem in C{mode},
@param mode: key of C{modes}
@param i: Region index in common partition C{ppp.regions}.
@return: tuple C{(j, subsystem)} of:
- index C{j} of PWA C{subsystem}
- L{LtiSysDyn} object C{subsystem}
"""
region_idx = self.ppp2modes[mode][i]
ab = self.modes[mode]
return ab.ppp2sys(region_idx)
def plot(self, show_ts=False, only_adjacent=False):
"""Plot mode partitions and merged partition, if one exists.
For details see L{AbstractPwa.plot}.
"""
axs = []
color_seed = 0
# merged partition exists ?
if self.ppp is not None:
for mode in self.modes:
env_mode, sys_mode = mode
edge_label = {'env_actions':env_mode,
'sys_actions':sys_mode}
ax = _plot_abstraction(
self, show_ts=False, only_adjacent=False,
color_seed=color_seed
)
plot_ts_on_partition(
self.ppp, self.ts, self.ppp2ts,
edge_label, only_adjacent, ax
)
axs += [ax]
# plot mode partitions
for mode, ab in self.modes.items():
ax = ab.plot(show_ts, only_adjacent, color_seed)
ax.set_title('Abstraction for mode: ' + str(mode))
axs += [ax]
#if isinstance(self.ts, dict):
# for ts in self.ts:
# ax = ts.plot()
# axs += [ax]
return axs
class AbstractPwa(object):
"""Discrete abstraction of PWA dynamics, with attributes:
- ppp: Partition into Regions.
Each Region corresponds to
a discrete state of the abstraction
type: L{PropPreservingPartition}
- ts: Finite transition system abstracting the continuous system.
Each state corresponds to a Region in C{ppp.regions}.
It can be fed into discrete synthesis algorithms.
type: L{FTS}
- ppp2ts: bijection between C{ppp.regions} and C{ts.states}.
Has common indices with C{ppp.regions}.
Elements are states in C{ts.states}.
(usually each state is a str)
type: list of states
- pwa: system dynamics
type: L{PwaSysDyn}
- pwa_ppp: partition preserving both:
- propositions and
- domains of PWA subsystems
Used for non-conservative planning.
If just L{LtiSysDyn}, then the only difference
of C{pwa_ppp} from C{orig_ppp} is convexification.
type: L{PropPreservingPartition}
- orig_ppp: partition preserving only propositions
i.e., agnostic of dynamics
type: L{PropPreservingPartition}
- disc_params: parameters used in discretization that
should be passed to the controller refinement
to ensure consistency
type: dict
If any of the above is not given,
then it is initialized to None.
Notes
=====
1. There could be some redundancy in ppp and ofts,
in that they are both decorated with propositions.
This might be useful to keep each of
them as functional units on their own
(possible to change later).
2. The 'Pwa' in L{AbstractPwa} includes L{LtiSysDyn}
as a special case.
"""
def __init__(
self, ppp=None, ts=None, ppp2ts=None,
pwa=None, pwa_ppp=None, ppp2pwa=None, ppp2sys=None,
orig_ppp=None, ppp2orig=None,
disc_params=None
):
if disc_params is None:
disc_params = dict()
self.ppp = ppp
self.ts = ts
self.ppp2ts = ppp2ts
self.pwa = pwa
self.pwa_ppp = pwa_ppp
self._ppp2pwa = ppp2pwa
self._ppp2sys = ppp2sys
self.orig_ppp = orig_ppp
self._ppp2orig = ppp2orig
# original_regions -> pwa_ppp
# ppp2orig -> ppp2pwa_ppp
# ppp2pwa -> ppp2pwa_sys
self.disc_params = disc_params
def __str__(self):
s = str(self.ppp)
s += str(self.ts)
s += 30 * '-' + '\n'
s += 'Map PPP Regions ---> TS states:\n'
s += self._ppp2other_str(self.ppp2ts) + '\n'
s += 'Map PPP Regions ---> PWA PPP Regions:\n'
s += self._ppp2other_str(self._ppp2pwa) + '\n'
s += 'Map PPP Regions ---> PWA Subsystems:\n'
s += self._ppp2other_str(self._ppp2sys) + '\n'
s += 'Map PPP Regions ---> Original PPP Regions:\n'
s += self._ppp2other_str(self._ppp2orig) + '\n'
s += 'Discretization Options:\n\t'
s += pprint.pformat(self.disc_params) +'\n'
return s
def ts2ppp(self, state):
region_index = self.ppp2ts.index(state)
region = self.ppp[region_index]
return (region_index, region)
def ppp2trans(self, region_index):
"""Return the transition set constraint and active subsystem,
for non-conservative planning.
"""
reg_idx, pwa_region = self.ppp2pwa(region_index)
sys_idx, sys = self.ppp2sys(region_index)
return pwa_region, sys
def ppp2pwa(self, region_index):
"""Return dynamics and predicate-preserving region
and its index for PWA subsystem active in given region.
The returned region is the C{trans_set} used for
non-conservative planning.
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, pwa.pwa_ppp[i])}
"""
j = self._ppp2pwa[region_index]
pwa_region = self.pwa_ppp[j]
return (j, pwa_region)
def ppp2sys(self, region_index):
"""Return index and PWA subsystem active in indexed region.
Semantics: j-th sub-system is active in i-th Region,
where C{j = ppp2pwa[i]}
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, pwa.list_subsys[i])}
"""
# LtiSysDyn ?
if self._ppp2sys is None:
return (0, self.pwa)
subsystem_idx = self._ppp2sys[region_index]
subsystem = self.pwa.list_subsys[subsystem_idx]
return (subsystem_idx, subsystem)
def ppp2orig(self, region_index):
"""Return index and region of original partition.
The original partition is w/o any dynamics,
not even the PWA domains, only the polytopic predicates.
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, orig_ppp.regions[i])}
"""
j = self._ppp2orig[region_index]
orig_region = self.orig_ppp[j]
return (j, orig_region)
def _ppp2other_str(self, ppp2other):
if ppp2other is None:
return ''
s = ''
for i, other in enumerate(ppp2other):
s += '\t\t' + str(i) + ' -> ' + str(other) + '\n'
return s
def _debug_str_(self):
s = str(self.ppp)
s += str(self.ts)
s += '(PWA + Prop)-Preserving Partition'
s += str(self.pwa_ppp)
s += 'Original Prop-Preserving Partition'
s += str(self.orig_ppp)
return s
def plot(self, show_ts=False, only_adjacent=False,
color_seed=None):
"""Plot partition and optionally feasible transitions.
@param show_ts: plot feasible transitions on partition
@type show_ts: bool
@param only_adjacent: plot feasible transitions only
between adjacent regions. This reduces clutter,
but if horizon > 1 and not all horizon used,
then some transitions could be hidden.
@param only_adjacent: bool
"""
ax = _plot_abstraction(self, show_ts, only_adjacent,
color_seed)
return ax
def verify_transitions(self):
logger.info('verifying transitions...')
for from_state, to_state in self.ts.transitions():
i, from_region = self.ts2ppp(from_state)
j, to_region = self.ts2ppp(to_state)
trans_set, sys = self.ppp2trans(i)
params = {'N', 'close_loop', 'use_all_horizon'}
disc_params = {k:v for k,v in self.disc_params.items()
if k in params}
s0 = solve_feasible(from_region, to_region, sys,
trans_set=trans_set, **disc_params)
msg = str(i) + ' ---> ' + str(j)
if not from_region <= s0:
logger.error('incorrect transition: ' + msg)
isect = from_region.intersect(s0)
                ratio = 100 * isect.volume / from_region.volume
                logger.error('intersection volume: ' + str(ratio) + ' %')
else:
logger.info('correct transition: ' + msg)
def _plot_abstraction(ab, show_ts, only_adjacent, color_seed):
if ab.ppp is None or ab.ts is None:
warnings.warn('Either ppp or ts is None.')
return
if show_ts:
ts = ab.ts
ppp2ts = ab.ppp2ts
else:
ts = None
ppp2ts = None
ax = ab.ppp.plot(
ts, ppp2ts, only_adjacent=only_adjacent,
color_seed=color_seed
)
#ax = self.ts.plot()
return ax
def discretize(
part, ssys, N=10, min_cell_volume=0.1,
closed_loop=True, conservative=False,
max_num_poly=5, use_all_horizon=False,
trans_length=1, remove_trans=False,
abs_tol=1e-7,
plotit=False, save_img=False, cont_props=None,
plot_every=1, simu_type='bi'
):
"""Refine the partition via bisimulation
or dual-simulation algorithms, and establish transitions
based on reachability analysis.
Reference
=========
U{[NOTM12]
<https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>}
See Also
========
L{prop2partition.pwa_partition}, L{prop2partition.part2convex}
@param part: L{PropPreservingPartition} object
@param ssys: L{LtiSysDyn} or L{PwaSysDyn} object
@param N: horizon length
@param min_cell_volume: the minimum volume of cells in the resulting
partition.
@param closed_loop: boolean indicating whether the `closed loop`
algorithm should be used. default True.
@param conservative: if true, force sequence in reachability analysis
to stay inside starting cell. If false, safety
is ensured by keeping the sequence inside a convexified
version of the original proposition preserving cell.
@param max_num_poly: maximum number of polytopes in a region to use in
reachability analysis.
@param use_all_horizon: in closed loop algorithm: if we should look
for reachability also in less than N steps.
@param trans_length: the number of polytopes allowed to cross in a
transition. a value of 1 checks transitions
only between neighbors, a value of 2 checks
neighbors of neighbors and so on.
@param remove_trans: if True, remove found transitions between
non-neighbors.
@param abs_tol: maximum volume for an "empty" polytope
@param plotit: plot partitioning as it evolves
@type plotit: boolean,
default = False
@param save_img: save snapshots of partitioning to PDF files,
requires plotit=True
@type save_img: boolean,
default = False
@param cont_props: continuous propositions to plot
@type cont_props: list of C{Polytope}
@param simu_type: if 'bi', use bisimulation partition; if 'dual',
use dual-simulation partition
@type simu_type: string,
default = 'bi'
@rtype: L{AbstractPwa}
"""
if simu_type == 'bi':
AbstractPwa = _discretize_bi(
part, ssys, N, min_cell_volume,
closed_loop, conservative,
max_num_poly, use_all_horizon,
trans_length, remove_trans,
abs_tol,
plotit, save_img, cont_props,
plot_every)
elif simu_type == 'dual':
AbstractPwa = _discretize_dual(
part, ssys, N, min_cell_volume,
closed_loop, conservative,
max_num_poly, use_all_horizon,
trans_length, remove_trans,
abs_tol,
plotit, save_img, cont_props,
plot_every)
else:
raise ValueError(
'Unknown simulation type: "{st}"'.format(
st=simu_type))
return AbstractPwa
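# Illustrative sketch: how the dispatcher above is typically called. `part`
# and `ssys` are assumed to be a PropPreservingPartition and an
# LtiSysDyn/PwaSysDyn built elsewhere; the numeric options are arbitrary demo
# values, not recommendations.
def _example_discretize_usage(part, ssys):
    ab = discretize(
        part, ssys, N=8, min_cell_volume=0.1,
        closed_loop=True, trans_length=1, simu_type='bi')
    # The returned AbstractPwa bundles the refined partition (ab.ppp), the
    # finite transition system (ab.ts) and the Region <-> state map (ab.ppp2ts).
    return ab.ts, ab.ppp2ts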
def _discretize_bi(
part, ssys, N=10, min_cell_volume=0.1,
closed_loop=True, conservative=False,
max_num_poly=5, use_all_horizon=False,
trans_length=1, remove_trans=False,
abs_tol=1e-7,
plotit=False, save_img=False, cont_props=None,
plot_every=1
):
"""Refine the partition and establish transitions
based on reachability analysis. Use bi-simulation algorithm.
Reference
=========
1. U{[NOTM12]
<https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>}
2. <NAME>.; <NAME>.
"A Bisimulation-like Algorithm for Abstracting Control Systems."
54th Annual Allerton Conference on CCC 2016
See Also
========
L{prop2partition.pwa_partition}, L{prop2partition.part2convex}
@param part: L{PropPreservingPartition} object
@param ssys: L{LtiSysDyn} or L{PwaSysDyn} object
@param N: horizon length
@param min_cell_volume: the minimum volume of cells in the resulting
partition.
@param closed_loop: boolean indicating whether the `closed loop`
algorithm should be used. default True.
@param conservative: if true, force sequence in reachability analysis
to stay inside starting cell. If false, safety
is ensured by keeping the sequence inside a convexified
version of the original proposition preserving cell.
@param max_num_poly: maximum number of polytopes in a region to use in
reachability analysis.
@param use_all_horizon: in closed loop algorithm: if we should look
for reachability also in less than N steps.
@param trans_length: the number of polytopes allowed to cross in a
transition. a value of 1 checks transitions
only between neighbors, a value of 2 checks
neighbors of neighbors and so on.
@param remove_trans: if True, remove found transitions between
non-neighbors.
@param abs_tol: maximum volume for an "empty" polytope
@param plotit: plot partitioning as it evolves
@type plotit: boolean,
default = False
@param save_img: save snapshots of partitioning to PDF files,
requires plotit=True
@type save_img: boolean,
default = False
@param cont_props: continuous propositions to plot
@type cont_props: list of C{Polytope}
@rtype: L{AbstractPwa}
"""
start_time = os.times()[0]
orig_ppp = part
min_cell_volume = (min_cell_volume /np.finfo(np.double).eps
*np.finfo(np.double).eps)
ispwa = isinstance(ssys, PwaSysDyn)
islti = isinstance(ssys, LtiSysDyn)
if ispwa:
(part, ppp2pwa, part2orig) = pwa_partition(ssys, part)
else:
part2orig = range(len(part))
# Save original polytopes, require them to be convex
if conservative:
orig_list = None
orig = [0]
else:
(part, new2old) = part2convex(part) # convexify
part2orig = [part2orig[i] for i in new2old]
# map new regions to pwa subsystems
if ispwa:
ppp2pwa = [ppp2pwa[i] for i in new2old]
remove_trans = False # already allowed in nonconservative
orig_list = []
for poly in part:
if len(poly) == 0:
orig_list.append(poly.copy())
elif len(poly) == 1:
orig_list.append(poly[0].copy())
else:
raise Exception("discretize: "
"problem in convexification")
orig = list(range(len(orig_list)))
# Cheby radius of disturbance set
# (defined within the loop for pwa systems)
if islti:
if len(ssys.E) > 0:
rd = ssys.Wset.chebR
else:
rd = 0.
# Initialize matrix for pairs to check
IJ = part.adj.copy()
IJ = IJ.todense()
IJ = np.array(IJ)
logger.debug("\n Starting IJ: \n" + str(IJ) )
# next line omitted in discretize_overlap
IJ = reachable_within(trans_length, IJ,
np.array(part.adj.todense()) )
# Initialize output
num_regions = len(part)
transitions = np.zeros(
[num_regions, num_regions],
dtype = int
)
sol = deepcopy(part.regions)
adj = part.adj.copy()
adj = adj.todense()
adj = np.array(adj)
# next 2 lines omitted in discretize_overlap
if ispwa:
subsys_list = list(ppp2pwa)
else:
subsys_list = None
ss = ssys
# init graphics
if plotit:
try:
import matplotlib.pyplot as plt
plt.ion()
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.axis('scaled')
ax2.axis('scaled')
file_extension = 'pdf'
        except Exception:
logger.error('failed to import matplotlib')
plt = None
else:
plt = None
iter_count = 0
# List of how many "new" regions
# have been created for each region
# and a list of original number of neighbors
#num_new_reg = np.zeros(len(orig_list))
#num_orig_neigh = np.sum(adj, axis=1).flatten() - 1
progress = list()
# Do the abstraction
while np.sum(IJ) > 0:
ind = np.nonzero(IJ)
# i,j swapped in discretize_overlap
i = ind[1][0]
j = ind[0][0]
IJ[j, i] = 0
si = sol[i]
sj = sol[j]
si_tmp = deepcopy(si)
sj_tmp = deepcopy(sj)
if ispwa:
ss = ssys.list_subsys[subsys_list[i]]
if len(ss.E) > 0:
rd, xd = pc.cheby_ball(ss.Wset)
else:
rd = 0.
if conservative:
# Don't use trans_set
trans_set = None
else:
# Use original cell as trans_set
trans_set = orig_list[orig[i]]
S0 = solve_feasible(
si, sj, ss, N, closed_loop,
use_all_horizon, trans_set, max_num_poly
)
msg = '\n Working with partition cells: {i}, {j}'.format(i=i,
j=j)
logger.info(msg)
msg = '\t{i} (#polytopes = {num}), and:\n'.format(i=i,
num=len(si))
msg += '\t{j} (#polytopes = {num})\n'.format(j=j,
num=len(sj))
if ispwa:
msg += '\t with active subsystem: '
msg += '{sys}\n'.format(sys=subsys_list[i])
msg += '\t Computed reachable set S0 with volume: '
msg += '{vol}\n'.format(vol=S0.volume)
logger.debug(msg)
#logger.debug('si \cap s0')
isect = si.intersect(S0)
vol1 = isect.volume
risect, xi = pc.cheby_ball(isect)
#logger.debug('si \ s0')
diff = si.diff(S0)
vol2 = diff.volume
rdiff, xd = pc.cheby_ball(diff)
# if pc.is_fulldim(pc.Region([isect]).intersect(diff)):
# logging.getLogger('tulip.polytope').setLevel(logging.DEBUG)
# diff = pc.mldivide(si, S0, save=True)
#
# ax = S0.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/s0.pdf')
#
# ax = si.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/si.pdf')
#
# ax = isect.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/isect.pdf')
#
# ax = diff.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/diff.pdf')
#
# ax = isect.intersect(diff).plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/diff_cap_isect.pdf')
#
# logger.error('Intersection \cap Difference != \emptyset')
#
# assert(False)
if vol1 <= min_cell_volume:
logger.warning('\t too small: si \cap Pre(sj), '
'so discard intersection')
if vol1 <= min_cell_volume and isect:
logger.warning('\t discarded non-empty intersection: '
'consider reducing min_cell_volume')
if vol2 <= min_cell_volume:
logger.warning('\t too small: si \ Pre(sj), so not reached it')
# We don't want our partitions to be smaller than the disturbance set
# Could be a problem since cheby radius is calculated for smallest
# convex polytope, so if we have a region we might throw away a good
# cell.
if (vol1 > min_cell_volume) and (risect > rd) and \
(vol2 > min_cell_volume) and (rdiff > rd):
# Make sure new areas are Regions and add proposition lists
if len(isect) == 0:
isect = pc.Region([isect], si.props)
else:
isect.props = si.props.copy()
if len(diff) == 0:
diff = pc.Region([diff], si.props)
else:
diff.props = si.props.copy()
# replace si by intersection (single state)
isect_list = pc.separate(isect)
sol[i] = isect_list[0]
# cut difference into connected pieces
difflist = pc.separate(diff)
difflist += isect_list[1:]
# n_isect = len(isect_list) -1
num_new = len(difflist)
# add each piece, as a new state
for region in difflist:
sol.append(region)
# keep track of PWA subsystems map to new states
if ispwa:
subsys_list.append(subsys_list[i])
n_cells = len(sol)
new_idx = range(n_cells-1, n_cells-num_new-1, -1)
"""Update transition matrix"""
transitions = np.pad(transitions, (0,num_new), 'constant')
transitions[i, :] = np.zeros(n_cells)
for r in new_idx:
#transitions[:, r] = transitions[:, i]
# All sets reachable from start are reachable from both part's
# except possibly the new part
transitions[i, r] = 0
transitions[j, r] = 0
# sol[j] is reachable from intersection of sol[i] and S0
if i != j:
transitions[j, i] = 1
            # sol[j] is reachable from each piece of S0 \cap sol[i]
#for k in range(n_cells-n_isect-2, n_cells):
# transitions[j, k] = 1
"""Update adjacency matrix"""
old_adj = np.nonzero(adj[i, :])[0]
# reset new adjacencies
adj[i, :] = np.zeros([n_cells -num_new])
adj[:, i] = np.zeros([n_cells -num_new])
adj[i, i] = 1
adj = np.pad(adj, (0, num_new), 'constant')
for r in new_idx:
adj[i, r] = 1
adj[r, i] = 1
adj[r, r] = 1
if not conservative:
orig = np.hstack([orig, orig[i]])
# adjacencies between pieces of isect and diff
for r in new_idx:
for k in new_idx:
                    if r == k:
continue
if pc.is_adjacent(sol[r], sol[k]):
adj[r, k] = 1
adj[k, r] = 1
msg = ''
if logger.getEffectiveLevel() <= logging.DEBUG:
msg += '\t\n Adding states {i} and '.format(i=i)
for r in new_idx:
msg += '{r} and '.format(r=r)
msg += '\n'
logger.debug(msg)
for k in np.setdiff1d(old_adj, [i,n_cells-1]):
# Every "old" neighbor must be the neighbor
# of at least one of the new
if pc.is_adjacent(sol[i], sol[k]):
adj[i, k] = 1
adj[k, i] = 1
elif remove_trans and (trans_length == 1):
# Actively remove transitions between non-neighbors
transitions[i, k] = 0
transitions[k, i] = 0
for r in new_idx:
if pc.is_adjacent(sol[r], sol[k]):
adj[r, k] = 1
adj[k, r] = 1
elif remove_trans and (trans_length == 1):
# Actively remove transitions between non-neighbors
transitions[r, k] = 0
transitions[k, r] = 0
"""Update IJ matrix"""
IJ = np.pad(IJ, (0,num_new), 'constant')
adj_k = reachable_within(trans_length, adj, adj)
sym_adj_change(IJ, adj_k, transitions, i)
for r in new_idx:
sym_adj_change(IJ, adj_k, transitions, r)
if logger.getEffectiveLevel() <= logging.DEBUG:
msg = '\n\n Updated adj: \n{adj}'.format(adj=adj)
msg += '\n\n Updated trans: \n{trans}'.format(trans=
transitions)
msg += '\n\n Updated IJ: \n{IJ}'.format(IJ=IJ)
logger.debug(msg)
logger.info('Divided region: {i}\n'.format(i=i))
elif vol2 < abs_tol:
logger.info('Found: {i} ---> {j}\n'.format(i=i, j=j))
transitions[j,i] = 1
else:
if logger.level <= logging.DEBUG:
msg = '\t Unreachable: {i} --X--> {j}\n'.format(i=i, j=j)
msg += '\t\t diff vol: {vol2}\n'.format(vol2=vol2)
msg += '\t\t intersect vol: {vol1}\n'.format(vol1=vol1)
logger.debug(msg)
else:
logger.info('\t unreachable\n')
transitions[j,i] = 0
# check to avoid overlapping Regions
if debug:
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
assert(tmp_part.is_partition() )
n_cells = len(sol)
progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2
progress += [progress_ratio]
msg = '\t total # polytopes: {n_cells}\n'.format(n_cells=n_cells)
msg += '\t progress ratio: {pr}\n'.format(pr=progress_ratio)
logger.info(msg)
iter_count += 1
# no plotting ?
if not plotit:
continue
if plt is None or plot_partition is None:
continue
if iter_count % plot_every != 0:
continue
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# plot pair under reachability check
ax2.clear()
si_tmp.plot(ax=ax2, color='green')
sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5)
plot_transition_arrow(si_tmp, sj_tmp, ax2)
S0.plot(ax2, color='none', hatch='/', alpha=0.3)
fig.canvas.draw()
# plot partition
ax1.clear()
plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23)
# plot dynamics
ssys.plot(ax1, show_domain=False)
# plot hatched continuous propositions
part.plot_props(ax1)
fig.canvas.draw()
# scale view based on domain,
# not only the current polytopes si, sj
l,u = part.domain.bounding_box
ax2.set_xlim(l[0,0], u[0,0])
ax2.set_ylim(l[1,0], u[1,0])
if save_img:
fname = 'movie' +str(iter_count).zfill(3)
fname += '.' + file_extension
fig.savefig(fname, dpi=250)
plt.pause(1)
new_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# check completeness of adjacency matrix
if debug:
tmp_part = deepcopy(new_part)
tmp_part.compute_adj()
# Generate transition system and add transitions
ofts = trs.FTS()
adj = sp.lil_matrix(transitions.T)
n = adj.shape[0]
ofts_states = range(n)
ofts.states.add_from(ofts_states)
ofts.transitions.add_adj(adj, ofts_states)
# Decorate TS with state labels
atomic_propositions = set(part.prop_regions)
ofts.atomic_propositions.add_from(atomic_propositions)
for state, region in zip(ofts_states, sol):
state_prop = region.props.copy()
ofts.states.add(state, ap=state_prop)
param = {
'N':N,
'trans_length':trans_length,
'closed_loop':closed_loop,
'conservative':conservative,
'use_all_horizon':use_all_horizon,
'min_cell_volume':min_cell_volume,
'max_num_poly':max_num_poly
}
ppp2orig = [part2orig[x] for x in orig]
end_time = os.times()[0]
msg = 'Total abstraction time: {time}[sec]'.format(time=
end_time - start_time)
print(msg)
logger.info(msg)
if save_img and plt is not None:
fig, ax = plt.subplots(1, 1)
plt.plot(progress)
ax.set_xlabel('iteration')
ax.set_ylabel('progress ratio')
ax.figure.savefig('progress.pdf')
return AbstractPwa(
ppp=new_part,
ts=ofts,
ppp2ts=ofts_states,
pwa=ssys,
pwa_ppp=part,
ppp2pwa=orig,
ppp2sys=subsys_list,
orig_ppp=orig_ppp,
ppp2orig=ppp2orig,
disc_params=param
)
def _discretize_dual(
part, ssys, N=10, min_cell_volume=0.1,
closed_loop=True, conservative=False,
max_num_poly=5, use_all_horizon=False,
trans_length=1, remove_trans=False,
abs_tol=1e-7,
plotit=False, save_img=False, cont_props=None,
plot_every=1
):
"""Refine the partition and establish transitions
based on reachability analysis. Use dual-simulation algorithm.
Reference
=========
1. U{[NOTM12]
<https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>}
2. <NAME>.; <NAME>.
"A Bisimulation-like Algorithm for Abstracting Control Systems."
54th Annual Allerton Conference on CCC 2016
See Also
========
L{prop2partition.pwa_partition}, L{prop2partition.part2convex}
@param part: L{PropPreservingPartition} object
@param ssys: L{LtiSysDyn} or L{PwaSysDyn} object
@param N: horizon length
@param min_cell_volume: the minimum volume of cells in the resulting
partition.
@param closed_loop: boolean indicating whether the `closed loop`
algorithm should be used. default True.
@param conservative: if true, force sequence in reachability analysis
to stay inside starting cell. If false, safety
is ensured by keeping the sequence inside a convexified
version of the original proposition preserving cell.
@param max_num_poly: maximum number of polytopes in a region to use in
reachability analysis.
@param use_all_horizon: in closed loop algorithm: if we should look
for reachability also in less than N steps.
@param trans_length: the number of polytopes allowed to cross in a
transition. a value of 1 checks transitions
only between neighbors, a value of 2 checks
neighbors of neighbors and so on.
@param remove_trans: if True, remove found transitions between
non-neighbors.
@param abs_tol: maximum volume for an "empty" polytope
@param plotit: plot partitioning as it evolves
@type plotit: boolean,
default = False
@param save_img: save snapshots of partitioning to PDF files,
requires plotit=True
@type save_img: boolean,
default = False
@param cont_props: continuous propositions to plot
@type cont_props: list of C{Polytope}
@rtype: L{AbstractPwa}
"""
start_time = os.times()[0]
orig_ppp = part
min_cell_volume = (min_cell_volume /np.finfo(np.double).eps
*np.finfo(np.double).eps)
ispwa = isinstance(ssys, PwaSysDyn)
islti = isinstance(ssys, LtiSysDyn)
if ispwa:
(part, ppp2pwa, part2orig) = pwa_partition(ssys, part)
else:
part2orig = range(len(part))
# Save original polytopes, require them to be convex
if conservative:
orig_list = None
orig = [0]
else:
(part, new2old) = part2convex(part) # convexify
part2orig = [part2orig[i] for i in new2old]
# map new regions to pwa subsystems
if ispwa:
ppp2pwa = [ppp2pwa[i] for i in new2old]
remove_trans = False # already allowed in nonconservative
orig_list = []
for poly in part:
if len(poly) == 0:
orig_list.append(poly.copy())
elif len(poly) == 1:
orig_list.append(poly[0].copy())
else:
raise Exception("discretize: "
"problem in convexification")
orig = list(range(len(orig_list)))
# Cheby radius of disturbance set
# (defined within the loop for pwa systems)
if islti:
if len(ssys.E) > 0:
rd = ssys.Wset.chebR
else:
rd = 0.
# Initialize matrix for pairs to check
IJ = part.adj.copy()
IJ = IJ.todense()
    IJ = np.array(IJ)
#! /usr/bin/env python
from __future__ import print_function, division
import numpy as np
import pickle
from read_input import read_input
def main(filename, x=None, y=None, z=None, sep=0.01):
import pyscf
from matplotlib import pyplot as plt
import os
from matplotlib.colors import LogNorm
import matplotlib.colors as colors
from matplotlib import cm
from .intor import intor, k2real, real2k
import chem
# get input
inp = read_input(filename, build=False)
inp.nao = np.array([inp.cell[i].nao_nr() for i in range(inp.nsub)])
nao = inp.sSCF.cell.nao_nr()
# get kpoints
inp.band = read_band(inp.filename)
kpts1 = np.copy(inp.kpts)
print ('Initial kpts: ', repr(kpts1))
B = inp.sSCF.cell.reciprocal_vectors()
kpts2 = np.dot(inp.band.kpts, B)
print ('Final kpts: ' , repr(kpts2))
pi2 = np.pi * 2.
# get real-space lattice vectors
Ls = inp.sSCF.cell.get_lattice_Ls()
print ('Ls: ', repr(Ls))
# get overlap matrix in real-space lattice vectors
Sm_T = np.zeros((len(Ls), nao, nao))
mA = inp.sSCF.cell.to_mol()
for i in range(len(Ls)):
mB = mA.copy()
atm = []
for j in range(mA.natm):
coord = mA.atom_coord(j) * 0.52917720859 - Ls[i] * 0.52917720859
atm.append([mA.atom_symbol(j), coord])
mB.atom = atm
mB.unit = 'A'
mB.build(dump_input=False)
Sm_T[i] = intor(mA, mB, 'cint1e_ovlp_sph')
# read fock matrices from file
if inp.read and os.path.isfile(inp.read+'.fock'):
print ('Reading supermolecular Fock matrix from file: '+inp.read+'.fock')
        FS = pickle.load(open(inp.read+'.fock', 'rb'))  # pickle files must be opened in binary mode
print ('Reading supermolecular Smat matrix from file: '+inp.read+'.smat')
        Smat = pickle.load(open(inp.read+'.smat', 'rb'))
E_k1 = diagonalize(FS, Smat)
x = np.arange(len(E_k1)) / len(E_k1)
E_k1 = E_k1.transpose()
for i in range(len(E_k1)):
plt.plot(x,E_k1[i],'b-')
# transform into real space
FS_T = k2real(FS, kpts1, Ls)
Sm_T2 = k2real(Smat, kpts1, Ls)
print ('SMAT DIFF: ', np.abs(Sm_T - Sm_T2).max())
# transform back into k-space
FS_k = real2k(FS_T, kpts2, Ls)
Sm_k = real2k(Sm_T, kpts2, Ls)
# print (repr(FS.reshape(len(kpts1), nao, nao)[0]))
# print (FS_k[0])
print (np.abs(FS.reshape(len(kpts1), nao, nao)[0] - FS_k[0]).max())
E_k2 = diagonalize(FS_k, Sm_k, kpts2)
x = np.arange(len(E_k2)) / len(E_k2)
E_k2 = E_k2.transpose()
for i in range(len(E_k2)):
plt.plot(x,E_k2[i],'r-')
plt.show()
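# Illustrative invocation (comments only; the input filename is a placeholder
# for a read_input-compatible input file):
#
#   main('embedding.inp', sep=0.01)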
def diagonalize(Fock, Smat, kpts=None):
'''Diagonalize k-points fock matrix and returns matrix of E vectors.'''
    import scipy as sp
    import scipy.linalg  # ensure sp.linalg is actually loaded
nk = Fock.shape[0]
na = Fock.shape[1]
E = np.zeros((nk, na))
for k in range(nk):
if kpts is not None: print (kpts[k])
E[k], C = sp.linalg.eigh(Fock[k], Smat[k])
return E
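# Illustrative sketch of the generalized eigenvalue problem solved per k-point
# by `diagonalize` above; the random Hermitian "Fock" and positive definite
# "overlap" matrices are demo stand-ins, not physical data.
def _example_diagonalize():
    nk, nao = 2, 4
    rng = np.random.RandomState(1)
    A = rng.rand(nk, nao, nao) + 1j * rng.rand(nk, nao, nao)
    F = A + A.conj().transpose(0, 2, 1)  # Hermitian Fock matrices
    B = rng.rand(nk, nao, nao) + 1j * rng.rand(nk, nao, nao)
    S = np.einsum('kij,klj->kil', B, B.conj()) + nao * np.eye(nao)  # SPD overlaps
    return diagonalize(F, S)  # (nk, nao) band energies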
def read_band(filename):
'''Reads a formatted input file.'''
from input_reader import InputReader
import sys
from pyscf import gto, dft, pbc
from pyscf.pbc import gto as pbcgto, dft as pbcdft, df as pbcdf, scf as pbcscf
import numpy as np
from mole import concatenate_cells
# initialize reader for a pySCF input
reader = InputReader(comment=['!', '#', '::', '//'],
case=False, ignoreunknown=True)
# define "band" block
rband = reader.add_block_key('band', required=True)
rband.add_line_key('npoints', type=int, required=True)
rband.add_regex_line('points',
'\s*(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+\.?\d*)', repeat=True)
# read the input file
inp = reader.read_input(filename)
inp.nskpts = len(inp.band.points)
# get each special k-point
skpts = np.zeros((inp.nskpts,3))
for i in range(inp.nskpts):
skpts[i][0] = inp.band.points[i].group(1)
skpts[i][1] = inp.band.points[i].group(2)
skpts[i][2] = inp.band.points[i].group(3)
    # get distance between special kpoints
dkpts = np.zeros((inp.nskpts-1))
for i in range(inp.nskpts-1):
temp = skpts[i+1] - skpts[i]
dkpts[i] = np.sqrt(np.dot(temp,temp))
# kpoints spacing
kspace = dkpts.sum() / float(inp.band.npoints)
# get kpoint coordinates
x = np.array([], dtype=float)
for i in range(inp.nskpts-1):
vec = skpts[i+1] - skpts[i]
lvec = np.sqrt(np.dot(vec,vec))
temp = np.arange(0, lvec, kspace) / lvec
temp = np.outer(temp, vec) + skpts[i]
x = np.append(x, temp.flatten())
x = np.array(x).flatten()
lx = len(x)
x = x.reshape(int(lx/3.),3)
if not (x[-1] == skpts[-1]).all():
        x = np.append(x, [skpts[-1]], axis=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 10:42:20 2019
@author: berend
"""
## sesfunctions: wraps the dll in pure python
from ses_dll import SESdll
from ses_error import SESError
import ctypes
from structs import AnalyzerRegion, DetectorRegion, DetectorInfo
import numpy as np
class SESFunctions:
def __init__(self, dllpath, verbose = False):
self.sesdll = SESdll(dllpath) ## note: close this dll
self.verbose = verbose
self.e = SESError(verbose = self.verbose)
self.acq_funcs = {
'acq_channels' : (self.sesdll.GetAcquiredDataInteger,ctypes.c_int),
'acq_slices' : (self.sesdll.GetAcquiredDataInteger,ctypes.c_int),
'acq_iterations' : (self.sesdll.GetAcquiredDataInteger,ctypes.c_int),
'acq_intensity_unit':(self.sesdll.GetAcquiredDataString,ctypes.c_char_p),
'acq_channel_unit':(self.sesdll.GetAcquiredDataString,ctypes.c_char_p),
'acq_slice_unit':(self.sesdll.GetAcquiredDataString,ctypes.c_char_p),
'acq_spectrum' : (self.sesdll.GetAcquiredDataVectorDouble, ctypes.c_double),
'acq_slice' : (self.sesdll.GetAcquiredDataVectorDouble, ctypes.c_double),
'acq_image' : (self.sesdll.GetAcquiredDataVectorDouble, ctypes.c_double),
'acq_channel_scale':(self.sesdll.GetAcquiredDataVectorDouble, ctypes.c_double),
'acq_slice_scale':(self.sesdll.GetAcquiredDataVectorDouble, ctypes.c_double),
'acq_raw_image':(self.sesdll.GetAcquiredDataVectorInt32, ctypes.c_int),
'acq_current_step':(self.sesdll.GetAcquiredDataInteger,ctypes.c_int),
'acq_elapsed_time':(self.sesdll.GetAcquiredDataDouble, ctypes.c_double),
'acq_current_point':(self.sesdll.GetAcquiredDataInteger,ctypes.c_int),
'acq_point_intensity':(self.sesdll.GetAcquiredDataDouble, ctypes.c_double),
'acq_channel_intensity':(self.sesdll.GetAcquiredDataVectorDouble, ctypes.c_double),
}
self.property_funcs = {
'lib_description' :(self.sesdll.GetPropertyString,ctypes.c_char_p),
'lib_version' : (self.sesdll.GetPropertyString,ctypes.c_char_p),
'lib_error' : (self.sesdll.GetPropertyString,ctypes.c_char_p),
'lib_working_dir':(self.sesdll.GetPropertyString,ctypes.c_char_p),
'instrument_library':(self.sesdll.GetPropertyString,ctypes.c_char_p),
'instrument_status':(self.sesdll.GetPropertyInteger,ctypes.c_int),
'always_delay_region' : (self.sesdll.GetPropertyBool, ctypes.c_bool),
'allow_io_with_detector' : (self.sesdll.GetPropertyBool, ctypes.c_bool),
'instrument_model' : (self.sesdll.GetPropertyString, ctypes.c_char_p),
'instrument_serial_no':(self.sesdll.GetPropertyString, ctypes.c_char_p),
# 'detector_info':(self.sesdll.GetPropertyDouble, ctypes.c_double), ## Not mapped, use GetDetectorInfo
# 'detector_region':(self.sesdll.GetPropertyDouble, ctypes.c_double), ##Not mapped, use GetDetectorRegion
'element_set_count':(self.sesdll.GetPropertyInteger,ctypes.c_int),
'element_set':(self.sesdll.GetPropertyString,ctypes.c_char_p),
'element_name_count':(self.sesdll.GetPropertyInteger,ctypes.c_int),
'element_name':(self.sesdll.GetPropertyString,ctypes.c_char_p),
'lens_mode_count':(self.sesdll.GetPropertyInteger,ctypes.c_int),
'lens_mode':(self.sesdll.GetPropertyString,ctypes.c_char_p),
'pass_energy_count':(self.sesdll.GetPropertyInteger,ctypes.c_int),
'pass_energy':(self.sesdll.GetPropertyDouble,ctypes.c_double),
# 'analyzer_region':(self.sesdll.GetPropertyDouble,ctypes.c_double), ###Not mapped: use GetAnalyzerRegion
'use_external_io' : (self.sesdll.GetPropertyBool, ctypes.c_bool),
'use_detector' : (self.sesdll.GetPropertyBool, ctypes.c_bool),
'use_spin' : (self.sesdll.GetPropertyBool, ctypes.c_bool),
'region_name' : (self.sesdll.GetPropertyString, ctypes.c_char_p),
'temp_file_name' : (self.sesdll.GetPropertyString, ctypes.c_char_p),
'reset_data_between_iterations' : (self.sesdll.GetPropertyBool, ctypes.c_bool),
'use_binding_energy' : (self.sesdll.GetPropertyBool, ctypes.c_bool),
}
def Initialize(self):
"""Initialize the SES software
Args:
None
Returns:
None
"""
self.e.error(self.sesdll.Initialize(0)) ##0 is a standard parameter
def GetProperty(self, name):
"""Get property data
Args:
name: parameter name
Returns:
value of paramter
"""
if self.verbose:
print('Getting property')
func, returntype = self.property_funcs[name]
if returntype == ctypes.c_char_p:
if self.verbose:
print('Getting property ',name, ' of string type')
returnarray = ctypes.create_string_buffer(2000)
returnsize = ctypes.c_int(2000)
nameb = name.encode('ASCII')
self.e.error(func(nameb, 0, returnarray, ctypes.byref(returnsize)))
return returnarray.value.decode('ASCII')
else:
if self.verbose:
print('Getting property ', name, ' of type ', returntype)
returnvar = returntype(0)
returnsize = ctypes.c_int(0)
nameb = name.encode('ASCII')
self.e.error(func(nameb, 0, ctypes.byref(returnvar), ctypes.byref(returnsize)))
return returnvar.value
def SetProperty(self, pname, value):
"""Set a property
Args:
pname: property name
pvalue: value
Returns:
None
"""
if type(value) == int:
if self.verbose:
print('Setting int property')
value = ctypes.c_int(value)
pname = pname.encode('ASCII')
#Note: not sure about the -1 for size, using what is in the tutorial for SESwrapper
self.e.error(self.sesdll.SetPropertyInteger(pname, -1, ctypes.byref(value))) ##middle argument is size,
if type(value) == float:
if self.verbose:
print('Setting double/float property')
value = ctypes.c_double(value)
pname = pname.encode('ASCII')
self.e.error(self.sesdll.SetPropertyDouble(pname, -1, ctypes.byref(value))) ##middle argument is size
if type(value) == str:
if self.verbose:
print('Setting string property')
value = value.encode('ASCII')
pname = pname.encode('ASCII')
self.e.error(self.sesdll.SetPropertyString(pname, 0, value)) ##middle argument is size
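    # Illustrative usage sketch (comments only; the dll path and working
    # directory are placeholders, and property names come from the
    # dictionaries defined in __init__):
    #
    #   ses = SESFunctions('SESInstrument.dll', verbose=True)
    #   ses.Initialize()
    #   print(ses.GetProperty('lib_version'))
    #   ses.SetProperty('lib_working_dir', 'C:/SES')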
def Validate(self, element_set, lens_mode, pass_energy, kinetic_energy):
"""Validate the selected parameters, raise error if wrong
Args:
element_set: element set (string)
lens_mode: lens mode (string)
pass_energy: pass energy (float)
kinetic_energy: kinetic energy (float)
Returns:
None
"""
        if self.verbose: print('Validating')
element_set = element_set.encode('ASCII')
lens_mode = lens_mode.encode('ASCII')
self.e.error(self.sesdll.Validate(element_set, lens_mode, pass_energy, kinetic_energy))
        if self.verbose: print('Validation OK')
def ResetHW(self):
"""Reset the hardware.
Args:
None
Returns:
None
"""
if self.verbose:
print('Resetting hardware')
self.e.error(self.sesdll.ResetHW())
def TestHW(self):
"""Test the hardware.
Args:
None
Returns:
None
"""
if self.verbose:
print('Testing hardware')
self.e.error(self.sesdll.TestHW())
def LoadInstrument(self, instrumentpath):
"""Load the instrument dat file
Args:
path to the instrument
Returns:
None"""
if self.verbose:
            print('Loading Instrument')
instrumentpath = instrumentpath.encode('ASCII')
self.e.error(self.sesdll.LoadInstrument(instrumentpath))
def ZeroSupplies(self):
"""Zero supplies.
Args:
None
Returns:
None
"""
if self.verbose:
print('Zeroing supplies')
self.e.error(self.sesdll.ZeroSupplies())
def GetBindingEnergy(self):
"""Get the binding energy.
Returns:
binding energy
"""
        if self.verbose: print('Getting binding energy')
returnvar = ctypes.c_double(0)
self.e.error(self.sesdll.GetBindingEnergy(ctypes.byref(returnvar)))
return returnvar.value
    def SetBindingEnergy(self, binding_energy):
        """Set the binding energy.
Args:
binding_energy: float with binding energy
"""
        if self.verbose: print('Setting binding energy')
self.e.error(self.sesdll.SetBindingEnergy(binding_energy))
def GetKineticEnergy(self):
"""Get the kinetic energy.
Returns:
kinetic energy
"""
        if self.verbose: print('Getting kinetic energy')
returnvar = ctypes.c_double(0)
self.e.error(self.sesdll.GetKineticEnergy(ctypes.byref(returnvar)))
return returnvar.value
    def SetKineticEnergy(self, kinetic_energy):
        """Set the kinetic energy.
Args:
kinetic_energy: float with kinetic energy
"""
        if self.verbose: print('Setting kinetic energy')
self.e.error(self.sesdll.SetKineticEnergy(kinetic_energy))
def GetExcitationEnergy(self):
"""Get the excitation energy.
Returns:
excitation energy
"""
        if self.verbose: print('Getting excitation energy')
returnvar = ctypes.c_double(0)
self.e.error(self.sesdll.GetExcitationEnergy(ctypes.byref(returnvar)))
return returnvar.value
    def SetExcitationEnergy(self, excitation_energy):
        """Set the excitation energy.
Args:
excitation_energy: float with excitation energy
"""
        if self.verbose: print('Setting excitation energy')
self.e.error(self.sesdll.SetExcitationEnergy(excitation_energy))
def GetElementVoltage(self, element_name):
"""Get the element voltage.
Args:
element_name: name of the element
Returns:
element voltage
"""
        if self.verbose: print('Getting element voltage ', element_name)
element_name = element_name.encode('ASCII')
returnvar = ctypes.c_double(0)
self.e.error(self.sesdll.GetElementVoltage(element_name, ctypes.byref(returnvar)))
return returnvar.value
    def SetElementVoltage(self, element_name, element_voltage):
        """Set the element voltage.
        Args:
            element_voltage: float with element voltage
element_name: name of the element
"""
        if self.verbose: print('Setting element voltage of ', element_name)
element_name = element_name.encode('ASCII')
self.e.error(self.sesdll.SetElementVoltage(element_name,element_voltage))
def CheckAnalyzerRegion(self, analyzer_dict):
"""Take a dictionary of analyzer parameters, check the region, return the
number of steps taken, minimum dwell time per step, minimum energy step.
Args:
analyzer_dict: dictionary of parameters
Returns: (tuple of:)
nsteps
time_ms
min_energy_step_ev
"""
if self.verbose:
print('Checking Analyzer region')
analyzer = AnalyzerRegion(paramdict = analyzer_dict)
nsteps = ctypes.c_int(0)
time_ms = ctypes.c_double(0)
min_energy_step_ev = ctypes.c_double(0)
self.e.error(self.sesdll.CheckAnalyzerRegion(analyzer, ctypes.byref(nsteps),ctypes.byref(time_ms),ctypes.byref(min_energy_step_ev)))
return nsteps.value,time_ms.value,min_energy_step_ev.value
def GetDetectorInfo(self):
"""Get the detector info
Args:
None
Returns:
dictionary with detector properties"""
if self.verbose:
print('Getting Detector info')
info = DetectorInfo()
self.e.error(self.sesdll.GetDetectorInfo(info))
return dict(info)
def SetAnalyzerRegion(self, analyzer_dict):
"""Take a dictionary of parameters and set the region
Args:
analyzer: dictionary of parameters"""
if self.verbose:
print('Setting Analyzer region')
analyzer = AnalyzerRegion(paramdict = analyzer_dict)
self.e.error(self.sesdll.SetAnalyzerRegion(analyzer))
def GetAnalyzerRegion(self):
"""Get the current analyzer region
Args:
None
Returns:
            Analyzer dictionary parameter"""
if self.verbose:
print('Getting Analyzer region')
analyzer = AnalyzerRegion()
self.e.error(self.sesdll.GetAnalyzerRegion(analyzer))
return dict(analyzer)
def SetDetectorRegion(self, detector_dict):
"""Take a dictionary of parameters and set the region
Args:
detector_dict: dictionary of parameters describing detector region"""
if self.verbose:
print('Setting Detector region')
detector = DetectorRegion(paramdict = detector_dict)
self.e.error(self.sesdll.SetDetectorRegion(detector))
def GetDetectorRegion(self):
"""Get the current detector region
Args:
None
Returns:
            Detector dictionary parameter"""
if self.verbose:
print('Getting Detector region')
detector = DetectorRegion()
        self.e.error(self.sesdll.GetDetectorRegion(detector))
return dict(detector)
def InitAcquisition(self, blockpointready, blockregionready):
"""Initialize the acquisition.
Args:
blockpointready: If true, this parameter tells the acquisition thread
to wait for confirmation between each step taken in
a swept mode acquisition.
blockregionready: If true, this parameter tells the acquisition thread
to wait for confirmation once the acquisition is finished.
Returns:
None
"""
if self.verbose:
print('Initializing acquisition')
self.e.error(self.sesdll.InitAcquisition(blockpointready, blockregionready))
def StartAcquisition(self):
"""Start the acquisition.
Args:
None
Returns:
None
"""
if self.verbose:
print('Starting acquisition')
self.e.error(self.sesdll.StartAcquisition())
def WaitForRegionReady(self, timeout_time):
"""Returns when the region is ready.
Args:
timeout_time: -1 is infinite
Returns:
None
"""
if self.verbose:
print('Waiting for region')
timeout_time = ctypes.c_int(timeout_time)
self.e.error(self.sesdll.WaitForRegionReady(timeout_time))
def WaitForPointReady(self, timeout_time):
"""Wait for point to be ready. When used in fixed mode, this will not return
        until timeout_time is reached.
Args:
timeout_time: -1 is infinite
Returns:
None
"""
if self.verbose:
print('Waiting for point')
timeout_time = ctypes.c_int(timeout_time)
self.e.error(self.sesdll.WaitForPointReady(timeout_time))
    def ContinueAcquisition(self):
        """Continue the acquisition.
Args:
None
Returns:
None
"""
if self.verbose:
print('Continuing acquisition')
self.e.error(self.sesdll.ContinueAcquisition())
def StopAcquisition(self):
"""Stop the acquisition.
Args:
None
Returns:
None
"""
if self.verbose:
print('Stopping acquisition')
self.e.error(self.sesdll.StopAcquisition())
def GetAcquiredData(self, name):
"""Get acquired data
Args:
name: parameter name
Returns:
            value of parameter
"""
if self.verbose:
print('Getting data')
func, returntype = self.acq_funcs[name]
if returntype == ctypes.c_char_p:
if self.verbose:
print('Getting data ',name, ' of string type')
returnarray = ctypes.create_string_buffer(2000)
returnsize = ctypes.c_int(2000)
nameb = name.encode('ASCII')
self.e.error(func(nameb, 0, returnarray, ctypes.byref(returnsize)))
return returnarray.value.decode('ASCII')
else:
if self.verbose:
print('Getting data ', name, ' of type ', returntype)
returnvar = returntype(0)
returnsize = ctypes.c_int(0)
nameb = name.encode('ASCII')
self.e.error(func(nameb, 0, ctypes.byref(returnvar), ctypes.byref(returnsize)))
return returnvar.value
def GetAcquiredDataArray(self, name, size, data = None, index = 0):
"""Get acquired data
Args:
name: parameter name
size: size for the data
data: optional pointer to data holding object (numpy array)
index: for parameters that require an index
"""
if self.verbose:
print('Getting data array')
func, returntype = self.acq_funcs[name]
returnarray = (returntype * size)()
returnsize = ctypes.c_int(size)
nameb = name.encode('ASCII')
self.e.error(func(nameb, index, returnarray, ctypes.byref(returnsize)))
if data is None:
            data = np.array(returnarray)
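# Usage sketch (illustrative only). The constructor of the wrapper class is not
# shown in this excerpt, so the instantiation below (`ses = SesWrapper()`), the
# instrument path, the validation strings, the analyzer-region keys and the
# data name are all assumptions; only the method names come from the wrapper
# defined above.
#
#   ses = SesWrapper()                                      # hypothetical constructor
#   ses.Initialize()
#   ses.LoadInstrument('instrument.dat')                    # placeholder path
#   ses.Validate('Low Pass', 'Transmission', 20.0, 100.0)   # placeholder strings
#   ses.SetAnalyzerRegion({'fixed': True, 'kineticEnergy': 100.0})  # placeholder keys
#   ses.InitAcquisition(blockpointready=False, blockregionready=True)
#   ses.StartAcquisition()
#   ses.WaitForRegionReady(-1)                              # -1 waits indefinitely
#   spectrum = ses.GetAcquiredDataArray('acq_spectrum', size=1024)  # placeholder name/size
#   ses.StopAcquisition()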
"""
Replicate "Very Deep Convolutional Networks for Natural Language Processing" by <NAME>,
<NAME>, <NAME>, <NAME>, 2016
New NLP architecture:
1. Operate at lowest atomic representation of text (characters)
2. Use deep-stack of local operations to learn high-level hierarchical representation
attribution: https://github.com/ilkarman/NLP-Sentiment/
"""
import numpy as np
import pandas as pd
import mxnet as mx
import wget
import time
import functools
import threading
import os.path
import Queue
import pickle
from mxnet.io import DataBatch
ctx = mx.gpu(0)
AZ_ACC = "amazonsentimenik"
AZ_CONTAINER = "textclassificationdatasets"
ALPHABET = list("abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+ =<>()[]{}")
FEATURE_LEN = 1014
BATCH_SIZE = 128
EPOCHS = 10
SD = 0.05 # std for gaussian distribution
NOUTPUT = 2
DATA_SHAPE = (BATCH_SIZE, 1, FEATURE_LEN, 1)
def download_file(url):
# Create file-name
local_filename = url.split('/')[-1]
if os.path.isfile(local_filename):
pass
# print("The file %s already exist in the current directory\n" % local_filename)
else:
# Download
print("downloading ...\n")
wget.download(url)
print('saved data\n')
def load_file(infile):
"""
Takes .csv and returns loaded data along with labels
"""
print("processing data frame: %s" % infile)
# Get data from windows blob
download_file('https://%s.blob.core.windows.net/%s/%s' % (AZ_ACC, AZ_CONTAINER, infile))
# load data into dataframe
df = pd.read_csv(infile,
header=None,
names=['sentiment', 'summary', 'text'])
# concat summary, review; trim to 1014 char; reverse; lower
df['rev'] = df.apply(lambda x: "%s %s" % (x['summary'], x['text']), axis=1)
df.rev = df.rev.str[:FEATURE_LEN].str[::-1].str.lower()
# store class as nparray
df.sentiment -= 1
y_split = np.asarray(df.sentiment, dtype='bool')
print("finished processing data frame: %s" % infile)
print("data contains %d obs, each epoch will contain %d batches" % (df.shape[0], df.shape[0] // BATCH_SIZE))
return df.rev, y_split
def load_data_frame(X_data, y_data, batch_size=128, shuffle=False):
"""
For low RAM this methods allows us to keep only the original data
in RAM and calculate the features (which are orders of magnitude bigger
on the fly). This keeps only 10 batches worth of features in RAM using
asynchronous programing and yields one DataBatch() at a time.
"""
if shuffle:
idx = X_data.index
assert len(idx) == len(y_data)
rnd = np.random.permutation(idx)
X_data = X_data.reindex(rnd)
y_data = y_data[rnd]
# Dictionary to create character vectors
char_index = dict((c, i + 2) for i, c in enumerate(ALPHABET))
# Yield processed batches asynchronously
    # Buffer 'buffy' batches at a time
def async_prefetch_wrp(iterable, buffy=30):
poison_pill = object()
def worker(q, it):
for item in it:
q.put(item)
q.put(poison_pill)
queue = Queue.Queue(buffy)
it = iter(iterable)
thread = threading.Thread(target=worker, args=(queue, it))
thread.daemon = True
thread.start()
while True:
item = queue.get()
if item == poison_pill:
return
else:
yield item
    # Async decorator that wraps a generator function with the prefetch above
def async_prefetch(func):
@functools.wraps(func)
def wrapper(*args, **kwds):
return async_prefetch_wrp(func(*args, **kwds))
return wrapper
@async_prefetch
def feature_extractor(dta, val):
# Yield mini-batch amount of character vectors
# X_split = np.zeros([batch_size, 1, FEATURE_LEN, len(ALPHABET)], dtype='bool')
X_split = np.zeros([batch_size, 1, FEATURE_LEN, 1], dtype='int')
for ti, tx in enumerate(dta):
chars = list(tx)
for ci, ch in enumerate(chars):
if ch in ALPHABET:
X_split[ti % batch_size][0][ci] = char_index[ch]
# X_split[ti % batch_size][0][ci] = np.array(character_hash[ch], dtype='bool')
# No padding -> only complete batches processed
if (ti + 1) % batch_size == 0:
yield mx.nd.array(X_split), mx.nd.array(val[ti + 1 - batch_size:ti + 1])
# X_split = np.zeros([batch_size, 1, FEATURE_LEN, len(ALPHABET)], dtype='bool')
X_split = np.zeros([batch_size, 1, FEATURE_LEN, 1], dtype='int')
# Yield one mini-batch at a time and asynchronously process to keep 4 in queue
for Xsplit, ysplit in feature_extractor(X_data, y_data):
yield DataBatch(data=[Xsplit], label=[ysplit])
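# Usage sketch (illustrative): load_data_frame() is a generator of mxnet
# DataBatch objects, so only a bounded number of feature batches ever live in
# RAM at once. The file name and the training loop below are assumptions made
# for illustration; `module` stands for a hypothetical, already-bound
# mx.mod.Module and is not defined in this script.
#
#   X_train, y_train = load_file('train.csv')            # placeholder file name
#   for epoch in range(EPOCHS):
#       for batch in load_data_frame(X_train, y_train, batch_size=BATCH_SIZE, shuffle=True):
#           module.forward(batch, is_train=True)
#           module.backward()
#           module.update()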
class k_max_pool(mx.operator.CustomOp):
"""
https://github.com/CNevd/DeepLearning-Mxnet/blob/master/DCNN/dcnn_train.py#L15
"""
def __init__(self, k):
super(k_max_pool, self).__init__()
self.k = int(k)
def forward(self, is_train, req, in_data, out_data, aux):
x = in_data[0].asnumpy()
# assert(4 == len(x.shape))
ind = np.argsort(x, axis=2)
sorted_ind = np.sort(ind[:, :, -(self.k):, :], axis=2)
dim0, dim1, dim2, dim3 = sorted_ind.shape
self.indices_dim0 = np.arange(dim0).repeat(dim1 * dim2 * dim3)
self.indices_dim1 = np.transpose(
np.arange(dim1).repeat(dim2 * dim3).reshape((dim1 * dim2 * dim3, 1)).repeat(dim0, axis=1)).flatten()
self.indices_dim2 = sorted_ind.flatten()
self.indices_dim3 = np.transpose(
            np.arange(dim3).reshape((dim3, 1)).repeat(dim0 * dim1 * dim2, axis=1)).flatten()
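# What the gather indices above implement, sketched in plain numpy (illustrative
# only): k-max pooling keeps the k largest activations along axis 2 while
# preserving their original order. For k = 2:
#
#   x = np.array([[[[1.], [5.], [3.], [4.]]]])       # shape (1, 1, 4, 1)
#   ind = np.argsort(x, axis=2)                      # positions, ascending by value
#   top_k = np.sort(ind[:, :, -2:, :], axis=2)       # positions 1 and 3, kept in order
#   # gathering x at those positions yields [[5.], [4.]], shape (1, 1, 2, 1)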
# Copyright 2021, Blue Brain Project, EPFL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Volume interpolation based on pairwise interpolation between slices."""
from __future__ import annotations
import logging
import warnings
from abc import ABC, abstractmethod
from math import ceil, log2
import numpy as np
import torch
from torchvision.transforms import ToTensor
from atlinter.data import GeneDataset
from atlinter.utils import find_closest
logger = logging.getLogger(__name__)
class PairInterpolationModel(ABC):
"""Base class for pair-interpolation models.
Subclasses of this class implement an interpolation between two given
    images `img1` and `img2` to produce an intermediate image `img_mid`.
This class and its subclasses are used by the PairInterpolate class,
which applies a given interpolation model to concrete data.
"""
def before_interpolation(self, img1, img2):
"""Run initialization and pre-processing steps before interpolation.
Typical applications of this method are padding and cropping of
input images to fit the model requirements, as well as initialisation
of any internal state, should one be necessary.
Parameters
----------
img1 : np.ndarray
The left image of shape (width, height)
img2 : np.ndarray
The right image of shape (width, height).
Returns
-------
img1 : np.ndarray
The pre-processed left image.
img2 : np.ndarray
The pre-processed right image.
"""
return img1, img2
@abstractmethod
def interpolate(self, img1, img2):
"""Interpolate two images.
In the typical setting the input images are going to be of the format
as returned by the `before_interpolation`.
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
def after_interpolation(self, interpolated_images):
"""Run any post-processing after all interpolation is done.
Typical applications are padding and cropping of the image stack,
as well as any clean-up of the model state.
Parameters
----------
interpolated_images : np.ndarray
The stacked interpolated images. The array will include the input
images as the first and the last items respectively and will
therefore have the shape (n_interpolated + 2, height, width)
Returns
-------
np.ndarray
The post-processed interpolated images.
"""
return interpolated_images
class LinearPairInterpolationModel(PairInterpolationModel):
"""Linear pairwise interpolation.
This is the simplest possible interpolation model where the middle
image is the average of the left and right images.
"""
def interpolate(self, img1, img2):
"""Interpolate two images using linear interpolation.
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
img_mid = np.mean([img1, img2], axis=0)
return img_mid
class RIFEPairInterpolationModel(PairInterpolationModel):
"""Pairwise image interpolation using the RIFE model.
The typical use is
>>> from atlinter.vendor.rife.RIFE_HD import Model as RifeModel
>>> from atlinter.vendor.rife.RIFE_HD import device as rife_device
>>> rife_model = RifeModel()
>>> rife_model.load_model("/path/to/train_log", -1)
>>> rife_model.eval()
>>> rife_interpolation_model = RIFEPairInterpolationModel(rife_model, rife_device)
Parameters
----------
rife_model : atlinter.vendor.rife.RIFE_HD.Model
The RIFE model instance.
rife_device : from atlinter.vendor.rife.RIFE_HD.device
The RIFE device.
"""
def __init__(self, rife_model, rife_device):
# The behaviour of torch.nn.functional.interpolate has slightly changed,
# which leads to this warning. It doesn't seem to have an impact on the
# results, but if the authors of RIFE decide to update their code base
# by either specifying the `recompute_scale_factor` parameter or by
# some other means, then this warning filter should be removed.
# TODO: check the RIFE code for updates and remove the filter if necessary.
warnings.filterwarnings(
"ignore",
"The default behavior for interpolate/upsample with float scale_factor",
UserWarning,
)
self.rife_model = rife_model
self.rife_device = rife_device
self.shape = (0, 0)
def before_interpolation(self, img1, img2):
"""Pad input images to a multiple of 32 pixels.
Parameters
----------
img1 : np.ndarray
            The left image.
        img2 : np.ndarray
            The right image.
Returns
-------
img1 : np.ndarray
The padded left image.
img2 : np.ndarray
The padded right image.
"""
image_shape = img1.shape
if len(image_shape) == 3 and image_shape[-1] == 3:
rgb = True
image_shape = image_shape[:-1]
else:
rgb = False
self.shape = np.array(image_shape)
pad_x, pad_y = ((self.shape - 1) // 32 + 1) * 32 - self.shape
if rgb:
img1 = np.pad(img1, ((0, pad_x), (0, pad_y), (0, 0)))
img2 = np.pad(img2, ((0, pad_x), (0, pad_y), (0, 0)))
else:
img1 = np.pad(img1, ((0, pad_x), (0, pad_y)))
img2 = np.pad(img2, ((0, pad_x), (0, pad_y)))
return img1, img2
def interpolate(self, img1, img2):
"""Interpolate two images using RIFE.
Note: img1 and img2 needs to have the same shape.
If img1, img2 are grayscale, the dimension should be (height, width).
If img1, img2 are RGB image, the dimension should be (height, width, 3).
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
# Add batch and RGB dimensions (if not already), set device
if len(img1.shape) == 2:
rgb = False
img1 = (
torch.tensor(img1, dtype=torch.float32)
.repeat((1, 3, 1, 1))
.to(self.rife_device)
)
img2 = (
torch.tensor(img2, dtype=torch.float32)
.repeat((1, 3, 1, 1))
.to(self.rife_device)
)
else:
rgb = True
img1 = np.transpose(img1, (2, 0, 1))[np.newaxis]
img2 = np.transpose(img2, (2, 0, 1))[np.newaxis]
img1 = torch.tensor(img1, dtype=torch.float32).to(self.rife_device)
img2 = torch.tensor(img2, dtype=torch.float32).to(self.rife_device)
# The actual interpolation
img_mid = self.rife_model.inference(img1, img2).detach().cpu().numpy()
img_mid = img_mid.squeeze()
if rgb:
# Put the RGB channel at the end
img_mid = np.transpose(img_mid, (1, 2, 0))
else:
# Average out the RGB dimension
img_mid = img_mid.mean(axis=0)
return img_mid
def after_interpolation(self, interpolated_images):
"""Undo the padding added in `before_interpolation`.
Parameters
----------
interpolated_images : np.ndarray
The stacked interpolated images.
If input images are grayscale,
the dimension should be (n_img, height, width) or (height, width).
If input images are RGB image,
the dimension should be (n_img, height, width, 3) or (height, width, 3).
Returns
-------
np.ndarray
The stacked interpolated images with padding removed.
"""
# No n_img dimension: (height, width) or (height, width, 3)
if len(interpolated_images.shape) == 2 or (
len(interpolated_images.shape) == 3 and interpolated_images.shape[-1] == 3
):
return interpolated_images[: self.shape[0], : self.shape[1]]
# n_img dimension: (n_img, height, width) or (n_img, height, width, 3)
else:
return interpolated_images[:, : self.shape[0], : self.shape[1]]
class CAINPairInterpolationModel(PairInterpolationModel):
"""Pairwise image interpolation using the CAIN model.
The typical use is
>>> from atlinter.vendor.cain.cain import CAIN
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> cain_model = CAIN().to(device)
>>> cain_checkpoint = torch.load("pretrained_cain.pth", map_location=device)
>>> cain_model.load_state_dict(cain_checkpoint)
>>> cain_interpolation_model = CAINPairInterpolationModel(cain_model)
Parameters
----------
cain_model : atlinter.vendor.cain.cain.CAIN or torch.nn.DataParallel
The CAIN model instance.
"""
def __init__(self, cain_model):
self.cain_model = cain_model
self.to_tensor = ToTensor()
def interpolate(self, img1, img2):
"""Interpolate two images using CAIN.
Note: img1 and img2 needs to have the same shape.
If img1, img2 are grayscale, the dimension should be (height, width).
If img1, img2 are RGB image, the dimension should be (height, width, 3).
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
# Add batch and RGB dimensions
if len(img1.shape) == 2:
rgb = False
img1 = self.to_tensor(img1).repeat((1, 3, 1, 1))
img2 = self.to_tensor(img2).repeat((1, 3, 1, 1))
else:
rgb = True
img1 = self.to_tensor(np.transpose(img1, (2, 0, 1)))[None]
img2 = self.to_tensor(np.transpose(img2, (2, 0, 1)))[None]
# The actual interpolation
img_mid, _ = self.cain_model(img1, img2)
img_mid = img_mid.detach().cpu().numpy()
img_mid = img_mid.squeeze()
if rgb:
# Put the RGB channel at the end
img_mid = np.transpose(img_mid, (1, 2, 0))
else:
# Average out the RGB dimension
img_mid = img_mid.mean(axis=0)
return img_mid
class AntsPairInterpolationModel(PairInterpolationModel):
"""Pairwise image interpolation using AntsPy registration.
Typical use is
>>> from atlannot.ants import register, transform
>>> ants_interpolation_model = AntsPairInterpolationModel(register, transform)
Parameters
----------
register_fn : atlannot.ants.register
The AntsPy registration function
transform_fn : atlannot.ants.transform
The AntsPy transformation function
"""
def __init__(self, register_fn, transform_fn):
self.register_fn = register_fn
self.transform_fn = transform_fn
def interpolate(self, img1, img2):
"""Interpolate two images using AntsPy registration.
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
# Ensure the correct d-type
img1 = img1.astype(np.float32)
img2 = img2.astype(np.float32)
# The actual interpolation
nii_data = self.register_fn(fixed=img2, moving=img1)
img_mid = self.transform_fn(img1, nii_data / 2)
return img_mid
class PairInterpolate:
"""Runner for pairwise interpolation using different models.
Parameters
----------
n_repeat : int (optional)
The number of times the interpolation should be iterated. For each
iteration an interpolated image is inserted between each pair of
images from the previous iteration. Therefore n_{i+1} = n_i + (n_i + 1).
For example, for n_repeat=3 the progression of the number of images
will be the following: input = 0 -> 1 -> 3 -> 7
"""
def __init__(self, n_repeat=1):
self.n_repeat = n_repeat
def repeat(self, n_repeat):
"""Set the number of interpolation iterations.
Parameters
----------
n_repeat : int
The new number of interpolation iterations. See `__init__` for more
details.
"""
self.n_repeat = n_repeat
return self
def __call__(self, img1, img2, model: PairInterpolationModel):
"""Run the interpolation with the given interpolation model.
Parameters
----------
img1 : np.ndarray
The left input image.
img2 : np.ndarray
The right input image.
model : PairInterpolationModel
The interpolation model.
Returns
-------
interpolated_images : np.ndarray
A stack of interpolation images. The input images are not included
in this stack.
"""
img1, img2 = model.before_interpolation(img1, img2)
interpolated_images = self._repeated_interpolation(
img1, img2, model, self.n_repeat
)
interpolated_images = np.stack(interpolated_images)
interpolated_images = model.after_interpolation(interpolated_images)
return interpolated_images
def _repeated_interpolation(self, img1, img2, model, n_repeat):
# End of recursion
if n_repeat <= 0:
return []
# Recursion step
img_mid = model.interpolate(img1, img2)
left_images = self._repeated_interpolation(img1, img_mid, model, n_repeat - 1)
right_images = self._repeated_interpolation(img_mid, img2, model, n_repeat - 1)
return [*left_images, img_mid, *right_images]
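# Usage sketch (illustrative): with the linear model and n_repeat=2, a pair of
# slices yields 2**2 - 1 = 3 intermediate images (the 0 -> 1 -> 3 -> 7
# progression described in the class docstring); the input images themselves
# are not part of the returned stack.
#
#   img1 = np.zeros((4, 4), dtype=np.float32)
#   img2 = np.ones((4, 4), dtype=np.float32)
#   stack = PairInterpolate(n_repeat=2)(img1, img2, LinearPairInterpolationModel())
#   # stack.shape == (3, 4, 4); stack[0], stack[1], stack[2] are constant images
#   # with values 0.25, 0.5 and 0.75 respectively.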
class GeneInterpolate:
"""Interpolation of a gene dataset.
Parameters
----------
gene_data : GeneData
Gene Dataset to interpolate. It contains a `volume` of reference shape
with all known places located at the right place and a `metadata` dictionary
containing information about the axis of the dataset and the section numbers.
model : PairInterpolationModel
Pair-interpolation model.
"""
def __init__(
self,
gene_data: GeneDataset,
model: PairInterpolationModel,
):
self.gene_data = gene_data
self.model = model
self.axis = self.gene_data.axis
self.gene_volume = self.gene_data.volume.copy()
# If sagittal axis, put the sagittal dimension first
if self.axis == "sagittal":
self.gene_volume = np.moveaxis(self.gene_volume, 2, 0)
def get_interpolation(
self, left: int, right: int
) -> tuple[np.ndarray | None, np.ndarray | None]:
"""Compute the interpolation for a pair of images.
Parameters
----------
left
Slice number of the left image to consider.
right
Slice number of the right image to consider.
Returns
-------
interpolated_images : np.array or None
Interpolated image for the given pair of images.
Array of shape (N, dim1, dim2, 3) with N the number of
interpolated images.
predicted_section_numbers : np.array or None
Slice value of the predicted images.
Array of shape (N, 1) with N the number of interpolated images.
"""
diff = right - left
if diff == 0:
return None, None
n_repeat = self.get_n_repeat(diff)
pair_interpolate = PairInterpolate(n_repeat=n_repeat)
interpolated_images = pair_interpolate(
self.gene_volume[left], self.gene_volume[right], self.model
)
predicted_section_numbers = self.get_predicted_section_numbers(
left, right, n_repeat
)
return interpolated_images, predicted_section_numbers
def get_all_interpolation(self) -> tuple[np.ndarray, np.ndarray]:
"""Compute pair interpolation for the entire volume.
Returns
-------
all_interpolated_images : np.array
Interpolated image for the entire volume.
Array of shape (N, dim1, dim2, 3) with N the number of
interpolated images.
all_predicted_section_numbers : np.array
Slice value of the predicted images.
Array of shape (N, 1) with N the number of interpolated images.
"""
# TODO: Try to change the implementation of the prediction so that
# we do not predict slices that are not needed.
logger.info("Start predicting interpolation between two known slices")
known_slices = sorted(self.gene_data.known_slices)
all_interpolated_images = []
all_predicted_section_numbers = []
for i in range(len(known_slices) - 1):
left, right = known_slices[i], known_slices[i + 1]
(
interpolated_images,
predicted_section_numbers,
) = self.get_interpolation(left, right)
if interpolated_images is None:
continue
all_interpolated_images.append(interpolated_images)
all_predicted_section_numbers.append(predicted_section_numbers)
if i % 5 == 0:
logger.info(f"{i} / {len(known_slices) - 1} interpolations predicted")
all_interpolated_images = np.concatenate(all_interpolated_images)
all_predicted_section_numbers = np.concatenate(all_predicted_section_numbers)
return all_interpolated_images, all_predicted_section_numbers
def predict_slice(self, slice_number: int) -> np.ndarray:
"""Predict one gene slice.
Parameters
----------
slice_number
Slice section to predict.
Returns
-------
np.ndarray
Predicted gene slice. Array of shape (dim1, dim2, 3)
being (528, 320) for sagittal dataset and
(320, 456) for coronal dataset.
"""
left, right = self.gene_data.get_surrounding_slices(slice_number)
if left is None:
return self.gene_volume[right]
elif right is None:
return self.gene_volume[left]
else:
interpolated_images, predicted_section_numbers = self.get_interpolation(
left, right
)
index = find_closest(slice_number, predicted_section_numbers)[0]
return interpolated_images[index]
def predict_volume(self) -> np.ndarray:
"""Predict entire volume with known gene slices.
This function might be slow.
"""
volume_shape = self.gene_data.volume_shape
volume = np.zeros(volume_shape, dtype="float32")
logger.info(f"Start predicting the volume of shape {volume_shape}")
if self.gene_data.axis == "sagittal":
            volume = np.moveaxis(volume, 2, 0)
import pickle
import os
import numpy as np
def unpickle(file):
with open(file, 'rb') as fo:
d = pickle.load(fo, encoding='bytes')
return d
def get():
path, _ = os.path.split(__file__)
files = ['data_batch_'+str(i) for i in range(1, 6)]
files.append('test_batch')
labels_list = []
data_list = []
for file in files:
f = os.path.join(path, file)
d = unpickle(f)
        labels = np.array(d[b'labels'])
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is right ward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,h,w) are length of the object sizes
The heading angle is a rotation rad from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: <NAME>
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import pc_util
import sunrgbd_utils
from sunrgbd_utils import extract_pc_in_box3d
from model_util_sunrgbd import SunrgbdDatasetConfig
DC = SunrgbdDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 64 # maximum number of objects allowed per scene
MEAN_COLOR_RGB = np.array([0.5,0.5,0.5]) # sunrgbd color is in 0~1
DIST_THRESH = 0.1#0.2
VAR_THRESH = 5e-3
CENTER_THRESH = 0.1
LOWER_THRESH = 1e-6
NUM_POINT = 50
NUM_POINT_LINE = 10
LINE_THRESH = 0.1#0.2
MIND_THRESH = 0.1
NUM_POINT_SEM_THRESHOLD = 1
def check_upright(para_points):
return (para_points[0][-1] == para_points[1][-1]) and (para_points[1][-1] == para_points[2][-1]) and (para_points[2][-1] == para_points[3][-1])
def check_z(plane_equ, para_points):
return np.sum(para_points[:,2] + plane_equ[-1]) / 4.0 < LOWER_THRESH
def clockwise2counter(angle):
'''
@Args:
angle: clockwise from x axis, from 0 to 2*pi,
@Returns:
theta: counter clockwise, -pi / 2 ~ pi / 2, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
'''
return -((angle + np.pi / 2) % np.pi) + np.pi / 2;
def point2line_dist(points, a, b):
'''
@Args:
points: (N, 3)
a / b: (3,)
@Returns:
distance: (N,)
'''
x = b - a
t = np.dot(points - a, x) / np.dot(x, x)
c = a + t[:, None] * np.tile(x, (t.shape[0], 1))
return np.linalg.norm(points - c, axis=1)
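# Quick sanity check (illustrative): the distance from the point (0, 1, 0) to
# the line through a=(0, 0, 0) and b=(1, 0, 0) is 1.
#
#   point2line_dist(np.array([[0., 1., 0.]]),
#                   np.array([0., 0., 0.]), np.array([1., 0., 0.]))  # -> array([1.])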
def get_linesel(points, corners, direction):
''' corners:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
if direction == 'lower':
sel1 = point2line_dist(points, corners[0], corners[2]) < LINE_THRESH
sel2 = point2line_dist(points, corners[4], corners[6]) < LINE_THRESH
sel3 = point2line_dist(points, corners[0], corners[4]) < LINE_THRESH
sel4 = point2line_dist(points, corners[2], corners[6]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'upper':
sel1 = point2line_dist(points, corners[1], corners[3]) < LINE_THRESH
sel2 = point2line_dist(points, corners[5], corners[7]) < LINE_THRESH
sel3 = point2line_dist(points, corners[1], corners[5]) < LINE_THRESH
sel4 = point2line_dist(points, corners[3], corners[7]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'left':
sel1 = point2line_dist(points, corners[0], corners[1]) < LINE_THRESH
sel2 = point2line_dist(points, corners[2], corners[3]) < LINE_THRESH
return sel1, sel2
elif direction == 'right':
sel1 = point2line_dist(points, corners[4], corners[5]) < LINE_THRESH
sel2 = point2line_dist(points, corners[6], corners[7]) < LINE_THRESH
return sel1, sel2
else:
        raise AssertionError('direction must be one of: lower, upper, left, right')
def get_linesel2(points, ymin, ymax, zmin, zmax, axis=0):
#sel3 = sweep(points, axis, ymax, 2, zmin, zmax)
#sel4 = sweep(points, axis, ymax, 2, zmin, zmax)
sel3 = np.abs(points[:,axis] - ymin) < LINE_THRESH
sel4 = np.abs(points[:,axis] - ymax) < LINE_THRESH
return sel3, sel4
''' ATTENTION: SUNRGBD, size_label is only half the actual size
'''
def params2bbox(center, size, angle):
''' from bbox_center, angle and size to bbox
@Args:
center: (3,)
size: (3,)
angle: -pi ~ pi, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
@Returns:
bbox: 8 x 3, order:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
xsize = size[0]
ysize = size[1]
zsize = size[2]
vx = np.array([np.cos(angle), np.sin(angle), 0])
vy = np.array([-np.sin(angle), np.cos(angle), 0])
vx = vx * np.abs(xsize) / 2
vy = vy * np.abs(ysize) / 2
vz = np.array([0, 0, np.abs(zsize) / 2])
bbox = np.array([\
center - vx - vy - vz, center - vx - vy + vz,
center - vx + vy - vz, center - vx + vy + vz,
center + vx - vy - vz, center + vx - vy + vz,
center + vx + vy - vz, center + vx + vy + vz])
return bbox
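# Usage sketch (illustrative): an axis-aligned unit cube centred at the origin.
# With angle=0 the half-extent vectors are vx=(0.5, 0, 0), vy=(0, 0.5, 0),
# vz=(0, 0, 0.5), so the first corner is (-0.5, -0.5, -0.5) and the last is
# (0.5, 0.5, 0.5), in the corner order documented above.
#
#   corners = params2bbox(np.zeros(3), np.ones(3), 0.0)   # shape (8, 3)
#   # clockwise2counter(np.pi / 4) == -np.pi / 4: a clockwise heading from +x is
#   # mapped onto the counter-clockwise convention that params2bbox expects.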
class SunrgbdDetectionVotesDataset(Dataset):
def __init__(self, data_path=None, split_set='train', num_points=20000,
use_color=False, use_height=False, use_v1=False,
augment=False, scan_idx_list=None):
assert(num_points<=50000)
self.use_v1 = use_v1
if use_v1:
self.data_path = os.path.join(data_path, 'sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
# self.data_path = os.path.join('/scratch/cluster/yanght/Dataset/sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
        else:
            raise AssertionError("v2 data is not prepared")
self.raw_data_path = os.path.join(ROOT_DIR, 'sunrgbd/sunrgbd_trainval')
self.scan_names = sorted(list(set([os.path.basename(x)[0:6] \
for x in os.listdir(self.data_path)])))
if scan_idx_list is not None:
self.scan_names = [self.scan_names[i] for i in scan_idx_list]
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
size_classe_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote than X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
scan_name = self.scan_names[idx]
point_color_sem = np.load(os.path.join(self.data_path, scan_name)+'_pc.npz')['pc'] # Nx6
bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy') # K,8
point_votes = np.load(os.path.join(self.data_path, scan_name)+'_votes.npz')['point_votes'] # Nx10
semantics37 = point_color_sem[:, 6]
semantics10 = np.array([DC.class37_2_class10[k] for k in semantics37])
semantics10_multi = [DC.class37_2_class10_multi[k] for k in semantics37]
if not self.use_color:
point_cloud = point_color_sem[:, 0:3]
else:
point_cloud = point_color_sem[:,0:6]
point_cloud[:,3:6] = (point_color_sem[:,3:6]-MEAN_COLOR_RGB)
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
bboxes[:,0] = -1 * bboxes[:,0]
bboxes[:,6] = np.pi - bboxes[:,6]
point_votes[:,[1,4,7]] = -1 * point_votes[:,[1,4,7]]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = sunrgbd_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:,3:6] + MEAN_COLOR_RGB
rgb_color *= (1+0.4*np.random.random(3)-0.2) # brightness change for each channel
rgb_color += (0.1*np.random.random(3)-0.05) # color shift for each channel
rgb_color += np.expand_dims((0.05*np.random.random(point_cloud.shape[0])-0.025), -1) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(np.random.random(point_cloud.shape[0])>0.3,-1)
point_cloud[:,3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
# new items
box3d_angles = np.zeros((MAX_NUM_OBJ,))
point_boundary_mask_z = np.zeros(self.num_points)
point_boundary_mask_xy = np.zeros(self.num_points)
point_boundary_offset_z = np.zeros([self.num_points, 3])
point_boundary_offset_xy = np.zeros([self.num_points, 3])
point_boundary_sem_z = np.zeros([self.num_points, 3+2+1])
point_boundary_sem_xy = np.zeros([self.num_points, 3+1+1])
point_line_mask = np.zeros(self.num_points)
point_line_offset = np.zeros([self.num_points, 3])
point_line_sem = np.zeros([self.num_points, 3+1])
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean size stored in size2class is of full length of box edges,
# while in sunrgbd_data.py data dumping we dumped *half* length l,w,h.. so have to time it by 2 here
box3d_size = bbox[3:6]*2
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
box3d_angles[i] = bbox[6]
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = sunrgbd_utils.my_compute_box_3d(bbox[0:3], bbox[3:6], bbox[6])
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
semantics37 = semantics37[choices]
semantics10 = semantics10[choices]
semantics10_multi = [semantics10_multi[i] for i in choices]
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
# box angle is -pi to pi
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners = params2bbox(bbox[:3], 2 * bbox[3:6], clockwise2counter(bbox[6]))
# corners_votenet = sunrgbd_utils.my_compute_box_3d(bbox[:3], bbox[3:6], bbox[6])
try:
x_all_cls, ind_all_cls = extract_pc_in_box3d(point_cloud, corners)
except:
continue
ind_all_cls = np.where(ind_all_cls)[0] # T/F to index
# find point with same semantic as bbox, note semantics is 37 cls in sunrgbd
# ind = ind_all_cls[np.where(semantics10[ind_all_cls] == bbox[7])[0]]
ind = []
for j in ind_all_cls:
if bbox[7] in semantics10_multi[j]:
ind.append(j)
ind = np.array(ind)
if ind.shape[0] < NUM_POINT_SEM_THRESHOLD:
pass
else:
x = point_cloud[ind, :3]
###Get bb planes and boundary points
plane_lower_temp = np.array([0,0,1,-corners[6,-1]])
para_points = np.array([corners[1], corners[3], corners[5], corners[7]])
newd = np.sum(para_points * plane_lower_temp[:3], 1)
if check_upright(para_points) and plane_lower_temp[0]+plane_lower_temp[1] < LOWER_THRESH:
plane_lower = np.array([0,0,1,plane_lower_temp[-1]])
plane_upper = np.array([0,0,1,-np.mean(newd)])
else:
import pdb;pdb.set_trace()
print ("error with upright")
if check_z(plane_upper, para_points) == False:
import pdb;pdb.set_trace()
### Get the boundary points here
#alldist = np.abs(np.sum(point_cloud[:,:3]*plane_lower[:3], 1) + plane_lower[-1])
alldist = np.abs(np.sum(x*plane_lower[:3], 1) + plane_lower[-1])
mind = np.min(alldist)
#[count, val] = np.histogram(alldist, bins=20)
#mind = val[np.argmax(count)]
sel = np.abs(alldist - mind) < DIST_THRESH
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,0] >= xmin) & (point_cloud[:,0] <= xmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
## Get lower four lines
line_sel1, line_sel2, line_sel3, line_sel4 = get_linesel(x[sel], corners, 'lower')
if np.sum(line_sel1) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel1]] = 1.0
linecenter = (corners[0] + corners[2]) / 2.0
point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1]
point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel2) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel2]] = 1.0
linecenter = (corners[4] + corners[6]) / 2.0
point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2]
point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel3) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel3]] = 1.0
linecenter = (corners[0] + corners[4]) / 2.0
point_line_offset[ind[sel][line_sel3]] = linecenter - x[sel][line_sel3]
point_line_sem[ind[sel][line_sel3]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel4) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel4]] = 1.0
linecenter = (corners[2] + corners[6]) / 2.0
point_line_offset[ind[sel][line_sel4]] = linecenter - x[sel][line_sel4]
point_line_sem[ind[sel][line_sel4]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
# center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])])
center = (corners[0] + corners[6]) / 2.0
center[2] = np.mean(x[sel][:,2])
sel_global = ind[sel]
point_boundary_mask_z[sel_global] = 1.0
point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[4] - corners[0]), np.linalg.norm(corners[2] - corners[0]), bbox[7]])
point_boundary_offset_z[sel_global] = center - x[sel]
'''
### Check for middle z surfaces
[count, val] = np.histogram(alldist, bins=20)
mind_middle = val[np.argmax(count)]
sel_pre = np.copy(sel)
sel = np.abs(alldist - mind_middle) < DIST_THRESH
if np.abs(np.mean(x[sel_pre][:,2]) - np.mean(x[sel][:,2])) > MIND_THRESH:
### Do not use line for middle surfaces
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
center = (corners[0] + corners[6]) / 2.0
center[2] = np.mean(x[sel][:,2])
# center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])])
sel_global = ind[sel]
point_boundary_mask_z[sel_global] = 1.0
point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[4] - corners[0]), np.linalg.norm(corners[2] - corners[0]), bbox[7]])
point_boundary_offset_z[sel_global] = center - x[sel]
'''
### Get the boundary points here
alldist = np.abs(np.sum(x*plane_upper[:3], 1) + plane_upper[-1])
mind = np.min(alldist)
#[count, val] = np.histogram(alldist, bins=20)
#mind = val[np.argmax(count)]
sel = np.abs(alldist - mind) < DIST_THRESH
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,0] >= xmin) & (point_cloud[:,0] <= xmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
## Get upper four lines
line_sel1, line_sel2, line_sel3, line_sel4 = get_linesel(x[sel], corners, 'upper')
if np.sum(line_sel1) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel1]] = 1.0
linecenter = (corners[1] + corners[3]) / 2.0
point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1]
point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel2) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel2]] = 1.0
linecenter = (corners[5] + corners[7]) / 2.0
point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2]
point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel3) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel3]] = 1.0
linecenter = (corners[1] + corners[5]) / 2.0
point_line_offset[ind[sel][line_sel3]] = linecenter - x[sel][line_sel3]
point_line_sem[ind[sel][line_sel3]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel4) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel4]] = 1.0
linecenter = (corners[3] + corners[7]) / 2.0
point_line_offset[ind[sel][line_sel4]] = linecenter - x[sel][line_sel4]
point_line_sem[ind[sel][line_sel4]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
# center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])])
center = (corners[1] + corners[7]) / 2.0
center[2] = np.mean(x[sel][:,2])
sel_global = ind[sel]
point_boundary_mask_z[sel_global] = 1.0
point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[5] - corners[1]), np.linalg.norm(corners[3] - corners[1]), bbox[7]])
point_boundary_offset_z[sel_global] = center - x[sel]
v1 = corners[3] - corners[2]
v2 = corners[2] - corners[0]
cp = np.cross(v1, v2)
d = -np.dot(cp,corners[0])
a,b,c = cp
plane_left_temp = np.array([a, b, c, d])
para_points = np.array([corners[4], corners[5], corners[6], corners[7]])
### Normalize xy here
plane_left_temp /= np.linalg.norm(plane_left_temp[:3])
newd = np.sum(para_points * plane_left_temp[:3], 1)
if plane_left_temp[2] < LOWER_THRESH:
plane_left = plane_left_temp#np.array([cls,res,tempsign,plane_left_temp[-1]])
plane_right = np.array([plane_left_temp[0], plane_left_temp[1], plane_left_temp[2], -np.mean(newd)])
else:
import pdb;pdb.set_trace()
print ("error with upright")
### Get the boundary points here
alldist = np.abs(np.sum(x*plane_left[:3], 1) + plane_left[-1])
mind = np.min(alldist)
#[count, val] = np.histogram(alldist, bins=20)
#mind = val[np.argmax(count)]
sel = np.abs(alldist - mind) < DIST_THRESH
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,2] >= zmin) & (point_cloud[:,2] <= zmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
## Get upper four lines
line_sel1, line_sel2 = get_linesel(x[sel], corners, 'left')
if np.sum(line_sel1) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel1]] = 1.0
linecenter = (corners[0] + corners[1]) / 2.0
point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1]
point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel2) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel2]] = 1.0
linecenter = (corners[2] + corners[3]) / 2.0
point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2]
point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
# center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (zmin+zmax)/2.0])
center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (corners[0, 2] + corners[1, 2])/2.0])
sel_global = ind[sel]
point_boundary_mask_xy[sel_global] = 1.0
# point_boundary_sem_xy[sel_global] = np.array([center[0], center[1], center[2], zmax - zmin, np.where(DC.nyu40ids == meta_vertices[ind[0],-1])[0][0]])
point_boundary_sem_xy[sel_global] = np.array([center[0], center[1], center[2], corners[1, 2] - corners[0, 2], bbox[7]])
point_boundary_offset_xy[sel_global] = center - x[sel]
'''
[count, val] = np.histogram(alldist, bins=20)
mind_middle = val[np.argmax(count)]
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,2] >= zmin) & (point_cloud[:,2] <= zmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
## Get upper four lines
sel_pre = np.copy(sel)
sel = np.abs(alldist - mind_middle) < DIST_THRESH
if np.abs(np.mean(x[sel_pre][:,0]) - np.mean(x[sel][:,0])) > MIND_THRESH:
### Do not use line for middle surfaces
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
# center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (zmin+zmax)/2.0])
center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (corners[0, 2] + corners[1, 2])/2.0])
sel_global = ind[sel]
point_boundary_mask_xy[sel_global] = 1.0
point_boundary_sem_xy[sel_global] = np.array([center[0], center[1], center[2], corners[1, 2] - corners[0, 2], bbox[7]])
point_boundary_offset_xy[sel_global] = center - x[sel]
'''
### Get the boundary points here
alldist = np.abs(np.sum(x*plane_right[:3], 1) + plane_right[-1])
mind = np.min(alldist)
#[count, val] = np.histogram(alldist, bins=20)
#mind = val[np.argmax(count)]
sel = np.abs(alldist - mind) < DIST_THRESH
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,2] >= zmin) & (point_cloud[:,2] <= zmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
line_sel1, line_sel2 = get_linesel(x[sel], corners, 'right')
if np.sum(line_sel1) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel1]] = 1.0
linecenter = (corners[4] + corners[5]) / 2.0
point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1]
point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel2) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel2]] = 1.0
linecenter = (corners[6] + corners[7]) / 2.0
point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2]
point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
# center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (zmin+zmax)/2.0])
center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (corners[4, 2] + corners[5, 2])/2.0])
sel_global = ind[sel]
point_boundary_mask_xy[sel_global] = 1.0
point_boundary_sem_xy[sel_global] = np.array([center[0], center[1], center[2], corners[5, 2] - corners[4, 2], bbox[7]])
point_boundary_offset_xy[sel_global] = center - x[sel]
#plane_front_temp = leastsq(residuals, [0,1,0,0], args=(None, np.array([corners[0], corners[1], corners[4], corners[5]]).T))[0]
v1 = corners[0] - corners[4]
v2 = corners[4] - corners[5]
cp = np.cross(v1, v2)
d = -np.dot(cp,corners[5])
a,b,c = cp
plane_front_temp = np.array([a, b, c, d])
para_points = np.array([corners[2], corners[3], corners[6], corners[7]])
plane_front_temp /= np.linalg.norm(plane_front_temp[:3])
                newd = np.sum(para_points * plane_front_temp[:3], 1)
import numpy as np
import random
import itertools
from dataflow import ( DataFlow)
# def group_data(data, group_func):
# keys = []
# groups = []
#
# for k, g in itertools.groupby(sorted(data, key=group_func), group_func):
# klist = list(k)
# keys.append(klist[0])
# groups.append((list(g)))
# return list(keys), list(groups)
#
#
# def Splitrandom(ratios, seed=None, group_func=None):
# def f(data):
# if (group_func is not None):
# idx, groups = group_data(data, group_func)
# #print (idx)
# #print(groups)
# dict_idx_group = dict(zip(idx, groups))
#
#
# else:
# idx = np.arange(len(data))
#
# if (seed is not None):
# random.Random(seed).shuffle(idx)
# else:
# random.shuffle(idx)
# N = len(idx)
#
# splits_idx = []
# #print(idx)
# start = 0
# for i, r in enumerate(ratios):
# n = int(N * r)
#
# end = start + n
# #print(i, n, start, end)
#
# if (i == len(ratios) - 1):
# splits_idx.append(idx[start:])
# else:
# splits_idx.append(idx[start:end])
# start = end
#
#
# splits = []
# for si in splits_idx:
# asplit = []
# #print ('S',si)
# for k in si:
# if (group_func is not None):
# asplit.extend(dict_idx_group[k])
# else:
# asplit.append(data[k])
# splits.append(asplit)
#
# return splits
#
# return f
from sklearn.model_selection import GroupShuffleSplit
def Splitrandom (ratios, seed=None, group_func=None):
def get_group(data):
if (group_func is not None):
groups = [group_func(d) for d in data]
else:
groups = np.arange(len(data))
return groups
def slice(data, idx):
return [data[id] for id in idx]
def f(data):
def group_two(data, ratio_pair):
groups = get_group(data)
gss = GroupShuffleSplit(n_splits=1, train_size=ratio_pair[0], test_size=ratio_pair[1], random_state=seed)
train_idx, test_idx = next(gss.split(data, groups=groups))
return slice(data,train_idx) , slice(data,test_idx)
ratio_new = (ratios[0], np.sum(ratios[1:]))
train, valtest = group_two(data, ratio_new)
ratio_new =(ratios[1]/np.sum(ratios[1:]),ratios[2]/np.sum(ratios[1:]))
val, test = group_two(valtest, ratio_new)
return train, val, test
return f
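# Usage sketch (illustrative): a 70/15/15 train/val/test split that keeps every
# record sharing a group key (here a hypothetical 'patient_id' field) in the
# same partition. `data` is a placeholder for the list of records to split.
#
#   splitter = Splitrandom(ratios=(0.7, 0.15, 0.15), seed=42,
#                          group_func=lambda rec: rec['patient_id'])
#   train, val, test = splitter(data)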
class LabelMap2ProbabilityMap(DataFlow):
"""
Convert label map to probability map
"""
def __init__(self, ds, label_map_index, num_classes):
self.ds = ds
self.label_map_index = label_map_index
self.n_class = num_classes
def size(self):
return self.ds.size()
@staticmethod
def labelmap2probmap_( label_map, n_class):
s = label_map.shape
# convert label map to probability map
        pmap = np.zeros((s[0], s[1], n_class))
import numpy as np
import pytest
# pylint: disable=line-too-long
from tensornetwork.block_sparse.charge import BaseCharge, intersect, fuse_ndarrays, U1Charge, fuse_degeneracies, fuse_charges
def test_BaseCharge_charges():
D = 100
B = 6
np.random.seed(10)
charges = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
q1 = BaseCharge(charges)
np.testing.assert_allclose(q1.charges, charges)
def test_BaseCharge_generic():
D = 300
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
unique = np.unique(q, axis=1)
Q = BaseCharge(charges=q)
assert Q.dim == 300
assert Q.num_symmetries == 2
assert Q.num_unique == unique.shape[1]
def test_BaseCharge_len():
D = 300
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
Q = BaseCharge(charges=q)
assert len(Q) == 300
def test_BaseCharge_copy():
D = 300
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
Q = BaseCharge(charges=q)
Qcopy = Q.copy()
assert Q.charge_labels is not Qcopy.charge_labels
assert Q.unique_charges is not Qcopy.unique_charges
np.testing.assert_allclose(Q.charge_labels, Qcopy.charge_labels)
np.testing.assert_allclose(Q.unique_charges, Qcopy.unique_charges)
def test_BaseCharge_unique():
D = 3000
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
Q = BaseCharge(charges=q, charge_types=[U1Charge, U1Charge])
expected = np.unique(
q, return_index=True, return_inverse=True, return_counts=True, axis=1)
actual = Q.unique(return_index=True, return_inverse=True, return_counts=True)
assert np.all(actual[0].charges == expected[0])
assert np.all(actual[1] == expected[1])
assert np.all(actual[2] == expected[2])
assert np.all(actual[3] == expected[3])
def test_BaseCharge_unique_sort():
np.random.seed(10)
unique = np.array([1, 0, -1])
labels = np.random.randint(0, 3, 100)
Q = U1Charge(charges=unique, charge_labels=labels)
actual = Q.unique(
return_index=True, return_inverse=True, return_counts=True, sort=False)
np.testing.assert_allclose(actual[0].unique_charges, [[1, 0, -1]])
def test_intersect_1():
a = np.array([[0, 1, 2], [2, 3, 4]])
b = np.array([[0, -2, 6], [2, 3, 4]])
out = intersect(a, b, axis=1)
np.testing.assert_allclose(np.array([[0], [2]]), out)
def test_intersect_2():
a = np.array([[0, 1, 2], [2, 3, 4]])
b = np.array([[0, -2, 6, 2], [2, 3, 4, 4]])
out, la, lb = intersect(a, b, axis=1, return_indices=True)
np.testing.assert_allclose(np.array([[0, 2], [2, 4]]), out)
np.testing.assert_allclose(la, [0, 2])
np.testing.assert_allclose(lb, [0, 3])
def test_intersect_3():
a = np.array([0, 1, 2, 3, 4])
b = np.array([0, -1, 4])
out = intersect(a, b)
np.testing.assert_allclose([0, 4], out)
def test_intersect_4():
a = np.array([0, 1, 2, 3, 4])
b = np.array([0, -1, 4])
out, la, lb = intersect(a, b, return_indices=True)
np.testing.assert_allclose([0, 4], out)
np.testing.assert_allclose(la, [0, 4])
np.testing.assert_allclose(lb, [0, 2])
def test_intersect_raises():
np.random.seed(10)
a = np.random.randint(0, 10, (4, 5))
b = np.random.randint(0, 10, (4, 6))
with pytest.raises(ValueError):
intersect(a, b, axis=0)
c = np.random.randint(0, 10, (3, 7))
with pytest.raises(ValueError):
intersect(a, c, axis=1)
with pytest.raises(NotImplementedError):
intersect(a, c, axis=2)
d = np.random.randint(0, 10, (3, 7, 3))
e = np.random.randint(0, 10, (3, 7, 3))
with pytest.raises(NotImplementedError):
intersect(d, e, axis=1)
def test_fuse_ndarrays():
d1 = np.asarray([0, 1])
d2 = np.asarray([2, 3, 4])
fused = fuse_ndarrays([d1, d2])
np.testing.assert_allclose(fused, [2, 3, 4, 3, 4, 5])
def test_fuse_degeneracies():
d1 = np.asarray([0, 1])
d2 = np.asarray([2, 3, 4])
fused_degeneracies = fuse_degeneracies(d1, d2)
np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2))
def test_U1Charge_charges():
D = 100
B = 6
np.random.seed(10)
charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
q1 = U1Charge(charges)
assert np.all(q1.charges == charges)
def test_U1Charge_dual():
D = 100
B = 6
np.random.seed(10)
charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
q1 = U1Charge(charges)
assert np.all(q1.dual(True).charges == -charges)
def get_charges(B0, B1, D, num_charges):
return [
np.random.randint(B0, B1 + 1, D).astype(np.int16)
for _ in range(num_charges)
]
def fuse_many_charges(num_charges,
num_charge_types,
seed,
D,
B,
use_flows=False):
np.random.seed(seed)
if use_flows:
flows = np.random.choice([True, False], num_charges, replace=True)
else:
    flows = np.asarray([False] * num_charges)
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 6 15:00:21 2019
@author: agarwal.270a
"""
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal as sig
from scipy.signal import windows as win
import pandas as pd
from scipy import io
import pickle
from scipy.stats import norm
# Import CC functions
#from cerebralcortex.core.datatypes import DataStream
#from cerebralcortex.core.metadata_manager.stream.metadata import Metadata, DataDescriptor, ModuleMetadata
#from cerebralcortex.core.util.spark_helper import get_or_create_sc
# Declare constants and load data
Fs=25 #Hz
len_in_s=20.48 #s
len_out=4
len_in=Fs*len_in_s
#arr_t=np.arange(250,290,len_in_s) #change time duration when longer noise exists
arr_t=np.arange(250,900,len_in_s) #change time duration when longer noise exists
path_prefix= 'E:/Box Sync/' #'C:/Users/agarwal.270/Box/'
path=path_prefix+'SU19/Research/PPG_ECG_Proj/py_code/MA_function/'
mdict=pickle.load(open(path+'data/sim_data.dat','rb'))
RR_distro=mdict['RR_distro']
HR_clusters=mdict['HR_clusters']
del mdict
#peak_data=mdict['peaks']
#led_id=mdict['led_id']
#verify after meeting
list_pdf_RR_joint=[RR_distro[j,0] for j in range(len(RR_distro))]
list_pdf_RR_row_sum=[np.sum(arr,axis=0) for arr in list_pdf_RR_joint]
list_pdf_RR_col_sum=[np.sum(arr,axis=1) for arr in list_pdf_RR_joint]
diff_arr=np.array([np.linalg.norm(list_pdf_RR_row_sum[k]-list_pdf_RR_col_sum[k])\
for k in range(len(list_pdf_RR_row_sum))]).round(4)
# =============================================================================
# plt.figure();
# for j in range(len(list_pdf_RR_row_sum)):
# plt.subplot(7,2,j+1);plt.plot(list_pdf_RR_row_sum[j],'b-o')
# plt.plot(list_pdf_RR_col_sum[j],'r--x');plt.legend(['row','col'])
# plt.grid(True);plt.title('z={}, rmse={}'.format(j+1,diff_arr[j]))
#
# =============================================================================
#%% Helper funcs
# =============================================================================
# def sample_RR(HR,RR_prev):
# #get cluster
# HR_up=(HR_clusters>HR).astype(int)
# z=(np.arange(len(HR_clusters)-1))[(np.diff(HR_up)).astype(bool)][0]
# #RR_z=RR_distro[z]
# RR_z_distro=RR_distro[z,0];RR_z_vals=RR_distro[z,1].reshape(-1)
# if RR_prev==0: #beginning of sampling. sample uniform randomly
# RR_next=RR_z_vals[np.random.randint(len(RR_z_vals))]
# else:
# idx_Rp=np.arange(len(RR_z_vals))[RR_z_vals==RR_prev]
# RR_z_Rp=RR_z_distro[idx_Rp,:] #conditional distro given z, RR_p
# idx_Rn=np.random.choice(len(RR_z_vals),p=RR_z_Rp/np.sum(RR_z_Rp)) #sample RR_next idx
# RR_next=RR_z_vals[idx_Rn]
# return RR_next
# =============================================================================
def sample_RR(HR,RR_prev):
#get cluster
HR_up=(HR_clusters>HR).astype(int)
z=(np.arange(len(HR_clusters)-1))[(np.diff(HR_up)).astype(bool)][0]
#get distros
RR_z_distro=list_pdf_RR_row_sum[z]
RR_z_vals=RR_distro[z,1].reshape(-1)
#sample
idx_Rn=np.random.choice(len(RR_z_vals),p=RR_z_distro) #sample RR_next idx
RR_next=RR_z_vals[idx_Rn]
return RR_next
def sinusoid(t,w,phi,Fs=25):
'''
Takes in inputs as numpy arrays of same size. Returns the sinewave with
desired characteristics.
t: array of time values in seconds. If a scalar is supplied, it is
considered as duration of the time series in seconds starting from 0. It is
divided into t*Fs divisions.
w: array of angular frequencies in radians/seconds. If a scalar is
supplied, it is made into a constant array of same shape as t and value w.
phi: array of phase values in radians. If a scalar is supplied, it is made
into a constant array of same shape as t and value phi.
Fs= Sampling frequency in Hz. Only needed in case t is not an array.
returns: t, s=np.sin(w*t+phi)
'''
# Handle Scalar inputs
if not(hasattr(t, "__len__")):
t=np.linspace(0,t,num=t*Fs,endpoint=False)
if not(hasattr(w, "__len__")):
w=w*np.ones(t.shape)
if not(hasattr(phi, "__len__")):
phi=phi*np.ones(t.shape)
    # Check that all input arrays share the same shape
    if (w.shape != t.shape) or (phi.shape != t.shape):
        raise TypeError('Dimensional mismatch between input arrays. Please check that the dimensions are the same')
s=np.sin(w*t+phi)
return t,s
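# Illustrative sketch (values are assumptions, not from the original script):
# a 10 s, 1 Hz sine sampled at Fs=25 Hz, obtained by passing a scalar duration
# and letting sinusoid() expand the scalar frequency and phase into arrays.
def _example_sinusoid():
    t, s = sinusoid(t=10, w=2*np.pi*1.0, phi=0, Fs=25)
    return t, s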
def HR_func_generator(t1):
arr_HR=np.arange(50,180) # Possible heart rates
# make a array of functions
f1=lambda B,D:((D*win.triang(len(t1))).astype(int)+B).astype(np.float32) #triang
f2=lambda B,D:((D*win.triang(2*len(t1))).astype(int)+B).astype(np.float32)\
[:len(t1)] # 1st half of triang
f3=lambda B,D:((D*win.tukey(len(t1),alpha=(0.3*np.random.rand()+0.7))).astype(int)+B).astype(np.float32) #tukey
f4=lambda B,D:((D*win.tukey(2*len(t1),alpha=(0.3*np.random.rand()+0.7))).astype(int)+B)\
.astype(np.float32)[:len(t1)] # 1st half of tukey
arr_f=np.array(1*[f1]+1*[f2]+1*[f3]+1*[f4]) # possible to change the proportion of functions
#randomly select elements
D_HR=0;HRs=[];D_HR_max=50
while D_HR==0: # we don't want D_HR to be zero so keep resampling
HRs+=[arr_HR[np.random.randint(len(arr_HR))]]
HR_range=np.arange(HRs[0]+1,min([HRs[0]+D_HR_max,180])+1)
HRs+=[HR_range[np.random.randint(len(HR_range))]]
B_HR,D_HR=HRs[0],HRs[1]-HRs[0]
#B_HR,D_HR=arr_B_HR[np.random.randint(len(arr_B_HR))],arr_D_HR[np.random.randint(len(arr_D_HR))]
HR_curve_f=arr_f[np.random.randint(len(arr_f))](B_HR,D_HR) #trend
return HR_curve_f,D_HR
def filtr(X0,Fs=25,filt=True):
nyq=Fs/2;flag=False
if len(X0.shape)==1:
X0=X0.reshape(-1,1)
flag=True
X1 = sig.detrend(X0,type='constant',axis=0); # Subtract mean
if filt:
# filter design used from Ju's code with slight changes for python syntax
b = sig.firls(219,np.array([0,0.3,0.5,4.5,5,nyq]),np.array([0,0,1,1,0,0]),np.array([10,1,1]),nyq=nyq);
X=np.zeros(X1.shape)
for i in range(X1.shape[1]):
#X[:,i] = sig.convolve(X1[:,i],b,mode='same'); # filtering using convolution, mode='same' returns the centered signal without any delay
X[:,i] = sig.filtfilt(b, [1], X1[:,i])
else:
X=X1
if flag:
X=X.reshape(-1)
#X=sig.detrend(X,type='constant',axis=0); # subtracted mean again to center around x=0 just in case things changed during filtering
return X
def filtr_HR(X0,Fs=25,filt=True):
nyq=Fs/2;flag=False
if len(X0.shape)==1:
X0=X0.reshape(-1,1)
flag=True
X1 = np.copy(X0)#sig.detrend(X0,type='constant',axis=0); # Subtract mean
if filt:
# filter design used from Ju's code with slight changes for python syntax
b = sig.firls(219,np.array([0,0.5,1,nyq]),np.array([1,1,0,0]),np.array([1,1]),nyq=nyq);
X=np.zeros(X1.shape)
for i in range(X1.shape[1]):
#X[:,i] = sig.convolve(X1[:,i],b,mode='same'); # filtering using convolution, mode='same' returns the centered signal without any delay
X[:,i] = sig.filtfilt(b, [1], X1[:,i])
else:
X=X1
if flag:
X=X.reshape(-1)
#X=sig.detrend(X,type='constant',axis=0); # subtracted mean again to center around x=0 just in case things changed during filtering
return X
def normalize_AC(data_left_filt,Fn=25,c=0,make_plots=False):
'''
data_left_filt: filtered ppg data
Fn: Sampling frequency in Hz
c: Column (Channel) in the array to be normalized
'''
data_left_filt=1*data_left_filt
flag=False
if len(data_left_filt.shape)==1:
data_left_filt=data_left_filt.reshape((-1,1))
flag=True
prc_l=50
pk_idx_start=2*Fn;pk_idx_end=29*Fn;
y=data_left_filt[pk_idx_start:pk_idx_end,c]
locs,pk_props = sig.find_peaks(y,distance=8,height=0);
pks_l=y[locs]
locs=locs+pk_idx_start;
if make_plots:
plt.figure(); plt.subplot(211);
plt.plot(data_left_filt[:pk_idx_end,c]);plt.plot(locs,pks_l,'r+')
temp_mins_l=[];
#for j=[-5,-4,-3,-2,-1,1,2,3,4,5]
for j in range(-7,0):
temp_mins_l+=[data_left_filt[locs+j,c]];
temp_min_l=np.min(np.array(temp_mins_l),axis=0);
amp_left=np.nanpercentile(pks_l-temp_min_l,prc_l);
#amp_left=np.mean(pks_l-temp_min_l);
data_left_filt[:,c]=data_left_filt[:,c]/amp_left;
if flag:
data_left_filt=data_left_filt.reshape(-1)
return data_left_filt
def form_data(X,Y,len_in,len_out):
'''
X:timeseries with inputs
Y:timeseries with outputs
'''
in_size=int(len_in)
out_size=int(len_out)
step_size=int(len_out/4)#np.max([out_size,4]) #change this as desired
#clip timeseries to nearest multiple of step_size
#lenth1=(((len(X)-in_size)//step_size)*step_size)+in_size
lenth=len(X)
#print(lenth1,lenth)
X,Y=X.T,Y.T # Transpose to make it look like time-series
X,Y=X.reshape(X.shape+(1,)),Y.reshape(Y.shape+(1,)) # add a dimension for concatenation
#print(X.shape,Y.shape)
#idx=np.arange(0,lenth-in_size,step_size)+in_size
idx=step_size*np.arange(0,1+((lenth-in_size)//step_size))+in_size
#print(idx[-1])
#print(lenth,X.shape[1],len(idx),(X.shape[1]-in_size+1)//step_size)
#print(X.shape,Y.shape,HR.shape)
data_X=np.concatenate([X[:,i-in_size:i,:] for i in idx],axis=-1).T
data_Y=np.concatenate([Y[i-out_size:i,:] for i in idx],axis=-1).T
#kernel_size=100;stride=1
#idxHR=np.arange(i-out_size+kernel_size,i,stride)
return data_X,data_Y
def pd_ffill(arr):
df = pd.DataFrame(arr)
df.fillna(method='ffill', axis=0, inplace=True)
out = df.values.reshape(arr.shape)
return out
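# Small illustrative check (input values are assumptions): pd_ffill propagates
# the last finite sample forward over NaN gaps, which is how the randomly
# blanked stretches of the HR curve are filled later in gen_ppg_from_HR.
def _example_pd_ffill():
    arr = np.array([60.0, np.nan, np.nan, 62.0, np.nan])
    return pd_ffill(arr)  # -> array([60., 60., 60., 62., 62.])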
def add_motion_noise(ppg1,flag=True):
# Noise for SNR=10log10(P_s/P_n)=20 dB => sigma=(ppg_pow**0.5)/10
acc1=0.00*np.random.standard_normal(ppg1.shape) # random normal noise with (0,0.1^2)
if flag: #extreme motion artefacts to be added or not
acc1=acc1+(2*np.random.random_sample(ppg1.shape)-1) # [-2,2] random uniform
#f=lambda z: (3 / (1 + np.exp(-10*z))) # A saturating sigmoid
f=lambda z: 2*np.tanh(2*z)
ppg1=ppg1+f(acc1) #noise added making values [-2,2] or [-4,4] depending on mode
return ppg1,acc1
def extract_rand_noise(noiz_list,lenth):
'''
noiz_list: Available components to choose from
lenth: Desired length of the noise signal
'''
noiz_list=[n for n in noiz_list if len(n)>lenth]
if len(noiz_list)==0:
raise AssertionError('Please use a smaller duration of ppg.')
noiz=noiz_list[np.random.randint(len(noiz_list))]
idx_start=np.random.randint(len(noiz)-lenth)
noiz=noiz[idx_start:idx_start+lenth]
return noiz
def gen_ppg_from_HR(t1,HR_curve_f,D_HR,peak_id,make_plots=False):
    '''
    Generate a synthetic PPG signal from a heart-rate curve.
    t1: time vector in seconds
    HR_curve_f: heart-rate trend (in BPM) sampled on t1
    D_HR: dynamic range (in BPM) of the heart-rate trend
    peak_id: identifier selecting the PPG peak basis to use
    make_plots: if True, plot intermediate results
    '''
# Randomly insert consecutive Nan's and then ffill
perc_change=5;cons_reps=len(t1)//(np.abs(D_HR*2))
#idx=1+np.random.RandomState(seed=seed1).permutation(len(t1)-2-cons_reps)[:int((len(t1)-2)/cons_reps*perc_change/100)]
idx=1+np.random.permutation(len(t1)-2-cons_reps)[:int((len(t1)-2)/cons_reps*perc_change/100)]
try:
idx=np.concatenate([np.arange(i,i+cons_reps) for i in idx])
HR_curve_f[idx]=np.nan
HR_curve1=pd_ffill(HR_curve_f)
except ValueError:
HR_curve1=1*HR_curve_f
# TODO: Removed 0.1 Hz and 0.4 Hz in HRV
#HRV_w1=2*np.pi*0.1;HRV_w2=2*np.pi*0.4
#rand_mix=np.repeat(np.random.random_sample(1+(len(t1)//1500)),1500)[:len(t1)]
#rand_mix=0.55
#print(len(t1),rand_mix)
#gain_list=np.array([0,1,2,2,1,1,1,1])
#HR_curve1+=0.03*((rand_mix*sinusoid(t1,HRV_w1,phi=0)[-1])+\
# ((1-rand_mix)*sinusoid(t1,HRV_w2,phi=0)[-1]))#*gain_list[(300/HR_curve1).astype(int)]
#plt.figure();plt.plot(t1,sinusoid(t1,HRV_w1,phi=0)[-1],t1,sinusoid(t1,HRV_w2,phi=0)[-1])
#HR_curve1,_=add_motion_noise(HR_curve1,flag=False)
#print(HR_curve1.shape,t1.shape)
# =============================================================================
# w1=2*np.pi*(HR_curve1/60)
# #phi_PTT=(0.5*np.pi)/(HR_curve1/60)
# phi_PTT=0
# _,ppg0=sinusoid(t1,w1,phi=phi_PTT)
#
# ppg1=ppg0*2
# PTT=np.random.randint(4,6) #sample a PTT value
# ppg1=np.concatenate([np.zeros(PTT),ppg1[:-1*PTT]])
#
#
# # Peak Detection & check figure for its accuracy
# #out = ecg.ecg(signal=ppg01, sampling_rate=25,show=False)
# #ind=out['rpeaks']
# #arr_peaks=np.zeros(len(ppg01));arr_peaks[ind]=1
# #arr_peaks=(ppg01==np.max(ppg01)).astype(int)
# ind,_=find_peaks(ppg1,distance=6,height=0.9)
#
# =============================================================================
w_l=12;w_pk=25;w_r=w_pk-w_l-1
n_peaks=int(len(HR_curve1)/5)
#remove terminal pk_locs
#ind=ind[ind>=w_l]
#ind=ind[ind<(len(ppg1)-w_r)]
#sample bunch of peaks using PCA components
path2base='E:/Box Sync/'+\
'AU19/Research/PPG_ECG_proj/data/Wen_data_28_Sep/clean_lrsynced\\'
base_dict = io.loadmat(path2base+"green_ppg_basis.mat")
#base_dict=mdict[peak_id+'_G']['peaks']
eig_vec=base_dict['eig_vec'];eig_val=base_dict['eig_val'].reshape((-1,1))
avg=base_dict['mean'].reshape((-1,1))
k=10;eig_vec=eig_vec[:,:k];eig_val=eig_val[:k]
l_peaks,n_coeff=eig_vec.shape
    weights = np.random.random_sample((n_coeff, n_peaks))
#
# DeMoN - Depth Motion Network
# Copyright (C) 2017 <NAME>, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import math
from minieigen import Quaternion, Vector3
# implements error metrics from Eigen et al. https://arxiv.org/pdf/1406.2283.pdf
def compute_valid_depth_mask(d1, d2=None):
"""Computes the mask of valid values for one or two depth maps
Returns a valid mask that only selects values that are valid depth value
in both depth maps (if d2 is given).
Valid depth values are >0 and finite.
"""
if d2 is None:
valid_mask = np.isfinite(d1)
valid_mask[valid_mask] = (d1[valid_mask] > 0)
else:
valid_mask = np.isfinite(d1) & np.isfinite(d2)
valid_mask[valid_mask] = (d1[valid_mask] > 0) & (d2[valid_mask] > 0)
return valid_mask
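# Illustrative sketch (pixel values are assumptions): the mask keeps only
# pixels that are finite and strictly positive in both maps, which is the
# precondition every metric below asserts on its inputs.
def _example_valid_depth_mask():
    d_pred = np.array([[1.0, 0.0], [np.inf, 2.0]])
    d_gt = np.array([[2.0, 1.0], [1.0, np.nan]])
    mask = compute_valid_depth_mask(d_pred, d_gt)
    return d_pred[mask], d_gt[mask]  # only the (0, 0) pixel survives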
def l1(depth1, depth2):
"""
    Computes the L1 error between the two depth maps.
    Takes preprocessed depths (no nans, infs and non-positive values)
    depth1: one depth map
    depth2: another depth map
    Returns:
        L1 distance
"""
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
diff = depth1 - depth2
num_pixels = float(diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sum(np.absolute(diff)) / num_pixels
def l1_inverse(depth1, depth2):
"""
    Computes the L1 error between the inverses of two depth maps.
    Takes preprocessed depths (no nans, infs and non-positive values)
    depth1: one depth map
    depth2: another depth map
    Returns:
        L1 distance of the inverse depths
"""
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
diff = np.reciprocal(depth1) - np.reciprocal(depth2)
num_pixels = float(diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sum(np.absolute(diff)) / num_pixels
def rmse_log(depth1, depth2):
"""
    Computes the root mean square error between the logs of two depth maps.
Takes preprocessed depths (no nans, infs and non-positive values)
depth1: one depth map
depth2: another depth map
Returns:
RMSE(log)
"""
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
log_diff = np.log(depth1) - np.log(depth2)
num_pixels = float(log_diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sqrt(np.sum(np.square(log_diff)) / num_pixels)
def rmse(depth1, depth2):
"""
    Computes the root mean square error between the two depth maps.
    Takes preprocessed depths (no nans, infs and non-positive values)
    depth1: one depth map
    depth2: another depth map
    Returns:
        RMSE
"""
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
diff = depth1 - depth2
num_pixels = float(diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sqrt(np.sum(np.square(diff)) / num_pixels)
def scale_invariant(depth1, depth2):
"""
Computes the scale invariant loss based on differences of logs of depth maps.
Takes preprocessed depths (no nans, infs and non-positive values)
depth1: one depth map
depth2: another depth map
Returns:
scale_invariant_distance
"""
# sqrt(Eq. 3)
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
log_diff = np.log(depth1) - np.log(depth2)
num_pixels = float(log_diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sqrt(np.sum(np.square(log_diff)) / num_pixels - np.square(np.sum(log_diff)) / np.square(num_pixels))
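# For reference (added note, not in the original docstring): with
# d_i = log(depth1_i) - log(depth2_i) over the n valid pixels, the value
# returned above is
#
#     sqrt( (1/n) * sum_i d_i**2  -  (1/n**2) * (sum_i d_i)**2 ),
#
# i.e. the standard deviation of the log-depth differences, which does not
# change when either depth map is multiplied by a global scale factor.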
def abs_relative(depth_pred, depth_gt):
"""
Computes relative absolute distance.
Takes preprocessed depths (no nans, infs and non-positive values)
depth_pred: depth map prediction
depth_gt: depth map ground truth
Returns:
abs_relative_distance
"""
assert (np.all(np.isfinite(depth_pred) & np.isfinite(depth_gt) & (depth_pred > 0) & (depth_gt > 0)))
diff = depth_pred - depth_gt
num_pixels = float(diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sum(np.absolute(diff) / depth_gt) / num_pixels
def avg_log10(depth1, depth2):
"""
Computes average log_10 error (Liu, Neural Fields, 2015).
Takes preprocessed depths (no nans, infs and non-positive values)
depth1: one depth map
depth2: another depth map
Returns:
        average log10 error
"""
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
log_diff = np.log10(depth1) - np.log10(depth2)
num_pixels = float(log_diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sum(np.absolute(log_diff)) / num_pixels
def sq_relative(depth_pred, depth_gt):
"""
Computes relative squared distance.
Takes preprocessed depths (no nans, infs and non-positive values)
depth_pred: depth map prediction
depth_gt: depth map ground truth
Returns:
squared_relative_distance
"""
assert (np.all(np.isfinite(depth_pred) & np.isfinite(depth_gt) & (depth_pred > 0) & (depth_gt > 0)))
diff = depth_pred - depth_gt
num_pixels = float(diff.size)
if num_pixels == 0:
return np.nan
else:
return np.sum(np.square(diff) / depth_gt) / num_pixels
def ratio_threshold(depth1, depth2, threshold):
"""
Computes the percentage of pixels for which the ratio of the two depth maps is less than a given threshold.
Takes preprocessed depths (no nans, infs and non-positive values)
depth1: one depth map
depth2: another depth map
Returns:
percentage of pixels with ratio less than the threshold
"""
assert (threshold > 0.)
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
log_diff = np.log(depth1) - np.log(depth2)
num_pixels = float(log_diff.size)
if num_pixels == 0:
return np.nan
else:
return float(np.sum(np.absolute(log_diff) < np.log(threshold))) / num_pixels
def compute_errors(depth_pred, depth_gt, distances_to_compute=None):
"""
Computes different distance measures between two depth maps.
depth_pred: depth map prediction
depth_gt: depth map ground truth
distances_to_compute: which distances to compute
Returns:
a dictionary with computed distances, and the number of valid pixels
"""
valid_mask = compute_valid_depth_mask(depth_pred, depth_gt)
depth_pred = depth_pred[valid_mask]
depth_gt = depth_gt[valid_mask]
num_valid = np.sum(valid_mask)
if distances_to_compute is None:
distances_to_compute = ['l1',
'l1_inverse',
'scale_invariant',
'abs_relative',
'sq_relative',
'avg_log10',
'rmse_log',
'rmse',
'ratio_threshold_1.25',
'ratio_threshold_1.5625',
'ratio_threshold_1.953125']
results = {'num_valid': num_valid}
for dist in distances_to_compute:
if dist.startswith('ratio_threshold'):
threshold = float(dist.split('_')[-1])
results[dist] = ratio_threshold(depth_pred, depth_gt, threshold)
else:
results[dist] = globals()[dist](depth_pred, depth_gt)
return results
def compute_depth_scale_factor(depth1, depth2, depth_scaling='abs'):
"""
Computes the scale factor for depth1 to minimize the least squares error to depth2
"""
assert (np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 > 0) & (depth2 > 0)))
if depth_scaling == 'abs':
# minimize MSE on depth
d1d1 = np.multiply(depth1, depth1)
d1d2 = np.multiply(depth1, depth2)
mask = compute_valid_depth_mask(d1d2)
sum_d1d1 = np.sum(d1d1[mask])
sum_d1d2 = np.sum(d1d2[mask])
if sum_d1d1 > 0.:
scale = sum_d1d2 / sum_d1d1
else:
print('compute_depth_scale_factor: Norm=0 during scaling')
scale = 1.
elif depth_scaling == 'log':
# minimize MSE on log depth
log_diff = np.log(depth2) - np.log(depth1)
scale = np.exp(np.mean(log_diff))
elif depth_scaling == 'inv':
# minimize MSE on inverse depth
d1d1 = np.multiply(np.reciprocal(depth1), np.reciprocal(depth1))
d1d2 = np.multiply(np.reciprocal(depth1), np.reciprocal(depth2))
mask = compute_valid_depth_mask(d1d2)
sum_d1d1 = np.sum(d1d1[mask])
sum_d1d2 = np.sum(d1d2[mask])
if sum_d1d1 > 0.:
scale = np.reciprocal(sum_d1d2 / sum_d1d1)
else:
print('compute_depth_scale_factor: Norm=0 during scaling')
scale = 1.
else:
raise Exception('Unknown depth scaling method')
return scale
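# Illustrative sketch (the factor 2.5 is an assumption): with
# depth_scaling='abs' the least-squares factor recovers a pure global scale,
# so scale * depth1 is (numerically) equal to depth2.
def _example_depth_scale_factor():
    rng = np.random.RandomState(1)
    depth1 = rng.uniform(1.0, 5.0, size=(8, 8))
    depth2 = 2.5 * depth1
    return compute_depth_scale_factor(depth1, depth2, depth_scaling='abs')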
def evaluate_depth(translation_gt, depth_gt_in, depth_pred_in,
distances_to_compute=None, inverse_gt=True, inverse_pred=True,
depth_scaling='abs', depth_pred_max=np.inf):
"""
Computes different error measures for the inverse depth map without scaling and with scaling.
translation_gt: 1d numpy array with [tx,ty,tz]
The translation that corresponds to the ground truth depth
depth_gt: 2d numpy array
This is the ground truth depth
depth_pred: depth prediction being evaluated
distances_to_compute: which distances to compute
returns (err, err_after_scaling)
errs is the dictionary of errors without optimally scaling the prediction
errs_pred_scaled is the dictionary of errors after minimizing
the least squares error by scaling the prediction
"""
valid_mask = compute_valid_depth_mask(depth_pred_in, depth_gt_in)
depth_pred = depth_pred_in[valid_mask]
depth_gt = depth_gt_in[valid_mask]
if inverse_gt:
depth_gt = np.reciprocal(depth_gt)
if inverse_pred:
depth_pred = np.reciprocal(depth_pred)
# if depth_pred_max < np.inf:
# depth_pred[depth_pred>depth_pred_max] = depth_pred_max
# we need to scale the ground truth depth if the translation is not normalized
translation_norm = np.sqrt(translation_gt.dot(translation_gt))
scale_gt_depth = not np.isclose(1.0, translation_norm)
if scale_gt_depth:
depth_gt_scaled = depth_gt / translation_norm
else:
depth_gt_scaled = depth_gt
errs = compute_errors(depth_pred, depth_gt_scaled, distances_to_compute)
# minimize the least squares error and compute the errors again
scale = compute_depth_scale_factor(depth_pred, depth_gt_scaled, depth_scaling=depth_scaling)
depth_pred_scaled = depth_pred * scale
errs_pred_scaled = compute_errors(depth_pred_scaled, depth_gt_scaled, distances_to_compute)
return errs, errs_pred_scaled
def compute_flow_epe(flow1, flow2):
"""Computes the average endpoint error between the two flow fields"""
diff = flow1 - flow2
    epe = np.sqrt(diff[0, :, :] ** 2 + diff[1, :, :] ** 2)
# Reference Source: https://github.com/llSourcell/linear_regression_live/blob/master/demo.py
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, mean_squared_log_error, average_precision_score
class LinearRegression:
def __init__(self, filename):
df = pd.read_csv(filename, header = None)
self.X = np.array(df.drop([0], axis=1))
self.y = np.array(df[0])
self.learning_rate = 0.1
self.num_iterations = 100
self.cv_splits = 5
self.l1_lambda = 1e-6
self.division = 463715
def hypothesis(self, b, W, X):
return np.matmul(X, W) + b
def compute_cost(self, b, W, X, y):
total_cost = np.sum(np.square(y - self.hypothesis(b, W, X)))/(2*X.shape[0]) + self.l1_lambda*(np.sum(np.fabs(W)) + abs(b))
return total_cost
def gradient_descent_runner(self, X, y, b, W):
cost_graph = []
for i in range(self.num_iterations):
cost_graph.append(self.compute_cost(b, W, X, y))
b, W = self.step_gradient(b, W, X, y)
return [b, W, cost_graph]
def step_gradient(self, b, W, X, y):
#Calculate Gradient
        # Gradient of the squared-error term plus the subgradient of the L1 penalty
        W_gradient = ((self.hypothesis(b, W, X) - y).dot(X))/X.shape[0] + self.l1_lambda*np.sign(W)
        b_gradient = (np.sum(X.dot(W) + b - y))/X.shape[0] + self.l1_lambda*np.sign(b)
#Update current W and b
W -= self.learning_rate * W_gradient
b -= self.learning_rate * b_gradient
#Return updated parameters
return b, W
if __name__ == "__main__":
lr = LinearRegression("YearPredictionMSD/YearPredictionMSD.txt")
#X_train, X_test, y_train, y_test = train_test_split(lr.X, lr.y, test_size = 0.2, random_state = 1)
# This split is provided by the repository. It avoids the 'producer effect' by making sure no song from a given artist ends up in both the train and test set.
X_train, y_train = StandardScaler().fit_transform(lr.X[:lr.division]), lr.y[:lr.division]
X_test, y_test = StandardScaler().fit_transform(lr.X[lr.division:]), lr.y[lr.division:]
split_size = X_train.shape[0]//lr.cv_splits
ev = []
mae = []
rmse = []
msle = []
r2 = []
global_mae = []
lambdas = []
best_mae = 10
best_l1 = 0
b, W = None, None
df = pd.DataFrame(np.concatenate((X_train,y_train[:, None]), axis = 1), columns = list(range(90, -1, -1)))
df = shuffle(df)
X_train = df.drop([0], axis = 1)
y_train = df[0]
for _ in range(8):
ev = []
mae = []
rmse = []
msle = []
r2 = []
print("Training and Testing for Lambda ", lr.l1_lambda)
for i in range(lr.cv_splits):
print("Cross Validation for Split ", i+1)
start = i * split_size
end = (i+1) * split_size
X = np.concatenate((X_train[:start], X_train[end:]), axis = 0)
y = np.concatenate((y_train[:start], y_train[end:]), axis=0)
b = np.random.normal()
            W = np.random.normal(size=lr.X.shape[1])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
High level functions for multiresolution analysis of spectrograms
Code licensed under both GPL and BSD licenses
Authors: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
# Load required modules
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy import ndimage as ndi
import itertools as it
import matplotlib.pyplot as plt
from skimage.io import imsave
from skimage import transform, measure
from scipy import ndimage
from maad import sound
from skimage.filters import gaussian
from maad.util import format_rois, rois_to_imblobs, normalize_2d
def _sigma_prefactor(bandwidth):
"""
    Compute the Gaussian-envelope prefactor from the filter bandwidth
    (function adapted from skimage).
    Parameters
    ----------
    bandwidth : float
        Filter bandwidth in octaves.
    Returns
    -------
    float
        Prefactor such that sigma = prefactor / frequency.
"""
b = bandwidth
# See http://www.cs.rug.nl/~imaging/simplecell.html
return 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * \
(2.0 ** b + 1) / (2.0 ** b - 1)
def gabor_kernel_nodc(frequency, theta=0, bandwidth=1, gamma=1,
n_stds=3, offset=0):
"""
Return complex 2D Gabor filter kernel with no DC offset.
This function is a modification of the gabor_kernel function of scikit-image
Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
Harmonic function consists of an imaginary sine function and a real
cosine function. Spatial frequency is inversely proportional to the
wavelength of the harmonic and to the standard deviation of a Gaussian
kernel. The bandwidth is also inversely proportional to the standard
deviation.
Parameters
----------
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float, optional
        The bandwidth captured by the filter. For a fixed bandwidth, the
        internal standard deviations decrease with increasing frequency.
    gamma : float, optional
        gamma changes the aspect ratio (ellipsoidal) of the Gabor filter.
        By default, gamma=1, which gives an isotropic (circular) envelope;
        gamma>1 widens the filter in the x-direction and gamma<1 elongates
        it in the y-direction. The standard deviations in x and y are
        derived internally from `frequency`, `bandwidth` and `gamma`.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g_nodc : complex 2d array
A single gabor kernel (complex) with no DC offset
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filters import gabor_kernel
>>> from skimage import io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> gk = gabor_kernel(frequency=0.2)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # more ripples (equivalent to increasing the size of the
>>> # Gaussian spread)
>>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
# set gaussian parameters
b = bandwidth
sigma_pref = 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * (2.0 ** b + 1) / (2.0 ** b - 1)
sigma_y = sigma_pref / frequency
sigma_x = sigma_y/gamma
# meshgrid
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]
# rotation matrix
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
# combine gambor and
    g = np.zeros(y.shape, dtype=complex)  # builtin complex; np.complex was removed in recent NumPy versions
g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y # gaussian envelope
oscil = np.exp(1j * (2 * np.pi * frequency * rotx + offset)) # harmonic / oscilatory function
g_dc = g*oscil
# remove dc component by subtracting the envelope weighted by K
K = np.sum(g_dc)/np.sum(g)
g_nodc = g_dc - K*g
return g_nodc
def _plot_filter_bank(kernels, frequency, ntheta, bandwidth, gamma, **kwargs):
"""
Display filter bank
Parameters
----------
kernels: list
List of kernels from filter_bank_2d_nodc()
frequency: 1d ndarray of scalars
Spatial frequencies used to built the Gabor filters. Values should be
in [0;1]
ntheta: int
Number of angular steps between 0° to 90°
bandwidth: scalar, optional, default is 1
This parameter modifies the frequency of the Gabor filter
gamma: scalar, optional, default is 1
        This parameter changes the Gaussian window that modulates the continuous
sine.
1 => same gaussian window in x and y direction (circle)
<1 => elongation of the filter size in the y direction (elipsoid)
>1 => reduction of the filter size in the y direction (elipsoid)
**kwargs, optional. This parameter is used by plt.plot and savefig functions
figsize : tuple of integers, optional, default: (13,13)
width, height in inches.
dpi : integer, optional
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
interpolation : string, optional, default is 'nearest'
Pixels interpolation
aspect : string, optional, default is 'auto'
fontsize : scalar, optional, default is 8/0.22*hmax*100/dpi)
size of the font use to print the parameters of each filter
... and more, see matplotlib
Returns
-------
fig : Figure
The Figure instance
ax : Axis
The Axis instance
"""
params = []
for theta in range(ntheta):
theta = theta/ntheta * np.pi
for freq in frequency:
params.append([freq, theta, bandwidth, gamma])
w = []
h = []
for kernel in kernels:
ylen, xlen = kernel.shape
w.append(xlen)
h.append(ylen)
plt.gray()
fig = plt.figure()
dpi =kwargs.pop('dpi',fig.get_dpi())
figsize =kwargs.pop('figsize',(13,13))
interpolation =kwargs.pop('interpolation','nearest')
aspect =kwargs.pop('aspect','auto')
fig.set_figwidth(figsize[0])
fig.set_figheight(figsize[1])
w = np.asarray(w)/dpi
h = np.asarray(h)/dpi
wmax = np.max(w)*1.25
hmax = np.max(h)*1.05
fontsize =kwargs.pop('fontsize',8/0.22*hmax*100/dpi)
params_label = []
for param in params:
params_label.append('theta=%d f=%.2f \n bandwidth=%.1f \n gamma=%.1f'
% (param[1] * 180 / np.pi, param[0], param[2],
param[3]))
n = len(frequency)
for ii, kernel in enumerate(kernels):
ax = plt.axes([(ii%n)*wmax + (wmax-w[ii])/2,(ii//n)*hmax + (hmax-h[ii])/2,w[ii],h[ii]])
ax.imshow(np.real(kernel),interpolation=interpolation, aspect =aspect, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_ylabel(params_label[ii],fontsize=fontsize)
ax.axis('tight')
plt.show()
return ax, fig
def _plot_filter_results(im_ref, im_list, kernels, params, m, n):
"""
Display the result after filtering
Parameters
----------
im_ref : 2D array
Reference image
im_list : list
List of filtered images
kernels: list
List of kernels from filter_bank_2d_nodc()
m: int
number of columns
n: int
number of rows
    Returns
    -------
fig : Figure
The Figure instance
ax : Axis
The Axis instance
"""
ncols = m
nrows = n
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 5))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
axes[0][1].imshow(im_ref, origin='lower')
axes[0][1].set_title('spectrogram', fontsize=9)
axes[0][1].axis('off')
    plt.tight_layout()
params_label = []
for param in params:
params_label.append('theta=%d,\nf=%.2f' % (param[1] * 180 / np.pi, param[0]))
ii = 0
for ax_row in axes[1:]:
plotGabor = True
for ax in ax_row:
if plotGabor == True:
# Plot Gabor kernel
print(params_label[ii])
ax.imshow(np.real(kernels[ii]), interpolation='nearest')
ax.set_ylabel(params_label[ii], fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
plotGabor = False
else:
im_filtered = im_list[ii]
ax.imshow(im_filtered, origin='lower')
ax.axis('off')
plotGabor = True
ii=ii+1
plt.show()
return ax, fig
def filter_mag(im, kernel):
"""
Normalizes the image and computes im and real part of filter response using
the complex kernel and the modulus operation
Parameters
----------
im: 2D array
Input image to process
kernel: 2D array
Complex kernel (or filter)
Returns
-------
im_out: Modulus operand on filtered image
"""
im = (im - im.mean()) / im.std()
im_out = np.sqrt(ndi.convolve(im, np.real(kernel), mode='reflect')**2 +
ndi.convolve(im, np.imag(kernel), mode='reflect')**2)
return im_out
def filter_multires(im_in, kernels, npyr=4, rescale=True):
"""
Computes 2D wavelet coefficients at multiple octaves/pyramids
Parameters
----------
im_in: list of 2D arrays
List of input images to process
kernels: list of 2D arrays
List of 2D wavelets to filter the images
npyr: int
Number of pyramids to compute
rescale: boolean
Indicates if the reduced images should be rescaled
Returns
-------
im_out: list of 2D arrays
List of images filtered by each 2D kernel
"""
# Downscale image using gaussian pyramid
if npyr<2:
print('Warning: npyr should be int and larger than 2 for multiresolution')
im_pyr = tuple(transform.pyramid_gaussian(im_in, downscale=2,
max_layer=1, multichannel=False))
else:
im_pyr = tuple(transform.pyramid_gaussian(im_in, downscale=2,
max_layer=npyr-1, multichannel=False))
# filter 2d array at multiple resolutions using gabor kernels
im_filt=[]
for im in im_pyr: # for each pyramid
for kernel, param in kernels: # for each kernel
im_filt.append(filter_mag(im, kernel)) # magnitude response of filter
# Rescale image using gaussian pyramid
if rescale:
dims_raw = im_in.shape
im_out=[]
for im in im_filt:
ratio = np.array(dims_raw)/np.array(im.shape)
if ratio[0] > 1:
im = transform.rescale(im, scale = ratio, mode='reflect',
multichannel=False, anti_aliasing=True)
else:
pass
im_out.append(im)
else:
pass
return im_out
def filter_bank_2d_nodc(frequency, ntheta, bandwidth=1, gamma=1, display=False,
savefig=None, **kwargs):
"""
Build a Gabor filter bank with no offset component
Parameters
----------
frequency: 1d ndarray of scalars
Spatial frequencies used to built the Gabor filters. Values should be
in [0;1]
ntheta: int
Number of angular steps between 0° to 90°
bandwidth: scalar, optional, default is 1
This parameter modifies the frequency of the Gabor filter
gamma: scalar, optional, default is 1
        This parameter changes the Gaussian window that modulates the continuous
sine.
1 => same gaussian window in x and y direction (circle)
<1 => elongation of the filter size in the y direction (elipsoid)
>1 => reduction of the filter size in the y direction (elipsoid)
Returns
-------
params: 2d structured array
Parameters used to calculate 2D gabor kernels.
Params array has 4 fields (theta, freq, bandwidth, gamma)
kernels: 2d ndarray of scalars
Gabor kernels
"""
theta = np.arange(ntheta)
theta = theta / ntheta * np.pi
params=[i for i in it.product(theta,frequency)]
kernels = []
for param in params:
kernel = gabor_kernel_nodc(frequency=param[1],
theta=param[0],
bandwidth=bandwidth,
gamma=gamma,
offset=0,
n_stds=3)
kernels.append((kernel, param))
if display:
_, fig = _plot_filter_bank(kernels, frequency, ntheta, bandwidth,
gamma, **kwargs)
if savefig is not None :
dpi =kwargs.pop('dpi',96)
format=kwargs.pop('format','png')
filename = savefig+'_filter_bank2D.'+format
fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
**kwargs)
return params, kernels
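# Hedged usage sketch (frequencies, angles and the random image are
# illustrative assumptions): build a small Gabor bank and filter a random
# "spectrogram" at two pyramid levels; each returned image is the magnitude
# response of one kernel at one resolution.
def _example_filter_bank():
    params, kernels = filter_bank_2d_nodc(frequency=np.array([0.1, 0.25]),
                                          ntheta=2, bandwidth=1, gamma=1)
    im = np.random.rand(128, 128)
    im_filt = filter_multires(im, kernels, npyr=2, rescale=True)
    return params, im_filt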
def shape_features(im, im_blobs=None, resolution='low', opt_shape=None):
"""
Computes shape of 2D signal (image or spectrogram) at multiple resolutions
using 2D Gabor filters
Parameters
----------
im: 2D array
Input image to process
im_blobs: 2D array, optional
Optional binary array with '1' on the region of interest and '0' otherwise
opt: dictionary
options for the filter bank (kbank_opt) and the number of scales (npyr)
Returns
-------
shape: 1D array
        Shape coefficients of each filter
    params: 2D numpy structured array
        Corresponding parameters of the 2D filters used to calculate the
shape coefficient. Params has 4 fields (theta, freq, pyr_level, scale)
bbox:
If im_blobs provided, corresponding bounding box
"""
# unpack settings
opt_shape = opt_shape_presets(resolution, opt_shape)
npyr = opt_shape['npyr']
# build filterbank
params, kernels = filter_bank_2d_nodc(ntheta=opt_shape['ntheta'],
bandwidth=opt_shape['bandwidth'],
frequency=opt_shape['frequency'],
gamma=opt_shape['gamma'])
# filter images
im_rs = filter_multires(im, kernels, npyr, rescale=True)
# Get mean intensity
shape = []
if im_blobs is None:
for im in im_rs:
shape.append(np.mean(im))
rois_bbox=None
shape = [shape] # for dataframe formating below
else:
for im in im_rs:
labels = measure.label(im_blobs)
rprops = measure.regionprops(labels, intensity_image=im)
roi_mean = [roi.mean_intensity for roi in rprops]
shape.append(roi_mean)
rois_bbox = [roi.bbox for roi in rprops]
shape = list(map(list, zip(*shape))) # transpose shape
# organise parameters
    params = np.asarray(params)
# Imports packages
from gensim.models import KeyedVectors
import numpy as np
import os
import pickle as pk
import re
import sklearn as sk
from sklearn.preprocessing import LabelEncoder
import sys
import tensorflow as tf
# Declares paths
# Location where all data is stored
DATA_PATH = "/home/accts/gfs22/DiachronicPOSTagger/Data/"
# Location where embedding data is stored
EMBED_PATH = os.path.join(DATA_PATH, "Given_Data/Embeddings/word2vec/data")
# Location where corpus data is stored
CORPUS_PATH = os.path.join(DATA_PATH, "Given_Data/Corpus")
# Location where lexicon data is stored
LEX_PATH = os.path.join(DATA_PATH, "Given_Data/Embeddings/lexicon.txt")
# Location where processed data is saved
SAVE_PATH = os.path.join(DATA_PATH, "Processed_Data")
# Sets constants
# Number of most common words for which embeddings are stored
NUM_COMMON_WORDS = 600000
# Total number of words in the original data set vocabulary
NUM_VOCAB_WORDS = 3954340
# Maximum length of any sentence
MAX_SENT_LENGTH = 50
# Size of input embedding vectors
EMBED_DIM = 300
# Number of sentences in corpus that exceed 50 words
NUM_SENT_EXCEED_MAX_LEN = 0
# Number of total sentences in corpus
NUM_SENT_TOTAL = 0
# Formats POS by taking all information before the first underscore -- generates 423 unique POS tags
format_POS = lambda pos: pos.split("_")[0]
def load_embeddings():
"""Loads and returns word2vec embeddings
:return: model: a matrix of floats, word2vec GoogleNews embeddings
:rtype: object
"""
embeddings = KeyedVectors.load_word2vec_format(EMBED_PATH, binary=True)
return embeddings
def format_word(word):
"""Returns input "word" with dashes removed and in lower case.
:param word: word as string, a word in the corpus
:type word: string
    :return: modified_word as string, lower-case version of "word" with dashes and spaces removed
:rtype: string
"""
dashes = r"[- ]*"
modified_word = re.sub(dashes, "", word.lower())
return modified_word
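# Quick illustrative check (inputs are assumptions): dashes and spaces are
# both stripped after lower-casing, so multi-token lexicon entries collapse
# into a single dictionary key, e.g.
#
#     format_word("Mother-in-Law")   # -> 'motherinlaw'
#     format_word("New York")        # -> 'newyork'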
def read_lex(embeddings):
"""Uses lexicon to initialize a label encoder for POS, a list of POS tags, a dictionary that maps words to their embeddings, and a matrix of all embeddings
:param embeddings: embeddings or matrix of floats, word2vec GoogleNews embeddings (KeyedVectors object)
:type embeddings: object
:returns:
- le - as label encoder, the label encoder for POS tags (LabelEncoder object after fitting with POS_tags)
- POS_tags - as list of strings, the list of all POS tags
- word_dict - as dictionary with string keys and integer values to maps word strings (keys) to actual embeddings through embedding IDs (values)
- embed_mat - as matrix of integers (a matrix of the word embeddings where each row corresponds to a unique word)
:rtypes:
- le - sklearn.preprocessing.label.LabelEncoder
- POS_tags - list
- word_dict - dictionary
- embed_mat - numpy.ndarray
"""
# Opens lexicon
with open(LEX_PATH) as fh:
lines = fh.readlines()
# Initializes embedding matrix to random normally distributed numbers
embed_mat = np.random.normal(size=(NUM_VOCAB_WORDS, EMBED_DIM)) # 3954340 x 300
# Initializes list of all POS tags
POS_tags = []
# Initializes dictionary that maps words to embeddings
word_dict = {}
# Fills in embed_mat, POS_tags, and word_dict appropriately
for line in lines:
word_list = line.strip().split("\t")
if len(word_list) == 5:
wid, _, word, _, pos = word_list[:5] # wid as word id
word = format_word(word)
wid = int(wid)
pos = format_POS(pos)
if word not in word_dict:
word_dict[word] = wid
POS_tags.append(pos)
if word in embeddings:
embed_mat[wid, :] = embeddings[word]
else:
POS_tags.append("N/A")
# Fits label encoder to POS tags
le = LabelEncoder()
le.fit(POS_tags)
return le, POS_tags, word_dict, embed_mat
def read_single_file(file_name, le, POS_tags, word_dict):
"""For a given document generates matrix corresponding to indices for word embeddings (X_word), list corresponding to year of composition of each sentence (X_year_array), and matrix of label encoded POS tags of each word in a sentence in the document (Y_array)
:param file_name: a string, the name of the file under examination
:type file_name: string
:param le: a label encoder, the label encoder for POS tags
:type le: sklearn.preprocessing.label.LabelEncoder
:param POS_tags: a list of strings, the list of all POS tags
:type POS_tags: list
    :param word_dict: a dictionary with string keys and integer values, maps word strings (keys) to actual embeddings through embedding IDs (values)
:type word_dict: dictionary
:returns:
- X_word_array - a matrix of integers, each row corresponds to a list of the indices of the embeddings of each word in a sentence in the document
- X_year_array - a list of integers, repeats the year of composition for each word in the sentence for each sentence in the document
- Y_array - a matrix of integers, each row corresponds to a list of the label encoded POS tags of each word in a sentence in the document
:rtypes:
- X_word_array - numpy.ndarray
- X_year_array - numpy.ndarray
- Y_array - numpy.ndarray
"""
# Variable corresponding to number of sentences that contain more than 50 words
global NUM_SENT_EXCEED_MAX_LEN
# Number of total sentences in corpus
global NUM_SENT_TOTAL
# Words to ignore
ignore = {"@", "--", "-- ", " --"}
# Extracts the Year
split_fn = file_name.split("_")
X_year = int(split_fn[-2])
# Opens file
with open(file_name) as fh:
lines = fh.readlines()
# Variables to store embedding indices and label encoded POS tags
sent_X_word = []
sent_Y = []
X_word = np.zeros(shape=(MAX_SENT_LENGTH,), dtype=np.int32) # 50
Y = np.zeros(shape=(MAX_SENT_LENGTH, ), dtype=np.int32)
# Number of words in current sentence
num_words_cur_sent = 0
# Loops through line of document
for i, line in enumerate(lines):
word_list = line.strip().split("\t")
if len(word_list)==3 and word_list[0] not in ignore:
# Extracts and formats word and POS
word, lemma, pos = word_list[:3]
pos = format_POS(pos)
word = format_word(word)
# Removes words that begin with @ and words not in vocabulary
if word.startswith("@") or word not in word_dict:
continue
# Fills X_word with indices for word embeddings and Y with POS tags for each word
if num_words_cur_sent < MAX_SENT_LENGTH:
X_word[num_words_cur_sent] = word_dict[word]
Y[num_words_cur_sent] = le.transform([pos])[0]
# Begins a new sentence
if lemma == '.':
sent_X_word.append(X_word)
X_word = np.zeros(shape=(MAX_SENT_LENGTH,), dtype=np.int32)
sent_Y.append(Y)
Y = np.zeros(shape=(MAX_SENT_LENGTH, ), dtype=np.int32)
num_words_cur_sent = 0
# Iterates num_words_cur_sent if we are not beginning a new sentence
else:
num_words_cur_sent += 1
# Checks if sentence exceeds maximum sentence length
if num_words_cur_sent == MAX_SENT_LENGTH:
NUM_SENT_EXCEED_MAX_LEN = NUM_SENT_EXCEED_MAX_LEN + 1
# If we have sentences in a document, joins data from all sentences to make combined arrays
if sent_X_word != []:
X_word_array = np.stack(sent_X_word, axis=0)
X_year_array = np.repeat(X_year, len(sent_X_word))
Y_array = np.stack(sent_Y, axis=0)
print(X_word_array.shape[0])
print(NUM_SENT_TOTAL)
NUM_SENT_TOTAL = NUM_SENT_TOTAL + X_word_array.shape[0]
print(NUM_SENT_TOTAL)
return X_word_array, X_year_array, Y_array
# If there are no sentences in a document returns all None values.
else:
return None, None, None
def read_all_files(le, POS_tags, word_dict):
"""From all documents selects 1,000,000 sentences. Generates matrix corresponding to indices for word embeddings (X_word), list
corresponding to year of composition of each sentence (X_year_array), and matrix of label encoded POS tags (Y_array) for each
word for each of these sentences.
:param le: a label encoder, the label encoder for POS tags
:type le: sklearn.preprocessing.label.LabelEncoder
:param POS_tags: a list of strings, the list of all POS tags
:type POS_tags: list
:param word_dict: a dictionary with string keys and integer values, maps word strings (keys) to actual embeddings through embedding IDs (values)
:type word_dict: dictionary
:returns:
- X_word_array - a matrix of integers, each row corresponds to a list of the indices of the embeddings of each word in a sentence; there is a row for each of the 1,000,000 final sentences
- X_year_array - a list of integers, each entry corresponds to the year of composition of a sentence; there is an entry for each of the 1,000,000 final sentences
- Y_array - a matrix of integers, each row corresponds to a list of the label encoded POS tags of each word in a sentence; there is a row for each of the 1,000,000 final sentences
:rtypes:
- X_word_array - numpy.ndarray
- X_year_array - numpy.ndarray
- Y_array - numpy.ndarray
"""
# Variable corresponding to number of sentences that contain more than 50 words
global NUM_SENT_EXCEED_MAX_LEN # 0
# Variable corresponding to total number of sentences
global NUM_SENT_TOTAL # 0
# Initializes intermediate variables to store data from all files
X_word_arrays_total, X_year_arrays_total, Y_arrays_total = [], [], []
# Loops through all files
for dirpath, dirnames, filenames in os.walk(CORPUS_PATH): # /Users/yurio/Kuliah/semester_2/text_analytics/POS_tagging
# Prints directory and number of files in directory
print(dirpath)
print(len(filenames))
sys.stdout.flush()
if len(filenames)!=0:
X_word_arrays, X_year_arrays, Y_arrays = [], [], []
# Calls "read_single_file" function on each file
# Appends results from each file to X_word_arrays, X_year_arrays, and Y_arrays
for i, file in enumerate(filenames):
if i%100 == 0:
print(i)
sys.stdout.flush()
X_word_array, X_year_array, Y_array = read_single_file(os.path.join(dirpath, file), le, POS_tags, word_dict)
if X_word_array is not None and X_year_array is not None and Y_array is not None:
X_word_arrays.append(X_word_array)
X_year_arrays.append(X_year_array)
Y_arrays.append(Y_array)
# Appropriately reshapes X_word_arrays_total, X_year_arrays_total, Y_arrays_total into numpy arrays
X_word_array_int = np.concatenate(X_word_arrays, axis=0)
X_year_array_int = np.concatenate(X_year_arrays, axis=0)
            Y_array_int = np.concatenate(Y_arrays, axis=0)
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for gaussian_markov_chain module.
"""
import numpy as np
from ..gaussian_markov_chain import GaussianMarkovChain
from ..gaussian_markov_chain import VaryingGaussianMarkovChain
from ..gaussian import Gaussian, GaussianMoments
from ..gaussian import GaussianARD
from ..gaussian import GaussianGamma
from ..wishart import Wishart, WishartMoments
from ..gamma import Gamma, GammaMoments
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils import misc
from bayespy.utils.misc import TestCase
def kalman_filter(y, U, A, V, mu0, Cov0, out=None):
"""
Perform Kalman filtering to obtain filtered mean and covariance.
The parameters of the process may vary in time, thus they are
given as iterators instead of fixed values.
Parameters
----------
y : (N,D) array
"Normalized" noisy observations of the states, that is, the
observations multiplied by the precision matrix U (and possibly
other transformation matrices).
U : (N,D,D) array or N-list of (D,D) arrays
Precision matrix (i.e., inverse covariance matrix) of the observation
noise for each time instance.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Filtered mean of the states.
Cov : array
Filtered covariance of the states.
See also
--------
rts_smoother
"""
mu = mu0
Cov = Cov0
# Allocate memory for the results
(N,D) = np.shape(y)
X = np.empty((N,D))
CovX = np.empty((N,D,D))
# Update step for t=0
M = np.dot(np.dot(Cov, U[0]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[0]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
X[0,:] = mu
CovX[0,:,:] = Cov
#for (yn, Un, An, Vn) in zip(y, U, A, V):
for n in range(len(y)-1): #(yn, Un, An, Vn) in zip(y, U, A, V):
# Prediction step
mu = np.dot(A[n], mu)
Cov = np.dot(np.dot(A[n], Cov), A[n].T) + V[n]
# Update step
M = np.dot(np.dot(Cov, U[n+1]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[n+1]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
# Force symmetric covariance (for numeric inaccuracy)
Cov = 0.5*Cov + 0.5*Cov.T
# Store results
X[n+1,:] = mu
CovX[n+1,:,:] = Cov
return (X, CovX)
def rts_smoother(mu, Cov, A, V, removethis=None):
"""
Perform Rauch-Tung-Striebel smoothing to obtain the posterior.
The function returns the posterior mean and covariance of each
state. The parameters of the process may vary in time, thus they
are given as iterators instead of fixed values.
Parameters
----------
mu : (N,D) array
Mean of the states from Kalman filter.
Cov : (N,D,D) array
Covariance of the states from Kalman filter.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Posterior mean of the states.
Cov : array
Posterior covariance of the states.
See also
--------
kalman_filter
"""
N = len(mu)
#n = N-1
# Start from the last time instance and smoothen backwards
x = mu[-1,:]
Covx = Cov[-1,:,:]
for n in reversed(range(N-1)):#(An, Vn) in zip(reversed(A), reversed(V)):
#n = n - 1
#if n <= 0:
# break
# The predicted value of n
x_p = np.dot(A[n], mu[n,:])
Cov_p = np.dot(np.dot(A[n], Cov[n,:,:]), A[n].T) + V[n]
# Temporary variable
S = np.linalg.solve(Cov_p, np.dot(A[n], Cov[n,:,:]))
# Smoothed value of n
x = mu[n,:] + np.dot(S.T, x-x_p)
Covx = Cov[n,:,:] + np.dot(np.dot(S.T, Covx-Cov_p), S)
# Force symmetric covariance (for numeric inaccuracy)
Covx = 0.5*Covx + 0.5*Covx.T
# Store results
mu[n,:] = x
Cov[n,:] = Covx
return (mu, Cov)
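# Hedged usage sketch (the random-walk model below is an assumption used only
# for illustration): run the filter on precision-weighted observations, then
# smooth the filtered estimates backwards with the RTS pass.
def _example_kalman_rts():
    N, D = 20, 2
    A = np.tile(np.identity(D), (N-1, 1, 1))          # random-walk dynamics
    V = np.tile(0.1*np.identity(D), (N-1, 1, 1))      # innovation covariance
    U = np.tile(2.0*np.identity(D), (N, 1, 1))        # observation precision
    x_obs = np.random.randn(N, D)
    y = np.einsum('nij,nj->ni', U, x_obs)             # "normalized" observations
    mu0, Cov0 = np.zeros(D), np.identity(D)
    mu_f, Cov_f = kalman_filter(y, U, A, V, mu0, Cov0)
    return rts_smoother(mu_f, Cov_f, A, V)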
class TestGaussianMarkovChain(TestCase):
def create_model(self, N, D):
# Construct the model
Mu = Gaussian(np.random.randn(D),
np.identity(D))
Lambda = Wishart(D,
random.covariance(D))
A = Gaussian(np.random.randn(D,D),
np.identity(D))
V = Gamma(D,
np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N)
Y = Gaussian(X, np.identity(D))
return (Y, X, Mu, Lambda, A, V)
def test_plates(self):
"""
Test that plates are handled correctly.
"""
def test_message_to_mu0(self):
pass
def test_message_to_Lambda0(self):
pass
def test_message_to_A(self):
pass
def test_message_to_v(self):
pass
def test_message_to_parents(self):
""" Check gradient passed to inputs parent node """
N = 3
D = 2
Mu = Gaussian(np.random.randn(D), random.covariance(D))
Lambda = Wishart(D, random.covariance(D))
A = Gaussian(np.random.randn(D,D), random.covariance(D))
V = Gamma(D, np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N+1)
Y = Gaussian(X, random.covariance(D))
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
self.assert_message_to_parent(X, Mu, eps=1e-8)
self.assert_message_to_parent(
X,
Lambda,
eps=1e-8,
postprocess=lambda u: [
u[0] + linalg.transpose(u[0], ndim=1),
u[1],
]
)
self.assert_message_to_parent(X, A)
self.assert_message_to_parent(X, V, eps=1e-10, atol=1e-5)
pass
def test_message_to_parents_with_inputs(self):
""" Check gradient passed to inputs parent node """
def check(Mu, Lambda, A, V, U):
X = GaussianMarkovChain(Mu, Lambda, A, V, inputs=U)
Y = Gaussian(X, random.covariance(D))
# Check moments
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
X.update()
# Check gradient messages to parents
self.assert_message_to_parent(X, Mu)
self.assert_message_to_parent(
X,
Lambda,
postprocess=lambda phi: [
phi[0] + linalg.transpose(phi[0], ndim=1),
phi[1]
]
)
self.assert_message_to_parent(
X,
A,
postprocess=lambda phi: [
phi[0],
phi[1] + linalg.transpose(phi[1], ndim=1),
]
)
self.assert_message_to_parent(X, V)
self.assert_message_to_parent(X, U)
N = 4
D = 2
K = 3
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
Gaussian(
np.random.randn(D,D+K),
random.covariance(D+K)
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
GaussianGamma(
np.random.randn(D,D+K),
random.covariance(D+K),
D,
np.random.rand(D),
ndim=1
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
pass
def test_message_to_child(self):
"""
Test the updating of GaussianMarkovChain.
Check that the moments and the lower bound contribution are computed
correctly.
"""
# TODO: Add plates and missing values!
# Dimensionalities
D = 3
N = 5
(Y, X, Mu, Lambda, A, V) = self.create_model(N, D)
# Inference with arbitrary observations
y = np.random.randn(N,D)
Y.observe(y)
X.update()
(x_vb, xnxn_vb, xpxn_vb) = X.get_moments()
# Get parameter moments
(mu0, mumu0) = Mu.get_moments()
(icov0, logdet0) = Lambda.get_moments()
(a, aa) = A.get_moments()
(icov_x, logdetx) = V.get_moments()
icov_x = np.diag(icov_x)
# Prior precision
Z = np.einsum('...kij,...kk->...ij', aa, icov_x)
U_diag = [icov0+Z] + (N-2)*[icov_x+Z] + [icov_x]
U_super = (N-1) * [-np.dot(a.T, icov_x)]
U = misc.block_banded(U_diag, U_super)
# Prior mean
mu_prior = np.zeros(D*N)
mu_prior[:D] = np.dot(icov0, mu0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
def __init__(self, idf, list=None, typ='') :
self.recordings = []
self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
M,K sequence of sleep states, and a sequence of
0's and 1's indicating non-annotated and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
if re.match('\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('\s*#', l) :
continue
if re.match('\d+\s+-?\d+', l) :
a = re.split('\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
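# Example usage (a sketch; the base folder and recording name below are
# hypothetical and must contain a remidx_<name>.txt annotation file):
#
#   M, K = load_stateidx('/data/sleep_recordings', 'M1_010101n1')
#   # M is the sleep state per bin (e.g. 1=REM, 2=Wake, 3=NREM, typically 2.5 s bins),
#   # K flags whether a bin was manually annotated.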
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
(list of controls, list of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('^\s*#', l) :
continue
a = re.split('\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
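# Example usage (a sketch; the listing file is hypothetical and should contain
# lines such as "C   M1_010101n1" or "E   M2_010101n1"):
#
#   ctr, exp = load_recordings('/data/sleep_recordings', 'recordings.txt')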
def load_dose_recordings(ppath, rec_file):
"""
load recording list with following syntax:
A line is either control or experiments; Control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
if re.search('^\s+$', l):
continue
if re.search('^\s*#', l):
continue
a = re.split('\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + 'SR' + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + field + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return values
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: Parameters specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
if the animal was stimulated for 2 min every 20 min with 20 Hz, return the
start and end index of each 2 min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
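# Example (a sketch; folder and recording name are hypothetical): load the laser
# signal of a recording and determine the duration of each stimulation train.
#
#   ppath, name = '/data/sleep_recordings', 'M1_010101n1'
#   laser = load_laser(ppath, name)
#   SR = get_snr(ppath, name)
#   istart, iend = laser_start_end(laser, SR=SR)
#   train_dur = (iend - istart + 1) / SR        # seconds, assuming trains exist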
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
inter-stimulation intervals, avg. inter-stimulation interval, frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
The conversion is only executed if no conversion factor has
been specified in the info file yet.
:param ppath: base folder
:param rec: recording
:param conv_factor: conversion factor
:return: None
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
iv - minimum time interval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
The nice thing about this filter is that is has zero-phase distortion.
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
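# Example (a sketch): band-pass filter a hypothetical EMG trace `emg` between
# 10 and 100 Hz at a sampling rate of 1000 Hz. Corner frequencies are given as
# fractions of the Nyquist frequency, i.e. w = 2 * f_Hz / sampling_rate.
#
#   sr = 1000.0
#   w0, w1 = 2*10.0/sr, 2*100.0/sr
#   emg_band = my_bpfilter(emg, w0, w1)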
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
downsample the vector x by replacing nbin consecutive
bins by their mean
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
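# Example (a sketch): downsample a hypothetical 1000 Hz EMG trace `emg` to
# 2.5 s bins, the brain-state time resolution used throughout this module.
#
#   nbin = int(np.round(1000.0) * 2.5)
#   emg_binned = downsample_vec(emg, nbin)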
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in np.arange(-L, L+1.)]
F = F / np.sum(F)
return scipy.signal.fftconvolve(x, F, 'same')
def power_spectrum(data, length, dt):
"""
scipy's implementation of Welch's method using hanning window to estimate
the power spectrum
The function returns power density with units V**2/Hz
see also https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html
The label on the y-axis should say PSD [V**2/Hz]
@Parameters
data - time series; float vector!
length - length of hanning window, even integer!
@Return:
power density, frequencies
The function returns power density in units V^2 / Hz
Note that
np.var(data) ~ np.sum(power density) * (frequencies[1]-frequencies[0])
"""
f, pxx = scipy.signal.welch(data, fs=1.0/dt, window='hanning', nperseg=int(length), noverlap=int(length/2))
return pxx, f
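# Example (a sketch): power spectrum of 5 s of white noise sampled at 1000 Hz,
# illustrating the variance relation quoted in the docstring.
#
#   dt = 1.0/1000
#   data = np.random.randn(5000)
#   pxx, f = power_spectrum(data, 1000, dt)
#   # np.var(data) is approximately np.sum(pxx) * (f[1] - f[0])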
def spectral_density(data, length, nfft, dt):
"""
calculate the spectrogram for the time series given by data with time resolution dt
The powerspectrum for each window of length $length is computed using
Welch's method.
The windows for the powerspectrum calculation are half-overlapping. If length contains 5s of data,
then the first window goes from 0 s to 5 s, the second window from 2.5 to 7.5 s, ...
The last window ends at ceil(len(data)/length)*5s
Another example: assume we have 13 s of data with 5 s windows, then the power density is calculated for the following
time windows:
0 -- 5, 2.5 -- 7.5, 5 -- 10, 7.5 -- 12.5, 10 -- 15
In total there are thus 2*ceil(13/5)-1 = 5 windows
The last window starts at (2*3-2) * (5/2) = 10 s
Note: the returned time axis goes from 0 to 10 s in 2.5 s steps
@Parameters:
data - time series
length - window length of data used to calculate powerspectrum.
Note that the time resolution of the spectrogram is length/2
nfft - size of the window used to calculate the powerspectrum.
determines the frequency resolution.
@Return:
Powspectrum, frequencies, time axis
"""
n = len(data)
k = int(np.ceil((1.0*n)/length))
data = np.concatenate((data, np.zeros((length*k-n,))))
fdt = length*dt/2 # time step for spectrogram
t = np.arange(0, fdt*(2*k-2)+fdt/2.0, fdt)
# frequency axis of spectrogram
f = np.linspace(0, 1, int(np.ceil(nfft/2.0))+1) * (0.5/dt)
# the power spectrum is calculated for 2*k-1 time points
Pow = np.zeros((len(f), k*2-1))
j = 0
for i in range(0, k-2+1):
w1=data[(length*i):(i+1)*length]
w2=data[length*i+int(length/2):(i+1)*length+int(length/2)]
Pow[:,j] = power_spectrum(w1, nfft, dt)[0]
Pow[:,j+1] = power_spectrum(w2, nfft, dt)[0]
j += 2
# last time point
Pow[:,j],f = power_spectrum(data[length*(k-1):k*length], nfft, dt)
return Pow, f, t
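# Example (a sketch): spectrogram of a hypothetical signal `data` sampled at
# SR Hz, using 5 s windows (2.5 s time resolution) and a frequency resolution
# of roughly SR/nfft Hz.
#
#   SR = 1000.0
#   swin = int(5 * SR)
#   nfft = int(SR)
#   Pow, f, t = spectral_density(data, swin, nfft, 1.0/SR)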
def calculate_spectrum(ppath, name, fres=0.5):
"""
calculate EEG and EMG spectrogram used for sleep stage detection.
Function assumes that data vectors EEG.mat and EMG.mat exist in recording
folder ppath/name; these are used to calculate the powerspectrum
fres - resolution of frequency axis
all data saved in "true" mat files
:return EEG Spectrogram, EMG Spectrogram, frequency axis, time axis
"""
SR = get_snr(ppath, name)
swin = round(SR)*5
fft_win = round(swin/5) # approximate number of data points per second
if (fres == 1.0) or (fres == 1):
fft_win = int(fft_win)
elif fres == 0.5:
fft_win = 2*int(fft_win)
else:
print("Resolution %f not allowed; please use either 1 or 0.5" % fres)
(peeg2, pemg2) = (False, False)
# Calculate EEG spectrogram
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
Pxx, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EEG2.mat')):
peeg2 = True
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG2.mat'))['EEG2'])
Pxx2, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
#save the stuff to a .mat file
spfile = os.path.join(ppath, name, 'sp_' + name + '.mat')
if peeg2 == True:
so.savemat(spfile, {'SP':Pxx, 'SP2':Pxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'SP':Pxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
# Calculate EMG spectrogram
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
Qxx, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EMG2.mat')):
pemg2 = True
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG2.mat'))['EMG2'])
Qxx2, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
# save the stuff to .mat file
spfile = os.path.join(ppath, name, 'msp_' + name + '.mat')
if pemg2 == True:
so.savemat(spfile, {'mSP':Qxx, 'mSP2':Qxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'mSP':Qxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
return Pxx, Qxx, f, t
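# Example usage (a sketch; folder and recording name are hypothetical, and the
# recording folder must contain EEG.mat and EMG.mat):
#
#   Pxx, Qxx, f, t = calculate_spectrum('/data/sleep_recordings', 'M1_010101n1', fres=0.5)
#   # the spectrograms are also written to sp_<name>.mat and msp_<name>.mat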
def whiten_spectrogram(ppath, name, fmax=50):
"""
experimental
:param ppath:
:param name:
:param fmax:
:return:
"""
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
ifreq = np.where(freq <= fmax)[0]
SPE = SPE[ifreq,:]
nfilt = 5
filt = np.ones((nfilt, nfilt))
filt = np.divide(filt, filt.sum())
#SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
m = np.mean(SPE,axis=1)
SPE -= np.tile(m, (SPE.shape[1], 1)).T
SPE = SPE.T
C = np.dot(SPE.T, SPE)
[evals, L] = np.linalg.eigh(C)
idx = np.argsort(evals)
D = np.diag(np.sqrt(evals[idx]))
L = L[:,idx]
W = np.dot(L, np.dot(np.linalg.inv(D),np.dot(L.T,SPE.T)))
nfilt = 2
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
W = scipy.signal.convolve2d(W, filt, boundary='symm', mode='same')
return W, D, L
def normalize_spectrogram(ppath, name, fmax=0, band=[], vm=5, pplot=True, sptype='', filt_dim=[]):
"""
Normalize EEG spectrogram by dividing each frequency band by its average value.
:param ppath, name: base folder, recording name
:param fmax: maximum frequency; frequency axis of spectrogram goes from 0 to fmax
if fmax=0, use complete frequency axis
:param band: list or tuple, define lower and upper range of a frequency band,
if pplot=True, plot band, along with spectrogram;
if band=[], disregard
:param vm: color range for plotting spectrogram
:param pplot: if True, plot spectrogram along with power band
:param sptype: if sptype='fine' plot 'special' spectrogram, save under sp_fine_$name.mat;
otherwise plot 'normal' spectrogram sp_$name.mat
:param filt_dim: list or tuple; the two values define the dimensions of box filter
used to filter the normalized spectrogram; if filt_dim=[], then no filtering
:return SPE, t, freq: normalized spectrogram (np.array), time axis, frequency axis
"""
if (len(sptype) == 0) or (sptype=='std'):
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
elif sptype == 'fine':
P = so.loadmat(os.path.join(ppath, name, 'sp_fine_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
t = P['t']
if fmax > 0:
ifreq = np.where(freq <= fmax)[0]
else:
ifreq = np.arange(0, len(freq))
freq = freq[ifreq]
nfilt = 4
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
SPE = SPE[ifreq,:]
# before
#SPE = SPE[ifreq]
#W = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
#sp_mean = W.mean(axis=1)
sp_mean = SPE.mean(axis=1)
SPE = np.divide(SPE, np.tile(sp_mean, (SPE.shape[1], 1)).T)
if len(filt_dim) > 0:
filt = np.ones(filt_dim)
filt = np.divide(filt, filt.sum())
SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
# get high gamma peaks
if len(band) > 0:
iband = np.where((freq >= band[0]) & (freq <= band[-1]))[0]
pow_band = SPE[iband,:].mean(axis=0)
thr = pow_band.mean() + pow_band.std()
idx = np.where(pow_band > thr)[0]
# plot normalized spectrogram, along with band
if pplot:
plt.ion()
plt.figure()
if len(band) > 0:
med = np.median(SPE.mean(axis=0))
ax1 = plt.subplot(211)
plt.pcolormesh(t, freq, SPE, vmin=0, vmax=vm*med, cmap='jet')
plt.subplot(212, sharex=ax1)
plt.plot(t,SPE[iband,:].mean(axis=0))
plt.plot(t[idx], pow_band[idx], '.')
plt.draw()
return SPE, t, freq[ifreq]
def recursive_spectrogram(ppath, name, sf=0.3, alpha=0.3, pplot=True):
"""
calculate EEG/EMG spectrogram in a way that can be implemented by a closed-loop system.
The spectrogram is temporally filtered using a recursive implementation of a lowpass filter
@Parameters:
ppath/name - mouse EEG recording
sf - smoothing factor along frequency axis
alpha - temporal lowpass filter time constant
pplot - if pplot==True, plot figure
@Return:
SE, SM, f - EEG spectrogram, EMG spectrogram, frequency axis
"""
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
len_eeg = len(EEG)
fdt = 2.5
SR = get_snr(ppath, name)
# we calculate the powerspectrum for 5s windows
swin = int(np.round(SR) * 5.0)
# but we sample new data each 2.5 s
swinh = int(swin/2.0)
fft_win = int(swin / 5.0)
# number of 2.5s long samples
spoints = int(np.floor(len_eeg / swinh))
SE = np.zeros((int(fft_win/2+1), spoints))
SM = np.zeros((int(fft_win/2+1), spoints))
print("Starting calculating spectrogram for %s..." % name)
for i in range(2, spoints):
# we take the last two swinh windows (the new 2.5 s long sample and the one from
# the last iteration)
x = EEG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
# recursive low pass filtering of spectrogram:
# the current state is an estimate of the current sample and the previous state
SE[:,i] = alpha*p + (1-alpha) * SE[:,i-1]
# and the same of EMG
x = EMG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
SM[:,i] = alpha*p + (1-alpha) * SM[:,i-1]
if pplot:
# plot EEG spectrogram
t = np.arange(0, SM.shape[1])*fdt
plt.figure()
ax1 = plt.subplot(211)
im = np.where((f>=0) & (f<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.xticks(())
ix = list(range(0, 30, 10))
fi = f[im][::-1]
plt.yticks(ix, list(map(int, fi[ix])))
box_off(ax1)
plt.axis('tight')
plt.ylabel('Freq (Hz)')
# plot EMG amplitude
ax2 = plt.subplot(212)
im = np.where((f>=10) & (f<100))[0]
df = np.mean(np.diff(f))
# amplitude is the square root of the integral
ax2.plot(t, np.sqrt(SM[im,:].sum(axis=0)*df)/1000.0)
plt.xlim((0, t[-1]))
plt.ylabel('EMG Ampl (mV)')
plt.xlabel('Time (s)')
box_off(ax2)
plt.show(block=False)
return SE, SM, f
def recursive_sleepstate_rem(ppath, recordings, sf=0.3, alpha=0.3, past_mu=0.2, std_thdelta = 1.5, past_len=120, sdt=2.5, psave=False, xemg=False):
"""
predict a REM period only based on EEG/EMG history; the same algorithm is also used for
closed-loop REM sleep manipulation.
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
theta/delta is above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
sf smoothing factor for each powerspectrum
alpha smoothing factor along time dimension
past_mu percentage (0 .. 1) of brain states that are allowed to have EMG power larger than threshold
during the last $past_len seconds
past_len window to calculate $past_mu
std_thdelta the hard theta/delta threshold is given by, mean(theta/delta) + $std_thdelta * std(theta/delta)
sdt time bin for brain state, typically 2.5 s
psave if True, save threshold parameters to file.
"""
idf = re.split('_', recordings[0])[0]
# 02/05/2020 changed from int to float:
past_len = float(np.round(past_len/sdt))
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_delta = pow_delta.mean()
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = int(i-past_len)
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum( np.where(pow_mu[sstart:i]>thr_mu)[0] ) / (past_len*1.0)
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
else:
prem = 0 #turn laser off
# for loop ends
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='blue')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_rem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s'+os.linesep) % idf)
fid.write(('ch_alloc: %s'+os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f'+os.linesep) % thr_delta)
fid.write(('THR_MU: %.2f'+os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f'+os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f'+os.linesep) % std_thdelta)
fid.write(('PAST_MU: %.2f'+os.linesep) % past_mu)
fid.write(('SF: %.2f'+os.linesep) % sf)
fid.write(('ALPHA: %.2f'+os.linesep) % alpha)
fid.write(('Bern: %.2f' + os.linesep) % 0.5)
if xemg:
fid.write(('XEMG: %d'+os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
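# Example usage (a sketch; recording name is hypothetical). This plots the
# spectrogram, EMG power, delta power and theta/delta together with the online
# REM estimate and, with psave=True, writes the thresholds to <mouse_id>_rem.txt:
#
#   recursive_sleepstate_rem('/data/sleep_recordings', ['M1_010101n1'], psave=True)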
def recursive_sleepstate_rem_control(ppath, recordings, past_len=120, sdt=2.5, delay=120):
"""
algorithm running laser control for REM sleep dependent activation/inhibtion.
$delay s after a detected REM sleep period, the laser is turned on for the same duration. If a new REM period starts,
the laser stops, but we keep track of the missing time. The next time the laser turns on again,
it stays on for the duration of the most recent REM period + the remaining time.
The algorithm for REM detection is the same as used for closed-loop REM sleep manipulation.
The function reads in the required parameters from the configuration file (MOUSEID_rem.txt)
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
theta/delta is above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
past_len window to calculate $past_mu
sdt time bin for brain state, typically 2.5 s
delay delay to wait after a REM sleep period ends, until the laser is turned on.
"""
idf = re.split('_', recordings[0])[0]
past_len = int(np.round(past_len/sdt))
# load parameters
cfile = os.path.join(ppath, idf + '_rem.txt')
params = load_sleep_params(ppath, cfile)
thr_th_delta1 = params['THR_TH_DELTA'][0]
thr_th_delta2 = params['THR_TH_DELTA'][1]
thr_delta = params['THR_DELTA'][0]
thr_mu = params['THR_MU'][0]
alpha = params['ALPHA'][0]
sf = params['SF'][0]
past_mu = params['PAST_MU'][0]
xemg = params['XEMG'][0]
# calculate spectrogram
(SE, SM) = ([], [])
for rec in recordings:
A, B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5, 12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta, :], axis=0)
pow_theta = np.sum(SE[i_theta, :], axis=0)
pow_mu = np.sum(SM[i_mu, :], axis=0)
th_delta = np.divide(pow_theta, pow_delta)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
# NEW variables:
laser_idx = np.zeros((ntbins,))
delay = int(np.round(delay/sdt))
delay_count = 0
curr_rem_dur = 0
dur_count = 0
on_delay = False
laser_on = False
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = i - past_len
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum(np.where(pow_mu[sstart:i] > thr_mu)[0]) / past_len
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
curr_rem_dur += 1 #NEW
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
curr_rem_dur += 1
else:
prem = 0 # turn laser off
dur_count += curr_rem_dur #NEW
delay_count = delay #NEW
curr_rem_dur = 0 #NEW
on_delay = True #NEW
# NEW:
if on_delay:
if prem == 0:
delay_count -=1
if delay_count == 0:
laser_on = True
on_delay = False
if laser_on:
if prem == 0:
if dur_count >= 0:
dur_count -= 1
laser_idx[i] = 1
else:
laser_on = False
else:
laser_on = False
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='green', label='REM')
ax4.plot(t, laser_idx * thr_th_delta1, color='blue', label='Laser')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.legend()
plt.show(block=False)
def load_sleep_params(path, param_file):
"""
load parameter file generated by recursive_sleepstate_rem or recursive_sleepstate_nrem
@Return:
Dictionary: Parameter --> Value
"""
fid = open(os.path.join(path, param_file), 'r')
lines = fid.readlines()
params = {}
for line in lines:
if re.match('^[\S_]+:', line):
a = re.split('\s+', line)
key = a[0][:-1]
params[key] = a[1:-1]
# transform number strings to floats
for k in params:
vals = params[k]
new_vals = []
for v in vals:
if re.match('^[\d\.]+$', v):
new_vals.append(float(v))
else:
new_vals.append(v)
params[k] = new_vals
return params
def recursive_sleepstate_nrem(ppath, recordings, sf=0.3, alpha=0.3, std_thdelta = 1.5, sdt=2.5, psave=False, xemg=False):
"""
predict NREMs period only based on EEG/EMG history; the same algorithm is also used for
closed-loop NREM sleep manipulation.
The algorithm uses for NREM sleep detection thresholds for delta power, EMG power, and theta/delta power.
For delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
delta has to cross the hard threshold to initiate a NREM period. Then, as long as
delta stays above the soft threshold (and EMG power stays low), NREM sleep continues.
The values for hard and soft threshold are fitted using a Gaussian mixture model
:param ppath: base folder
:param recordings: list of recordings
:param sf: smoothing factor for each powerspectrum
:param alpha: temporal smoothing factor (recursive lowpass filter constant)
:param std_thdelta: factor to set threshold for theta/delta
:param sdt: time step of brain state classification, typically 2.5 s
:param psave: save parameters to text file?
:param xemg: use EEG instead of EMG?
"""
# to fit Gaussian mixture model to delta power distribution
from sklearn import mixture
idf = re.split('_', recordings[0])[0]
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
med_delta = np.median(pow_delta)
pow_delta_fit = pow_delta[np.where(pow_delta<=3*med_delta)]
# fit Gaussian mixture model to delta power
# see http://www.astroml.org/book_figures/chapter4/fig_GMM_1D.html
gm = mixture.GaussianMixture(n_components=2)
fit = gm.fit(pow_delta_fit.reshape(-1, 1))
means = np.squeeze(fit.means_)
x = np.arange(0, med_delta*3, 100)
plt.figure()
plt.hist(pow_delta_fit, 100, density=True, histtype='stepfilled', alpha=0.4)  # density=True replaces the removed 'normed' kwarg
logprob = fit.score_samples(x.reshape(-1,1))
responsibilities = fit.predict_proba(x.reshape((-1,1)))
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
plt.plot(x, pdf, '-k')
plt.plot(x, pdf_individual, '--k')
plt.xlim((0, med_delta*3))
plt.ylabel('p(x)')
plt.xlabel('x = Delta power')
# get point where curves cut each other
if means[0] < means[1]:
idx = np.where((x>= means[0]) & (x<= means[1]))[0]
else:
idx = np.where((x >= means[1]) & (x <= means[0]))[0]
imin = np.argmin(pdf[idx])
xcut = x[idx[0]+imin]
plt.plot(xcut, pdf[idx[0]+imin], 'ro')
ilow = np.argmin(np.abs(x-means[0]))
plt.plot(x[ilow], pdf[ilow], 'bo')
ihigh = np.argmin(np.abs(x-means[1]))
plt.plot(x[ihigh], pdf[ihigh], 'go')
plt.show(block=False)
# set parameters for hard and soft delta thresholds
tmp = np.array([x[ihigh], xcut, x[ilow]])
tmp.sort()
thr_delta1 = tmp[-1] # x[ihigh]; right peak of distribution
thr_delta2 = tmp[1] # trough of distribution
# NREM yes or no according to thresholds
# However, this variable does not directly control whether laser should
# be on or off; whether NREM sleep is really on or off is determined
by nrem_idx; if pnrem_hidden == 1, then all threshold criteria, but not
# sleep history criteria are fulfilled
pnrem_hidden = 0
# if nrem_idx[i] == 1, time point i is NREM
nrem_idx = np.zeros((ntbins,), dtype='int8')
# NREM stays on after thresholds are NOT fulfilled to avoid interruptions by microarousals
grace_period = int(20 / sdt)
# nrem_delay: NREM only starts with some delay
nrem_delay = int(10 / sdt)
grace_count = grace_period
delay_count = nrem_delay
for i in range(ntbins):
if pnrem_hidden == 0:
### Entering NREM:
# Delta power larger than high threshold
if pow_delta[i] > thr_delta1 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
### NOT-NREM -> NREM
pnrem_hidden = 1
nrem_idx[i] = 0
delay_count -= 1
# we are fully in NREM, that's why grace_count is reset:
grace_count = grace_period
else:
### NOT-NREM -> NOT-NREM
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
else:
nrem_idx[i] = 0
else:
### pnrem_hidden == 1
if pow_delta[i] > thr_delta2 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
if delay_count > 0:
delay_count -= 1
nrem_idx[i] = 0
else :
nrem_idx[i] = 1
else:
### Exit NREM -> NOT-NREM
# we are fully out of NREM, so delay_count can be reset:
delay_count = nrem_delay
pnrem_hidden = 0
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
#### figure ##############################################
plt.figure()
t = np.arange(0, sdt * (ntbins - 1) + sdt / 2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq >= 0) & (freq <= 30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
ax1.pcolorfast(t, freq[im], np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412, sharex=ax1)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),)) * thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),)) * thr_delta1, color='red')
ax3.plot(t, np.ones((len(t),)) * thr_delta2, color=[1, 0.6, 0.6])
ax3.plot(t, nrem_idx * thr_delta1, color=[0.6, 0.6, 0.6])
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),)) * thr_th_delta1, color='red')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_nrem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s' + os.linesep) % idf)
fid.write(('ch_alloc: %s' + os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f %.2f' + os.linesep) % (thr_delta1, thr_delta2))
fid.write(('THR_MU: %.2f' + os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f' + os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f' + os.linesep) % std_thdelta)
fid.write(('SF: %.2f' + os.linesep) % sf)
fid.write(('ALPHA: %.2f' + os.linesep) % alpha)
if xemg:
fid.write(('XEMG: %d' + os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
def rem_online_analysis(ppath, recordings, backup='', single_mode=False, fig_file='', overlap=0):
"""
analyze results from closed-loop experiments
:param ppath: base folder
:param recordings: list of strings, recordinds
:param backup: string, potential second backup folder with recordings
:param single_mode: boolean, if True, average across all REM periods (irrespective of mouse)
and plot each single REM period as dot
:param overlap: float between 0 and 100; specifies the percentage by which the online detected REM period has to
overlap with the real (annotated) REM period to be further considered for analysis;
if overlap == 0, then any overlap counts, i.e. this parameter has no influence
:return: df, pd.DataFrame, with control and experimental REM durations as data columns
"""
if type(recordings) != list:
recordings = [recordings]
overlap = overlap / 100.0
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
dur_exp = {m:[] for m in mice}
dur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M,S = load_stateidx(paths[rec], rec)
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for s in seq:
# check true REM sequences overlapping with online detected sequences
isect = np.intersect1d(s, rem_idx)
#print(len(isect)/ len(s))
# test if real REM period s overlaps with online detected REM periods and,
# if yes, make sure that the overlap is at least overlap *100 percent
if len(np.intersect1d(s, rem_idx)) > 0 and float(len(isect)) / len(s) >= overlap:
drn = (s[-1]-s[0]+1)*dt
# does the sequence overlap with laser?
if len(np.intersect1d(isect, laser_idx))>0:
dur_exp[idf].append(drn)
else:
dur_ctr[idf].append(drn)
data = {'exp':[], 'ctr':[]}
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
for m in mice:
data['exp'] += dur_exp[m]
data['ctr'] += dur_ctr[m]
else:
for idf in dur_ctr:
dur_ctr[idf] = np.array(dur_ctr[idf]).mean()
dur_exp[idf] = np.array(dur_exp[idf]).mean()
data['exp'] = np.array(list(dur_exp.values()))
data['ctr'] = np.array(list(dur_ctr.values()))
df = pd.DataFrame({'ctr':pd.Series(data['ctr']), 'exp' : pd.Series(data['exp'])})
# plot everything
if not single_mode:
plt.ion()
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey', label='W/o Laser')
plt.bar([2], [df_mean['exp']], color='blue', label='With laser')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
for (a,b) in zip(df['ctr'], df['exp']):
plt.plot([1,2], [a,b], color='black')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
else:
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey')
plt.bar([2], [df_mean['exp']], color='blue')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
a = df['ctr']
b = df['exp']
plt.plot(np.ones((len(a),)), a, '.', color='black', label='W/o Laser')
plt.plot(2*np.ones((len(b),)), b, '.', color='black', label='With laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return df
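# Example usage (a sketch; recording names are hypothetical and each recording
# folder must contain the online detection output rem_trig_<name>.mat):
#
#   df = rem_online_analysis('/data/sleep_recordings',
#                            ['M1_010101n1', 'M2_010101n1'], overlap=50)
#   # df has columns 'ctr' and 'exp' with REM durations w/o and with laser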
def online_homeostasis(ppath, recordings, backup='', mode=0, single_mode=False, pplot=True, overlap=0, ma_thr=0):
"""
Further analysis of data obtained from closed loop stimulation
Assume the sleep structure looks like this
R R R R W W N N N N N W W N N N N R R R R R
REM_pre -- inter REM ---- REM_post
REM_pre is the duration of the first REM period, inter-REM is everything between REM_pre and the
next REM period REM_post.
The function calculates the inter REM duration after REM periods with laser and after REM periods w/o laser
:param ppath: base folder
:param recordings: list of recording, or file listing
:param backup: backup folder for $ppath
:param mode: mode == 0, calculate complete inter REM duration
mode == 2, only calculate duration of wake in inter REM periods
mode == 3, only calculate duration of NREM in inter REM periods
:param single_mode: consider each single recording, instead of mice
:param overlap: percentage (number between 0 and 100). Defines how much (in percent)
a true (offline annotated) REM period has to overlap with laser
to be considered as REM sleep with laser.
Of note, REM periods w/o laser have to have 0 overlap with laser.
All remaining REM periods are discarded.
:param pplot: if True, plot figure; errorbars show 95% confidence intervals,
calculated using bootstrapping
:param ma_thr: microarousal threshold (s); wake periods shorter than ma_thr are counted as NREM
:return: df, if single_mode == True $df is a pandas DataFrame:
REM iREM laser
mouse - mouse ID
REM - REM duration
iREM - inter REM duration after REM periods with laser
laser - 'y' or 'n'; depending on whether laser was on during REM sleep period (for "REM") or during the
preceding REM sleep period (for "iREM")
if single_mode == False, mouse is the data frame index
"""
if type(recordings) != list:
recordings = [recordings]
if overlap > 0:
overlap = overlap / 100
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
remdur_exp = {m:[] for m in mice}
remdur_ctr = {m:[] for m in mice}
itdur_exp = {m:[] for m in mice}
itdur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(paths[rec], rec)[0]
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat' % rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for (p,q) in zip(seq[0:-1], seq[1:]):
# check if true REM sequences do overlap with online detected sequences
# and only continue working with those:
if len(np.intersect1d(p, rem_idx)) > 0:
drn = (p[-1]-p[0]+1)*dt
it_M = M[p[-1]+1:q[0]]
if mode == 0:
it_drn = len(it_M)*dt
elif mode == 2:
it_drn = len(np.where(it_M==2)[0]) * dt
else:
it_drn = len(np.where(it_M == 3)[0]) * dt
# does the true REM sequence overlap with laser?
# by setting overlap to a value > 0, you can
# set a percentage how much the REM period should overlap with laser
# NEW 08/26/21
if len(np.intersect1d(p, laser_idx)) / len(p) > overlap:
remdur_exp[idf].append(drn)
itdur_exp[idf].append(it_drn)
elif len(np.intersect1d(p, laser_idx)) == 0:
remdur_ctr[idf].append(drn)
itdur_ctr[idf].append(it_drn)
else:
pass
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
data = {'itexp':[], 'itctr':[], 'remexp':[], 'remctr':[]}
for m in mice:
data['itexp'] += itdur_exp[m]
data['itctr'] += itdur_ctr[m]
data['remexp'] += remdur_exp[m]
data['remctr'] += remdur_ctr[m]
df = pd.DataFrame({'REM': data['remexp']+data['remctr'], 'iREM':data['itexp']+data['itctr'], 'laser': ['y']*len(data['remexp']) + ['n']*len(data['remctr'])})
else:
for idf in mice:
itdur_ctr[idf] = np.array(itdur_ctr[idf]).mean()
itdur_exp[idf] = np.array(itdur_exp[idf]).mean()
remdur_ctr[idf] = np.array(remdur_ctr[idf]).mean()
remdur_exp[idf] = np.array(remdur_exp[idf]).mean()
data = {}
for s in ['itexp', 'itctr', 'remexp', 'remctr']:
data[s] = np.zeros((len(mice),))
i = 0
for m in mice:
data['itexp'][i] = itdur_exp[m]
data['itctr'][i] = itdur_ctr[m]
data['remexp'][i] = remdur_exp[m]
data['remctr'][i] = remdur_ctr[m]
i += 1
df = pd.DataFrame({'REM': np.concatenate((data['remexp'], data['remctr'])),
'iREM': np.concatenate((data['itexp'], data['itctr'])),
'laser': ['y']*len(mice) + ['n']*len(mice),
'mouse': mice+mice})
if pplot and not single_mode:
dfm = pd.melt(df, id_vars=['laser', 'mouse'], var_name='state')
sns.set_style('whitegrid')
plt.ion()
plt.figure()
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
if pplot and single_mode:
dfm = pd.melt(df, id_vars=['laser'], var_name='state')
plt.ion()
plt.figure()
sns.set(style="whitegrid")
#sns.swarmplot(data=df[['itctr', 'itexp']], color='black')
#sns.barplot(data=df[['itctr', 'itexp']], palette=['gray', 'blue'], errcolor='black')
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
return df
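# Example usage (a sketch; recording names are hypothetical and each recording
# folder must contain rem_trig_<name>.mat):
#
#   df = online_homeostasis('/data/sleep_recordings',
#                           ['M1_010101n1', 'M2_010101n1'], mode=0, ma_thr=20)
#   # compares REM and inter-REM durations following REM periods with vs. without laser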
### FUNCTIONS USED BY SLEEP_STATE #####################################################
def get_sequences(idx, ibreak=1) :
"""
get_sequences(idx, ibreak=1)
idx - np.vector of indices
@RETURN:
seq - list of np.vectors
"""
diff = idx[1:] - idx[0:-1]
breaks = np.nonzero(diff>ibreak)[0]
breaks = np.append(breaks, len(idx)-1)
seq = []
iold = 0
for i in breaks:
r = list(range(iold, i+1))
seq.append(idx[r])
iold = i+1
return seq
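# Example (a sketch): split the indices of all NREM bins of a hypothetical state
# sequence M into contiguous bouts and compute their durations.
#
#   nrem_bouts = get_sequences(np.where(M == 3)[0])
#   bout_dur = [len(s) * dt for s in nrem_bouts]     # dt = bin size in s, e.g. 2.5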
def threshold_crossing(data, th, ilen, ibreak, m):
"""
seq = threshold_crossing(data, th, ilen, ibreak, m)
"""
if m>=0:
idx = np.where(data>=th)[0]
else:
idx = np.where(data<=th)[0]
# gather sequences
j = 0
seq = []
while (j <= len(idx)-1):
s = [idx[j]]
for k in range(j+1,len(idx)):
if (idx[k] - idx[k-1]-1) <= ibreak:
# add j to sequence
s.append(idx[k])
else:
break
if (s[-1] - s[0]+1) >= ilen and not(s[0] in [i[1] for i in seq]):
seq.append((s[0], s[-1]))
if j == len(idx)-1:
break
j=k
return seq
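# Example (a sketch): find stretches where a hypothetical delta power trace
# p_delta stays above its mean for at least 10 bins, tolerating gaps of up to
# 10 bins (cf. its use in sleep_state below).
#
#   seq = threshold_crossing(p_delta, np.nanmean(p_delta), 10, 10, 1)
#   # seq is a list of (start, end) index tuples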
def closest_precessor(seq, i):
"""
find the preceding element in seq which is closest to i
helper function for sleep_state
"""
tmp = seq - i
d = np.where(tmp<0)[0]
if len(d)>0:
id = seq[d[-1]]
else:
id = 0
return id
def write_remidx(M, K, ppath, name, mode=1) :
"""
write_remidx(M, K, ppath, name)
write the sleep state sequence M and the annotation flags K
to the remidx file of recording $name
"""
if mode == 0 :
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
else :
outfile = os.path.join(ppath, name, 'remidx_' + name + '_corr.txt')
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M[0,:],K)]
f.writelines(s)
f.close()
#######################################################################################
### MANIPULATING FIGURES ##############################################################
def set_fontsize(fs):
import matplotlib
matplotlib.rcParams.update({'font.size': fs})
def set_fontarial():
"""
set Arial as default font
"""
import matplotlib
matplotlib.rcParams['font.sans-serif'] = "Arial"
def save_figure(fig_file):
# alternative way of setting nice fonts:
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
#matplotlib.pylab.savefig(fig_file, dpi=300)
#matplotlib.rcParams['text.usetex'] = False
#matplotlib.rcParams['text.usetex'] = True
plt.savefig(fig_file, bbox_inches="tight", dpi=200)
#matplotlib.rcParams['text.usetex'] = False
def box_off(ax):
"""
similar to Matlab's box off
"""
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#######################################################################################
def sleep_state(ppath, name, th_delta_std=1, mu_std=0, sf=1, sf_delta=3, pwrite=0,
pplot=True, pemg=True, vmax=2.5, pspec_norm=False, use_idx=[]):
"""
automatic sleep state detection based on
delta, theta, sigma, gamma and EMG power.
New: use also sigma band: that's very helpful to classify pre-REM periods
as NREM; otherwise they tend to be classified as wake.
Gamma peaks nicely pick up during microarousals.
My strategy is the following:
I smooth delta band a lot to avoid strong fragmentation of sleep; but to
still pick up microarousals I use the gamma power.
spectrogram data has to be calculated before using calculate_spectrum
Each bin in the spectrogram gets assigned one of four states:
1-REM
2-Wake
3-NREM
0-undef
:param ppath: base folder
:param name: recording name
:param th_delta_std: threshold for theta/delta band is calculated as mean(theta/delta) + th_delta_std*std(theta/delta)
:param mu_std: threshold for EMG power is calculated as mean(EMG) + mu_std * std(EMG)
:param sf: smoothing factor for gamma and sigma power
:param sf_delta: smoothing factor for delta power
:param pwrite: if True, save sleep classification to file remidx_$name.txt
:param pplot: if True, plot figures
:param pemg: if True, use EMG as EMG, otherwise use EEG gamma power instead
:param vmax: float, set maximum of color range of EEG heatmap.
:param pspec_norm: boolean, if True, normalize EEG spectrogram by dividing each frequency band by its mean; only affects
plotting, no effect on sleep state calculation
:param use_idx: list, if not empty, use only given indices to calculate sleep state
:return:
"""
PRE_WAKE_REM = 30.0
# Minimum Duration and Break in
# high theta/delta, high emg, high delta, high sigma and gamma sequences
#
# duration[i,0] is the minimum duration of sequence of state i
# duration[i,1] is maximal break duration allowed in a sequence of state i
duration = np.zeros((5,2))
# high theta/delta
duration[0,:] = [5,15]
# high emg
duration[1,:] = [0, 5]
# high delta
duration[2,:] = [10, 10]
# high sigma
duration[3,:] = [10, 10]
# gamma
duration[4,:] = [0, 5]
# Frequency Bands/Ranges for delta, theta, and, gamma
r_delta = [0.5, 4]
r_sigma = [12, 20]
r_theta = [5,12]
# EMG band
r_mu = [50, 500]
if not pemg:
r_mu = [250, 500]
# high gamma power
r_gamma = [100, 150]
#load EEG and EMG spectrum, calculated by calculate_spectrum
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
if pemg:
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
else:
Q = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
SPEEG = np.squeeze(P['SP'])
if pemg == 1:
SPEMG = np.squeeze(Q['mSP'])
else:
SPEMG = np.squeeze(P['SP'])
if use_idx == []:
use_idx = range(0, SPEEG.shape[1])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
N = len(t)
duration = np.divide(duration,dt)
# get indices for frequency bands
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
i_sigma = np.where((freq >= r_sigma[0]) & (freq <= r_sigma[1]))[0]
i_gamma = np.where((freq >= r_gamma[0]) & (freq <= r_gamma[1]))[0]
p_delta = smooth_data( SPEEG[i_delta,:].mean(axis=0), sf_delta )
p_theta = smooth_data( SPEEG[i_theta,:].mean(axis=0), 0 )
# now filtering for EMG to pick up microarousals
p_mu = smooth_data( SPEMG[i_mu,:].mean(axis=0), sf )
p_sigma = smooth_data( SPEEG[i_sigma,:].mean(axis=0), sf )
p_gamma = smooth_data( SPEEG[i_gamma,:].mean(axis=0), 0 )
th_delta = np.divide(p_theta, p_delta)
#th_delta = smooth_data(th_delta, 2);
seq = {}
seq['high_theta'] = threshold_crossing(th_delta, np.nanmean(th_delta[use_idx])+th_delta_std*np.nanstd(th_delta[use_idx]),
duration[0,1], duration[0,1], 1)
seq['high_emg'] = threshold_crossing(p_mu, np.nanmean(p_mu[use_idx])+mu_std*np.nanstd(p_mu[use_idx]),
duration[1,0], duration[1,1], 1)
seq['high_delta'] = threshold_crossing(p_delta, np.nanmean(p_delta[use_idx]), duration[2,0], duration[2,1], 1)
seq['high_sigma'] = threshold_crossing(p_sigma, np.nanmean(p_sigma[use_idx]), duration[3,0], duration[3,1], 1)
seq['high_gamma'] = threshold_crossing(p_gamma, np.nanmean(p_gamma[use_idx]), duration[4,0], duration[4,1], 1)
# Sleep-State Rules
idx = {}
for k in seq:
tmp = [list(range(i,j+1)) for (i,j) in seq[k]]
# no idea why this works to flatten a list
# idx[k] = sum(tmp, [])
# alternative that I understand:
if len(tmp) == 0:
idx[k] = np.array([])
else:
idx[k] = np.array(reduce(lambda x,y: x+y, tmp))
idx['low_emg'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_emg']))
idx['low_delta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_delta']))
idx['low_theta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_theta']))
#REM Sleep: thdel up, emg down, delta down
a = np.intersect1d(idx['high_theta'], idx['low_delta'])
# non high_emg phases
b = np.setdiff1d(a, idx['high_emg'])
rem = get_sequences(b, duration[0,1])
rem_idx = reduce(lambda x,y: np.concatenate((x,y)), rem)
# SWS Sleep
# delta high, no theta, no emg
a = np.setdiff1d(idx['high_delta'], idx['high_emg']) # no emg activation
b = np.setdiff1d(a, idx['high_theta']) # no theta;
sws = get_sequences(b)
sws_idx = reduce(lambda x,y: np.concatenate((x,y)), sws)
#print a
# Wake
# low delta + high emg and not rem
a = np.unique(np.union1d(idx['low_delta'], idx['high_emg']))
b = np.setdiff1d(a, rem_idx)
wake = get_sequences(b)
wake_idx = reduce(lambda x,y: np.concatenate((x,y)), wake)
# sequences with low delta, high sigma and low emg are NREM
a = np.intersect1d(np.intersect1d(idx['high_sigma'], idx['low_delta']), idx['low_emg'])
a = np.setdiff1d(a, rem_idx)
sws_idx = np.unique(np.union1d(a, sws_idx))
wake_idx = np.setdiff1d(wake_idx, a)
#NREM sequences with high gamma are wake
a = np.intersect1d(sws_idx, idx['high_gamma'])
sws_idx = np.setdiff1d(sws_idx, a)
wake_idx = np.unique(np.union1d(wake_idx,a))
# Wake and Theta
wake_motion_idx = np.intersect1d(wake_idx, idx['high_theta'])
# Wake w/o Theta
wake_nomotion_idx = np.setdiff1d(wake_idx, idx['low_theta'])
# Are there overlapping sequences?
a = np.intersect1d(np.intersect1d(rem_idx, wake_idx), sws_idx)
# Are there undefined sequences?
undef_idx = np.setdiff1d(np.setdiff1d(np.setdiff1d(np.arange(0,N), rem_idx), wake_idx), sws_idx)
# Wake wins over SWS
sws_idx = np.setdiff1d(sws_idx, wake_idx)
# Special rules
# if there's a REM sequence directly following a short wake sequence (PRE_WAKE_REM),
# this wake sequence goes to SWS
# NREM to REM transitions are sometimes mistaken as quiet wake periods
for rem_seq in rem:
if len(rem_seq) > 0:
irem_start = rem_seq[0]
# is there wake in the preceding bin?
if irem_start-1 in wake_idx:
# get the closest sws bin in the preceding history
isws_end = closest_precessor(sws_idx, irem_start)
if (irem_start - isws_end)*dt < PRE_WAKE_REM:
new_rem = np.arange(isws_end+1,irem_start)
rem_idx = np.union1d(rem_idx, new_rem)
wake_idx = np.setdiff1d(wake_idx, new_rem)
else:
new_wake = rem_seq
wake_idx = np.union1d(wake_idx, new_wake)
rem_idx = np.setdiff1d(rem_idx, new_wake)
# two different representations for the results:
S = {}
S['rem'] = rem_idx
S['nrem'] = sws_idx
S['wake'] = wake_idx
S['awake'] = wake_motion_idx
S['qwake'] = wake_nomotion_idx
M = np.zeros((N,))
if len(rem_idx) > 0:
M[rem_idx] = 1
if len(wake_idx) > 0:
M[wake_idx] = 2
if len(sws_idx) > 0:
M[sws_idx] = 3
if len(undef_idx) > 0:
M[undef_idx] = 0
# write sleep annotation to file
if pwrite:
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
print("writing annotation to %s" % outfile)
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M,np.zeros((N,)))]
f.writelines(s)
f.close()
# nice plotting
plt.ion()
if pplot:
plt.figure(figsize=(18,9))
axes1=plt.axes([0.1, 0.9, 0.8, 0.05])
A = np.zeros((1,len(M)))
A[0,:] = M
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,0,0], [0,1,1],[0.5,0,1], [0.8, 0.8, 0.8]], 4)
#tmp = axes1.imshow(A, vmin=0, vmax=3)
tmp = axes1.pcolorfast(t, [0,1], A, vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes1.axis('tight')
tmp.axes.get_xaxis().set_visible(False)
tmp.axes.get_yaxis().set_visible(False)
box_off(axes1)
# show spectrogram
axes2=plt.axes([0.1, 0.75, 0.8, 0.1], sharex=axes1)
ifreq = np.where(freq <= 30)[0]
med = np.median(SPEEG.max(axis=0))
if pspec_norm:
ifreq = np.where(freq <= 80)[0]
filt = np.ones((6, 1))
filt = filt / np.sum(filt)
SPEEG = scipy.signal.convolve2d(SPEEG, filt, mode='same')
spec_mean = SPEEG.mean(axis=1)
SPEEG = np.divide(SPEEG, np.repeat([spec_mean], SPEEG.shape[1], axis=0).T)
med = np.median(SPEEG.max(axis=0))
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax = med*vmax, cmap='jet')
else:
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax=med * vmax, cmap='jet')
axes2.axis('tight')
plt.ylabel('Freq (Hz)')
box_off(axes2)
# show delta power
axes3=plt.axes([0.1, 0.6, 0.8, 0.1], sharex=axes2)
axes3.plot(t,p_delta, color='gray')
plt.ylabel('Delta (a.u.)')
plt.xlim((t[0], t[-1]))
seq = get_sequences(S['nrem'])
#for s in seq:
# plt.plot(t[s],p_delta[s], color='red')
s = idx['high_delta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_delta[s], color='red')
box_off(axes3)
axes4=plt.axes([0.1, 0.45, 0.8, 0.1], sharex=axes3)
axes4.plot(t,p_sigma, color='gray')
plt.ylabel('Sigma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_sigma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_sigma[s], color='red')
box_off(axes4)
axes5=plt.axes([0.1, 0.31, 0.8, 0.1], sharex=axes4)
axes5.plot(t,th_delta, color='gray')
plt.ylabel('Th/Delta (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_theta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],th_delta[s], color='red')
box_off(axes5)
axes6=plt.axes([0.1, 0.17, 0.8, 0.1], sharex=axes5)
axes6.plot(t,p_gamma, color='gray')
plt.ylabel('Gamma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_gamma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_gamma[s], color='red')
box_off(axes6)
axes7=plt.axes([0.1, 0.03, 0.8, 0.1], sharex=axes6)
axes7.plot(t,p_mu, color='gray')
plt.xlabel('Time (s)')
plt.ylabel('EMG (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_emg']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_mu[s], color='red')
box_off(axes7)
plt.show()
# 2nd figure showing distribution of different bands
plt.figure(figsize=(20,3))
axes1 = plt.axes([0.05, 0.1, 0.13, 0.8])
plt.hist(p_delta, bins=100)
plt.plot(np.nanmean(p_delta), 10, 'ro')
plt.title('delta')
plt.ylabel('# Occurrences')
box_off(axes1)
axes1 = plt.axes([0.25, 0.1, 0.13, 0.8])
plt.hist(th_delta, bins=100)
plt.plot(np.nanmean(th_delta)+th_delta_std*np.nanstd(th_delta), 10, 'ro')
plt.title('theta/delta')
box_off(axes1)
axes1 = plt.axes([0.45, 0.1, 0.13, 0.8])
plt.hist(p_sigma, bins=100)
plt.plot(np.nanmean(p_sigma), 10, 'ro')
plt.title('sigma')
box_off(axes1)
axes1 = plt.axes([0.65, 0.1, 0.13, 0.8])
plt.hist(p_gamma, bins=100)
plt.plot(np.nanmean(p_gamma), 10, 'ro')
plt.title('gamma')
box_off(axes1)
axes1 = plt.axes([0.85, 0.1, 0.13, 0.8])
plt.hist(p_mu, bins=100)
plt.plot(np.nanmean(p_mu)+np.nanstd(p_mu), 10, 'ro')
plt.title('EMG')
plt.show(block=False)
box_off(axes1)
plt.show()
return M,S
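# Illustrative call (hypothetical base folder and recording name; requires that the spectrogram
# files sp_$name.mat / msp_$name.mat were generated beforehand by calculate_spectrum):
#   M, S = sleep_state('/data/Recordings', 'M1_010121n1', pwrite=1, pplot=False)
# M is the per-bin state vector (0=undef, 1=REM, 2=Wake, 3=NREM) that would be written to
# remidx_M1_010121n1.txt; S maps state names ('rem', 'nrem', 'wake', ...) to bin indices.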
def plot_hypnograms(ppath, recordings, tbin=0, unit='h', ma_thr=20, title='', tstart=0, tend=-1):
"""
plot all hypnograms specified in @recordings
:param ppath: base folder
:param recordings: list of recordings
:param tbin: tbin for xticks
:param unit: time unit; h - hour, min - minute, s - second
:param ma_thr: float, wake periods shorter than $ma_thr are considered as microarousals and further converted to NREM
:param tstart: float, start time point (in seconds!) of hypnograms
:param tend: float, last shown time point (in seconds!)
:param title: optional title for figure
"""
recordings = recordings[::-1]
sr = get_snr(ppath, recordings[0])
nbin = int(np.round(sr) * 2.5)
dt_sec = (1.0 / sr) * nbin
istart = int(np.round(tstart/dt_sec))
dt = dt_sec
if unit == 'h':
dt /= 3600
elif unit == 'min':
dt /= 60
rec_len = dict()
irec = 0
ny = (1.0-0.2) / len(recordings)
dy = ny * 0.75
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.5, 0, 1], [0.8, 0.8, 0.8]], 4)
plt.ion()
plt.figure(figsize=(9,4))
axes = []
for rec in recordings:
M,K = load_stateidx(ppath, rec)
#kcut = np.where(K<0)[0]
#M = M[kcut]
#M[kcut] = 0
if tend == -1:
iend = len(M)
else:
iend = int(tend/dt_sec)
M = M[istart:iend]
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt_sec <= ma_thr:
M[s] = 3
rec_len[rec] = len(M)*dt
t = np.arange(0, len(M))*dt
ax = plt.axes([0.05, ny*irec+0.15, 0.75, dy])
tmp = ax.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3, cmap=my_map)
box_off(ax)
ax.axis('tight')
tmp.axes.get_yaxis().set_visible(False)
if irec > 0:
tmp.axes.get_xaxis().set_visible(False)
if irec == 0:
plt.xlabel('Time (%s)' % unit)
irec += 1
axes.append(ax)
if len(title) > 0:
plt.title(title)
max_dur = max(rec_len.values())
if tbin > 0:
xtick = np.arange(0, max_dur, tbin)
for (ax, rec) in zip(axes, recordings):
ax.set_xlim([0, max_dur])
if tbin > 0:
ax.set_xticks(xtick)
ax.text(max_dur+max_dur*0.01, 0.5, rec)
plt.show()
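# Example (hypothetical recording names): stack the hypnograms of two sessions, relabel wake
# periods shorter than 20 s as NREM, and place x-ticks every hour:
#   plot_hypnograms('/data/Recordings', ['M1_010121n1', 'M2_010121n1'], tbin=1, unit='h', ma_thr=20)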
def plot_swa(ppath, name, delta_win, alpha, band=[0.5, 4.5], swa_yrange=[]):
"""
plot slow wave (delta) activity during NREM
The top plot shows the hypnogram.
The middle plot shows the delta power (irrespective of brain state) as a line plot.
The bottom plot shows, for consecutive $delta_win seconds long bins, the
median delta power (SWA) during NREM, if the ratio of NREM during the
corresponding bin is >= $alpha.
Example call:
dm=plot_swa(ppath, name, 30, 0.5, swa_yrange=[0, 0.012])
:param ppath, name: basefolder, recording name
:param delta_win: plot the median SWA value for each consecutive $delta_win seconds long window
:param alpha: only use a window if the ratio of NREM within it is >= $alpha (value between 0 and 1)
:param swa_yrange: tuple, minimum and maximum value of yrange for SWA
:return df: pd.DataFrame with SWA time points and corresponding median SWA values
"""
r_delta = band
sr = get_snr(ppath, name)
nbin = int(np.round(2.5*sr))
dt = nbin*(1.0/sr)
M,_ = load_stateidx(ppath, name)
t = np.arange(0, len(M))*dt
P = so.loadmat(os.path.join(ppath, name, 'sp_%s.mat' % name), squeeze_me=True)
SP = P['SP']
freq = P['freq']
df = freq[1]-freq[0]
idelta = np.where((freq>=r_delta[0]) & (freq<=r_delta[1]))[0]
pow_delta = SP[idelta,:].sum(axis=0)*df
# get NREM sequences contributing points for fitting
iwin = int(delta_win/dt)
#seq = get_sequences(nrem_idx, ibreak=int((delta_win/dt)*0.1))
delta_med = []
for j in range(0, len(M)-iwin, iwin):
s = range(j, j+iwin)
sc = j+int(iwin/2)
Mcut = M[s]
if (1.0*len(np.where(Mcut==3)[0])) / len(s) >= alpha:
i = np.where(Mcut==3)[0]
i = i+s[0]
a = np.median(pow_delta[i])
delta_med.append((t[sc],a))
df = pd.DataFrame(columns=['time', 'pow'], data=delta_med)
# generate figure
# show brainstate
plt.ion()
plt.figure(figsize=(10, 4))
axes_brs = plt.axes([0.1, 0.85, 0.8, 0.1])
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.6, 0, 1], [0.8, 0.8, 0.8]], 4)
tmp = axes_brs.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes_brs.axis('tight')
axes_brs.axes.get_xaxis().set_visible(False)
axes_brs.axes.get_yaxis().set_visible(False)
axes_brs.spines["top"].set_visible(False)
axes_brs.spines["right"].set_visible(False)
axes_brs.spines["bottom"].set_visible(False)
axes_brs.spines["left"].set_visible(False)
# plot delta power as function of time
c = 1000**2
axes_tdelta = plt.axes([0.1, 0.55, 0.8, 0.2], sharex=axes_brs)
plt.plot(t, pow_delta/c, 'k')
box_off(axes_tdelta)
axes_tdelta.axes.get_xaxis().set_visible(False)
axes_tdelta.spines["bottom"].set_visible(False)
plt.ylabel('SWA (mV$\mathrm{^2}$)')
# plot delta power medians
axes_delta = plt.axes([0.1, 0.12, 0.8, 0.35], sharex=axes_brs)
for (s,delta) in delta_med:
plt.plot(s, delta/c, 'ko')
print(t)
plt.xlim((t[0], t[-1]))
box_off(axes_delta)
plt.xlabel('Time (s)')
plt.ylabel('NREM SWA (mV$\mathrm{^2}$)')
if swa_yrange == []:
ymax = df['pow'].max()/c
plt.ylim([0, ymax+0.1*ymax])
else:
plt.ylim(swa_yrange)
plt.show()
return df
def laser_triggered_eeg(ppath, name, pre, post, f_max, pnorm=2, pplot=False, psave=False, tstart=0, tend=-1,
peeg2=False, vm=2.5, prune_trials=True, mu=[10, 200], trig_state=0, harmcs=0, iplt_level=1):
"""
calculate laser triggered, averaged EEG and EMG spectrum
:param ppath: base folder containing mouse recordings
:param name: recording
:param pre: time before laser
:param post: time after laser
:param f_max: calculate/plot frequencies up to frequency f_max
:param pnorm: normalization,
pnorm = 0, no normalization
pnorm = 1, normalize each frequency band by its average power
pnorm = 2, normalize each frequency band by the average power
during the preceding baseline period
:param vm: float to set saturation level of colormap
:param pplot: plot figure yes=True, no=False
:param psave: save the figure, yes=True, no = False
:param tstart: float, starting time point. Only laser trials after tstart will be considered
:param tend: float, only laser trials up to tend will be considered; if tend==-1, use whole recording
:param peeg2: if True, use EEG channel 2
:param prune_trials: if True, throw out trials with EEG or EMG artifacts
:param mu: tuple; range used for EMG amplitude calculation
:param trig_state: int; if > 0, only use trials where brain is at laser onset in brainstate trig_state
1=REM, 2=Wake, 3=NREM
:param harmcs: if >0, interpolate all frequencies corresponding to multiples of harmcs by the average power
of the two neighboring frequencies.
:param iplt_level: options - 1 or 2. If 1 only take one neighboring frequency above and below the harmonic;
if 2, take 2 neighbors above and below, respectively
"""
def _interpolate_harmonics(SP, freq, f_max, harmcs, iplt_level):
df = freq[2]-freq[1]
for h in np.arange(harmcs, f_max, harmcs):
i = np.argmin(np.abs(freq - h))
if np.abs(freq[i] - h) < df and h != 60:
if iplt_level == 2:
SP[i,:] = (SP[i-2:i,:] + SP[i+1:i+3,:]).mean(axis=0) * 0.5
else:
SP[i,:] = (SP[i-1,:] + SP[i+1,:]) * 0.5
return SP
SR = get_snr(ppath, name)
NBIN = np.round(2.5*SR)
lsr = load_laser(ppath, name)
idxs, idxe = laser_start_end(lsr)
laser_dur = np.mean((idxe-idxs)/SR)
print('%s: Average laser duration: %f; Number of trials %d' % (name, laser_dur, len(idxs)))
# downsample EEG time to spectrogram time
idxs = [int(i/NBIN) for i in idxs]
idxe = [int(i/NBIN) for i in idxe]
#load EEG and EMG
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
if not peeg2:
SPEEG = np.squeeze(P['SP'])
else:
SPEEG = np.squeeze(P['SP2'])
SPEMG = np.squeeze(Q['mSP'])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
ifreq = np.where(freq<=f_max)[0]
ipre = int(np.round(pre/dt))
ipost = int(np.round(post/dt))
speeg_mean = SPEEG.mean(axis=1)
spemg_mean = SPEMG.mean(axis=1)
# interpolate frequencies corresponding to harmonics of $harmcs
if harmcs > 0:
SPEEG = _interpolate_harmonics(SPEEG, freq, f_max, harmcs, iplt_level)
SPEMG = _interpolate_harmonics(SPEMG, freq, f_max, harmcs, iplt_level)
if tend > -1:
i = np.where((np.array(idxs)*dt >= tstart) & (np.array(idxs)*dt <= tend))[0]
else:
i = np.where(np.array(idxs)*dt >= tstart)[0]
idxs = [idxs[j] for j in i]
idxe = [idxe[j] for j in i]
skips = []
skipe = []
if prune_trials:
for (i,j) in zip(idxs, idxe):
A = SPEEG[0,i-ipre:i+ipost+1] / speeg_mean[0]
B = SPEMG[0,i-ipre:i+ipost+1] / spemg_mean[0]
k = np.where(A >= np.median(A)*50)[0]
l = np.where(B >= np.median(B)*500)[0]
if len(k) > 0 or len(l) > 0:
skips.append(i)
skipe.append(j)
print("%s: kicking out %d trials" % (name, len(skips)))
idxs_new = []
idxe_new = []
for (i,j) in zip(idxs, idxe):
if not i in skips:
idxs_new.append(i)
idxe_new.append(j)
idxs = idxs_new
idxe = idxe_new
# select trials where brain state is right before laser in trig_state
if trig_state > 0:
idxs_new = []
idxe_new = []
M = load_stateidx(ppath, name)[0]
for (i,j) in zip(idxs, idxe):
if i < len(M) and M[i] == trig_state:
idxs_new.append(i)
idxe_new.append(j)
idxs = idxs_new
idxe = idxe_new
# Spectrogram for EEG and EMG normalized by average power in each frequency band
if pnorm == 1:
SPEEG = np.divide(SPEEG, np.repeat(speeg_mean, len(t)).reshape(len(speeg_mean), len(t)))
SPEMG = np.divide(SPEMG, np.repeat(spemg_mean, len(t)).reshape(len(spemg_mean), len(t)))
speeg_parts = []
spemg_parts = []
for (i,j) in zip(idxs, idxe):
if i>=ipre and j+ipost < len(t):
eeg_part = SPEEG[ifreq,i-ipre:i+ipost+1]
speeg_parts.append(eeg_part)
spemg_parts.append(SPEMG[ifreq,i-ipre:i+ipost+1])
EEGLsr = np.array(speeg_parts).mean(axis=0)
EMGLsr = np.array(spemg_parts).mean(axis=0)
# smooth spectrogram
nfilt = 3
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
EEGLsr = scipy.signal.convolve2d(EEGLsr, filt, boundary='symm', mode='same')
if pnorm == 2:
for i in range(EEGLsr.shape[0]):
EEGLsr[i,:] = np.divide(EEGLsr[i,:], np.sum(np.abs(EEGLsr[i,0:ipre]))/(1.0*ipre))
EMGLsr[i,:] = np.divide(EMGLsr[i,:], np.sum(np.abs(EMGLsr[i,0:ipre]))/(1.0*ipre))
# get time axis
dt = (1.0/SR)*NBIN
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
f = freq[ifreq]
if pplot:
# get rid of boxes around matplotlib plots
def box_off(ax):
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.ion()
plt.figure(figsize=(10,8))
ax = plt.axes([0.1, 0.55, 0.4, 0.35])
plt.pcolormesh(t,f,EEGLsr, vmin=0, vmax=np.median(EEGLsr)*vm, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EEG', fontsize=12)
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.62, 0.55, 0.35, 0.35])
ilsr = np.where((t>=0) & (t<=120))[0]
plt.plot(f,EEGLsr[:,0:ipre].mean(axis=1), color='gray', label='baseline', lw=2)
plt.plot(f,EEGLsr[:,ilsr].mean(axis=1), color='blue', label='laser', lw=2)
box_off(ax)
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power (uV^2)')
#plt.legend(loc=0)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, borderaxespad=0.)
ax = plt.axes([0.1, 0.1, 0.4, 0.35])
plt.pcolormesh(t,f,EMGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EMG', fontsize=12)
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power (uV^2s)')
ax = plt.axes([0.62, 0.1, 0.35, 0.35])
mf = np.where((f>=mu[0]) & (f <= mu[1]))[0]
df = f[1]-f[0]
# amplitude is square root of (integral over each frequency)
avg_emg = np.sqrt(EMGLsr[mf,:].sum(axis=0)*df)
m = np.max(avg_emg)*1.5
plt.plot([0,0], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.plot([laser_dur,laser_dur], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.xlim((t[0], t[-1]))
plt.ylim((0,m))
plt.plot(t,avg_emg, color='black', lw=2)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. (uV)')
plt.show()
if psave:
img_file = os.path.join(ppath, name, 'fig_'+name+'_spec.png')
save_figure(img_file)
return EEGLsr, EMGLsr, freq[ifreq], t
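# Note on pnorm=2 (baseline normalization), written out as a sketch: for each frequency f the
# laser-triggered spectrogram is divided by its mean power over the pre-laser window,
#   EEGLsr[f, t] <- EEGLsr[f, t] / mean(|EEGLsr[f, 0:ipre]|)
# so values > 1 indicate more power than baseline. Illustrative call (hypothetical recording name):
#   EEG, EMG, f, t = laser_triggered_eeg('/data/Recordings', 'M1_010121n1', 120, 240, 20,
#                                        pnorm=2, pplot=True)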
def laser_triggered_eeg_avg(ppath, recordings, pre, post, f_max, laser_dur, pnorm=1, pplot=1, tstart=0, tend=-1,
vm=[], cb_ticks=[], mu=[10, 100], trig_state=0, harmcs=0, iplt_level=1, peeg2=False, fig_file=''):
"""
calculate average spectrogram for all recordings listed in @recordings; for averaging take
mouse identity into account
:param ppath: base folder
:param recordings: list of recordings
:param pre: time before laser onset
:param post: time after laser onset
:param f_max: maximum frequency shown for EEG spectrogram
:param laser_dur: duration of laser stimulation
:param pnorm: normalization,
pnorm = 0, no normalization
pnorm = 1, normalize each frequency band by its average power
pnorm = 2, normalize each frequency band by the average power
during the preceding baseline period
:param pplot: pplot==0 - no figure;
pplot==1 - conventional figure;
pplot==2 - pretty figure showing EEG spectrogram
along with EMG amplitude
note: the errorbar for the EMG amplitude is the S.E.M.
:param tstart: only consider laser trials with laser onset after tstart seconds
:param tend: only consider laser trials with laser onset before tend seconds
:param vm: saturation of heatmap for EEG spectrogram
:param cb_ticks: ticks for colorbar (only applies for pplot==2)
:param mu: frequencies for EMG amplitude calculation
:param trig_state: if > 0, only use trials where brain is at laser onset in brainstate trig_state
1=REM, 2=Wake, 3=NREM
:param peeg2: if True, use EEG2 instead of EEG
:param harmcs: if >0, interpolate all frequencies corresponding to multiples of harmcs by the average power
of the two neighboring frequencies.
:param iplt_level: options - 1 or 2. If 1 only take one neighboring frequency above and below the harmonic;
if 2, take 2 neighbors above and below, respectively
:param fig_file: if specified, save figure to given file
:return:
t, f, EEGSpec, EMGSpec, EEGLsr
t - time axis
f - frequency axis
EEGSpec - dict with mouse id -> 2D np.array(frequency x time)
EMGSpec - dict with mouse id -> 2D np.array(frequency x time)
EEGLsr - 2D np.array(frequency x time)
"""
EEGSpec = {}
EMGSpec = {}
mice = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not(idf in mice):
mice.append(idf)
EEGSpec[idf] = []
EMGSpec[idf] = []
for rec in recordings:
idf = re.split('_', rec)[0]
EEG, EMG, f, t = laser_triggered_eeg(ppath, rec, pre, post, f_max, mu=mu, pnorm=pnorm, pplot=False,
psave=False, tstart=tstart, tend=tend, trig_state=trig_state,
peeg2=peeg2, harmcs=harmcs, iplt_level=iplt_level)
EEGSpec[idf].append(EEG)
EMGSpec[idf].append(EMG)
for idf in mice:
EEGSpec[idf] = np.array(EEGSpec[idf]).mean(axis=0)
EMGSpec[idf] = np.array(EMGSpec[idf]).mean(axis=0)
EEGLsr = np.array([EEGSpec[k] for k in mice]).mean(axis=0)
EMGLsr = np.array([EMGSpec[k] for k in mice]).mean(axis=0)
mf = np.where((f >= mu[0]) & (f <= mu[1]))[0]
if harmcs > 0:
harm_freq = np.arange(0, f.max(), harmcs)
for h in harm_freq:
mf = np.setdiff1d(mf, mf[np.where(f[mf]==h)[0]])
df = f[1] - f[0]
EMGAmpl = np.zeros((len(mice), EEGLsr.shape[1]))
i=0
for idf in mice:
# amplitude is square root of (integral over each frequency)
if harmcs == 0:
EMGAmpl[i,:] = np.sqrt(EMGSpec[idf][mf,:].sum(axis=0)*df)
else:
tmp = 0
for qf in mf:
tmp += EMGSpec[idf][qf,:] * (f[qf] - f[qf-1])
EMGAmpl[i,:] = np.sqrt(tmp)
i += 1
avg_emg = EMGAmpl.mean(axis=0)
sem_emg = EMGAmpl.std(axis=0) / np.sqrt(len(mice))
if pplot==1:
plt.ion()
plt.figure(figsize=(12,10))
ax = plt.axes([0.1, 0.55, 0.4, 0.4])
if len(vm) == 2:
plt.pcolormesh(t,f,EEGLsr, cmap='jet', vmin=vm[0], vmax=vm[1])
else:
plt.pcolormesh(t, f, EEGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EEG')
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.6, 0.55, 0.3, 0.4])
ipre = np.where(t<0)[0]
ilsr = np.where((t>=0) & (t<=120))[0]
plt.plot(f,EEGLsr[:,ipre].mean(axis=1), color='gray', label='baseline', lw=2)
plt.plot(f,EEGLsr[:,ilsr].mean(axis=1), color='blue', label='laser', lw=2)
box_off(ax)
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power (uV^2)')
#plt.legend(loc=0)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, borderaxespad=0.)
ax = plt.axes([0.1, 0.05, 0.4, 0.4])
plt.pcolormesh(t,f,EMGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EMG')
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.6, 0.05, 0.3, 0.4])
m = np.max(avg_emg)*1.5
plt.plot([0,0], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.plot([laser_dur,laser_dur], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.xlim((t[0], t[-1]))
plt.ylim((0,m))
plt.plot(t,avg_emg, color='black', lw=2)
plt.fill_between(t, avg_emg-sem_emg, avg_emg+sem_emg)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. (uV)')
plt.show()
elif pplot==2:
# pretty figure
plt.figure()
if len(vm) > 0:
cb_ticks = vm
# plot EEG spectrogram
axes_cbar = plt.axes([0.8, 0.75, 0.1, 0.2])
ax = plt.axes([0.1, 0.55, 0.75, 0.4])
if len(vm) == 2:
im=ax.pcolorfast(t, f, EEGLsr, cmap='jet', vmin=vm[0], vmax=vm[1])
else:
im = ax.pcolorfast(t, f, EEGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
# colorbar for EEG spectrogram
cb = plt.colorbar(im, ax=axes_cbar, pad=0.0, aspect=10.0, orientation='vertical')
if pnorm > 0:
cb.set_label('Rel. Power')
else:
cb.set_label('Power (uV^2s)')
cb.ax.xaxis.set_ticks_position("bottom")
cb.ax.xaxis.set_label_position('top')
if len(cb_ticks) > 0:
cb.set_ticks(cb_ticks)
axes_cbar.set_alpha(0.0)
axes_cbar.spines["top"].set_visible(False)
axes_cbar.spines["right"].set_visible(False)
axes_cbar.spines["bottom"].set_visible(False)
axes_cbar.spines["left"].set_visible(False)
axes_cbar.axes.get_xaxis().set_visible(False)
axes_cbar.axes.get_yaxis().set_visible(False)
# EMG amplitude
ax = plt.axes([0.1, 0.1, 0.75, 0.3])
m = np.max(avg_emg) * 1.5
ax.add_patch(patches.Rectangle((0, 0), laser_dur, np.max(avg_emg)*1.5, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
plt.xlim((t[0], t[-1]))
plt.ylim((0, m))
plt.fill_between(t, avg_emg-sem_emg, avg_emg+sem_emg, color='gray', zorder=2)
plt.plot(t, avg_emg, color='black', lw=2)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. ($\mathrm{\mu V}$)')
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return t, f, EEGSpec, EMGSpec, EEGLsr
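# The EMG amplitude curve plotted above is, per mouse, the square root of the EMG power
# integrated over the mu band: EMGAmpl(t) = sqrt( sum_{f in [mu[0], mu[1]]} SPEMG(f, t) * df ).
# Illustrative call (hypothetical recording names, 120 s laser pulses):
#   t, f, EEGSpec, EMGSpec, EEGLsr = laser_triggered_eeg_avg('/data/Recordings',
#       ['M1_010121n1', 'M2_010121n1'], 120, 240, 20, laser_dur=120, pnorm=2, pplot=2)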
def laser_brainstate(ppath, recordings, pre, post, pplot=True, fig_file='', start_time=0, end_time=-1,
ma_thr=0, edge=0, sf=0, cond=0, single_mode=False, ci=95, backup='', csv_file=''):
"""
calculate laser triggered probability of REM, Wake, NREM
ppath - base folder holding all recording
recordings - list of recording
pre - time before laser onset
post - time after laser onset
@Optional:
pplot - pplot==True: plot figure
fig_file - specify filename including ending, if you wish to save figure
start_time - in [s], only consider laser onsets starting after $start_time
end_time - in [s], only consider laser onsets starting before $end_time
sf - smoothing factor for Gaussian kernel; if sf=0, no filtering
edge - only use if $sf > 0: to avoid smoothing artifacts, set edge to a value > 0, e.g. 20
ma_thr - if > 0, smooth out microarousals with duration < $ma_thr
cond - cond==0: consider all trials; cond==[1,2,3] only plot trials,
where mouse was in REM, Wake, or NREM as laser turned on
single_mode - if True, plot every single mouse
backup - optional backup folder; if specified each single recording folder can be either on $ppath or $backup;
if it's on both folders, the version in ppath is used
ci - string; possible values: 'sem', 'sd', or a value between 0 and 100, corresponding
to the bootstrapped confidence interval. The default is ci=95
csv_file - if filename (without or including full file path) is provided,
save pd.DataFrame df (see @Return) to csv file
@Return:
df_timecourse: pd.DataFrame with columns: mouse, time, perc, state.
df: pd.DataFrame with columns Mouse, REM, NREM, Wake, Lsr
Lsr has three values: 0 - before laser, 1 - during laser, 2 - after laser
if laser was on for laser_dur s, then
df[df['Lsr'] == 1]['REM'] is the average % of REM sleep during laser stimulation for each mouse
df[df['Lsr'] == 0]['REM'] is the average % of REM sleep
during the laser_dur s long time interval preceding laser onset.
df[df['Lsr'] == 2]['REM'] is the average during the time inverval of duration laser_dur that
directly follows laser stimulation
"""
if type(recordings) != list:
recordings = [recordings]
rec_paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
rec_paths[rec] = ppath
else:
rec_paths[rec] = backup
pre += edge
post += edge
BrainstateDict = {}
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
BrainstateDict[idf] = []
if not idf in mouse_order:
mouse_order.append(idf)
nmice = len(BrainstateDict)
for rec in recordings:
ppath = rec_paths[rec]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
dt = NBIN * 1.0/SR
istart_time = int(np.round(start_time / dt))
M = load_stateidx(ppath, rec)[0]
if end_time == -1:
iend_time = len(M)
else:
iend_time = int(np.round(end_time / dt))
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
(idxs, idxe) = laser_start_end(load_laser(ppath, rec))
idf = re.split('_', rec)[0]
ipre = int(np.round(pre/dt))
ipost = int(np.round(post/dt))
idxs = [int(i/NBIN) for i in idxs]
idxe = [int(i/NBIN) for i in idxe]
laser_dur = np.mean((np.array(idxe) - np.array(idxs))) * dt
for (i,j) in zip(idxs, idxe):
if i>=ipre and i+ipost<=len(M)-1 and i>istart_time and i < iend_time:
bs = M[i-ipre:i+ipost+1]
BrainstateDict[idf].append(bs)
# I assume here that every recording has same dt
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
# first time point where the laser was fully on (during the complete bin).
izero = np.where(t>0)[0][0]
# the first time bin overlapping with laser is then
izero -= 1
# @BS: mouse x time x state
BS = np.zeros((nmice, len(t), 3))
Trials = []
imouse = 0
for mouse in mouse_order:
if cond==0:
M = np.array(BrainstateDict[mouse])
Trials.append(M)
for state in range(1,4):
C = np.zeros(M.shape)
C[np.where(M==state)] = 1
BS[imouse,:,state-1] = C.mean(axis=0)
if cond>0:
M = BrainstateDict[mouse]
Msel = []
for trial in M:
if trial[izero] == cond:
Msel.append(trial)
M = np.array(Msel)
Trials.append(M)
for state in range(1,4):
C = np.zeros(M.shape)
C[np.where(M==state)] = 1
BS[imouse,:,state-1] = C.mean(axis=0)
imouse += 1
# flatten Trials
Trials = reduce(lambda x,y: np.concatenate((x,y), axis=0), Trials)
BS = BS*100
if sf > 0:
for state in [2, 1, 0]:
for i in range(nmice):
BS[i, :, state] = smooth_data(BS[i, :, state], sf)
df_timecourse = pd.DataFrame()
state_map = {1: 'REM', 2: 'Wake', 3: 'NREM'}
for s in state_map:
df = nparray2df(BS[:, :, s - 1], mouse_order, t, 'perc', 'mouse', 'time')
df['state'] = state_map[s]
df_timecourse = pd.concat([df_timecourse, df])
nmice = imouse
if pplot:
state_label = {0:'REM', 1:'Wake', 2:'NREM'}
it = np.where((t >= -pre + edge) & (t <= post - edge))[0]
plt.ion()
if not single_mode:
plt.figure()
ax = plt.axes([0.15, 0.15, 0.6, 0.7])
colors = [[0, 1, 1 ],[0.5, 0, 1],[0.6, 0.6, 0.6]]
if ci == 'sem':
for state in [2,1,0]:
tmp = BS[:, :, state].mean(axis=0)
plt.plot(t[it], tmp[it], color=colors[state], lw=3, label=state_label[state])
if nmice > 1:
smp = BS[:,:,state].std(axis=0) / np.sqrt(nmice)
plt.fill_between(t[it], tmp[it]-smp[it], tmp[it]+smp[it], color=colors[state], alpha=0.4, zorder=3)
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
ax.add_patch(patches.Rectangle((0,0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('Probability')
#plt.legend(bbox_to_anchor=(0., 1.02, 0.5, .102), loc=3, ncol=3, borderaxespad=0.)
plt.draw()
else:
bs_colors = {'REM': [0, 1, 1], 'Wake': [0.5, 0, 1], 'NREM': [0.6, 0.6, 0.6]}
dfm = df_timecourse.groupby(['mouse', 'state', 'time']).mean().reset_index()
for s in [3, 2, 1]:
sns.lineplot(data=dfm[dfm.state == state_map[s]], ci=ci, x='time', y='perc',
color=bs_colors[state_map[s]], err_kws={'alpha': 0.8, 'zorder': 3})
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
ax.add_patch(patches.Rectangle((0,0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('Probability')
else:
plt.figure(figsize=(7,7))
clrs = sns.color_palette("husl", nmice)
for state in [2,1,0]:
ax = plt.subplot(3, 1, 3-state)
for i in range(nmice):
plt.plot(t[it], BS[i,it,state], color=clrs[i], label=mouse_order[i])
ax.add_patch(patches.Rectangle((0, 0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1], alpha=0.8))
plt.xlim((t[it][0], t[it][-1]))
plt.ylim((0,100))
plt.ylabel('% ' + state_label[state])
if state==0:
plt.xlabel('Time (s)')
else:
ax.set_xticklabels([])
if state==2:
ax.legend(mouse_order, bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order),
frameon=False)
box_off(ax)
# figure showing all trials
plt.figure(figsize=(4,6))
set_fontarial()
plt.ion()
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,1,1],[0.5,0,1], [0.6, 0.6, 0.6]], 3)
x = list(range(Trials.shape[0]))
plt.pcolormesh(t,np.array(x), np.flipud(Trials), cmap=my_map, vmin=1, vmax=3)
plt.plot([0,0], [0, len(x)-1], color='white')
plt.plot([laser_dur,laser_dur], [0, len(x)-1], color='white')
ax.axis('tight')
plt.draw()
plt.xlabel('Time (s)')
plt.ylabel('Trial No.')
box_off(ax)
plt.show()
if len(fig_file)>0:
plt.savefig(fig_file)
# compile dataframe with all baseline and laser values
ilsr = np.where((t>=0) & (t<=laser_dur))[0]
ibase = np.where((t>=-laser_dur) & (t<0))[0]
iafter = np.where((t>=laser_dur) & (t<laser_dur*2))[0]
df = pd.DataFrame(columns = ['Mouse', 'REM', 'NREM', 'Wake', 'Lsr'])
mice = mouse_order + mouse_order + mouse_order
lsr = np.concatenate((np.ones((nmice,), dtype='int'), np.zeros((nmice,), dtype='int'), np.ones((nmice,), dtype='int')*2))
df['Mouse'] = mice
df['Lsr'] = lsr
df['REM'] = np.concatenate((BS[:,ilsr,0].mean(axis=1), BS[:,ibase,0].mean(axis=1), BS[:,iafter,0].mean(axis=1)))
df['NREM'] = np.concatenate((BS[:,ilsr,2].mean(axis=1), BS[:,ibase,2].mean(axis=1), BS[:,iafter,2].mean(axis=1)))
df['Wake'] = np.concatenate((BS[:,ilsr,1].mean(axis=1), BS[:,ibase,1].mean(axis=1), BS[:,iafter,1].mean(axis=1)))
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return df_timecourse, df, Trials
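# Illustrative use of the returned DataFrames (hypothetical recording names): the 'Lsr' column
# encodes baseline (0), laser (1) and post-laser (2) intervals, so the laser-induced change in
# REM percentage per mouse can be read off directly:
#   df_tc, df, trials = laser_brainstate('/data/Recordings', ['M1_010121n1', 'M2_010121n1'], 120, 240)
#   rem_change = df[df.Lsr == 1]['REM'].mean() - df[df.Lsr == 0]['REM'].mean()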
def laser_brainstate_bootstrap(ppath, recordings, pre, post, edge=0, sf=0, nboots=1000, alpha=0.05, backup='',
start_time=0, ma_thr=20, bootstrap_mode=0, fig_file=''):
"""
Align brain state with laser stimulation and calculate two-sided 1-$alpha confidence intervals using
bootstrapping.
:param ppath: base folder
:param recordings: list of recordings
:param pre: time before laser
:param post: time after laser onset
:param edge: add $edge seconds at the beginning and end (that are not shown in the plot) to avoid filtering artifacts
:param sf: smoothing factor for Gaussian filter; better do not use
:param nboots: int, how many times the whole data set is resampled for boot-strapping
:param alpha: plot shows 1-$alpha confidence interval
:param backup: optional backup folder where recordings are stored
:param start_time: start time of recording used for analysis
:param ma_thr: sleep periods < ma_thr are thrown away
:param bootstrap_mode: default=0
bootstrap_mode == 0: Take inter-mouse variance and inter-trial variance (of each mouse) into account.
That is, bootstrapping re-models the variance expected when re-doing the same
experimental design (same mouse number and total trial number).
To account for potentially different number of trials per mouse, resample the data
during each iteration the following way: Assume that there are n laser trials from m mice;
randomly select (with replacement) ntrial mice; then randomly select one trial from each of these mice.
bootstrap_mode == 1: Only take inter-trial variance (of each mouse) into account. That is,
bootstrapping models the variance expected when redoing the experiment with exactly the same mice.
:param fig_file, if file name is specified, the figure will be saved
:return: P - p-values for NREM, REM, Wake
Mod - by how much the percentage of NREM, REM, Wake is increased compared to baseline
"""
pre += edge
post += edge
rec_paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
rec_paths[rec] = ppath
else:
rec_paths[rec] = backup
# dict: mouse_id --> laser trials, R W N sequence
BrainstateDict = {}
for rec in recordings:
idf = re.split('_', rec)[0]
BrainstateDict[idf] = []
mice = list(BrainstateDict.keys())
nmice = len(BrainstateDict)
for rec in recordings:
ppath = rec_paths[rec]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5 * SR)
dt = NBIN * 1 / SR
istart_time = int( | np.round(start_time / dt) | numpy.round |
import gym
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import savemat, loadmat
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
file ="data_21-04-08-06-09.mat"
data = loadmat(file)
STATES = data['STATES']
ACTIONS = data['ACTIONS']
REWARDS = data['REWARDS']
NSTATES = data['NSTATES']
# generate data
n = 200
S = STATES[:,0]
E = STATES[:,1]
I = STATES[:,2]
COSTS = -np.array(REWARDS)
COSTS -= np.mean(COSTS)
COSTS /= (np.std(COSTS) + 1e-10) # normalizing the result
COSTS = COSTS*20
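# COSTS is the z-scored negative reward (mean 0, unit variance) rescaled by 20; it is not used
# by the scatter plots below, which use a fixed marker size, but could serve as a per-point size.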
# axes instance
fig = plt.figure(figsize=(6,6))
ax = Axes3D(fig)
# get colormap from seaborn
cmap = ListedColormap(sns.color_palette("husl", 256).as_hex())
# plot
sc = ax.scatter(S, E, I, s=5, c=ACTIONS, marker='o', alpha=1)
ax.set_xlabel('S')
ax.set_ylabel('E')
ax.set_zlabel('I')
# legend
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc=2)
ax.view_init(azim=0, elev=0)
plt.show()
# rotate the axes and update
# for angle in range(0, 360):
# ax.view_init(30, angle)
# plt.draw()
# plt.pause(.001)
# save
# plt.savefig("scatter_hue", bbox_inches='tight')
a_map = {0:'O', 1:'S', 2:'C'}
# POL = [a_map[a] for a in ACTIONS[0].tolist()]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize = (20,20))
ax.scatter(S, E, c=ACTIONS, s=60, alpha=1, edgecolors='none')
ax.set_xlabel('S', fontsize=40)
ax.set_ylabel('E', fontsize=40)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.set_ylim(0,1e5)
ax.set_xlim(0,1e5)
plt.legend(*sc.legend_elements(), bbox_to_anchor=(0.9, 1), loc=1)
# plt.show()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.savefig("SE", bbox_inches='tight')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize = (20,20))
ax.scatter(S, I, c=ACTIONS, s=60, alpha=1, edgecolors='none')
ax.set_xlabel('S', fontsize=40)
ax.set_ylabel('I', fontsize=40)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.set_ylim(0,1e5)
ax.set_xlim(0,1e5)
plt.legend(*sc.legend_elements(), bbox_to_anchor=(0.9, 1), loc=1)
# plt.show()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.savefig("SI", bbox_inches='tight')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize = (20,20))
ax.scatter(E, I, c=ACTIONS, s=60, alpha=1, edgecolors='none')
ax.set_xlabel('E', fontsize=40)
ax.set_ylabel('I', fontsize=40)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.set_ylim(0,1e5)
ax.set_xlim(0,1e5)
plt.legend(*sc.legend_elements(), bbox_to_anchor=(0.9, 1), loc=1)
# plt.show()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.savefig("EI", bbox_inches='tight')
def plot_fig(STATES, ACTIONS, fig_name):
S = STATES[:,0]
E = STATES[:,1]
I = STATES[:,2]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize = (20,20))
ax.scatter(S, E, c=ACTIONS, s=60, alpha=1, edgecolors='none')
ax.set_xlabel('S', fontsize=40)
ax.set_ylabel('E', fontsize=40)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.set_ylim(0,1e5)
ax.set_xlim(0,1e5)
plt.legend(*sc.legend_elements(), bbox_to_anchor=(0.9, 1), loc=1)
# plt.show()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.savefig(fig_name + '_' + "SE", bbox_inches='tight')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize = (20,20))
ax.scatter(S, I, c=ACTIONS, s=60, alpha=1, edgecolors='none')
ax.set_xlabel('S', fontsize=40)
ax.set_ylabel('I', fontsize=40)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.set_ylim(0,1e5)
ax.set_xlim(0,1e5)
plt.legend(*sc.legend_elements(), bbox_to_anchor=(0.9, 1), loc=1)
# plt.show()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.savefig(fig_name + '_' + "SI", bbox_inches='tight')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize = (20,20))
ax.scatter(E, I, c=ACTIONS, s=60, alpha=1, edgecolors='none')
ax.set_xlabel('E', fontsize=40)
ax.set_ylabel('I', fontsize=40)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.set_ylim(0,1e5)
ax.set_xlim(0,1e5)
plt.legend(*sc.legend_elements(), bbox_to_anchor=(0.9, 1), loc=1)
# plt.show()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.savefig(fig_name + '_' + "EI", bbox_inches='tight')
STATES_0, STATES_1, STATES_2 = [], [], []
ACTIONS_0, ACTIONS_1, ACTIONS_2 = [], [], []
for i, a in enumerate(ACTIONS[0]):
if a==0:
STATES_0.append(STATES[i].tolist())
ACTIONS_0.append(a)
elif a==1:
STATES_1.append(STATES[i].tolist())
ACTIONS_1.append(a)
elif a==2:
STATES_2.append(STATES[i].tolist())
ACTIONS_2.append(a)
else:
pass
pol_dict = {0:'Lockdown', 1:'SocialDist', 2:'Open'}
plot_fig(np.array(STATES_0), ACTIONS_0, pol_dict[0])
plot_fig(np.array(STATES_1), ACTIONS_1, pol_dict[1])
plot_fig(np.array(STATES_2), ACTIONS_2, pol_dict[2])
fig, ax = plt.subplots(nrows=3, ncols=3, figsize = (20,20))
for i in range(3):
for j in range(3):
if j==0:
data_S, data_A = STATES_0, ACTIONS_0
elif j==1:
data_S, data_A = STATES_1, ACTIONS_1
elif j==2:
data_S, data_A = STATES_2, ACTIONS_2
S = np.array(data_S)[:,0]
E = np.array(data_S)[:,1]
# -*- coding: utf-8 -*-
import numpy as np
import time
import unittest
# Allows relative imports when run locally as script
# https://docs.python-guide.org/writing/structure/
#if __name__ == "__main__":
# sys.path.insert(0, os.path.abspath(
# os.path.join(os.path.dirname(__file__), '..')))
#import fractalshades.numpy_utils.xrange as fsx
from fractalshades.numpy_utils.xrange import (
Xrange_array,
Xrange_polynomial,
Xrange_SA)
def _matching(res, expected, almost=False, dtype=None, cmp_op=False, ktol=1.5):
if not cmp_op:
res = res.to_standard()
if almost:
np.testing.assert_allclose(res, expected,
rtol= ktol * np.finfo(dtype).eps)
else:
np.testing.assert_array_equal(res, expected)
def _test_op1(ufunc, almost=False, cmp_op=False, ktol=1.0):
"""
General framework for testing unary operators on Xrange arrays
"""
# print("testing function", ufunc)
rg = np.random.default_rng(100)
n_vec = 500
max_bin_exp = 20
# testing binary operation of reals extended arrays
for dtype in [np.float64, np.float32]:
# print("dtype", dtype)
op1 = rg.random([n_vec], dtype=dtype)
op1 *= 2.**rg.integers(low=-max_bin_exp, high=max_bin_exp,
size=[n_vec])
expected = ufunc(op1)
res = ufunc(Xrange_array(op1))
_matching(res, expected, almost, dtype, cmp_op, ktol)
# Checking datatype
assert res._mantissa.dtype == dtype
# with non null shift array # culprit
exp_shift_array = rg.integers(low=-max_bin_exp, high=max_bin_exp,
size=[n_vec])
expected = ufunc(op1 * (2.**exp_shift_array).astype(dtype))
_matching(ufunc(Xrange_array(op1, exp_shift_array)),
expected, almost, dtype, cmp_op, ktol)
# test "scalar"
_matching(ufunc(Xrange_array(op1, exp_shift_array)[0]),
expected[0], almost, dtype, cmp_op, ktol)
# print("c2")
# testing binary operation of reals extended arrays
for dtype in [np.float32, np.float64]:
op1 = (rg.random([n_vec], dtype=dtype) +
1j*rg.random([n_vec], dtype=dtype))
op1 *= 2.**rg.integers(low=-max_bin_exp, high=max_bin_exp,
size=[n_vec])
expected = ufunc(op1)
res = ufunc(Xrange_array(op1))
_matching(res, expected, almost, dtype, cmp_op, ktol)
# Checking datatype
to_complex = {np.float32: np.complex64,
np.float64: np.complex128}
if ufunc in [np.abs]:
assert res._mantissa.dtype == dtype
else:
assert res._mantissa.dtype == to_complex[dtype]
# with non null shift array
exp_shift_array = rg.integers(low=-max_bin_exp, high=max_bin_exp,
size=[n_vec])
expected = ufunc(op1 * (2.**exp_shift_array))
_matching(ufunc(Xrange_array(op1, exp_shift_array)),
expected, almost, dtype, cmp_op, ktol)
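# Example of how these helpers are driven (a sketch; the actual unittest cases call them with
# many more ufuncs and dtypes):
#   _test_op1(np.sqrt, almost=True)      # unary ufunc, compared to the float result within ~eps
#   _test_op2(np.multiply, almost=True)  # binary ufunc between two Xrange_array operands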
def _test_op2(ufunc, almost=False, cmp_op=False):
"""
General framework for testing operations between 2 Xrange arrays.
"""
# print("testing operation", ufunc)
rg = np.random.default_rng(100)
# ea_type = (Xrange_array._FLOAT_DTYPES +
# Xrange_array._COMPLEX_DTYPES)
n_vec = 500
max_bin_exp = 20
exp_shift = 2
# testing binary operation of reals extended arrays
for dtype in [np.float32, np.float64]:
op1 = rg.random([n_vec], dtype=dtype)
op2 = rg.random([n_vec], dtype=dtype)
op1 *= 2.**rg.integers(low=-max_bin_exp, high=max_bin_exp,
size=[n_vec])
op2 *= 2.**rg.integers(low=-max_bin_exp, high=max_bin_exp,
size=[n_vec])
# testing operation between 2 Xrange_arrays OR between ER_A and
# a standard np.array
expected = ufunc(op1, op2)
res = ufunc(Xrange_array(op1), Xrange_array(op2))
_matching(res, expected, almost, dtype, cmp_op)
# # testing operation between 2 Xrange_arrays OR between ER_A and
# # a standard np.array xith dim 2
expected_2d = ufunc(op1.reshape(50, 10),
op2.reshape(50, 10))
res_2d = ufunc(Xrange_array(op1.reshape(50, 10)),
Xrange_array(op2.reshape(50, 10)))
_matching(res_2d, expected_2d, almost, dtype, cmp_op)
# Checking datatype
if ufunc in [np.add, np.multiply, np.subtract, np.divide]:
assert res._mantissa.dtype == dtype
if ufunc not in [np.equal, np.not_equal]:
_matching(ufunc(op1, Xrange_array(op2)),
expected, almost, dtype, cmp_op)
_matching(ufunc(Xrange_array(op1), op2),
expected, almost, dtype, cmp_op)
# Testing with non-null exponent
exp_shift_array = rg.integers(low=-exp_shift, high=exp_shift,
size=[n_vec])
expected = ufunc(op1 * 2.**exp_shift_array, op2 * 2.**-exp_shift_array)
_matching(ufunc(Xrange_array(op1, exp_shift_array),
Xrange_array(op2, -exp_shift_array)),
expected, almost, dtype, cmp_op)
# testing operation of an Xrange_array with a scalar
if ufunc not in [np.equal, np.not_equal]:
expected = ufunc(op1[0], op2)
_matching(ufunc(op1[0], Xrange_array(op2)),
expected, almost, dtype, cmp_op)
expected = ufunc(op2, op1[0])
_matching(ufunc(Xrange_array(op2), op1[0]),
expected, almost, dtype, cmp_op)
# testing operation of an Xrange_array with a "Xrange" scalar
if ufunc not in [np.equal, np.not_equal]:
expected = ufunc(op1[0], op2)
_matching(ufunc(Xrange_array(op1)[0], Xrange_array(op2)),
expected, almost, dtype, cmp_op)
expected = ufunc(op2, op1[0])
_matching(ufunc(Xrange_array(op2), Xrange_array(op1)[0]),
expected, almost, dtype, cmp_op)
if cmp_op and (ufunc not in [np.equal, np.not_equal]):
return
if ufunc in [np.maximum]:
return
# testing binary operation of complex extended arrays
for dtype in [np.float32, np.float64]:
n_vec = 20
max_bin_exp = 20
rg = np.random.default_rng(1)
"""
Class to construct parabolas from 3 points.
ADW: Need to move all of the plotting stuff
"""
import numpy
import scipy.stats
import scipy.interpolate
############################################################
class Parabola:
def __init__(self, x, y):
"""
INPUTS
x = variable of interest
y = 2 * log(likelihood)
"""
# Sort the input
argsort = numpy.argsort(x)
self.x = numpy.array(x)[argsort]
self.y = numpy.array(y)[argsort]
index = numpy.argmax(self.y)
if index == 0:
index_0 = 0
index_1 = 1
index_2 = 2
elif index == len(self.y) - 1:
index_0 = len(self.y) - 3
index_1 = len(self.y) - 2
index_2 = len(self.y) - 1
else:
index_0 = index - 1
index_1 = index
index_2 = index + 1
x_0 = self.x[index_0]
x_1 = self.x[index_1]
x_2 = self.x[index_2]
y_0 = self.y[index_0]
y_1 = self.y[index_1]
y_2 = self.y[index_2]
# Invert matrix
a = numpy.matrix([[x_0**2, x_0, 1.],
[x_1**2, x_1, 1.],
[x_2**2, x_2, 1.]])
a_inverse = numpy.linalg.inv(a)
b = numpy.array([y_0, y_1, y_2])
p = numpy.dot(numpy.array(a_inverse), b)
self.p_2 = p[0]
self.p_1 = p[1]
self.p_0 = p[2]
# Vertex
self.vertex_x = -self.p_1 / (2. * self.p_2)
self.vertex_y = self.p_0 - (self.p_1**2 / (4. * self.p_2))
def __eq__(self,other):
return numpy.allclose([self.p_0,self.p_1,self.p_2],[other.p_0,other.p_1,other.p_2])
def __ne__(self,other):
return not self.__eq__(other)
def __repr__(self):
return "y = %.2g * x**2 + %.2g * x + %.2g"%(self.p_2, self.p_1, self.p_0)
def __str__(self):
return self.__repr__()
def __call__(self, x):
"""
Evaluate the parabola.
"""
return (self.p_2 * x**2) + (self.p_1 * x) + self.p_0
def densify(self, factor=10):
"""
Increase the density of points along the parabolic curve.
"""
x = []
y = []
for ii in range(0, len(self.x) - 2):
p = Parabola(self.x[ii: ii + 3], self.y[ii: ii + 3])
x.append(numpy.linspace(self.x[ii], self.x[ii + 1], factor)[0: -1])
y.append(p(x[-1]))
p = Parabola(self.x[len(self.x) - 3:], self.y[len(self.y) - 3:])
x.append(numpy.linspace(self.x[-2], self.x[-1], factor)[0: -1])
y.append(p(x[-1]))
x.append([self.x[-1]])
y.append([self.y[-1]])
#f = scipy.interpolate.interp1d(numpy.concatenate(x), numpy.concatenate(y))
#x = numpy.linspace(self.x[0], self.x[-1], len(x) * factor)
#return x, f(x)
return numpy.concatenate(x), numpy.concatenate(y)
def profileUpperLimit(self, delta = 2.71):
"""
Compute one-sided upper limit via the profile method.
"""
a = self.p_2
b = self.p_1
if self.vertex_x < 0:
c = self.p_0 + delta
else:
c = self.p_0 - self.vertex_y + delta
if b**2 - 4. * a * c < 0.:
print('WARNING')
print(a, b, c)
#pylab.figure()
#pylab.scatter(self.x, self.y)
#raw_input('WAIT')
return 0.
return max((numpy.sqrt(b**2 - 4. * a * c) - b) / (2. * a), (-1. * numpy.sqrt(b**2 - 4. * a * c) - b) / (2. * a))
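# Worked example (hypothetical likelihood scan): the three points (0, -4), (1, 0), (2, -4)
# give y = -4*x**2 + 8*x - 4, hence vertex_x = 1 and vertex_y = 0; with delta = 2.71 the
# profile upper limit is the larger root of -4*x**2 + 8*x - 1.29 = 0, i.e. about 1.82:
#   p = Parabola([0., 1., 2.], [-4., 0., -4.])
#   p.vertex_x              # 1.0
#   p.profileUpperLimit()   # ~1.82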
#def bayesianUpperLimit3(self, alpha, steps = 1.e5):
# """
# Compute one-sided upper limit using Bayesian Method of Helene.
# """
# # Need a check to see whether limit is reliable
# pdf = scipy.interpolate.interp1d(self.x, numpy.exp(self.y / 2.)) # Convert from 2 * log(likelihood) to likelihood
# x_pdf = numpy.linspace(self.x[0], self.x[-1], steps)
# cdf = numpy.cumsum(pdf(x_pdf))
# cdf /= cdf[-1]
# cdf_reflect = scipy.interpolate.interp1d(cdf, x_pdf)
# return cdf_reflect(alpha)
# #return self.x[numpy.argmin((cdf - alpha)**2)]
def bayesianUpperLimit(self, alpha, steps=int(1e5), plot=False):
"""
Compute one-sided upper limit using Bayesian Method of Helene.
Several methods of increasing numerical stability have been implemented.
"""
x_dense, y_dense = self.densify()
y_dense -= numpy.max(y_dense) # Numeric stability
f = scipy.interpolate.interp1d(x_dense, y_dense, kind='linear')
x = numpy.linspace(0., numpy.max(x_dense), steps)
pdf = numpy.exp(f(x) / 2.)
cut = (pdf / numpy.max(pdf)) > 1.e-10
x = x[cut]
pdf = pdf[cut]
#pdf /= pdf[0]
#forbidden = numpy.nonzero(pdf < 1.e-10)[0]
#if len(forbidden) > 0:
# index = forbidden[0] # Numeric stability
# x = x[0: index]
# pdf = pdf[0: index]
cdf = numpy.cumsum(pdf)
cdf /= cdf[-1]
cdf_reflect = scipy.interpolate.interp1d(cdf, x)
#if plot:
# pylab.figure()
# pylab.plot(x, f(x))
# pylab.scatter(self.x, self.y, c='red')
#
# pylab.figure()
# pylab.plot(x, pdf)
#
# pylab.figure()
# pylab.plot(cdf, x)
return cdf_reflect(alpha)
def bayesianUpperLimit2(self, alpha, steps=int(1e5), plot=False):
"""
Compute one-sided upper limit using Bayesian Method of Helene.
"""
cut = ((self.y / 2.) > -30.) # Numeric stability
try:
f = scipy.interpolate.interp1d(self.x[cut], self.y[cut], kind='cubic')
except:
f = scipy.interpolate.interp1d(self.x[cut], self.y[cut], kind='linear')
x = numpy.linspace(0., numpy.max(self.x[cut]), steps)
y = numpy.exp(f(x) / 2.)
#forbidden = numpy.nonzero((y / numpy.exp(self.vertex_y / 2.)) < 1.e-10)[0]
forbidden = numpy.nonzero((y / self.vertex_y) < 1.e-10)[0]
if len(forbidden) > 0:
index = forbidden[0] # Numeric stability
x = x[0: index]
y = y[0: index]
cdf = numpy.cumsum(y)
cdf /= cdf[-1]
cdf_reflect = scipy.interpolate.interp1d(cdf, x)
#if plot:
# pylab.figure()
# pylab.scatter(self.x, self.y)
#
# pylab.figure()
# pylab.plot(x, f(x))
#
# pylab.figure()
# pylab.plot(x, y)
#
# pylab.figure()
# pylab.plot(cdf, x)
return cdf_reflect(alpha)
"""
if numpy.isnan(result):
import pylab
for ii in range(0, len(self.x)):
print '%.3f %.3f'%(self.x[ii], self.y[ii])
pylab.figure()
pylab.scatter(self.x, self.y)
pylab.figure()
pylab.scatter(cdf, x)
raw_input('WAIT')
return result
"""
def confidenceInterval(self, alpha=0.6827, steps=int(1e5), plot=False):
"""
Compute two-sided confidence interval by taking x-values corresponding to the largest PDF-values first.
"""
x_dense, y_dense = self.densify()
y_dense -= numpy.max(y_dense) # Numeric stability
f = scipy.interpolate.interp1d(x_dense, y_dense, kind='linear')
x = numpy.linspace(0., numpy.max(x_dense), steps)
# ADW: Why does this start at 0, which often outside the input range?
# Wouldn't starting at xmin be better:
#x = numpy.linspace(numpy.min(x_dense), numpy.max(x_dense), steps)
pdf = numpy.exp(f(x) / 2.)
cut = (pdf / numpy.max(pdf)) > 1.e-10
x = x[cut]
pdf = pdf[cut]
sorted_pdf_indices = numpy.argsort(pdf)[::-1] # Indices of PDF in descending value
cdf = numpy.cumsum(pdf[sorted_pdf_indices])
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from .assembling import buildElasticityMatrix
from .bc import bcApplyWestMat, bcApplyWest_vec
from .cg import cg
from .projection import projection, GenEO_V0, minimal_V0, coarse_operators
from petsc4py import PETSc
from slepc4py import SLEPc
import mpi4py.MPI as mpi
import numpy as np
import scipy as sp
import os
class PCBNN(object): #Neumann-Neumann and Additive Schwarz with no overlap
def __init__(self, A_IS):
"""
Initialize the domain decomposition preconditioner, multipreconditioner and coarse space with its operators
Parameters
==========
A_IS : petsc.Mat
The matrix of the problem in IS format. A must be a symmetric positive definite matrix
with symmetric positive semi-definite submatrices
PETSc.Options
=============
PCBNN_switchtoASM :Bool
Default is False
If False (default), the domain decomposition preconditioner is the BNN preconditioner. If True, the domain
decomposition preconditioner is the Additive Schwarz preconditioner with minimal overlap.
PCBNN_kscaling : Bool
Default is True.
If true then kscaling (partition of unity that is proportional to the diagonal of the submatrices of A)
is used when a partition of unity is required. Otherwise multiplicity scaling is used when a partition
of unity is required. This may occur in two occasions:
- to scale the local BNN matrices if PCBNN_switchtoASM=True,
- in the GenEO eigenvalue problem for eigmin if PCBNN_switchtoASM=False and PCBNN_GenEO=True with
PCBNN_GenEO_eigmin > 0 (see projection.__init__ for the meaning of these options).
PCBNN_verbose : Bool
If True, some information about the preconditioners is printed when the code is executed.
PCBNN_GenEO : Bool
Default is False.
If True then the coarse space is enriched by solving local generalized eigenvalue problems.
PCBNN_CoarseProjection : Bool
Default is True.
If False then there is no coarse projection: two-level Additive Schwarz or one-level preconditioner depending on PCBNN_addCoarseSolve.
If True, the coarse projection is applied: projected preconditioner or hybrid preconditioner depending on PCBNN_addCoarseSolve.
PCBNN_addCoarseSolve : Bool
Default is True.
If True then (R0t A0\R0 r) is added to the preconditioned residual.
False corresponds to the projected preconditioner (need to choose initial guess accordingly) (or the one level preconditioner if PCBNN_CoarseProjection = False).
True corresponds to the hybrid preconditioner (or the fully additive preconditioner if PCBNN_CoarseProjection = False).
"""
OptDB = PETSc.Options()
self.switchtoASM = OptDB.getBool('PCBNN_switchtoASM', False) #use Additive Schwarz as a preconditioner instead of BNN
self.kscaling = OptDB.getBool('PCBNN_kscaling', True) #kscaling if true, multiplicity scaling if false
self.verbose = OptDB.getBool('PCBNN_verbose', False)
self.GenEO = OptDB.getBool('PCBNN_GenEO', True)
self.addCS = OptDB.getBool('PCBNN_addCoarseSolve', True)
self.projCS = OptDB.getBool('PCBNN_CoarseProjection', True)
self.viewPC = OptDB.getBool('PCBNN_view', True)
self.viewV0 = OptDB.getBool('PCBNN_viewV0', False)
self.viewGenEOV0 = OptDB.getBool('PCBNN_viewGenEO', False)
self.viewminV0 = OptDB.getBool('PCBNN_viewminV0', False)
self.test_case = OptDB.getString('test_case', 'default')
#extract Neumann matrix from A in IS format
Ms = A_IS.copy().getISLocalMat()
# convert A_IS from matis to mpiaij
A_mpiaij = A_IS.convert('mpiaij')
r, _ = A_mpiaij.getLGMap() #r, _ = A_IS.getLGMap()
is_A = PETSc.IS().createGeneral(r.indices)
# extract exact local solver
As = A_mpiaij.createSubMatrices(is_A)[0]
vglobal, _ = A_mpiaij.getVecs()
vlocal, _ = Ms.getVecs()
scatter_l2g = PETSc.Scatter().create(vlocal, None, vglobal, is_A)
#compute the multiplicity of each degree
vlocal.set(1.)
vglobal.set(0.)
scatter_l2g(vlocal, vglobal, PETSc.InsertMode.ADD_VALUES)
scatter_l2g(vglobal, vlocal, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
NULL,mult_max = vglobal.max()
if self.viewPC:
_, self.ns = vlocal.getSizes()
_, self.nglob = vglobal.getSizes()
tempglobal = vglobal.getArray(readonly=True)
templocal = vlocal.getArray(readonly=True)
self.nints = np.count_nonzero(tempglobal == 1) #interior dofs in this subdomain
self.nGammas = np.count_nonzero(templocal -1) #interface (Gamma) dofs in this subdomain
# k-scaling or multiplicity scaling of the local (non-assembled) matrix
if self.kscaling == False:
Ms.diagonalScale(vlocal,vlocal)
else:
v1 = As.getDiagonal()
v2 = Ms.getDiagonal()
Ms.diagonalScale(v1/v2, v1/v2)
# the default local solver is the scaled non assembled local matrix (as in BNN)
if self.switchtoASM:
Atildes = As
if mpi.COMM_WORLD.rank == 0:
print('The user has chosen to switch to Additive Schwarz instead of BNN.')
else: #(default)
Atildes = Ms
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('cholesky')
pc_Atildes.setFactorSolverType('mumps')
ksp_Atildes.setFromOptions()
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('cholesky')
pc_Atildes_forSLEPc.setFactorSolverType('mumps')
ksp_Atildes_forSLEPc.setFromOptions()
self.A = A_mpiaij
self.Ms = Ms
self.As = As
self.ksp_Atildes = ksp_Atildes
self.ksp_Atildes_forSLEPc = ksp_Atildes_forSLEPc
self.work = vglobal.copy()
self.works_1 = vlocal.copy()
self.works_2 = self.works_1.copy()
self.scatter_l2g = scatter_l2g
self.mult_max = mult_max
self.minV0 = minimal_V0(self.ksp_Atildes)
if self.viewminV0 == True:
self.minV0.view()
if self.GenEO == True:
self.GenEOV0 = GenEO_V0(self.ksp_Atildes_forSLEPc,self.Ms,self.As,self.mult_max,self.minV0.V0s, self.minV0.labs)
self.V0s = self.GenEOV0.V0s
self.labs = self.GenEOV0.labs
if self.viewGenEOV0 == True:
self.GenEOV0.view()
else:
self.V0s = self.minV0.V0s
self.labs = self.minV0.labs
self.proj = coarse_operators(self.V0s,self.A,self.scatter_l2g,vlocal,self.work)
#TODO implement better the case where no coarse projection is performed by not computing V0 at all
if self.addCS == False and self.projCS == False: #no coarse operation so set the size of V0 to zero
self.GenEO = False
self.minV0.nrb = 0
self.minV0.labs = []
self.minV0.mumpsCntl3= []
self.proj.gathered_dimV0s= mpi.COMM_WORLD.gather(self.minV0.nrb, root=0)
if self.viewV0 == True:
self.proj.view()
if self.viewPC == True:
self.view()
def mult(self, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
########################
########################
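# Descriptive note (mirroring the option docstring, not additional functionality):
# with the defaults addCS=True and projCS=True this method schematically applies
#     y = Pi * (sum_s Rs^T Atildes_s^{-1} Rs) * Pi^T * x  +  R0^T A0^{-1} R0 * x,
# where Pi is the coarse projection implemented by self.proj (project /
# project_transpose) and A0 = R0 A R0^T. With projCS=False the projections are
# skipped; with addCS=False the additive coarse-solve term is skipped.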
xd = x.copy()
if self.projCS == True:
self.proj.project_transpose(xd)
self.scatter_l2g(xd, self.works_1, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
self.ksp_Atildes.solve(self.works_1, self.works_2)
y.set(0.)
self.scatter_l2g(self.works_2, y, PETSc.InsertMode.ADD_VALUES)
if self.projCS == True:
self.proj.project(y)
if self.addCS == True:
xd = x.copy()
ytild = self.proj.coarse_init(xd) # I could save a coarse solve by combining this line with project_transpose
y += ytild
def MP_mult(self, x, y):
"""
Applies the domain decomposition multipreconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : FIX
The list of ndom vectors that stores the result of the multipreconditioning operation (one vector per subdomain).
"""
self.scatter_l2g(x, self.works_1, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
self.ksp_Atildes.solve(self.works_1, self.works_2)
for i in range(mpi.COMM_WORLD.size):
self.works_1.set(0)
if mpi.COMM_WORLD.rank == i:
self.works_1 = self.works_2.copy()
y[i].set(0.)
self.scatter_l2g(self.works_1, y[i], PETSc.InsertMode.ADD_VALUES)
self.proj.project(y[i])
def apply(self,pc, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
This is just a call to PCBNN.mult with the function name and arguments that allow PCBNN to be passed
as a preconditioner to PETSc.ksp.
Parameters
==========
pc: This argument is not called within the function but it belongs to the standard way of calling a preconditioner.
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
self.mult(x,y)
def view(self):
self.gathered_ns = mpi.COMM_WORLD.gather(self.ns, root=0)
self.gathered_nints = mpi.COMM_WORLD.gather(self.nints, root=0)
self.gathered_Gammas = mpi.COMM_WORLD.gather(self.nGammas, root=0)
self.minV0.gathered_dim = mpi.COMM_WORLD.gather(self.minV0.nrb, root=0)
self.gathered_labs = mpi.COMM_WORLD.gather(self.labs, root=0)
if self.GenEO == True:
self.GenEOV0.gathered_nsharp = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmax, root=0)
self.GenEOV0.gathered_nflat = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmin, root=0)
self.GenEOV0.gathered_dimKerMs = mpi.COMM_WORLD.gather(self.GenEOV0.dimKerMs, root=0)
self.GenEOV0.gathered_Lambdasharp = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmax, root=0)
self.GenEOV0.gathered_Lambdaflat = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmin, root=0)
if mpi.COMM_WORLD.rank == 0:
print('#############################')
print(f'view of PCBNN')
print(f'{self.switchtoASM=}')
print(f'{self.kscaling= }')
print(f'{self.verbose= }')
print(f'{self.GenEO= }')
print(f'{self.addCS= }')
print(f'{self.projCS= }')
print(f'{self.viewPC= }')
print(f'{self.viewV0= }')
print(f'{self.viewGenEOV0= }')
print(f'{self.viewminV0= }')
print(f'{self.mult_max=}')
print(f'### info about the subdomains ###')
self.nint = np.sum(self.gathered_nints)
self.nGamma = self.nglob - self.nint
print(f'{self.gathered_ns =}')
print(f'{self.gathered_nints =}')
print(f'{self.gathered_Gammas=}')
print(f'{self.nGamma=}')
print(f'{self.nint=}')
print(f'{self.nglob=}')
print(f'{self.gathered_labs=}')
print(f'### info about minV0.V0s = (Ker(Atildes)) ###')
print(f'{self.minV0.mumpsCntl3=}')
if (self.ksp_Atildes.pc.getFactorSolverType() == 'mumps'):
print(f'dim(Ker(Atildes)) = {self.minV0.gathered_dim}')
else:
print(f'Ker(Atildes) not computed because pc is not mumps')
if self.GenEO == True:
print(f'### info about GenEOV0.V0s ###')
print(f'{self.GenEOV0.tau_eigmax=}')
print(f'{self.GenEOV0.tau_eigmin=}')
print(f'{self.GenEOV0.eigmax=}')
print(f'{self.GenEOV0.eigmin=}')
print(f'{self.GenEOV0.nev=}')
print(f'{self.GenEOV0.maxev=}')
print(f'{self.GenEOV0.mumpsCntl3=}')
print(f'{self.GenEOV0.verbose=}')
print(f'{self.GenEOV0.gathered_nsharp=}')
print(f'{self.GenEOV0.gathered_nflat=}')
print(f'{self.GenEOV0.gathered_dimKerMs=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdasharp)=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdaflat)=}')
print(f'### info about the coarse space ###')
print(f'{self.proj.V0_is_global=}')
print(f'{self.proj.gathered_dimV0s=}')
if self.GenEO == True:
print(f'global dim V0 = {np.sum(self.proj.gathered_dimV0s)} = ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes)) + ({np.sum(self.GenEOV0.gathered_nsharp)} from GenEO_eigmax) + ({np.sum(self.GenEOV0.gathered_nflat)+np.sum(self.GenEOV0.gathered_dimKerMs)} from GenEO_eigmin)')
else:
print(f'global dim V0 = {np.sum(self.proj.gathered_dimV0s)} = ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes))')
print('#############################')
self.savetofile()
def savetofile(self):
if mpi.COMM_WORLD.rank == 0:
if not os.path.exists(self.test_case):
os.mkdir(self.test_case)
np.savez(f'{self.test_case}/init',
switchtoASM= self.switchtoASM,
kscaling = self.kscaling,
verbose = self.verbose,
GenEO = self.GenEO,
addCS = self.addCS,
projCS = self.projCS,
viewPC = self.viewPC,
viewV0 = self.viewV0,
viewGenEOV0= self.viewGenEOV0,
viewminV0 = self.viewminV0,
mult_max = self.mult_max ,
gathered_ns = np.asarray(self.gathered_ns),
gathered_nints = np.asarray(self.gathered_nints),
gathered_Gammas = np.asarray(self.gathered_Gammas),
nGamma = self.nGamma,
nint = self.nint,
nglob = self.nglob,
minV0_mumpsCntl3 = self.minV0.mumpsCntl3,
V0_is_global= self.proj.V0_is_global,
gathered_dimV0s= np.asarray(self.proj.gathered_dimV0s),
minV0_gathered_dim = np.asarray(self.minV0.gathered_dim),
V0dim = np.sum(self.proj.gathered_dimV0s),
minV0dim = np.sum(self.minV0.gathered_dim),
gathered_labs= np.asarray(self.gathered_labs),
)
if self.GenEO == True:
np.savez(f'{self.test_case}/GenEO',
tau_eigmax = self.GenEOV0.tau_eigmax,
tau_eigmin = self.GenEOV0.tau_eigmin,
eigmax = self.GenEOV0.eigmax,
eigmin = self.GenEOV0.eigmin,
nev = self.GenEOV0.nev,
maxev = self.GenEOV0.maxev,
mumpsCntl3 = self.GenEOV0.mumpsCntl3,
verbose = self.GenEOV0.verbose,
gathered_nsharp = self.GenEOV0.gathered_nsharp,
gathered_nflat = self.GenEOV0.gathered_nflat,
gathered_dimKerMs = self.GenEOV0.gathered_dimKerMs,
gathered_Lambdasharp = np.asarray(self.GenEOV0.gathered_Lambdasharp,dtype='object'),
gathered_Lambdaflat = np.asarray(self.GenEOV0.gathered_Lambdaflat,dtype='object'),
sum_nsharp = np.sum(self.GenEOV0.gathered_nsharp),
sum_nflat = np.sum(self.GenEOV0.gathered_nflat),
sum_dimKerMs = np.sum(self.GenEOV0.gathered_dimKerMs)
)
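# Minimal usage sketch (not part of the class above): PCBNN can be attached to a
# PETSc KSP as a shell preconditioner through its apply() method. The names A_IS,
# b and x are assumptions here (a matrix of type 'is' and compatible vectors
# assembled elsewhere, e.g. with buildElasticityMatrix); the petsc4py calls
# themselves are standard.
#
#   pcbnn = PCBNN(A_IS)
#   ksp = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
#   ksp.setOperators(pcbnn.A)      # the assembled mpiaij operator stored by PCBNN
#   ksp.setType('cg')
#   pc = ksp.getPC()
#   pc.setType('python')
#   pc.setPythonContext(pcbnn)     # PETSc will call pcbnn.apply(pc, x, y)
#   ksp.setFromOptions()
#   ksp.solve(b, x)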
class PCNew:
def __init__(self, A_IS):
OptDB = PETSc.Options()
self.switchtoASM = OptDB.getBool('PCNew_switchtoASM', False) #use Additive Schwarz as a preconditioner instead of BNN
self.switchtoASMpos = OptDB.getBool('PCNew_switchtoASMpos', False) #use Additive Schwarz as a preconditioner instead of BNN
self.verbose = OptDB.getBool('PCNew_verbose', False)
self.GenEO = OptDB.getBool('PCNew_GenEO', True)
#self.H2addCS = OptDB.getBool('PCNew_H2addCoarseSolve', True)
self.H2projCS = OptDB.getBool('PCNew_H2CoarseProjection', True)
self.H3addCS = OptDB.getBool('PCNew_H3addCoarseSolve', True)
self.H3projCS = OptDB.getBool('PCNew_H3CoarseProjection', True)
self.compute_ritz_apos = OptDB.getBool('PCNew_ComputeRitzApos', False)
self.nev = OptDB.getInt('PCNew_Bs_nev', 20) #number of vectors asked to SLEPc for computing the negative part of Bs
self.viewPC = OptDB.getBool('PCNew_view', True)
self.viewV0 = OptDB.getBool('PCNew_viewV0', False)
self.viewGenEOV0 = OptDB.getBool('PCNew_viewGenEO', False)
self.viewminV0 = OptDB.getBool('PCNew_viewminV0', False)
self.viewnegV0 = OptDB.getBool('PCNew_viewnegV0', False)
self.test_case = OptDB.getString('test_case', 'default')
self.H2addCS = True #OptDB.getBool('PCNew_H2addCoarseSolve', True) (it is currently not an option to use a projected preconditioner for H2)
# Compute Bs (the symmetric matrix in the algebraic splitting of A)
# TODO: implement without A in IS format
ANeus = A_IS.getISLocalMat() #only the IS is used for the algorithm,
Mu = A_IS.copy()
Mus = Mu.getISLocalMat() #the IS format is used to compute Mu (multiplicity of each pair of dofs)
for i in range(ANeus.getSize()[0]):
col, _ = ANeus.getRow(i)
Mus.setValues([i], col, np.ones_like(col))
Mu.restoreISLocalMat(Mus)
Mu.assemble()
Mu = Mu.convert('mpiaij')
A_mpiaij = A_IS.convert('mpiaij')
B = A_mpiaij.duplicate()
for i in range(*A_mpiaij.getOwnershipRange()):
a_cols, a_values = A_mpiaij.getRow(i)
_, b_values = Mu.getRow(i)
B.setValues([i], a_cols, a_values/b_values, PETSc.InsertMode.INSERT_VALUES)
B.assemble()
# B.view()
# A_mpiaij.view()
# (A_mpiaij - B).view()
# data = ANeus.getArray()
# if mpi.COMM_WORLD.rank == 0:
# print(dir(ANeus))
# print(type(ANeus), ANeus.getType())
###################@
# convert A_IS from matis to mpiaij
#A_mpiaij = A_IS.convertISToAIJ()
r, _ = A_mpiaij.getLGMap() #r, _ = A_IS.getLGMap()
is_A = PETSc.IS().createGeneral(r.indices)
# extract exact local solver
As = A_mpiaij.createSubMatrices(is_A)[0]
Bs = B.createSubMatrices(is_A)[0]
#mumps solver for Bs
Bs_ksp = PETSc.KSP().create(comm=PETSc.COMM_SELF)
Bs_ksp.setOptionsPrefix("Bs_ksp_")
Bs_ksp.setOperators(Bs)
Bs_ksp.setType('preonly')
Bs_pc = Bs_ksp.getPC()
Bs_pc.setType('cholesky')
Bs_pc.setFactorSolverType('mumps')
Bs_pc.setFactorSetUpSolverType()
Bs_pc.setUp()
Bs_ksp.setFromOptions()
#temp = Bs.getValuesCSR()
work, _ = A_mpiaij.getVecs()
work_2 = work.duplicate()
works, _ = As.getVecs()
works_2 = works.duplicate()
mus = works.duplicate()
scatter_l2g = PETSc.Scatter().create(works, None, work, is_A)
#compute the multiplicity of each dof
work = Mu.getDiagonal()
NULL,mult_max = work.max()
scatter_l2g(work, mus, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
if self.viewPC:
_, self.ns = mus.getSizes()
_, self.nglob = work.getSizes()
tempglobal = work.getArray(readonly=True)
templocal = mus.getArray(readonly=True)
self.nints = np.count_nonzero(tempglobal == 1) #interior dofs in this subdomain
self.nGammas = np.count_nonzero(templocal -1) #interface (Gamma) dofs in this subdomain
invmus = mus.duplicate()
invmus = 1/mus
if mpi.COMM_WORLD.rank == 0:
print(f'multmax: {mult_max}')
DVnegs = []
Vnegs = []
invmusVnegs = []
#BEGIN diagonalize Bs
#Eigenvalue Problem for smallest eigenvalues
eps = SLEPc.EPS().create(comm=PETSc.COMM_SELF)
eps.setDimensions(nev=self.nev)
eps.setProblemType(SLEPc.EPS.ProblemType.HEP)
eps.setOperators(Bs)
#print(f'dimension of Bs : {Bs.getSize()}')
#OPTION 1: works but dense algebra
eps.setType(SLEPc.EPS.Type.LAPACK)
eps.setWhichEigenpairs(SLEPc.EPS.Which.SMALLEST_REAL) #with lapack this just tells slepc how to order the eigenpairs
##END OPTION 1
##OPTION 2: default solver (Krylov Schur) but error with getInertia - is there a MUMPS mattype - Need to use MatCholeskyFactor
#if Which eigenpairs is set to SMALLEST_REAL, some are computed but not all
##Bs.setOption(PETSc.Mat.Option.SYMMETRIC, True)
##Bs.convert('sbaij')
##IScholBs = is_A.duplicate()
##Bs.factorCholesky(IScholBs) #not implemented
#tempksp = PETSc.KSP().create(comm=PETSc.COMM_SELF)
#tempksp.setOperators(Bs)
#tempksp.setType('preonly')
#temppc = tempksp.getPC()
#temppc.setType('cholesky')
#temppc.setFactorSolverType('mumps')
#temppc.setFactorSetUpSolverType()
#tempF = temppc.getFactorMatrix()
#tempF.setMumpsIcntl(13, 1) #needed to compute inertia according to slepcdoc, inertia computation still doesn't work though
#temppc.setUp()
##eps.setOperators(tempF)
#eps.setWhichEigenpairs(SLEPc.EPS.Which.ALL)
#eps.setInterval(PETSc.NINFINITY,0.0)
#eps.setUp()
##eps.setWhichEigenpairs(SLEPc.EPS.Which.TARGET_REAL)
##eps.setTarget(0.)
##if len(Vnegs) > 0 :
## eps.setDeflationSpace(Vnegs)
##if mpi.COMM_WORLD.rank == 0:
## eps.view()
##END OPTION 2
eps.solve()
if eps.getConverged() < self.nev:
PETSc.Sys.Print('for Bs in subdomain {}: {} eigenvalues converged (less than the {} requested)'.format(mpi.COMM_WORLD.rank, eps.getConverged(), self.nev), comm=PETSc.COMM_SELF)
Dnegs = []
Dposs = []
for i in range(eps.getConverged()):
tempscalar = np.real(eps.getEigenvalue(i))
if tempscalar < 0. :
Dnegs.append(-1.*tempscalar)
Vnegs.append(works.duplicate())
eps.getEigenvector(i,Vnegs[-1])
DVnegs.append(Dnegs[-1] * Vnegs[-1])
invmusVnegs.append(invmus * Vnegs[-1])
else :
Dposs.append(tempscalar)
if self.verbose:
PETSc.Sys.Print('for Bs in subdomain {}: ncv= {} with {} negative eigs ({} requested)'.format(mpi.COMM_WORLD.rank, eps.getConverged(), len(Vnegs), self.nev), comm=PETSc.COMM_SELF)
print(f'values of Dnegs {np.array(Dnegs)}')
nnegs = len(Dnegs)
#print(f'length of Dnegs {nnegs}')
#END diagonalize Bs
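# Summary of the splitting built from this eigendecomposition: the eigenpairs of
# Bs with negative eigenvalues are stored with flipped sign in (Dnegs, Vnegs), so
# Anegs, built from Vnegs and DVnegs, acts as sum_j Dnegs[j] * Vnegs[j] Vnegs[j]^T
# and is positive semi-definite. Aposs = Bs + Anegs is then positive semi-definite,
# and globally Apos = A + Aneg (see the Apos_ctx / Aposs_ctx constructions below).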
if self.viewnegV0:
print('###')
print(f'view of Vneg in Subdomain {mpi.COMM_WORLD.rank}')
print(f'ncv = {eps.getConverged()} eigenvalues converged')
print(f'{nnegs=}')
print(f'values of Dnegs: {np.array(Dnegs)}')
works.set(0.)
RsVnegs = []
Vneg = []
Dneg = []
RsDVnegs = []
RsDnegs = []
for i in range(mpi.COMM_WORLD.size):
nnegi = len(Vnegs) if i == mpi.COMM_WORLD.rank else None
nnegi = mpi.COMM_WORLD.bcast(nnegi, root=i)
for j in range(nnegi):
Vneg.append(Vnegs[j].copy() if i == mpi.COMM_WORLD.rank else works.copy())
dnegi = Dnegs[j] if i == mpi.COMM_WORLD.rank else None
dnegi = mpi.COMM_WORLD.bcast(dnegi, root=i)
Dneg.append(dnegi)
#print(f'i Dneg[i] = {i} {Dneg[i]}')
for i, vec in enumerate(Vneg):
work.set(0)
scatter_l2g(vec, work, PETSc.InsertMode.ADD_VALUES)
scatter_l2g(work, works, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
if works.norm() != 0:
RsVnegs.append(works.copy())
RsDVnegs.append(Dneg[i]*works.copy())
RsDnegs.append(Dneg[i])
#TO DO: here implement RsVnegs and RsDVnegs
#self.Vneg = Vneg
# self.Vnegs = Vnegs
# self.DVnegs = DVnegs
# self.scatterl
#Local Apos and Aneg
Aneg = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
Aneg.setPythonContext(Aneg_ctx(Vnegs, DVnegs, scatter_l2g, works, works_2))
Aneg.setUp()
Apos = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
Apos.setPythonContext(Apos_ctx(A_mpiaij, Aneg ))
Apos.setUp()
#A pos = A_mpiaij + Aneg so it could be a composite matrix rather than Python type
Anegs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
Anegs.setPythonContext(Anegs_ctx(Vnegs, DVnegs))
Anegs.setUp()
Aposs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
Aposs.setPythonContext(Aposs_ctx(Bs, Anegs ))
Aposs.setUp()
projVnegs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
projVnegs.setPythonContext(projVnegs_ctx(Vnegs))
projVnegs.setUp()
projVposs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
projVposs.setPythonContext(projVposs_ctx(projVnegs))
projVposs.setUp()
#TODO Implement RsAposRsts, this is the restriction of Apos to the dofs in this subdomain. So it applies to local vectors but has non local operations
RsAposRsts = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF) #or COMM_WORLD ?
RsAposRsts.setPythonContext(RsAposRsts_ctx(As,RsVnegs,RsDVnegs))
RsAposRsts.setUp()
invAposs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
invAposs.setPythonContext(invAposs_ctx(Bs_ksp, projVposs ))
invAposs.setUp()
ksp_Aposs = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Aposs.setOperators(Aposs)
ksp_Aposs.setType('preonly')
pc_Aposs = ksp_Aposs.getPC()
pc_Aposs.setType('python')
pc_Aposs.setPythonContext(invAposs_ctx(Bs_ksp,projVposs))
ksp_Aposs.setUp()
work.set(1.)
Ms = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
Ms.setPythonContext(scaledmats_ctx(Aposs, mus, mus))
Ms.setUp()
ksp_Ms = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Ms.setOptionsPrefix("ksp_Ms_")
ksp_Ms.setOperators(Ms)
ksp_Ms.setType('preonly')
pc_Ms = ksp_Ms.getPC()
pc_Ms.setType('python')
pc_Ms.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Ms.setFromOptions()
#once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Ms_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Ms_forSLEPc.setOptionsPrefix("ksp_Ms_")
ksp_Ms_forSLEPc.setOperators(Ms)
ksp_Ms_forSLEPc.setType('preonly')
pc_Ms_forSLEPc = ksp_Ms_forSLEPc.getPC()
pc_Ms_forSLEPc.setType('python')
pc_Ms_forSLEPc.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Ms_forSLEPc.setFromOptions()
# the default local solver is the scaled non assembled local matrix (as in BNN)
if self.switchtoASM:
Atildes = As
if mpi.COMM_WORLD.rank == 0:
print('Switch to Additive Schwarz instead of BNN.')
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('cholesky')
pc_Atildes.setFactorSolverType('mumps')
ksp_Atildes.setFromOptions()
#once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('cholesky')
pc_Atildes_forSLEPc.setFactorSolverType('mumps')
ksp_Atildes_forSLEPc.setFromOptions()
if self.switchtoASMpos:
if mpi.COMM_WORLD.rank == 0:
print('switchtoASMpos has been ignored in favour of switchtoASM.')
elif self.switchtoASMpos:
Atildes = RsAposRsts
if mpi.COMM_WORLD.rank == 0:
print('Switch to Apos Additive Schwarz instead of BNN.')
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('python')
pc_Atildes.setPythonContext(invRsAposRsts_ctx(As,RsVnegs,RsDnegs,works))
ksp_Atildes.setFromOptions()
#once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('python')
pc_Atildes_forSLEPc.setPythonContext(invRsAposRsts_ctx(As,RsVnegs,RsDnegs,works))
ksp_Atildes_forSLEPc.setFromOptions()
else: #(default)
Atildes = Ms
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('python')
pc_Atildes.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Atildes.setFromOptions()
#once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('python')
pc_Atildes_forSLEPc.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Atildes_forSLEPc.setFromOptions()
labs=[]
for i, tmp in enumerate(Dnegs):
labs.append(f'(\Lambda_-^s)_{i} = {-1.*tmp}')
minV0 = minimal_V0(ksp_Atildes,invmusVnegs,labs) #won't compute anything more because the solver for Atildes is not mumps
minV0s = minV0.V0s
labs = minV0.labs
if self.viewminV0 == True:
minV0.view()
self.A = A_mpiaij
self.Apos = Apos
self.Aneg = Aneg
self.Ms = Ms
self.As = As
self.RsAposRsts = RsAposRsts
self.ksp_Atildes = ksp_Atildes
self.ksp_Ms = ksp_Ms
self.ksp_Atildes_forSLEPc = ksp_Atildes_forSLEPc
self.ksp_Ms_forSLEPc = ksp_Ms_forSLEPc
self.work = work
self.work_2 = work_2
self.works_1 = works
self.works_2 = works_2
self.scatter_l2g = scatter_l2g
self.mult_max = mult_max
self.ksp_Atildes = ksp_Atildes
self.minV0 = minV0
self.labs = labs
self.Dnegs = Dnegs
self.nnegs = nnegs
self.works_1.set(1.)
self.RsAposRsts.mult(self.works_1,self.works_2)
if self.GenEO == True:
print(f'{labs=}')
self.GenEOV0 = GenEO_V0(self.ksp_Atildes_forSLEPc,self.Ms,self.RsAposRsts,self.mult_max,minV0s,labs,self.ksp_Ms_forSLEPc)
self.V0s = self.GenEOV0.V0s
if self.viewGenEOV0 == True:
self.GenEOV0.view()
print(f'{self.GenEOV0.labs=}')
else:
self.V0s = minV0s
self.proj2 = coarse_operators(self.V0s,self.Apos,self.scatter_l2g,self.works_1,self.work)
if self.viewV0 == True:
self.proj2.view()
# work.set(1.)
# test = work.copy()
# test = self.proj2.coarse_init(work)
# testb = work.copy()
# self.proj2.project(testb)
# testc = work.copy()
# self.proj2.project_transpose(testc)
# testd = work.copy()
# self.apply([], work,testd)
self.H2 = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
self.H2.setPythonContext(H2_ctx(self.H2projCS, self.H2addCS, self.proj2, self.scatter_l2g, self.ksp_Atildes, self.works_1, self.works_2 ))
self.H2.setUp()
self.ksp_Apos = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
self.ksp_Apos.setOptionsPrefix("ksp_Apos_")
self.ksp_Apos.setOperators(Apos)
self.ksp_Apos.setType("cg")
if self.compute_ritz_apos:
self.ksp_Apos.setComputeEigenvalues(True)
self.pc_Apos = self.ksp_Apos.getPC()
self.pc_Apos.setType('python')
self.pc_Apos.setPythonContext(H2_ctx(self.H2projCS, self.H2addCS, self.proj2, self.scatter_l2g, self.ksp_Atildes, self.works_1, self.works_2 ))
self.ksp_Apos.setFromOptions()
self.pc_Apos.setFromOptions()
#At this point the preconditioner for Apos is ready
if self.verbose:
if mpi.COMM_WORLD.rank == 0:
print(f'#V0(H2) = rank(Ker(Pi2)) = {len(self.proj2.V0)}')
Vneg = []
for i in range(mpi.COMM_WORLD.size):
nnegi = len(Vnegs) if i == mpi.COMM_WORLD.rank else None
nnegi = mpi.COMM_WORLD.bcast(nnegi, root=i)
for j in range(nnegi):
if i == mpi.COMM_WORLD.rank:
works = Vnegs[j].copy()
else:
works.set(0.)
self.work.set(0)
self.scatter_l2g(works, self.work, PETSc.InsertMode.ADD_VALUES)
Vneg.append(self.work.copy())
AposinvV0 = []
self.ritz_eigs_apos = None
for vec in Vneg:
self.ksp_Apos.solve(vec,self.work_2)
if self.compute_ritz_apos and self.ritz_eigs_apos is None:
self.ritz_eigs_apos = self.ksp_Apos.computeEigenvalues()
self.ksp_Apos.setComputeEigenvalues(False)
AposinvV0.append(self.work_2.copy())
self.AposinvV0 = AposinvV0
self.proj3 = coarse_operators(self.AposinvV0,self.A,self.scatter_l2g,self.works_1,self.work,V0_is_global=True)
self.proj = self.proj3 #this name is consistent with the proj in PCBNN
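# Descriptive note: AposinvV0 collects Apos^{-1} * v for every globally assembled
# negative mode v in Vneg, and proj3 builds the corresponding coarse operators for
# A itself (V0_is_global=True). It is exposed as self.proj so that calling code
# can treat PCNew the same way as PCBNN.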
###############################
# ###Alternative to assembling the second coarse operators
#
# ###
# self.Id = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# self.Id.setPythonContext(Id_ctx())
# self.Id.setUp()
#
# #self.Id = PETSc.Mat().create(comm=PETSc.COMM_SELF)
# #self.Id.setType("constantdiagonal") #I don't know how to set the value to 1
#
# #self.N = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# #self.N.setPythonContext(N_ctx(self.Aneg,self.A,self.ksp_Apos,self.work,self.work_2))
# #self.N.setUp()
#
# #self.ksp_N = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
# #self.ksp_N.setOptionsPrefix("ksp_N_")
# #self.ksp_N.setOperators(self.N)
# #self.ksp_N.setType("gmres")
# #self.ksp_N.setGMRESRestart(151)
## # if self.compute_ritz_N:
# #self.ksp_N.setComputeEigenvalues(True)
# ##self.pc_N = self.ksp_N.getPC()
# ##self.pc_N.setType('python')
# ##self.pc_N.setPythonContext(
# #self.ksp_N.setFromOptions()
# self.proj4 = coarse_operators(Vneg,self.Id,self.scatter_l2g,self.works_1,self.work,V0_is_global=True)
#
# self.ProjA = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# self.ProjA.setPythonContext(ProjA_ctx(self.proj4,self.A))
# self.ProjA.setUp()
# self.work.set(1.)
# #test = self.work.duplicate()
# #self.ProjA.mult(self.work,test)
# #print('self.ProjA works ok')
#
# self.ksp_ProjA = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
# self.ksp_ProjA.setOptionsPrefix("ksp_ProjA_")
# self.ksp_ProjA.setOperators(self.ProjA)
# self.ksp_ProjA.setType("gmres")
# self.ksp_ProjA.setGMRESRestart(151)
# self.ksp_ProjA.setComputeEigenvalues(True)
# #self.pc_ProjA = self.ksp_N.getPC()
# #self.pc_ProjA.setType('python')
# #self.pc_ProjA.setPythonContext(
# self.ksp_ProjA.setFromOptions()
###############################
##
if self.viewV0 == True:
self.proj.view()
if self.viewPC == True:
self.view()
##Debug DEBUG
# works_3 = works.copy()
##projVnegs is a projection
# #works.setRandom()
# works.set(1.)
# projVnegs.mult(works,works_2)
# projVnegs.mult(works_2,works_3)
# print(f'check that projVnegs is a projection {works_2.norm()} = {works_3.norm()} < {works.norm()}')
##projVposs is a projection
##Pythagoras ok
# works.setRandom()
# #works.set(1.)
# projVnegs.mult(works,works_2)
# projVposs.mult(works,works_3)
# print(f'{works_2.norm()**2} + {works_3.norm()**2}= {works_2.norm()**2 + works_3.norm()**2} = {(works.norm())**2}')
# print(f'0 = {(works - works_2 - works_3).norm()} if the two projections sum to identity')
##Aposs = projVposs Bs projVposs = Bs projVposs (it is implemented as Bs + Anegs)
# works_4 = works.copy()
# works.setRandom()
# #works.set(1.)
# projVposs.mult(works,works_2)
# Bs.mult(works_2,works_3)
# projVposs.mult(works_3,works_2)
# Aposs.mult(works,works_4)
# print(f'check Aposs = projVposs Bs projVposs = Bs projVposs: {works_2.norm()} = {works_3.norm()} = {works_4.norm()}')
# print(f'norms of diffs (should be zero): {(works_2 - works_3).norm()}, {(works_2 - works_4).norm()}, {(works_3 - works_4).norm()}')
###check that Aposs > 0 and Anegs >0 but Bs is indefinite + "Pythagoras"
# works_4 = works.copy()
# works.set(1.) #(with vector full of ones I get a negative Bs semi-norm)
# Bs.mult(works,works_4)
# Aposs.mult(works,works_2)
# Anegs.mult(works,works_3)
# print(f'|.|_Bs {works_4.dot(works)} (can be neg or pos); |.|_Aposs {works_2.dot(works)} > 0; |.|_Anegs {works_3.dot(works)} >0')
# print(f' |.|_Bs^2 = |.|_Aposs^2 - |.|_Anegs ^2 = {works_2.dot(works)} - {works_3.dot(works)} = {works_2.dot(works) - works_3.dot(works)} = {works_4.dot(works)} ')##
###check that ksp_Aposs.solve(Aposs * x) = projVposs x
# works_4 = works.copy()
# works.setRandom()
# #works.set(1.)
# projVposs.mult(works,works_2)
# Aposs(works,works_3)
# ksp_Aposs.solve(works_3,works_4)
# works_5 = works_2 - works_4
# print(f'norm x = {works.norm()}; norm projVposs x = {works_2.norm()} = norm Aposs\Aposs*x = {works_4.norm()}; normdiff = {works_5.norm()}')
####check that mus*invmus = vec of ones
# works.set(1.0)
# works_2 = invmus*mus
# works_3 = works - works_2
# print(f'0 = norm(vec of ones - mus*invmus) = {works_3.norm()}, mus in [{mus.min()}, {mus.max()}], invmus in [{invmus.min()}, {invmus.max()}]')
###check that Ms*ksp_Ms.solve(Ms*x) = Ms*x
# works_4 = works.copy()
# works.setRandom()
# Atildes.mult(works,works_3)
# self.ksp_Atildes.solve(works_3,works_4)
# Atildes.mult(works_4,works_2)
# works_5 = works_2 - works_3
# print(f'norm x = {works.norm()}; Atilde*x = {works_3.norm()} = norm Atilde*(Atildes\Atildes)*x = {works_2.norm()}; normdiff = {works_5.norm()}')
###check Apos by implementing it a different way in Apos_debug
# Apos_debug = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# Apos_debug.setPythonContext(Apos_debug_ctx(projVposs, Aposs, scatter_l2g, works, work))
# Apos_debug.setUp()
# work.setRandom()
# test = work.duplicate()
# test2 = work.duplicate()
# Apos.mult(work,test)
# Apos_debug.mult(work,test2)
# testdiff = test-test2
# print(f'norm of |.|_Apos = {np.sqrt(test.dot(work))} = |.|_Apos_debug = {np.sqrt(test2.dot(work))} ; norm of diff = {testdiff.norm()}')
###
###check that the projection in proj2 is a self.proj2.A orth projection
#work.setRandom()
# work.set(1.)
# test = work.copy()
# self.proj2.project(test)
# test2 = test.copy()
# self.proj2.project(test2)
# testdiff = test-test2
# print(f'norm(Pi x - Pi Pix) = {testdiff.norm()} = 0')
# self.proj2.A.mult(test,test2)
# test3 = work.duplicate()
# self.proj2.A.mult(work,test3)
# print(f'|Pi x|_A^2 - |x|_A^2 = {test.dot(test2)} - {work.dot(test3)} = {test.dot(test2) - work.dot(test3)} < 0 ')
# #test2 = A Pi x ( = Pit A Pi x)
# test3 = test2.copy()
# self.proj2.project_transpose(test3)
# test = test3.copy()
# self.proj2.project_transpose(test)
# testdiff = test3 - test2
# print(f'norm(A Pi x - Pit A Pix) = {testdiff.norm()} = 0 = {(test - test3).norm()} = norm(Pit Pit A Pi x - Pit A Pix); compare to norm(A Pi x) = {test2.norm()} ')
# #work.setRandom()
# work.set(1.)
# test2 = work.copy()
# self.proj2.project_transpose(test2)
# test2 = -1*test2
# test2 += work
#
# test = work.copy()
# test = self.proj2.coarse_init(work)
# test3 = work.duplicate()
# self.proj2.A.mult(test,test3)
###check that the projection in proj3 is a self.proj3.A orth projection whose image includes Ker(Aneg)
# #work.setRandom()
# work.set(1.)
# test = work.copy()
# self.proj3.project(test)
# test2 = test.copy()
# self.proj3.project(test2)
# testdiff = test-test2
# print(f'norm(Pi x - Pi Pix) = {testdiff.norm()} = 0')
# self.proj3.A.mult(test,test2)
# test3 = work.duplicate()
# self.proj3.A.mult(work,test3)
# print(f'|Pi x|_A^2 - |x|_A^2 = {test.dot(test2)} - {work.dot(test3)} = {test.dot(test2) - work.dot(test3)} < 0 ')
# #test2 = A Pi x ( = Pit A Pi x)
# test3 = test2.copy()
# self.proj3.project_transpose(test3)
# test = test3.copy()
# self.proj3.project_transpose(test)
# testdiff = test3 - test2
# print(f'norm(A Pi x - Pit A Pix) = {testdiff.norm()} = 0 = {(test - test3).norm()} = norm(Pit Pit A Pi x - Pit A Pix); compare to norm(A Pi x) = {test2.norm()} ')
# #work.setRandom()
# work.set(1.)
# test2 = work.copy()
# self.proj3.project_transpose(test2)
# test2 = -1*test2
# test2 += work
#
# test = work.copy()
# test = self.proj3.coarse_init(work)
# test3 = work.duplicate()
# self.proj3.A.mult(test,test3)
#
# print(f'norm(A coarse_init(b)) = {test3.norm()} = {test2.norm()} = norm((I-Pit b)); norm diff = {(test2 - test3).norm()}')
#
# work.set(1.)
# test = work.copy()
# test2 = work.copy()
# self.proj3.project(test2)
# test3 = work.copy()
# self.proj3.project(test3)
# test = work.copy()
# self.Apos.mult(test2,test)
# test2 = work.copy()
# self.A.mult(test3,test2)
# print(f'norm(Apos Pi3 x) = {test.norm()} = {test2.norm()} = norm(A Pi3 x); norm diff = {(test - test2).norm()}')
# for vec in self.AposinvV0:
# test = vec.copy()
# self.proj3.project(test)
# print(f'norm(Pi3 AposinvV0[i]) = {test.norm()} compare to norm of the non projected vector norm ={(vec).norm()}')
#
### END Debug DEBUG
def mult(self, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
########################
########################
xd = x.copy()
if self.H3projCS == True:
self.proj3.project_transpose(xd)
self.H2.mult(xd,y)
if self.H3projCS == True:
self.proj3.project(y)
if self.H3addCS == True:
xd = x.copy()
ytild = self.proj3.coarse_init(xd) # I could save a coarse solve by combining this line with project_transpose
if ytild.dot(xd) < 0:
print(f'x.dot(coarse_init(x)) = {ytild.dot(xd)} < 0 ')
y += ytild
def MP_mult(self, x, y):
"""
Applies the domain decomposition multipreconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : FIX
The list of ndom vectors that stores the result of the multipreconditioning operation (one vector per subdomain).
"""
print('not implemented')
def apply(self, pc, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
This is just a call to PCNew.mult with the function name and arguments that allow PCNew to be passed
as a preconditioner to PETSc.ksp.
Parameters
==========
pc: This argument is not called within the function but it belongs to the standard way of calling a preconditioner.
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
self.mult(x,y)
def view(self):
self.gathered_ns = mpi.COMM_WORLD.gather(self.ns, root=0)
self.gathered_nints = mpi.COMM_WORLD.gather(self.nints, root=0)
self.gathered_Gammas = mpi.COMM_WORLD.gather(self.nGammas, root=0)
self.minV0.gathered_dim = mpi.COMM_WORLD.gather(self.minV0.nrb, root=0)
self.gathered_labs = mpi.COMM_WORLD.gather(self.labs, root=0)
self.gathered_nneg = mpi.COMM_WORLD.gather(self.nnegs, root=0)
self.gathered_Dneg = mpi.COMM_WORLD.gather(self.Dnegs, root=0)
if self.GenEO == True:
self.GenEOV0.gathered_nsharp = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmax, root=0)
self.GenEOV0.gathered_nflat = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmin, root=0)
self.GenEOV0.gathered_dimKerMs = mpi.COMM_WORLD.gather(self.GenEOV0.dimKerMs, root=0)
self.GenEOV0.gathered_Lambdasharp = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmax, root=0)
self.GenEOV0.gathered_Lambdaflat = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmin, root=0)
if mpi.COMM_WORLD.rank == 0:
print('#############################')
print(f'view of PCNew')
print(f'{self.switchtoASM=}')
print(f'{self.verbose= }')
print(f'{self.GenEO= }')
print(f'{self.H3addCS= }')
print(f'{self.H3projCS= }')
print(f'{self.H2projCS= }')
print(f'{self.viewPC= }')
print(f'{self.viewV0= }')
print(f'{self.viewGenEOV0= }')
print(f'{self.viewnegV0= }')
print(f'{self.viewminV0= }')
print(f'{self.compute_ritz_apos=}')
print(f'{self.mult_max=}')
print(f'### info about the subdomains ###')
self.nint = np.sum(self.gathered_nints)
self.nGamma = self.nglob - self.nint
print(f'{self.gathered_ns =}')
print(f'{self.gathered_nints =}')
print(f'{self.gathered_Gammas=}')
print(f'{self.nGamma=}')
print(f'{self.nint=}')
print(f'{self.nglob=}')
print(f'{self.gathered_labs=}')
print(f'### info about minV0.V0s = (Ker(Atildes)) ###')
print(f'{self.minV0.mumpsCntl3=}')
print(f'###info about Vnegs = rank(Anegs) = coarse components for proj3')
print(f'{self.gathered_nneg=}')
print(f'{np.sum(self.gathered_nneg)=}')
if (self.ksp_Atildes.pc.getFactorSolverType() == 'mumps'):
print(f'dim(Ker(Atildes)) = {self.minV0.gathered_dim}')
else:
print(f'Ker(Atildes) not computed because pc is not mumps')
if self.GenEO == True:
print(f'### info about GenEOV0.V0s ###')
print(f'{self.GenEOV0.tau_eigmax=}')
print(f'{self.GenEOV0.tau_eigmin=}')
print(f'{self.GenEOV0.eigmax=}')
print(f'{self.GenEOV0.eigmin=}')
print(f'{self.GenEOV0.nev=}')
print(f'{self.GenEOV0.maxev=}')
print(f'{self.GenEOV0.mumpsCntl3=}')
print(f'{self.GenEOV0.verbose=}')
print(f'{self.GenEOV0.gathered_nsharp=}')
print(f'{self.GenEOV0.gathered_nflat=}')
#print(f'{self.GenEOV0.gathered_dimKerMs=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdasharp)=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdaflat)=}')
print(f'### info about the preconditioner for Apos ###')
print(f'{self.proj2.V0_is_global=}')
if(self.proj2.V0_is_global == False):
print(f'{self.proj2.gathered_dimV0s=}')
if self.GenEO == True:
print(f'global dim V0 for Apos = {self.proj2.dim} = ({np.sum(self.gathered_nneg)} from Vneg ) + ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes)) + ({np.sum(self.GenEOV0.gathered_nsharp)} from GenEO_eigmax) + ({np.sum(self.GenEOV0.gathered_nflat) } from GenEO_eigmin)')
else:
print(f'global dim V0 for Apos = {np.sum(self.proj2.gathered_dimV0s)} = ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes))')
if self.compute_ritz_apos and self.ritz_eigs_apos is not None:
print(f'Estimated kappa(H2 Apos) = {self.ritz_eigs_apos.max()/self.ritz_eigs_apos.min() }; with lambdamin = {self.ritz_eigs_apos.min()} and lambdamax = {self.ritz_eigs_apos.max()}')
print('#############################')
self.savetofile()
def savetofile(self):
if mpi.COMM_WORLD.rank == 0:
if not os.path.exists(self.test_case):
os.mkdir(self.test_case)
np.savez(f'{self.test_case}/init',
switchtoASM = self.switchtoASM,
verbose = self.verbose,
GenEO = self.GenEO,
H3addCS = self.H3addCS,
H3projCS = self.H3projCS,
H2projCS = self.H2projCS,
viewPC = self.viewPC,
viewV0 = self.viewV0,
viewGenEOV0 = self.viewGenEOV0,
viewnegV0 = self.viewnegV0,
viewminV0 = self.viewminV0,
compute_ritz_apos = self.compute_ritz_apos,
mult_max = self.mult_max,
gathered_ns = self.gathered_ns,
gathered_nints = self.gathered_nints,
gathered_Gammas = self.gathered_Gammas,
nGamma = self.nGamma,
nint = self.nint,
nglob = self.nglob,
minV0_mumpsCntl3 = self.minV0.mumpsCntl3,
gathered_labs = np.asarray(self.gathered_labs, dtype='object'),
import numpy as np
import pytest
from doctr.utils import metrics
@pytest.mark.parametrize(
"gt, pred, raw, caseless, unidecode, unicase",
[
[['grass', '56', 'True', 'EUR'], ['grass', '56', 'true', '€'], .5, .75, .75, 1],
[['éléphant', 'ça'], ['elephant', 'ca'], 0, 0, 1, 1],
],
)
def test_text_match(gt, pred, raw, caseless, unidecode, unicase):
metric = metrics.TextMatch()
with pytest.raises(AssertionError):
metric.summary()
with pytest.raises(AssertionError):
metric.update(['a', 'b'], ['c'])
metric.update(gt, pred)
assert metric.summary() == dict(raw=raw, caseless=caseless, unidecode=unidecode, unicase=unicase)
metric.reset()
assert metric.raw == metric.caseless == metric.unidecode == metric.unicase == metric.total == 0
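# Non-test sketch of the same API outside pytest (the example strings are
# assumptions, not fixtures from this suite): update() accumulates ground-truth /
# prediction pairs and summary() reports the four match rates checked above.
def _demo_text_match():
    metric = metrics.TextMatch()
    metric.update(["grass", "56"], ["grass", "56"])
    return metric.summary()  # dict with keys raw / caseless / unidecode / unicase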
@pytest.mark.parametrize(
"box1, box2, iou, abs_tol",
[
[[[0, 0, .5, .5]], [[0, 0, .5, .5]], 1, 0], # Perfect match
[[[0, 0, .5, .5]], [[.5, .5, 1, 1]], 0, 0], # No match
[[[0, 0, 1, 1]], [[.5, .5, 1, 1]], 0.25, 0], # Partial match
[[[.2, .2, .6, .6]], [[.4, .4, .8, .8]], 4 / 28, 1e-7], # Partial match
[[[0, 0, .1, .1]], [[.9, .9, 1, 1]], 0, 0], # Boxes far from each other
[np.zeros((0, 4)), [[0, 0, .5, .5]], 0, 0], # Zero-sized inputs
[[[0, 0, .5, .5]], np.zeros((0, 4)), 0, 0], # Zero-sized inputs
],
)
def test_box_iou(box1, box2, iou, abs_tol):
iou_mat = metrics.box_iou(np.asarray(box1), np.asarray(box2))
assert iou_mat.shape == (len(box1), len(box2))
if iou_mat.size > 0:
assert abs(iou_mat - iou) <= abs_tol
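# Reference computation (plain Python, independent of doctr's implementation) for
# the axis-aligned IoU values parametrized above, e.g. the 4/28 partial-match case.
def _iou_xyxy(a, b):
    # a, b: [xmin, ymin, xmax, ymax] in relative coordinates
    left, top = max(a[0], b[0]), max(a[1], b[1])
    right, bot = min(a[2], b[2]), min(a[3], b[3])
    inter = max(right - left, 0.) * max(bot - top, 0.)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union > 0 else 0.
# e.g. _iou_xyxy([.2, .2, .6, .6], [.4, .4, .8, .8]) -> 0.04 / 0.28 = 4 / 28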
@pytest.mark.parametrize(
"mask1, mask2, iou, abs_tol",
[
[
[[[True, True, False], [True, True, False]]],
[[[True, True, False], [True, True, False]]],
1,
0
], # Perfect match
[
[[[True, False, False], [False, False, False]]],
[[[True, True, False], [True, True, False]]],
0.25,
0
], # Partial match
],
)
def test_mask_iou(mask1, mask2, iou, abs_tol):
iou_mat = metrics.mask_iou(np.asarray(mask1), np.asarray(mask2))
assert iou_mat.shape == (len(mask1), len(mask2))
if iou_mat.size > 0:
assert abs(iou_mat - iou) <= abs_tol
# Incompatible spatial shapes
with pytest.raises(AssertionError):
metrics.mask_iou(np.zeros((2, 3, 5), dtype=bool), np.ones((3, 2, 5), dtype=bool))
@pytest.mark.parametrize(
"rbox1, rbox2, iou, abs_tol",
[
[[[.25, .25, .5, .5, 0.]], [[.25, .25, .5, .5, 0.]], 1, 0], # Perfect match
[[[.25, .25, .5, .5, 0.]], [[.75, .75, .5, .5, 0.]], 0, 1e-4], # No match
[[[.5, .5, 1, 1, 0.]], [[.75, .75, .5, .5, 0.]], 0.25, 0], # Partial match
[[[.4, .4, .4, .4, 0.]], [[.6, .6, .4, .4, 0.]], 4 / 28, 5e-3], # Partial match
[[[.05, .05, .1, .1, 0.]], [[.95, .95, .1, .1, 0.]], 0, 0], # Boxes far from each other
[np.zeros((0, 5)), [[.25, .25, .5, .5, 0.]], 0, 0], # Zero-sized inputs
[[[.25, .25, .5, .5, 0.]], np.zeros((0, 5)), 0, 0], # Zero-sized inputs
],
)
def test_rbox_iou(rbox1, rbox2, iou, abs_tol):
mask_shape = (256, 256)
iou_mat = metrics.rbox_iou(np.asarray(rbox1), np.asarray(rbox2), mask_shape)
assert iou_mat.shape == (len(rbox1), len(rbox2))
if iou_mat.size > 0:
assert abs(iou_mat - iou) <= abs_tol
# Ensure broadcasting doesn't change the result
iou_matbis = metrics.rbox_iou(np.asarray(rbox1), np.asarray(rbox2), mask_shape, use_broadcasting=False)
assert np.all((iou_mat - iou_matbis) <= 1e-7)
# Incorrect boxes
with pytest.raises(AssertionError):
metrics.rbox_iou(np.zeros((2, 5), dtype=float), np.ones((3, 4), dtype=float), mask_shape)
@pytest.mark.parametrize(
"box, shape, mask",
[
[
[0, 0, .5, .5, 0], (2, 2),
[[True, False], [False, False]],
],
],
)
def test_rbox_to_mask(box, shape, mask):
masks = metrics.rbox_to_mask(np.asarray(box)[None, ...], shape)
assert masks.shape == (1, *shape)
assert np.all(masks[0] == np.asarray(mask, dtype=bool))
@pytest.mark.parametrize(
"gts, preds, iou_thresh, recall, precision, mean_iou",
[
[[[[0, 0, .5, .5]]], [[[0, 0, .5, .5]]], 0.5, 1, 1, 1], # Perfect match
[[[[0, 0, 1, 1]]], [[[0, 0, .5, .5], [.6, .6, .7, .7]]], 0.2, 1, 0.5, 0.125], # Bad match
[[[[0, 0, 1, 1]]], [[[0, 0, .5, .5], [.6, .6, .7, .7]]], 0.5, 0, 0, 0.125], # Bad match
[[[[0, 0, .5, .5]], [[0, 0, .5, .5]]], [[[0, 0, .5, .5]], None], 0.5, 0.5, 1, 1], # No preds on 2nd sample
],
)
def test_localization_confusion(gts, preds, iou_thresh, recall, precision, mean_iou):
metric = metrics.LocalizationConfusion(iou_thresh)
for _gts, _preds in zip(gts, preds):
metric.update(np.asarray(_gts), np.zeros((0, 4)) if _preds is None else np.asarray(_preds))
assert metric.summary() == (recall, precision, mean_iou)
metric.reset()
assert metric.num_gts == metric.num_preds == metric.matches == metric.tot_iou == 0
@pytest.mark.parametrize(
"gts, preds, iou_thresh, recall, precision, mean_iou",
[
[[[[.1, .1, .1, .1, 0]]], [[[.1, .1, .1, .1, 0]]], 0.5, 1, 1, 1], # Perfect match
[[[[.15, .1, .1, .1, 0]]], [[[.2, .1, .2, .1, 0], [.7, .7, .2, .2, 0]]], 0.2, 1, 0.5, 0.25], # Bad match
[[[[.1, .1, .1, .1, 0]], [[.3, .3, .1, .1, 0]]], [[[.1, .1, .1, .1, 0]], None], 0.5, 0.5, 1, 1], # Empty
],
)
def test_r_localization_confusion(gts, preds, iou_thresh, recall, precision, mean_iou):
metric = metrics.LocalizationConfusion(iou_thresh, rotated_bbox=True, mask_shape=(1000, 1000))
for _gts, _preds in zip(gts, preds):
metric.update(np.asarray(_gts), np.zeros((0, 5)) if _preds is None else np.asarray(_preds))
assert metric.summary()[:2] == (recall, precision)
assert abs(metric.summary()[2] - mean_iou) <= 5e-3
metric.reset()
assert metric.num_gts == metric.num_preds == metric.matches == metric.tot_iou == 0
@pytest.mark.parametrize(
"gt_boxes, gt_words, pred_boxes, pred_words, iou_thresh, recall, precision, mean_iou",
[
[ # Perfect match
[[[0, 0, .5, .5]]], [["elephant"]],
[[[0, 0, .5, .5]]], [["elephant"]],
0.5,
{"raw": 1, "caseless": 1, "unidecode": 1, "unicase": 1},
{"raw": 1, "caseless": 1, "unidecode": 1, "unicase": 1},
1,
],
[ # Bad match
[[[0, 0, .5, .5]]], [["elefant"]],
[[[0, 0, .5, .5]]], [["elephant"]],
0.5,
{"raw": 0, "caseless": 0, "unidecode": 0, "unicase": 0},
{"raw": 0, "caseless": 0, "unidecode": 0, "unicase": 0},
1,
],
[ # Good match
[[[0, 0, 1, 1]]], [["EUR"]],
[[[0, 0, .5, .5], [.6, .6, .7, .7]]], [["€", "e"]],
0.2,
{"raw": 0, "caseless": 0, "unidecode": 1, "unicase": 1},
{"raw": 0, "caseless": 0, "unidecode": .5, "unicase": .5},
0.125,
],
[ # No preds on 2nd sample
[[[0, 0, .5, .5]], [[0, 0, .5, .5]]], [["Elephant"], ["elephant"]],
[[[0, 0, .5, .5]], None], [["elephant"], []],
0.5,
{"raw": 0, "caseless": .5, "unidecode": 0, "unicase": .5},
{"raw": 0, "caseless": 1, "unidecode": 0, "unicase": 1},
1,
],
],
)
def test_ocr_metric(
gt_boxes, gt_words, pred_boxes, pred_words, iou_thresh, recall, precision, mean_iou
):
metric = metrics.OCRMetric(iou_thresh)
for _gboxes, _gwords, _pboxes, _pwords in zip(gt_boxes, gt_words, pred_boxes, pred_words):
metric.update(
np.asarray(_gboxes),
np.zeros((0, 4)) if _pboxes is None else np.asarray(_pboxes),
_gwords,
_pwords
)
_recall, _precision, _mean_iou = metric.summary()
assert _recall == recall
assert _precision == precision
assert _mean_iou == mean_iou
metric.reset()
assert metric.num_gts == metric.num_preds == metric.tot_iou == 0
assert metric.raw_matches == metric.caseless_matches == metric.unidecode_matches == metric.unicase_matches == 0
# Shape check
with pytest.raises(AssertionError):
metric.update(
np.asarray(_gboxes),
np.zeros((0, 4)),
_gwords,
["I", "have", "a", "bad", "feeling", "about", "this"],
)
@pytest.mark.parametrize(
"gt_boxes, gt_classes, pred_boxes, pred_classes, iou_thresh, recall, precision, mean_iou",
[
[ # Perfect match
[[[0, 0, .5, .5]]], [[0]],
[[[0, 0, .5, .5]]], [[0]],
0.5, 1, 1, 1,
],
[ # Bad match
[[[0, 0, .5, .5]]], [[0]],
[[[0, 0, .5, .5]]], [[1]],
0.5, 0, 0, 1,
],
[ # No preds on 2nd sample
[[[0, 0, .5, .5]], [[0, 0, .5, .5]]], [[0], [1]],
[[[0, 0, .5, .5]], None], [[0], []],
0.5, .5, 1, 1,
],
],
)
def test_detection_metric(
gt_boxes, gt_classes, pred_boxes, pred_classes, iou_thresh, recall, precision, mean_iou
):
metric = metrics.DetectionMetric(iou_thresh)
for _gboxes, _gclasses, _pboxes, _pclasses in zip(gt_boxes, gt_classes, pred_boxes, pred_classes):
metric.update(
np.asarray(_gboxes),
np.zeros((0, 4)) if _pboxes is None else np.asarray(_pboxes),
np.array(_gclasses, dtype=np.int64),
np.array(_pclasses, dtype=np.int64),
)
_recall, _precision, _mean_iou = metric.summary()
assert _recall == recall
assert _precision == precision
assert _mean_iou == mean_iou
metric.reset()
assert metric.num_gts == metric.num_preds == metric.tot_iou == 0
assert metric.num_matches == 0
# Shape check
with pytest.raises(AssertionError):
metric.update(
np.asarray(_gboxes),
np.zeros((0, 4)),
np.array(_gclasses, dtype=np.int64),
import os
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import mikeio
from mikeio import Dataset, Dfsu, Dfs2, Dfs0
from mikeio.eum import EUMType, ItemInfo, EUMUnit
@pytest.fixture
def ds1():
nt = 10
ne = 7
d1 = np.zeros([nt, ne]) + 0.1
d2 = np.zeros([nt, ne]) + 0.2
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
return Dataset(data, time, items)
@pytest.fixture
def ds2():
nt = 10
ne = 7
d1 = np.zeros([nt, ne]) + 1.0
d2 = np.zeros([nt, ne]) + 2.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
return Dataset(data, time, items)
def test_create_wrong_data_type_error():
data = ["item 1", "item 2"]
nt = 2
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
with pytest.raises(TypeError, match="numpy"):
Dataset(data=data, time=time)
def test_get_names():
data = []
nt = 100
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds.items[0].name == "Foo"
assert ds.items[0].type == EUMType.Undefined
assert repr(ds.items[0].unit) == "undefined"
def test_select_subset_isel():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel(10, axis=1)
assert len(selds.items) == 2
assert len(selds.data) == 2
assert selds["Foo"].shape == (100, 30)
assert selds["Foo"][0, 0] == 2.0
assert selds["Bar"][0, 0] == 3.0
def test_select_subset_isel_axis_out_of_range_error(ds2):
assert len(ds2.shape) == 2
dss = ds2.isel(idx=0)
# After subsetting there is only one dimension
assert len(dss.shape) == 1
with pytest.raises(ValueError):
dss.isel(idx=0, axis="spatial")
def test_select_temporal_subset_by_idx():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel([0, 1, 2], axis=0)
assert len(selds) == 2
assert selds["Foo"].shape == (3, 100, 30)
def test_temporal_subset_fancy():
nt = (24 * 31) + 1
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-1", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
assert ds.time[0].hour == 0
assert ds.time[-1].hour == 0
selds = ds["2000-01-01 00:00":"2000-01-02 00:00"]
assert len(selds) == 2
assert selds["Foo"].shape == (25, 100, 30)
selds = ds[:"2000-01-02 00:00"]
assert selds["Foo"].shape == (25, 100, 30)
selds = ds["2000-01-31 00:00":]
assert selds["Foo"].shape == (25, 100, 30)
selds = ds["2000-01-30":]
assert selds["Foo"].shape == (49, 100, 30)
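# Non-test sketch of the same label-based time slicing (the sizes below are
# assumptions, smaller than the fixtures used in the tests above):
def _demo_time_slice():
    nt = 49
    time = pd.date_range("2000-1-1", freq="H", periods=nt)
    ds = Dataset([np.zeros([nt, 10, 3])], time, [ItemInfo("Foo")])
    return ds["2000-01-01 00:00":"2000-01-02 00:00"]  # 25 hourly steps, as in the test above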
def test_subset_with_datetime_is_not_supported():
nt = (24 * 31) + 1
d1 = np.zeros([nt, 100, 30])
#!/usr/bin/env python
__copyright__ = """
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions: The above copyright notice and this permission
notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import numpy as np
import tensorflow as tf
from detection_3d.parameters import Parameters
from detection_3d.tools.training_helpers import setup_gpu
from detection_3d.detection_dataset import DetectionDataset
from detection_3d.tools.visualization_tools import visualize_2d_boxes_on_top_image
from detection_3d.tools.file_io import save_bboxes_to_file
from detection_3d.tools.detection_helpers import (
make_eight_points_boxes,
get_boxes_from_box_grid,
get_bboxes_parameters_from_points,
)
from PIL import Image
from tqdm import tqdm
import timeit
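# validation_inference (below): loads a saved tf.keras model, iterates over the
# validation split of DetectionDataset, renders predicted vs. ground-truth boxes
# on the lidar top-view image, and prepares per-sequence output folders under
# <output_dir>/top_view and <output_dir>/bboxes (the saving code is truncated in
# this excerpt).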
def validation_inference(param_settings, dataset_file, model_dir, output_dir):
setup_gpu()
# Load model
model = tf.keras.models.load_model(model_dir)
bbox_voxel_size = np.asarray(param_settings["bbox_voxel_size"], dtype=np.float32)
lidar_coord = np.array(param_settings["lidar_offset"], dtype=np.float32)
grid_meters = param_settings["grid_meters"]
val_dataset = DetectionDataset(param_settings, dataset_file, shuffle=False)
param_settings["val_size"] = val_dataset.num_samples
for val_samples in tqdm(
val_dataset.dataset, desc=f"val_inference", total=val_dataset.num_it_per_epoch,
):
top_view, gt_boxes, lidar_filenames = val_samples
predictions = model(top_view, training=False)
for image, predict, gt, filename in zip(
top_view, predictions, gt_boxes, lidar_filenames
):
filename = str(filename.numpy())
seq_folder = filename.split("/")[-3]
name = os.path.splitext(os.path.basename(filename))[0]
# Ensure that output dir exists or create it
top_view_dir = os.path.join(output_dir, "top_view", seq_folder)
bboxes_dir = os.path.join(output_dir, "bboxes", seq_folder)
os.makedirs(top_view_dir, exist_ok=True)
os.makedirs(bboxes_dir, exist_ok=True)
p_top_view = (
visualize_2d_boxes_on_top_image(
[predict], [image], grid_meters, bbox_voxel_size, prediction=True,
)
* 255
)
gt_top_view = (
visualize_2d_boxes_on_top_image(
[gt], [image], grid_meters, bbox_voxel_size, prediction=False,
)
* 255
)
result = np.vstack((p_top_view[0], gt_top_view[0]))
import numpy as np
from time import time
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import env_utils as envu
import pylab
import matplotlib
import render_traj_sensor
class Env(object):
def __init__(self, ic_gen, lander, dynamics, logger,
render_func=render_traj_sensor.render_traj,
glideslope_constraint=None,
attitude_constraint=None,
debug_steps=False,
w_constraint=None,
rh_constraint=None,
reward_object=None,
debug_done=False,
nav_period=10,
tf_limit=5000.0, allow_plotting=True, print_every=1,):
self.nav_period = nav_period
self.debug_done = debug_done
self.debug_steps = debug_steps
self.logger = logger
self.lander = lander
self.rl_stats = RL_stats(lander,logger,render_func, print_every=print_every,allow_plotting=allow_plotting)
self.tf_limit = tf_limit
self.display_errors = False
self.dynamics = dynamics
self.allow_plotting = allow_plotting
self.ic_gen = ic_gen
self.episode = 0
self.glideslope_constraint = glideslope_constraint
self.attitude_constraint = attitude_constraint
self.w_constraint = w_constraint
self.rh_constraint = rh_constraint
self.reward_object = reward_object
if allow_plotting:
plt.clf()
plt.cla()
print('lander env RHL')
def reset(self):
self.lander.sensor.reset()
self.ic_gen.set_ic(self.lander, self.dynamics)
self.glideslope_constraint.reset(self.lander.state)
#self.lander.sensor.reset(self.lander.state)
self.rh_constraint.reset()
self.steps = 0
self.t = 0.0
self.lander.clear_trajectory()
image, state = self.lander.get_state_agent(self.t)
self.lander.update_trajectory(False, self.t)
return image, state
def check_for_done(self,lander):
done = False
vc = envu.get_vc(lander.state['position'], lander.state['velocity'])
if self.attitude_constraint.get_margin(lander.state) < 0.0 and self.attitude_constraint.terminate_on_violation:
done = True
if self.debug_done:
print('Attitude: ', self.attitude_constraint.get_margin(lander.state) , self.steps)
if self.w_constraint.get_margin(lander.state) < 0.0 and self.w_constraint.terminate_on_violation:
done = True
if self.debug_done:
print('Rot Vel: ', self.w_constraint.get_margin(lander.state) , self.steps)
if self.rh_constraint.get_margin(lander.state) < 0.0 and self.rh_constraint.terminate_on_violation:
done = True
if self.debug_done:
print('RH Const: ', self.rh_constraint.get_margin(lander.state) , self.steps)
if self.t > self.tf_limit:
done = True
if self.debug_done:
print('Timeout: ', self.steps)
return done
def step(self,action):
action = action.copy()
if len(action.shape) > 1:
action = action[0]
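        # The agent issues one command per navigation period; the dynamics are
        # integrated at the finer solver step h, so each environment step runs
        # up to nav_period / h sub-steps (fewer if the episode terminates early).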
steps_to_sim = int(np.round(self.nav_period / self.dynamics.h))
self.lander.prev_state = self.lander.state.copy()
BT, F, L, mdot = self.lander.thruster_model.thrust(action)
for i in range(steps_to_sim):
self.dynamics.next(self.t, BT, F, L, mdot, self.lander)
self.t += self.dynamics.h
done = self.check_for_done(self.lander)
if done:
break
self.steps+=1
self.glideslope_constraint.calculate(self.lander.state)
self.rh_constraint.step(self.lander.state)
#######################
# this is expensive, so only do it once per step
# 1.) update the sensor state
# 2.) check for sensor violation
# 3.) get reward
# 4.) update lander trajectory
#
image, state = self.lander.get_state_agent(self.t)
if self.lander.state['sensor_miss']:
done = True
if self.debug_done:
print('FOV VIO')
if self.steps <= 5 and self.debug_steps:
print('FEW STEPS: ')
print(self.lander.trajectory['position'])
print(self.lander.trajectory['velocity'])
print(self.lander.trajectory['thrust'])
########################
reward,reward_info = self.reward_object.get( self.lander, action, done, self.steps,
self.glideslope_constraint, self.attitude_constraint,
self.w_constraint, self.rh_constraint)
self.lander.update_trajectory(done, self.t)
if done:
self.episode += 1
return image, state ,reward,done,reward_info
def test_policy_batch(self, agent , n, print_every=100, use_ts=False, keys=None, test_mode=True):
t0 = time()
if keys is None:
keys1 = ['norm_vf', 'norm_rf', 'position', 'velocity', 'fuel', 'attitude_321', 'w' ]
keys2 = ['thrust']
keys = keys1 + keys2
all_keys = self.lander.get_engagement_keys()
print('worked 1')
agent.policy.test_mode = test_mode
self.lander.use_trajectory_list = True
self.episode = 0
self.lander.trajectory_list = []
self.display_errors = True
for i in range(n):
agent.run_episode()
for k in all_keys:
if not k in keys:
self.lander.trajectory_list[-1][k] = None
#self.test_policy_episode(policy, input_normalizer,use_ts=use_ts)
if i % print_every == 0 and i > 0:
print('i (et): %d (%16.0f)' % (i,time()-t0 ) )
t0 = time()
self.lander.show_cum_stats()
print(' ')
self.lander.show_final_stats(type='final')
print('')
self.lander.show_cum_stats()
print('')
self.lander.show_final_stats(type='final')
print('')
self.lander.show_final_stats(type='ic')
def test_policy_batch_notm(self, agent , n, print_every=100, use_ts=False):
print('worked')
self.lander.use_trajectory_list = True
self.episode = 0
self.lander.trajectory_list = []
self.display_errors = True
for i in range(n):
agent.run_episode()
#self.test_policy_episode(policy, input_normalizer,use_ts=use_ts)
if i % print_every == 0 and i > 0:
print('i : ',i)
self.lander.show_cum_stats()
print('')
self.lander.show_final_stats(type='final')
print('')
self.lander.show_cum_stats()
print('')
self.lander.show_final_stats(type='final')
print('')
self.lander.show_final_stats(type='ic')
class RL_stats(object):
def __init__(self,lander,logger,render_func,allow_plotting=True,print_every=1, vf=None, scaler=None):
self.logger = logger
self.render_func = render_func
self.lander = lander
self.scaler = scaler
self.vf = vf
self.keys = ['r_f', 'v_f', 'r_i', 'v_i', 'norm_rf', 'norm_vf', 'thrust', 'norm_thrust','fuel', 'rewards', 'fuel_rewards',
'norm_af', 'norm_wf', 'rh_penalty',
'att_rewards', 'att_penalty', 'attitude', 'attitude_error', 'w', 'a_f', 'w_f',
'w_rewards', 'w_penalty', 'pos_error', 'alt_error', 'alt_vc',
'landing_rewards','landing_margin', 'tracking_rewards', 'steps']
self.formats = {}
for k in self.keys:
self.formats[k] = '{:8.2f}'
self.formats['steps'] = '{:8.0f}'
self.formats['alt_vc'] = '{:8.4f}'
self.formats['alt_error'] = '{:8.4f}'
self.stats = {}
self.history = { 'Episode' : [] , 'MeanReward' : [], 'StdReward' : [] , 'MinReward' : [], 'Policy_KL' : [], 'Policy_Beta' : [], 'Variance' : [], 'Policy_Entropy' : [], 'ExplainedVarNew' : [] ,
'Norm_rf' : [], 'Norm_vf' : [], 'SD_rf' : [], 'SD_vf' : [], 'Max_rf' : [], 'Max_vf' : [],
'Model ExpVarOld' : [], 'Model P Loss Old' : [],
'Norm_af' : [], 'Norm_wf' : [], 'SD_af' : [], 'SD_wf' : [], 'Max_af' : [], 'Max_wf' : [], 'MeanSteps' : [], 'MaxSteps' : []}
self.plot_learning = self.plot_agent_learning
self.clear()
self.allow_plotting = allow_plotting
self.last_time = time()
self.update_cnt = 0
self.episode = 0
self.print_every = print_every
if allow_plotting:
plt.clf()
plt.cla()
self.fig2 = plt.figure(2,figsize=plt.figaspect(0.5))
self.fig3 = plt.figure(3,figsize=plt.figaspect(0.5))
self.fig4 = plt.figure(4,figsize=plt.figaspect(0.5))
self.fig5 = plt.figure(5,figsize=plt.figaspect(0.5))
self.fig6 = plt.figure(6,figsize=plt.figaspect(0.5))
self.fig7 = plt.figure(7,figsize=plt.figaspect(0.5))
def save_history(self,fname):
np.save(fname + "_history", self.history)
def load_history(self,fname):
self.history = np.load(fname + ".npy")
def clear(self):
for k in self.keys:
self.stats[k] = []
def update_episode(self,sum_rewards,steps):
self.stats['rewards'].append(sum_rewards)
self.stats['fuel_rewards'].append(np.sum(self.lander.trajectory['fuel_reward']))
self.stats['tracking_rewards'].append(np.sum(self.lander.trajectory['tracking_reward']))
self.stats['att_penalty'].append(np.sum(self.lander.trajectory['att_penalty']))
self.stats['attitude_error'].append(np.sum(self.lander.trajectory['attitude_error']))
self.stats['rh_penalty'].append(np.sum(self.lander.trajectory['rh_penalty']))
self.stats['pos_error'].append(self.lander.trajectory['pos_error'])
self.stats['alt_error'].append(self.lander.trajectory['alt_error'])
self.stats['alt_vc'].append(self.lander.trajectory['alt_vc'])
self.stats['att_rewards'].append(np.sum(self.lander.trajectory['att_reward']))
#self.stats['att_rewards'].append(np.asarray(self.lander.trajectory['tracking_reward']))
self.stats['w_penalty'].append(np.sum(self.lander.trajectory['w_penalty']))
self.stats['w_rewards'].append(np.sum(self.lander.trajectory['w_reward']))
self.stats['landing_rewards'].append(np.sum(self.lander.trajectory['landing_reward']))
self.stats['attitude'].append(self.lander.trajectory['attitude_321'])
self.stats['w'].append(self.lander.trajectory['w'])
self.stats['landing_margin'].append(np.sum(self.lander.trajectory['landing_margin']))
self.stats['r_f'].append(self.lander.trajectory['position'][-1])
self.stats['v_f'].append(self.lander.trajectory['velocity'][-1])
self.stats['w_f'].append(self.lander.trajectory['w'][-1])
self.stats['a_f'].append(self.lander.trajectory['attitude_321'][-1][1:3])
self.stats['r_i'].append(self.lander.trajectory['position'][0])
self.stats['v_i'].append(self.lander.trajectory['velocity'][0])
self.stats['norm_rf'].append(self.lander.trajectory['norm_rf'][-1])
self.stats['norm_vf'].append(self.lander.trajectory['norm_vf'][-1])
self.stats['norm_af'].append(np.linalg.norm(self.lander.trajectory['attitude_321'][-1][1:3])) # don't care about yaw
self.stats['norm_wf'].append(np.linalg.norm(self.lander.trajectory['w'][-1]))
self.stats['norm_thrust'].append(np.linalg.norm(self.lander.trajectory['thrust'],axis=1))
self.stats['thrust'].append(self.lander.trajectory['thrust'])
self.stats['fuel'].append(np.linalg.norm(self.lander.trajectory['fuel'][-1]))
self.stats['steps'].append(steps)
self.episode += 1
def check_and_append(self,key):
if key not in self.logger.log_entry.keys():
val = 0.0
else:
val = self.logger.log_entry[key]
self.history[key].append(val)
# called by render at policy update
def show(self):
self.history['MeanReward'].append(np.mean(self.stats['rewards']))
self.history['StdReward'].append(np.std(self.stats['rewards']))
self.history['MinReward'].append(np.min(self.stats['rewards']))
self.check_and_append('Policy_KL')
self.check_and_append('Policy_Beta')
self.check_and_append('Variance')
self.check_and_append('Policy_Entropy')
self.check_and_append('ExplainedVarNew')
self.check_and_append('Model ExpVarOld')
self.check_and_append('Model P Loss Old')
self.history['Episode'].append(self.episode)
self.history['Norm_rf'].append(np.mean(self.stats['norm_rf']))
self.history['SD_rf'].append(np.mean(self.stats['norm_rf']+np.std(self.stats['norm_rf'])))
self.history['Max_rf'].append(np.max(self.stats['norm_rf']))
self.history['Norm_vf'].append(np.mean(self.stats['norm_vf']))
self.history['SD_vf'].append(np.mean(self.stats['norm_vf']+np.std(self.stats['norm_vf'])))
self.history['Max_vf'].append(np.max(self.stats['norm_vf']))
self.history['Norm_af'].append(np.mean(self.stats['norm_af']))
self.history['SD_af'].append(np.mean(self.stats['norm_af']+np.std(self.stats['norm_af'])))
self.history['Max_af'].append(np.max(self.stats['norm_af']))
self.history['Norm_wf'].append(np.mean(self.stats['norm_wf']))
self.history['SD_wf'].append(np.mean(self.stats['norm_wf']+np.std(self.stats['norm_wf'])))
self.history['Max_wf'].append(np.max(self.stats['norm_wf']))
self.history['MeanSteps'].append(np.mean(self.stats['steps']))
self.history['MaxSteps'].append(np.max(self.stats['steps']))
if self.allow_plotting:
self.render_func(self.lander.trajectory,vf=self.vf,scaler=self.scaler)
self.plot_rewards()
self.plot_learning()
self.plot_rf()
self.plot_vf()
self.plot_af()
self.plot_wf()
if self.update_cnt % self.print_every == 0:
self.show_stats()
self.clear()
self.update_cnt += 1
def show_stats(self):
et = time() - self.last_time
self.last_time = time()
r_f = np.linalg.norm(self.stats['r_f'],axis=1)
v_f = np.linalg.norm(self.stats['v_f'],axis=1)
f = '{:6.2f}'
print('Update Cnt = %d ET = %8.1f Stats: Mean, Std, Min, Max' % (self.update_cnt,et))
for k in self.keys:
f = self.formats[k]
v = self.stats[k]
if k == 'thrust' or k=='tracking_error' or k=='norm_thrust' or k=='attitude' or k=='w' or k=='alt_vc' or k=='pos_error' or k=='alt_error':
v = np.concatenate(v)
                v = np.asarray(v)
from unittest import TestCase
import os
import os.path as osp
import shutil
import textwrap
import numpy as np
from datumaro.components.annotation import Bbox, Label
from datumaro.components.config_model import Model, Source
from datumaro.components.dataset import DEFAULT_FORMAT, Dataset
from datumaro.components.errors import (
DatasetMergeError, EmptyCommitError, ForeignChangesError,
MismatchingObjectError, MissingObjectError, MissingSourceHashError,
OldProjectError, PathOutsideSourceError, ReadonlyProjectError,
SourceExistsError, SourceUrlInsideProjectError,
)
from datumaro.components.extractor import DatasetItem, Extractor, ItemTransform
from datumaro.components.launcher import Launcher
from datumaro.components.project import DiffStatus, Project
from datumaro.util.scope import scope_add, scoped
from datumaro.util.test_utils import TestDir, compare_datasets, compare_dirs
from .requirements import Requirements, mark_requirement
class ProjectTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_can_init_and_load(self):
test_dir = scope_add(TestDir())
scope_add(Project.init(test_dir)).close()
scope_add(Project(test_dir))
self.assertTrue('.datumaro' in os.listdir(test_dir))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_can_find_project_in_project_dir(self):
test_dir = scope_add(TestDir())
scope_add(Project.init(test_dir))
self.assertEqual(osp.join(test_dir, '.datumaro'),
Project.find_project_dir(test_dir))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_cant_find_project_when_no_project(self):
test_dir = scope_add(TestDir())
self.assertEqual(None, Project.find_project_dir(test_dir))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_can_add_local_model(self):
class TestLauncher(Launcher):
pass
source_name = 'source'
config = Model({
'launcher': 'test',
'options': { 'a': 5, 'b': 'hello' }
})
test_dir = scope_add(TestDir())
project = scope_add(Project.init(test_dir))
project.env.launchers.register('test', TestLauncher)
project.add_model(source_name,
launcher=config.launcher, options=config.options)
added = project.models[source_name]
self.assertEqual(added.launcher, config.launcher)
self.assertEqual(added.options, config.options)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_can_run_inference(self):
class TestLauncher(Launcher):
def launch(self, inputs):
for inp in inputs:
yield [ Label(inp[0, 0, 0]) ]
expected = Dataset.from_iterable([
DatasetItem(0, image=np.zeros([2, 2, 3]), annotations=[Label(0)]),
DatasetItem(1, image=np.ones([2, 2, 3]), annotations=[Label(1)])
], categories=['a', 'b'])
launcher_name = 'custom_launcher'
model_name = 'model'
test_dir = scope_add(TestDir())
source_url = osp.join(test_dir, 'source')
source_dataset = Dataset.from_iterable([
DatasetItem(0, image=np.ones([2, 2, 3]) * 0),
DatasetItem(1, image=np.ones([2, 2, 3]) * 1),
], categories=['a', 'b'])
source_dataset.save(source_url, save_images=True)
project = scope_add(Project.init(osp.join(test_dir, 'proj')))
project.env.launchers.register(launcher_name, TestLauncher)
project.add_model(model_name, launcher=launcher_name)
project.import_source('source', source_url, format=DEFAULT_FORMAT)
dataset = project.working_tree.make_dataset()
model = project.make_model(model_name)
inference = dataset.run_model(model)
compare_datasets(self, expected, inference)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_can_import_local_source(self):
test_dir = scope_add(TestDir())
source_base_url = osp.join(test_dir, 'test_repo')
source_file_path = osp.join(source_base_url, 'x', 'y.txt')
os.makedirs(osp.dirname(source_file_path), exist_ok=True)
with open(source_file_path, 'w') as f:
f.write('hello')
project = scope_add(Project.init(osp.join(test_dir, 'proj')))
project.import_source('s1', url=source_base_url, format='fmt')
source = project.working_tree.sources['s1']
self.assertEqual('fmt', source.format)
compare_dirs(self, source_base_url, project.source_data_dir('s1'))
with open(osp.join(test_dir, 'proj', '.gitignore')) as f:
self.assertTrue('/s1' in [line.strip() for line in f])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_can_import_local_source_with_relpath(self):
# This form must copy all the data in URL, but read only
# specified files. Required to support subtasks and subsets.
test_dir = scope_add(TestDir())
source_url = osp.join(test_dir, 'source')
source_dataset = Dataset.from_iterable([
DatasetItem(0, subset='a', image=np.ones((2, 3, 3)),
annotations=[ Bbox(1, 2, 3, 4, label=0) ]),
DatasetItem(1, subset='b', image=np.zeros((10, 20, 3)),
annotations=[ Bbox(1, 2, 3, 4, label=1) ]),
], categories=['a', 'b'])
source_dataset.save(source_url, save_images=True)
expected_dataset = Dataset.from_iterable([
            DatasetItem(1, subset='b', image=np.zeros((10, 20, 3)),
import tequila as tq
import qpfas
import numpy as np
from typing import List, Dict
######################################## General Workflow Functions ##################################################
def create_molecule(molecule_tag: str,
basis: str,
active_space,
path: str = None,
mol_index=None) -> qpfas.chemistry.Molecule:
"""
Create a qpfas Molecule object from one of the default molecules in the default_molecules
directory or from the directory specified by path
"""
# load structure
if mol_index:
molecule_qpfas = qpfas.chemistry.Molecule.from_mult_xyz(molecule_tag, path=path, distortion_index=mol_index)
else:
molecule_qpfas = qpfas.chemistry.Molecule.from_xyz(molecule_tag, path=path)
# assign basis
molecule_qpfas.basis = basis
# get active space
if isinstance(active_space, list):
molecule_qpfas.active_orbitals = active_space
else:
if (not active_space) or (active_space == "None") or (active_space == "full"):
molecule_qpfas.active_orbitals = list(range(qpfas.chemistry.number_of_orbitals(molecule_qpfas)))
elif active_space == 'frozen_core':
molecule_qpfas.active_orbitals = qpfas.chemistry.get_frozen_core_active_orbitals(molecule_qpfas)
elif "noons" in active_space:
tol = float(active_space.split("_")[1])
nat_orbitals = qpfas.chemistry.NaturalOccupations(molecule_qpfas)
molecule_qpfas.active_orbitals = nat_orbitals.split_space(tol)
else:
raise Exception(f"Active Space Method{active_space} not recognised")
return molecule_qpfas
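# A minimal usage sketch (not part of the original module); the molecule tag,
# basis and active-space choice are illustrative assumptions:
#
#     mol = create_molecule("H2", basis="sto-3g", active_space="frozen_core")
#     print(mol.active_orbitals)  # orbital indices kept after freezing the core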
def get_molecule_tq(molecule_qpfas: qpfas.chemistry.Molecule,
transformation: str,
) -> tq.chemistry.Molecule:
"""
    Generate the second-quantised Tequila molecule from a qpfas molecule object
"""
return tq.chemistry.Molecule(molecule_qpfas.to_tequila(),
basis_set=molecule_qpfas.basis,
transformation=transformation,
active_orbitals=molecule_qpfas.active_orbitals)
def molecule_make_hamiltonian(molecule: tq.chemistry.Molecule):
"""
Make the Qubitized Hamiltonian
"""
return molecule.make_hamiltonian()
def compute_benchmark_energies(molecule: tq.chemistry.Molecule,
benchmarks: List[str]) -> Dict:
"""
    This uses psi4 to perform the classical benchmarks.
"""
energy_benchmarks = {}
for method in ["hf", "mp2", "ccsd", "fci"]:
if method in benchmarks:
energy_benchmarks[method + "_energy"] = molecule.compute_energy(method=method)
return energy_benchmarks
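# Hedged example of the returned structure (assuming psi4 is available and the
# requested methods converge): compute_benchmark_energies(molecule_tq, ["hf", "fci"])
# returns a dict of the form {"hf_energy": ..., "fci_energy": ...}.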
def compute_benchmark_energies_pyscf(molecule_qpfas: qpfas.chemistry.Molecule,
benchmarks: List[str]) -> Dict:
"""
This is an alternative to compute_benchmark_energies. Instead of using psi4 it uses
    pyscf to perform the classical benchmarks.
"""
full_space = list(range(qpfas.chemistry.number_of_orbitals(molecule_qpfas)))
frozen_orbitals = list(set(full_space) - set(molecule_qpfas.active_orbitals))
frozen_orbitals.sort()
print("Frozen orbitals:", frozen_orbitals)
return qpfas.chemistry.run_pyscf(molecule_qpfas, benchmarks, frozen_orbitals)
def get_n_qubits(molecule: tq.chemistry.Molecule):
if molecule.active_space is not None:
return 2*len(molecule.active_space.active_orbitals)
else:
return 2*molecule.n_orbitals
def get_ansatz(molecule_tq: tq.chemistry.Molecule,
ansatz_method: str,
ansatz_depth: int
) -> tq.circuit.circuit.QCircuit:
"""
    Generate an ansatz circuit for the given Tequila molecule. Supported families:
    - UCCSD variants
    - hardware-efficient variants
"""
if ansatz_method == "uccsd":
return molecule_tq.make_uccsd_ansatz(trotter_steps=ansatz_depth)
elif ansatz_method == "kupccgsd":
return molecule_tq.make_upccgsd_ansatz(order=ansatz_depth)
elif ansatz_method == "hardware":
return qpfas.chemistry.hardware_ansatz(get_n_qubits(molecule_tq), ansatz_depth)
elif ansatz_method == "hardwareconserving":
return qpfas.chemistry.hardware_pc_ansatz(get_n_qubits(molecule_tq), molecule_tq.n_electrons, ansatz_depth)
else:
raise Exception("Method '%s' not in available ansatzes" % ansatz_method)
def get_ansatz_list_convention(molecule_tq: tq.chemistry.Molecule,
ansatz_name_depth: List):
"""A helper function if one wants to create an ansatz from a list convention.
    i.e. [<ansatz_name>, <ansatz_depth>]
"""
return get_ansatz(molecule_tq, ansatz_name_depth[0], ansatz_name_depth[1])
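# Illustrative call using the list convention (the depth value is an assumption):
# get_ansatz_list_convention(molecule_tq, ["kupccgsd", 2]) builds an order-2
# k-UpCCGSD circuit for molecule_tq.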
######################################## VQE Functions ###############################################################
def vqe_wrapper(ansatz: list,
qubit_hamiltonian_tq: tq.QubitHamiltonian,
molecule_tq: tq.chemistry.Molecule,
optimizer: str,
samples,
backend: str = "qulacs"):
"""
    A wrapper function for the different VQE workflows.
"""
ansatz_method, ansatz_depth = ansatz
result_dict = {"n_hamiltonian_terms": get_number_of_terms(qubit_hamiltonian_tq)}
if "adapt" in ansatz_method:
_, pool = ansatz_method.split("-")
run_output = run_adapt_vqe(qubit_hamiltonian_tq, ansatz_depth, molecule_tq, optimizer, pool, samples, backend)
ansatz_circuit = run_output.U
result_dict["vqe_output"] = {"converged_energy": run_output.energy,
"energy_history": list(np.concatenate([i.energies for i in run_output.histories])),
"n_iterations": len(run_output.histories),
"n_params": len(run_output.variables),
"converged_params": {str(i): run_output.variables[i] for i in run_output.variables}}
elif ansatz_method == "tapering":
ansatz_circuit, result, tapering_dict = run_tapering_vqe(qubit_hamiltonian_tq, ansatz_depth, optimizer, samples, backend, "best")
result_dict["vqe_output"] = {"converged_energy": result.energy,
"energy_history": list(result.history.energies),
"n_iterations": len(result.history.energies),
"n_params": len(result.variables),
"converged_params": {str(i): result.variables[i] for i in result.variables}}
result_dict["tapering_data"] = tapering_dict
else:
ansatz_circuit = get_ansatz(molecule_tq, ansatz_method, ansatz_depth)
run_output = run_vqe(qubit_hamiltonian_tq, ansatz_circuit, optimizer, ansatz_method, samples, backend)
result_dict["vqe_output"] = {"converged_energy": run_output.energy,
"energy_history": list(run_output.history.energies),
"n_iterations": len(run_output.history.energies),
"n_params": len(run_output.variables),
"converged_params": {str(i): run_output.variables[i] for i in run_output.variables}}
return result_dict, ansatz_circuit
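# Hedged usage sketch for the wrapper (names and settings are illustrative
# assumptions, not values from this file):
#
#     result_dict, circuit = vqe_wrapper(["uccsd", 1], H_tq, molecule_tq,
#                                        optimizer="COBYLA", samples=0)
#     print(result_dict["vqe_output"]["converged_energy"])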
def run_vqe(qubit_hamiltonian_tq: tq.QubitHamiltonian,
ansatz_circuit: tq.circuit.circuit.QCircuit,
optimizer: str = "COBYLA",
initialization: str = "zeros",
samples: int = 0,
backend: str = "qulacs"
) -> tq.optimizers.optimizer_scipy.SciPyResults:
"""
Runs the supplied minimization algorithm on the supplied objective function and returns
Tequila's SciPyResults object storing the result.
"""
if initialization in ["zeros", "uccsd", "kupccgsd"]:
initial_values = {k: 0.0 for k in ansatz_circuit.extract_variables()}
elif initialization in ["random", "hardware", "hardwareconserving"]:
tau = 2.*np.pi
initial_values = {k: tau*np.random.rand() for k in ansatz_circuit.extract_variables()}
else:
raise Exception("initialization '%s' not recognized" % initialization)
if samples == 0:
samples = None
energy_objective = _energy_objective_function(qubit_hamiltonian_tq, ansatz_circuit, samples)
return tq.optimizer_scipy.minimize(objective=energy_objective,
method=optimizer,
backend=backend,
samples=samples,
initial_values=initial_values,
tol=1e-3)
def run_tapering_vqe(qubit_hamiltonian_tq: tq.QubitHamiltonian,
depth: int,
optimizer: str,
samples: int = 0,
backend: str = "qulacs",
return_type: str = "best"):
"""
Runs the VQE algorithm with tapering
"""
taper = qpfas.chemistry.TaperQubits(qubit_hamiltonian_tq.to_openfermion())
taper.compute_generators()
taper.transform_hamiltonian()
n_qubits = taper.num_qubits - taper.nullity
ansatz_circuit = qpfas.chemistry.hardware_ansatz(n_qubits, depth) # must use ansatz that can generate arbitrary states
results = []
sectors = []
for i in range(2**taper.nullity):
sec = bin(i)[2:]
sec = "0" * (taper.nullity - len(sec)) + sec
sec = [int(j) for j in sec]
taper.remove_qubits(sec)
qubit_hamiltonian_tq = tq.QubitHamiltonian.from_openfermion(taper.tapered_hamiltonian)
vqe_run = run_vqe(qubit_hamiltonian_tq, ansatz_circuit, optimizer, "random", samples, backend)
results.append(vqe_run)
sectors.append(sec)
if return_type == "best":
indx = np.argmin([i.energy for i in results])
tapering_dict = {"sector": sectors[indx],
"qubit_set": taper.qubit_set,
"qubits_removed": len(taper.qubit_set)}
return ansatz_circuit, results[indx], tapering_dict
elif return_type == "all":
return results
else:
raise Exception("Return type must be either 'best' or 'all'")
def run_adapt_vqe(qubit_hamiltonian_tq: tq.QubitHamiltonian,
norm_tolerance,
molecule: tq.chemistry.Molecule,
optimizer: str,
pool: str = "UpCCSD",
samples: int = 0,
backend: str = "qulacs"):
"""
Method based on "An adaptive variational algorithm for exact molecular simulations on a quantum computer"
https://www.nature.com/articles/s41467-019-10988-2
"""
if samples == 0:
samples = None
operator_pool = tq.adapt.MolecularPool(molecule=molecule, indices=pool)
solver = tq.adapt.Adapt(H=qubit_hamiltonian_tq,
Upre=molecule.prepare_reference(),
operator_pool=operator_pool,
optimizer_args={"method": optimizer},
compile_args={"samples": samples, "backend": backend},
gradient_convergence=norm_tolerance,
energy_convergence=1e-3)
return solver(operator_pool=operator_pool, label=0)
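# Sketch of the loop that tq.adapt.Adapt runs above (a paraphrase of ADAPT-VQE,
# not code from this project): evaluate the energy gradient of every pool
# operator on the current state, append the operator with the largest gradient,
# re-optimise all circuit parameters, and repeat until the gradient norm drops
# below `norm_tolerance` or the energy improvement falls below 1e-3.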
######################################## Results Gathering ###########################################################
def split_vqe_wrapper_results(results):
"""A helper function for workflow"""
return results[0]
def split_vqe_wrapper_ansatz(results):
"""A helper function for workflow"""
return results[1]
def get_gate_dict(ansatz):
variables = {i: np.random.rand() for i in ansatz.extract_variables()} # necessary for qasm conversion
qasm_str = tq.export_open_qasm(ansatz, variables=variables)
qasm_str = qasm_str.split("\n")[4:-1]
    qasm_str = np.array([i[:2] for i in qasm_str])
# Authors: <NAME> <<EMAIL>>
"""
----------------------------------------------------------------------
--- jumeg.decompose.fourier_ica --------------------------------------
----------------------------------------------------------------------
author : <NAME>
email : <EMAIL>
last update: 09.11.2016
version : 1.2
----------------------------------------------------------------------
This simple implementation of ICASSO is based on the following
publication:
----------------------------------------------------------------------
<NAME>, <NAME>, and <NAME>. 'Validating the
independent components of neuroimaging time-series via
clustering and visualization', Neuroimage, 22:3(1214-1222), 2004.
Should you use this code, we kindly request you to cite the
aforementioned publication.
<http://research.ics.aalto.fi/ica/icasso/about+download.shtml
DOWNLOAD ICASSO from here>
----------------------------------------------------------------------
Overview
----------------------------------------------------------------------
Perform ICASSO estimation. ICASSO is based on running ICA
multiple times with slightly different conditions and
clustering the obtained components. Note, here FourierICA
is applied
1. Runs ICA with given parameters M times on data X.
2. Clusters the estimates and computes other statistics.
3. Returns (and visualizes) the best estimates.
----------------------------------------------------------------------
How to use ICASSO?
----------------------------------------------------------------------
from jumeg.decompose import icasso
    icasso_obj = JuMEG_icasso()
W, A, quality, fourier_ica_obj = icasso_obj.fit(fn_raw, stim_name='STI 013',
event_id=1, tmin_stim=-0.5,
tmax_stim=0.5, flow=4.0, fhigh=34.0)
--> for further comments we refer directly to the functions or to
fourier_ica_test.py
----------------------------------------------------------------------
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
########################################################
# #
# JuMEG_icasso class #
# #
########################################################
class JuMEG_icasso(object):
def __init__(self, ica_method='fourierica', average=False, nrep=50,
fn_inv=None, src_loc_method='dSPM', snr=1.0,
morph2fsaverage=True, stim_name=None, event_id=1,
flow=4.0, fhigh=34.0, tmin_win=0.0, tmax_win=1.0,
pca_dim=None, dim_reduction='MDL', conv_eps=1e-9,
max_iter=2000, tICA=False, lrate=1.0, cost_function=None,
decim_epochs=False):
"""
Generate ICASSO object.
Parameters
----------
ica_method: string which ICA method should be used
default: ica_method='FourierICA'
average: should ICA be performed on data averaged above
subjects?
default: average=False
nrep: number of repetitions ICA should be performed
default: nrep=50
fn_inv: file name of inverse operator. If given
FourierICA is applied on data transformed to
source space
src_loc_method: method used for source localization.
Only of interest if 'fn_inv' is set
default: src_loc_method='dSPM'
snr: signal-to-noise ratio for performing source
localization
default: snr=1.0
morph2fsaverage: should data be morphed to the
'fsaverage' brain?
default: morph2fsaverage=True
stim_name: string which contains the name of the
stimulus channel. Only necessary if ICA should
be applied to evoked data.
event_id: integer of list of integer containing the
event IDs which should be used to generate epochs
default: event_id=1
flow: lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
                Note: here default flow and fhigh are chosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
tmin_win: time of interest prior to stimulus onset.
Important for generating epochs to apply FourierICA
default=0.0
tmax_win: time of interest after stimulus onset.
Important for generating epochs to apply FourierICA
default=1.0
dim_reduction: {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
pca_dim: Integer. The number of components used for PCA
decomposition.
conv_eps: iteration stops when weight changes are smaller
then this number
default: conv_eps = 1e-9
max_iter: integer containing the maximal number of
iterations to be performed in ICA estimation
default: max_iter=2000
            tICA: bool, if True temporal ICA is applied instead of
                FourierICA
default: tICA=False
            lrate: float containing the learning rate which should be
used in the applied ICA algorithm
default: lrate=1.0
            cost_function: string containing the cost-function to
                use in the applied ICA algorithm. For further information
look in fourier_ica.py
default: cost_funtion=None
decim_epochs: integer. If set the number of epochs used
to estimate the optimal demixing matrix is decimated
to the given number.
default: decim_epochs=False
Returns
-------
object: ICASSO object
"""
self._ica_method = ica_method
self.average = average
self._nrep = nrep
self.fn_inv = fn_inv
self.src_loc_method = src_loc_method
self.snr = snr
self.morph2fsaverage = morph2fsaverage
self.whitenMat = [] # whitening matrix
self.dewhitenMat = [] # de-whitening matrix
self.W_est = [] # de-mixing matrix
self.A_est = [] # mixing matrix
self.dmean = [] # data mean
self.dstd = [] # data standard-deviation
self.stim_name = stim_name
self.event_id = event_id
self.flow = flow
self.fhigh = fhigh
self._sfreq = 0.0
self.tmin_win = tmin_win
self.tmax_win = tmax_win
# ICA parameter
self.conv_eps = conv_eps # stopping threshold
self.max_iter = max_iter
self.lrate = lrate # learning rate for the ICA algorithm
self.tICA = tICA # should temporal ICA be performed?
self.pca_dim = pca_dim
self.dim_reduction= dim_reduction
self.cost_function = cost_function
self.decim_epochs = decim_epochs
# make sure to chose meaningful parameters
# when not FourierICA is used
if self.ica_method != 'fourierica':
if conv_eps == 1e-9:
self.conv_eps = 1e-12 # stopping threshold
if max_iter == 2000:
self.max_iter = 200
if lrate == 1:
self.lrate = None # learning rate for the ICA algorithm
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get maximum number of repetitions
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_nrep(self, nrep):
self._nrep = nrep
def _get_nrep(self):
return int(self._nrep)
nrep = property(_get_nrep, _set_nrep)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get ICA method
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_ica_method(self, ica_method):
possible_methods = ['extended-infomax', 'fastica',
'fourierica', 'infomax']
if ica_method in possible_methods:
self._ica_method = ica_method
else:
print('WARNING: chosen ICA method does not exist!')
print('Must be one of the following methods: ', possible_methods)
print('But your choice was: ', ica_method)
            print('Program stops!')
import pdb
pdb.set_trace()
def _get_ica_method(self):
return self._ica_method
ica_method = property(_get_ica_method, _set_ica_method)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate linkage between components
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _linkage(self, dis):
# initialize some variables
dlen, dim = dis.shape
Md = dis.copy()
Md += np.diag(np.ones(dlen)*np.inf)
# ------------------------------------------
# estimate clusters
# ------------------------------------------
# --> each vector is at first in its own cluster
Z = np.zeros((dlen-1, 3)) + np.NaN
clusters = np.arange(dlen)
Cdist = Md.copy()
for idx in np.arange(dlen-1):
d_min = np.min(Cdist)
if np.isinf(d_min):
break # no more connected clusters
else:
min_idx = np.argmin(np.min(Cdist, axis=0))
c1 = np.argmin(Cdist[:, min_idx]) # cluster1
c2 = clusters[min_idx] # cluster2
# combine the two clusters
c1_inds = (clusters == c1).nonzero()[0] # vectors belonging to c1
c2_inds = (clusters == c2).nonzero()[0] # vectors belonging to c2
c_inds = np.concatenate((c1_inds, c2_inds)) # members of the new cluster
nc_inds = len(c_inds)
# find bigger cluster
if len(c2_inds) > len(c1_inds):
c, k = c2, c1
else:
c, k = c1, c2
clusters[c_inds] = c # update cluster info
Z[idx, :] = [c, k, d_min] # save info into Z
# ------------------------------------------
# update cluster distances
# ------------------------------------------
# remove the subclusters from the cdist table
for idxC in c_inds:
Cdist[idxC, c_inds] = np.Inf # distance of clusters to its members = Inf
k_inds = c_inds[c_inds != c] # vector of the smallest cluster
Cdist[k_inds, :] = np.Inf # set distance of the subcluster to
Cdist[:, k_inds] = np.Inf # other clusters = Inf
# update the distance of this cluster to the other clusters
idxC = (clusters != c).nonzero()[0]
if len(idxC) > 0:
cl = np.unique(clusters[idxC])
for l in cl:
o_inds = (clusters == l).nonzero()[0] # indices belonging to cluster k
no_inds = len(o_inds)
vd = np.zeros((nc_inds, no_inds))
for ivd in range(nc_inds):
vd[ivd, :] = Md[c_inds[ivd], o_inds]
vd = vd.flatten()
idxvd = np.isfinite(vd).nonzero()[0]
nidxvd = len(idxvd)
sd = np.Inf if nidxvd == 0 else np.sum(vd[idxvd])/nidxvd
Cdist[c, l] = sd
Cdist[l, c] = sd
last = Z[idx, 0]
if np.isnan(last):
last = Z[idx-1, 0]
rest = np.setdiff1d(np.unique(clusters), last)
Z[idx:dlen-2, 0] = rest.transpose()
Z[idx:dlen-2, 1] = last
Z[idx:dlen-2, 2] = np.Inf
idx -= 1
else:
rest = []
# ------------------------------------------
# return values
# ------------------------------------------
# calculate the order of the samples
order = np.array([last])
# go through the combination matrix from top to down
for k in range(idx, -1, -1):
c_var = Z[k, 0]
k_var = np.array([Z[k, 1]])
idx_var = np.where(order == c_var)[0]
if len(idx_var) == 0:
order = np.concatenate((k_var, order))
else:
order = np.concatenate((order[:idx_var[0]], k_var, order[idx_var[0]:]))
order = np.concatenate((rest, order))[::-1]
# to maintain compatibility with Statistics Toolbox, the values
# in Z must be yet transformed so that they are similar to the
# output of the LINKAGE function
Zs = Z.copy()
current_cluster = np.array(list(range(dlen)))
iter_stop = len(Z[:, 0])
for idx in range(iter_stop):
Zs[idx, 0] = current_cluster[int(Z[idx, 0])]
Zs[idx, 1] = current_cluster[int(Z[idx, 1])]
current_cluster[int(Z[idx, 0])] = dlen + idx
current_cluster[int(Z[idx, 1])] = dlen + idx
return Zs, order
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _corrw(self):
# get some dimension information
npc = int(self.W_est[0].shape[0])
nchan = int(self.W_est[0].shape[1])
ntimes = int(len(self.W_est))
# save estimated demixing matrices W in one matrix
weight = np.zeros((ntimes*npc, nchan), dtype=np.complex)
for idx in range(ntimes):
weight[(idx*npc):((idx+1)*npc), :] = self.W_est[idx]
weight = np.dot(weight, self.dewhitenMat)
# normalize rows to unit length
weight_norm = np.abs(np.sqrt(np.sum(weight*weight.conj(), axis=1))).reshape((npc*ntimes, 1))
weight /= np.repeat(weight_norm, npc, axis=1)
# compute similarities
similarities = np.abs(np.dot(weight, weight.conj().transpose()))
similarities[similarities > 1] = 1
similarities[similarities < 0] = 0
return similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# generate partitions from Z
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _z_to_partition(self, Z):
nz = Z.shape[0] + 1
C = np.zeros((nz, nz))
C[0, :] = np.arange(nz)
for ic in range(1, nz):
C[ic, :] = C[ic-1, :]
idx = (C[ic, :] == Z[ic-1, 0]) + (C[ic, :] == Z[ic-1, 1])
C[ic, idx == 1] = nz - 1 + ic
for ic in range(nz):
uniqC = np.unique(C[ic, :])
newidx = []
for elemC in C[ic, :]:
newidx = np.concatenate((newidx, (uniqC == elemC).nonzero()[0]))
C[ic, :] = newidx
idx = list(range(nz-1, -1, -1))
partition = C[idx, :]
return partition
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# compute cluster statistics
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _clusterstat(self, S, partitions):
# number of clusters
Ncluster = int(np.max(partitions) + 1)
# initialize dictionary
stat = {'internal_sum': np.zeros(Ncluster) * np.NaN,
'internal_min': np.zeros(Ncluster) * np.NaN,
'internal_avg': np.zeros(Ncluster) * np.NaN,
'internal_max': np.zeros(Ncluster) * np.NaN,
'external_sum': np.zeros(Ncluster) * np.NaN,
'external_min': np.zeros(Ncluster) * np.NaN,
'external_avg': np.zeros(Ncluster) * np.NaN,
'external_max': np.zeros(Ncluster) * np.NaN,
'between_min': np.zeros((Ncluster, Ncluster)),
'between_avg': np.zeros((Ncluster, Ncluster)),
'between_max': np.zeros((Ncluster, Ncluster))}
for cluster in range(Ncluster):
thisPartition = np.where(partitions == cluster)[0]
nthisPartition = len(thisPartition)
S_ = np.zeros((nthisPartition, nthisPartition))
for i in range(nthisPartition):
S_[i, :] = S[thisPartition[i], thisPartition]
S_[list(range(nthisPartition)), list(range(nthisPartition))] = np.NaN
S_ = S_[np.isfinite(S_)]
if len(S_) > 0:
stat['internal_sum'][cluster] = np.sum(S_)
stat['internal_min'][cluster] = np.min(S_)
stat['internal_avg'][cluster] = np.mean(S_)
stat['internal_max'][cluster] = np.max(S_)
if Ncluster > 1:
cthisPartition = np.where(partitions != cluster)[0]
ncthisPartition = len(cthisPartition)
S_ = np.zeros((nthisPartition, ncthisPartition))
for i in range(nthisPartition):
S_[i, :] = S[thisPartition[i], cthisPartition]
stat['external_sum'][cluster] = np.sum(S_)
stat['external_min'][cluster] = np.min(S_)
stat['external_avg'][cluster] = np.mean(S_)
stat['external_max'][cluster] = np.max(S_)
for i in range(Ncluster):
Pi = np.where(i == partitions)[0]
for j in range(i+1, Ncluster):
Pj = np.where(j == partitions)[0]
d_ = np.zeros((len(Pi), len(Pj)))
for iPi in range(len(Pi)):
d_[iPi, :] = S[Pi[iPi], Pj]
stat['between_min'][i, j] = np.min(d_)
stat['between_avg'][i, j] = np.mean(d_)
stat['between_max'][i, j] = np.max(d_)
stat['between_min'] += stat['between_min'].transpose()
stat['between_avg'] += stat['between_avg'].transpose()
stat['between_max'] += stat['between_max'].transpose()
return stat
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate the R-index as defined in
# <NAME>., <NAME>., 2001. 'Resampling method for
# unsupervised estimation of cluster validity'.
# Neural Comput. 13 (11), 2573-2593.
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
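    # In rough notation, the R-index computed below (lower is better) is
    #     R = (1/L) * sum_j  S_within(j) / min_{j' != j} S_between(j, j'),
    # i.e. the average within-cluster dissimilarity divided by the distance to
    # the closest other cluster, averaged over the L clusters of a partition.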
def _rindex(self, dissimilarities, partitions, verbose=True):
nPart = partitions.shape[0]
# number of clusters in each partition
Ncluster = np.max(partitions, axis=1)
ri = np.zeros(nPart)
if verbose:
print(">>> Computing R-index...")
for k in range(nPart):
hist, bin_edges = np.histogram(partitions[k, :], bins=np.arange(1, Ncluster[k]+2))
if any(hist == 1):
# contains one-item clusters (index very doubtful)
ri[k] = np.NaN
elif Ncluster[k] == 0:
# degenerate partition (all in the same cluster)
ri[k] = np.NaN
else:
# compute cluster statistics
stat = self._clusterstat(dissimilarities, partitions[k, :])
between = stat['between_avg']
between[list(range(len(between))), list(range(len(between)))] = np.Inf
internal = stat['internal_avg'].transpose()
ri[k] = np.mean(internal/np.min(between, axis=0))
return ri
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate clusters
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _cluster(self, verbose=True):
# ------------------------------------------
# compute dissimilarities
# ------------------------------------------
similarities = self._corrw()
dissimilarities = 1.0 - similarities
# ------------------------------------------
# generate partitions
# ------------------------------------------
Z, order = self._linkage(dissimilarities)
partitions = self._z_to_partition(Z)
# ------------------------------------------
# compute cluster validity
# ------------------------------------------
npc = int(self.W_est[0].shape[0])
indexR = self._rindex(dissimilarities, partitions[:npc, :], verbose=verbose)
return Z, order, partitions, indexR, dissimilarities, similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate curve that decreases from v0 to vn with a
# rate that is somewhere between linear and 1/t
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _potency_curve(self, v0, vn, t):
return v0 * ((1.0*vn/v0)**(np.arange(t)/(t-1.0)))
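    # For example (a quick sanity check, not from the original source):
    # _potency_curve(10.0, 1.0, 5) -> approx. [10.0, 5.62, 3.16, 1.78, 1.0],
    # i.e. a geometric (log-linear) interpolation from v0 down to vn.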
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# compute principal coordinates (using linear
# Metric Multi-Dimensional Scaling)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _mmds(self, D):
nD = D.shape[0]
# square dissimilarities
D2 = D**2
# center matrix
Z = np.identity(nD) - np.ones((nD, nD))/(1.0 * nD)
# double centered inner product
B = -0.5 * np.dot(Z, np.dot(D2, Z))
# SVD
U, sing, V = np.linalg.svd(B)
# coordinates
X = np.dot(U, np.diag(np.sqrt(sing)))
return X
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# projects data vectors using Curvilinear Component
# Analysis
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _cca(self, D, P, epochs, Mdist, alpha0, lambda0):
# check input data
noc, dim = D.shape
noc_x_1 = np.zeros(noc, dtype=np.int)
        me = np.zeros(dim)
#!/opt/anaconda/bin/python
# -*- coding: utf-8 -*-
# Unfortunately the `which` way of calling python can't accept command-line arguments.
"""
Created on Mon Nov 03 16:13:48 2014
@author: <NAME>
@email: <EMAIL> OR <EMAIL>
A selection of alignment routines designed for registering and summing stacks
of images or diffraction patterns in the field of electron microscopy.
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
if np.version.version.split('.')[1] == '7':
print( "WARNING: NUMPY VERSION 1.7 DETECTED, ZORRO IS DESIGNED FOR >1.10" )
print( "CHECK YOUR ENVIRONMENT VARIABLES TO SEE IF EMAN2 HAS HIJACKED YOUR PYTHON DISTRIBUTION" )
import numexprz as nz
# Now see which numexpr we have, by the dtype of float (whether it casts or not)
try:
tdata = np.complex64( 1.0 + 2.0j )
fftw_dtype = nz.evaluate( 'tdata + tdata' ).dtype
float_dtype = nz.evaluate( 'real(tdata+tdata)' ).dtype
except:
fftw_dtype = 'complex128'
float_dtype = 'float64'
import scipy.optimize
import scipy.ndimage
import scipy.stats
import time
try:
import ConfigParser as configparser
except:
import configparser # Python 3
# Here we have to play some games depending on where the file was called from
# with the use of absolute_import
# print( "__name__ of zorro: " + str(__name__) )
try:
import zorro_util as util
import zorro_plotting as plot
except ImportError:
from . import zorro_util as util
from . import zorro_plotting as plot
import mrcz
import os, os.path, tempfile, sys
import subprocess
# Should we disable Multiprocessing on Windows due to general bugginess in the module?
import multiprocessing as mp
try:
import pyfftw
except:
print( "Zorro did not find pyFFTW package: get it at https://pypi.python.org/pypi/pyFFTW" )
try:
import tables
except:
print( "Zorro did not find pyTables installation for HDF5 file support" )
import matplotlib.pyplot as plt
# Numpy.pad is bad at dealing with interpreted strings
if sys.version_info >= (3,0):
symmetricPad = u'symmetric'
constantPad = u'constant'
else:
symmetricPad = b'symmetric'
constantPad = b'constant'
#### OBJECT-ORIENTED INTERFACE ####
class ImageRegistrator(object):
# Should be able to handle differences in translation, rotation, and scaling
# between images
def __init__( self ):
# Declare class members
self.verbose = 0
self.umask = 2
# Meta-information for processing, not saved in configuration files.
self.METApriority = 0.0
self.METAstatus = u'new'
self.METAmtime = 0.0
self.METAsize = 0
self.xcorrMode = 'zorro' # 'zorro', 'unblur v1.02', 'motioncorr v2.1'
# FFTW_PATIENT is bugged for powers of 2, so use FFTW_MEASURE as default
self.fftw_effort = u"FFTW_MEASURE"
# TODO: change this to drop into cachePath
self.n_threads = nz.nthreads # Number of cores to limit FFTW to, if None uses all cores
self.cachePath = tempfile.gettempdir()
# CALIBRATIONS
self.pixelsize = None # Typically we use nanometers, the same units as Digital Micrograph
self.voltage = 300.0 # Accelerating voltage, kV
self.C3 = 2.7 # Spherical aberration of objective, mm
self.gain = None
self.detectorPixelSize = None # Physical dimensions of detector pixel (5 um for K2)
# Timings
self.bench = {} # Dict holds various benchmark times for the code
self.saveC = False # Save the cross-correlation within +/- maxShift
# INFORMATION REDUCTION
# The SNR at high spatial frequencies tends to be lower due to how information transfer works, so
# removing/filtering those frequencies can improve stability of the registration. YMMV, IMHO, etc.
self.Brad = 512 # Gaussian low-pass applied to data before registration, units are radius in Fourier space, or equivalent point-spread function in real-space
self.Bmode = u'opti' # can be a real-space Gaussian convolution, 'conv' or Fourier filter, 'fourier', or 'opti' for automatic Brad
# For Bmode = 'fourier', a range of available filters can be used: gaussian, gauss_trunc, butterworth.order (order is an int), hann, hamming
self.BfiltType = u'gaussian'
self.fouCrop = [3072,3072] # Size of FFT in frequency-space to crop to (e.g. [2048,2048])
self.reloadData = True
# Data
self.images = None
self.imageSum = None
self.filtSum = None # Dose-filtered, Wiener-filtered, etc. representations go here
self.gainRef = None # For application of gain reference in Zorro rather than Digital Micrograph/TIA/etc.
self.gainInfo = {
"Horizontal": True, "Vertical": True, "Diagonal":False,
"GammaParams": [ 0.12035633, -1.04171635, -0.03363192, 1.03902726],
}
# One of None, 'dose', 'dose,background', 'dosenorm', 'gaussLP', 'gaussLP,background'
# also 'hot' can be in the comma-seperated list for pre-filtering of hot pixels
self.filterMode = None
# Dose filt param = [dosePerFrame, critDoseA, critDoseB, critDoseC, cutoffOrder, missingStartFrame]
self.doseFiltParam = [None, 0.24499, -1.6649, 2.8141, 32, 0]
# for 'hot' in filterMode
self.hotpixInfo = { u"logisticK":6.0, u"relax":0.925, u"maxSigma":8.0, u"psf": u"K2",
u"guessHotpix":0, u"guessDeadpix":0, u"decorrOutliers":False,
u"cutoffLower":-4.0, u"cutoffUpper":3.25, u"neighborPix":0 }
self.FFTSum = None
# If you want to use one mask, it should have dims [1,N_Y,N_X]. This is
# to ensure Cythonized code can interact safely with Numpy
self.incohFouMag = None # Incoherent Fourier magnitude, for CTF determination, resolution checks
self.masks = None
self.maskSum = None
self.C = None
# Results
self.translations = None
self.transEven = None # For even-odd tiled FRC, the half-stack translations
self.transOdd = None # For even-odd tiled FRC, the half-stack translations
self.velocities = None # pixel velocity, in pix/frame, to find frames that suffer from excessive drift
self.rotations = None # rotations, for polar-transformed data
self.scales = None # scaling, for polar-transformed data
self.errorDictList = [] # A list of dictionaries of errors and such from different runs on the same data.
self.trackCorrStats = False
self.corrStats = None
self.doLazyFRC = True
self.doEvenOddFRC = False
self.FRC = None # A Fourier ring correlation
# Filtering
# TODO: add more fine control over filtering options
# CTF currently supports CTFFIND4.1 or GCTF
self.CTFProgram = None # None, "ctffind4.1", or "gctf", 'ctffind4.1,sum' works on (aligned) sum, same for 'gctf,sum'
self.CTFInfo = { u'DefocusU':None, u'DefocusV': None, u'DefocusAngle':None, u'CtfFigureOfMerit':None,
u'FinalResolution': None, u'AmplitudeContrast':0.07, u'AdditionalPhaseShift':None,
}
self.CTFDiag = None # Diagnostic image from CTFFIND4.1 or GCTF
# DEPRICATED ctf stuff
#self.doCTF = False
#self.CTF4Results = None # Micrograph number, DF1, DF2, Azimuth, Additional Phase shift, CC, and max spacing fit-to
#self.CTF4Diag = None
# Registration parameters
self.shapePadded = [4096,4096]
self.shapeOriginal = None
self.shapeBinned = None
self.subPixReg = 16 # fraction of a pixel to REGISTER image shift to
# Subpixel alignment method: None (shifts still registered subpixally), lanczos, or fourier
# lanczos is cheaper computationally and has fewer edge artifacts
self.shiftMethod = u'lanczos'
self.maxShift = 100 # Generally should be 1/2 distance to next lattice spacing
# Pre-shift every image by that of the previous frame, useful for high-resolution where one can jump a lattice
# i.e. should be used with small values for maxShift
self.preShift = False
# Solver weighting can be raw max correlation coeffs (None), normalized to [0,1] by the
# min and max correlations ('norm'), or 'logistic' function weighted which
# requires corrThres to be set.
self.peakLocMode = u'interpolated' # interpolated (oversampled), or a RMS-best fit like fitlaplacian
self.weightMode = u'autologistic' # autologistic, normalized, unweighted, logistic, or corr
self.peaksigThres = 6.0
self.logisticK = 5.0
self.logisticNu = 0.15
self.originMode = u'centroid' # 'centroid' or None
self.suppressOrigin = True # Delete the XC pixel at (0,0). Only necessary if gain reference is bad, but defaults to on.
# Triangle-matrix indexing parameters
self.triMode = u'diag' # Can be: tri, diag, auto, first
self.startFrame = 0
self.endFrame = 0
self.diagStart = 0 # XC to neighbour frame on 0, next-nearest neighbour on +1, etc.
self.diagWidth = 5
self.autoMax = 10
self.corrThres = None # Use with 'auto' mode to stop doing cross-correlations if the values drop below the threshold
self.velocityThres = None # Pixel velocity threshold (pix/frame), above which to throw-out frames with too much motion blur.
#### INPUT/OUTPUT ####
self.files = { u"config":None, u"stack":None, u"mask":None, u"sum":None,
u"align":None, u"figurePath":None, u"xc":None,
u"moveRawPath":None, u"original":None, u"gainRef":None,
u"stdout": None, u"automatch":None, u"rejected":None,
u"compressor": None, u"clevel": 1 }
#self.savePDF = False
self.savePNG = True
self.saveMovie = True
self.doCompression = False
self.compress_ext = ".bz2"
#### PLOTTING ####
self.plotDict = { u"imageSum":True, u"imageFirst":False, u"FFTSum":True, u"polarFFTSum":True,
u"filtSum":True, u'stats': False,
u"corrTriMat":False, u"peaksigTriMat": True,
u"translations":True, u"pixRegError":True,
u"CTFDiag":True, u"logisticWeights": True, u"FRC": True,
u'Transparent': True, u'plot_dpi':144, u'image_dpi':250,
u'image_cmap':u'gray', u'graph_cmap':u'gnuplot',
u'fontsize':12, u'fontstyle': u'serif', u'colorbar': True,
u'backend': u'Qt4Agg', u'multiprocess':True,
u'show':False }
pass
def initDefaultFiles( self, stackName ):
self.files[u'stack'] = stackName
self.files[u'config'] = stackName + u".zor"
stackPath, stackFront = os.path.split( stackName )
stackFront = os.path.splitext( stackFront )[0]
if not 'compressor' in self.files or not bool(self.files['compressor']):
mrcExt = ".mrc"
mrcsExt = ".mrcs"
else:
mrcExt = ".mrcz"
mrcsExt = ".mrcsz"
self.files[u'align'] = os.path.relpath(
os.path.join( u"./align", "%s_zorro_movie%s" %(stackFront, mrcsExt) ),
start=stackPath )
        self.files[u'sum'] = os.path.relpath(
os.path.join( u"./sum", "%s_zorro%s" %(stackFront, mrcExt) ),
start=stackPath )
self.files[u'figurePath'] = os.path.relpath(
os.path.join(stackPath, u"./figs"), start=stackPath )
def xcorr2_mc2_1( self, gpu_id = 0, loadResult=True, clean=True ):
"""
This makes an external operating system call to the Cheng's lab GPU-based
B-factor multireference executable. It and CUDA libraries must be on the system
        path and library path respectively.
        NOTE: Spyder loads PATH and LD_LIBRARY_PATH from .profile, not .bashrc
"""
dosef_cmd = util.which("dosefgpu_driftcorr")
if dosef_cmd is None:
print( "Error: dosefgpu_driftcorr not found in system path." )
return
#tempFileHash = str(uuid.uuid4() ) # Key let's us multiprocess safely
stackBase = os.path.basename( os.path.splitext( self.files['stack'] )[0] )
if self.cachePath is None:
self.cachePath = "."
InName = os.path.join( self.cachePath, stackBase + u"_mcIn.mrc" )
# Unfortunately these files may as well be in the working directory.
OutAvName = os.path.join( self.cachePath, stackBase + u"_mcOutAv.mrc" )
OutStackName = os.path.join( self.cachePath, stackBase + u"_mcOut.mrc" )
logName = os.path.join( self.cachePath, stackBase + u"_mc.zor" )
mrcz.writeMRC( self.images, InName )
# Force binning to 1, as performance with binning is poor
binning = 1
if self.Brad is not None:
# Li masking is in MkPosList() in cufunc.cu (line 413)
# Their r2 is normalized and mine isn't
# Li has mask = exp( -0.5 * bfactor * r_norm**2 )
# r_norm**2 = x*x/Nx*Nx + y*y/Ny*Ny = r**2 / (Nx**2 + Ny**2)
# For non-square arrays they have a non-square (but constant frequency) filter
# RAM has mask = exp( -(r/brad)**2 )
# We can only get Bfactor approximately then but it's close enough for 3710x3838
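            # Rough worked example (numbers are illustrative, not from this file):
            # for a 3838 x 3710 frame with self.Brad = 512,
            # bfac = 2*(3838**2 + 3710**2)/512**2 ~= 217.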
bfac = 2.0 * (self.images.shape[1]**2 + self.images.shape[2]**2) / (self.Brad**2)
print( "Using B-factor of " + str(bfac) + " for dosefgpu_driftcorr" )
else:
bfac = 1000 # dosef default 'safe' bfactor for mediocre gain reference
# Consider: Dosef suffers at the ends of the sequence, so make the middle frame zero drift?
# align_to = np.floor( self.images.shape[0]/2 )
# This seems to cause more problems then it's worth.
align_to = 0
if self.diagWidth != None:
fod = self.diagWidth
else:
fod = 0
# Dosef can limit search to a certain box size
if self.maxShift == None:
maxshift = 96
else:
maxshift = self.maxShift * 2
if self.startFrame == None:
self.startFrame = 0
if self.endFrame == None:
self.endFrame = 0
motion_flags = ( " " + InName
+ " -gpu " + str(gpu_id)
+ " -nss " + str(self.startFrame)
+ " -nes " + str(self.endFrame)
+ " -fod " + str(fod)
+ " -bin " + str(binning)
+ " -bft " + str(bfac)
+ " -atm -" + str(align_to)
+ " -pbx " + str(maxshift)
+ " -ssc 1 -fct " + OutStackName
+ " -fcs " + OutAvName
+ " -flg " + logName )
sub = subprocess.Popen( dosef_cmd + motion_flags, shell=True )
sub.wait()
self.loadMCLog( logName )
time.sleep(0.5)
if bool(clean):
try: os.remove(InName)
except: pass
try: os.remove(OutStackName)
except: pass
try: os.remove(OutAvName)
except: pass
try: os.remove(logName)
except: pass
def loadMCLog( self, logName ):
"""
Load and parse a MotionCorr log from disk using regular expressions.
"""
import re
# Parse to get the translations
fhMC = open( logName )
MClog = fhMC.readlines()
fhMC.close()
# Number of footer lines changes with the options you use.
# I would rather find Sum Frame #000
for linenumber, line in enumerate(MClog):
try:
test = re.findall( "Sum Frame #000", line)
if bool(test):
frameCount = np.int( re.findall( "\d\d\d", line )[1] ) + 1
break
except: pass
MClog_crop = MClog[linenumber+1:linenumber+frameCount+1]
MCdrifts = np.zeros( [frameCount,2] )
for J in np.arange(0,frameCount):
MCdrifts[J,:] = re.findall( r"([+-]?\d+.\d+)", MClog_crop[J] )[1:]
# Zorro saves translations, motioncorr saves shifts.
self.translations = -np.fliplr( MCdrifts )
if self.originMode == u'centroid':
centroid = np.mean( self.translations, axis=0 )
self.translations -= centroid
def xcorr2_unblur1_02( self, dosePerFrame = None, minShift = 2.0, terminationThres = 0.1,
maxIteration=10, verbose=False, loadResult=True, clean=True ):
"""
Calls UnBlur by <NAME> Rohou using the Zorro interface.
"""
self.bench['unblur0'] = time.time()
unblur_exename = "unblur_openmp_7_17_15.exe"
if util.which( unblur_exename ) is None:
print( "UnBlur not found in system path" )
return
print( "Calling UnBlur for " + self.files['stack'] )
print( " written by <NAME> and <NAME>: http://grigoriefflab.janelia.org/unblur" )
print( " http://grigoriefflab.janelia.org/node/4900" )
import os
try: os.umask( self.umask ) # Why is Python not using default umask from OS?
except: pass
if self.cachePath is None:
self.cachePath = "."
# Force trailing slashes onto cachePath
stackBase = os.path.basename( os.path.splitext( self.files[u'stack'] )[0] )
frcOutName = os.path.join( self.cachePath, stackBase + u"_unblur_frc.txt" )
shiftsOutName = os.path.join( self.cachePath, stackBase + u"_unblur_shifts.txt" )
outputAvName = os.path.join( self.cachePath, stackBase + u"_unblur.mrc" )
outputStackName = os.path.join( self.cachePath, stackBase + u"_unblur_movie.mrc" )
ps = self.pixelsize * 10.0
if 'dose' in self.filterMode:
doDoseFilter = True
if dosePerFrame == None:
# We have to guesstimate the dose per frame in e/A^2 if it's not provided
dosePerFrame = np.mean( self.images ) / (ps*ps)
preExposure = 0.0
if 'dosenorm' in self.filterMode:
restoreNoise=True
else:
restoreNoise=False
else:
doDoseFilter = False
if self.Brad is not None:
# Li masking is in MkPosList() in cufunc.cu (line 413)
# Their r2 is normalized and mine isn't
# Li has mask = exp( -0.5 * bfactor * r_norm**2 )
# r_norm**2 = x*x/Nx*Nx + y*y/Ny*Ny = r**2 / (Nx**2 + Ny**2)
# For non-square arrays they have a non-square (but constant frequency) filter
# RAM has mask = exp( -(r/brad)**2 )
# We can only get Bfactor approximately then but it's close enough for 3710x3838
bfac = 2.0 * (self.images.shape[1]**2 + self.images.shape[2]**2) / (self.Brad**2)
print( "Using B-factor of " + str(bfac) + " for UnBlur" )
else:
bfac = 1500 # dosef default 'safe' bfactor for mediocre gain reference
outerShift = self.maxShift * ps
# RAM: I see no reason to let people change the Fourier cross masking
vertFouMaskHW = 1
horzFouMaskHW = 1
try:
mrcName = os.path.join( self.cachePath, stackBase + "_unblurIN.mrc" )
mrcz.writeMRC( self.images, mrcName )
except:
print( "Error in exporting MRC file to UnBlur" )
return
# Are there flags for unblur? Check the source code.
flags = "" # Not using any flags
unblurexec = ( unblur_exename + " " + flags + " << STOP_PARSING \n" + mrcName )
unblurexec = (unblurexec + "\n" + str(self.images.shape[0]) + "\n" +
outputAvName + "\n" + shiftsOutName + "\n" + str(ps) + "\n" +
str(doDoseFilter) )
if bool(doDoseFilter):
unblurexec += "\n" + str(dosePerFrame) + "\n" + str(self.voltage) + "\n" + str(preExposure)
unblurexec += ("\n yes \n" + outputStackName + "\n yes \n" +
frcOutName + "\n" + str(minShift) + "\n" + str(outerShift) + "\n" +
str(bfac) + "\n" + str( np.int(vertFouMaskHW) ) + "\n" + str( np.int(horzFouMaskHW) ) + "\n" +
str(terminationThres) + "\n" + str(maxIteration) )
if bool(doDoseFilter):
unblurexec += "\n" + str(restoreNoise)
unblurexec += "\n" + str(verbose)
unblurexec = unblurexec + "\nSTOP_PARSING"
print( unblurexec )
sub = subprocess.Popen( unblurexec, shell=True )
sub.wait()
try:
# Their FRC is significantly different from mine.
self.FRC = np.loadtxt(frcOutName, comments='#', skiprows=0 )
self.translations = np.loadtxt( shiftsOutName, comments='#', skiprows=0 ).transpose()
# UnBlur uses Fortran ordering, so we need to swap y and x for Zorro C-ordering
self.translations = np.fliplr( self.translations )
# UnBlur returns drift in Angstroms
self.translations /= ps
# UnBlur registers to middle frame
self.translations -= self.translations[0,:]
if bool( loadResult ):
print( "Loading UnBlur aligned frames into ImageRegistrator.images" )
if 'dose' in self.filterMode:
# TODO: How to get both filtered and unfiltered images?
self.imageSum = mrcz.readMRC( outputAvName )[0]
else:
self.imageSum = mrcz.readMRC( outputAvName )[0]
# TODO: We have a bit of an issue, this UnBlur movie is dose filtered...
self.images = mrcz.readMRC( outputStackName )[0]
except IOError:
print( "UnBlur likely core-dumped, try different input parameters?" )
finally:
time.sleep(0.5) # DEBUG: try and see if temporary files are deleteable now.
frcOutName = os.path.join( self.cachePath, stackBase + "_unblur_frc.txt" )
shiftsOutName = os.path.join( self.cachePath, stackBase + "_unblur_shifts.txt" )
outputAvName = os.path.join( self.cachePath, stackBase + "_unblur.mrc" )
outputStackName = os.path.join( self.cachePath, stackBase + "_unblur_movie.mrc" )
pass
if self.originMode == 'centroid':
centroid = np.mean( self.translations, axis=0 )
self.translations -= centroid
time.sleep(0.5)
if bool(clean):
try: os.remove( mrcName )
except: print( "Could not remove Unblur MRC input file" )
try: os.remove( frcOutName )
except: print( "Could not remove Unblur FRC file" )
try: os.remove( shiftsOutName )
except: print( "Could not remove Unblur Shifts file" )
try: os.remove( outputAvName )
except: print( "Could not remove Unblur MRC average" )
try: os.remove( outputStackName )
except: print( "Could not remove Unblur MRC stack" )
self.bench['unblur1'] = time.time()
def __init_xcorrnm2( self, triIndices=None ):
"""
"""
self.bench['xcorr0'] = time.time()
shapeImage = np.array( [self.images.shape[1], self.images.shape[2]] )
self.__N = np.asarray( self.images.shape )[0]
if self.preShift:
print( "Warning: Preshift will break if there are skipped frames in a triIndices row." )
# Test to see if triIndices is a np.array or use self.triMode
if hasattr( triIndices, "__array__" ): # np.array
# Ensure triIndices is a square array of the right size
if triIndices.shape[0] != self.__N or triIndices.shape[1] != self.__N:
raise IndexError("triIndices is wrong size, should be of length: " + str(self.__N) )
elif triIndices is None:
[xmesh, ymesh] = np.meshgrid( np.arange(0,self.__N), np.arange(0,self.__N) )
trimesh = xmesh - ymesh
# Build the triMat if it wasn't passed in as an array
if( self.triMode == 'first' ):
print( "Correlating in template mode to first image" )
triIndices = np.ones( [1,self.__N], dtype='bool' )
triIndices[0,0] = False # Don't autocorrelate the first frame.
elif( self.triMode == u'diag' ):
if (self.diagWidth is None) or (self.diagWidth < 0):
# For negative numbers, align the entire triangular matrix
self.diagWidth = self.__N
triIndices = (trimesh <= self.diagWidth + self.diagStart ) * (trimesh > self.diagStart )
print( "Correlating in diagonal mode with width " + str(self.diagWidth) )
elif( self.triMode == u'autocorr' ):
triIndices = (trimesh == 0)
elif( self.triMode == u'refine' ):
triIndices = trimesh == 0
else: # 'tri' or 'auto' ; default is an upper triangular matrix
triIndices = trimesh >= 1
pass
else:
raise TypeError( "Error: triIndices not recognized as valid: " + str(triIndices) )
if self.masks is None or self.masks == []:
print( "Warning: No mask not recommened with MNXC-style correlation" )
self.masks = np.ones( [1,shapeImage[0],shapeImage[1]], dtype = self.images.dtype )
if( self.masks.ndim == 2 ):
self.masks = np.reshape( self.masks.astype(self.images.dtype), [1,shapeImage[0],shapeImage[1]] )
# Pre-loop allocation
self.__shiftsTriMat = np.zeros( [self.__N,self.__N,2], dtype=float_dtype ) # Triagonal matrix of shifts in [I,J,(y,x)]
self.__corrTriMat = np.zeros( [self.__N,self.__N], dtype=float_dtype ) # Triagonal matrix of maximum correlation coefficient in [I,J]
self.__peaksigTriMat = np.zeros( [self.__N,self.__N], dtype=float_dtype ) # Triagonal matrix of correlation peak contrast level
self.__originTriMat= np.zeros( [self.__N,self.__N], dtype=float_dtype ) # Triagonal matrix of origin correlation coefficient in [I,J]
# Make pyFFTW objects
if not bool( np.any( self.fouCrop ) ):
self.__tempFullframe = np.empty( shapeImage, dtype=fftw_dtype )
self.__FFT2, self.__IFFT2 = util.pyFFTWPlanner( self.__tempFullframe, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ), effort = self.fftw_effort, n_threads=self.n_threads )
self.__shapeCropped = shapeImage
self.__tempComplex = np.empty( self.__shapeCropped, dtype=fftw_dtype )
else:
self.__tempFullframe = np.empty( shapeImage, dtype=fftw_dtype )
self.__FFT2, _ = util.pyFFTWPlanner( self.__tempFullframe, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doReverse=False )
# Force fouCrop to multiple of 2
self.__shapeCropped = 2 * np.floor( np.array( self.fouCrop ) / 2.0 ).astype('int')
self.__tempComplex = np.empty( self.__shapeCropped, dtype=fftw_dtype )
_, self.__IFFT2 = util.pyFFTWPlanner( self.__tempComplex, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doForward=False )
self.__shapeCropped2 = (np.array( self.__shapeCropped) / 2.0).astype('int')
self.__templateImageFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__templateSquaredFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__templateMaskFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__tempComplex2 = np.empty( self.__shapeCropped, dtype=fftw_dtype )
# Subpixel initialization
# Ideally subPix should be a power of 2 (i.e. 2,4,8,16,32)
self.__subR = 8 # Sampling range around peak of +/- subR
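# For example (hypothetical settings): with subR = 8 and subPixReg = 4, the 16x16
# neighbourhood around the correlation peak is Fourier-padded to 64x64 below,
# so shifts are resolved to 1/4 of a pixel.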
if self.subPixReg is None: self.subPixReg = 1;
if self.subPixReg > 1.0:
# hannfilt = np.fft.fftshift( ram.apodization( name='hann', size=[subR*2,subR*2], radius=[subR,subR] ) ).astype( fftw_dtype )
# Need a forward transform that is [subR*2,subR*2]
self.__Csub = np.empty( [self.__subR*2,self.__subR*2], dtype=fftw_dtype )
self.__CsubFFT = np.empty( [self.__subR*2,self.__subR*2], dtype=fftw_dtype )
self.__subFFT2, _ = util.pyFFTWPlanner( self.__Csub, fouMage=self.__CsubFFT, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doReverse = False )
# and reverse transform that is [subR*2*subPix, subR*2*subPix]
self.__CpadFFT = np.empty( [self.__subR*2*self.subPixReg,self.__subR*2*self.subPixReg], dtype=fftw_dtype )
self.__Csub_over = np.empty( [self.__subR*2*self.subPixReg,self.__subR*2*self.subPixReg], dtype=fftw_dtype )
_, self.__subIFFT2 = util.pyFFTWPlanner( self.__CpadFFT, fouMage=self.__Csub_over, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doForward = False )
self.__maskProduct = np.zeros( self.__shapeCropped, dtype=float_dtype )
self.__normConst2 = np.float32( 1.0 / ( np.float64(self.__shapeCropped[0])*np.float64(self.__shapeCropped[1]))**2.0 )
self.bench['xcorr1'] = time.time()
return triIndices
def xcorrnm2_speckle( self, triIndices=None ):
"""
<NAME>
<EMAIL>
October 1, 2016
With data recorded automatically from SerialEM, we no longer have access to the gain-reference
normalization step provided by Gatan. With the K2 detector, gain normalization is no
longer a simple multiplication, so we see additional multiplicative (or speckle)
noise in the images compared to those recorded by Gatan Microscopy Suite. Here we want
to use a different approach from the Padfield algorithm, which is geared toward suppressing
additive noise, and instead handle this multiplicative component.
In general, Poisson noise should behave as speckle noise, especially at the dose rates commonly
seen in cryo-EM.
"""
triIndices = self.__init_xcorrnm2( triIndices = triIndices)
# Pre-compute forward FFTs (template will just be copied conjugate Fourier spectra)
self.__imageFFT = np.empty( [self.__N, self.shapePadded[0], self.shapePadded[1]], dtype=fftw_dtype )
self.__autocorrHalfs = np.empty( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=float_dtype )
currIndex = 0
self.__originC = []; self.C = []
print( "Pre-computing forward Fourier transforms and autocorrelations" )
# For even-odd and noise estimates, we often skip many rows
# precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) > 0 ), np.argwhere( np.sum( triIndices, axis=0 ) > 0 ) ] ) )
precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) >= 0 ),
np.argwhere( np.sum( triIndices, axis=0 ) >= 0 ) ] ) )
for I in precompIndices:
if self.verbose >= 2:
print( "Precomputing forward FFT frame: " + str(I) )
# Apply masks to images
if self.masks.shape[0] == 1:
masks_block = self.masks[0,:,:]
images_block = self.images[I,:,:]
else:
masks_block = self.masks[I,:,:]
images_block = self.images[I,:,:]
self.__tempComplex = nz.evaluate( "masks_block * images_block" ).astype( fftw_dtype )
self.__FFT2.update_arrays( self.__tempComplex, self.__imageFFT[I,:,:]); self.__FFT2.execute()
print( "TODO: FOURIER CROPPING" )
# Compute autocorrelation
imageFFT = self.__imageFFT[I,:,:]
# Not sure if numexpr is useful for such a simple operation?
self.__tempComplex = nz.evaluate( "imageFFT * conj(imageFFT)" )
self.__IFFT2.update_arrays( self.__tempComplex, self.__tempComplex2 )
tempComplex2 = self.__tempComplex2
nz.evaluate( "0.5*abs(tempComplex2)", out=self.__autocorrHalfs[I,:,:] )
self.bench['xcorr2'] = time.time()
########### COMPUTE PHASE CORRELATIONS #############
print( "Starting correlation calculations, mode: " + self.triMode )
if self.triMode == u'refine':
# Find FFT sum (it must be reduced by the current frame later)
# FIXME: Is there some reason this might not be linear after FFT?
# FIXME: is it the complex conjugate operation below???
self.__sumFFT = np.sum( self.__baseImageFFT, axis = 0 )
self.__sumSquaredFFT = np.sum( self.__baseSquaredFFT, axis = 0 )
print( "In refine" )
for I in np.arange(self.images.shape[0] - 1):
# In refine mode we have to build the template on the fly from imageSum - currentImage
self.__templateImageFFT = np.conj( self.__sumFFT - self.__baseImageFFT[I,:,:] ) / self.images.shape[0]
self.__templateSquaredFFT = np.conj( self.__sumSquaredFFT - self.__baseSquaredFFT[I,:,:] ) / self.images.shape[0]
tempComplex2 = None
self.mnxc2_SPECKLE( I, I, self.__shapeCropped, refine=True )
#### Find maximum positions ####
self.locatePeak( I, I )
if self.verbose:
print( "Refine # " + str(I) + " shift: [%.2f"%self.__shiftsTriMat[I,I,0]
+ ", %.2f"%self.__shiftsTriMat[I,I,1]
+ "], cc: %.6f"%self.__corrTriMat[I,I]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,I] )
else:
# For even-odd and noise estimates, we often skip many rows
rowIndices = np.unique( np.argwhere( np.sum( triIndices, axis=1 ) > 0 ) )
#print( "rowIndices: " + str(rowIndices) )
for I in rowIndices:
# I is the index of the template image
tempComplex = self.__baseImageFFT[I,:,:]
self.__templateImageFFT = nz.evaluate( "conj(tempComplex)")
# Now we can start looping through base images
columnIndices = np.unique( np.argwhere( triIndices[I,:] ) )
#print( "columnIndices: " + str(columnIndices) )
for J in columnIndices:
####### MNXC2 revisement with private variable to make the code more manageable.
self.mnxc2_speckle( I, J, self.__shapeCropped )
#### Find maximum positions ####
self.locatePeak( I, J )
if self.verbose:
print( "# " + str(I) + "->" + str(J) + " shift: [%.2f"%self.__shiftsTriMat[I,J,0]
+ ", %.2f"%self.__shiftsTriMat[I,J,1]
+ "], cc: %.6f"%self.__corrTriMat[I,J]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,J] )
# Correlation stats is for establishing correlation scores for fixed-pattern noise.
if bool( self.trackCorrStats ):
self.calcCorrStats( currIndex, triIndices )
# triMode 'auto' diagonal mode
if self.triMode == u'auto' and (self.__peaksigTriMat[I,J] <= self.peaksigThres or J-I >= self.autoMax):
if self.verbose: print( "triMode 'auto' stopping at frame: " + str(J) )
break
currIndex += 1
pass # C max position location
if bool( np.any( self.fouCrop ) ):
self.__shiftsTriMat[:,:,0] *= self.shapePadded[0] / self.__shapeCropped[0]
self.__shiftsTriMat[:,:,1] *= self.shapePadded[1] / self.__shapeCropped[1]
self.bench['xcorr3'] = time.time()
# Pointer reference house-keeping
del images_block, masks_block, imageFFT, tempComplex2
def xcorrnm2_tri( self, triIndices=None ):
"""
<NAME>
<EMAIL>
April 16, 2015
triIndices is the index locations to correlate to. If None, self.triMode
is used to build one. Normally you should use self.triMode for the first iteration,
and pass in a triIndice from the errorDict if you want to repeat.
returns : [shiftsTriMat, corrTriMat, peaksTriMat]
This is an evolution of the Padfield cross-correlation algorithm to take
advantage of the Cheng multi-reference approach for cross-correlation
alignment of movies.
Padfield, "Masked object registration in the Fourier domain," IEEE
Transactions on Image Processing 21(5) (2012): 2706-2718.
<NAME> al. Nature Methods, 10 (2013): 584-590.
It cross-correlates every frame to every other frame to build a triangular
matrix of shifts and then does a functional minimization over the set of
equations. This means the computational cost grows with a power law with
the number of frames but it is more noise resistant.
triIndices can be an arbitrary boolean N x N matrix of frames to correlate
Alternatively it can be a string which will generate an appropriate matrix:
'tri' (default) correlates all frames to each other
'first' is correlate to the first frame as a template
'diag' correlates to the next frame (i.e. a diagonal )
'auto' is like 'diag' but automatically determines when to stop based on peaksigThres
diagWidth is for 'diag' and the number of frames to correlate each frame to,
default is None, which does the entire triangular matrix
diagWidth = 1 correlates to each preceding frame
NOTE: only calculates FFTs up to Nyquist/2.
"""
triIndices = self.__init_xcorrnm2( triIndices = triIndices)
if self.masks.shape[0] == 1 :
# tempComplex = self.masks[0,:,:].astype( fftw_dtype )
self.__baseMaskFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__FFT2.update_arrays( self.masks[0,:,:].squeeze().astype( fftw_dtype ), self.__tempFullframe ); self.__FFT2.execute()
# FFTCrop
sC2 = self.__shapeCropped2
self.__baseMaskFFT[0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseMaskFFT[0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseMaskFFT[-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseMaskFFT[-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
self.__templateMaskFFT = np.conj( self.__baseMaskFFT )
# maskProduct term is M1^* .* M2
templateMaskFFT = self.__templateMaskFFT;
baseMaskFFT = self.__baseMaskFFT # Pointer assignment
self.__tempComplex2 = nz.evaluate( "templateMaskFFT * baseMaskFFT" )
self.__IFFT2.update_arrays( self.__tempComplex2, self.__tempComplex ); self.__IFFT2.execute()
tempComplex = self.__tempComplex
normConst2 = self.__normConst2
self.__maskProduct = nz.evaluate( "normConst2*real(tempComplex)" )
else:
# Pre-allocate only
self.__baseMaskFFT = np.zeros( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=fftw_dtype )
if bool( self.maxShift ) or self.Bmode == u'fourier':
if self.maxShift is None or self.preShift is True:
[xmesh,ymesh] = np.meshgrid( np.arange(-self.__shapeCropped2[0], self.__shapeCropped2[0]),
np.arange(-self.__shapeCropped2[1], self.__shapeCropped2[1]) )
else:
[xmesh,ymesh] = np.meshgrid( np.arange(-self.maxShift, self.maxShift), np.arange(-self.maxShift, self.maxShift) )
rmesh2 = nz.evaluate( "xmesh*xmesh + ymesh*ymesh" )
# rmesh2 = xmesh*xmesh + ymesh*ymesh
if bool( self.maxShift ):
self.__mask_maxShift = ( rmesh2 < self.maxShift**2.0 )
if self.Bmode == u'fourier':
self.__Bfilter = np.fft.fftshift( util.apodization( name=self.BfiltType,
size=self.__shapeCropped,
radius=[self.Brad,self.Brad] ) )
self.bench['xcorr1'] = time.time()
# Pre-compute forward FFTs (template will just be copied conjugate Fourier spectra)
self.__imageFFT = np.empty( [self.__N, self.shapePadded[0], self.shapePadded[1]], dtype=fftw_dtype )
self.__baseImageFFT = np.empty( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=fftw_dtype )
self.__baseSquaredFFT = np.empty( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=fftw_dtype )
# Looping for triagonal matrix
# For auto this is wrong, so make these lists instead
currIndex = 0
self.__originC = []; self.C = []
print( "Pre-computing forward Fourier transforms" )
# For even-odd and noise estimates, we often skip many rows
# precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) > 0 ), np.argwhere( np.sum( triIndices, axis=0 ) > 0 ) ] ) )
precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) >= 0 ),
np.argwhere( np.sum( triIndices, axis=0 ) >= 0 ) ] ) )
for I in precompIndices:
if self.verbose >= 2:
print( "Precomputing forward FFT frame: " + str(I) )
# Apply masks to images
if self.masks.shape[0] == 1:
masks_block = self.masks[0,:,:]
images_block = self.images[I,:,:]
else:
masks_block = self.masks[I,:,:]
images_block = self.images[I,:,:]
tempReal = nz.evaluate( "masks_block * images_block" ).astype( fftw_dtype )
self.__FFT2.update_arrays( tempReal, self.__tempFullframe ); self.__FFT2.execute()
if self.shiftMethod == u"fourier":
self.__imageFFT[I,:,:] = self.__tempFullframe.copy(order='C')
# FFTCrop
self.__baseImageFFT[I,0:sC2[0],0:sC2[1]] = self.__imageFFT[I,0:sC2[0],0:sC2[1]]
self.__baseImageFFT[I,0:sC2[0],-sC2[1]:] = self.__imageFFT[I,0:sC2[0],-sC2[1]:]
self.__baseImageFFT[I,-sC2[0]:,0:sC2[1]] = self.__imageFFT[I,-sC2[0]:,0:sC2[1]]
self.__baseImageFFT[I,-sC2[0]:,-sC2[1]:] = self.__imageFFT[I,-sC2[0]:,-sC2[1]:]
print( "TODO: check memory consumption" )
else:
# FFTCrop
self.__baseImageFFT[I,0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseImageFFT[I,0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseImageFFT[I,-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseImageFFT[I,-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
self.__FFT2.update_arrays( nz.evaluate( "tempReal*tempReal" ).astype( fftw_dtype ), self.__tempFullframe ); self.__FFT2.execute()
# FFTCrop
self.__baseSquaredFFT[I,0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseSquaredFFT[I,0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseSquaredFFT[I,-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseSquaredFFT[I,-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
if not self.masks.shape[0] == 1:
self.__FFT2.update_arrays( self.masks[I,:,:].squeeze().astype( fftw_dtype), self.__tempFullframe ); self.__FFT2.execute()
# FFTCrop
self.__baseMaskFFT[I,0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseMaskFFT[I,0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseMaskFFT[I,-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseMaskFFT[I,-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
pass
del masks_block, images_block
self.bench['xcorr2'] = time.time()
print( "Starting correlation calculations, mode: " + self.triMode )
if self.triMode == u'refine':
# Find FFT sum (it must be reduced by the current frame later)
# FIXME: Is there some reason this might not be linear after FFT?
# FIXME: is it the complex conjugate operation below???
self.__sumFFT = np.sum( self.__baseImageFFT, axis = 0 )
self.__sumSquaredFFT = np.sum( self.__baseSquaredFFT, axis = 0 )
print( "In refine" )
for I in np.arange(self.images.shape[0] - 1):
# In refine mode we have to build the template on the fly from imageSum - currentImage
self.__templateImageFFT = np.conj( self.__sumFFT - self.__baseImageFFT[I,:,:] ) / self.images.shape[0]
self.__templateSquaredFFT = np.conj( self.__sumSquaredFFT - self.__baseSquaredFFT[I,:,:] ) / self.images.shape[0]
tempComplex2 = None
self.mnxc2( I, I, self.__shapeCropped, refine=True )
#### Find maximum positions ####
self.locatePeak( I, I )
if self.verbose:
print( "Refine # " + str(I) + " shift: [%.2f"%self.__shiftsTriMat[I,I,0]
+ ", %.2f"%self.__shiftsTriMat[I,I,1]
+ "], cc: %.6f"%self.__corrTriMat[I,I]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,I] )
else:
# For even-odd and noise estimates, we often skip many rows
rowIndices = np.unique( np.argwhere( np.sum( triIndices, axis=1 ) > 0 ) )
#print( "rowIndices: " + str(rowIndices) )
for I in rowIndices:
# I is the index of the template image
tempComplex = self.__baseImageFFT[I,:,:]
self.__templateImageFFT = nz.evaluate( "conj(tempComplex)")
tempComplex2 = self.__baseSquaredFFT[I,:,:]
self.__templateSquaredFFT = nz.evaluate( "conj(tempComplex2)")
if not self.masks.shape[0] == 1:
tempComplex = self.__baseMaskFFT[I,:,:]
self.__templateMaskFFT = nz.evaluate( "conj(tempComplex)")
# Now we can start looping through base images
columnIndices = np.unique( np.argwhere( triIndices[I,:] ) )
#print( "columnIndices: " + str(columnIndices) )
for J in columnIndices:
####### MNXC2 revisement with private variable to make the code more manageable.
self.mnxc2( I, J, self.__shapeCropped )
#### Find maximum positions ####
self.locatePeak( I, J )
if self.verbose:
print( "# " + str(I) + "->" + str(J) + " shift: [%.2f"%self.__shiftsTriMat[I,J,0]
+ ", %.2f"%self.__shiftsTriMat[I,J,1]
+ "], cc: %.6f"%self.__corrTriMat[I,J]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,J] )
# Correlation stats is for establishing correlation scores for fixed-pattern noise.
if bool( self.trackCorrStats ):
# Track the various statistics about the correlation map, mean, std, max, skewness
self.calcCorrStats( currIndex, triIndices )
# triMode 'auto' diagonal mode
if self.triMode == u'auto' and (self.__peaksigTriMat[I,J] <= self.peaksigThres or J-I >= self.autoMax):
if self.verbose: print( "triMode 'auto' stopping at frame: " + str(J) )
break
currIndex += 1
pass # C max position location
if bool( np.any( self.fouCrop ) ):
self.__shiftsTriMat[:,:,0] *= self.shapePadded[0] / self.__shapeCropped[0]
self.__shiftsTriMat[:,:,1] *= self.shapePadded[1] / self.__shapeCropped[1]
self.bench['xcorr3'] = time.time()
# Pointer reference house-keeping
del templateMaskFFT, tempComplex, tempComplex2 # Pointer
return
def mnxc2( self, I, J, shapeCropped, refine=False ):
"""
2-D Masked, Intensity Normalized, Cross-correlation
"""
tempComplex = self.__tempComplex # Pointer re-assignment
tempComplex2 = self.__tempComplex2 # Pointer re-assignment
maskProduct = self.__maskProduct
normConst2 = self.__normConst2
if not self.masks.shape[0] == 1:
# Compute maskProduct, term is M1^* .* M2
baseMask_block = self.__baseMaskFFT[J,:,:]; templateMaskFFT = self.__templateMaskFFT # Pointer re-assignment
tempComplex2 = nz.evaluate( "templateMaskFFT * baseMask_block" )
self.__IFFT2.update_arrays( tempComplex2, tempComplex ); self.__IFFT2.execute()
# maskProduct = np.clip( np.round( np.real( tempComplex ) ), eps, np.Inf )
self.__maskProduct = nz.evaluate( "real(tempComplex)*normConst2" )
# Compute mask correlation terms
if self.masks.shape[0] == 1:
templateImageFFT = self.__templateImageFFT; baseMask_block = self.__baseMaskFFT # Pointer re-assignment
self.__IFFT2.update_arrays( nz.evaluate( "baseMask_block * templateImageFFT"), tempComplex ); self.__IFFT2.execute()
Corr_templateMask = nz.evaluate( "real(tempComplex)*normConst2" ) # Normalization
baseImageFFT_block = self.__baseImageFFT[J,:,:]; templateMaskFFT = self.__templateMaskFFT
self.__IFFT2.update_arrays( nz.evaluate( "templateMaskFFT * baseImageFFT_block"), tempComplex ); self.__IFFT2.execute()
# These haven't been normalized, so let's do so. They are FFT squared, so N*N
# This reduces the strain on single-precision range.
Corr_baseMask = nz.evaluate( "real(tempComplex)*normConst2" ) # Normalization
# Compute the intensity normalization for the template
if self.masks.shape[0] == 1:
baseMaskFFT = self.__baseMaskFFT; templateSquaredFFT = self.__templateSquaredFFT
self.__IFFT2.update_arrays( nz.evaluate( "baseMaskFFT * templateSquaredFFT"), tempComplex ); self.__IFFT2.execute()
else:
self.__IFFT2.update_arrays( nz.evaluate( "baseMaskFFT_block * templateSquaredFFT"), tempComplex ); self.__IFFT2.execute()
# DenomTemplate = nz.evaluate( "real(tempComplex)*normConst2 - real( Corr_templateMask * (Corr_templateMask / maskProduct) )" )
# Compute the intensity normalization for the base image
baseSquared_block = self.__baseSquaredFFT[J,:,:]
self.__IFFT2.update_arrays( nz.evaluate( "templateMaskFFT * baseSquared_block"), tempComplex2 ); self.__IFFT2.execute()
# Compute Denominator intensity normalization
# DenomBase = nz.evaluate( "real(tempComplex2)*normConst2- real( Corr_baseMask * (Corr_baseMask / maskProduct) )" )
Denom = nz.evaluate( "sqrt( (real(tempComplex2)*normConst2- real( Corr_baseMask * (Corr_baseMask / maskProduct)))" +
"* (real(tempComplex)*normConst2 - real( Corr_templateMask * (Corr_templateMask / maskProduct)) ) )" )
# What happened to numexpr clip?
Denom = np.clip( Denom, 1, np.Inf )
# print( "Number of small Denominator values: " + str(np.sum(DenomTemplate < 1.0)) )
# Compute Numerator (the phase correlation)
tempComplex2 = nz.evaluate( "baseImageFFT_block * templateImageFFT" )
self.__IFFT2.update_arrays( tempComplex2, tempComplex ); self.__IFFT2.execute()
# Numerator = nz.evaluate( "real(tempComplex)*normConst2 - real( Corr_templateMask * Corr_baseMask / maskProduct)" )
# Compute final correlation
self.__C = nz.evaluate( "(real(tempComplex)*normConst2 - real( Corr_templateMask * Corr_baseMask / maskProduct)) / Denom" )
# print( "%%%% mnxc2.Denom.dtype = " + str(Denom.dtype) )
self.__originTriMat[I,J] = self.__C[0,0]
if bool(self.suppressOrigin):
# If gain reference is quite old we can still get one bright pixel at the center.
# The hot pixel filter has mitigated this but it's still a minor source of bias.
self.__C[0,0] = 0.125 * ( self.__C[1,0] + self.__C[0,1] + self.__C[-1,0] + self.__C[0,-1] +
self.__C[1,1] + self.__C[-1,1] + self.__C[1,-1] + self.__C[-1,-1] )
# We have everything in normal FFT order until here; Some speed-up could be found by its removal.
# Practically we don't have to do this fftshift, but it makes plotting easier to understand
self.__C = np.fft.ifftshift( self.__C )
# We can crop C if maxShift is not None and preShift is False
if self.maxShift is not None and self.preShift is False:
shapeCropped2 = (np.array(shapeCropped)/2.0).astype('int')
self.__C = self.__C[shapeCropped2[0]-self.maxShift:shapeCropped2[0]+self.maxShift, shapeCropped2[1]-self.maxShift:shapeCropped2[1]+self.maxShift]
del normConst2, baseMask_block, templateMaskFFT, templateImageFFT, Corr_templateMask, baseImageFFT_block
del Corr_baseMask, baseSquared_block, baseMaskFFT, templateSquaredFFT, maskProduct
del tempComplex, tempComplex2
def locatePeak( self, I, J ):
"""
Subpixel peak location by Fourier interpolation.
"""
tempComplex = self.__tempComplex; tempComplex2 = self.__tempComplex2
# Apply B-factor low-pass filter to correlation function
if self.Bmode == 'opti':
self.bench['opti0'] = time.time()
# Want to define this locally so it inherits scope variables.
def inversePeakContrast( Bsigma ):
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, Bsigma )
return np.std(self.__C_filt ) / (np.max(self.__C_filt ) - np.mean(self.__C_filt ) )
# B_opti= scipy.optimize.fminbound( inversePeakContrast, 0.0, 10.0, xtol=1E-3 )
sigmaOptiMax = 7.0
sigmaOptiMin = 0.0
maxIter = 15 # Let's apply some more constraints to speed this up
tolerance = 0.01
result = scipy.optimize.minimize_scalar( inversePeakContrast,
bounds=[sigmaOptiMin,sigmaOptiMax], method="bounded",
options={'maxiter':maxIter, 'xatol':tolerance } )
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, result.x )
self.bench['opti1'] = time.time()
if self.verbose >= 2:
print( "Found optimum B-sigma: %.3f"%result.x + ", with peak sig: %.3f"%(1.0/result.fun)+" in %.1f"%(1E3*(self.bench['opti1']-self.bench['opti0']))+" ms" )
elif bool(self.Brad) and self.Bmode =='fourier':
tempComplex = self.__C.astype(fftw_dtype)
self.__FFT2.update_arrays( tempComplex, tempComplex2 ); self.__FFT2.execute()
Bfilter = self.__Bfilter
self.__IFFT2.update_arrays( nz.evaluate( "tempComplex2*Bfilter" ), tempComplex ); self.__IFFT2.execute()
# Conservation of counts with Fourier filtering is not
# very straight-forward.
C_filt = nz.evaluate( "real( tempComplex )/sqrt(normConst)" )
elif bool(self.Brad) and (self.Bmode == u'conv' or self.Bmode == u'convolution'):
# Convert self.Brad as an MTF to an equivalent sigma for a PSF
# TODO: Check that Bsigma is correct with Fourier cropping"
Bsigma = self.shapePadded / (np.sqrt(2) * np.pi * self.Brad)
# Scipy's gaussian filter conserves total counts
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, Bsigma )
else: # No filtering
self.__C_filt = self.__C
# Apply maximum shift max mask, if present
if bool( self.maxShift ):
# for previous frame alignment compensation, we need to shift the mask around...
C_filt = self.__C_filt
if bool( self.preShift ):
# print( "In pre-shift" )
# This isn't working with 'refine'
if self.triMode != u'refine':
rolledMask = np.roll( np.roll( self.__mask_maxShift,
np.round(self.__shiftsTriMat[I,J-1,0]).astype('int'), axis=0 ),
np.round(self.__shiftsTriMat[I,J-1,1]).astype('int'), axis=1 )
elif self.triMode == u'refine':
# With refine the matrix is populated like an autocorrelation function.
rolledMask = np.roll( np.roll( self.__mask_maxShift,
np.round(self.__shiftsTriMat[I-1,I-1,0]).astype('int'), axis=0 ),
np.round(self.__shiftsTriMat[I-1,I-1,1]).astype('int'), axis=1 )
pass
else:
# Without pre-shift compensation, use the centred maximum-shift mask directly.
rolledMask = self.__mask_maxShift
C_masked = nz.evaluate("C_filt*rolledMask")
cmaxpos = np.unravel_index( np.argmax( C_masked ), C_masked.shape )
self.__peaksigTriMat[I,J] = (C_masked[cmaxpos] - np.mean(C_filt[rolledMask]))/ np.std(C_filt[rolledMask])
import numpy as np
import sklearn.metrics
def pr_global(
y_hat,
y_obs,
threshold=0.5
):
"""
:param y_hat:
:param y_obs:
:param threshold:
:return: (data sets, 1) for each metric
"""
for i in range(len(y_hat)):
if len(y_hat[i].shape) == 1:
y_hat[i] = np.expand_dims(y_hat[i], axis=1)
if len(y_obs[i].shape) == 1:
y_obs[i] = np.expand_dims(y_obs[i], axis=1)
is_multiclass = y_obs[0].shape[1] > 1
if is_multiclass:
y_hat_disc = [np.zeros_like(y_hat[i]) for i in range(len(y_hat))]
for i in range(len(y_hat)):
y_hat_disc[i][np.arange(0, y_hat[i].shape[0]), np.argmax(y_hat[i], axis=1)] = 1
true_call = [
np.all((y_obs[i] >= threshold) == (y_hat_disc[i] > 0.5), axis=1)
for i in range(len(y_hat))
]
tp = [
np.logical_and(
true_call[i],
np.any(y_obs[i][:, :-1] >= threshold, axis=1)
).flatten()
for i in range(len(y_hat_disc))
]
tn = [
np.logical_and(
true_call[i],
y_obs[i][:, -1] >= threshold
).flatten()
for i in range(len(y_hat_disc))
]
fp = [
np.logical_and(
np.logical_not(true_call[i]),
np.any(y_hat_disc[i][:, :-1] >= threshold, axis=1)
).flatten()
for i in range(len(y_hat_disc))
]
fn = [
np.logical_and(
np.logical_not(true_call[i]),
y_hat_disc[i][:, -1] >= threshold
).flatten()
for i in range(len(y_hat_disc))
]
else:
tp = [
np.logical_and(y_obs[i] >= threshold, y_hat[i] >= threshold).flatten()
for i in range(len(y_hat))
]
tn = [
np.logical_and(y_obs[i] < threshold, y_hat[i] < threshold).flatten()
for i in range(len(y_hat))
]
fp = [
np.logical_and(y_obs[i] < threshold, y_hat[i] >= threshold).flatten()
for i in range(len(y_hat))
]
fn = [
np.logical_and(y_obs[i] >= threshold, y_hat[i] < threshold).flatten()
for i in range(len(y_hat))
]
for i, y in enumerate(y_hat):
assert np.sum(np.sum(np.vstack([tp[i], tn[i], fp[i], fn[i]]).T, axis=1) != 1) == 0, \
"tp %i, fp %i, tn %i, fn %i, all %i" % \
(np.sum(tp[i]), np.sum(tn[i]), np.sum(fp[i]), np.sum(fn[i]), y.shape[0])
tp = np.expand_dims(np.array([np.sum(x) for x in tp]), axis=-1)
tn = np.expand_dims(np.array([np.sum(x) for x in tn]), axis=-1)
fp = np.expand_dims(np.array([np.sum(x) for x in fp]), axis=-1)
fn = np.expand_dims(np.array([np.sum(x) for x in fn]), axis=-1)
precision = tp / (tp + fp + 1e-10)
recall = tp / (tp + fn + 1e-10)
tpr = tp / np.expand_dims(np.array([x.shape[0] for x in y_obs]), axis=-1)
tnr = tn / np.expand_dims(np.array([x.shape[0] for x in y_obs]), axis=-1)
fpr = fp / np.expand_dims(np.array([x.shape[0] for x in y_obs]), axis=-1)
fnr = fn / np.expand_dims(np.array([x.shape[0] for x in y_obs]), axis=-1)
return precision, recall, tpr, tnr, fpr, fnr
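# Example usage (a sketch with made-up binary scores, shown only for shapes):
#   y_hat = [np.array([0.9, 0.2, 0.7]), np.array([0.1, 0.8])]
#   y_obs = [np.array([1., 0., 0.]), np.array([0., 1.])]
#   precision, recall, tpr, tnr, fpr, fnr = pr_global(y_hat, y_obs, threshold=0.5)
#   # each returned array has shape (2, 1): one row per data set, one global column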
def pr_label(
y_hat,
y_obs,
labels,
labels_unique,
threshold=0.5
):
"""
:param y_hat:
:param y_obs:
:param labels:
:param threshold:
:return: (data sets, labels_unique) for each metric
"""
assert len(y_hat) == len(y_obs)
assert len(y_hat) == len(labels)
assert np.all([y_hat[i].shape == y_obs[i].shape for i in range(len(y_hat))])
for i in range(len(y_hat)):
if len(y_hat[i].shape) == 1:
y_hat[i] = np.expand_dims(y_hat[i], axis=1)
if len(y_obs[i].shape) == 1:
y_obs[i] = np.expand_dims(y_obs[i], axis=1)
if labels[i] is not None:
if len(labels[i]) == 0:
labels[i] = None
for i in range(len(y_hat)):
if labels[i] is not None:
assert y_hat[i].shape[0] == len(labels[i]), \
"%i, %i \n %s" % (y_hat[i].shape[0], len(labels[i]), str(labels[i]))
is_multiclass = y_obs[0].shape[1] > 1
if is_multiclass:
y_hat_bool = [np.ones_like(y_hat[i]) == 0 for i in range(len(y_hat))]
for i in range(len(y_hat)):
for j in range(y_hat[i].shape[0]):
y_hat_bool[i][j, np.argmax(y_hat[i][j, :])] = True
else:
y_hat_bool = [x >= threshold for x in y_hat]
tp = [
np.logical_and(y_obs[i] >= threshold, y_hat_bool[i])
for i in range(len(y_obs))
]
tn = [
np.logical_and(y_obs[i] < threshold, np.logical_not(y_hat_bool[i]))
for i in range(len(y_obs))
]
fp = [
np.logical_and(y_obs[i] < threshold, y_hat_bool[i])
for i in range(len(y_obs))
]
fn = [
np.logical_and(y_obs[i] >= threshold, np.logical_not(y_hat_bool[i]))
for i in range(len(y_obs))
]
if labels[0] is None or y_obs[0].shape[1] > 1:
# labels are grouped in columns of y and confusion table arrays.
tp = np.concatenate([np.sum(x, axis=0, keepdims=True) for x in tp], axis=0)
tn = np.concatenate([np.sum(x, axis=0, keepdims=True) for x in tn], axis=0)
fp = np.concatenate([np.sum(x, axis=0, keepdims=True) for x in fp], axis=0)
fn = np.concatenate([np.sum(x, axis=0, keepdims=True) for x in fn], axis=0)
elif labels[0] is not None and y_obs[0].shape[1] == 1:
assert labels_unique is not None, "supply labels_unique"
# y and confusion table arrays all have a single column and labels correspond to sets of rows.
tp = np.concatenate([
np.concatenate([
np.sum(x[labels[i] == y, :], axis=0, keepdims=True)
if np.sum(labels[i] == y) > 0 else np.array([[np.nan]])
for y in labels_unique
], axis=-1)
for i, x in enumerate(tp)
], axis=0)
tn = np.concatenate([
np.concatenate([
np.sum(x[labels[i] == y, :], axis=0, keepdims=True)
if np.sum(labels[i] == y) > 0 else np.array([[np.nan]])
for y in labels_unique
], axis=-1)
for i, x in enumerate(tn)
], axis=0)
fp = np.concatenate([
np.concatenate([
np.sum(x[labels[i] == y, :], axis=0, keepdims=True)
if np.sum(labels[i] == y) > 0 else np.array([[np.nan]])
for y in labels_unique
], axis=-1)
for i, x in enumerate(fp)
], axis=0)
fn = np.concatenate([
np.concatenate([
np.sum(x[labels[i] == y, :], axis=0, keepdims=True)
if np.sum(labels[i] == y) > 0 else np.array([[np.nan]])
for y in labels_unique
], axis=-1)
for i, x in enumerate(fn)
], axis=0)
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import sys
import unittest
import numpy as np
import torch
import torch.nn.functional as F
import lava.lib.dl.slayer as slayer
verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
seed = np.random.randint(1000)
# seed = 902
np.random.seed(seed)
import cv2
import numpy as np
import os
cam = cv2.VideoCapture(0)
classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
input = input("enter the name: ")
frames = []
outputs = []
while(True):
ret, Image = cam.read(0)
if (ret):
faces = classifier.detectMultiScale(Image)
for i in faces:
x, y, w, h = i
rec = cv2.rectangle(Image, (x, y), (x+w, y+h), (255, 0, 0), 2)
rec_win = Image[y:y + h, x:x + w]
fix = cv2.resize(rec_win, (100, 100))
opt = cv2.cvtColor(fix, cv2.COLOR_BGR2GRAY)
cv2.imshow("My Camera", Image)
cv2.imshow("Frame", opt)
key = cv2.waitKey(1)
if(key == ord("q") or key == ord("Q") or key == ord("E")):
break
if (key == ord("c") or key == ord("C")):
#cv2.imwrite("cam.jpg", Image)
frames.append(opt.flatten())
outputs.append([input])
hly = np.array(frames)
vly = np.array(outputs)
print(type(vly))
save = np.hstack([vly, hly])
import numpy as np
from aerosandbox import ExplicitAnalysis
from aerosandbox.geometry import *
from aerosandbox.performance import OperatingPoint
from aerosandbox.aerodynamics.aero_3D.singularities.uniform_strength_horseshoe_singularities import \
calculate_induced_velocity_horseshoe
from typing import Dict, Any
### Define some helper functions that take a vector and make it a Nx1 or 1xN, respectively.
# Useful for broadcasting with matrices later.
def tall(array):
return np.reshape(array, (-1, 1))
def wide(array):
return np.reshape(array, (1, -1))
class VortexLatticeMethod(ExplicitAnalysis):
"""
An explicit (linear) vortex-lattice-method aerodynamics analysis.
Usage example:
>>> analysis = asb.VortexLatticeMethod(
>>> airplane=my_airplane,
>>> op_point=asb.OperatingPoint(
>>> velocity=100, # m/s
>>> alpha=5, # deg
>>> beta=4, # deg
>>> p=0.01, # rad/sec
>>> q=0.02, # rad/sec
>>> r=0.03, # rad/sec
>>> )
>>> )
>>> aero_data = analysis.run()
>>> analysis.draw()
"""
def __init__(self,
airplane: Airplane,
op_point: OperatingPoint,
run_symmetric_if_possible: bool = False,
verbose: bool = False,
spanwise_resolution: int = 10,
spanwise_spacing: str = "cosine",
chordwise_resolution: int = 10,
chordwise_spacing: str = "cosine",
vortex_core_radius: float = 1e-8,
align_trailing_vortices_with_wind: bool = False,
):
super().__init__()
self.airplane = airplane
self.op_point = op_point
self.verbose = verbose
self.spanwise_resolution = spanwise_resolution
self.spanwise_spacing = spanwise_spacing
self.chordwise_resolution = chordwise_resolution
self.chordwise_spacing = chordwise_spacing
self.vortex_core_radius = vortex_core_radius
self.align_trailing_vortices_with_wind = align_trailing_vortices_with_wind
### Determine whether you should run the problem as symmetric
self.run_symmetric = False
if run_symmetric_if_possible:
raise NotImplementedError("VLM with symmetry detection not yet implemented!")
# try:
# self.run_symmetric = ( # Satisfies assumptions
# self.op_point.beta == 0 and
# self.op_point.p == 0 and
# self.op_point.r == 0 and
# self.airplane.is_entirely_symmetric()
# )
# except RuntimeError: # Required because beta, p, r, etc. may be non-numeric (e.g. opti variables)
# pass
def run(self) -> Dict[str, Any]:
if self.verbose:
print("Meshing...")
##### Make Panels
front_left_vertices = []
back_left_vertices = []
back_right_vertices = []
front_right_vertices = []
is_trailing_edge = []
for wing in self.airplane.wings:
points, faces = wing.mesh_thin_surface(
method="quad",
chordwise_resolution=self.chordwise_resolution,
chordwise_spacing=self.chordwise_spacing,
spanwise_resolution=self.spanwise_resolution,
spanwise_spacing=self.spanwise_spacing,
add_camber=True
)
front_left_vertices.append(points[faces[:, 0], :])
back_left_vertices.append(points[faces[:, 1], :])
back_right_vertices.append(points[faces[:, 2], :])
front_right_vertices.append(points[faces[:, 3], :])
is_trailing_edge.append(
(np.arange(len(faces)) + 1) % self.chordwise_resolution == 0
)
front_left_vertices = np.concatenate(front_left_vertices)
back_left_vertices = np.concatenate(back_left_vertices)
back_right_vertices = np.concatenate(back_right_vertices)
front_right_vertices = np.concatenate(front_right_vertices)
is_trailing_edge = np.concatenate(is_trailing_edge)
### Compute panel statistics
diag1 = front_right_vertices - back_left_vertices
diag2 = front_left_vertices - back_right_vertices
cross = np.cross(diag1, diag2)
cross_norm = np.linalg.norm(cross, axis=1)
normal_directions = cross / tall(cross_norm)
areas = cross_norm / 2
# Compute the location of points of interest on each panel
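# (Standard vortex-lattice placement: the bound vortex sits at the panel
# quarter-chord and the collocation point at the three-quarter-chord, the
# classic 1/4-3/4 rule, so that enforcing flow tangency there reproduces
# thin-airfoil results for each chordwise strip.)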
left_vortex_vertices = 0.75 * front_left_vertices + 0.25 * back_left_vertices
right_vortex_vertices = 0.75 * front_right_vertices + 0.25 * back_right_vertices
vortex_centers = (left_vortex_vertices + right_vortex_vertices) / 2
vortex_bound_leg = right_vortex_vertices - left_vortex_vertices
collocation_points = (
0.5 * (0.25 * front_left_vertices + 0.75 * back_left_vertices) +
0.5 * (0.25 * front_right_vertices + 0.75 * back_right_vertices)
)
### Save things to the instance for later access
self.front_left_vertices = front_left_vertices
self.back_left_vertices = back_left_vertices
self.back_right_vertices = back_right_vertices
self.front_right_vertices = front_right_vertices
self.is_trailing_edge = is_trailing_edge
self.normal_directions = normal_directions
self.areas = areas
self.left_vortex_vertices = left_vortex_vertices
self.right_vortex_vertices = right_vortex_vertices
self.vortex_centers = vortex_centers
self.vortex_bound_leg = vortex_bound_leg
self.collocation_points = collocation_points
##### Setup Operating Point
if self.verbose:
print("Calculating the freestream influence...")
steady_freestream_velocity = self.op_point.compute_freestream_velocity_geometry_axes() # Direction the wind is GOING TO, in geometry axes coordinates
steady_freestream_direction = steady_freestream_velocity / np.linalg.norm(steady_freestream_velocity)
rotation_freestream_velocities = self.op_point.compute_rotation_velocity_geometry_axes(
collocation_points)
freestream_velocities = np.add(wide(steady_freestream_velocity), rotation_freestream_velocities)
# Nx3, represents the freestream velocity at each panel collocation point (c)
freestream_influences = np.sum(freestream_velocities * normal_directions, axis=1)
### Save things to the instance for later access
self.steady_freestream_velocity = steady_freestream_velocity
self.steady_freestream_direction = steady_freestream_direction
self.freestream_velocities = freestream_velocities
##### Setup Geometry
### Calculate AIC matrix
if self.verbose:
print("Calculating the collocation influence matrix...")
u_collocations_unit, v_collocations_unit, w_collocations_unit = calculate_induced_velocity_horseshoe(
x_field=tall(collocation_points[:, 0]),
y_field=tall(collocation_points[:, 1]),
z_field=tall(collocation_points[:, 2]),
x_left=wide(left_vortex_vertices[:, 0]),
y_left=wide(left_vortex_vertices[:, 1]),
z_left=wide(left_vortex_vertices[:, 2]),
x_right=wide(right_vortex_vertices[:, 0]),
y_right=wide(right_vortex_vertices[:, 1]),
z_right=wide(right_vortex_vertices[:, 2]),
trailing_vortex_direction=steady_freestream_direction if self.align_trailing_vortices_with_wind else np.array([1, 0, 0]),
gamma=1,
vortex_core_radius=self.vortex_core_radius
)
AIC = (
u_collocations_unit * tall(normal_directions[:, 0]) +
v_collocations_unit * tall(normal_directions[:, 1]) +
w_collocations_unit * tall(normal_directions[:, 2])
)
##### Calculate Vortex Strengths
if self.verbose:
print("Calculating vortex strengths...")
self.vortex_strengths = np.linalg.solve(AIC, -freestream_influences)
##### Calculate forces
### Calculate Near-Field Forces and Moments
# Governing Equation: The force on a straight, small vortex filament is F = rho * cross(V, l) * gamma,
# where rho is density, V is the velocity vector, cross() is the cross product operator,
# l is the vector of the filament itself, and gamma is the circulation.
if self.verbose:
print("Calculating forces on each panel...")
# Calculate the induced velocity at the center of each bound leg
V_centers = self.get_velocity_at_points(vortex_centers)
# Calculate forces_inviscid_geometry, the force on the ith panel. Note that this is in GEOMETRY AXES,
# not WIND AXES or BODY AXES.
Vi_cross_li = np.cross(V_centers, vortex_bound_leg, axis=1)
forces_geometry = self.op_point.atmosphere.density() * Vi_cross_li * tall(self.vortex_strengths)
moments_geometry = np.cross(
np.add(vortex_centers, -wide(self.airplane.xyz_ref)),
forces_geometry
)
# Calculate total forces and moments
force_geometry = np.sum(forces_geometry, axis=0)
moment_geometry = np.sum(moments_geometry, axis=0)
force_wind = self.op_point.convert_axes(
force_geometry[0], force_geometry[1], force_geometry[2],
from_axes="geometry",
to_axes="wind"
)
moment_wind = self.op_point.convert_axes(
moment_geometry[0], moment_geometry[1], moment_geometry[2],
from_axes="geometry",
to_axes="wind"
)
### Save things to the instance for later access
self.forces_geometry = forces_geometry
self.moments_geometry = moments_geometry
self.force_geometry = force_geometry
self.force_wind = force_wind
self.moment_geometry = moment_geometry
self.moment_wind = moment_wind
# Calculate dimensional forces
L = -force_wind[2]
D = -force_wind[0]
Y = force_wind[1]
l = moment_wind[0] # TODO review axes
m = moment_wind[1]
n = moment_wind[2]
# Calculate nondimensional forces
q = self.op_point.dynamic_pressure()
s_ref = self.airplane.s_ref
b_ref = self.airplane.b_ref
c_ref = self.airplane.c_ref
CL = L / q / s_ref
CD = D / q / s_ref
CY = Y / q / s_ref
Cl = l / q / s_ref / b_ref
Cm = m / q / s_ref / c_ref
Cn = n / q / s_ref / b_ref
return {
"L" : L,
"D" : D,
"Y" : Y,
"l" : l,
"m" : m,
"n" : n,
"CL" : CL,
"CD" : CD,
"CY" : CY,
"Cl" : Cl,
"Cm" : Cm,
"Cn" : Cn,
"F_g": force_geometry,
"F_w": force_wind,
"M_g": moment_geometry,
"M_w": moment_wind
}
def get_induced_velocity_at_points(self, points: np.ndarray) -> np.ndarray:
"""
Computes the induced velocity at a set of points in the flowfield.
Args:
points: A Nx3 array of points that you would like to know the induced velocities at. Given in geometry axes.
Returns: A Nx3 of the induced velocity at those points. Given in geometry axes.
"""
u_induced, v_induced, w_induced = calculate_induced_velocity_horseshoe(
x_field=tall(points[:, 0]),
y_field=tall(points[:, 1]),
z_field=tall(points[:, 2]),
x_left=wide(self.left_vortex_vertices[:, 0]),
y_left=wide(self.left_vortex_vertices[:, 1]),
z_left=wide(self.left_vortex_vertices[:, 2]),
x_right=wide(self.right_vortex_vertices[:, 0]),
y_right=wide(self.right_vortex_vertices[:, 1]),
z_right=wide(self.right_vortex_vertices[:, 2]),
trailing_vortex_direction=self.steady_freestream_direction if self.align_trailing_vortices_with_wind else np.array([1, 0, 0]),
gamma=wide(self.vortex_strengths),
vortex_core_radius=self.vortex_core_radius
)
u_induced = np.sum(u_induced, axis=1)
v_induced = np.sum(v_induced, axis=1)
w_induced = np.sum(w_induced, axis=1)
# Assemble the N x 3 induced-velocity array described in the docstring.
return np.concatenate([tall(u_induced), tall(v_induced), tall(w_induced)], axis=1)
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal_nulp)
import numpy as np
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
def _stride_repeat(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.stride_repeat(*args, **kwargs)
class TestStride:
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
"""
This is an adaptation of the original window extraction algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than input',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_invalid_params(self, n, noverlap):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_repeat_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
_stride_repeat(x, 5)
#!/bin/python
import binascii
import codecs
import json
import logging
import os
import time
from shutil import copyfile
import numpy as np
import csv
from configs.alignerconfig import directory_path
import uuid
from anuvaad_auditor.errorhandler import post_error
from anuvaad_auditor.errorhandler import post_error_wf
from anuvaad_auditor.loghandler import log_exception
log = logging.getLogger('file')
two_files = True
no_of_words = 200
file_encoding = 'utf-16'
class AlignmentUtils:
def __init__(self):
pass
# Utility to parse input files.
def parse_input_file(self, path_eng, path_indic):
source = []
target_corp = []
if two_files:
with codecs.open(path_indic, 'r',file_encoding) as txt_file:
for row in txt_file:
if len(row.rstrip()) != 0:
source.append(row.rstrip())
with codecs.open(path_eng, 'r',file_encoding) as txt_file:
for row in txt_file:
if len(row.rstrip()) != 0:
target_corp.append(row.rstrip())
else:
with codecs.open(path_eng, 'r',file_encoding) as csv_file:
csv_reader = csv.reader((l.replace('\0', '') for l in csv_file))
for row in csv_reader:
if len(row) != 0:
source.append(row[0])
target_corp.append(row[1])
return source, target_corp
def parse_json(self, path_eng, path_indic):
source = []
target_corp = []
f = open(path_indic)
response = json.load(f)
for page in response['result']:
for block in page['text_blocks']:
for sentence in block['tokenized_sentences'] :
source.append(sentence['src'])
f = open(path_eng)
response = json.load(f)
for page in response['result']:
for block in page['text_blocks']:
for sentence in block['tokenized_sentences'] :
target_corp.append(sentence['src'])
return source, target_corp
# Utility to write the output to a file
def write_output(self, list, path):
with codecs.open(path, 'w', file_encoding) as txt_file:
for row in list:
txt_file.write(row + "\r\n")
# Utility to write the JSON output to a file
def write_json_output(self, df, path):
with open(path, 'w', encoding = file_encoding) as json_file:
df.to_json(json_file, force_ascii=False,orient='records')
# Utility to calculate cosine distances between 2 vectors
def cscalc(self, vector_one, vector_two):
vector_one = np.squeeze(vector_one)
vector_two = np.squeeze(vector_two)
dot = np.dot(vector_one, vector_two)
norma = np.linalg.norm(vector_one)
normb = np.linalg.norm(vector_two)
return dot / (norma * normb)
from control import tf
from matplotlib import pyplot as plt
from numpy import abs, arccos, arctan2, array, count_nonzero, cos, delete, imag, linspace, log10, meshgrid, polymul, \
polyval, rad2deg, real, roots, sqrt, tan
from scipy.signal import tf2zpk
def bode_alt(sys, title=''):
mag, phase, omega = frequency_response(sys)
plt.subplot(2, 1, 1)
plt.plot(omega, mag)
plt.xscale('log')
plt.ylabel('Magnitude [dB]')
plt.grid()
plt.title('Bode' + title)
plt.subplot(2, 1, 2)
plt.plot(omega, phase)
plt.xscale('log')
plt.ylabel('Phase [deg]')
plt.xlabel('frequency [rps]')
plt.grid()
plt.show()
def frequency_response(tf, w_in=[0.01, 100], plot=0):
w = linspace(w_in[0], w_in[1], 10000)
wi = w*1j
g = 20 * log10(abs(polyval(tf.num[0][0], wi))) - 20 * log10(abs(polyval(tf.den[0][0], wi)))
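# Sanity check (worked example): for H(s) = 1/(s + 1) at w = 1 rad/s,
# |H(j1)| = 1/sqrt(2), so the magnitude above is 20*log10(1/sqrt(2)) ~ -3.01 dB,
# the familiar first-order corner frequency.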
# Author : <NAME> [https://github.com/vstark21]
import os
import numpy as np
# This function builds a CNN model.
def build_model():
files = os.listdir()
if "mnist_model.h5" not in files:
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from keras.datasets import mnist
from keras.models import load_model
import pandas as pd
(feat_tkeras, lab_tkeras), (feat_val, lab_val) = mnist.load_data()
train_data = pd.read_csv("train.csv")
lab_train = train_data["label"]
feat_train = np.array(train_data.drop("label", axis=1)).reshape((-1, 28, 28, 1))
features_train = np.concatenate((np.expand_dims(feat_tkeras, axis=-1), feat_train), axis=0)
labels_train = np.concatenate((lab_tkeras, lab_train), axis=0)
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from .parameterized_spline import ParameterizedSpline
import numpy as np
import math
from transformations import quaternion_from_euler, euler_from_quaternion
REF_VECTOR = [0.0,1.0]
def get_tangent_at_parameter(spline, u, eval_range=0.5):
"""
Returns
------
* dir_vector : np.ndarray
The normalized direction vector
* start : np.ndarry
start of the tangent line / the point evaluated at arc length
"""
tangent = [1.0,0.0]
magnitude = 0
while magnitude == 0: # handle cases where the granularity of the spline is too low
l1 = u - eval_range
l2 = u + eval_range
p1 = spline.query_point_by_absolute_arc_length(l1)
p2 = spline.query_point_by_absolute_arc_length(l2)
tangent = p2 - p1
magnitude = np.linalg.norm(tangent)
eval_range += 0.1
if magnitude != 0:
tangent /= magnitude
return tangent
def get_angle_between_vectors(a,b):
a /= np.linalg.norm(a)
b /= np.linalg.norm(b)
angle = math.acos((a[0] * b[0] + a[1] * b[1]))
return angle
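
# Quick illustrative check (not from the original module): the angle between the
# +X and +Y unit vectors should come out as pi/2. Note that the helper normalizes
# its arguments in place, so callers should pass float arrays (or copies).
if __name__ == "__main__":
    _u = np.array([1.0, 0.0])
    _v = np.array([0.0, 1.0])
    assert abs(get_angle_between_vectors(_u, _v) - math.pi / 2) < 1e-9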
def get_tangents2d(translation, eval_range=0.5):
""" Create a list of tangents for a list of translations to be used for an AnnotatedSpline"""
""" TODO fix """
steps = len(translation)
spline = ParameterizedSpline(translation)
parameters = np.linspace(0, 1, steps)# this is not correct
tangents = []
for u in parameters:
tangent = get_tangent_at_parameter(spline, u, eval_range)
tangents.append(tangent)
return tangents
def complete_tangents(translation, given_tangents, eval_range=0.5):
steps = len(translation)
spline = ParameterizedSpline(translation)
parameters = [spline.get_absolute_arc_length_of_point(t)[0] for t in translation]
#parameters = np.linspace(0, 1, steps)
tangents = given_tangents
for idx, u in enumerate(parameters):
if tangents[idx] is None:
tangents[idx] = get_tangent_at_parameter(spline, u, eval_range)
return tangents
def complete_orientations_from_tangents(translation, given_orientations, eval_range=0.5, ref_vector=REF_VECTOR):
steps = len(translation)
spline = ParameterizedSpline(translation)
parameters = np.linspace(0, 1, steps)
orientations = given_orientations
for idx, u in enumerate(parameters):
if orientations[idx] is None:
tangent = get_tangent_at_parameter(spline, u, eval_range)
print("estimate tangent",idx, tangent)
orientations[idx] = tangent_to_quaternion(tangent, ref_vector)
return orientations
def tangent_to_quaternion(tangent, ref_vector=REF_VECTOR):
a = ref_vector
b = np.array([tangent[0], tangent[2]])
angle = get_angle_between_vectors(a, b)
return quaternion_from_euler(0, angle, 0)
def quaternion_to_tangent(q, ref_vector=REF_VECTOR):
e = euler_from_quaternion(q)
angle = e[1]
sa = math.sin(angle)
ca = math.cos(angle)
m = np.array([[ca, -sa], [sa, ca]])
return np.dot(m, ref_vector)
def tangents_to_quaternions(tangents, ref_vector=REF_VECTOR):
quaternions = []
for tangent in tangents:
q = tangent_to_quaternion(tangent, ref_vector)
quaternions.append(q)
return quaternions
def get_orientations_from_tangents2d(translation, ref_vector=REF_VECTOR):
""" Create a list of orientations for a list of translations to be used for an AnnotatedSpline.
    Note: it seems that as long as the number of points is the same, the same spline parameters can be used for the
query of the spline.
"""
""" TODO fix """
ref_vector = np.array(ref_vector)
steps = len(translation)
spline = ParameterizedSpline(translation)
parameters = np.linspace(0,1, steps)
orientation = []
for u in parameters:
tangent = get_tangent_at_parameter(spline, u, eval_range=0.1)
a = ref_vector
b = np.array([tangent[0], tangent[2]])
angle = get_angle_between_vectors(a, b)
orientation.append(quaternion_from_euler(*np.radians([0, angle, 0])))
return orientation
def get_tangents(points, length):
spline = ParameterizedSpline(points)
x = np.linspace(0, spline.full_arc_length, length)
new_points = []
tangents = []
for v in x:
s, t = spline.get_tangent_at_arc_length(v)
new_points.append(s)
tangents.append(t)
return new_points, tangents
def plot_annotated_spline(spline,root_motion, filename, scale_factor=0.7):
from matplotlib import pyplot as plt
def plot_annotated_tangent(ax, spline, x, length):
p = spline.query_point_by_absolute_arc_length(x)*scale_factor
t = spline.query_orientation_by_absolute_arc_length(x)*scale_factor
start = -p[0], p[2]
p_prime = [-p[0] + -t[0] * length, p[2] + t[2] * length]
# p_prime = [p[0] + t[0] * length, p[2] + t[2] * length]
points = np.array([start, p_prime]).T
# t = tangent.T.tolist()
#
# print "tangent", t
ax.plot(*points)
fig = plt.figure()
sub_plot_coordinate = (1, 1, 1)
ax = fig.add_subplot(*sub_plot_coordinate)
control_points = spline.spline.control_points
    control_points = np.array(control_points)
ENABLE_MULTIPROCESSING = True
from dsl import cpp_trace_param_automata
def generate_public_submission():
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from xgboost import XGBClassifier
import pdb
# data_path = Path('.')
data_path = Path('.')
if not (data_path / 'test').exists():
data_path = Path('../input/abstraction-and-reasoning-challenge')
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
def plot_result(test_input, test_prediction,
input_shape):
"""
Plots the first train and test pairs of a specified task,
using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
test_input = test_input.reshape(input_shape[0], input_shape[1])
axs[0].imshow(test_input, cmap=cmap, norm=norm)
axs[0].axis('off')
axs[0].set_title('Actual Target')
test_prediction = test_prediction.reshape(input_shape[0], input_shape[1])
axs[1].imshow(test_prediction, cmap=cmap, norm=norm)
axs[1].axis('off')
axs[1].set_title('Model Prediction')
plt.tight_layout()
plt.show()
def plot_test(test_prediction, task_name):
"""
Plots the first train and test pairs of a specified task,
using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 1, figsize=(15, 15))
axs.imshow(test_prediction, cmap=cmap, norm=norm)
axs.axis('off')
axs.set_title(f'Test Prediction {task_name}')
plt.tight_layout()
plt.show()
# https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
sample_sub1 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub1 = sample_sub1.set_index('output_id')
sample_sub1.head()
def get_moore_neighbours(color, cur_row, cur_col, nrows, ncols):
if cur_row <= 0:
top = -1
else:
top = color[cur_row - 1][cur_col]
if cur_row >= nrows - 1:
bottom = -1
else:
bottom = color[cur_row + 1][cur_col]
if cur_col <= 0:
left = -1
else:
left = color[cur_row][cur_col - 1]
if cur_col >= ncols - 1:
right = -1
else:
right = color[cur_row][cur_col + 1]
return top, bottom, left, right
def get_tl_tr(color, cur_row, cur_col, nrows, ncols):
if cur_row == 0:
top_left = -1
top_right = -1
else:
if cur_col == 0:
top_left = -1
else:
top_left = color[cur_row - 1][cur_col - 1]
if cur_col == ncols - 1:
top_right = -1
else:
top_right = color[cur_row - 1][cur_col + 1]
return top_left, top_right
def make_features(input_color, nfeat):
nrows, ncols = input_color.shape
feat = np.zeros((nrows * ncols, nfeat))
cur_idx = 0
for i in range(nrows):
for j in range(ncols):
feat[cur_idx, 0] = i
feat[cur_idx, 1] = j
feat[cur_idx, 2] = input_color[i][j]
feat[cur_idx, 3:7] = get_moore_neighbours(input_color, i, j, nrows, ncols)
feat[cur_idx, 7:9] = get_tl_tr(input_color, i, j, nrows, ncols)
feat[cur_idx, 9] = len(np.unique(input_color[i, :]))
feat[cur_idx, 10] = len(np.unique(input_color[:, j]))
feat[cur_idx, 11] = (i + j)
feat[cur_idx, 12] = len(np.unique(input_color[i - local_neighb:i + local_neighb,
j - local_neighb:j + local_neighb]))
cur_idx += 1
return feat
def features(task, mode='train'):
num_train_pairs = len(task[mode])
feat, target = [], []
global local_neighb
for task_num in range(num_train_pairs):
input_color = np.array(task[mode][task_num]['input'])
target_color = task[mode][task_num]['output']
nrows, ncols = len(task[mode][task_num]['input']), len(task[mode][task_num]['input'][0])
target_rows, target_cols = len(task[mode][task_num]['output']), len(task[mode][task_num]['output'][0])
if (target_rows != nrows) or (target_cols != ncols):
print('Number of input rows:', nrows, 'cols:', ncols)
print('Number of target rows:', target_rows, 'cols:', target_cols)
not_valid = 1
return None, None, 1
imsize = nrows * ncols
# offset = imsize*task_num*3 #since we are using three types of aug
feat.extend(make_features(input_color, nfeat))
target.extend(np.array(target_color).reshape(-1, ))
return np.array(feat), np.array(target), 0
# mode = 'eval'
mode = 'test'
if mode == 'eval':
task_path = evaluation_path
elif mode == 'train':
task_path = training_path
elif mode == 'test':
task_path = test_path
all_task_ids = sorted(os.listdir(task_path))
nfeat = 13
local_neighb = 5
valid_scores = {}
model_accuracies = {'ens': []}
pred_taskids = []
for task_id in all_task_ids:
task_file = str(task_path / task_id)
with open(task_file, 'r') as f:
task = json.load(f)
feat, target, not_valid = features(task)
if not_valid:
print('ignoring task', task_file)
print()
not_valid = 0
continue
xgb = XGBClassifier(n_estimators=10, n_jobs=-1)
xgb.fit(feat, target, verbose=-1)
# training on input pairs is done.
# test predictions begins here
num_test_pairs = len(task['test'])
for task_num in range(num_test_pairs):
cur_idx = 0
input_color = np.array(task['test'][task_num]['input'])
nrows, ncols = len(task['test'][task_num]['input']), len(
task['test'][task_num]['input'][0])
feat = make_features(input_color, nfeat)
print('Made predictions for ', task_id[:-5])
preds = xgb.predict(feat).reshape(nrows, ncols)
if (mode == 'train') or (mode == 'eval'):
ens_acc = (np.array(task['test'][task_num]['output']) == preds).sum() / (nrows * ncols)
model_accuracies['ens'].append(ens_acc)
pred_taskids.append(f'{task_id[:-5]}_{task_num}')
# print('ensemble accuracy',(np.array(task['test'][task_num]['output'])==preds).sum()/(nrows*ncols))
# print()
preds = preds.astype(int).tolist()
# plot_test(preds, task_id)
sample_sub1.loc[f'{task_id[:-5]}_{task_num}',
'output'] = flattener(preds)
if (mode == 'train') or (mode == 'eval'):
df = pd.DataFrame(model_accuracies, index=pred_taskids)
print(df.head(10))
print(df.describe())
for c in df.columns:
print(f'for {c} no. of complete tasks is', (df.loc[:, c] == 1).sum())
df.to_csv('ens_acc.csv')
sample_sub1.head()
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
eval_tasks = sorted(os.listdir(evaluation_path))
T = training_tasks
Trains = []
for i in range(400):
task_file = str(training_path / T[i])
task = json.load(open(task_file, 'r'))
Trains.append(task)
E = eval_tasks
Evals = []
for i in range(400):
task_file = str(evaluation_path / E[i])
task = json.load(open(task_file, 'r'))
Evals.append(task)
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
# 0:black, 1:blue, 2:red, 3:greed, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
# plt.show()
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(4 * n, 8), dpi=50)
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Train-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Train-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Test-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Test-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
plt.tight_layout()
plt.show()
def plot_picture(x):
plt.imshow(np.array(x), cmap=cmap, norm=norm)
plt.show()
def Defensive_Copy(A):
n = len(A)
k = len(A[0])
L = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
L[i, j] = 0 + A[i][j]
return L.tolist()
def Create(task, task_id=0):
n = len(task['train'])
Input = [Defensive_Copy(task['train'][i]['input']) for i in range(n)]
Output = [Defensive_Copy(task['train'][i]['output']) for i in range(n)]
Input.append(Defensive_Copy(task['test'][task_id]['input']))
return Input, Output
def Recolor(task):
Input = task[0]
Output = task[1]
Test_Picture = Input[-1]
Input = Input[:-1]
N = len(Input)
for x, y in zip(Input, Output):
if len(x) != len(y) or len(x[0]) != len(y[0]):
return -1
Best_Dict = -1
Best_Q1 = -1
Best_Q2 = -1
Best_v = -1
# v ranges from 0 to 3. This gives an extra flexibility of measuring distance from any of the 4 corners
Pairs = []
for t in range(15):
for Q1 in range(1, 8):
for Q2 in range(1, 8):
if Q1 + Q2 == t:
Pairs.append((Q1, Q2))
for Q1, Q2 in Pairs:
for v in range(4):
if Best_Dict != -1:
continue
possible = True
Dict = {}
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
color2 = y[i][j]
if color1 != color2:
rule = (p1, p2, color1)
if rule not in Dict:
Dict[rule] = color2
elif Dict[rule] != color2:
possible = False
if possible:
# Let's see if we actually solve the problem
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
rule = (p1, p2, color1)
if rule in Dict:
color2 = 0 + Dict[rule]
else:
color2 = 0 + y[i][j]
if color2 != y[i][j]:
possible = False
if possible:
Best_Dict = Dict
Best_Q1 = Q1
Best_Q2 = Q2
Best_v = v
if Best_Dict == -1:
return -1 # meaning that we didn't find a rule that works for the traning cases
# Otherwise there is a rule: so let's use it:
n = len(Test_Picture)
k = len(Test_Picture[0])
answer = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
if Best_v == 0 or Best_v == 2:
p1 = i % Best_Q1
else:
p1 = (n - 1 - i) % Best_Q1
if Best_v == 0 or Best_v == 3:
p2 = j % Best_Q2
else:
p2 = (k - 1 - j) % Best_Q2
color1 = Test_Picture[i][j]
rule = (p1, p2, color1)
if (p1, p2, color1) in Best_Dict:
answer[i][j] = 0 + Best_Dict[rule]
else:
answer[i][j] = 0 + color1
return answer.tolist()
sample_sub2 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub2.head()
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
example_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# display(example_grid)
print(flattener(example_grid))
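    # the 3x3 example grid above flattens to the submission string '|123|456|789|'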
Solved = []
Problems = sample_sub2['output_id'].values
Proposed_Answers = []
test_paths_my = {task.stem: json.load(task.open()) for task in test_path.iterdir()}
test_task_ids = np.sort(list(test_paths_my.keys()))
print(Problems, len(Problems))
task_number_my = dict(zip(test_task_ids, np.arange(100)))
for i in range(len(Problems)):
output_id = Problems[i]
task_id = output_id.split('_')[0]
pair_id = int(output_id.split('_')[1])
f = str(test_path / str(task_id + '.json'))
with open(f, 'r') as read_file:
task = json.load(read_file)
n = len(task['train'])
Input = [Defensive_Copy(task['train'][j]['input']) for j in range(n)]
Output = [Defensive_Copy(task['train'][j]['output']) for j in range(n)]
Input.append(Defensive_Copy(task['test'][pair_id]['input']))
solution = Recolor([Input, Output])
pred = ''
if solution != -1:
Solved.append(i)
pred1 = flattener(solution)
pred = pred + pred1 + ' '
if pred == '':
pred = flattener(example_grid)
Proposed_Answers.append(pred)
sample_sub2['output'] = Proposed_Answers
sample_sub1 = sample_sub1.reset_index()
sample_sub1 = sample_sub1.sort_values(by="output_id")
sample_sub2 = sample_sub2.sort_values(by="output_id")
out1 = sample_sub1["output"].astype(str).values
out2 = sample_sub2["output"].astype(str).values
merge_output = []
for o1, o2 in zip(out1, out2):
o = o1.strip().split(" ")[:1] + o2.strip().split(" ")[:2]
o = " ".join(o[:3])
merge_output.append(o)
sample_sub1["output"] = merge_output
sample_sub1["output"] = sample_sub1["output"].astype(str)
# test_paths_my = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
# test_task_ids = np.sort(list(test_paths_my.keys()))
# task_number_my = dict(zip(test_task_ids, np.arange(100)))
submission = sample_sub1.copy()
submission.to_csv("public_submission.csv", index=False)
#generate_public_submission()
import numpy as np
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
import time
from collections import defaultdict
import os
import json
import random
import copy
import networkx as nx
from pathlib import Path
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from itertools import product
import pandas as pd
import multiprocessing
import subprocess
# from moviepy.editor import ImageSequenceClip
# from moviepy.editor import clips_array, CompositeVideoClip
# from moviepy.video.io.html_tools import html_embed, HTML2
# def display_vid(vid, verbose=False, **html_kw):
# """
# Display a moviepy video clip, useful for removing loadbars
# """
# rd_kwargs = {
# 'fps': 10, 'verbose': verbose
# }
# if not verbose:
# rd_kwargs['logger'] = None
# return HTML2(html_embed(vid, filetype=None, maxduration=60,
# center=True, rd_kwargs=rd_kwargs, **html_kw))
data_path = Path('../input/abstraction-and-reasoning-challenge/')
# data_path = Path('.') # Artyom: it's better use symlinks locally
cmap_lookup = [
'#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'
]
cmap_lookup = [np.array([int(x[1:3], 16), int(x[3:5], 16), int(x[5:], 16)]) for x in cmap_lookup]
def cmap(x):
"""
Translate a task matrix to a color coded version
arguments
x : a h x w task matrix
returns
a h x w x 3 matrix with colors instead of numbers
"""
y = np.zeros((*x.shape, 3))
y[x < 0, :] = np.array([112, 128, 144])
y[x > 9, :] = np.array([255, 248, 220])
for i, c in enumerate(cmap_lookup):
y[x == i, :] = c
return y
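
# Illustrative check (not part of the original notebook): a 2x2 grid of zeros maps
# to a 2x2 block of black (all-zero RGB) pixels, i.e. an array of shape (2, 2, 3).
if __name__ == "__main__":
    assert cmap(np.zeros((2, 2), dtype=int)).shape == (2, 2, 3)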
def draw_one(x, k=20):
"""
Create a PIL image from a task matrix, the task will be
drawn using the default color coding with grid lines
arguments
x : a task matrix
k = 20 : an up scaling factor
returns
a PIL image
"""
img = Image.fromarray(cmap(x).astype(np.uint8)).resize((x.shape[1] * k, x.shape[0] * k), Image.NEAREST)
draw = ImageDraw.Draw(img)
for i in range(x.shape[0]):
draw.line((0, i * k, img.width, i * k), fill=(80, 80, 80), width=1)
for j in range(x.shape[1]):
draw.line((j * k, 0, j * k, img.height), fill=(80, 80, 80), width=1)
return img
def vcat_imgs(imgs, border=10):
"""
Concatenate images vertically
arguments:
imgs : an array of PIL images
border = 10 : the size of space between images
returns:
a PIL image
"""
h = max(img.height for img in imgs)
w = sum(img.width for img in imgs)
res_img = Image.new('RGB', (w + border * (len(imgs) - 1), h), color=(255, 255, 255))
offset = 0
for img in imgs:
res_img.paste(img, (offset, 0))
offset += img.width + border
return res_img
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(n * 4, 8))
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
def go(ax, title, x):
ax.imshow(draw_one(x), interpolation='nearest')
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for i, t in enumerate(task["train"]):
go(axs[0][fig_num], f'Train-{i} in', t["input"])
go(axs[1][fig_num], f'Train-{i} out', t["output"])
fig_num += 1
for i, t in enumerate(task["test"]):
go(axs[0][fig_num], f'Test-{i} in', t["input"])
try:
go(axs[1][fig_num], f'Test-{i} out', t["output"])
except:
go(axs[1][fig_num], f'Test-{i} out', np.zeros_like(t["input"]))
fig_num += 1
plt.tight_layout()
plt.show()
def real_trace_param_automata(input, params, n_iter, n_hidden):
"""
Execute an automata and return all the intermediate states
arguments:
step_fn : transition rule function, should take two arguments `input` and `hidden_i`,
should return an output grid an a new hidden hidden grid
n_iter : num of iteration to perform
n_hidden: number of hidden grids, if set to 0 `hidden_i` will be set to None
laodbar = True: weather display loadbars
returns:
an array of tuples if output and hidden grids
"""
# hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
#
# global_rules, ca_rules = params
#
# trace = [(input, hidden)]
#
# for rule in global_rules:
#
# output, hidden = apply_rule(input, hidden, rule)
# trace.append((output, hidden))
# input = output
#
# its = range(n_iter)
#
# for i_it in its:
# output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
# trace.append((output, hidden))
#
# if (input.shape == output.shape) and (output == input).all():
# break
# input = output
hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
global_rules, ca_rules, split_rule, merge_rule = params
grids = apply_split_rule(input, hidden, split_rule)
#print(grids[0][0])
for rule in global_rules:
for i, (inp, hid) in enumerate(grids):
if rule['macro_type'] == 'global_rule':
if rule['apply_to'] == 'all' or \
(rule['apply_to'] == 'index' and i == rule['apply_to_index']%len(grids) or
(rule['apply_to'] == 'last' and i == len(grids) - 1)):
grids[i] = apply_rule(inp, hid, rule)
elif rule['macro_type'] == 'global_interaction_rule':
grids = apply_interaction_rule(grids, rule)
#print(grids[0][0])
#1/0
for i, (input, hidden) in enumerate(grids):
for _ in range(n_iter):
output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
if np.array_equal(input, output):
break
input = output
grids[i] = (output, hidden)
output = apply_merge_rule(grids, merge_rule, split_rule)
return output
def apply_interaction_rule(grids, rule):
if rule['type'] == 'align_pattern':
# index_from = rule['index_from'] % len(grids)
# index_to = rule['index_to'] % len(grids)
# allow_rotation = rule['allow_rotation']
if len(grids) > 5:
return grids
for index_from in range(len(grids)):
for index_to in range(index_from+1, len(grids)):
input_i = grids[index_from][0]
input_j = grids[index_to][0]
# print(np.max(input_i>0, axis=1))
# print(np.max(input_i>0, axis=1).shape)
# print(np.arange(input_i.shape[0]).shape)
#1/0
i_nonzero_rows = np.arange(input_i.shape[0])[np.max(input_i>0, axis=1)]
i_nonzero_columns = np.arange(input_i.shape[1])[np.max(input_i>0, axis=0)]
j_nonzero_rows = np.arange(input_j.shape[0])[np.max(input_j>0, axis=1)]
j_nonzero_columns = np.arange(input_j.shape[1])[np.max(input_j>0, axis=0)]
if i_nonzero_rows.shape[0] == 0 or i_nonzero_columns.shape[0] == 0 or \
j_nonzero_rows.shape[0] == 0 or j_nonzero_columns.shape[0] == 0:
continue
i_minrow = np.min(i_nonzero_rows)
i_mincol = np.min(i_nonzero_columns)
i_maxrow = np.max(i_nonzero_rows) + 1
i_maxcol = np.max(i_nonzero_columns) + 1
j_minrow = np.min(j_nonzero_rows)
j_mincol = np.min(j_nonzero_columns)
j_maxrow = np.max(j_nonzero_rows) + 1
j_maxcol = np.max(j_nonzero_columns) + 1
figure_to_align = input_i[i_minrow:i_maxrow, i_mincol:i_maxcol]
figure_target = input_j[j_minrow:j_maxrow, j_mincol:j_maxcol]
best_fit = 0
best_i_fit, best_j_fit = -1, -1
#print(figure_to_align)
#print(figure_target)
if figure_to_align.shape[0] < figure_target.shape[0] or figure_to_align.shape[1] < figure_target.shape[1]:
continue
#1/0
else:
for i_start in range((figure_to_align.shape[0] - figure_target.shape[0])+1):
for j_start in range((figure_to_align.shape[1] - figure_target.shape[1])+1):
fig_1 = figure_to_align[i_start:(i_start + figure_target.shape[0]), j_start:(j_start + figure_target.shape[1])]
if np.logical_and(np.logical_and(figure_target > 0, figure_target!=rule['allow_color']), figure_target != fig_1).any():
continue
fit = np.sum(figure_target==fig_1)
if fit > best_fit:
best_i_fit, best_j_fit = i_start, j_start
best_fit = fit
if best_fit == 0:
continue
imin = j_minrow-best_i_fit
imax = j_minrow-best_i_fit + figure_to_align.shape[0]
jmin = j_mincol - best_j_fit
jmax = j_mincol - best_j_fit + figure_to_align.shape[1]
begin_i = max(imin, 0)
begin_j = max(jmin, 0)
end_i = min(imax, input_j.shape[0])
end_j = min(jmax, input_j.shape[1])
i_fig_begin = (begin_i-imin)
i_fig_end = figure_to_align.shape[0]-(imax-end_i)
j_fig_begin = (begin_j-jmin)
j_fig_end = figure_to_align.shape[1]-(jmax-end_j)
if rule['fill_with_color'] == 0:
input_j[begin_i:end_i, begin_j:end_j] = figure_to_align[i_fig_begin:i_fig_end, j_fig_begin:j_fig_end]
else:
for i, j in product(range(end_i-begin_i + 1), range(end_j-begin_j + 1)):
if input_j[begin_i + i, begin_j + j] == 0:
input_j[begin_i + i, begin_j + j] = rule['fill_with_color'] * (figure_to_align[i_fig_begin + i, j_fig_begin + j])
return grids
def trace_param_automata(input, params, n_iter, n_hidden):
# expected = real_trace_param_automata(input, params, n_iter, n_hidden)
#
# testcase = {'input': input, 'params': params}
# print(str(testcase).replace('\'', '"').replace('array(', '').replace(')', ''))
output = cpp_trace_param_automata(input, params, n_iter)
# if not np.array_equal(expected, output):
# print('cpp result is wrong')
# print('input:')
# print(input)
# print('expected:')
# print(expected)
# print('got:')
# print(output)
#
# diff = [[str(g) if e != g else '-' for e, g in zip(exp_row, got_row)]
# for exp_row, got_row in zip(expected, output)]
# diff_lines = [' '.join(line) for line in diff]
# diff_str = '[[' + ']\n ['.join(diff_lines)
#
# print('diff:')
# print(diff_str)
# print('rules')
# print(params)
#
# assert False
return [[output]]
# def vis_automata_trace(states, loadbar=False, prefix_image=None):
# """
# Create a video from an array of automata states
#
# arguments:
# states : array of automata steps, returned by `trace_automata()`
# loadbar = True: weather display loadbars
# prefix_image = None: image to add to the beginning of each frame
# returns
# a moviepy ImageSequenceClip
# """
# frames = []
# if loadbar:
# states = tqdm(states, desc='Frame')
# for i, (canvas, hidden) in enumerate(states):
#
# frame = []
# if prefix_image is not None:
# frame.append(prefix_image)
# frame.append(draw_one(canvas))
# frames.append(vcat_imgs(frame))
#
# return ImageSequenceClip(list(map(np.array, frames)), fps=10)
# def vis_automata_paramed_task(tasks, parameters, n_iter, n_hidden, vis_only_ix=None):
# """
# Visualize the automata steps during the task solution
# arguments:
# tasks : the task to be solved by the automata
# step_fn : automata transition function as passed to `trace_automata()`
# n_iter : number of iterations to perform
# n_hidden : number of hidden girds
# """
#
# n_vis = 0
#
# def go(task, n_vis, test=False):
#
# if vis_only_ix is not None and vis_only_ix != n_vis:
# return
# trace = trace_param_automata(task['input'], parameters, n_iter, n_hidden)
# if not test:
# vid = vis_automata_trace(trace, prefix_image=draw_one(task['output']))
# else:
# vid = vis_automata_trace(trace, prefix_image=draw_one(np.zeros_like(task['input'])))
#
# # display(display_vid(vid))
#
# for task in (tasks['train']):
# n_vis += 1
# go(task, n_vis)
#
# for task in (tasks['test']):
# n_vis += 1
# go(task, n_vis, True)
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
evaluation_tasks = sorted(os.listdir(evaluation_path))
test_tasks = sorted(os.listdir(test_path))
def load_data(p, phase=None):
"""
Load task data
"""
if phase in {'training', 'test', 'evaluation'}:
p = data_path / phase / p
task = json.loads(Path(p).read_text())
dict_vals_to_np = lambda x: {k: np.array(v) for k, v in x.items()}
assert set(task) == {'test', 'train'}
res = dict(test=[], train=[])
for t in task['train']:
assert set(t) == {'input', 'output'}
res['train'].append(dict_vals_to_np(t))
for t in task['test']:
if phase == 'test':
assert set(t) == {'input'}
else:
assert set(t) == {'input', 'output'}
res['test'].append(dict_vals_to_np(t))
return res
nbh = lambda x, i, j: {
(ip, jp) : x[i+ip, j+jp]
for ip, jp in product([1, -1, 0], repeat=2)
if 0 <= i+ip < x.shape[0] and 0 <= j+jp < x.shape[1] and (not (ip==0 and jp==0))
}
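
# Illustrative example (not part of the original notebook): nbh returns the Moore
# neighbourhood of cell (i, j) as an {offset: value} dict, silently dropping offsets
# that fall outside the grid. For the corner cell of a 2x2 grid only three of the
# eight offsets survive.
if __name__ == "__main__":
    _demo_grid = np.arange(4).reshape(2, 2)  # [[0, 1], [2, 3]]
    assert nbh(_demo_grid, 0, 0) == {(0, 1): 1, (1, 0): 2, (1, 1): 3}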
def get_random_split_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['nothing', 'color_figures', 'figures', 'macro_multiply'])
if rule['type'] in ['color_figures', 'figures']:
rule['sort'] = random.choice(['biggest', 'smallest'])
if rule['type'] == 'macro_multiply':
rule['k1'] = np.random.randint(config['mink1'], config['maxk1']+1)
rule['k2'] = np.random.randint(config['mink2'], config['maxk2']+1)
return rule
def get_random_merge_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['cellwise_or', 'output_first', 'output_last'])
return rule
def apply_split_rule(input, hidden, split_rule):
if split_rule['type'] == 'nothing':
return [(input, hidden)]
if split_rule['type'] == 'macro_multiply':
ks = split_rule['k1'] * split_rule['k2']
grids = [(np.copy(input), np.copy(hidden)) for _ in range(ks)]
return grids
#split_rule['type'] = 'figures'
dif_c_edge = split_rule['type'] == 'figures'
communities = get_connectivity_info(input, ignore_black=True, edge_for_difcolors=dif_c_edge)
if len(communities) > 0:
if split_rule['sort'] == 'biggest':
communities = communities[::-1]
grids = [(np.zeros_like(input), np.zeros_like(hidden)) for _ in range(len(communities))]
for i in range(len(communities)):
for point in communities[i]:
grids[i][0][point] = input[point]
else:
grids = [(input, hidden)]
return grids
def apply_merge_rule(grids, merge_rule, split_rule):
if split_rule['type'] == 'macro_multiply':
shape_base = grids[0][0].shape
shapes = [arr[0].shape for arr in grids]
if not np.array([shape_base == sh for sh in shapes]).all():
            return np.zeros((1, 1), dtype=int)
ks_1 = split_rule['k1']
ks_2 = split_rule['k2']
output = np.zeros((shape_base[0] * ks_1, shape_base[1] * ks_2), dtype=np.int8)
for k1 in range(ks_1):
for k2 in range(ks_2):
output[(k1*shape_base[0]):((k1+1) * shape_base[0]), (k2*shape_base[1]):((k2+1) * shape_base[1])] = grids[k1*ks_2 + k2][0]
return output
if merge_rule['type'] == 'cellwise_or':
output = np.zeros_like(grids[0][0])
for i in np.arange(len(grids))[::-1]:
if grids[i][0].shape == output.shape:
output[grids[i][0]>0] = grids[i][0][grids[i][0]>0]
return output
elif merge_rule['type'] == 'output_first':
output = grids[0][0]
elif merge_rule['type'] == 'output_last':
output = grids[-1][0]
return output
def get_random_ca_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'copy_color_by_direction',
'direct_check',
'indirect_check',
'nbh_check',
'corner_check',
'color_distribution',
]
ca_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
ca_rules += [c['type'] for c in ca]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(ca_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
def get_random_ignore_colors():
if config['possible_ignore_colors'].shape[0] > 0:
possible_colors = config['possible_ignore_colors']
return possible_colors[np.random.randint(2, size=possible_colors.shape[0]) == 1]
else:
return []
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return get_random_all_colors()
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'ca_rule'
rule['ignore_colors'] = list(config['ignore_colors'])
if np.random.rand() < 0.5 and config['possible_ignore_colors'].shape[0]:
rule['ignore_colors'] += [random.choice(config['possible_ignore_colors'])]
if random_type == 'copy_color_by_direction':
rule['direction'] = random.choice(['everywhere'])
rule['copy_color'] = [get_random_out_color()]
rule['look_back_color'] = rule['copy_color'][0]
elif random_type == 'corner_check':
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'direct_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'indirect_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'nbh_check':
rule['nbh_check_sum'] = np.random.randint(8)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'color_distribution':
rule['direction'] = random.choice(
['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['check_in_empty'] = np.random.randint(2)
rule['color_out'] = get_random_out_color()
if rule['check_in_empty'] == 0:
rule['color_in'] = rule['color_out']
else:
rule['color_in'] = get_random_all_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['color_out']]))
return rule
def get_random_global_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'distribute_colors',
'unity',
'color_for_inners',
'map_color',
'draw_lines',
'draw_line_to',
'gravity',
'make_holes',
'distribute_from_border',
'align_pattern',
'rotate',
'flip'
]
if config['allow_make_smaller']:
types_possible += \
[
'crop_empty',
'crop_figure',
'split_by_H',
'split_by_W',
'reduce'
]
# if config['allow_make_bigger']:
# types_possible += \
# [
# 'macro_multiply_by',
# 'micro_multiply_by',
# 'macro_multiply_k',
# ]
gl_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
gl_rules += [c['type'] for c in gl]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(gl_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'global_rule'
rule['apply_to'] = random.choice(['all', 'index'])
if np.random.rand()<0.2:
rule['apply_to'] = 'last'
if rule['apply_to'] == 'index':
rule['apply_to_index'] = np.random.choice(10)
if random_type == 'macro_multiply_k':
rule['k'] = (np.random.randint(1, 4), np.random.randint(1, 4))
elif random_type == 'flip':
rule['how'] = random.choice(['ver', 'hor'])
elif random_type == 'rotate':
rule['rotations_count'] = np.random.randint(1, 4)
elif random_type == 'micro_multiply_by':
rule['how_many'] = random.choice([2, 3, 4, 5, 'size'])
elif random_type == 'macro_multiply_by':
rule['how_many'] = random.choice(['both', 'hor', 'ver'])
rule['rotates'] = [np.random.randint(1) for _ in range(4)]
rule['flips'] = [random.choice(['hor', 'ver', 'horver', 'no']) for _ in range(4)]
elif random_type == 'distribute_from_border':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
elif random_type == 'draw_lines':
rule['direction'] = random.choice(['everywhere', 'horizontal', 'vertical', 'horver', 'diagonal'])
# 'top', 'bottom', 'left', 'right',
# 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['not_stop_by_color'] = 0 # get_random_all_color()
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'reduce':
rule['skip_color'] = get_random_all_color()
elif random_type == 'draw_line_to':
#rule['direction_type'] = random.choice(['border'])
rule['direction_color'] = get_random_all_color()
rule['not_stop_by_color'] = 0
if np.random.rand() < 0.5:
rule['not_stop_by_color_and_skip'] = get_random_all_color()
else:
rule['not_stop_by_color_and_skip'] = 0
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'distribute_colors':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
rule['horizontally'] = np.random.randint(2)
rule['vertically'] = np.random.randint(2)
rule['intersect'] = get_random_out_color()
elif random_type == 'color_for_inners':
rule['color_out'] = get_random_out_color()
elif random_type == 'crop_figure':
rule['mode'] = random.choice(['smallest', 'biggest'])
rule['dif_c_edge'] = random.choice([True, False])
elif random_type == 'unity':
rule['mode'] = random.choice(['diagonal', 'horizontal', 'vertical', 'horver'])
# rule['inner'] = np.random.choice(2)
rule['ignore_colors'] = [0]
        if np.random.rand()
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2015-2021 UT-BATTELLE, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Perturbation Growth Test:
This tests the null hypothesis that the reference (n) and modified (m) model
ensembles represent the same atmospheric state after each physics parameterization
is applied within a single time-step using the two-sample (n and m) T-test for equal
averages at a 95% confidence level. Ensembles are generated by repeating the
simulation for many initial conditions, with each initial condition subject to
multiple perturbations.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import math
import argparse
# import logging
from pprint import pprint
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import stats
from netCDF4 import Dataset
import livvkit
from livvkit.util import elements as el
from livvkit.util import functions as fn
from evv4esm.utils import bib2html
# logger = logging.getLogger(__name__)
def parse_args(args=None):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config',
type=fn.read_json,
default='test/pge_pc0101123.json',
help='A JSON config file containing a `pg` dictionary defining ' +
'the options.')
args = parser.parse_args(args)
    name = list(args.config.keys())[0]
config = args.config[name]
return name, config
def _instance2sub(instance_number, total_perturbations):
"""
Converts an instance number (ii) to initial condition index (ci) and
perturbation index (pi) subscripts
instances use 1-based indexes and vary according to this function:
ii = ci * len(PERTURBATIONS) + pi + 1
where both pi and ci use 0-based indexes.
"""
perturbation_index = (instance_number - 1) % total_perturbations
initial_condition = (instance_number - 1 - perturbation_index) // total_perturbations
return initial_condition, perturbation_index
def _sub2instance(initial_condition, perturbation_index, total_perturbations):
"""
Converts initial condition index (ci) and perturbation index (pi) subscripts
to an instance number (ii)
instances use 1-based indexes and vary according to this function:
ii = ci * len(PERTURBATIONS) + pi + 1
where both pi and ci use 0-based indexes.
"""
instance = initial_condition * total_perturbations + perturbation_index + 1
return instance
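
# Illustrative round trip (not part of the original module): with 3 perturbations,
# instance 5 maps to initial-condition index 1 and perturbation index 1, and
# converting those subscripts back recovers instance 5.
if __name__ == "__main__":
    assert _instance2sub(5, 3) == (1, 1)
    assert _sub2instance(1, 1, 3) == 5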
def rmse_writer(file_name, rmse, perturbation_names, perturbation_variables, init_file_template, model_name):
"""
Opens and writes a netcdf file for PGE curves
    This function exists purely to avoid duplicated code,
    which keeps the module easier to maintain long-term.
"""
with Dataset(file_name, 'w') as nc:
ninit, nprt_m1, nvars = rmse.shape
nc.createDimension('ninit', ninit)
nc.createDimension('nprt', nprt_m1 + 1)
nc.createDimension('nprt_m1', nprt_m1)
nc.createDimension('nvars', nvars)
nc_init_cond = nc.createVariable('init_cond_files', str, 'ninit')
nc_perturbation = nc.createVariable('perturbation_names', str, 'nprt')
nc_variables = nc.createVariable('perturbation_variables', str, 'nvars')
nc_rmse = nc.createVariable('rmse', 'f8', ('ninit', 'nprt_m1', 'nvars'))
# NOTE: Assignment to netcdf4 variable length string array can be done
# via numpy arrays, or in a for loop using integer indices.
        # NOTE: Numpy arrays can't be created directly from a generator,
        #       so wrap the generator in a list first
nc_perturbation[:] = np.array(list(perturbation_names))
nc_variables[:] = np.array(list(perturbation_variables))
nc_rmse[:] = rmse[:]
for icond in range(0, ninit):
# NOTE: Zero vs One based indexing
nc_init_cond[icond] = init_file_template.format(model_name, 'i', icond+1)
def variables_rmse(ifile_test, ifile_cntl, var_list, var_pefix=''):
"""
Compute RMSE difference between perturbation and control for a set of
variables
Args:
ifile_test: Path to a NetCDF dataset for a perturbed simulation
ifile_cntl: Path to a NetCDF dataset for the control simulation
var_list (list): List of all variables to analyze
var_pefix: Optional prefix (e.g., t_, qv_) to apply to the variable
returns:
rmse (pandas.DataFrame): A dataframe containing the RMSE and maximum
difference details between the perturbed and control simulation
"""
with Dataset(ifile_test) as ftest, Dataset(ifile_cntl) as fcntl:
lat = ftest.variables['lat']
lon = ftest.variables['lon']
rmse = pd.DataFrame(columns=('RMSE', 'max diff', 'i', 'j', 'control', 'test', 'lat', 'lon'), index=var_list)
# reshape for RMSE
dims = len(ftest.variables[var_pefix + var_list[0]].dimensions)
if dims == 3: # see if it is SE grid
nx, ny = ftest.variables[var_pefix + var_list[0]][0, ...].shape
nz = 1
else:
nx, ny, nz = ftest.variables[var_pefix + var_list[0]][0, ...].shape
for ivar, vvar in enumerate(var_list):
var = var_pefix + vvar
if var in ftest.variables:
vtest = ftest.variables[var.strip()][0, ...] # first dimension is time (=0)
vcntl = fcntl.variables[var.strip()][0, ...] # first dimension is time (=0)
vrmse = math.sqrt(((vtest - vcntl)**2).mean()) / np.mean(vcntl)
diff = abs(vtest[...] - vcntl[...])
ind_max = np.unravel_index(diff.argmax(), diff.shape)
rmse.loc[vvar] = (vrmse, diff[ind_max], ind_max[0], ind_max[1],
vcntl[ind_max], vtest[ind_max],
lat[ind_max[1]], lon[ind_max[1]])
return rmse
def _print_details(details):
for set_ in details:
print('-' * 80)
print(set_)
print('-' * 80)
pprint(details[set_])
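
# Illustrative sketch (not part of the original module): the module docstring frames
# the comparison as a two-sample T-test for equal ensemble means at a 95% confidence
# level. A minimal version of that check for a single variable could look like the
# helper below; the function name and arguments are invented for demonstration only,
# e.g. _example_two_sample_t_test(ref_means, mod_means) with one mean per member.
def _example_two_sample_t_test(ref_ensemble, mod_ensemble, alpha=0.05):
    """Return True if the equal-means null hypothesis is not rejected."""
    _, p_value = stats.ttest_ind(ref_ensemble, mod_ensemble, equal_var=True)
    return p_value >= alpha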
def main(args):
nvar = len(args.variables)
nprt = len(args.perturbations)
# for test cases (new environment etc.)
# logger.debug("PGN_INFO: Test case comparison...")
rmse_prototype = {}
for icond in range(args.ninit):
prt_rmse = {}
for iprt, prt_name in enumerate(args.perturbations):
if prt_name == 'woprt':
continue
iinst_ctrl = _sub2instance(icond, 0, nprt)
ifile_ctrl = os.path.join(args.ref_dir,
args.instance_file_template.format('', args.component, iinst_ctrl, '_woprt'))
# logger.debug("PGN_INFO:CNTL_TST:" + ifile_cntl)
iinst_test = _sub2instance(icond, iprt, nprt)
ifile_test = os.path.join(args.test_dir,
args.instance_file_template.format(
args.test_case + '.', args.component, iinst_test, '_' + prt_name))
# logger.debug("PGN_INFO:TEST_TST:" + ifile_test)
prt_rmse[prt_name] = variables_rmse(ifile_test, ifile_ctrl, args.variables, 't_')
rmse_prototype[icond] = pd.concat(prt_rmse)
rmse = pd.concat(rmse_prototype)
    comp_rmse = np.reshape(rmse.RMSE.values, (args.ninit, nprt-1, nvar))
##
# \file resampler.py
# \brief Class to perform resampling operations
#
# \author <NAME> (<EMAIL>)
# \date June 2018
#
import os
import itk
import numpy as np
import SimpleITK as sitk
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
import simplereg.data_reader as dr
import simplereg.data_writer as dw
import simplereg.utilities as utils
from simplereg.niftyreg_to_simpleitk_converter import \
NiftyRegToSimpleItkConverter as nreg2sitk
from simplereg.definitions import ALLOWED_INTERPOLATORS
class Resampler(object):
def __init__(self,
path_to_fixed,
path_to_moving,
path_to_transform,
interpolator="Linear",
spacing=None,
padding=0,
add_to_grid=0,
verbose=0,
):
self._path_to_fixed = path_to_fixed
self._path_to_moving = path_to_moving
self._path_to_transform = path_to_transform
self._interpolator = interpolator
self._spacing = spacing
self._padding = padding
self._add_to_grid = add_to_grid
self._verbose = verbose
self._warped_moving_sitk = None
self._warped_moving_itk = None
def write_image(self, path_to_output):
if self._warped_moving_sitk is not None:
dw.DataWriter.write_image(
self._warped_moving_sitk, path_to_output)
else:
dw.DataWriter.write_image(
self._warped_moving_itk, path_to_output)
def run(self):
# Possible to use _run_itk for all interpolators. However, loading of
# itk library takes noticeably longer. Hence, only use it when required
if self._interpolator in ["OrientedGaussian"]:
if self._path_to_transform is not None:
# This could be implemented for rigid transformations.
# For affine, or even displacement fields, it is not quite
# clear how a PSF-transformed option shall look like.
raise ValueError(
"OrientedGaussian interpolation does not allow a "
"transformation during resampling.")
self._run_itk()
else:
self._run_sitk()
def _run_itk(self):
# read input
fixed_itk = dr.DataReader.read_image(self._path_to_fixed, as_itk=1)
moving_itk = dr.DataReader.read_image(self._path_to_moving, as_itk=1)
# get image resampling information
size, origin, spacing, direction = self.get_space_resampling_properties(
image_sitk=fixed_itk,
spacing=self._spacing,
add_to_grid=self._add_to_grid,
add_to_grid_unit="mm")
if self._path_to_transform is not None:
transform_itk = dr.DataReader.read_transform(
self._path_to_transform, as_itk=1)
else:
transform_itk = getattr(
itk, "Euler%dDTransform" % fixed_itk.GetImageDimension()).New()
interpolator_itk = self._convert_interpolator_itk(
fixed_itk,
moving_itk,
spacing,
self._interpolator,
alpha_cut=3,
)
# resample image
resampler_itk = itk.ResampleImageFilter[
type(moving_itk), type(fixed_itk)].New()
resampler_itk.SetInput(moving_itk)
resampler_itk.SetSize(size)
resampler_itk.SetTransform(transform_itk)
resampler_itk.SetInterpolator(interpolator_itk)
resampler_itk.SetOutputOrigin(origin)
resampler_itk.SetOutputSpacing(spacing)
resampler_itk.SetOutputDirection(fixed_itk.GetDirection())
resampler_itk.SetDefaultPixelValue(self._padding)
resampler_itk.UpdateLargestPossibleRegion()
resampler_itk.Update()
self._warped_moving_itk = resampler_itk.GetOutput()
self._warped_moving_itk.DisconnectPipeline()
def _run_sitk(self):
# read input
fixed_sitk = dr.DataReader.read_image(self._path_to_fixed)
moving_sitk = dr.DataReader.read_image(self._path_to_moving)
# get image resampling information
size, origin, spacing, direction = self.get_space_resampling_properties(
image_sitk=fixed_sitk,
spacing=self._spacing,
add_to_grid=self._add_to_grid,
add_to_grid_unit="mm")
if self._path_to_transform is not None:
transform_sitk = dr.DataReader.read_transform(
self._path_to_transform)
else:
transform_sitk = getattr(
sitk, "Euler%dDTransform" % fixed_sitk.GetDimension())()
# resample image
self._warped_moving_sitk = sitk.Resample(
moving_sitk,
size,
transform_sitk,
self._convert_interpolator_sitk(self._interpolator),
origin,
spacing,
direction,
float(self._padding),
fixed_sitk.GetPixelIDValue(),
)
@staticmethod
def _convert_interpolator_sitk(interpolator):
if interpolator.isdigit():
if int(interpolator) == 0:
interpolator = "NearestNeighbor"
elif int(interpolator) == 1:
interpolator = "Linear"
else:
raise ValueError(
"Interpolator order not known. Allowed options are: 0, 1")
if interpolator not in ALLOWED_INTERPOLATORS:
raise ValueError(
"Interpolator not known. Allowed options are: %s" % (
", ".join(ALLOWED_INTERPOLATORS)))
return getattr(sitk, "sitk%s" % interpolator)
def _convert_interpolator_itk(
self,
fixed_itk,
moving_itk,
spacing,
interpolator,
alpha_cut,
pixel_type=itk.D,
):
if interpolator.isdigit():
if int(interpolator) == 0:
interpolator = "NearestNeighbor"
elif int(interpolator) == 1:
interpolator = "Linear"
else:
raise ValueError(
"Interpolator order not known. Allowed options are: 0, 1")
if interpolator not in ALLOWED_INTERPOLATORS:
raise ValueError(
"Interpolator not known. Allowed options are: %s" % (
", ".join(ALLOWED_INTERPOLATORS)))
if interpolator == "OrientedGaussian":
cov = self._get_oriented_psf_covariance(
fixed_itk, moving_itk, spacing)
interpolator_itk = itk.OrientedGaussianInterpolateImageFunction[
type(fixed_itk), pixel_type].New()
interpolator_itk.SetCovariance(cov.flatten())
interpolator_itk.SetAlpha(alpha_cut)
else:
interpolator_itk = getattr(
itk, "%sInterpolateImageFunction" % interpolator)[
type(fixed_itk), pixel_type].New()
return interpolator_itk
def _get_oriented_psf_covariance(self, fixed_itk, moving_itk, spacing):
# Fixed axis-aligned covariance matrix representing the PSF
cov = self._get_psf_covariance(spacing)
# Express fixed axis-aligned PSF in moving space coordinates
fixed_direction = sitkh.get_sitk_from_itk_direction(
fixed_itk.GetDirection())
moving_direction = sitkh.get_sitk_from_itk_direction(
moving_itk.GetDirection())
U = self._get_rotation_matrix(fixed_direction, moving_direction)
cov = U.dot(cov).dot(U.transpose())
return cov
##
# Compute (axis aligned) covariance matrix from spacing. The PSF is
# modelled as Gaussian with
# *- FWHM = 1.2*in-plane-resolution (in-plane)
# *- FWHM = slice thickness (through-plane)
# \date 2017-11-01 16:16:36+0000
#
# \param spacing 3D array containing in-plane and through-plane
# dimensions
#
# \return (axis aligned) covariance matrix representing PSF modelled
# Gaussian as 3x3 np.array
#
@staticmethod
def _get_psf_covariance(spacing):
sigma2 = np.zeros_like(np.array(spacing))
# Compute Gaussian to approximate in-plane PSF:
sigma2[0:2] = (1.2 * spacing[0:2])**2 / (8 * np.log(2))
# Compute Gaussian to approximate through-plane PSF:
if sigma2.size == 3:
sigma2[2] = spacing[2]**2 / (8 * np.log(2))
return np.diag(sigma2)
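
    # Worked example (added for illustration; not in the original class): the relation
    # used above is sigma^2 = FWHM^2 / (8 * ln 2). For a hypothetical spacing of
    # [1.0, 1.0, 3.0] mm this gives
    #   in-plane:      (1.2 * 1.0)**2 / (8 * ln 2) ~= 0.26 mm^2
    #   through-plane:  3.0**2        / (8 * ln 2) ~= 1.62 mm^2
    # so _get_psf_covariance([1.0, 1.0, 3.0]) ~= diag(0.26, 0.26, 1.62).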
##
# Gets the relative rotation matrix to express fixed-axis aligned
# covariance matrix in coordinates of moving image
# \date 2016-10-14 16:37:57+0100
#
# \param fixed_direction fixed image direction
# \param moving_direction moving image direction
#
# \return The relative rotation matrix as 3x3 numpy array
#
@staticmethod
def _get_rotation_matrix(fixed_direction, moving_direction):
dim = np.sqrt(np.array(fixed_direction).size).astype(np.uint8)
fixed_direction = np.array(fixed_direction).reshape(dim, dim)
        moving_direction = np.array(moving_direction).reshape(dim, dim)
""" test get/set & misc """
from datetime import timedelta
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
IndexSlice,
MultiIndex,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
def test_basic_indexing():
s = Series(np.random.randn(5), index=["a", "b", "a", "a", "b"])
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s[5]
with pytest.raises(IndexError, match=msg):
s[5] = 0
with pytest.raises(KeyError, match=r"^'c'$"):
s["c"]
s = s.sort_index()
with pytest.raises(IndexError, match=msg):
s[5]
msg = r"index 5 is out of bounds for axis (0|1) with size 5|^5$"
with pytest.raises(IndexError, match=msg):
s[5] = 0
def test_basic_getitem_with_labels(datetime_series):
indices = datetime_series.index[[5, 10, 15]]
result = datetime_series[indices]
expected = datetime_series.reindex(indices)
tm.assert_series_equal(result, expected)
result = datetime_series[indices[0] : indices[2]]
expected = datetime_series.loc[indices[0] : indices[2]]
tm.assert_series_equal(result, expected)
def test_basic_getitem_dt64tz_values():
# GH12089
# with tz for values
ser = Series(
date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"]
)
expected = Timestamp("2011-01-01", tz="US/Eastern")
result = ser.loc["a"]
assert result == expected
result = ser.iloc[0]
assert result == expected
result = ser["a"]
assert result == expected
def test_getitem_setitem_ellipsis():
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
tm.assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
@pytest.mark.parametrize(
"result_1, duplicate_item, expected_1",
[
[
Series({1: 12, 2: [1, 2, 2, 3]}),
Series({1: 313}),
Series({1: 12}, dtype=object),
],
[
Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
Series({1: [1, 2, 3]}),
Series({1: [1, 2, 3]}),
],
],
)
def test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
tm.assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_setitem_integers():
# caused bug without test
s = Series([1, 2, 3], ["a", "b", "c"])
assert s.iloc[0] == s["a"]
s.iloc[0] = 5
tm.assert_almost_equal(s["a"], 5)
def test_series_box_timestamp():
rng = date_range("20090415", "20090519", freq="B")
ser = Series(rng)
assert isinstance(ser[0], Timestamp)
assert isinstance(ser.at[1], Timestamp)
assert isinstance(ser.iat[2], Timestamp)
assert isinstance(ser.loc[3], Timestamp)
assert isinstance(ser.iloc[4], Timestamp)
ser = Series(rng, index=rng)
assert isinstance(ser[0], Timestamp)
assert isinstance(ser.at[rng[1]], Timestamp)
assert isinstance(ser.iat[2], Timestamp)
assert isinstance(ser.loc[rng[3]], Timestamp)
assert isinstance(ser.iloc[4], Timestamp)
def test_series_box_timedelta():
rng = timedelta_range("1 day 1 s", periods=5, freq="h")
ser = Series(rng)
assert isinstance(ser[0], Timedelta)
assert isinstance(ser.at[1], Timedelta)
assert isinstance(ser.iat[2], Timedelta)
assert isinstance(ser.loc[3], Timedelta)
assert isinstance(ser.iloc[4], Timedelta)
def test_getitem_ambiguous_keyerror(indexer_sl):
ser = Series(range(10), index=list(range(0, 20, 2)))
with pytest.raises(KeyError, match=r"^1$"):
indexer_sl(ser)[1]
def test_getitem_dups_with_missing(indexer_sl):
# breaks reindex, so need to use .loc internally
# GH 4246
ser = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"])
with pytest.raises(KeyError, match=re.escape("['bam'] not in index")):
indexer_sl(ser)[["foo", "bar", "bah", "bam"]]
def test_setitem_ambiguous_keyerror(indexer_sl):
s = Series(range(10), index=list(range(0, 20, 2)))
# equivalent of an append
s2 = s.copy()
indexer_sl(s2)[1] = 5
expected = s.append(Series([5], index=[1]))
tm.assert_series_equal(s2, expected)
def test_setitem(datetime_series, string_series):
datetime_series[datetime_series.index[5]] = np.NaN
datetime_series[[1, 2, 17]] = np.NaN
datetime_series[6] = np.NaN
assert np.isnan(datetime_series[6])
assert np.isnan(datetime_series[2])
datetime_series[np.isnan(datetime_series)] = 5
assert not np.isnan(datetime_series[2])
def test_setslice(datetime_series):
sl = datetime_series[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique is True
# FutureWarning from NumPy about [slice(None, 5).
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_basic_getitem_setitem_corner(datetime_series):
# invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2]
msg = "key of type tuple not found and not a MultiIndex"
with pytest.raises(KeyError, match=msg):
datetime_series[:, 2]
with pytest.raises(KeyError, match=msg):
datetime_series[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
with tm.assert_produces_warning(FutureWarning):
# GH#31299
result = datetime_series[[slice(None, 5)]]
expected = datetime_series[:5]
tm.assert_series_equal(result, expected)
# OK
msg = r"unhashable type(: 'slice')?"
with pytest.raises(TypeError, match=msg):
datetime_series[[5, slice(None, None)]]
with pytest.raises(TypeError, match=msg):
datetime_series[[5, slice(None, None)]] = 2
def test_slice(string_series, object_series):
numSlice = string_series[10:20]
numSliceEnd = string_series[-10:]
objSlice = object_series[10:20]
assert string_series.index[9] not in numSlice.index
assert object_series.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert string_series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == string_series.index[11]
assert tm.equalContents(numSliceEnd, np.array(string_series)[-10:])
# Test return view.
sl = string_series[10:20]
sl[:] = 0
assert (string_series[10:20] == 0).all()
def test_timedelta_assignment():
# GH 8209
s = Series([], dtype=object)
s.loc["B"] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta("1 days"), index=["B"]))
s = s.reindex(s.index.insert(0, "A"))
tm.assert_series_equal(s, Series([np.nan, Timedelta("1 days")], index=["A", "B"]))
s.loc["A"] = timedelta(1)
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion():
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]})
return_value = df.set_index(["a", "b", "c"], inplace=True)
assert return_value is None
s = Series([1], index=[(2, 2, 2)])
df["val"] = 0
df
df["val"].update(s)
expected = DataFrame(
{"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
)
return_value = expected.set_index(["a", "b", "c"], inplace=True)
assert return_value is None
tm.assert_frame_equal(df, expected)
def test_preserve_refs(datetime_series):
seq = datetime_series[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(datetime_series[10])
def test_cast_on_putmask():
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype="int64")
s[[True, False]] = Series([0], index=[1], dtype="int64")
expected = Series([0, 2], index=[1, 2], dtype="int64")
tm.assert_series_equal(s, expected)
def test_type_promote_putmask():
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
tm.assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
def test_setitem_mask_promote_strs():
ser = Series([0, 1, 2, 0])
mask = ser > 0
ser2 = ser[mask].map(str)
ser[mask] = ser2
expected = Series([0, "1", "2", 0])
tm.assert_series_equal(ser, expected)
def test_setitem_mask_promote():
ser = Series([0, "foo", "bar", 0])
mask = Series([False, True, True, False])
ser2 = ser[mask]
ser[mask] = ser2
expected = Series([0, "foo", "bar", 0])
tm.assert_series_equal(ser, expected)
def test_multilevel_preserve_name(indexer_sl):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
ser = Series(np.random.randn(len(index)), index=index, name="sth")
result = indexer_sl(ser)["foo"]
assert result.name == ser.name
"""
miscellaneous methods
"""
def test_slice_with_zero_step_raises(index, frame_or_series, indexer_sli):
ts = frame_or_series(np.arange(len(index)), index=index)
with pytest.raises(ValueError, match="slice step cannot be zero"):
indexer_sli(ts)[::0]
@pytest.mark.parametrize(
"index",
[
date_range("2014-01-01", periods=20, freq="MS"),
period_range("2014-01", periods=20, freq="M"),
timedelta_range("0", periods=20, freq="H"),
],
)
def test_slice_with_negative_step(index):
keystr1 = str(index[9])
keystr2 = str(index[13])
    ser = Series(np.arange(20), index=index)
import numpy as np
def AngleAxisRotatePoint(angle_axis, point):
theta2 = np.dot(angle_axis, angle_axis)
result = np.zeros((3))
if theta2 > 1e-15:
theta = np.sqrt(theta2)
costheta = np.cos(theta)
sintheta = np.sin(theta)
theta_inverse = 1.0/theta
        w = np.multiply(angle_axis, theta_inverse)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
import os
import datetime as dt
import main
from eval import data_analysis
# LaTeX settings
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'serif', 'sans-serif': ['lmodern'], 'size': 18})
plt.rc('axes', **{'titlesize': 18, 'labelsize': 18})
# Constants
JSON_PATH = './out/'
OUT_PATH = './out/'
MODEL_NAMES = {
'KF': ('KalmanFilter', ''),
'KF(+W)': ('KalmanFilter', '_W'),
'KF(+WF)': ('KalmanFilter', '_WF'),
'KD-IC': ('KD-IC', ''),
'KD-IC(+W)': ('KD-IC', '_W'),
'KD-IC(+WF)': ('KD-IC', '_WF'),
'LN-IC': ('LogNormal-IC', ''),
'LN-IC(+W)': ('LogNormal-IC', '_W'),
'LN-IC(+WF)': ('LogNormal-IC', '_WF'),
'DeepAR': ('DeepAR', ''),
'DeepAR(+W)': ('DeepAR', '_W'),
'DeepAR(+WF)': ('DeepAR', '_WF'),
'LW': ('LastWeek', '')
}
MAIN_SEED = '42'
DECIMALS = 2
COLORS = ('C0', 'C1', 'C3', 'C9', 'C7')
MARKERS = ('o', 'X', 'v', 'd', 'p')
LINESTYLES = ('solid', 'dashed', 'dashdot')
S_D = 48
S_W = 7 * S_D
def get_file_name(model, level, cluster, seed=''):
return f'{MODEL_NAMES[model][0]}{seed}_{level}_{cluster}{MODEL_NAMES[model][1]}'
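# Example of the resulting file-name pattern (illustrative only; 'ACORN-A' is a placeholder
# for one of the ACORN cluster labels used further below):
#   get_file_name('KD-IC(+W)', 'L2', 'ACORN-A')      -> 'KD-IC_L2_ACORN-A_W'
#   get_file_name('DeepAR', 'L0', 'Agg', seed='42')  -> 'DeepAR42_L0_Agg'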
def get_path(model, level, cluster, seed=''):
return JSON_PATH + f'{MODEL_NAMES[model][0]}{seed}/{get_file_name(model, level, cluster, seed)}.json'
def load_res(model, level, cluster, seed=''):
if 'DeepAR' in model and seed == '':
seed = MAIN_SEED
with open(get_path(model, level, cluster, seed), 'r') as fp:
res = json.load(fp)
return res
def collect_results(
levels=('L0', 'L1', 'L2', 'L3'),
metrics=('MAPE', 'rMAE', 'rRMSE', 'rCRPS'),
models=('KF', 'KF(+W)', 'KF(+WF)',
'KD-IC', 'KD-IC(+W)', 'KD-IC(+WF)',
'DeepAR', 'DeepAR(+W)', 'DeepAR(+WF)',
'LW'),
seeds=(0, 1, 2, 3, 4),
forecast_reps=28,
save_results_with_info=True
):
results_path = os.path.join(JSON_PATH, 'results_with_info.npy')
if os.path.isfile(results_path):
results_with_info = np.load(results_path, allow_pickle=True)
return results_with_info[0], results_with_info[1]
results = {}
level_info = data_analysis.get_level_info(levels)
for level in levels:
clusters = level_info[level]['clusters']
# Create results array
results[level] = np.empty((len(metrics), len(models), len(clusters), forecast_reps))
results[level][:] = np.nan
for m, model in enumerate(models):
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for c, cluster in enumerate(clusters):
                if 'DeepAR' in model and level != 'L3':
res_per_seed = []
for seed in seeds:
res_per_seed.append(load_res(model, level, cluster, seed))
for i, metric in enumerate(metrics):
results[level][i, m, c] = np.mean([res[metric] for res in res_per_seed], axis=0)
else:
res = load_res(model, level, cluster)
for i, metric in enumerate(metrics):
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
results[level][i, m, c] = res[metric]
info = {
'levels': level_info,
'metrics': list(metrics),
'models': list(models),
'reps': forecast_reps
}
if save_results_with_info:
np.save(results_path, (results, info), allow_pickle=True)
return results, info
def collect_results_per_tstp(
levels=('L0', 'L1', 'L2'),
metrics=('rMAE', 'rRMSE', 'rCRPS'),
models=('KF', 'KF(+W)', 'KF(+WF)',
'KD-IC', 'KD-IC(+W)', 'KD-IC(+WF)',
'DeepAR', 'DeepAR(+W)', 'DeepAR(+WF)',
'LW'),
seeds=(0, 1, 2, 3, 4),
forecast_reps=28,
horizon=192,
save_results_per_tstp_with_info=True
):
results_path = os.path.join(JSON_PATH, 'results_per_tstp_with_info.npy')
if os.path.isfile(results_path):
results_with_info = np.load(results_path, allow_pickle=True)
return results_with_info[0], results_with_info[1]
results = {}
level_info = data_analysis.get_level_info(levels)
t_train, t_val = main.train_val_split(data_analysis.energy_df.index)
for level in levels:
clusters = level_info[level]['clusters']
# Create results array
results[level] = np.empty((len(seeds), len(metrics), len(models), len(clusters), forecast_reps, horizon))
results[level][:] = np.nan
level_info[level]['y_mean'] = []
for c, cluster in enumerate(clusters):
level_info[level]['y_mean'].append(
np.nanmean(data_analysis.get_observations_at(level, cluster, t_train))
)
y_true = data_analysis.get_observations_at(level, cluster, t_val).reshape(forecast_reps, horizon)
for m, model in enumerate(models):
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
                if 'DeepAR' in model and level != 'L3':
for s, seed in enumerate(seeds):
res = load_res(model, level, cluster, seed)
for i, metric in enumerate(metrics):
if metric == 'rMAE':
results[level][s, i, m, c] = np.abs(y_true - res['p50'])
elif metric == 'rRMSE':
results[level][s, i, m, c] = (y_true - res['mean']) ** 2
elif metric == 'rCRPS':
results[level][s, i, m, c] = res['CRPS']
else:
res = load_res(model, level, cluster)
for i, metric in enumerate(metrics):
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
if metric == 'rMAE':
results[level][0, i, m, c] = np.abs(y_true - res['p50'])
elif metric == 'rRMSE':
results[level][0, i, m, c] = (y_true - res['mean']) ** 2
elif metric == 'rCRPS':
results[level][0, i, m, c] = res['CRPS']
info = {
'levels': level_info,
'metrics': list(metrics),
'models': list(models),
'reps': forecast_reps,
'horizon': horizon
}
if save_results_per_tstp_with_info:
np.save(results_path, (results, info), allow_pickle=True)
return results, info
def create_metric_df(metric, with_std=True, to_LaTeX=True):
results, info = collect_results()
i = info['metrics'].index(metric)
row_names = info['models']
col_names = info['levels'].keys()
metric_df = pd.DataFrame(index=row_names, columns=col_names, dtype=float)
for level in col_names:
for m, model in enumerate(row_names):
mean = np.mean(results[level][i, m])
metric_df.loc[model, level] = (('%%.%sf' % DECIMALS) % mean) if not np.isnan(mean) else '-'
if with_std and not np.isnan(mean):
std = np.std(results[level][i, m])
metric_df.loc[model, level] += (' (%%.%sf)' % DECIMALS) % std
if to_LaTeX:
df_to_LaTeX(metric_df)
return metric_df
def create_level_df(level, with_std=True, to_LaTeX=True):
results, info = collect_results()
row_names = info['metrics']
col_names = info['models']
level_df = pd.DataFrame(index=row_names, columns=col_names, dtype=float)
for i, metric in enumerate(row_names):
for m, model in enumerate(col_names):
mean = np.mean(results[level][i, m])
level_df.loc[metric, model] = (('%%.%sf' % DECIMALS) % mean) if not np.isnan(mean) else '-'
if with_std and not np.isnan(mean):
std = np.std(results[level][i, m])
level_df.loc[metric, model] += (' (%%.%sf)' % DECIMALS) % std
if to_LaTeX:
df_to_LaTeX(level_df)
return level_df
def create_runtime_df(models=('KF', 'KD-IC', 'DeepAR', 'LW'), with_std=False, to_LaTeX=True):
_, info = collect_results()
train_name = 'Avg. training time [s]'
prediction_name = 'Avg. prediction time [s]'
runtime_df = pd.DataFrame(index=[train_name, prediction_name], columns=models, dtype=float)
for model in models:
training_times = []
prediction_times = []
for level in info['levels'].keys():
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for cluster in info['levels'][level]['clusters']:
res = load_res(model, level, cluster)
training_times.append(res['fit_time'])
prediction_times.append(res['prediction_time'])
decimals = DECIMALS + 1
runtime_df.loc[train_name, model] = ('%%.%sf' % decimals) % np.mean(training_times)
runtime_df.loc[prediction_name, model] = ('%%.%sf' % decimals) % np.mean(prediction_times)
if with_std:
runtime_df.loc[train_name, model] += (' (%%.%sf)' % decimals) % np.std(training_times)
runtime_df.loc[prediction_name, model] += (' (%%.%sf)' % decimals) % np.std(prediction_times)
if to_LaTeX:
df_to_LaTeX(runtime_df)
return runtime_df
def df_to_LaTeX(df):
num_columns = len(df.columns)
print(df.to_latex(
float_format=f'%.{DECIMALS}f',
na_rep='-',
column_format='l' + ''.join('r' * num_columns)
))
def get_color(model):
if 'KF' in model:
return COLORS[0]
elif 'KD-IC' in model:
return COLORS[1]
elif 'DeepAR' in model:
return COLORS[2]
elif 'LW' in model:
return COLORS[3]
else:
return COLORS[4]
def get_linestyle(model):
if '(+W)' in model:
return LINESTYLES[1]
elif '(+WF)' in model:
return LINESTYLES[2]
else:
return LINESTYLES[0]
def _complete_plot(name, legend=True, grid=True):
if legend:
plt.legend()
if grid:
plt.grid()
plt.tight_layout()
plt.savefig(OUT_PATH + f'{name}.pdf', bbox_inches='tight')
plt.close()
def plot_epoch_loss(model, level, cluster, seed=MAIN_SEED):
assert 'DeepAR' in model, "Loss plot only available for deep models"
res = load_res(model, level, cluster, seed)
train_loss = res['train_loss']
val_loss = res['val_loss']
plt.figure(figsize=(6, 4))
plt.plot(np.arange(len(train_loss)) + 1, train_loss, color=COLORS[0], label='Train')
plt.plot(np.arange(len(val_loss)) + 1, val_loss, color=COLORS[1], label='Validation')
plt.ylabel('Loss')
plt.xlabel('Epoch')
_complete_plot(f'{get_file_name(model, level, cluster, seed)}_epoch_loss', grid=False)
def plot_horizon(model, metric, horizons=(1, 2, 3, 4), levels=('L0', 'L1', 'L2')):
results, info = collect_results_per_tstp()
model_W = model + '(+W)'
model_WF = model + '(+WF)'
i = info['metrics'].index(metric)
m = info['models'].index(model)
m_W = info['models'].index(model_W)
m_WF = info['models'].index(model_WF)
score = np.empty(len(horizons))
score_W = np.empty(len(horizons))
score_WF = np.empty(len(horizons))
for h, horizon in enumerate(horizons):
idx = np.arange(0, horizon * S_D)
res = []
res_W = []
res_WF = []
for level in levels:
for c, cluster in enumerate(info['levels'][level]['clusters']):
y_mean = info['levels'][level]['y_mean'][c]
if metric == 'rRMSE':
res.append(100 * np.sqrt(np.mean(results[level][:, i, m, c, :, idx], axis=2)) / y_mean)
res_W.append(100 * np.sqrt(np.mean(results[level][:, i, m_W, c, :, idx], axis=2)) / y_mean)
res_WF.append(100 * np.sqrt(np.mean(results[level][:, i, m_WF, c, :, idx], axis=2)) / y_mean)
else:
res.append(100 * np.mean(results[level][:, i, m, c, :, idx], axis=2) / y_mean)
res_W.append(100 * np.mean(results[level][:, i, m_W, c, :, idx], axis=2) / y_mean)
res_WF.append(100 * np.mean(results[level][:, i, m_WF, c, :, idx], axis=2) / y_mean)
score[h] = np.nanmean(res)
score_W[h] = np.nanmean(res_W)
score_WF[h] = np.nanmean(res_WF)
skill_W = 100 * (1 - score_W / score)
skill_WF = 100 * (1 - score_WF / score)
print(f'SS_{metric} (W): {skill_W}')
print(f'SS_{metric} (WF): {skill_WF}')
plt.figure(figsize=(3.5, 4))
plt.plot(
score,
linestyle=get_linestyle(model),
color=get_color(model),
marker=MARKERS[0]
)
plt.plot(
score_W,
linestyle=get_linestyle(model_W),
color=get_color(model_W),
marker=MARKERS[1]
)
plt.plot(
score_WF,
linestyle=get_linestyle(model_WF),
color=get_color(model_WF),
marker=MARKERS[2]
)
plt.ylim(6.95, 8.35)
plt.ylabel(metric)
plt.xlabel('Horizon')
plt.xticks(np.arange(len(horizons)), np.array(horizons))
plt.title(model)
_complete_plot(f"{model}_{metric}_horizon", grid=False, legend=False)
def plot_reps(metric, levels=('L0', 'L1', 'L2'), models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
# Lines for second legend
_, ax = plt.subplots()
lines = ax.plot([0, 1], [0, 1], '-C7', [0, 1], [0, 2], '--C7')
plt.close()
plt.figure(figsize=(10, 4))
for j, model in enumerate(models):
m = info['models'].index(model)
reps_mean = []
for level in levels:
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for c, cluster in enumerate(info['levels'][level]['clusters']):
reps_mean.append(results[level][i, m, c])
reps_mean = np.mean(reps_mean, axis=0)
plt.plot(
reps_mean,
label=model if '(' not in model else None,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
plt.xlabel('Forecast origin')
plt.yticks(np.arange(5, 17, 2.5))
t0 = load_res('LW', 'L0', 'Agg')['t0']
ticks = [dt.datetime.strptime(tstp, '%Y-%m-%d, %H:%M').strftime('%b, %d') for tstp in t0[1::5]]
plt.xticks(np.arange(1, len(t0), 5), ticks, rotation=0)
plt.grid(axis='y')
second_legend = plt.legend(lines, ('no weather', 'actual weather'), loc='upper left')
plt.gca().add_artist(second_legend)
_complete_plot(f"{f'{name}_' if name is not None else ''}{metric}_reps", grid=False)
def plot_clusters(level, metric, models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
plt.figure(figsize=(10, 4))
for model in models:
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
m = info['models'].index(model)
clusters_mean = np.mean(results[level][i, m], axis=1)
plt.plot(
clusters_mean,
label=model,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
cluster_labels = [f"{cluster.replace('ACORN-', '')} ({count})" for cluster, count in zip(
info['levels'][level]['clusters'],
info['levels'][level]['cardinality']
)]
if level == 'L3':
plt.xticks(np.arange(0, len(cluster_labels), 100), np.array(cluster_labels)[::100], rotation=90)
elif level == 'L2':
plt.xticks(np.arange(len(cluster_labels)), cluster_labels, rotation=90)
else:
plt.xticks(np.arange(len(cluster_labels)), cluster_labels)
_complete_plot(f"{f'{name}_' if name is not None else ''}{level}_{metric}_clusters")
def plot_aggregate_size(metric, models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
aggregate_sizes = []
errors = {}
bottom_level_errors = {}
for model in models:
errors[model] = []
bottom_level_errors[model] = []
for level, level_info in info['levels'].items():
for c, agg_size in enumerate(level_info['cardinality']):
if level != 'L3':
aggregate_sizes.append(agg_size)
for model in models:
m = info['models'].index(model)
                errors[model].append(np.mean(results[level][i, m, c]))
"""Augmentations for spectograms"""
from abc import abstractmethod
import cv2
import numpy as np
from pathlib import Path
import PIL
from PIL import Image
from scipy.ndimage.filters import laplace
class RandomTransform:
def __init__(self, p=0.5):
self.p = p
def __call__(self, image, y):
return self.transform(image, y)
def transform(self, image, y):
if np.random.rand() < self.p:
return self._transform(image, y)
return image, y
@abstractmethod
def _transform(self, image, y):
pass
class Compose:
def __init__(self, transform_list):
self.transform_list = transform_list
def __call__(self, image, y):
out = image.copy()
for tfm in self.transform_list:
out, y = tfm(out, y)
return out, y
class RobustScaling(RandomTransform):
def __init__(self, min_percentile=0.025, max_percentile=0.975, p=1.0):
super().__init__(p=p)
self.min_percentile = min_percentile
self.max_percentile = max_percentile
def _transform(self, image, y):
image = image.copy()
        min_value = np.percentile(image, self.min_percentile * 100)
from PIL import Image
import cv2
import numpy as np
maskPath = "mask.png"
cascPath = "haarcascade_frontalface_default.xml"
mask = Image.open(maskPath)
face_cascade = cv2.CascadeClassifier(cascPath)
def thug(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3)
    background = Image.fromarray(image)
    for (x, y, w, h) in faces:
        r_mask = mask.resize((w, h), Image.ANTIALIAS)
        offset = (x, y)
        # paste the resized mask using itself as the alpha mask, so its transparent regions are preserved
        background.paste(r_mask, offset, mask=r_mask)
    return np.asarray(background)
"""
Copyright (c) 2017 Matterport, Inc.
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import random
import itertools
import numpy as np
from skimage.measure import find_contours
import cv2
from models.model import detection_layer, unmold_detections
from models.modules import *
from utils import *
def tileImages(image_list, padding_x=5, padding_y=5, background_color=0):
"""Tile images"""
height = image_list[0][0].shape[0]
width = image_list[0][0].shape[1]
result_image = np.full((height * len(image_list) + padding_y * (len(image_list) + 1), width * len(image_list[0]) + padding_x * (len(image_list[0]) + 1), 3), fill_value=background_color, dtype=np.uint8)
for index_y, images in enumerate(image_list):
for index_x, image in enumerate(images):
offset_x = index_x * width + (index_x + 1) * padding_x
offset_y = index_y * height + (index_y + 1) * padding_y
if image.ndim == 2:
                image = np.tile(np.expand_dims(image, axis=-1), (1, 1, 3))
pass
result_image[offset_y:offset_y + height, offset_x:offset_x + width] = image
continue
continue
return result_image
############################################################
# Batch visualization
############################################################
def visualizeBatchDeMoN(options, input_dict, results, indexOffset=0, prefix='', concise=False):
cornerColorMap = {'gt': np.array([255, 0, 0]), 'pred': np.array([0, 0, 255]), 'inp': np.array([0, 255, 0])}
topdownSize = 256
for batchIndex in range(len(input_dict['image_1'])):
pose = input_dict['pose'][batchIndex]
for resultIndex, result in enumerate(results):
if concise and resultIndex < len(results) - 1:
continue
depth_pred = invertDepth(result['depth'][batchIndex]).detach().cpu().numpy().squeeze()
depth_gt = input_dict['depth'][batchIndex].squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
if options.scaleMode != 'variant':
valid_mask = np.logical_and(depth_gt > 1e-4, depth_pred > 1e-4)
depth_gt_values = depth_gt[valid_mask]
depth_pred_values = depth_pred[valid_mask]
scale = np.exp(np.mean(np.log(depth_gt_values) - np.log(depth_pred_values)))
depth_pred *= scale
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_depth_pred_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
if 'flow' in result:
flow_pred = result['flow'][batchIndex, :2].detach().cpu().numpy().transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_flow_pred_' + str(len(results) - 1 - resultIndex) + '.png', cv2.resize(drawFlowImage(flow_pred), (256, 192)))
pass
if 'rotation' in result and resultIndex >= len(results) - 2:
pass
continue
if not concise:
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_depth_gt.png', drawDepthImage(input_dict['depth'][batchIndex]))
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_image_0.png', (input_dict['image_1'][batchIndex].transpose((1, 2, 0)) + 0.5) * 255)
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_image_1.png', (input_dict['image_2'][batchIndex].transpose((1, 2, 0)) + 0.5) * 255)
flow_gt = input_dict['flow'][batchIndex, :2].transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_flow_gt.png', cv2.resize(drawFlowImage(flow_gt), (256, 192)))
pass
continue
return
def visualizeBatchPair(options, config, inp_pair, detection_pair, indexOffset=0, prefix='', suffix='', write_ply=False, write_new_view=False):
detection_images = []
for pair_index, (input_dict, detection_dict) in enumerate(zip(inp_pair, detection_pair)):
image_dict = visualizeBatchDetection(options, config, input_dict, detection_dict, indexOffset=indexOffset, prefix=prefix, suffix='_' + str(pair_index), prediction_suffix=suffix, write_ply=write_ply, write_new_view=write_new_view)
detection_images.append(image_dict['detection'])
continue
detection_image = tileImages([detection_images])
return
def visualizeBatchRefinement(options, config, input_dict, results, indexOffset=0, prefix='', suffix='', concise=False):
if not concise:
image = (input_dict['image'].detach().cpu().numpy().transpose((0, 2, 3, 1))[0] + 0.5) * 255
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image_0.png', image)
image_2 = (input_dict['image_2'].detach().cpu().numpy().transpose((0, 2, 3, 1))[0] + 0.5) * 255
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image_1.png', image_2)
depth_gt = input_dict['depth'].detach().cpu().numpy().squeeze()
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_gt.png', drawDepthImage(depth_gt))
flow_gt = input_dict['flow'][0, :2].detach().cpu().numpy().transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_flow_gt.png', cv2.resize(drawFlowImage(flow_gt), (256, 192)))
pass
numbers = []
for resultIndex, result in enumerate(results):
if 'mask' in result and (options.losses == '' or '0' in options.losses):
masks = result['mask'].detach().cpu().numpy()
masks = np.concatenate([np.maximum(1 - masks.sum(0, keepdims=True), 0), masks], axis=0).transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_segmentation_' + str(len(results) - 1 - resultIndex) + '.png', drawSegmentationImage(masks, blackIndex=0) * (masks.max(-1, keepdims=True) > 0.5).astype(np.uint8))
pass
if concise:
continue
if 'depth' in result and (options.losses == '' or '3' in options.losses):
depth_pred = invertDepth(result['depth']).detach().cpu().numpy().squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
if options.scaleMode != 'variant':
valid_mask = np.logical_and(depth_gt > 1e-4, depth_pred > 1e-4)
depth_gt_values = depth_gt[valid_mask]
depth_pred_values = depth_pred[valid_mask]
scale = np.exp(np.mean(np.log(depth_gt_values) - np.log(depth_pred_values)))
depth_pred *= scale
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_pred_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
pass
if 'plane_depth' in result and (options.losses == '' or '3' in options.losses):
depth_pred = invertDepth(result['plane_depth']).detach().cpu().numpy().squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
if options.scaleMode != 'variant':
valid_mask = np.logical_and(depth_gt > 1e-4, depth_pred > 1e-4)
depth_gt_values = depth_gt[valid_mask]
depth_pred_values = depth_pred[valid_mask]
scale = np.exp(np.mean(np.log(depth_gt_values) - np.log(depth_pred_values)))
depth_pred *= scale
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_pred_plane_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
pass
if 'flow' in result and (options.losses == '' or '1' in options.losses):
flow_pred = result['flow'][0, :2].detach().cpu().numpy().transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_flow_pred_' + str(len(results) - 1 - resultIndex) + '.png', cv2.resize(drawFlowImage(flow_pred), (256, 192)))
pass
if 'rotation' in result and resultIndex >= len(results) - 2:
pass
if 'plane' in result and resultIndex > 0:
numbers.append(np.linalg.norm(result['plane'].detach().cpu().numpy() - results[0]['plane'].detach().cpu().numpy()))
pass
if 'warped_image' in result and resultIndex >= len(results) - 2:
warped_image = ((result['warped_image'].detach().cpu().numpy().transpose((0, 2, 3, 1))[0] + 0.5) * 255).astype(np.uint8)
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image_warped_' + str(len(results) - 1 - resultIndex) + '.png', warped_image)
pass
if 'plane_depth_one_hot' in result:
depth_pred = invertDepth(result['plane_depth_one_hot']).detach().cpu().numpy().squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_pred_plane_onehot_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
pass
continue
if 'parameter' in options.suffix:
print('plane diff', numbers)
pass
return
def visualizeBatchDetection(options, config, input_dict, detection_dict, indexOffset=0, prefix='', suffix='', prediction_suffix='', write_ply=False, write_new_view=False):
image_dict = {}
images = input_dict['image'].detach().cpu().numpy().transpose((0, 2, 3, 1))
images = unmold_image(images, config)
image = images[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image' + suffix + '.png', image[80:560])
if 'warped_image' in input_dict:
warped_images = input_dict['warped_image'].detach().cpu().numpy().transpose((0, 2, 3, 1))
warped_images = unmold_image(warped_images, config)
warped_image = warped_images[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image' + suffix + '_warped.png', warped_image[80:560])
pass
if 'warped_depth' in input_dict:
warped_depth = input_dict['warped_depth'].detach().cpu().numpy()
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + '_warped.png', drawDepthImage(warped_depth[80:560]))
pass
if 'warped_mask' in input_dict:
warped_mask = input_dict['warped_mask'].detach().cpu().numpy()[0]
pass
if 'depth' in input_dict:
depths = input_dict['depth'].detach().cpu().numpy()
depth_gt = depths[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + '.png', drawDepthImage(depth_gt[80:560]))
pass
    windows = (0, 0, images.shape[1], images.shape[2])
class_colors = ColorPalette(config.NUM_CLASSES).getColorMap().tolist()
if 'mask' in input_dict:
box_image = image.copy()
boxes = input_dict['bbox'][0].detach().cpu().numpy()
masks = input_dict['mask'][0].detach().cpu().numpy()
if config.NUM_PARAMETER_CHANNELS > 0:
depths = masks[:, :, :, 1]
masks = masks[:, :, :, 0]
pass
segmentation_image = image * 0.0
for box, mask in zip(boxes, masks):
box = np.round(box).astype(np.int32)
mask = cv2.resize(mask, (box[3] - box[1], box[2] - box[0]))
segmentation_image[box[0]:box[2], box[1]:box[3]] = np.minimum(segmentation_image[box[0]:box[2], box[1]:box[3]] + np.expand_dims(mask, axis=-1) * np.random.randint(255, size=(3, ), dtype=np.int32), 255)
continue
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_segmentation' + suffix + '.png', segmentation_image.astype(np.uint8)[80:560])
if config.NUM_PARAMETER_CHANNELS > 0 and not config.OCCLUSION:
depth_image = np.zeros((image.shape[0], image.shape[1]))
for box, patch_depth in zip(boxes, depths):
box = np.round(box).astype(np.int32)
patch_depth = cv2.resize(patch_depth, (box[3] - box[1], box[2] - box[0]), cv2.INTER_NEAREST)
depth_image[box[0]:box[2], box[1]:box[3]] = patch_depth
continue
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_patch' + suffix + '.png', drawDepthImage(depth_image[80:560]))
pass
pass
if 'boundary' in detection_dict:
boundary_pred = detection_dict['boundary'].detach().cpu().numpy()[0]
boundary_gt = input_dict['boundary'].detach().cpu().numpy()[0]
for name, boundary in [('gt', boundary_gt), ('pred', boundary_pred)]:
boundary_image = image.copy()
boundary_image[boundary[0] > 0.5] = np.array([255, 0, 0])
boundary_image[boundary[1] > 0.5] = np.array([0, 0, 255])
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_boundary' + suffix + '_' + name + '.png', boundary_image)
continue
pass
if 'depth' in detection_dict:
depth_pred = detection_dict['depth'][0].detach().cpu().numpy()
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + prediction_suffix + '.png', drawDepthImage(depth_pred[80:560]))
if options.debug:
valid_mask = (depth_gt > 1e-4) * (input_dict['segmentation'].detach().cpu().numpy()[0] >= 0) * (detection_dict['mask'].detach().cpu().numpy().squeeze() > 0.5)
pass
pass
if 'depth_np' in detection_dict:
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + prediction_suffix + '_np.png', drawDepthImage(detection_dict['depth_np'].squeeze().detach().cpu().numpy()[80:560]))
pass
if 'depth_ori' in detection_dict:
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + prediction_suffix + '_ori.png', drawDepthImage(detection_dict['depth_ori'].squeeze().detach().cpu().numpy()[80:560]))
pass
if 'detection' in detection_dict and len(detection_dict['detection']) > 0:
detections = detection_dict['detection'].detach().cpu().numpy()
detection_masks = detection_dict['masks'].detach().cpu().numpy().transpose((1, 2, 0))
if 'flag' in detection_dict:
detection_flags = detection_dict['flag']
else:
detection_flags = {}
pass
instance_image, normal_image, depth_image = draw_instances(config, image, depth_gt, detections[:, :4], detection_masks > 0.5, detections[:, 4].astype(np.int32), detections[:, 6:], detections[:, 5], draw_mask=True, transform_planes=False, detection_flags=detection_flags)
image_dict['detection'] = instance_image
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_segmentation' + suffix + prediction_suffix + '.png', instance_image[80:560])
else:
image_dict['detection'] = np.zeros(image.shape, dtype=image.dtype)
pass
if write_new_view and False:
detection_masks = detection_dict['masks']
pose = np.eye(4)
pose[:3, :3] = np.matmul(axisAngleToRotationMatrix(np.array([-1, 0, 0]), np.pi / 18 * 0), axisAngleToRotationMatrix(np.array([0, 0, -1]), np.pi / 18))
pose[:3, 3] = np.array([-0.4, 0, 0])
drawNewViewDepth(options.test_dir + '/' + str(indexOffset) + '_new_view' + suffix + prediction_suffix + '.png', detection_masks[:, 80:560].detach().cpu().numpy(), detection_dict['plane_XYZ'].detach().cpu().numpy().transpose((0, 2, 3, 1))[:, 80:560], input_dict['camera'].detach().cpu().numpy(), pose)
depth = depth_gt[80:560]
ranges = config.getRanges(input_dict['camera']).detach().cpu().numpy()
XYZ_gt = ranges * np.expand_dims(depth, axis=-1)
drawNewViewDepth(options.test_dir + '/' + str(indexOffset) + '_new_view_depth_gt' + suffix + prediction_suffix + '.png', np.expand_dims(depth > 1e-4, 0), np.expand_dims(XYZ_gt, 0), input_dict['camera'].detach().cpu().numpy(), pose)
depth = detection_dict['depth_np'].squeeze()[80:560]
ranges = config.getRanges(input_dict['camera']).detach().cpu().numpy()
XYZ_gt = ranges * np.expand_dims(depth, axis=-1)
drawNewViewDepth(options.test_dir + '/' + str(indexOffset) + '_new_view_depth_pred' + suffix + prediction_suffix + '.png', np.expand_dims(depth > 1e-4, 0), np.expand_dims(XYZ_gt, 0), input_dict['camera'].detach().cpu().numpy(), pose)
pass
if write_new_view:
detection_masks = detection_dict['masks'][:, 80:560].detach().cpu().numpy()
XYZ_pred = detection_dict['plane_XYZ'].detach().cpu().numpy().transpose((0, 2, 3, 1))[:, 80:560]
depth = depth_gt[80:560]
ranges = config.getRanges(input_dict['camera']).detach().cpu().numpy()
XYZ_gt = np.expand_dims(ranges * np.expand_dims(depth, axis=-1), 0)
valid_mask = np.expand_dims(depth > 1e-4, 0).astype(np.float32)
camera = input_dict['camera'].detach().cpu().numpy()
valid_mask = np.expand_dims(cv2.resize(valid_mask[0], (256, 192)), 0)
XYZ_gt = np.expand_dims(cv2.resize(XYZ_gt[0], (256, 192)), 0)
detection_masks = np.stack([cv2.resize(detection_masks[c], (256, 192)) for c in range(len(detection_masks))], axis=0)
XYZ_pred = np.stack([cv2.resize(XYZ_pred[c], (256, 192)) for c in range(len(XYZ_pred))], axis=0)
locations = [np.array([-0.4, 0, 0]), np.array([0, 0, 0]), np.array([0, 0, 0]), np.array([0.4, 0, 0])]
angle_pairs = [(np.array([-1, 0, 0, np.pi / 18 * 0]), np.array([0, 0, -1, np.pi / 18])), (np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])), (np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])), (np.array([-1, 0, 0, np.pi / 18 * 0]), np.array([0, 0, 1, np.pi / 18]))]
num_frames = [25, 10, 25]
for c in range(len(locations) - 1):
if c == 2:
continue
for frame in range(num_frames[c]):
ratio = float(frame + 1) / num_frames[c]
location = locations[c] + (locations[c + 1] - locations[c]) * ratio
angle_pair = [angle_pairs[c][dim] + (angle_pairs[c + 1][dim] - angle_pairs[c][dim]) * ratio for dim in range(2)]
pose = np.eye(4)
pose[:3, :3] = np.matmul(axisAngleToRotationMatrix(angle_pair[0][:3], angle_pair[0][3]), axisAngleToRotationMatrix(angle_pair[1][:3], angle_pair[1][3]))
pose[:3, 3] = location
index_offset = sum(num_frames[:c]) + frame
drawNewViewDepth(options.test_dir + '/' + str(indexOffset) + '_video/' + str(index_offset) + '.png', detection_masks, XYZ_pred, camera, pose)
drawNewViewDepth(options.test_dir + '/' + str(indexOffset) + '_video_gt/' + str(index_offset) + '.png', valid_mask, XYZ_gt, camera, pose)
continue
continue
exit(1)
pass
if write_ply:
detection_masks = detection_dict['masks']
if 'plane_XYZ' not in detection_dict:
plane_XYZ = planeXYZModule(config.getRanges(input_dict['camera']), detection_dict['detection'][:, 6:9], width=config.IMAGE_MAX_DIM, height=config.IMAGE_MIN_DIM)
plane_XYZ = plane_XYZ.transpose(1, 2).transpose(0, 1).transpose(2, 3).transpose(1, 2)
zeros = torch.zeros(int(plane_XYZ.shape[0]), 3, (config.IMAGE_MAX_DIM - config.IMAGE_MIN_DIM) // 2, config.IMAGE_MAX_DIM).cuda()
plane_XYZ = torch.cat([zeros, plane_XYZ, zeros], dim=2)
detection_dict['plane_XYZ'] = plane_XYZ
pass
print(options.test_dir + '/' + str(indexOffset) + '_model' + suffix + prediction_suffix + '.ply')
writePLYFileMask(options.test_dir + '/' + str(indexOffset) + '_model' + suffix + prediction_suffix + '.ply', image[80:560], detection_masks[:, 80:560].detach().cpu().numpy(), detection_dict['plane_XYZ'].detach().cpu().numpy().transpose((0, 2, 3, 1))[:, 80:560], write_occlusion='occlusion' in options.suffix)
pose = np.eye(4)
pose[:3, :3] = np.matmul(axisAngleToRotationMatrix(np.array([-1, 0, 0]), np.pi / 18), axisAngleToRotationMatrix(np.array([0, -1, 0]), np.pi / 18))
pose[:3, 3] = np.array([-0.4, 0.3, 0])
current_dir = os.path.dirname(os.path.realpath(__file__))
pose_filename = current_dir + '/test/pose_new_view.txt'
print(pose_filename)
with open(pose_filename, 'w') as f:
for row in pose:
for col in row:
f.write(str(col) + '\t')
continue
f.write('\n')
continue
f.close()
pass
model_filename = current_dir + '/' + options.test_dir + '/' + str(indexOffset) + '_model' + suffix + prediction_suffix + '.ply'
output_filename = current_dir + '/' + options.test_dir + '/' + str(indexOffset) + '_model' + suffix + prediction_suffix + '.png'
try:
os.system('../../../Screenshoter/Screenshoter --model_filename=' + model_filename + ' --output_filename=' + output_filename + ' --pose_filename=' + pose_filename)
except:
pass
pass
return image_dict
def visualizeBatchDepth(options, config, input_dict, detection_dict, indexOffset=0, prefix='', suffix='', write_ply=False):
image_dict = {}
images = input_dict['image'].detach().cpu().numpy().transpose((0, 2, 3, 1))
images = unmold_image(images, config)
for batchIndex, image in enumerate(images):
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_image' + suffix + '.png', image)
continue
depths = input_dict['depth'].detach().cpu().numpy()
for batchIndex, depth in enumerate(depths):
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_depth' + suffix + '.png', drawDepthImage(depth))
continue
if 'depth_np' in detection_dict:
for batchIndex, depth in enumerate(detection_dict['depth_np'].detach().cpu().numpy()):
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_depth_pred_np' + suffix + '.png', drawDepthImage(depth))
continue
pass
return
def visualizeBatchSingle(options, config, images, image_metas, rpn_rois, depths, dicts, input_dict={}, inference={}, indexOffset=0, prefix='', suffix='', compare_planenet=False):
image = images[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image' + suffix + '.png', image)
depth = depths[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + '.png', drawDepthImage(depth))
windows = (0, 0, images.shape[1], images.shape[2])
class_colors = ColorPalette(config.NUM_CLASSES).getColorMap(returnTuples=True)
instance_colors = ColorPalette(1000).getColorMap(returnTuples=True)
if 'mask' in input_dict:
box_image = image.copy()
boxes = input_dict['bbox'][0].detach().cpu().numpy()
masks = input_dict['mask'][0].detach().cpu().numpy()
for box, mask in zip(boxes, masks):
box = np.round(box).astype(np.int32)
cv2.rectangle(box_image, (box[1], box[0]), (box[3], box[2]), color=(0, 0, 255), thickness=2)
continue
segmentation_image = image * 0.0
for box, mask in zip(boxes, masks):
box = np.round(box).astype(np.int32)
mask = cv2.resize(mask, (box[3] - box[1], box[2] - box[0]))
segmentation_image[box[0]:box[2], box[1]:box[3]] = np.minimum(segmentation_image[box[0]:box[2], box[1]:box[3]] + np.expand_dims(mask, axis=-1) * np.random.randint(255, size=(3, ), dtype=np.int32), 255)
continue
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_detection' + suffix + '.png', segmentation_image.astype(np.uint8))
pass
for name, result_dict in dicts:
if len(rpn_rois) > 0:
detections, keep_indices, ori_rois = detection_layer(config, rpn_rois.unsqueeze(0), result_dict['mrcnn_class'], result_dict['mrcnn_bbox'], result_dict['mrcnn_parameter'], image_metas, return_indices=True)
box_image = image.copy()
for instance_index, box in enumerate(detections.detach().cpu().numpy().astype(np.int32)):
cv2.rectangle(box_image, (box[1], box[0]), (box[3], box[2]), color=class_colors[int(box[4])], thickness=3)
continue
else:
continue
if len(detections) > 0:
detections[:, :4] = ori_rois
detections = detections.detach().cpu().numpy()
mrcnn_mask = result_dict['mrcnn_mask'][keep_indices].detach().cpu().numpy()
if name == 'gt':
class_mrcnn_mask = np.zeros(list(mrcnn_mask.shape) + [config.NUM_CLASSES], dtype=np.float32)
for index, (class_id, mask) in enumerate(zip(detections[:, 4].astype(np.int32), mrcnn_mask)):
if config.GLOBAL_MASK:
class_mrcnn_mask[index, :, :, 0] = mask
else:
class_mrcnn_mask[index, :, :, class_id] = mask
pass
continue
mrcnn_mask = class_mrcnn_mask
else:
mrcnn_mask = mrcnn_mask.transpose((0, 2, 3, 1))
pass
box_image = image.copy()
for instance_index, box in enumerate(detections.astype(np.int32)):
cv2.rectangle(box_image, (box[1], box[0]), (box[3], box[2]), color=tuple(class_colors[int(box[4])]), thickness=3)
continue
final_rois, final_class_ids, final_scores, final_masks, final_parameters = unmold_detections(config, detections, mrcnn_mask, image.shape, windows, debug=False)
result = {
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
"parameters": final_parameters,
}
instance_image, normal_image, depth_image = draw_instances(config, image, depth, result['rois'], result['masks'], result['class_ids'], result['parameters'], result['scores'])
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_detection' + suffix + '_' + name + '.png', instance_image)
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + '_' + name + '.png', depth_image)
else:
print('no detections')
pass
continue
if len(inference) > 0:
instance_image, normal_image, depth_image = draw_instances(config, image, depth, inference['rois'], inference['masks'], inference['class_ids'], inference['parameters'], inference['scores'], draw_mask=True)
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_detection' + suffix + '.png', instance_image)
if compare_planenet:
print(image.shape, image.min(), image.max())
pred_dict = detector.detect(image[80:560])
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_planenet_segmentation.png', drawSegmentationImage(pred_dict['segmentation'], blackIndex=10))
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_planenet_depth.png', drawDepthImage(pred_dict['depth']))
pass
pass
return
def visualizeBatchBoundary(options, config, images, boundary_pred, boundary_gt, indexOffset=0):
images = (images.detach().cpu().numpy().transpose((0, 2, 3, 1)) + config.MEAN_PIXEL).astype(np.uint8)
boundary_pred = boundary_pred.detach().cpu().numpy()
boundary_gt = boundary_gt.detach().cpu().numpy()
for batchIndex in range(len(images)):
for name, boundary in [('gt', boundary_gt[batchIndex]), ('pred', boundary_pred[batchIndex])]:
image = images[batchIndex].copy()
image[boundary[0] > 0.5] = np.array([255, 0, 0])
            image[boundary[1] > 0.5] = np.array([0, 0, 255])
"""
Highly inspired by the work of Scotts Lab: https://arxiv.org/abs/2001.05022v1
https://github.com/ScottLabUCB/HTTEM/blob/master/pyNanoFind/bragg_filtering/bragg_filtering.ipynb
"""
import numpy as np
import scipy.fftpack as ftp
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy.signal import medfilt2d
from skimage.util import pad
import sys
import h5py
# import realDataProcess as rdp
# import freqCutting as fcut
from scipy import fftpack
from scipy import signal
from skimage.morphology import opening, closing, square
from skimage.measure import label, regionprops
from skimage.filters import threshold_otsu
import helpers as helpers
def circular_mask(size_x=1024, size_y=1024, cx=512, cy=512, r=50):
x = np.arange(0, size_x)
y = np.arange(0, size_y)
arr = np.zeros((size_x, size_y))
mask = (x[np.newaxis,:]-cx)**2 + (y[:,np.newaxis]-cy)**2 < r**2
arr[mask] = 1
return arr
# plt.figure(figsize=(6, 6))
# plt.pcolormesh(x, y, arr)
# plt.colorbar()
# plt.show()
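# Illustrative check (added; not in the original source): circular_mask(8, 8, cx=4, cy=4, r=2)
# returns an 8x8 array whose entries are 1 inside the radius-2 disc centred on (4, 4) and 0
# elsewhere.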
def ring_mask(size_x=1024, size_y=1024, cx=512, cy=512, r_in=50, r_out=100):
x = np.arange(0, size_x)
y = np.arange(0, size_y)
    arr = np.zeros((size_x, size_y))
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 22 17:32:46 2019
@author: Wei-Hsiang, Shen
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import time
import os
from tensorflow.keras import layers
# Training parameters
BATCH_SIZE = 128
TOTAL_EPOCHS = 10
# We use the MNIST dataset
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_images = np.expand_dims(train_images, axis=-1)
test_images = np.expand_dims(test_images, axis=-1)
train_images = np.array(train_images, dtype=np.float32)
from __future__ import print_function, division
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
sys.path.insert(0, '..')
import chronostar.fitplotter as fp
import chronostar.retired2.datatool as dt
def plotEveryIter(rdir, star_pars, bg_hists=None, true_memb=None):
try:
print("Attempting init")
if os.path.isfile(rdir + 'init_xw.pdf'):
print(" init already plotted...")
else:
for dim1, dim2 in ('xy', 'uv', 'xu', 'yv', 'zw', 'xw'):
plt.clf()
fp.plotPaneWithHists(dim1, dim2, star_pars=star_pars,
groups=rdir + 'init_groups.npy',
weights=None, group_now=True,
bg_hists=bg_hists, membership=true_memb)
plt.savefig(rdir + 'init_{}{}.pdf'.format(dim1, dim2))
except:
print("init lacking files")
iter_count = 0
while True:
try:
print("Attempting iter {}".format(iter_count))
# idir = rdir + 'iter{}/'.format(iter_count)
idir = rdir + 'iter{:02}/'.format(iter_count)
if os.path.isfile(idir + 'iter_{:02}_xw.pdf'.format(iter_count)):
print(' iter_{:02} already plotted'.format(iter_count))
else:
z = np.load(idir + 'membership.npy')
weights = z.sum(axis=0)
for dim1, dim2 in ('xy', 'uv', 'xu', 'yv', 'zw', 'xw'):
plt.clf()
fp.plotPaneWithHists(dim1, dim2, star_pars=star_pars,
groups=idir + 'best_groups.npy',
weights=weights, group_now=True,
bg_hists=bg_hists,
membership=z,
true_memb=true_memb
)
plt.savefig(idir + 'iter_{:02}_{}{}.pdf'.format(
iter_count, dim1, dim2))
iter_count += 1
except IOError:
print("Iter {} is lacking files".format(iter_count))
break
try:
print("Attempting final")
idir = rdir + 'final/'
if os.path.isfile(idir + 'final_xw.pdf'):
print(" final already plotted")
else:
z = np.load(idir + 'final_membership.npy')
weights = z.sum(axis=0)
for dim1, dim2 in ('xy', 'uv', 'xu', 'yv', 'zw', 'xw'):
plt.clf()
fp.plotPaneWithHists(dim1, dim2, star_pars=star_pars,
groups=idir + 'final_groups.npy',
weights=weights, group_now=True,
membership=z,
true_memb=true_memb,
)
plt.savefig(idir + 'final_{}{}.pdf'.format(
dim1, dim2))
except IOError:
print("final is lacking files")
return
def getZfromOrigins(origins, star_pars):
if type(origins) is str:
origins = dt.loadGroups(origins)
if type(star_pars) is str:
star_pars = dt.loadXYZUVW(star_pars)
nstars = star_pars['xyzuvw'].shape[0]
ngroups = len(origins)
    nassoc_stars = np.sum([o.nstars for o in origins])
from __future__ import print_function
#
# Computing the free energy difference of an organic crystal polymorph at different gamma values
#
# Copyright <NAME> and <NAME>, University of Virginia, 2014
#
import sys
import numpy as np
import pymbar # multistate Bennett acceptance ratio
from pymbar import timeseries # timeseries analysis
import MBARBootstrap # Bootstrapping algorithm
import os.path
from optparse import OptionParser # for parsing command-line options
import sys
import Harvist #Hamiltonian Reweighting Visualization Toolkit
import pdb
import panedr
def dA_Gamma_MBAR(plot_out=True, MINGAMMA=0, MAXGAMMA=100, GSPACING=10, LAMBDA=100, exponent=2, polymorphs='p1 p2',
Molecules=72, Independent=4, Temp=200, Pressure=1, k=1000, ignoreframes=500,
includeframes=100000, potential='oplsaa',bonds=False, hinge='DefaultHinge'):
if (plot_out):
import matplotlib # for making plots, version 'matplotlib-1.1.0-1'; errors may pop up when using earlier versions
import matplotlib.pyplot as plt
font = {'family': 'normal',
'weight': 'normal',
'size': 16}
matplotlib.rc('font', **font)
# =============================================================================================
# ENSURE THAT USER INPUTS ARE SENSIBLE
# =============================================================================================
# TEMPERATURE
if Temp < 0:
print("Invalid Temperature: " + str(Temp))
sys.exit()
# GAMMA
if (MINGAMMA == -1) and (MAXGAMMA == -1) and (GSPACING == -1) and (exponent == 1):
print("Using default values!")
# The Gamma points sampled
Gammas = ['000L', '010L', '020L', '030L', '040L', '050L', '060L', '070L', '080L', '090L', '100L']
elif MINGAMMA < 0 or MAXGAMMA < 0 or GSPACING < 0 or MINGAMMA > MAXGAMMA:
print("Invalid Gamma Specifications")
sys.exit()
else:
RawGamma = MINGAMMA
Gammas = []
Gamma_names = []
gamma_names = np.arange(MINGAMMA, MAXGAMMA + GSPACING, GSPACING)
while RawGamma < MAXGAMMA:
if exponent >= 0:
Gamma = int(100 * (float(RawGamma) / float(MAXGAMMA)) ** abs(exponent))
else:
Gamma = int(100 * (1 - (float(MAXGAMMA - RawGamma) / float(MAXGAMMA)) ** abs(exponent)))
Gammas.append(Gamma)
# Format the gamma point name
if RawGamma < 10:
Gamma_names.append('00' + str(int(RawGamma)) + 'G')
elif RawGamma < 100:
Gamma_names.append('0' + str(int(RawGamma)) + 'G')
else:
Gamma_names.append('100G')
RawGamma = RawGamma + GSPACING
# Catch the final gamma point
Gammas.append(int(MAXGAMMA))
if MAXGAMMA < 10:
Gamma_names.append('00' + str(int(MAXGAMMA)) + 'G')
elif MAXGAMMA < 100:
Gamma_names.append('0' + str(int(MAXGAMMA)) + 'G')
else:
Gamma_names.append('100G')
# LAMBDA
if LAMBDA < 0 or LAMBDA > 100:
print("Invalid Lambda Point: " + str(LAMBDA))
sys.exit()
# POLYMORPH
polymorphs = polymorphs.split()
polymorph = []
polymorph_short = []
for i, token in enumerate(polymorphs):
polymorph.append('Polymorph ' + str(token))
polymorph_short.append(token)
# POTENTIAL
if potential != "oplsaa" and potential != "gromos" and potential != "designeda" and potential != "oplsaafakeg" and \
potential != "oplsaafakea":
print("Invalid Potential")
print("Supported potentials: oplsaa gromos designeda oplsaafakeg oplsaafakea")
sys.exit()
# =============================================================================================
# FORMAT INPUTS
# =============================================================================================
# POTENTIAL
PotNAME = ""
if potential == "oplsaa":
PotNAME = "OPLS"
elif potential == "gromos":
PotNAME = "GROM"
elif potential == "designeda":
PotNAME = "DESA"
elif potential == "oplsaafakeg":
PotNAME = "FAKEG"
elif potential == "oplsaafakea":
PotNAME = "FAKEA"
# OPTIONAL HINGE
if hinge == "DefaultHinge":
hinges = ['_G']
else:
# Read in each job
hinges = []
        hingevect = hinge.split()
for i, token in enumerate(hingevect):
hinges.append("_G_" + str(token))
# =============================================================================================
# READ IN RAW DATA
# =============================================================================================
# Constants.
kB = 1.3806488e-23 * 6.0221413e23 / (1000.0 * 4.184) # Boltzmann constant in kcal/mol
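    # Unit check (added note): 1.3806488e-23 J/K * 6.0221413e23 /mol = 8.3144621 J/(mol K);
    # dividing by 1000*4.184 J/kcal gives ~0.0019872 kcal/(mol K), i.e. the molar gas
    # constant R, called kB here because all energies are handled per mole.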
omitT = [] # Temperatures to be omitted from the analysis
# Parameters
T_k = Temp * np.ones(len(Gammas), float) # Convert temperatures to floats
print(T_k)
print(Gammas)
g_k = np.zeros([len(Gammas)], float)
K = len(Gammas) # How many states?
# total number of states examined; 0 are unsampled if bonds are left on, 1 is unsampled if the bonds are removed
if bonds == True:
Kbig = K
dhdl_placement = 6
else:
Kbig = K
dhdl_placement = 5
# maximum number of snapshots/simulation (could make this automated) - doesn't matter, as long as it's long enough.
N_max = 200000
# beta factor for the different temperatures
beta_k = 1.0 / (kB * T_k)
dA = np.zeros([len(polymorph), Kbig], float)
ddA = np.zeros([len(polymorph), Kbig], float)
convert_units = 0.2390057 * np.ones(Kbig, float) # Convert all energies to kcal/mol
# Allocate storage for simulation data
for i, poly in enumerate(polymorph):
# N_k[k] is the total number of snapshots from alchemical state k
N_k = np.zeros([Kbig], np.int32)
# N_k_s[k,s] is the total number of snapshots from alchemical state k from seed s
N_k_s = np.zeros([Kbig, len(hinges)], np.int32)
# u_kln[k,l,n] is the adjusted energy of snapshot n from simulation k
u_kln = np.zeros([K, Kbig, N_max], np.float64)
# dhdl_kn[k,n] is the derivative of energy with respect to lambda of snapshot n from simulation k
dhdl_kn = np.zeros([K, N_max], np.float64)
#Load in the data for each run
for k in range(K):
n = 0
for s, hinge in enumerate(hinges):
# cycle through all the input total energy data
dirpath = polymorph_short[i] + '/interactions/' + str(gamma_names[k])
fname = dirpath + '/PROD.edr'
dhdlname = dirpath + '/dhdl_PROD.xvg'
if k not in omitT:
potential_energy = panedr.edr_to_df(fname)['Potential'].values
print("loading " + fname)
dhdl_energy = np.loadtxt(dhdlname, comments=['#', '$', '@', '!'])
print("loading " + dhdlname)
# Removing any non-equilibrated points of the simulation
[start_production, _, _] = timeseries.detectEquilibration(potential_energy)
potential_energy = potential_energy[start_production:]
dhdl_energy = dhdl_energy[start_production:]
# the energy of every configuration from each state evaluated at its sampled state
n = len(potential_energy)
u_kln[k, :K, :n] = (potential_energy.reshape((n, 1)) + dhdl_energy[:, dhdl_placement:]).T * \
convert_units[k]
dhdl_kn[k, :n] = (float(Independent) / Molecules) * \
np.sum(dhdl_energy[:, 2:dhdl_placement], axis=1) * convert_units[k]
if s == 0:
N_k_s[k, s] = n
else:
N_k_s[k, s] = n - sum(N_k_s[k, 0:s])
N_k[k] = n
# convert to nondimensional units from kcal/mol
u_kln *= beta_k[0]
# all data loaded from the three sets
u_kln_save = u_kln.copy()
N_k_save = N_k.copy()
g_k = np.zeros([K])
print("Number of retained samples")
print(N_k)
print("Number of retained samples from each seed")
print(N_k_s)
# =============================================================================================
# COMPUTE FREE ENERGY DIFFERENCE USING MBAR
# =============================================================================================
# Initialize MBAR.
print("Running MBAR...")
# generate the weights of each of the umbrella set
mbar = pymbar.MBAR(u_kln, N_k, verbose=True, subsampling_protocol=[{'method': 'L-BFGS-B'}])
print("MBAR Converged...")
# testing
for k in range(Kbig):
w = np.exp(mbar.Log_W_nk[:, k])
print("max weight in state %d is %12.7f" % (k, np.max(w)))
            neff = 1 / np.sum(w ** 2)
from db import dbtools
from tbainfo import tbarequests
from picklist_team import Team
import numpy as np
import globals
def main():
globals.init()
db = dbtools("Champs", "frc900", "frc900")
tba = tbarequests('jQusM2aYtJLHXv3vxhDcPpIWzaxjMga5beNRWOarv6wdRwTF63vNpIsLYVANvCWE')
cargo_weight = int(globals.cargo_weight)/100
panel_weight = int(globals.panel_weight)/100
endgame_weight = int(globals.endgame_weight)/100
team_keys = tba.get_teams(globals.tba_competition_id)
teams = []
# create and populate team objects
for team_key in team_keys:
team = new_team(team_key, db)
teams.append(team)
    mean_cargo = np.mean([team.cargo for team in teams])
"""Test the mlpack destructors."""
from __future__ import division, print_function
import numpy as np
# noinspection PyPackageRequirements
import pytest
from sklearn.utils import check_random_state
from ddl.datasets import make_toy_data
from ddl.externals.mlpack import MlpackDensityTreeEstimator
from ddl.tree import TreeDensity, TreeDestructor
from ddl.validation import check_destructor
try:
# noinspection PyProtectedMember
from ddl.externals.mlpack import _det as det
except ImportError:
import warnings
warnings.warn('In test script, could not import necessary mlpack wrappers.')
def test_mlpack_density_tree_destructor():
destructor = TreeDestructor(
tree_density=TreeDensity(
tree_estimator=MlpackDensityTreeEstimator(max_leaf_nodes=10),
)
)
assert check_destructor(destructor)
@pytest.mark.parametrize('test', ['canonical', 'complex', 'default'])
def test_mlpack_det(test):
rng = check_random_state(0)
if test == 'canonical':
X = rng.rand(200, 2)
min_vals = np.zeros(X.shape[1])
max_vals = np.ones(X.shape[1])
total_points = X.shape[0]
tree = det.PyDTree(min_vals=min_vals, max_vals=max_vals, total_points=total_points)
tree.fit(X)
print(tree)
elif test == 'complex':
X = rng.randn(200, 2)
tree = det.PyDTree(X=X)
tree.fit(X)
print(tree)
tree.fit(X, max_depth=5)
print(tree)
tree.fit(X, max_leaf_nodes=8)
print(tree)
else:
X = rng.randn(200, 2)
tree = det.PyDTree(X)
alpha = -1 # Just to initialize alpha
for i in range(10):
if i == 0:
alpha = tree.grow(X)
else:
if tree.num_children() == 0:
break # Stop since no more pruning allowed
alpha = tree.prune_and_update(alpha, X.shape[0])
print('alpha=%g' % alpha)
print(tree.get_tree_str(show_leaves=True))
# noinspection SpellCheckingInspection
def test_mlpack_det_get_arrayed_tree():
# Setup dataset
D = make_toy_data('uniform_grid', n_samples=1000, random_state=0)
X = D.X
# Setup tree
min_vals = np.zeros(X.shape[1])
max_vals = np.ones(X.shape[1])
total_points = X.shape[0]
tree = det.PyDTree(min_vals=min_vals, max_vals=max_vals, total_points=total_points)
# Fit tree
tree.fit(X, max_leaf_nodes=5, min_leaf_size=5)
arrayed_tree = tree.get_arrayed_tree()
# print(tree.get_tree_str(show_leaves=True))
# stack = [(0, None)]
# while len(stack) > 0:
# print(stack)
# node_i, is_left = stack.pop()
# prefix = 'Left' if is_left else 'Right'
#
# if arrayed_tree.feature[node_i] >= 0:
# print('%s dim=%3d, threshold=%.3f'
# % (prefix, arrayed_tree.feature[node_i], arrayed_tree.threshold[node_i]))
# stack.append((arrayed_tree.children_right[node_i], False))
# stack.append((arrayed_tree.children_left[node_i], True))
# else:
# print('%s' % prefix)
# print(arrayed_tree)
# print(repr(arrayed_tree.feature))
# print(repr(arrayed_tree.threshold))
# print(repr(arrayed_tree.children_left))
# print(repr(arrayed_tree.children_right))
# Test based on known values
assert np.all(arrayed_tree.feature == np.array([0, -1, 1, 1, 1, -1, -1, -1, -1]))
nan = np.nan
expected_threshold = np.array(
[0.5631866381580427, nan, 0.7914520226478026,
0.40126660240012946, 0.19549050993708061, nan,
nan, nan, nan]
)
    np.testing.assert_allclose(arrayed_tree.threshold, expected_threshold, rtol=1e-16)
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Bayesian Gaussian Mixture Model Classes:
contains the basic fields and methods of Bayesian GMMs
the high-level functions are/should be bound in C
The base class BGMM relies on an implementation that performs Gibbs sampling
A derived class VBGMM uses Variational Bayes inference instead
A third class is introduced to take advantage of the old C-bindings,
but it is limited to diagonal covariance models
Author : <NAME>, 2008-2011
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import numpy.random as nr
from scipy.linalg import inv, cholesky, eigvalsh
from scipy.special import gammaln
import math
from .utils import kmeans
from .gmm import GMM
##################################################################
# ancillary functions ############################################
##################################################################
def detsh(H):
"""
Routine for the computation of determinants of symmetric positive
matrices
Parameters
----------
H array of shape(n,n)
        the input matrix, assumed symmetric and positive
Returns
-------
dh: float, the determinant
"""
return np.prod(eigvalsh(H))
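# Example (added note, not part of the original module): for a diagonal symmetric
# positive matrix the eigenvalues are just the diagonal entries, so
#   detsh(np.diag([2., 3.])) -> 6.0   (up to floating-point error)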
def dirichlet_eval(w, alpha):
"""
Evaluate the probability of a certain discrete draw w
from the Dirichlet density with parameters alpha
Parameters
----------
w: array of shape (n)
alpha: array of shape (n)
"""
if np.shape(w) != np.shape(alpha):
raise ValueError("incompatible dimensions")
loge = np.sum((alpha-1) * np.log(w))
logb = np.sum(gammaln(alpha)) - gammaln(alpha.sum())
loge -= logb
return np.exp(loge)
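# Note (added): the quantity evaluated above is the standard Dirichlet pdf,
#   p(w | alpha) = Gamma(sum_i alpha_i) / prod_i Gamma(alpha_i) * prod_i w_i^(alpha_i - 1),
# computed in log space (loge - logb) before exponentiating.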
def generate_normals(m, P):
""" Generate a Gaussian sample with mean m and precision P
Parameters
----------
m array of shape n: the mean vector
P array of shape (n,n): the precision matrix
Returns
-------
ng : array of shape(n): a draw from the gaussian density
"""
icp = inv(cholesky(P))
ng = nr.randn(m.shape[0])
ng = np.dot(ng, icp)
ng += m
return ng
def generate_Wishart(n, V):
"""
Generate a sample from Wishart density
Parameters
----------
n: float,
the number of degrees of freedom of the Wishart density
V: array of shape (n,n)
the scale matrix of the Wishart density
Returns
-------
W: array of shape (n,n)
the draw from Wishart density
"""
icv = cholesky(V)
p = V.shape[0]
A = nr.randn(p, p)
for i in range(p):
A[i, i:] = 0
A[i, i] = np.sqrt(nr.chisquare(n - i))
R = np.dot(icv, A)
W = np.dot(R, R.T)
return W
def wishart_eval(n, V, W, dV=None, dW=None, piV=None):
"""Evaluation of the probability of W under Wishart(n,V)
Parameters
----------
n: float,
the number of degrees of freedom (dofs)
V: array of shape (n,n)
the scale matrix of the Wishart density
W: array of shape (n,n)
the sample to be evaluated
dV: float, optional,
determinant of V
dW: float, optional,
determinant of W
piV: array of shape (n,n), optional
inverse of V
Returns
-------
(float) the density
"""
# check that shape(V)==shape(W)
p = V.shape[0]
if dV is None:
dV = detsh(V)
if dW is None:
dW = detsh(W)
if piV is None:
piV = inv(V)
ldW = math.log(dW) * (n - p - 1) / 2
ltr = - np.trace(np.dot(piV, W)) / 2
la = (n * p * math.log(2) + math.log(dV) * n) / 2
lg = math.log(math.pi) * p * (p - 1) / 4
lg += gammaln(np.arange(n - p + 1, n + 1).astype(np.float) / 2).sum()
lt = ldW + ltr - la - lg
return math.exp(lt)
def normal_eval(mu, P, x, dP=None):
""" Probability of x under normal(mu, inv(P))
Parameters
----------
mu: array of shape (n),
the mean parameter
P: array of shape (n, n),
the precision matrix
x: array of shape (n),
the data to be evaluated
Returns
-------
(float) the density
"""
dim = P.shape[0]
if dP is None:
dP = detsh(P)
w0 = math.log(dP) - dim * math.log(2 * math.pi)
w0 /= 2
dx = mu - x
q = np.dot(np.dot(P, dx), dx)
w = w0 - q / 2
like = math.exp(w)
return like
def generate_perm(k, nperm=100):
"""
returns an array of shape(nbperm, k) representing
the permutations of k elements
Parameters
----------
k, int the number of elements to be permuted
nperm=100 the maximal number of permutations
if gamma(k+1)>nperm: only nperm random draws are generated
Returns
-------
p: array of shape(nperm,k): each row is permutation of k
"""
from scipy.special import gamma
if k == 1:
return np.reshape(np.array([0]), (1, 1)).astype(np.int)
if gamma(k + 1) < nperm:
# exhaustive permutations
aux = generate_perm(k - 1)
n = aux.shape[0]
perm = np.zeros((n * k, k)).astype(np.int)
for i in range(k):
perm[i * n:(i + 1) * n, :i] = aux[:, :i]
perm[i * n:(i + 1) * n, i] = k-1
perm[i * n:(i + 1) * n, i + 1:] = aux[:, i:]
else:
from numpy.random import rand
perm = np.zeros((nperm, k)).astype(np.int)
for i in range(nperm):
p = np.argsort(rand(k))
perm[i] = p
return perm
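# Example (added note): generate_perm(3) enumerates all 3! = 6 permutations and
# returns an array of shape (6, 3); once k! exceeds nperm, only nperm randomly
# drawn permutations are returned instead of the full set.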
def multinomial(probabilities):
"""
    Generate samples from a multinomial distribution
Parameters
----------
probabilities: array of shape (nelements, nclasses):
        likelihood of each element belonging to each class;
        each row is assumed to sum to 1.
        One sample is drawn from each row.
Returns
-------
z array of shape (nelements): the draws,
that take values in [0..nclasses-1]
"""
nvox = probabilities.shape[0]
nclasses = probabilities.shape[1]
cuml = np.zeros((nvox, nclasses + 1))
cuml[:, 1:] = np.cumsum(probabilities, 1)
aux = np.random.rand(nvox, 1)
z = np.argmax(aux < cuml, 1)-1
return z
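# Example (added note): with probabilities = np.array([[0.2, 0.8], [1.0, 0.0]]) the
# first row yields label 1 roughly 80% of the time while the second row always
# yields label 0; the returned z has shape (2,) with values in {0, 1}.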
def dkl_gaussian(m1, P1, m2, P2):
"""
    Returns the KL divergence between Gaussian densities
Parameters
----------
m1: array of shape (n),
the mean parameter of the first density
P1: array of shape(n,n),
the precision parameters of the first density
m2: array of shape (n),
the mean parameter of the second density
P2: array of shape(n,n),
the precision parameters of the second density
"""
tiny = 1.e-15
dim = np.size(m1)
if m1.shape != m2.shape:
raise ValueError("incompatible dimensions for m1 and m2")
if P1.shape != P2.shape:
raise ValueError("incompatible dimensions for P1 and P2")
if P1.shape[0] != dim:
raise ValueError("incompatible dimensions for m1 and P1")
d1 = max(detsh(P1), tiny)
d2 = max(detsh(P2), tiny)
dkl = np.log(d1 / d2) + np.trace(np.dot(P2, inv(P1))) - dim
dkl += np.dot(np.dot((m1 - m2).T, P2), (m1 - m2))
dkl /= 2
return dkl
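# Note (added): with precisions P = inv(Sigma) this is the usual closed form
#   KL = 0.5 * [ log(|P1|/|P2|) + tr(P2 P1^-1) - dim + (m1-m2)^T P2 (m1-m2) ],
# matching the terms accumulated above.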
def dkl_wishart(a1, B1, a2, B2):
"""
    Returns the KL divergence between two Wishart distributions with
parameters (a1,B1) and (a2,B2),
Parameters
----------
a1: Float,
degrees of freedom of the first density
B1: array of shape(n,n),
scale matrix of the first density
a2: Float,
degrees of freedom of the second density
B2: array of shape(n,n),
scale matrix of the second density
Returns
-------
dkl: float, the Kullback-Leibler divergence
"""
from scipy.special import psi, gammaln
tiny = 1.e-15
if B1.shape != B2.shape:
raise ValueError("incompatible dimensions for B1 and B2")
dim = B1.shape[0]
d1 = max(detsh(B1), tiny)
d2 = max(detsh(B2), tiny)
lgc = dim * (dim - 1) * math.log(np.pi) / 4
lg1 = lgc
lg2 = lgc
lw1 = - math.log(d1) + dim * math.log(2)
lw2 = - math.log(d2) + dim * math.log(2)
for i in range(dim):
lg1 += gammaln((a1 - i) / 2)
lg2 += gammaln((a2 - i) / 2)
lw1 += psi((a1 - i) / 2)
lw2 += psi((a2 - i) / 2)
lz1 = 0.5 * a1 * dim * math.log(2) - 0.5 * a1 * math.log(d1) + lg1
lz2 = 0.5 * a2 * dim * math.log(2) - 0.5 * a2 * math.log(d2) + lg2
dkl = (a1 - dim - 1) * lw1 - (a2 - dim - 1) * lw2 - a1 * dim
dkl += a1 * np.trace(np.dot(B2, inv(B1)))
dkl /= 2
dkl += (lz2 - lz1)
return dkl
def dkl_dirichlet(w1, w2):
""" Returns the KL divergence between two dirichlet distribution
Parameters
----------
w1: array of shape(n),
the parameters of the first dirichlet density
w2: array of shape(n),
the parameters of the second dirichlet density
"""
if w1.shape != w2.shape:
raise ValueError("incompatible dimensions for w1 and w2")
dkl = 0
from scipy.special import gammaln, psi
dkl = np.sum(gammaln(w2)) - np.sum(gammaln(w1))
dkl += gammaln(np.sum(w1)) - gammaln(np.sum(w2))
dkl += np.sum((w1 - w2) * (psi(w1) - psi(np.sum(w1))))
return dkl
#######################################################################
# main GMM class #####################################################
#######################################################################
class BGMM(GMM):
"""
This class implements Bayesian GMMs
    This class contains the following fields:
k: int,
the number of components in the mixture
dim: int,
the dimension of the data
means: array of shape (k, dim)
all the means of the components
precisions: array of shape (k, dim, dim)
        the precisions of the components
weights: array of shape (k):
weights of the mixture
shrinkage: array of shape (k):
scaling factor of the posterior precisions on the mean
dof: array of shape (k)
the degrees of freedom of the components
prior_means: array of shape (k, dim):
the prior on the components means
prior_scale: array of shape (k, dim):
the prior on the components precisions
prior_dof: array of shape (k):
the prior on the dof (should be at least equal to dim)
prior_shrinkage: array of shape (k):
scaling factor of the prior precisions on the mean
prior_weights: array of shape (k)
the prior on the components weights
shrinkage: array of shape (k):
scaling factor of the posterior precisions on the mean
dof : array of shape (k): the posterior dofs
fixme
-----
only 'full' precision is supported
"""
def __init__(self, k=1, dim=1, means=None, precisions=None,
weights=None, shrinkage=None, dof=None):
"""
Initialize the structure with the dimensions of the problem
Eventually provide different terms
"""
GMM.__init__(self, k, dim, 'full', means, precisions, weights)
self.shrinkage = shrinkage
self.dof = dof
if self.shrinkage is None:
self.shrinkage = np.ones(self.k)
if self.dof is None:
self.dof = np.ones(self.k)
if self.precisions is not None:
self._detp = [detsh(self.precisions[k]) for k in range(self.k)]
def check(self):
"""
        Check the shapes of the different matrices involved in the model
"""
GMM.check(self)
if self.prior_means.shape[0] != self.k:
raise ValueError("Incorrect dimension for self.prior_means")
if self.prior_means.shape[1] != self.dim:
raise ValueError("Incorrect dimension for self.prior_means")
if self.prior_scale.shape[0] != self.k:
raise ValueError("Incorrect dimension for self.prior_scale")
if self.prior_scale.shape[1] != self.dim:
raise ValueError("Incorrect dimension for self.prior_scale")
if self.prior_dof.shape[0] != self.k:
raise ValueError("Incorrect dimension for self.prior_dof")
if self.prior_weights.shape[0] != self.k:
raise ValueError("Incorrect dimension for self.prior_weights")
def set_priors(self, prior_means, prior_weights, prior_scale, prior_dof,
prior_shrinkage):
"""
Set the prior of the BGMM
Parameters
----------
prior_means: array of shape (self.k,self.dim)
prior_weights: array of shape (self.k)
prior_scale: array of shape (self.k,self.dim,self.dim)
prior_dof: array of shape (self.k)
prior_shrinkage: array of shape (self.k)
"""
self.prior_means = prior_means
self.prior_weights = prior_weights
self.prior_scale = prior_scale
self.prior_dof = prior_dof
self.prior_shrinkage = prior_shrinkage
# cache some pre-computations
self._dets = [detsh(self.prior_scale[k]) for k in range(self.k)]
self._inv_prior_scale = np.array([inv(self.prior_scale[k])
for k in range(self.k)])
self.check()
def guess_priors(self, x, nocheck=0):
"""
        Set the priors so that they are weakly informative;
        this follows Fraley and Raftery,
Journal of Classification 24:155-181 (2007)
Parameters
----------
x, array of shape (nb_samples,self.dim)
the data used in the estimation process
nocheck: boolean, optional,
if nocheck==True, check is skipped
"""
# a few parameters
small = 0.01
elshape = (1, self.dim, self.dim)
mx = np.reshape(x.mean(0), (1, self.dim))
dx = x - mx
vx = np.dot(dx.T, dx) / x.shape[0]
px = np.reshape(np.diag(1.0 / np.diag(vx)), elshape)
px *= np.exp(2.0 / self.dim * math.log(self.k))
# set the priors
self.prior_means = np.repeat(mx, self.k, 0)
self.prior_weights = np.ones(self.k)
self.prior_scale = np.repeat(px, self.k, 0)
self.prior_dof = np.ones(self.k) * (self.dim + 2)
self.prior_shrinkage = np.ones(self.k) * small
# cache some pre-computations
self._dets = np.ones(self.k) * detsh(px[0])
self._inv_prior_scale = np.repeat(
np.reshape(inv(px[0]), elshape), self.k, 0)
# check that everything is OK
        if not nocheck:
            self.check()
def initialize(self, x):
"""
        initialize z using a k-means algorithm, then update the parameters
Parameters
----------
x: array of shape (nb_samples,self.dim)
the data used in the estimation process
"""
if self.k > 1:
cent, z, J = kmeans(x, self.k)
else:
z = np.zeros(x.shape[0]).astype(np.int)
self.update(x, z)
def pop(self, z):
"""
compute the population, i.e. the statistics of allocation
Parameters
----------
z array of shape (nb_samples), type = np.int
the allocation variable
Returns
-------
hist : array shape (self.k) count variable
"""
hist = np.array([np.sum(z == k) for k in range(self.k)])
return hist
def update_weights(self, z):
"""
Given the allocation vector z, resample the weights parameter
Parameters
----------
z array of shape (nb_samples), type = np.int
the allocation variable
"""
pop = self.pop(z)
weights = pop + self.prior_weights
self.weights = np.random.dirichlet(weights)
def update_means(self, x, z):
"""
Given the allocation vector z,
and the corresponding data x,
resample the mean
Parameters
----------
x: array of shape (nb_samples,self.dim)
the data used in the estimation process
z: array of shape (nb_samples), type = np.int
the corresponding classification
"""
pop = self.pop(z)
self.shrinkage = self.prior_shrinkage + pop
empmeans = np.zeros(np.shape(self.means))
prior_shrinkage = np.reshape(self.prior_shrinkage, (self.k, 1))
shrinkage = np.reshape(self.shrinkage, (self.k, 1))
for k in range(self.k):
empmeans[k] = np.sum(x[z == k], 0)
means = empmeans + self.prior_means * prior_shrinkage
means /= shrinkage
for k in range(self.k):
self.means[k] = generate_normals(\
means[k], self.precisions[k] * self.shrinkage[k])
def update_precisions(self, x, z):
"""
Given the allocation vector z,
and the corresponding data x,
resample the precisions
Parameters
----------
x array of shape (nb_samples,self.dim)
the data used in the estimation process
z array of shape (nb_samples), type = np.int
the corresponding classification
"""
pop = self.pop(z)
self.dof = self.prior_dof + pop + 1
rpop = pop + (pop == 0)
self._detp = np.zeros(self.k)
for k in range(self.k):
# empirical means
empmeans = np.sum(x[z == k], 0) / rpop[k]
dm = np.reshape(empmeans - self.prior_means[k], (1, self.dim))
# scatter
dx = np.reshape(x[z == k] - empmeans, (pop[k], self.dim))
scatter = np.dot(dx.T, dx)
# bias
addcov = np.dot(dm.T, dm) * self.prior_shrinkage[k]
# covariance = prior term + scatter + bias
covariance = self._inv_prior_scale[k] + scatter + addcov
#precision
scale = inv(covariance)
self.precisions[k] = generate_Wishart(self.dof[k], scale)
self._detp[k] = detsh(self.precisions[k])
def update(self, x, z):
"""
update function (draw a sample of the GMM parameters)
Parameters
----------
x array of shape (nb_samples,self.dim)
the data used in the estimation process
z array of shape (nb_samples), type = np.int
the corresponding classification
"""
self.update_weights(z)
self.update_precisions(x, z)
self.update_means(x, z)
def sample_indicator(self, like):
"""
sample the indicator from the likelihood
Parameters
----------
like: array of shape (nb_samples,self.k)
component-wise likelihood
Returns
-------
z: array of shape(nb_samples): a draw of the membership variable
"""
tiny = 1 + 1.e-15
like = (like.T / like.sum(1)).T
like /= tiny
z = multinomial(like)
return z
def sample(self, x, niter=1, mem=0, verbose=0):
"""
sample the indicator and parameters
Parameters
----------
x array of shape (nb_samples,self.dim)
the data used in the estimation process
niter=1 : the number of iterations to perform
mem=0: if mem, the best values of the parameters are computed
verbose=0: verbosity mode
Returns
-------
best_weights: array of shape (self.k)
best_means: array of shape (self.k, self.dim)
best_precisions: array of shape (self.k, self.dim, self.dim)
possibleZ: array of shape (nb_samples, niter)
the z that give the highest posterior
to the data is returned first
"""
self.check_x(x)
if mem:
possibleZ = - np.ones((x.shape[0], niter)).astype(np.int)
score = - np.inf
bpz = - np.inf
for i in range(niter):
like = self.likelihood(x)
sll = np.mean(np.log(np.sum(like, 1)))
sll += np.log(self.probability_under_prior())
if sll > score:
score = sll
best_weights = self.weights.copy()
best_means = self.means.copy()
best_precisions = self.precisions.copy()
z = self.sample_indicator(like)
if mem:
possibleZ[:, i] = z
puz = sll # to save time
self.update(x, z)
if puz > bpz:
ibz = i
bpz = puz
if mem:
aux = possibleZ[:, 0].copy()
possibleZ[:, 0] = possibleZ[:, ibz].copy()
possibleZ[:, ibz] = aux
return best_weights, best_means, best_precisions, possibleZ
def sample_and_average(self, x, niter=1, verbose=0):
"""
sample the indicator and parameters
the average values for weights,means, precisions are returned
Parameters
----------
x = array of shape (nb_samples,dim)
the data from which bic is computed
niter=1: number of iterations
Returns
-------
weights: array of shape (self.k)
means: array of shape (self.k,self.dim)
precisions: array of shape (self.k,self.dim,self.dim)
or (self.k, self.dim)
these are the average parameters across samplings
Notes
-----
        All this makes sense only if no label switching has occurred, so this is
wrong in general (asymptotically).
fix: implement a permutation procedure for components identification
"""
aprec = np.zeros(np.shape(self.precisions))
aweights = np.zeros(np.shape(self.weights))
ameans = np.zeros(np.shape(self.means))
for i in range(niter):
like = self.likelihood(x)
z = self.sample_indicator(like)
self.update(x, z)
aprec += self.precisions
aweights += self.weights
ameans += self.means
aprec /= niter
ameans /= niter
aweights /= niter
return aweights, ameans, aprec
def probability_under_prior(self):
"""
Compute the probability of the current parameters of self
given the priors
"""
p0 = 1
p0 = dirichlet_eval(self.weights, self.prior_weights)
for k in range(self.k):
mp = np.reshape(self.precisions[k] * self.prior_shrinkage[k],
(self.dim, self.dim))
p0 *= normal_eval(self.prior_means[k], mp, self.means[k])
p0 *= wishart_eval(self.prior_dof[k], self.prior_scale[k],
self.precisions[k], dV=self._dets[k],
dW=self._detp[k], piV=self._inv_prior_scale[k])
return p0
def conditional_posterior_proba(self, x, z, perm=None):
"""
Compute the probability of the current parameters of self
given x and z
Parameters
----------
x: array of shape (nb_samples, dim),
the data from which bic is computed
z: array of shape (nb_samples), type = np.int,
the corresponding classification
        perm: array of shape (nperm, self.k), type=np.int, optional
all permutation of z under which things will be recomputed
By default, no permutation is performed
"""
pop = self.pop(z)
rpop = (pop + (pop == 0)).astype(np.float)
dof = self.prior_dof + pop + 1
shrinkage = self.prior_shrinkage + pop
weights = pop + self.prior_weights
        # initialize the posterior probability
if perm is None:
pp = dirichlet_eval(self.weights, weights)
else:
pp = np.array([dirichlet_eval(self.weights[pj], weights)
for pj in perm])
for k in range(self.k):
m1 = np.sum(x[z == k], 0)
#0. Compute the empirical means
empmeans = m1 / rpop[k]
#1. the precisions
dx = np.reshape(x[z == k] - empmeans, (pop[k], self.dim))
dm = np.reshape(empmeans - self.prior_means[k], (1, self.dim))
addcov = np.dot(dm.T, dm) * self.prior_shrinkage[k]
covariance = self._inv_prior_scale[k] + np.dot(dx.T, dx) + addcov
scale = inv(covariance)
_dets = detsh(scale)
#2. the means
means = m1 + self.prior_means[k] * self.prior_shrinkage[k]
means /= shrinkage[k]
#4. update the posteriors
if perm is None:
pp *= wishart_eval(
dof[k], scale, self.precisions[k],
dV=_dets, dW=self._detp[k], piV=covariance)
else:
for j, pj in enumerate(perm):
pp[j] *= wishart_eval(
dof[k], scale, self.precisions[pj[k]], dV=_dets,
dW=self._detp[pj[k]], piV=covariance)
mp = scale * shrinkage[k]
_dP = _dets * shrinkage[k] ** self.dim
if perm is None:
pp *= normal_eval(means, mp, self.means[k], dP=_dP)
else:
for j, pj in enumerate(perm):
pp[j] *= normal_eval(
means, mp, self.means[pj[k]], dP=_dP)
return pp
def evidence(self, x, z, nperm=0, verbose=0):
"""
See bayes_factor(self, x, z, nperm=0, verbose=0)
"""
        return self.bayes_factor(x, z, nperm, verbose)
def bayes_factor(self, x, z, nperm=0, verbose=0):
"""
Evaluate the Bayes Factor of the current model using Chib's method
Parameters
----------
x: array of shape (nb_samples,dim)
the data from which bic is computed
z: array of shape (nb_samples), type = np.int
the corresponding classification
nperm=0: int
the number of permutations to sample
to model the label switching issue
in the computation of the Bayes Factor
By default, exhaustive permutations are used
verbose=0: verbosity mode
Returns
-------
bf (float) the computed evidence (Bayes factor)
Notes
-----
See: Marginal Likelihood from the Gibbs Output
Journal article by <NAME>;
Journal of the American Statistical Association, Vol. 90, 1995
"""
niter = z.shape[1]
p = []
perm = generate_perm(self.k)
if nperm > perm.shape[0]:
nperm = perm.shape[0]
for i in range(niter):
if nperm == 0:
temp = self.conditional_posterior_proba(x, z[:, i], perm)
p.append(temp.mean())
else:
drand = np.argsort(np.random.rand(perm.shape[0]))[:nperm]
temp = self.conditional_posterior_proba(x, z[:, i],
perm[drand])
p.append(temp.mean())
p = np.array(p)
mp = np.mean(p)
p0 = self.probability_under_prior()
like = self.likelihood(x)
bf = np.log(p0) + np.sum(np.log(np.sum(like, 1))) - np.log(mp)
if verbose:
print(np.log(p0), np.sum(np.log(np.sum(like, 1))), np.log(mp))
return bf
# ---------------------------------------------------------
# --- Variational Bayes inference -------------------------
# ---------------------------------------------------------
class VBGMM(BGMM):
"""
Subclass of Bayesian GMMs (BGMM)
that implements Variational Bayes estimation of the parameters
"""
def __init__(self, k=1, dim=1, means=None, precisions=None,
weights=None, shrinkage=None, dof=None):
BGMM.__init__(self, k, dim, means, precisions, weights, shrinkage, dof)
self.scale = self.precisions.copy()
def _Estep(self, x):
"""VB-E step
Parameters
----------
x array of shape (nb_samples,dim)
the data used in the estimation process
Returns
-------
like: array of shape(nb_samples,self.k),
component-wise likelihood
"""
n = x.shape[0]
like = np.zeros((n, self.k))
from scipy.special import psi
spsi = psi(np.sum(self.weights))
for k in range(self.k):
# compute the data-independent factor first
w0 = psi(self.weights[k]) - spsi
w0 += 0.5 * np.log(detsh(self.scale[k]))
w0 -= self.dim * 0.5 / self.shrinkage[k]
w0 += 0.5 * np.log(2) * self.dim
for i in range(self.dim):
w0 += 0.5 * psi((self.dof[k] - i) / 2)
m = np.reshape(self.means[k], (1, self.dim))
b = self.dof[k] * self.scale[k]
q = np.sum(np.dot(m - x, b) * (m - x), 1)
w = w0 - q / 2
w -= 0.5 * np.log(2 * np.pi) * self.dim
like[:, k] = np.exp(w)
if like.min() < 0:
raise ValueError('Likelihood cannot be negative')
return like
def evidence(self, x, like=None, verbose=0):
"""computation of evidence bound aka free energy
Parameters
----------
x array of shape (nb_samples,dim)
the data from which evidence is computed
like=None: array of shape (nb_samples, self.k), optional
component-wise likelihood
If None, it is recomputed
verbose=0: verbosity model
Returns
-------
ev (float) the computed evidence
"""
from scipy.special import psi
from numpy.linalg import inv
tiny = 1.e-15
if like is None:
like = self._Estep(x)
like = (like.T / np.maximum(like.sum(1), tiny)).T
pop = like.sum(0)[:self.k]
pop = np.reshape(pop, (self.k, 1))
spsi = psi(np.sum(self.weights))
empmeans = np.dot(like.T[:self.k], x) / np.maximum(pop, tiny)
F = 0
# start with the average likelihood term
for k in range(self.k):
# compute the data-independent factor first
Lav = psi(self.weights[k]) - spsi
            Lav -= np.sum(like[:, k] * np.log(np.maximum(like[:, k], tiny)))
"""
A few helper functions for brownian-manifold
"""
import numpy as np
def vector_cross(v,w):
"""
helper to compute the cross product
of two vectors.
Parameters
----------
    v: array, the first 3-vector
    w: array, the second 3-vector
Returns
-------
v_cross_w: array, the resulting cross product of
v and w (v X w)
"""
v_cross_w = np.array([v[1]*w[2] - v[2]*w[1],
v[2]*w[0] - v[0]*w[2],
v[0]*w[1] - v[1]*w[0]])
return v_cross_w
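# Example (added note): the cross product of the unit x- and y-axes gives the
# z-axis, e.g. vector_cross([1, 0, 0], [0, 1, 0]) -> array([0, 0, 1]).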
def arctan2(y,x):
"""
helper to compute
the azimuth angle when converting from Cartesian coordinates
to other coordinate systems
Given an array of two coordinates (y,x) representing
points on a plane, this function computes angles (in radians)
mapped to range [0,2*pi)
Parameters
----------
y: array
x: array
Returns
-------
theta: array, the resulting angle:
arctan(y/x) --inverse tangent
mapped to range [0,2*pi)
"""
theta = np.asarray(np.arctan2(y,x))
    for i in range(theta.size):
if theta[i] < 0:
theta[i]+=2*np.pi
return theta
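# Example (added note): np.arctan2(-1.0, 0.0) is -pi/2, so after the wrapping loop
# above arctan2(np.array([-1.0]), np.array([0.0])) comes out as roughly 3*pi/2,
# i.e. angles are reported in [0, 2*pi) rather than (-pi, pi].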
def surface_sphere(radius):
"""
"""
phi, theta = np.mgrid[0.0:np.pi:100j, 0.0:2.0*np.pi:100j]
x_blank_sphere = radius*np.sin(phi)*np.cos(theta)
y_blank_sphere = radius*np.sin(phi)*np.sin(theta)
z_blank_sphere = radius*np.cos(phi)
sphere_surface = np.array(([x_blank_sphere,
y_blank_sphere,
z_blank_sphere]))
return sphere_surface
def surface_cylinder(radius, height):
"""
"""
p0 = np.array([0, 0, height]) #point at one end
p1 = np.array([0, 0, -height]) #point at other end
#vector in direction of axis
v = p1 - p0
#find magnitude of vector
mag = np.linalg.norm(v)
#unit vector in direction of axis
v = v / mag
#make some vector not in the same direction as v
not_v = np.array([1, 0, 0])
if (v == not_v).all():
not_v = np.array([0, 1, 0])
#make vector perpendicular to v
n1 = np.cross(v, not_v)
#normalize n1
n1 /= np.linalg.norm(n1)
#make unit vector perpendicular to v and n1
n2 = np.cross(v, n1)
#surface ranges over t from 0 to length of axis and 0 to 2*pi
    t = np.linspace(0, mag, 2)
__author__ = "<NAME>"
__copyright__ = "Copyright 2018-2020 <NAME>"
__license__ = "BSD 3-clause"
__version__ = "0.2.0"
__email__ = "<EMAIL>"
import pandas
import os
import numpy as np
import pickle
import argparse
from glove_loader import SEPARATORS, STOPWORDS, REPLACINGS
def save_embeddings(dataframe_path, vocabulary_path, embeddings_path, mode='texts', type='bow'):
df = pandas.read_pickle(dataframe_path)
    vocabulary_list = np.load(vocabulary_path)
from __future__ import print_function, division, absolute_import
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import numpy as np
import six.moves as sm
import imgaug as ia
# TODO add tests for:
# hooks is_activated
# hooks is_propagating
# hooks preprocess
# hooks postprocess
# HeatmapsOnImage.__init__()
# HeatmapsOnImage.get_arr()
# HeatmapsOnImage.to_uint8()
# HeatmapsOnImage.from_0to1()
# HeatmapsOnImage.copy()
# HeatmapsOnImage.deepcopy()
class TestHeatmapsOnImage_draw(unittest.TestCase):
def test_basic_functionality(self):
heatmaps_arr = np.float32([
[0.5, 0.0, 0.0, 0.5],
[0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.5, 0.0, 0.0, 0.5],
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_drawn = heatmaps.draw()[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 1]
v2 = heatmaps_drawn[0, 0]
v3 = heatmaps_drawn[1, 1]
v1_coords = [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1),
(3, 2)]
v2_coords = [(0, 0), (0, 3), (3, 0), (3, 3)]
v3_coords = [(1, 1), (1, 2), (2, 1), (2, 2)]
for y, x in v1_coords:
assert np.allclose(heatmaps_drawn[y, x], v1)
for y, x in v2_coords:
assert np.allclose(heatmaps_drawn[y, x], v2)
for y, x in v3_coords:
assert np.allclose(heatmaps_drawn[y, x], v3)
def test_use_size_arg_with_different_shape_than_heatmap_arr_shape(self):
# size differs from heatmap array size
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_drawn = heatmaps.draw(size=(4, 4))[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 0]
v2 = heatmaps_drawn[0, -1]
for y in sm.xrange(4):
for x in sm.xrange(2):
assert np.allclose(heatmaps_drawn[y, x], v1)
for y in sm.xrange(4):
for x in sm.xrange(2, 4):
assert np.allclose(heatmaps_drawn[y, x], v2)
# TODO test other cmaps
class TestHeatmapsOnImage_draw_on_image(unittest.TestCase):
@property
def heatmaps(self):
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
def test_cmap_is_none(self):
heatmaps = self.heatmaps
image = np.uint8([
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, cmap=None)[0]
assert heatmaps_drawn.shape == (4, 4, 3)
        assert np.all(heatmaps_drawn[0:4, 0:2, :] == 0)
import importlib
import numpy as np
import torch
import torch.nn.functional as F
from skimage import measure
from unet3d.losses import compute_per_channel_dice, expand_as_one_hot
from unet3d.utils import get_logger, adapted_rand
LOGGER = get_logger('EvalMetric')
SUPPORTED_METRICS = ['dice', 'iou', 'boundary_ap', 'dt_ap', 'quantized_dt_ap', 'angle', 'inverse_angular']
class DiceCoefficient:
"""Computes Dice Coefficient.
Generalized to multiple channels by computing per-channel Dice Score
    (as described in https://arxiv.org/pdf/1707.03237.pdf) and then simply taking the average.
Input is expected to be probabilities instead of logits.
This metric is mostly useful when channels contain the same semantic class (e.g. affinities computed with different offsets).
DO NOT USE this metric when training with DiceLoss, otherwise the results will be biased towards the loss.
"""
def __init__(self, epsilon=1e-5, ignore_index=None, **kwargs):
self.epsilon = epsilon
self.ignore_index = ignore_index
def __call__(self, input, target):
"""
:param input: 5D probability maps torch tensor (NxCxDxHxW)
:param target: 4D or 5D ground truth torch tensor. 4D (NxDxHxW) tensor will be expanded to 5D as one-hot
:return: Soft Dice Coefficient averaged over all channels/classes
"""
# Average across channels in order to get the final score
return torch.mean(compute_per_channel_dice(input, target, epsilon=self.epsilon, ignore_index=self.ignore_index))
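# Note (added, hedged): per channel the soft Dice score used above follows
#   Dice = 2 * |A . B| / (|A| + |B|)
# (with the exact denominator depending on the variant implemented in
# compute_per_channel_dice); a value of 1.0 means perfect overlap.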
class MeanIoU:
"""
Computes IoU for each class separately and then averages over all classes.
"""
def __init__(self, skip_channels=(), ignore_index=None, **kwargs):
"""
:param skip_channels: list/tuple of channels to be ignored from the IoU computation
:param ignore_index: id of the label to be ignored from IoU computation
"""
self.ignore_index = ignore_index
self.skip_channels = skip_channels
def __call__(self, input, target):
"""
:param input: 5D probability maps torch float tensor (NxCxDxHxW)
:param target: 4D or 5D ground truth torch tensor. 4D (NxDxHxW) tensor will be expanded to 5D as one-hot
:return: intersection over union averaged over all channels
"""
assert input.dim() == 5
n_classes = input.size()[1]
if target.dim() == 4:
target = expand_as_one_hot(target, C=n_classes, ignore_index=self.ignore_index)
# batch dim must be 1
input = input[0]
target = target[0]
assert input.size() == target.size()
binary_prediction = self._binarize_predictions(input, n_classes)
if self.ignore_index is not None:
# zero out ignore_index
mask = target == self.ignore_index
binary_prediction[mask] = 0
target[mask] = 0
# convert to uint8 just in case
binary_prediction = binary_prediction.byte()
target = target.byte()
per_channel_iou = []
for c in range(n_classes):
if c in self.skip_channels:
continue
per_channel_iou.append(self._jaccard_index(binary_prediction[c], target[c]))
assert per_channel_iou, "All channels were ignored from the computation"
return torch.mean(torch.tensor(per_channel_iou))
def _binarize_predictions(self, input, n_classes):
"""
Puts 1 for the class/channel with the highest probability and 0 in other channels. Returns byte tensor of the
same size as the input tensor.
"""
if n_classes == 1:
# for single channel input just threshold the probability map
result = input > 0.5
return result.long()
_, max_index = torch.max(input, dim=0, keepdim=True)
return torch.zeros_like(input, dtype=torch.uint8).scatter_(0, max_index, 1)
def _jaccard_index(self, prediction, target):
"""
Computes IoU for a given target and prediction tensors
"""
return torch.sum(prediction & target).float() / torch.sum(prediction | target).float()
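    # Note (added): for binary byte tensors the expression above is the standard
    # intersection-over-union, |A & B| / |A | B|, computed with bitwise operations.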
class AdaptedRandError:
def __init__(self, all_stats=False, **kwargs):
self.all_stats = all_stats
def __call__(self, input, target):
return adapted_rand(input, target, all_stats=self.all_stats)
class BoundaryAdaptedRandError:
def __init__(self, threshold=0.4, use_last_target=False, use_first_input=False, invert_pmaps=True, **kwargs):
self.threshold = threshold
self.use_last_target = use_last_target
self.use_first_input = use_first_input
self.invert_pmaps = invert_pmaps
def __call__(self, input, target):
if isinstance(input, torch.Tensor):
assert input.dim() == 5
# convert to numpy array
input = input[0].detach().cpu().numpy() # 4D
if isinstance(target, torch.Tensor):
if not self.use_last_target:
assert target.dim() == 4
# convert to numpy array
target = target[0].detach().cpu().numpy() # 3D
else:
# if use_last_target == True the target must be 5D (NxCxDxHxW)
assert target.dim() == 5
target = target[0, -1].detach().cpu().numpy() # 3D
if isinstance(input, np.ndarray):
assert input.ndim == 4
if isinstance(target, np.ndarray):
assert target.ndim == 3
if self.use_first_input:
# compute only on the first input channel
n_channels = 1
else:
n_channels = input.shape[0]
per_channel_arand = []
for c in range(n_channels):
predictions = input[c]
# threshold probability maps
predictions = predictions > self.threshold
if self.invert_pmaps:
# for connected component analysis we need to treat boundary signal as background
# assign 0-label to boundary mask
predictions = np.logical_not(predictions)
predictions = predictions.astype(np.uint8)
# run connected components on the predicted mask; consider only 1-connectivity
predicted = measure.label(predictions, background=0, connectivity=1)
# make sure that target is 'int' type as well
target = target.astype(np.int64)
# compute AdaptedRand error
arand = adapted_rand(predicted, target)
per_channel_arand.append(arand)
# get minimum AdaptedRand error across channels
        min_arand, c_index = np.min(per_channel_arand), np.argmin(per_channel_arand)  # argmin assumed to recover the channel index
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - <EMAIL>
#
# twitter : @edonyzpc
#
# Last modified: 2015-12-08 21:09
#
# Filename: prec2iter.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
fig = plt.figure('Precision VS Iterations')
ax = fig.add_subplot(111)
ax1 = fig.add_subplot(111)
ax2 = fig.add_subplot(111)
x = np.arange(3000, 10000, 100)
x1 = np.linspace(3000, 5000, 21)
x2 = np.linspace(5100, 10000, 49)
y1 = 1000000.0/(((x1-2500)/1.2)**3) + 0.04 #cone
y1_2 = np.exp((x2)/(1000*np.exp(np.pi)))/30.0 - 0.0024
y2 = 200000.0/(x**2) + 0.03 #suntou
y3 = 100000.0/(3*(x**2)) + 0.03#plane
y1 = [i+np.random.normal(-0.02, 0.04)/150.0 for i in y1]
y1_2 = [i+np.random.normal(-0.02, 0.04)/135.0 for i in y1_2]
y2 = [i+np.random.normal(-0.02, 0.04)/100.0 for i in y2]
y3 = [i + np.random.normal(-0.02, 0.04)/100.0 for i in y3]  # noise scale assumed, matching y2 above
import argparse
import os
import sys
import cv2
import numpy as np
import tensorflow as tf
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
import plates
def get_corner_points(r):
masks = r['masks']
if masks.shape[2] == 0:
return None
    # Keep only the largest mask
index = np.argmax(np.sum(np.sum(masks, axis=1), axis=0))
masks = np.moveaxis(masks, -1, 0)
bigger_mask = masks[index]
    bigger_mask = np.array(bigger_mask * 255)
import numpy as np
from scipy.optimize import curve_fit
from scipy.optimize import fsolve, brentq
from scipy.interpolate import interp1d
import scipy.integrate
import sys
import os
import velociraptor_python_tools as vpt
from scipy.spatial import cKDTree
import h5py
import re
from constants import *
from snapshot import *
import copy
import itertools
import time
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
def getHaloCoord(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h
coords = np.zeros(3)
if (('Xcminpot' not in catalog.keys())):# or
# (np.abs(catalog['Xcminpot'][halo])>0.1) or
# (np.abs(catalog['Ycminpot'][halo])>0.1) or
# (np.abs(catalog['Zcminpot'][halo])>0.1)):
return getHaloCoordCOM(catalog, halo, z=z, snapshottype=snapshottype, physical=physical)
if physical:
coords[0] = (catalog['Xcminpot'][halo])
coords[1] = (catalog['Ycminpot'][halo])
coords[2] = (catalog['Zcminpot'][halo])
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
coords[0] = (catalog['Xcminpot'][halo])*h*(1+z)
coords[1] = (catalog['Ycminpot'][halo])*h*(1+z)
coords[2] = (catalog['Zcminpot'][halo])*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
coords[0] = (catalog['Xcminpot'][halo])*(1+z)
coords[1] = (catalog['Ycminpot'][halo])*(1+z)
coords[2] = (catalog['Zcminpot'][halo])*(1+z)
else:
print('Snapshottype not set')
return coords
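# Note (added, hedged): the factors above convert VELOCIraptor positions to
# comoving Mpc/h; GADGET-style catalogues get both the h and (1+z) factors,
# while SWIFT catalogues only need (1+z), presumably because the h factor is
# already included in their length units.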
def getHaloRadius(catalog, halo, z=0, rtype='R_200crit', snapshottype='GADGET', physical=False): #Mpc/h
if physical:
return catalog[rtype][halo]
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
return catalog[rtype][halo]*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
return catalog[rtype][halo]*(1+z)
def getHaloCoordCOM(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h
coords = np.zeros(3)
if physical:
coords[0] = catalog['Xc'][halo]
coords[1] = catalog['Yc'][halo]
coords[2] = catalog['Zc'][halo]
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
coords[0] = catalog['Xc'][halo]*h*(1+z)
coords[1] = catalog['Yc'][halo]*h*(1+z)
coords[2] = catalog['Zc'][halo]*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
coords[0] = catalog['Xc'][halo]*(1+z)
coords[1] = catalog['Yc'][halo]*(1+z)
coords[2] = catalog['Zc'][halo]*(1+z)
return coords
def readHaloFile(halofile):
atime,tree,numhalos,halodata,cosmodata,unitdata = vpt.ReadUnifiedTreeandHaloCatalog(halofile, desiredfields=[], icombinedfile=1,iverbose=0)
return atime,tree,numhalos,halodata,cosmodata,unitdata
def findSurroundingHaloProperties(hp, halolist, d_snap, boxsize=32.):
coords = hp['Coord']
halotree = cKDTree(coords, boxsize=boxsize)
for k in halolist:
if hp['R200'][k] == -1:
continue
halostring = hp['HaloIndex'][k]
length_of_neighbours = len(np.array(halotree.query_ball_point([hp['Coord'][k]], r=hp['R200'][k]*5)[0]))
distance, indices = halotree.query([hp['Coord'][k]], k=length_of_neighbours)
indices = np.array(indices[0])[1:]
distance = np.array(distance[0])[1:]
hp['Neighbours'][halostring] = hp['HaloIndex'][indices]
hp['Neighbour_distance'][halostring] = distance
hp['Neighbour_Velrad'][halostring] = np.zeros(len(distance))
j=0
for i in indices:
partindices = hp['Partindices'][hp['HaloIndex'][i]]
hp['Neighbour_Velrad'][halostring][j] = np.sum(d_snap['File'].get_radialvelocity(hp['Coord'][k], indices=partindices))/len(partindices)
j+=1
def fixSatelliteProblems(hp, TEMPORALHALOIDVAL=1000000000000, boxsize=32):
welke = np.where(hp['Coord'][:, 0] >= 0)[0]
halotree = cKDTree(hp['Coord'][welke], boxsize=boxsize)
toolarge = welke[np.where(hp['R200'][welke] > hp['R200'][np.argmax(hp['n_part'])]*1.2)[0]]
#print(i, toolarge)
if len(toolarge) != 0:
for tl in toolarge:
hp['M200'][tl] = -1
hp['R200'][tl] = -1
hp['hostHaloIndex'][hp['HaloIndex'][tl]==hp['hostHaloIndex']] = -2
for halo in welke:#range(len(hp['M200'])):
if hp['M200'][halo] == -1:
continue
buren = np.array(halotree.query_ball_point(hp['Coord'][halo], r = 2*hp['R200'][halo]))
if len(buren) <= 1:
continue
buren = buren[hp['R200'][buren] != -1]
if len(buren) == 0:
continue
i_largest = np.argmax(hp['n_part'][buren])
index_largest = buren[i_largest]
buren = np.delete(buren,i_largest)
coords = hp['Coord'][buren] - hp['Coord'][index_largest]
coords = np.where(np.abs(coords) > 0.5*boxsize, coords - coords/np.abs(coords)*boxsize, coords)
rad = np.sqrt(np.sum(coords*coords, axis=1))
burentemp = np.where(hp['R200'][buren]-rad+hp['R200'][index_largest] > 0)[0]
if len(burentemp) == 0:
continue
buren = buren[burentemp]
hp['hostHaloIndex'][buren] = index_largest
hp['M200'][buren] = -1
hp['R200'][buren] = -1
def findSubHaloFraction(hp, catalog):
if len(hp['hostHaloIndex']) < 10:
hp['Msub'] = np.zeros(len(hp['M200']))
return 0
i_hostH = np.where(hp['hostHaloIndex'] > -1)[0]
hp['Msub'] = np.zeros(len(hp['M200']))
for i in i_hostH:
isattemp = np.where(hp['HaloID'][i] == catalog['ID'])[0]
hp['Msub'][hp['hostHaloIndex'][i]] += catalog['Mass_FOF'][isattemp]
def buildHaloDictionary(Hydro=None, partType=None, multiple=None):
if ('DM' in partType) or ('H' in partType) or ('S' in partType):
return buildHaloDictionary_nieuw(partType=partType, multiple=multiple)
haloproperties = {}
if partType is None:
if Hydro is None:
sys.exit("buildHaloDictionary should have an entry for either Hydro or partType")
if partType is not None:
if partType in [0, 2, 3, 4, 5]:
sys.exit("Bestaat nog niet voor partType = %i" %partType)
elif partType == 7:
Hydro = True
elif partType == 8:
Hydro = True
haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vmax', 'Rmax',
'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable'])
if Hydro:
haloarray.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH',
'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH'])
if partType == 8:
haloarray.extend(['lambdaS', 'DensityS',
'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
for key in haloarray:
if (multiple is not None) and (key=='Partindices'):
haloproperties[key] = {}
else:
haloproperties[key] = np.zeros(0)
return haloproperties
def allocateSizes(key, lengte):
if key in ['R200', 'M200', 'redshift', 'lambda', 'Vmax', 'Rmax', 'Vmax_part', 'Rmax_part', 'Vmax_interp', 'Rmax_interp',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'lambdaDM', 'lambdaH',
'DMFraction', 'HFraction', 'lambdaS', 'SFraction']:
return np.ones(lengte[0])*-1
if key in ['HaloIndex', 'HaloID', 'snapshot', 'Npart', 'NpartDM', 'NpartH','NpartS',
'n_part', 'MaxRadIndex', 'hostHaloIndex', 'Tail', 'Head',
'RootHead', 'RootTail']:
return np.ones(lengte[0]).astype(int)*-1
elif key in ['Coord', 'Vel']:
return np.ones((lengte[0], 3))*-1
elif key in ['Density', 'AngularMomentum', 'Velrad', 'Mass_profile',
'DensityDM', 'DensityH', 'DMFraction_profile', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH', 'lambdaS', 'DensityS',
'SFraction_profile', 'MassS_profile','VelradB', 'VelradS', 'AgeS', 'AngularMomentumS']:
return np.zeros((lengte[0], lengte[1]))
elif key in ['Npart_profile', 'NpartDM_profile', 'NpartH_profile', 'NpartS_profile']:
return np.zeros((lengte[0], lengte[1])).astype(int)
def buildHaloDictionary_nieuw(partType=None, multiple=None):
haloproperties = {}
if partType is None:
sys.exit("buildHaloDictionary should have an entry for partType")
haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vmax', 'Rmax',
'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'Tail', 'Head', 'Vmax_part', 'Rmax_part',
'Vmax_interp', 'Rmax_interp', 'RootHead', 'RootTail'])
if 'H' in partType:
haloarray.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH', 'NpartDM_profile','NpartH', 'NpartDM',
'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH'])
if 'S' in partType:
haloarray.extend(['lambdaS', 'DensityS', 'NpartS',
'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
for key in haloarray:
if (multiple is not None) and (key=='Partindices'):
haloproperties[key] = {}
elif multiple is not None:
haloproperties[key] = allocateSizes(key, multiple)
else:
haloproperties[key] = None
return haloproperties
def quantity_keys():
return (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Npart', 'NpartDM',
'NpartH', 'NpartS', 'Vel', 'n_part', 'Tail', 'Head', 'RootHead', 'RootTail',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'lambdaDM', 'lambdaH',
'lambdaS', 'DMFraction', 'HFraction', 'SFraction',
'Vmax_part', 'Rmax_part', 'Vmax_interp', 'Rmax_interp'])
def profile_keys():
return (['HaloIndex', 'HaloID', 'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'MassTable',
'Mass_profile', 'MaxRadIndex', 'Density', 'DensityDM', 'DensityH', 'NpartH_profile', 'DMFraction_profile',
'HFraction_profile', 'MassH_profile', 'MassDM_profile', 'VelradDM', 'VelradH', 'Temperature',
'AngularMomentumDM', 'AngularMomentumH', 'NpartS_profile', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
def convertVel_keys():
return (['HaloIndex', 'HaloID', 'Npart', 'NpartDM', 'NpartH', 'NpartS', 'n_part', 'Vel', 'Coord', 'R200', 'M200',
'Tail', 'Head', 'RootHead', 'RootTail', 'redshift', 'snapshot', 'hostHaloIndex'])
def findHaloPropertiesInSnap_nieuw(catalog, d_snap, Nhalo=100, halolist=None,
startHalo=0, d_radius=None, d_partType = None, d_runparams=None,
partdata = None, TEMPORALHALOIDVAL=1000000000000, boxsize=None, debug=False):
#Keeping all VELOCIraptor haloes, but saving 'wrong' haloes as HaloIndex = -1
if d_runparams['VELconvert'] == False:
boxsize = d_snap['File'].boxsize
partType = d_partType['particle_type']
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, d_snap['snapshot']))
if 'profile' in d_radius.keys():
ylen = len(d_radius['profile'])
else:
ylen = 0
haloproperties = buildHaloDictionary(partType=partType, multiple=[Nhalo, ylen])
if len(catalog['Mass_200crit']) == 0:
return haloproperties
# if (d_runparams['VELconvert'] == False):
# sortorder = np.argsort(catalog['Mass_tot'][:])[::-1]
# sortorderinvert = np.argsort(sortorder)
# for key in catalog.keys():
# catalog[key][:] = catalog[key][sortorder]
# else:
#sortorder = np.arange(len(catalog['Mass_tot'])).astype(int)
# if partdata is not None:
# for key in partdata.keys():
# partdata[key][:] = partdata[key][sortorder]
if halolist is None:
haloindices = np.arange(startHalo, startHalo+Nhalo).astype(int)
use_existing_r200 = False
else:
haloindices = (halolist%TEMPORALHALOIDVAL - 1).astype(int)
use_existing_r200 = False
halo_i = -1
for halo in haloindices:
halo_i += 1
#if halolist is not None:
# print('Computing properties for halo %i'%halo)
if halo%10000==0:
print('Computing properties for halo %i-%i' %(halo, halo+10000))
if halo > len(catalog['Xc'])-1:
print("Nhalo > N(velociraptor haloes)")
break
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_runparams['SnapshotType'],
physical=d_runparams['Physical'])
coords = coords%boxsize
radhier = getHaloRadius(catalog, halo_i, z=d_snap['redshift'],
rtype = d_radius['Rchoice'], snapshottype=d_runparams['SnapshotType'],
physical=d_runparams['Physical'])
satellite = False
#Trusting VELOCIraptor not to falsely identify haloes as satellites
if (halolist is None) and (catalog['hostHaloID'][halo_i] != -1):
satellite = True
hostHaloIDtemp = np.where(catalog['hostHaloID'][halo_i]==catalog['ID'])[0]
if len(hostHaloIDtemp) == 0:
hostHaloIDtemp = -2
else:
hostHaloIDtemp = hostHaloIDtemp[0]
else:
hostHaloIDtemp = -1
#All happens here
if debug:
start_time = time.time()
print('M200: ', catalog['Mass_200crit'][halo_i])
print('R200: ', catalog['R_200crit'][halo_i])
print('ID: ', catalog['ID'][halo_i])
if d_runparams['VELconvert']:
if d_runparams['ParticleDataType'] != 'None':
halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'],
partType=partType, particledata=partdata['Particle_Types'], d_partType=d_partType)
else:
halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'],
partType=partType)
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
elif d_runparams['ParticleDataType'] == 'None':
#print("Halo", halo)
halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius,
partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200,
profiles=d_runparams['Profiles'], quantities=d_runparams['Quantities'], debug=debug)
else:
#print("Halo", halo,len(partdata['Particle_IDs'][sortorder[halo]]))
halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius,
partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200,
profiles=d_runparams['Profiles'], quantities=d_runparams['Quantities'], debug=debug,
particledata=partdata['Particle_IDs'][halo_i])
if halopropertiestemp is None:
if debug:
print("De halo is leeg???")
continue
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed')
start_time = time.time()
if d_runparams['TreeData']:
halopropertiestemp['Tail'] = catalog['Tail'][halo_i]-1
halopropertiestemp['Head'] = catalog['Head'][halo_i]-1
halopropertiestemp['RootTail'] = catalog['RootTail'][halo_i]-1
halopropertiestemp['RootHead'] = catalog['RootHead'][halo_i]-1
if d_runparams['VELconvert'] == False:
if halopropertiestemp is None:
halopropertiestemp = buildHaloDictionary(partType=partType)
halopropertiestemp['HaloID'] = catalog['ID'][halo_i]
halopropertiestemp['HaloIndex'] = -1
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
halopropertiestemp['Coord'] = coords
else:
if satellite:
halopropertiestemp['Npart'] = catalog['npart'][halo_i]
halopropertiestemp['n_part'] = catalog['npart'][halo_i]
halopropertiestemp['HaloID'] = catalog['ID'][halo_i]
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
if not satellite:
afstandtemp = coords - getHaloCoordCOM(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_runparams['SnapshotType'], physical=d_runparams['Physical'])
rhier = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
halopropertiestemp['COM_offset'] = np.sqrt(np.sum(rhier**2))/halopropertiestemp['R200']
halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km /
np.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/
halopropertiestemp['R200']))*s_to_yr/1.e6
else:
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
for key in haloproperties.keys():
doorgaan = False
if (d_runparams['Profiles'] == True) and (key in profile_keys()):
doorgaan = True
if (d_runparams['Quantities'] == True) and (key in quantity_keys()):
doorgaan = True
if (d_runparams['VELconvert'] == True) and (key in convertVel_keys()):
doorgaan = True
if doorgaan == False:
continue
if key in ['Radius', 'MassTable', 'snapshot', 'redshift']:
continue
elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
continue
if (halopropertiestemp['HaloIndex'] == -1) and (key != 'HaloID'):
continue
if halopropertiestemp[key] is None:
continue
elif key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
else:
haloproperties[key][halo] = halopropertiestemp[key]
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated')
if 'profile' in d_radius.keys():
haloproperties['Radius'] = d_radius['profile']
haloproperties['redshift'] = np.array([d_snap['redshift']])
haloproperties['snapshot'] = np.array([d_snap['snapshot']])
j = 0
if d_runparams['VELconvert'] == False:
haloproperties['MassTable'] = d_snap['File'].mass
for i in d_snap['File'].readParticles:
if haloproperties['MassTable'][i] == 0 and d_snap['File'].npart[i] != 0:
waar = np.where(d_snap['File'].partTypeArray == i)[0][0]
haloproperties['MassTable'][i] = d_snap['File'].masses[waar]
j += 1
if d_runparams['TreeData']:
haloproperties['Tail'] = haloproperties['Tail'].astype(int)
haloproperties['Head'] = haloproperties['Head'].astype(int)
haloproperties['RootTail'] = haloproperties['RootTail'].astype(int)
haloproperties['RootHead'] = haloproperties['RootHead'].astype(int)
if (len(haloproperties['Coord']) > 0) and (halolist is None):
if d_runparams['Quantities'] or d_runparams['VELconvert']:
print("Reassigning satellite haloes")
fixSatelliteProblems(haloproperties, boxsize=boxsize)
return haloproperties
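#Illustrative sketch (not from the original code) of the dictionaries this routine expects;
#the key names are taken from how they are accessed above, the example values are assumptions.
#d_runparams = {'VELconvert': False, 'SnapshotType': 'GADGET', 'Physical': False, 'ParticleDataType': 'None',
#               'Profiles': True, 'Quantities': True, 'TreeData': False}
#d_radius = {'Rchoice': 'R_200crit', 'Rfrac': 8, 'profile': np.logspace(-2, 0, 20)}
#d_partType = {'particle_type': ['DM', 'H'], 'particle_number': [1, 0]}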
def findHaloPropertiesInSnap(catalog, snappath, snapshot, partType=8, Nhalo=100,
startHalo=0, softeningLength=0.002, Radius=1., partlim=200, sortorder=None,
boxsize=32, TEMPORALHALOIDVAL=1000000000000, particledata=None, mass=False):
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, snapshot))
haloproperties = buildHaloDictionary(partType=partType, multiple=True)
if len(catalog['Mass_tot']) == 0:
return haloproperties
if sortorder is None:
sortorder = np.argsort(catalog['Mass_tot'][:])[::-1]
sortorderinvert = np.argsort(sortorder)
else:
sortorderinvert = np.argsort(sortorder)
d_snap = {}
d_snap['snapshot'] = snapshot
limiet = 0
d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=partType, softeningLength=softeningLength)
d_snap['File'].makeCoordTree()
for key in catalog.keys():
catalog[key][:] = catalog[key][sortorder]
for halo in range(startHalo, startHalo+Nhalo):
#start_time = time.time()
#print(halo)
#print(catalog['npart'][halo])
if halo%1000==0:
print('Computing properties for halo %i-%i' %(halo, halo+1000))
if halo > len(catalog['Xc'])-1:
print("Halo limit reached: nhalo = %i, hlim = %i" %(halo, limiet))
print("Coordinates: ", coords)
break
if limiet > 500: #Only computing sats
if catalog['hostHaloID'][halo] == -1:
continue
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
coords = coords%boxsize
radhier = getHaloRadius(catalog, halo, z=d_snap['File'].redshift)
satellite = False
if (catalog['npart'][halo] < 20) or (catalog['Mass_200crit'][halo]*h == 0):
startHalo += 1
# haloproperties['TreeBool'][halo] = 0
continue
		#Checking for disappeared host haloes
if (catalog['hostHaloID'][halo] != -1) and len(haloproperties['HaloID'])>1:
haloindextemp = np.where((haloproperties['HaloID']%TEMPORALHALOIDVAL)==catalog['hostHaloID'][halo]%TEMPORALHALOIDVAL)[0]
if len(haloindextemp) == 0:
hostHaloIDtemp = -1
if catalog['npart'][halo] < partlim/2.:
hostHaloIDtemp = -2
satellite = True
else:
afstandtemp = (haloproperties['Coord'][haloindextemp[0]]-coords)
afstandtemp = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
afstandtemp = (np.sum(afstandtemp*afstandtemp))**0.5
if afstandtemp < haloproperties['R200'][haloindextemp[0]]: # and catalog['npart'][halo] > 50:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = haloindextemp[0]
satellite = True
else:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = -1
else:
hostHaloIDtemp = -1
#All happens here
halopropertiestemp = findHaloProperties(d_snap, halo, coords, Radius, partType=partType,
rad=radhier, mass=mass, satellite=satellite, partlim=partlim)
#print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed')
if halopropertiestemp is None:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
if satellite == False and halopropertiestemp['Npart'] < partlim:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
limiet = 0
if satellite:
halopropertiestemp['Npart'] = catalog['npart'][halo]
#start_time = time.time()
halopropertiestemp['n_part'] = catalog['npart'][halo]
halopropertiestemp['HaloID'] = catalog['ID'][halo]
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
if not satellite:
afstandtemp = coords - getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
rhier = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
halopropertiestemp['COM_offset'] = np.sqrt(np.sum(rhier**2))/halopropertiestemp['R200']
halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km /
np.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/halopropertiestemp['R200']))*s_to_yr/1.e6
else:
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
for key in haloproperties.keys():
if key in ['TreeBool', 'Tail', 'Head', 'Radius', 'MassTable', 'snapshot', 'redshift']:
continue
elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
continue
elif key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
elif halo == startHalo:
haloproperties[key] = [halopropertiestemp[key]]
else:
haloproperties[key] = np.concatenate((haloproperties[key], [halopropertiestemp[key]]))
#print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated')
haloproperties['Radius'] = Radius
haloproperties['redshift'] = np.array([d_snap['File'].redshift])
haloproperties['snapshot'] = np.array([d_snap['snapshot']])
haloproperties['MassTable'] = d_snap['File'].mass
j = 0
for i in d_snap['File'].readParticles:
if haloproperties['MassTable'][i] == 0 and d_snap['File'].npart[i] != 0:
waar = np.where(d_snap['File'].partTypeArray == i)[0][0]
haloproperties['MassTable'][i] = d_snap['File'].masses[waar]
j += 1
findSubHaloFraction(haloproperties, catalog)
print("Reassigning satellite haloes")
if len(haloproperties['Coord']) > 0:
if 'DMFraction' in haloproperties.keys():
Hydro = True
else:
Hydro = False
fixSatelliteProblems(haloproperties, Hydro = Hydro)
#print("Computing subhalo fraction")
print(haloproperties.keys())
return haloproperties
def findHaloProperties(d_snap, halo, Coord, fixedRadius, r200fac = 8, partType=None, rad=None, satellite=False,
partlim=200, profiles=False, quantities=True, particledata=None, debug=False, use_existing_r200=False):
haloproperties = buildHaloDictionary(partType=partType)
if isinstance(fixedRadius, dict):
if 'profile' in fixedRadius.keys():
radprofile = fixedRadius['profile']
radfrac = fixedRadius['Rfrac']
else:
radfrac = fixedRadius['Rfrac']
else:
radprofile = fixedRadius
radfrac = r200fac
snap = d_snap['File']
haloproperties['HaloIndex'] = halo
haloproperties['HaloID'] = halo#catalog['ID'][halo]
snap.debug = debug
coord = Coord
if debug:
start_time = time.time()
if rad is None:
rad = fixedRadius[-1]
snap.get_temphalo(coord, rad, r200fac=radfrac, fixedRadius=radprofile, satellite=satellite,
particledata=particledata, partlim=partlim, initialise_profiles=profiles, use_existing_r200=use_existing_r200)
if len(snap.temphalo['indices']) < partlim or len(snap.temphalo['indices'])<=1:
if debug:
print('Halo has %i particles, and is thus too small' %len(snap.temphalo['indices']))
return None
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halo initiated', snap.temphalo['R200'])
if profiles:
if debug:
start_time = time.time()
snap.get_temphalo_profiles()
snap.get_specific_angular_momentum_radius(coord, radius=snap.temphalo['Radius'])
haloproperties['AngularMomentum'] = snap.temphalo['AngularMomentum']
haloproperties['Density'] = snap.temphalo['profile_density']
haloproperties['Velrad'] = snap.temphalo['profile_vrad']
haloproperties['Npart_profile'] = snap.temphalo['profile_npart']
haloproperties['Mass_profile'] = snap.temphalo['profile_mass']
haloproperties['MaxRadIndex'] = snap.temphalo['MaxRadIndex']
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halo profiles calculated')
haloproperties['Coord'] = snap.temphalo['Coord']
#Virial radius and mass
R200 = snap.temphalo['R200']
haloproperties['M200']= snap.temphalo['M200']
haloproperties['R200'] = R200
#Assigning halo properties
if quantities:
if debug:
start_time = time.time()
if (satellite == False) or (particledata is not None):
snap.get_spin_parameter()
haloproperties['lambda'] = snap.temphalo['lambda']
haloproperties['lambda'] = snap.temphalo['lambda']
snap.get_Vmax_Rmax()
haloproperties['Vmax_part'] = snap.temphalo['Vmax_part']
haloproperties['Rmax_part'] = snap.temphalo['Rmax_part']
haloproperties['Vmax_interp'] = snap.temphalo['Vmax_interp']
haloproperties['Rmax_interp'] = snap.temphalo['Rmax_interp']
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'lambda calculated')
haloproperties['Vel'] = snap.temphalo['Vel']
haloproperties['Partindices'] = snap.temphalo['indices']
haloproperties['Npart'] = len(haloproperties['Partindices'])
# if satellite == False:
# haloproperties['Virial_ratio'] = snap.get_virial_ratio(1000)
# else:
# haloproperties['Virial_ratio'] = -1
if debug:
start_time = time.time()
if len(snap.readParticles) > 1:
nietnulhier=np.where(haloproperties['Mass_profile']!=0)
for i_pT in range(len(snap.readParticles)):
if quantities:
if (satellite == False) or (particledata is not None):
haloproperties['lambda'+snap.namePrefix[i_pT]] = snap.temphalo['lambda'+snap.namePrefix[i_pT]]
else:
haloproperties['lambda'+snap.namePrefix[i_pT]] = -1
haloproperties['Npart'+snap.namePrefix[i_pT]] = snap.temphalo['Npart'+snap.namePrefix[i_pT]]
haloproperties[snap.namePrefix[i_pT]+'Fraction'] = snap.temphalo[snap.namePrefix[i_pT]+'Fraction']
if profiles:
haloproperties['AngularMomentum'+snap.namePrefix[i_pT]] = snap.temphalo['AngularMomentum'+snap.namePrefix[i_pT]]
haloproperties['Density'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'density']
haloproperties['Npart'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'npart']
haloproperties['Velrad'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'vrad']
haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'mass']
if snap.readParticles[i_pT] == 0:
haloproperties['Temperature'] = snap.temphalo['profile_temperature']
elif snap.readParticles[i_pT] == 5:
haloproperties['AgeS'] = snap.temphalo['profile_Sage']
haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'] = np.zeros_like(haloproperties['Mass_profile'])
haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'][nietnulhier] = haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'][nietnulhier]/haloproperties['Mass_profile'][nietnulhier]
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'particle types done')
if particledata is not None:
if debug:
start_time = time.time()
snap.delete_used_indices(snap.temphalo['indices'])
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'Deleted particles')
return haloproperties
def copyVELOCIraptor(catalog, halo, Coord, redshift, d_partType=None, partType=None, particledata=None):
c = constant(redshift=redshift)
c.change_constants(redshift)
comoving_rhocrit200 = deltaVir*c.rhocrit_Ms_Mpci3*h/(h*(1+redshift))**3
haloproperties = buildHaloDictionary(partType=partType)
haloproperties['HaloIndex'] = halo
haloproperties['HaloID'] = catalog['ID'][halo]
haloproperties['n_part'] = catalog['npart'][halo]
haloproperties['Coord'] = Coord
#Virial radius and mass
haloproperties['M200'] = catalog['Mass_200crit'][halo]*h
haloproperties['R200'] = (haloproperties['M200']*1.e10/(comoving_rhocrit200 * 4./3. * np.pi))**(1./3.)
#Assigning halo properties
haloproperties['Vel'] = np.array([catalog['VXc'][halo], catalog['VYc'][halo], catalog['VZc'][halo]])*(1+redshift)
haloproperties['Npart'] = catalog['npart'][halo]
if (particledata is not None) and (len(d_partType['particle_type']) > 1):
allpart = len(particledata[halo])
for i_pT in range(len(d_partType['particle_type'])):
if allpart == 0:
haloproperties['Npart'+d_partType['particle_type'][i_pT]] = 0
else:
haloproperties['Npart'+d_partType['particle_type'][i_pT]] = len(np.where(particledata[halo] == d_partType['particle_number'][i_pT])[0])
#print(d_partType['particle_type'][i_pT], d_partType['particle_number'][i_pT], haloproperties['Npart'+d_partType['particle_type'][i_pT]])
return haloproperties
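#Small self-contained check (not part of the original code) of the M200 -> R200 inversion used
#in copyVELOCIraptor above: R200 is the radius at which the mean enclosed density equals
#deltaVir times the comoving critical density. Names and units below are illustrative only.
def r200_from_m200_sketch(m200, comoving_rhocrit200):
	#invert M200 = 4/3 * pi * R200^3 * comoving_rhocrit200 (mass in the same units as rhocrit)
	return (m200/(comoving_rhocrit200 * 4./3. * np.pi))**(1./3.)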
def everythingOutside(haloproperties, d_snap):
allpin = np.zeros(0)
iets=0
allpinBool = np.array([True]*np.sum(d_snap['File'].npart))
for i in haloproperties['HaloIndex']:
allpinBool[haloproperties['Partindices'][i]] = False
outsideIndices = np.where(allpinBool)[0]
insideIndices = np.where(allpinBool==False)[0]
outsideIndicesDM = outsideIndices[np.where(outsideIndices < d_snap['File'].npart[0])[0]]
outsideIndicesH = outsideIndices[np.where(outsideIndices >= d_snap['File'].npart[0])[0]]
insideIndicesDM = insideIndices[np.where(insideIndices < d_snap['File'].npart[0])[0]]
insideIndicesH = insideIndices[np.where(insideIndices >= d_snap['File'].npart[0])[0]]
dmmass = d_snap['File'].get_masses()[0]
hmass = d_snap['File'].get_masses()[-1]
haloproperties['Outside_fdm_temp_DMpart_Hpart_dmmass_hmass'] = np.array([len(outsideIndicesDM)*dmmass/(len(outsideIndicesDM)*dmmass+len(outsideIndicesH)*hmass),
np.sum(d_snap['File'].get_temperature()[outsideIndicesH])/len(outsideIndicesH), len(outsideIndicesDM), len(outsideIndicesH), dmmass, hmass])
haloproperties['Inside_fdm_temp_DMpart_Hpart_dmmass_hmass'] = np.array([len(insideIndicesDM)*dmmass/(len(insideIndicesDM)*dmmass+len(insideIndicesH)*hmass),
np.sum(d_snap['File'].get_temperature()[insideIndicesH])/len(insideIndicesH), len(insideIndicesDM), len(insideIndicesH), dmmass, hmass])
def writeDataToHDF5quantities(path, name, haloproperties, overwrite=False, savePartData=False,
convertVel=False, copyVel=False):
existing = False
if overwrite==False and os.path.isfile(path + name):
haloprop = h5py.File(path + name, 'r+')
existing = True
HaloIndices = haloprop['HaloIndex'][:]
overlap = np.where(np.in1d(haloproperties['HaloIndex'], HaloIndices))[0]
nonoverlap = np.delete(haloproperties['HaloIndex'][:], overlap)
nonoverlapindex = np.delete(np.arange(0, len(haloproperties['HaloIndex']), 1).astype(int), overlap)
nonoverlaplist = ['haloIndex_%05d' %i for i in nonoverlap]
else:
haloprop = h5py.File(path+name, 'w')
for key in haloproperties.keys():
if (copyVel==False) and (convertVel==False) and (key not in quantity_keys()):
continue
if (copyVel==False) and convertVel and (key not in convertVel_keys()):
continue
if isinstance(haloproperties[key], dict):
if not savePartData:
if key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices':
continue
if existing:
temp = haloprop[key]
else:
temp = haloprop.create_group(key)
for key2 in haloproperties[key].keys():
if haloproperties[key][key2] is None:
print(key)
continue
key2string = 'haloIndex_%05d' %key2
if existing:
if len(np.where(np.in1d(key2string, nonoverlaplist))[0]) > 0:
temp.create_dataset(key2string, data = np.array(haloproperties[key][key2]))
else:
temp.create_dataset(key2string, data = np.array(haloproperties[key][key2]))
else:
if haloproperties[key] is None:
print(key)
continue
if existing:
if key == 'Radius' or key == 'MassTable' or key == 'snapshot' or key == 'redshift':
continue
data = haloprop[key][:]
for i in nonoverlapindex:
data = np.concatenate((data, [haloproperties[key][i]]))
del haloprop[key]
haloprop.create_dataset(key, data = data)
else:
haloprop.create_dataset(key, data = np.array(haloproperties[key]))
haloprop.close()
def writeDataToHDF5profiles(path, name, haloproperties, overwrite=False, savePartData=False):
existing = False
if overwrite==False and os.path.isfile(path + name):
haloprop = h5py.File(path + name, 'r+')
existing = True
HaloIndices = haloprop['HaloIndex'][:]
overlap = np.where(np.in1d(haloproperties['HaloIndex'], HaloIndices))[0]
nonoverlap = np.delete(haloproperties['HaloIndex'][:], overlap)
nonoverlapindex = np.delete(np.arange(0, len(haloproperties['HaloIndex']), 1).astype(int), overlap)
nonoverlaplist = ['haloIndex_%05d' %i for i in nonoverlap]
else:
haloprop = h5py.File(path+name, 'w')
for key in haloproperties.keys():
if key not in profile_keys():
continue
if isinstance(haloproperties[key], dict):
if not savePartData:
if key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices':
continue
if existing:
temp = haloprop[key]
else:
temp = haloprop.create_group(key)
for key2 in haloproperties[key].keys():
if haloproperties[key][key2] is None:
print(key)
continue
key2string = 'haloIndex_%05d' %key2
if existing:
if len(np.where(np.in1d(key2string, nonoverlaplist))[0]) > 0:
temp.create_dataset(key2string, data = np.array(haloproperties[key][key2]))
else:
temp.create_dataset(key2string, data = np.array(haloproperties[key][key2]))
else:
if haloproperties[key] is None:
print(key)
continue
if existing:
if key == 'Radius' or key == 'MassTable' or key == 'snapshot' or key == 'redshift':
continue
data = haloprop[key][:]
for i in nonoverlapindex:
data = np.concatenate((data, [haloproperties[key][i]]))
del haloprop[key]
haloprop.create_dataset(key, data = data)
else:
haloprop.create_dataset(key, data = np.array(haloproperties[key]))
haloprop.close()
def writeDataToHDF5(path, name, haloproperties, overwrite=False, savePartData=False):
existing = False
if overwrite==False and os.path.isfile(path + name):
haloprop = h5py.File(path +name, 'r+')
existing = True
HaloIndices = haloprop['HaloIndex'][:]
overlap = np.where(np.in1d(haloproperties['HaloIndex'], HaloIndices))[0]
nonoverlap = np.delete(haloproperties['HaloIndex'][:], overlap)
nonoverlapindex = np.delete(np.arange(0, len(haloproperties['HaloIndex']), 1).astype(int), overlap)
nonoverlaplist = ['haloIndex_%05d' %i for i in nonoverlap]
else:
haloprop = h5py.File(path+name, 'w')
for key in haloproperties.keys():
if isinstance(haloproperties[key], dict):
if not savePartData:
if key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices':
continue
if existing:
temp = haloprop[key]
else:
temp = haloprop.create_group(key)
for key2 in haloproperties[key].keys():
if haloproperties[key][key2] is None:
print(key)
continue
key2string = 'haloIndex_%05d' %key2
if existing:
if len(np.where(np.in1d(key2string, nonoverlaplist))[0]) > 0:
temp.create_dataset(key2string, data = np.array(haloproperties[key][key2]))
else:
temp.create_dataset(key2string, data = np.array(haloproperties[key][key2]))
else:
if haloproperties[key] is None:
print(key)
continue
if existing:
if key == 'Radius' or key == 'MassTable' or key == 'snapshot' or key == 'redshift':
continue
data = haloprop[key][:]
for i in nonoverlapindex:
data = np.concatenate((data, [haloproperties[key][i]]))
del haloprop[key]
haloprop.create_dataset(key, data = data)
else:
haloprop.create_dataset(key, data = np.array(haloproperties[key]))
haloprop.close()
def readHDF5Data(path, name, Hydro=True):
existing = False
if os.path.isfile(path + name):
haloprop = h5py.File(path +name, 'r')
else:
sys.exit('Error: file '+path+name+' not found.')
haloproperties = buildHaloDictionary(Hydro=Hydro, multiple=True)
for key in haloprop.id:
if isinstance(haloproperties[key.decode('utf-8')], dict):
if isinstance(haloprop[key].id, h5py.h5d.DatasetID):
continue
temp = haloprop[key]
for key2 in haloprop[key].id:
haloindex = [int(s) for s in re.findall(r'\d+', key2.decode('utf-8'))][0]
haloproperties[key.decode('utf-8')][haloindex] = temp[key2][:]
else:
haloproperties[key.decode('utf-8')] = haloprop[key][:]
haloprop.close()
return haloproperties
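#Hedged usage sketch (not in the original file): write the quantities and profiles of one
#snapshot and read the quantities back. The paths and file names are placeholders.
#writeDataToHDF5quantities('output/', 'quantities_189.hdf5', haloproperties, overwrite=True)
#writeDataToHDF5profiles('output/', 'profiles_189.hdf5', haloproperties, overwrite=True)
#hp = readHDF5Data('output/', 'quantities_189.hdf5', Hydro=True)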
def readHDF5DataSets(path, name, datasets, Hydro=True):
existing = False
if os.path.isfile(path + name):
haloprop = h5py.File(path +name, 'r')
else:
sys.exit('Error: file '+path+name+' not found.')
haloproperties = buildHaloDictionary(Hydro=Hydro, multiple=True)
for key in haloprop.id:
if key.decode('utf-8') in datasets:
if isinstance(haloproperties[key.decode('utf-8')], dict):
if isinstance(haloprop[key].id, h5py.h5d.DatasetID):
continue
temp = haloprop[key]
for key2 in haloprop[key].id:
haloindex = [int(s) for s in re.findall(r'\d+', key2.decode('utf-8'))][0]
haloproperties[key.decode('utf-8')][haloindex] = temp[key2][:]
else:
haloproperties[key.decode('utf-8')] = haloprop[key][:]
haloprop.close()
return haloproperties
def getRidOfBadHaloes(hp):
c = constant()
c.change_constants(hp['redshift'])
wrong = np.where(4./3*np.pi*hp['R200']**3*200*c.rhocrit_Ms_Mpci3/(h**2*(1+hp['redshift'])**3) > 1.2*hp['M200']*1e10)[0]
wrong = np.append(wrong, np.where(4./3*np.pi*hp['R200']**3*200*c.rhocrit_Ms_Mpci3/(h**2*(1+hp['redshift'])**3) < 0.8*hp['M200']*1e10)[0])
wronghi = hp['HaloIndex'][wrong]
print(len(wronghi))
for i in hp.keys():
if i == 'Inside_fdm_temp_DMpart_Hpart_dmmass_hmass' or i == 'Outside_fdm_temp_DMpart_Hpart_dmmass_hmass':
continue
if isinstance(hp[i], dict):
for j in wronghi:
hp[i].pop(j, None)
else:
hp[i] = np.delete(hp[i], wrong)
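#Sketch of the consistency cut applied above (comments only): a halo is kept only if the mass
#implied by its R200 agrees with its M200 to within 20 per cent.
#m_from_r = 4./3*np.pi*hp['R200']**3*200*c.rhocrit_Ms_Mpci3/(h**2*(1+hp['redshift'])**3)
#keep = (m_from_r > 0.8*hp['M200']*1e10) & (m_from_r < 1.2*hp['M200']*1e10)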
def rewriteHeadTails(halodata, snapmin=0, snapmax=200, TEMPORALHALOIDVAL=1000000000000):
sortorder = {}
sortorderinvert = {}
newtail = {}
newhead = {}
for snap in range(snapmin, snapmax+1):
sortorder[snap] = np.argsort(halodata[snap]['Mass_tot'][:])[::-1]
sortorderinvert[snap] = np.argsort(sortorder[snap]) #Orders it to point to the right position in the ID list
for snap in range(snapmin, snapmax+1):
oldhead = halodata[snap]['Head'][sortorder[snap]]
oldtail = halodata[snap]['Tail'][sortorder[snap]]
newtail[snap] = np.zeros(len(oldtail))
newhead[snap] = np.zeros(len(oldhead))
tempsnaps = (oldtail/TEMPORALHALOIDVAL).astype(int)
if len(tempsnaps) == 0:
continue
for i in range(min(tempsnaps), min(snap-1, max(tempsnaps))+1):
loctemp = np.where(tempsnaps == i)[0]
if len(loctemp) == 0:
continue
prevhalotemp = (oldtail[loctemp]%TEMPORALHALOIDVAL - 1).astype(int)
newtail[snap][loctemp] = (sortorderinvert[i][prevhalotemp]%TEMPORALHALOIDVAL).astype(int) + i*TEMPORALHALOIDVAL
tempsnaps = (oldhead/TEMPORALHALOIDVAL).astype(int)
for i in range(max(min(tempsnaps), snap+1), max(tempsnaps)+1):
loctemp = np.where(tempsnaps == i)[0]
if len(loctemp) == 0:
continue
prevhalotemp = (oldhead[loctemp]%TEMPORALHALOIDVAL - 1).astype(int)
newhead[snap][loctemp] = sortorderinvert[i][prevhalotemp] + i*TEMPORALHALOIDVAL
newtail[snap] = newtail[snap].astype(int)
newhead[snap] = newhead[snap].astype(int)
return sortorder, newtail, newhead
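#Self-contained illustration (an assumption based on how IDs are unpacked above, not code from
#the original file) of the temporal halo ID convention: ID = snapshot*TEMPORALHALOIDVAL + index + 1,
#so the snapshot is recovered by integer division and the 0-based index by the modulo minus one.
def split_temporal_id_sketch(halo_id, TEMPORALHALOIDVAL=1000000000000):
	snapshot = int(halo_id/TEMPORALHALOIDVAL)
	halo_index = int(halo_id%TEMPORALHALOIDVAL) - 1
	return snapshot, halo_index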
def ReadParticleTypes(basefilename,iseparatesubfiles=0,iverbose=0, unbound=True):
"""
VELOCIraptor/STF catalog_group and catalog_parttypes in hdf5
Note that a file will indicate how many files the total output has been split into
"""
inompi=True
if (iverbose): print("reading particle data",basefilename)
gfilename=basefilename+".catalog_groups"
tfilename=basefilename+".catalog_parttypes"
utfilename=tfilename+".unbound"
#check for file existence
if (os.path.isfile(gfilename)==True):
numfiles=0
else:
gfilename+=".0"
tfilename+=".0"
utfilename+=".0"
inompi=False
if (os.path.isfile(gfilename)==False):
print("file not found")
return []
byteoffset=0
#load header information from file to get total number of groups
#hdf
gfile = h5py.File(gfilename, 'r')
filenum=int(gfile["File_id"][0])
numfiles=int(gfile["Num_of_files"][0])
numhalos=np.uint64(gfile["Num_of_groups"][0])
numtothalos=np.uint64(gfile["Total_num_of_groups"][0])
gfile.close()
particledata=dict()
particledata['Npart']=np.zeros(numtothalos,dtype=np.uint64)
particledata['Particle_Types']=[[] for i in range(numtothalos)]
#now for all files
counter=np.uint64(0)
subfilenames=[""]
if (iseparatesubfiles==1): subfilenames=["",".sublevels"]
for ifile in range(numfiles):
for subname in subfilenames:
bfname=basefilename+subname
gfilename=bfname+".catalog_groups"
tfilename=bfname+".catalog_parttypes"
utfilename=tfilename+".unbound"
if (inompi==False):
gfilename+="."+str(ifile)
tfilename+="."+str(ifile)
utfilename+="."+str(ifile)
if (iverbose) : print("reading",bfname,ifile)
gfile = h5py.File(gfilename, 'r')
numhalos=np.uint64(gfile["Num_of_groups"][0])
numingroup=np.uint64(gfile["Group_Size"])
uoffset=np.uint64(gfile["Offset_unbound"])
offset=np.uint64(gfile["Offset"])
gfile.close()
tfile = h5py.File(tfilename, 'r')
utfile = h5py.File(utfilename, 'r')
tdata=np.uint16(tfile["Particle_types"])
utdata=np.uint16(utfile["Particle_types"])
npart=len(tdata)
unpart=len(utdata)
tfile.close()
utfile.close()
#now with data loaded, process it to produce data structure
unumingroup=np.zeros(numhalos,dtype=np.uint64)
for i in range(int(numhalos-1)):
unumingroup[i]=(uoffset[i+1]-uoffset[i]);
unumingroup[-1]=(unpart-uoffset[-1])
if unbound:
particledata['Npart'][counter:counter+numhalos]=numingroup
else:
particledata['Npart'][counter:counter+numhalos] = numingroup-unumingroup
for i in range(numhalos):
if unbound:
particledata['Particle_Types'][int(i+counter)]=np.zeros(numingroup[i],dtype=np.int64)
particledata['Particle_Types'][int(i+counter)][:int(numingroup[i]-unumingroup[i])]=tdata[offset[i]:offset[i]+numingroup[i]-unumingroup[i]]
particledata['Particle_Types'][int(i+counter)][int(numingroup[i]-unumingroup[i]):numingroup[i]]=utdata[uoffset[i]:uoffset[i]+unumingroup[i]]
else:
particledata['Particle_Types'][int(i+counter)]=np.zeros(numingroup[i]-unumingroup[i],dtype=np.int64)
particledata['Particle_Types'][int(i+counter)][:int(numingroup[i]-unumingroup[i])]=tdata[offset[i]:offset[i]+numingroup[i]-unumingroup[i]]
counter+=numhalos
return particledata
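#Hedged usage sketch (not part of the original file); the base filename is a placeholder for a
#VELOCIraptor/STF output set, i.e. the files <base>.catalog_groups and <base>.catalog_parttypes.
#parttypes = ReadParticleTypes('catalogues/snapshot_189', unbound=False, iverbose=1)
#print(parttypes['Npart'][:10])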
def ReadParticleDataFile(basefilename,iseparatesubfiles=0,iparttypes=0,iverbose=1, binarydtype=np.int64,
unbound=True, selected_files=None, halolist=None, TEMPORALHALOIDVAL = 1000000000000):
"""
VELOCIraptor/STF catalog_group, catalog_particles and catalog_parttypes in various formats
Note that a file will indicate how many files the total output has been split into
"""
inompi=True
if (iverbose): print("reading particle data",basefilename)
gfilename=basefilename+".catalog_groups"
pfilename=basefilename+".catalog_particles"
upfilename=pfilename+".unbound"
tfilename=basefilename+".catalog_parttypes"
utfilename=tfilename+".unbound"
#check for file existence
if (os.path.isfile(gfilename)==True):
numfiles=0
else:
gfilename+=".0"
pfilename+=".0"
upfilename+=".0"
tfilename+=".0"
utfilename+=".0"
inompi=False
if (os.path.isfile(gfilename)==False):
print("file not found")
return []
byteoffset=0
#If a list of haloes is given, we only want to read in the haloes (memory efficient)
if halolist is not None:
haloindices = (halolist%TEMPORALHALOIDVAL - 1).astype(int)
#load header information from file to get total number of groups
gfile = h5py.File(gfilename, 'r')
numfiles=int(gfile["Num_of_files"][0])
numtothalos=np.uint64(gfile["Total_num_of_groups"][0])
gfile.close()
if selected_files is not None:
numtothalos = np.uint64(0)
numfiles = len(selected_files)
for ifile in selected_files:
filename = basefilename+".catalog_groups"+"."+str(ifile)
halofile = h5py.File(filename, 'r')
numtothalos += np.uint64(halofile["Num_of_groups"][0])
halofile.close()
if halolist is not None:
numtothalos = len(haloindices)
particledata=dict()
particledata['Npart']=np.zeros(numtothalos,dtype=np.uint64)
particledata['Npart_unbound']=np.zeros(numtothalos,dtype=np.uint64)
particledata['Particle_IDs']=[[] for i in range(numtothalos)]
if (iparttypes==1):
particledata['Particle_Types']=[[] for i in range(numtothalos)]
#now for all files
counter=np.uint64(0)
if halolist is not None:
noffset = np.uint64(0)
subfilenames=[""]
if (iseparatesubfiles==1): subfilenames=["",".sublevels"]
for ifile in range(numfiles):
if selected_files is not None:
ifile_temp = selected_files[ifile]
else:
ifile_temp = ifile
for subname in subfilenames:
bfname=basefilename+subname
gfilename=bfname+".catalog_groups"
pfilename=bfname+".catalog_particles"
upfilename=pfilename+".unbound"
tfilename=bfname+".catalog_parttypes"
utfilename=tfilename+".unbound"
if (inompi==False):
gfilename+="."+str(ifile_temp)
pfilename+="."+str(ifile_temp)
upfilename+="."+str(ifile_temp)
tfilename+="."+str(ifile_temp)
utfilename+="."+str(ifile_temp)
if (iverbose) : print("reading",bfname,ifile_temp)
gfile = h5py.File(gfilename, 'r')
numhalos=np.uint64(gfile["Num_of_groups"][0])
if halolist is not None:
ww = haloindices[np.where((haloindices >= noffset)&(haloindices < noffset+numhalos))[0]] - noffset
noffset += numhalos
numhalos_hl = len(ww)
else:
				ww = np.arange(0, numhalos, 1)
# Jinja2 templating http://bit.ly/flask_walkthrough
from flask import Flask, request, jsonify, render_template, current_app, Response
# needed to ensure weights are loaded
from tensorflow.python.keras.backend import set_session
# specified instead of vague next line
from tensorflow.python.keras.models import load_model
# from keras.models import load_model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# import tensorflow
from numpy import asarray
from PIL import Image as Image_PIL # PYTHON IMAGING LIBRARY
import base64
import numpy as np
import os
import re # regular expresions
global graph
global model
global session
def get_model():
model = load_model(model_path)
print(f'Model from {model_path} is Loaded')
return model
def convert_canvas(canvas_data):
'''
USER INPUT (CANVAS) IMAGE CONVERSION;
    Canvas will output the user's entry as a Base64-encoded string;
This function will Decode the string to binary data to work with;
'''
# pinpoint 64 encoded string
encoded_string = re.search(r'base64,(.*)', canvas_data).group(1)
decoded_binary_data = base64.b64decode(encoded_string) # decode
# print(encoded_string)
# save canvas output as png file to process prediction;
sytem_output = open(filepath, 'wb')
sytem_output.write(decoded_binary_data)
sytem_output.close()
return filepath
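# Example (hypothetical string) of the kind of data URL the canvas posts; convert_canvas()
# only decodes the part after "base64," and writes it to `filepath`.
# canvas_data = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
# convert_canvas(canvas_data)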
def preprocess_image(filepath):
'''
Argument: Filepath
- Converts image to grayscale
- Resizes image to 28 x 28
- Creates Array from Pixel Data
- Inverts Array Values;
Returns Array from pixels & prints to console Image
'''
image = Image_PIL.open(filepath) # create instance of downloaded image;
# convert to grayscale; remove rgb channel;
image = image.convert(mode='L')
image = image.resize((28, 28)) # resize from 280x280 to 28x28;
image_array = asarray(image)
    image_array = np.invert(image_array)
    return image_array
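# Hypothetical helper (not in the original file): it assumes `model`, `graph` and `session`
# were initialised at app startup in the usual Keras/TF1 Flask pattern, and that the model
# expects a 28x28 grayscale input scaled to [0, 1]. It simply chains the two helpers above.
def predict_digit(canvas_data):
    image_array = preprocess_image(convert_canvas(canvas_data))
    batch = image_array.reshape(1, 28, 28, 1).astype('float32') / 255.0
    with graph.as_default():
        set_session(session)  # reattach the session that holds the loaded weights
        prediction = model.predict(batch)
    return int(np.argmax(prediction))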
import numpy as np
from matplotlib import pyplot as plt
# Plot Function
def draw_pts(x, y):
for i in range(len(x)):
if y[i] == 1:
plt.plot(x[i][0], x[i][1], 'ro')
else:
plt.plot(x[i][0], x[i][1], 'bo')
# plt.show()
def draw_line(w, b):
line_x = [0, 7]
line_y = [0, 0]
# w * x +b
# w[0] * x[0] + w[1] * x[1] + b = 0
for i in range(len(line_x)):
line_y[i] = (- w[0] * line_x[i] - b) / (w[1] + 1e-9)
plt.plot(line_x, line_y)
# Data & Maker
# x = np.array([[3, 3], [4, 3], [2, 4], [1, 1], [1, 2], [2, 1]])
# y = np.array([1, 1, 1, -1, -1, -1])
num = 50
x = np.vstack((
np.random.randn(num, 2) + 6, np.random.randn(num, 2) + 2
))
y = np.hstack((
np.ones(num), - np.ones(num)
))
# draw_pts(x, y)
# Initial Parameter & Learning rate
w = np.zeros(2)  # use an array so the update below adds element-wise (a plain list would be extended)
b = 0
lr = 1
# Primitive Form
for j in range(100):
wrong_pt_cnt = 0
for i in range(len(y)):
if y[i] != np.sign(np.dot(w, x[i]) + b):
w += lr * y[i] * x[i]
b += lr * y[i]
wrong_pt_cnt += 1
if wrong_pt_cnt == 0:
break
# draw_line(w, b)
# plt.show()
# Dual Form
gram = np.dot(x, x.T)
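# The dual form is cut short above; below is a hedged sketch (an assumption, not the original
# author's code) of the standard dual-form perceptron update that the Gram matrix is used for:
# each misclassified point increments its multiplier alpha_i, and w is recovered at the end.
alpha = np.zeros(len(y))
b_dual = 0.0
for _ in range(100):
    wrong_pt_cnt = 0
    for i in range(len(y)):
        if y[i] * (np.sum(alpha * y * gram[:, i]) + b_dual) <= 0:
            alpha[i] += lr
            b_dual += lr * y[i]
            wrong_pt_cnt += 1
    if wrong_pt_cnt == 0:
        break
w_dual = np.dot(alpha * y, x)  # equivalent primal weights
# draw_pts(x, y); draw_line(w_dual, b_dual); plt.show()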
"""inpainting by rectangle patch scannning
"""
import numpy as np
import cv2
from skimage.util.shape import view_as_windows
from math import ceil
import matplotlib.pyplot as plt
from PIL import Image
class Fast_Synthesis_based_inpaint:
def __init__(self, img, inpaint_mask, sample_mask, cloth_mask, in_mirror_hor=False, in_mirror_vert=False, DEBUG=False):
'''
img:PIL
inpaint_mask:PIL
sample_mask:PIL
'''
self.img = np.array(img)
self.ori_img = np.array(img).copy()
self.sample_mask = sample_mask
self.cloth_mask = np.array(cloth_mask.convert('L')).astype('bool')
self.inpaint_mask = np.array(inpaint_mask.convert('L')).astype('uint8')
        # reasonable sampling region (outside the region to be inpainted)
self.sample_mask = ~self.inpaint_mask.astype('bool') * np.array(self.sample_mask.convert('L')).astype('bool')
        if np.sum(self.inpaint_mask) == 0:  # no changes are requested
self.pass_ = True
else:
self.pass_ = False
self.DEBUG = DEBUG
self.mirror_hor = in_mirror_hor
self.mirror_vert = in_mirror_vert
self.img = self.img / 255.
        self.x, self.y, self.w, self.h = cv2.boundingRect(self.inpaint_mask)  # find the minimal bounding rectangle
        # self.img[self.y:self.y+self.h, self.x:self.x+self.w].fill(1)  # fill the rectangle with pure white
self.init_hyper_parameters()
self.examplePatches = self.init_patch()
self.canvas_with_bound = self.init_canvas()
self.initKDtrees()
def init_hyper_parameters(self):
        self.patchSize = int(min(18, min(self.w, self.h) // 4))  # at most 18; size of the patch (without the overlap)
        self.overlapSize = max(2, self.patchSize // 6)  # at least 2; the overlap region
self.searchKernelSize = self.patchSize + 2 * self.overlapSize
self.windowStep = 1
if self.DEBUG:
print("patchSize: %s" % self.patchSize)
print("overlapSize: %s" % self.overlapSize)
print("searchKernelSize: %s" % self.searchKernelSize)
print("windowStep: %s" % self.windowStep)
def init_patch(self):
self.sample_area = self.img.copy()
self.sample_mask[self.y:self.y + self.h, self.x:self.x + self.w].fill(False)
self.sample_area[~self.sample_mask] = -1
result = view_as_windows(self.sample_area, [self.searchKernelSize, self.searchKernelSize, 3], self.windowStep)
result = result.squeeze()
axis = (np.zeros((result.shape[0], result.shape[1])) + 1).astype('bool')
axis *= np.min(result, (2, 3, 4)) >= 0
index = np.array(range(len(np.where(axis == True)[0])))
if len(np.where(axis == True)[0]) >= 5000:
index = np.random.choice(index, 5000, replace=False)
select_index = np.array([np.where(axis == True)[0][index], np.where(axis == True)[1][index]])
axis = (np.zeros((result.shape[0], result.shape[1]))).astype('bool')
axis[select_index[0], select_index[1]] = True
result = result[axis]
if self.mirror_hor:
            hor_result = result[:, :, ::-1]  # flip along the y axis (horizontal mirror)
result = np.concatenate((result, hor_result))
if self.mirror_vert:
            vert_result = result[:, ::-1]  # flip along the x axis (vertical mirror)
result = np.concatenate((result, vert_result))
return result
def init_canvas(self):
# check whether the outputSize adheres to patch+overlap size
self.num_patches_X = ceil((self.w - self.overlapSize) / (self.patchSize + self.overlapSize))
self.num_patches_Y = ceil((self.h - self.overlapSize) / (self.patchSize + self.overlapSize))
# calc needed output image size
self.required_size_X = self.num_patches_X * self.patchSize + (self.num_patches_X + 1) * self.overlapSize
self.required_size_Y = self.num_patches_Y * self.patchSize + (self.num_patches_Y + 1) * self.overlapSize
self.filledMap = np.zeros(
(self.num_patches_Y, self.num_patches_X)) # map showing which patches have been resolved
self.up_bound = max(self.y - (self.required_size_Y - self.h) // 2, 0)
self.down_bound = self.up_bound + self.required_size_Y
self.left_bound = max(self.x - (self.required_size_X - self.w) // 2, 0)
self.right_bound = self.left_bound + self.required_size_X
canvas_with_bound = np.copy(self.img[self.up_bound: self.down_bound, self.left_bound:self.right_bound])
self.x, self.y = self.left_bound + self.overlapSize, self.up_bound + self.overlapSize
if self.DEBUG:
print("num_patches_X %s" % self.num_patches_X)
print("num_patches_Y %s" % self.num_patches_Y)
self.canvas_b = np.copy(canvas_with_bound)
self.canvas_c = np.copy(canvas_with_bound)
return canvas_with_bound
def initKDtrees(self):
topOverlap = self.examplePatches[:, 0:self.overlapSize, :, :]
leftOverlap = self.examplePatches[:, :, 0:self.overlapSize, :].transpose(0, 2, 1, 3)
downOverlap = self.examplePatches[:, -self.overlapSize:, :, :]
rightOverlap = self.examplePatches[:, :, -self.overlapSize:, :].transpose(0, 2, 1, 3)
self.flatten_combined_ld = np.zeros(
(leftOverlap.shape[0], leftOverlap.shape[1], leftOverlap.shape[2] * 2, leftOverlap.shape[3]))
self.flatten_combined_ld[:, :, :leftOverlap.shape[2], :] = leftOverlap
self.flatten_combined_ld[:, :, leftOverlap.shape[2]:, :] = downOverlap
self.flatten_combined_ldt = np.zeros(
(leftOverlap.shape[0], leftOverlap.shape[1], leftOverlap.shape[2] * 3, leftOverlap.shape[3]))
self.flatten_combined_ldt[:, :, :leftOverlap.shape[2], :] = leftOverlap
self.flatten_combined_ldt[:, :, leftOverlap.shape[2]:leftOverlap.shape[2] * 2, :] = downOverlap
self.flatten_combined_ldt[:, :, leftOverlap.shape[2] * 2:, :] = topOverlap
self.flatten_combined_ldr = np.zeros(
(leftOverlap.shape[0], leftOverlap.shape[1], leftOverlap.shape[2] * 3, leftOverlap.shape[3]))
self.flatten_combined_ldr[:, :, :leftOverlap.shape[2], :] = leftOverlap
self.flatten_combined_ldr[:, :, leftOverlap.shape[2]:leftOverlap.shape[2] * 2, :] = downOverlap
self.flatten_combined_ldr[:, :, leftOverlap.shape[2] * 2:, :] = rightOverlap
self.flatten_combined_ldrt = np.zeros(
(leftOverlap.shape[0], leftOverlap.shape[1], leftOverlap.shape[2] * 4, leftOverlap.shape[3]))
self.flatten_combined_ldrt[:, :, :leftOverlap.shape[2], :] = leftOverlap
self.flatten_combined_ldrt[:, :, leftOverlap.shape[2]:leftOverlap.shape[2] * 2, :] = downOverlap
self.flatten_combined_ldrt[:, :, leftOverlap.shape[2] * 2:leftOverlap.shape[2] * 3, :] = rightOverlap
self.flatten_combined_ldrt[:, :, leftOverlap.shape[2] * 3:, :] = topOverlap
def getOverlapAreaTop(self, coord):
# do I have a top neighbour
x_range = [(self.patchSize + self.overlapSize) * coord[0],
(self.patchSize + self.overlapSize) * (coord[0] + 1) + self.overlapSize]
y_range = [(self.patchSize + self.overlapSize) * coord[1],
(self.patchSize + self.overlapSize) * (coord[1] + 1) + self.overlapSize]
if self.DEBUG:
self.canvas_b[x_range[0]:x_range[1], y_range[0]:y_range[1]][0:self.overlapSize, :, :] = 0
return np.copy(self.canvas_with_bound[x_range[0]:x_range[1], y_range[0]:y_range[1]])[0:self.overlapSize, :, :]
def getOverlapAreaLeft(self, coord):
# do I have a left neighbour
x_range = [(self.patchSize + self.overlapSize) * coord[0],
(self.patchSize + self.overlapSize) * (coord[0] + 1) + self.overlapSize]
y_range = [(self.patchSize + self.overlapSize) * coord[1],
(self.patchSize + self.overlapSize) * (coord[1] + 1) + self.overlapSize]
if self.DEBUG:
self.canvas_b[x_range[0]:x_range[1], y_range[0]:y_range[1]][:, 0:self.overlapSize, :] = 0
return np.copy(self.canvas_with_bound[x_range[0]:x_range[1], y_range[0]:y_range[1]])[:, 0:self.overlapSize, :]
def getOverlapAreaDown(self, coord):
# do I have a down neighbour
x_range = [(self.patchSize + self.overlapSize) * coord[0],
(self.patchSize + self.overlapSize) * (coord[0] + 1) + self.overlapSize]
y_range = [(self.patchSize + self.overlapSize) * coord[1],
(self.patchSize + self.overlapSize) * (coord[1] + 1) + self.overlapSize]
if self.DEBUG:
self.canvas_b[x_range[0]:x_range[1], y_range[0]:y_range[1]][-self.overlapSize:, :, :] = 1
return np.copy(self.canvas_with_bound[x_range[0]:x_range[1], y_range[0]:y_range[1]])[-self.overlapSize:, :, :]
def getOverlapAreaRight(self, coord):
# do I have a left neighbour
x_range = [(self.patchSize + self.overlapSize) * coord[0],
(self.patchSize + self.overlapSize) * (coord[0] + 1) + self.overlapSize]
y_range = [(self.patchSize + self.overlapSize) * coord[1],
(self.patchSize + self.overlapSize) * (coord[1] + 1) + self.overlapSize]
if self.DEBUG:
self.canvas_b[x_range[0]:x_range[1], y_range[0]:y_range[1]][:, -self.overlapSize:, :] = 0
return np.copy(self.canvas_with_bound[x_range[0]:x_range[1], y_range[0]:y_range[1]])[:, -self.overlapSize:, :]
def linearBlendOverlaps(self, canvasOverlap, examplePatchOverlap, mode):
if mode == 'left':
mask = np.repeat(np.arange(self.overlapSize)[np.newaxis, :], np.shape(canvasOverlap)[0],
axis=0) / self.overlapSize
elif mode == 'top':
mask = np.repeat(np.arange(self.overlapSize)[:, np.newaxis], np.shape(canvasOverlap)[1],
axis=1) / self.overlapSize
elif mode == 'right':
            mask = np.repeat((self.overlapSize - 1 - np.arange(self.overlapSize))[np.newaxis, :],
                             np.shape(canvasOverlap)[0], axis=0) / self.overlapSize
########################################################################
#
# License: BSD
# Created: September 1, 2010
# Author: <NAME> - <EMAIL>
#
########################################################################
import sys
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
import blaze.carray as ca
from common import MayBeDiskTest
class createTest(MayBeDiskTest, TestCase):
def test00a(self):
"""Testing ctable creation from a tuple of carrays"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00b(self):
"""Testing ctable creation from a tuple of lists"""
t = ca.ctable(([1,2,3],[4,5,6]), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([[1,2,3],[4,5,6]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00c(self):
"""Testing ctable creation from a tuple of carrays (single column)"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
self.assertRaises(ValueError, ca.ctable, a, 'f0', rootdir=self.rootdir)
def test01(self):
"""Testing ctable creation from a tuple of numpy arrays"""
N = 1e1
a = np.arange(N, dtype='i4')
b = np.arange(N, dtype='f8')+1
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([a,b]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing ctable creation from an structured array"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03a(self):
"""Testing ctable creation from large iterator"""
N = 10*1000
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8',
count=N, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03b(self):
"""Testing ctable creation from large iterator (with a hint)"""
N = 10*1000
ra = np.fromiter(((i, i*2.) for i in xrange(N)),
dtype='i4,f8', count=N)
t = ca.fromiter(((i, i*2.) for i in xrange(N)),
dtype='i4,f8', count=N, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
class createDiskTest(createTest, TestCase):
disk = True
class persistentTest(MayBeDiskTest, TestCase):
disk = True
def test00a(self):
"""Testing ctable opening in "r" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='r')
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
self.assertRaises(RuntimeError, t.__setitem__, 1, (0, 0.0))
self.assertRaises(RuntimeError, t.append, (0, 0.0))
def test00b(self):
"""Testing ctable opening in "w" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='w')
#print "t->", `t`
N = 0
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((0, 0.0))
t.append((0, 0.0))
t[1] = (1, 2.0)
ra = np.rec.fromarrays([(0,1),(0.0, 2.0)], 'i4,f8').view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00c(self):
"""Testing ctable opening in "a" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='a')
#print "t->", `t`
# Check values
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((10, 11.0))
t.append((10, 11.0))
t[-1] = (11, 12.0)
# Check values
N = 12
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01a(self):
"""Testing ctable creation in "r" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
self.assertRaises(RuntimeError, ca.ctable, (a, b), ('f0', 'f1'),
rootdir=self.rootdir, mode='r')
def test01b(self):
"""Testing ctable creation in "w" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Overwrite the last ctable
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir, mode='w')
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((10, 11.0))
t.append((10, 11.0))
t[11] = (11, 12.0)
# Check values
N = 12
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01c(self):
"""Testing ctable creation in "a" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Overwrite the last ctable
self.assertRaises(RuntimeError, ca.ctable, (a, b), ('f0', 'f1'),
rootdir=self.rootdir, mode='a')
class add_del_colTest(MayBeDiskTest, TestCase):
def test00a(self):
"""Testing adding a new column (list flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c.tolist(), 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00(self):
"""Testing adding a new column (carray flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(ca.carray(c), 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01a(self):
"""Testing adding a new column (numpy flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01b(self):
"""Testing cparams when adding a new column (numpy flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, cparams=ca.cparams(1), rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, 'f2')
self.assert_(t['f2'].cparams.clevel == 1, "Incorrect clevel")
def test02(self):
"""Testing adding a new column (default naming)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(ca.carray(c))
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03(self):
"""Testing inserting a new column (at the beginning)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, name='c0', pos=0)
ra = np.fromiter(((i*3, i, i*2.) for i in xrange(N)), dtype='i8,i4,f8')
ra.dtype.names = ('c0', 'f0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test04(self):
"""Testing inserting a new column (in the middle)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, name='c0', pos=1)
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
ra.dtype.names = ('f0', 'c0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test05(self):
"""Testing removing an existing column (at the beginning)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=0)
# The next gives a segfault. See:
# http://projects.scipy.org/numpy/ticket/1598
#ra = np.fromiter(((i*3, i*2) for i in xrange(N)), dtype='i8,f8')
#ra.dtype.names = ('f1', 'f2')
dt = np.dtype([('f1', 'i8'), ('f2', 'f8')])
ra = np.fromiter(((i*3, i*2) for i in xrange(N)), dtype=dt)
#print "t->", `t`
#print "ra", ra
#assert_array_equal(t[:], ra, "ctable values are not correct")
def test06(self):
"""Testing removing an existing column (at the end)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=2)
ra = np.fromiter(((i, i*3) for i in xrange(N)), dtype='i4,i8')
ra.dtype.names = ('f0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test07(self):
"""Testing removing an existing column (in the middle)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=1)
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
ra.dtype.names = ('f0', 'f2')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test08(self):
"""Testing removing an existing column (by name)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol('f1')
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
ra.dtype.names = ('f0', 'f2')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
class add_del_colDiskTest(add_del_colTest, TestCase):
disk = True
class getitemTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing __getitem__ with only a start"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
start = 9
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[start], ra[start], "ctable values are not correct")
def test01(self):
"""Testing __getitem__ with start, stop"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
start, stop = 3, 9
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[start:stop], ra[start:stop],
"ctable values are not correct")
def test02(self):
"""Testing __getitem__ with start, stop, step"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
start, stop, step = 3, 9, 2
#print "t->", `t[start:stop:step]`
#print "ra->", ra[start:stop:step]
assert_array_equal(t[start:stop:step], ra[start:stop:step],
"ctable values are not correct")
def test03(self):
"""Testing __getitem__ with a column name"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
colname = "f1"
#print "t->", `t[colname]`
#print "ra->", ra[colname]
assert_array_equal(t[colname][:], ra[colname],
"ctable values are not correct")
def test04(self):
"""Testing __getitem__ with a list of column names"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, rootdir=self.rootdir)
colnames = ["f0", "f2"]
        # Some NumPy versions (> 1.7) do not support indexing a structured
        # array with a list of column names (ra[colnames]), so build the
        # expected array by hand instead.
ra2 = np.fromiter(((i, i*3) for i in xrange(N)), dtype='i4,i8')
ra2.dtype.names = ('f0', 'f2')
#print "t->", `t[colnames]`
#print "ra2->", ra2
assert_array_equal(t[colnames][:], ra2,
"ctable values are not correct")
class getitemDiskTest(getitemTest, TestCase):
disk = True
class setitemTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing __setitem__ with only a start"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(9, None)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01(self):
"""Testing __setitem__ with only a stop"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(None, 9, None)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing __setitem__ with a start, stop"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(1,90, None)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03(self):
"""Testing __setitem__ with a start, stop, step"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(1,90, 2)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test04(self):
"""Testing __setitem__ with a large step"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(1,43, 20)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
class setitemDiskTest(setitemTest, TestCase):
disk = True
class appendTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing append() with scalar values"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.append((N, N*2))
ra = np.fromiter(((i, i*2.) for i in xrange(N+1)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01(self):
"""Testing append() with numpy arrays"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
a = np.arange(N, N+10, dtype='i4')
b = np.arange(N, N+10, dtype='f8')*2.
t.append((a, b))
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing append() with carrays"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
a = np.arange(N, N+10, dtype='i4')
b = np.arange(N, N+10, dtype='f8')*2.
t.append((ca.carray(a), ca.carray(b)))
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03(self):
"""Testing append() with structured arrays"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
ra2 = np.fromiter(((i, i*2.) for i in xrange(N, N+10)), dtype='i4,f8')
t.append(ra2)
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test04(self):
"""Testing append() with another ctable"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
ra2 = np.fromiter(((i, i*2.) for i in xrange(N, N+10)), dtype='i4,f8')
t2 = ca.ctable(ra2)
t.append(t2)
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
| assert_array_equal(t[:], ra, "ctable values are not correct") | numpy.testing.assert_array_equal |
import typing as ty
import warnings
import numba as nb
import numpy as np
from numba.core.errors import NumbaWarning
from .. import utility
from ..paths import Box, Path, Paths
from ..shape import Shape
warnings.simplefilter("ignore", category=NumbaWarning)
class Sphere(Shape):
def __init__(self, center=None, radius=1.0, texture=1):
super().__init__()
self.center = (
np.zeros(3)
if center is None
else np.asarray(center, dtype=np.float64)
)
self.radius = radius
radius_vec = np.array([radius, radius, radius], dtype=np.float64)
self.box = Box(self.center - radius_vec, self.center + radius_vec)
self.texture = texture
def compile(self):
pass
def bounding_box(self) -> Box:
return self.box
def contains(self, v: np.ndarray, f) -> bool:
return utility.vector_length(v - self.center) <= self.radius + f
def intersect(
self, ray_origin: np.ndarray, ray_direction: np.ndarray
) -> float:
return Sphere._intersect(
self.radius, self.center, ray_origin, ray_direction
)
@staticmethod
@nb.njit(
"float64(float64, float64[:], float64[:], float64[:])",
cache=True,
)
def _intersect(
radius: float,
center: np.ndarray,
ray_origin: np.ndarray,
ray_direction: np.ndarray,
) -> float:
to = ray_origin - center
b = np.dot(to, ray_direction)
c = np.dot(to, to) - radius * radius
d = b * b - c
if d > 0:
d = np.sqrt(d)
t1 = -b - d
if t1 > 1e-2:
return t1
t2 = -b + d
if t2 > 1e-2:
return t2
return utility.INF
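    # Sketch of the math _intersect implements (this assumes ``ray_direction`` has
    # unit length): with ``to = O - C`` the ray ``P(t) = O + t*D`` meets the sphere
    # ``|P - C| = radius`` where ``t**2 + 2*b*t + c = 0``, so
    # ``t = -b +/- sqrt(b*b - c)``. The method returns the nearest root above the
    # 1e-2 epsilon, or ``utility.INF`` when the ray misses.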
def paths(self) -> Paths:
if self.texture == 1:
return Paths(Sphere.paths_1(self.radius, self.center))
elif self.texture == 2:
return Paths(Sphere.paths_2(self.radius, self.center))
elif self.texture == 3:
return Paths(Sphere.paths_3(self.radius, self.center))
elif self.texture == 4:
return Paths(Sphere.paths_4(self.radius, self.center))
@staticmethod
def paths_1(
radius: float, center: np.ndarray
) -> ty.List[ty.List[np.ndarray]]:
# Grid pattern
paths = []
n = 5
o = 10
for lat in range(-90 + o, 91 - o, n):
paths.append(
[
Sphere.latlng_to_xyz(lat, lng, radius) + center
for lng in range(0, 361)
]
)
for lng in range(0, 361, n):
paths.append(
[
Sphere.latlng_to_xyz(lat, lng, radius) + center
for lat in np.arange(-90 + o, 91 - o)
]
)
return paths
@staticmethod
def paths_2(radius: float, center: np.ndarray) -> ty.List[Path]:
# Criss-cross pattern
paths = []
equator = Path(
[Sphere.latlng_to_xyz(0, lng, radius) for lng in range(360)]
)
for i in range(100):
matrix = np.eye(4)
for j in range(3):
v = utility.random_unit_vector()
matrix = utility.matrix_mul_matrix(
utility.vector_rotate(v, np.random.random() * 2 * np.pi),
matrix,
)
matrix = utility.matrix_mul_matrix(
utility.vector_translate(center), matrix
)
paths.append(equator.transform(matrix))
return paths
@staticmethod
@nb.njit("float64[:,:,:](float64, float64[:])")
def paths_3(radius: float, center: np.ndarray) -> np.ndarray:
paths = np.zeros((20000, 2, 3))
for i in range(20000):
v = utility.random_unit_vector() * radius + center
paths[i, 0] = v
paths[i, 1] = v.copy()
return paths
@staticmethod
def paths_4(
radius: float, center: np.ndarray
) -> ty.List[ty.List[np.ndarray]]:
# Criss-cross with circles
paths = []
seen = []
radii = []
for i in range(140):
while True:
v = utility.random_unit_vector()
m = np.random.random() * 0.25 + 0.05
ok = True
for s in range(len(seen)):
threshold = m + radii[s] + 0.02
if utility.vector_length(seen[s] - v) < threshold:
ok = False
break
if ok:
seen.append(v)
radii.append(m)
break
p = utility.vector_normalize(
np.cross(v, utility.random_unit_vector())
)
q = utility.vector_normalize(np.cross(p, v))
n = np.random.randint(0, 4) + 1
for k in range(n):
path = []
for j in range(0, 360, 5):
a = np.deg2rad(j)
path.append(
utility.vector_normalize(
v + p * np.cos(a) * m + q * np.sin(a) * m
)
* radius
+ center
)
paths.append(path)
m += 0.75
return paths
@staticmethod
@nb.njit("float64[:](float64, float64, float64)", cache=True)
def latlng_to_xyz(lat, lng, radius) -> np.ndarray:
lat, lng = np.deg2rad(lat), np.deg2rad(lng)
x = radius * np.cos(lat) * np.cos(lng)
y = radius * np.cos(lat) * np.sin(lng)
z = radius * np.sin(lat)
return np.array([x, y, z])
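# Minimal usage sketch (illustrative only; the scene/camera setup from the rest of
# this package is assumed and not shown here):
#
#   sphere = Sphere(center=[0.0, 0.0, 0.0], radius=1.0, texture=1)
#   wire = sphere.paths()   # texture 1 gives the lat/lng grid; 2-4 give the others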
class OutlineSphere(Sphere):
def __init__(
self,
eye: ty.Union[ty.List[float], np.ndarray],
up: ty.Union[ty.List[float], np.ndarray],
center: ty.Union[ty.List[float], np.ndarray],
radius: float,
):
super().__init__(center, radius)
self.eye = np.asarray(eye, dtype=np.float64)
self.up = np.asarray(up, dtype=np.float64)
def paths(self) -> Paths:
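        # Silhouette geometry, as computed below: the sphere subtends a half-angle
        # theta = arcsin(radius / |center - eye|); its visible outline is a circle
        # of radius r = sin(theta) * adj placed d = cos(theta) * adj along the
        # eye-to-center axis, where adj = radius / tan(theta).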
hyp = utility.vector_length(self.center - self.eye)
theta = np.arcsin(self.radius / hyp)
adj = self.radius / np.tan(theta)
d = np.cos(theta) * adj
r = np.sin(theta) * adj
w = utility.vector_normalize(self.center - self.eye)
        u = utility.vector_normalize(np.cross(w, self.up))
"""
fastspecfit.templates.qa
========================
QA for templates
"""
import pdb
import os
import numpy as np
from astropy.table import Table
from scipy.ndimage import median_filter
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.templates import rebuild_fastspec_spectrum
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
from desiutil.log import get_logger
log = get_logger()
def plot_style(font_scale=1.2):
import seaborn as sns
sns.set(context='talk', style='ticks', palette='deep', font_scale=font_scale)#, rc=rc)
colors = sns.color_palette()
return sns, colors
def qa_bpt(targetclass, fastspecfile=None, png=None):
"""QA of the fastspec emission-line spectra.
"""
from fastspecfit.templates.templates import remove_undetected_lines, read_stacked_fastspec
sns, _ = plot_style()
fastmeta, _fastspec = read_stacked_fastspec(fastspecfile, read_spectra=False)
fastspec = remove_undetected_lines(_fastspec)
nobj = len(fastmeta)
def oplot_class(ax, kewley=False, **kwargs):
if kewley:
niiha = np.linspace(-1.9, 0.4, 1000)
oiiihb = 0.61 / (niiha-0.47) + 1.19
else:
niiha = np.linspace(-1.9, -0.1, 1000)
oiiihb = 0.61 / (niiha-0.05) + 1.3
ax.plot(niiha, oiiihb, **kwargs)
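    # The curves drawn by oplot_class are the standard BPT demarcations:
    # log([OIII]/Hb) = 0.61 / (log([NII]/Ha) - 0.47) + 1.19 (Kewley+01, kewley=True)
    # and 0.61 / (log([NII]/Ha) - 0.05) + 1.3 (Kauffmann+03); points above the
    # curves are AGN-like, points below are star-forming.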
def _bpt(cc, cclabel='Redshift', vmin=None, vmax=None, png=None):
fig, ax = plt.subplots(figsize=(10, 7))
cb = ax.scatter(niiha, oiiihb, c=cc, cmap='jet', vmin=vmin, vmax=vmax)
oplot_class(ax, kewley=True, color='k', ls='--', lw=3, label='Kewley+01')
oplot_class(ax, kewley=False, color='k', lw=3, label='Kauffmann+03')
plt.colorbar(cb, label=cclabel)
ax.set_xlim(-1.9, 0.7)
ax.set_ylim(-1.2, 1.5)
ax.set_xlabel(r'$\log_{10}$ ([NII] $\lambda6584$ / H$\alpha$)')
ax.set_ylabel(r'$\log_{10}$ ([OIII] $\lambda5007$ / H$\beta$)')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.legend(fontsize=16, loc='lower left')#, ncol=2)
plt.subplots_adjust(bottom=0.15, left=0.18, top=0.95, right=0.95)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
good = np.where(
(fastspec['HALPHA_FLUX'] > 0) *
(fastspec['HBETA_FLUX'] > 0) *
(fastspec['NII_6584_FLUX'] > 0) *
(fastspec['OIII_5007_FLUX'] > 0)
#(fastspec['HALPHA_CHI2'] < 1e4)
)[0]
niiha = np.log10(fastspec['NII_6584_FLUX'][good] / fastspec['HALPHA_FLUX'][good])
oiiihb = np.log10(fastspec['OIII_5007_FLUX'][good] / fastspec['HBETA_FLUX'][good])
ww = np.where((niiha > -0.05) * (niiha < 0.05) * (oiiihb < -0.5))[0]
#log.info(fastspec[good][ww]['HALPHA_FLUX', 'NII_6584_FLUX'])
zz = fastspec['CONTINUUM_Z'][good]
ewhb = fastspec['HBETA_EW'][good]
#rW1 = fastmeta['RW1'][good]
#gr = fastmeta['GR'][good]
_bpt(zz, 'Redshift', vmin=0, vmax=0.5, png=png.replace('.png', '-redshift.png'))
_bpt(np.log10(ewhb), r'$\log_{10}\,\mathrm{EW}(\mathrm{H}\beta)$',
png=png.replace('.png', '-ewhb.png'))
#_bpt(rW1, r'$r-W1$', vmin=-0.3, vmax=0.9, png=png.replace('.png', '-rW1.png'))
#_bpt(gi, r'$g-i$', vmin=0.6, vmax=1.3, png=png.replace('.png', '-gi.png'))
def qa_fastspec_fullspec(targetclass, fastwave=None, fastflux=None, fastivar=None,
fastmeta=None, fastspec=None, fastspecfile=None, CFit=None,
EMFit=None, ncol=3, nrow=5, photometric_models=False,
pdffile=None):
"""Full-spectrum QA.
photometric_models - use the fits to the broadband continuum
"""
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.sample import SAMPLE_PROPERTIES as props
from fastspecfit.templates.templates import rebuild_fastspec_spectrum, read_stacked_fastspec
sns, _ = plot_style()
if CFit is None or EMFit is None:
from fastspecfit.continuum import ContinuumFit
from fastspecfit.emlines import EMLineFit
CFit = ContinuumFit()
EMFit = EMLineFit()
if fastwave is None:
fastwave, fastflux, fastivar, fastmeta, fastspec = read_stacked_fastspec(fastspecfile)
#fastspec = remove_undetected_lines(fastspec, EMFit.linetable, devshift=False)
absmaglabel = props[targetclass]['absmag_label']
colorlabel = props[targetclass]['color_label']
nobj = len(fastmeta)
icam = 0
zobj = np.unique(fastmeta['ZOBJ'])
npage = len(zobj)
inches_wide_perpanel = 4.0
inches_tall_perpanel = 3.0
if npage == 1:
png = True
else:
png = False
if pdffile:
if png:
pdffile = pdffile.replace('.pdf', '.png')
else:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages(pdffile)
for ipage in [0]:#np.arange(npage):
log.info('Building page {}/{}'.format(ipage+1, npage))
pageindx = np.where(zobj[ipage] == fastmeta['ZOBJ'])[0]
absmag = sorted(set(fastmeta['ABSMAG'][pageindx])) # subpage
nsubpage = len(absmag)
for isubpage in [6]:#np.arange(nsubpage):
subpageindx = np.where((absmag[isubpage] == fastmeta['ABSMAG'][pageindx]))[0]
fig, allax = plt.subplots(nrow, ncol, figsize=(inches_wide_perpanel*ncol, inches_tall_perpanel*nrow),
sharex=True, sharey=False)#True)
for iplot, (indx, ax) in enumerate(zip(pageindx[subpageindx], allax.flatten())):
#log.info(ipage, isubpage, iplot, len(pageindx), len(subpageindx))
# rebuild the best-fitting spectrum; these models have been
# normalized already in iterative_stack
modelwave, continuum, smooth_continuum, emlinemodel, data = rebuild_fastspec_spectrum(
fastspec[indx], fastwave, fastflux[indx, :], fastivar[indx, :], CFit, EMFit)
# rest-frame
if photometric_models:
modelwave_phot, continuum_phot = rebuild_fastspec_spectrum(fastspec[indx], _, _, _, CFit,
EMFit, full_resolution=True,
normalize_wave=props[targetclass]['normwave'])
#modelwave_phot *= (1 + data['zredrock'])
#continuum_phot /= (1 + data['zredrock'])
zfact = (1 + data['zredrock'])
#ax.plot(data['wave'][icam]/zfact, data['flux'][icam], color='skyblue')
ax.plot(modelwave_phot, continuum_phot, color='gray')
ax.plot(modelwave/zfact, (continuum+emlinemodel), color='firebrick', alpha=0.7)
xmin, xmax = 900, 4e4
ww = np.where((modelwave_phot > xmin) * (modelwave_phot < xmax))[0]
ymin, ymax = np.min(continuum_phot[ww]), np.max(continuum_phot[ww])
if np.max(emlinemodel) > ymax:
pdb.set_trace()
ymax = np.max(emlinemodel)
else:
# observed frame
ax.plot(data['wave'][icam], data['flux'][icam], color='skyblue')
ax.plot(modelwave, continuum+emlinemodel, color='firebrick', alpha=0.5)
ax.plot(modelwave, continuum, color='blue', alpha=0.5)
#ax.plot(modelwave, continuum+smooth_continuum, color='gray', alpha=0.3)
ax.plot(modelwave, smooth_continuum, color='gray', alpha=0.7)
xmin, xmax = modelwave.min(), modelwave.max()
ymin, ymax = 1e6, -1e6
filtflux = median_filter(data['flux'][icam], 51, mode='nearest')
sigflux = np.std(data['flux'][icam][data['ivar'][icam] > 0])
if -2 * sigflux < ymin:
ymin = -2 * sigflux
if sigflux * 5 > ymax:
ymax = sigflux * 5
if np.max(filtflux) > ymax:
ymax = np.max(filtflux) * 1.4
ax.text(0.96, 0.06, r'${:.2f}<{}<{:.2f}$'.format(
fastmeta['COLORMIN'][indx], colorlabel,
fastmeta['COLORMAX'][indx]),
ha='right', va='bottom', transform=ax.transAxes, fontsize=10,
bbox=dict(boxstyle='round', facecolor='gray', alpha=0.25))
ax.text(0.04, 0.96, '\n'.join(( 'N={}, S/N={:.1f}'.format(
fastmeta['NOBJ'][indx], fastspec['CONTINUUM_SNR_ALL'][indx]), )),
ha='left', va='top', transform=ax.transAxes, fontsize=10,
bbox=dict(boxstyle='round', facecolor='gray', alpha=0.25))
print(ymin, ymax)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xticklabels([])
ax.set_yticklabels([])
if photometric_models:
ax.set_xscale('log')
plt.subplots_adjust(wspace=0.05, hspace=0.05, left=0.07, right=0.95, top=0.95, bottom=0.1)
if iplot == ncol*nrow-1:
break
fig.text(0.52, 0.968, r'${:.2f}<z<{:.2f}\ {:.1f}<{}<{:.1f}$'.format(
fastmeta['ZOBJMIN'][indx], fastmeta['ZOBJMAX'][indx],
fastmeta['{}MIN'.format('ABSMAG')][indx], absmaglabel,
fastmeta['{}MAX'.format('ABSMAG')][indx]),
ha='center', va='center', fontsize=22)
for rem in np.arange(ncol*nrow-iplot-1)+iplot+1:
allax.flatten()[rem].axis('off')
if pdffile and png is False:
pdf.savefig(fig)
plt.close()
if pdffile:
log.info('Writing {}'.format(pdffile))
if png:
fig.savefig(pdffile)
plt.close()
else:
pdf.close()
def qa_fastspec_emlinespec(targetclass, fastwave=None, fastflux=None, fastivar=None,
fastmeta=None, fastspec=None, fastspecfile=None, CFit=None,
EMFit=None, ncol=3, nrow=5, pdffile=None):
"""QA of the fastspec emission-line spectra.
"""
from matplotlib.colors import Normalize
from fastspecfit.templates.templates import remove_undetected_lines
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.sample import SAMPLE_PROPERTIES as props
from fastspecfit.templates.templates import rebuild_fastspec_spectrum, read_stacked_fastspec
sns, _ = plot_style()
if CFit is None or EMFit is None:
from fastspecfit.continuum import ContinuumFit
from fastspecfit.emlines import EMLineFit
CFit = ContinuumFit()
EMFit = EMLineFit()
if fastwave is None:
fastwave, fastflux, fastivar, fastmeta, fastspec = read_stacked_fastspec(fastspecfile)
fastspec_fix = remove_undetected_lines(fastspec, EMFit.linetable, devshift=False)
# plotting preferences
cmap = plt.cm.get_cmap('jet')
#cmap = sns.color_palette(as_cmap=True)
cnorm = Normalize(vmin=np.min(fastmeta['ZOBJ']), vmax=np.max(fastmeta['ZOBJ']))
inches_wide = 16
inches_fullspec = 6
inches_perline = inches_fullspec / 2.0
nlinepanels = 4
nline = len(set(EMFit.linetable['plotgroup']))
nlinerows = np.ceil(nline / nlinepanels).astype(int)
nrows = 1 + nlinerows
height_ratios = np.hstack([1, [0.5]*nlinerows])
plotsig_default = 150.0 # 300.0 # [km/s]
meanwaves, deltawaves, sigmas, linenames = [], [], [], []
for plotgroup in set(EMFit.linetable['plotgroup']):
I = np.where(plotgroup == EMFit.linetable['plotgroup'])[0]
linenames.append(EMFit.linetable['nicename'][I[0]])
meanwaves.append(np.mean(EMFit.linetable['restwave'][I]))
deltawaves.append((np.max(EMFit.linetable['restwave'][I]) -
np.min(EMFit.linetable['restwave'][I])) / 2)
sigmas.append(plotsig_default)
srt = np.argsort(meanwaves)
meanwaves = np.hstack(meanwaves)[srt]
deltawaves = np.hstack(deltawaves)[srt]
sigmas = np.hstack(sigmas)[srt]
linenames = np.hstack(linenames)[srt]
absmaglabel = props[targetclass]['absmag_label']
colorlabel = props[targetclass]['color_label']
# how many pages?
nobj = len(fastmeta)
icam = 0
restcolor = np.unique(fastmeta['COLOR'])
npage = len(restcolor)
if npage == 1:
png = True
else:
png = False
if pdffile:
if png:
pdffile = pdffile.replace('.pdf', '.png')
else:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages(pdffile)
# make the plot!
for ipage in np.arange(npage):
log.info('Building page {}/{}'.format(ipage+1, npage))
pageindx = np.where(restcolor[ipage] == fastmeta['COLOR'])[0]
absmag = sorted(set(fastmeta['ABSMAG'][pageindx])) # subpage
nsubpage = len(absmag)
for isubpage in np.arange(nsubpage):#[:1]:#[::2]:
subpageindx = np.where((absmag[isubpage] == fastmeta['ABSMAG'][pageindx]))[0]
fig = plt.figure(figsize=(inches_wide, 2*inches_fullspec + inches_perline*nlinerows))
gs = fig.add_gridspec(nrows, nlinepanels, height_ratios=height_ratios)
bigax = fig.add_subplot(gs[0, :])
ax, irow, icol = [], 1, 0
for iax in np.arange(nline):
icol = iax % nlinepanels
if iax > 0 and iax % nlinepanels == 0:
irow += 1
xx = fig.add_subplot(gs[irow, icol])
ax.append(xx)
bigymin, bigymax = 1e6, -1e6
lineymin, lineymax = np.zeros(nline)+1e6, np.zeros(nline)-1e6
removelabels = np.ones(nline, bool)
for iplot, indx in enumerate(pageindx[subpageindx]):
#log.info(ipage, isubpage, iplot, len(pageindx), len(subpageindx))
modelwave, continuum, smooth_continuum, emlinemodel, data = rebuild_fastspec_spectrum(
fastspec[indx], fastwave, fastflux[indx, :], fastivar[indx, :], CFit, EMFit)
#if fastmeta['IBIN'][indx] == 1262:
# pdb.set_trace()
redshift = data['zredrock']
emlineflux = data['flux'][icam] - continuum - smooth_continuum
modelwave /= (1+redshift) # rest-frame
label = 'z=[{:.2f}-{:.2f}] (N={})'.format(
fastmeta['ZOBJMIN'][indx], fastmeta['ZOBJMAX'][indx],
np.sum(fastmeta['ZOBJ'][pageindx[subpageindx]] == fastmeta['ZOBJ'][indx]))
#bigax.plot(modelwave/(1+redshift), emlineflux, color='gray')
bigax.plot(modelwave, emlinemodel, label=label, color=cmap(cnorm(fastmeta['ZOBJ'][indx])))
if -np.max(emlinemodel)*0.05 < bigymin:
bigymin = -np.max(emlinemodel)*0.05
if np.max(emlinemodel)*1.1 > bigymax:
bigymax = np.max(emlinemodel)*1.1
if np.max(emlinemodel) == 0.0:
bigymin, bigymax = 0.0, 1.0
# zoom in on individual emission lines
for iax, (meanwave, deltawave, sig, linename) in enumerate(zip(
meanwaves, deltawaves, sigmas, linenames)):
wmin = (meanwave - deltawave) - 8 * sig * meanwave / C_LIGHT
wmax = (meanwave + deltawave) + 8 * sig * meanwave / C_LIGHT
lineindx = np.where((modelwave > wmin) * (modelwave < wmax))[0]
if len(lineindx) > 1:
if np.min(emlinemodel[lineindx]) > 0.0: # at least one line kept (snr>3)
removelabels[iax] = False
ax[iax].plot(modelwave[lineindx], emlinemodel[lineindx],
color=cmap(cnorm(fastmeta['ZOBJ'][indx])))
if -np.max(emlinemodel[lineindx])*0.05 < lineymin[iax]:
lineymin[iax] = -np.max(emlinemodel[lineindx])*0.05
if np.max(emlinemodel[lineindx]) * 1.1 > lineymax[iax]:
lineymax[iax] = np.max(emlinemodel[lineindx]) * 1.1
if np.abs(lineymax[iax]-lineymin[iax]) < 1e-2:
removelabels[iax] = False
for iax, xx in enumerate(ax):
xx.text(0.08, 0.89, linenames[iax], ha='left', va='center',
transform=xx.transAxes, fontsize=20)
if removelabels[iax]:
xx.set_ylim(0, 1)
xx.set_xticklabels([])
xx.set_yticklabels([])
else:
if lineymax[iax] == lineymin[iax]:
lineymax[iax] = 1.0
xx.set_ylim(lineymin[iax], lineymax[iax])
xlim = xx.get_xlim()
xx.xaxis.set_major_locator(ticker.MaxNLocator(2))
# don't repeat the legend labels
hand, lab = bigax.get_legend_handles_labels()
ulabels = dict(zip(lab, hand))
bigax.legend(ulabels.values(), ulabels.keys(), fontsize=18, loc='upper left')
#bigax.legend(fontsize=18, loc='upper left')
bigax.set_ylim(bigymin, bigymax)
bigax.set_xlim(2600, 7200) # 3500, 9300)
bigax.set_title(r'${:.2f}<{}<{:.2f}\ {:.1f}<{}<{:.1f}$'.format(
fastmeta['COLORMIN'][indx], colorlabel,
fastmeta['COLORMAX'][indx],
fastmeta['ABSMAGMIN'][indx], absmaglabel,
fastmeta['ABSMAGMAX'][indx]))
#bigax.set_xlabel('Observed-frame Wavelength ($\AA$)')
plt.subplots_adjust(wspace=0.28, left=0.07, right=0.95, top=0.95, bottom=0.1)
if pdffile and png is False:
pdf.savefig(fig)
plt.close()
if pdffile:
log.info('Writing {}'.format(pdffile))
if png:
fig.savefig(pdffile)
plt.close()
else:
pdf.close()
def qa_photometry_templates(targetclass, samplefile=None, templatefile=None,
ntspace=5, png=None):
"""Compare the color-color tracks of the templates to the data.
"""
from fastspecfit.templates.sample import read_parent_sample
from fastspecfit.templates.templates import read_templates
if ntspace == 1:
prefix = 'All '
else:
prefix = ''
sns, _ = plot_style()
cmap = plt.cm.get_cmap('RdYlBu')
mincnt = 1
phot, spec, meta = read_parent_sample(samplefile)
def template_colors_zgrid(templatefile, targetclass):
"""Compute the colors of the templates on a fixed redshift grid.
"""
from speclite import filters
filt = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z', 'wise2010-W1')
wave, flux, meta = read_templates(templatefile)
nt = len(meta)
print('Number of templates = {}'.format(nt))
print(wave.min(), wave.max())
dz = 0.1
if targetclass == 'lrg':
zmin, zmax = 0.0, 1.4
elif targetclass == 'elg':
zmin, zmax = 0.0, 1.7
elif targetclass == 'bgs':
zmin, zmax = 0.0, 0.6
else:
pass
nz = np.round( (zmax - zmin) / dz ).astype('i2')
print('Number of redshift points = {}'.format(nz))
cc = dict(
redshift = np.linspace(zmin, zmax, nz),
gr = np.zeros((nt, nz), 'f4'),
rz = np.zeros((nt, nz), 'f4'),
rW1 = np.zeros((nt, nz), 'f4'),
zW1 = np.zeros((nt, nz), 'f4')
)
for iz, red in enumerate(cc['redshift']):
zwave = wave.astype('float') * (1 + red)
maggies = filt.get_ab_maggies(flux, zwave, mask_invalid=False)
cc['gr'][:, iz] = -2.5 * np.log10(maggies['decam2014-g'] / maggies['decam2014-r'] )
cc['rz'][:, iz] = -2.5 * np.log10(maggies['decam2014-r'] / maggies['decam2014-z'] )
cc['rW1'][:, iz] = -2.5 * np.log10(maggies['decam2014-r'] / maggies['wise2010-W1'] )
cc['zW1'][:, iz] = -2.5 * np.log10(maggies['decam2014-z'] / maggies['wise2010-W1'] )
return cc
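    # Note: speclite maggies are linear AB fluxes, so each color above is simply
    # -2.5*log10(f_a / f_b) = m_a - m_b evaluated on the redshifted template grid.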
# compute colors on a grid
log.info('Reading {}'.format(templatefile))
template_colors = template_colors_zgrid(templatefile, targetclass)
nt, nz = template_colors['gr'].shape
zmin = '{:.1f}'.format(template_colors['redshift'].min())
zmax = '{:.1f}'.format(template_colors['redshift'].max())
dz = '{:.1f}'.format(template_colors['redshift'][1] - template_colors['redshift'][0])
def elg_obs(phot, png=None):
grobslim = (-0.8, 1.8)
rzobslim = (-1, 2.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rz'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rz'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.17, 0.42, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.set_xlabel(r'$(r - z)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.set_xlim(rzobslim)
ax2.set_ylim(grobslim)
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.87, bottom=0.19, wspace=0.05)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def bgs_obs(phot, png=None):
grobslim = (-0.5, 2.5)
rzobslim = (-0.5, 1.5)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
ax1.grid(True)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rz'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rz'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.2, 0.1, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.set_xlim(rzobslim)
ax2.set_ylim(grobslim)
ax2.set_xlabel(r'$(r - z)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.87, bottom=0.19, wspace=0.05)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def lrg_obs(phot, png=None):
grobslim = (-0.2, 3)
rzobslim = (0.0, 3)
rW1obslim = (-0.3, 5.5)
zW1obslim = (-0.5, 3)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(phot['RMAG']-phot['W1MAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
#norm=LogNorm(vmin=1, vmax=100),
extent=np.hstack((rW1obslim, grobslim)))
ax1.set_xlabel(r'$(r - W1)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rW1obslim)
ax1.set_ylim(grobslim)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rW1'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rW1'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.1, 0.05, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.set_xlabel(r'$(r - W1)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.set_xlim(rW1obslim)
ax2.set_ylim(grobslim)
ax3.hexbin(phot['ZMAG']-phot['W1MAG'], phot['RMAG']-phot['ZMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zW1obslim, rzobslim)))
ax3.set_ylabel(r'$(r - z)_{\rm obs}$')
ax3.set_xlabel(r'$(z - W1)_{\rm obs}$')
ax3.set_xlim(zW1obslim)
ax3.set_ylim(rzobslim)
ax3.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax3.yaxis.set_major_locator(ticker.MultipleLocator(1))
for tt in np.arange(0, nt, ntspace):
ax4.plot(template_colors['zW1'][tt, :], template_colors['rz'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax4.scatter(template_colors['zW1'][tt, 0], template_colors['rz'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax4.text(0.05, 0.3, 'z=0.0', ha='left', va='bottom',
transform=ax4.transAxes, fontsize=14)
#ax4.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
# ha='left', va='bottom',
# transform=ax4.transAxes, fontsize=14)
ax4.yaxis.set_label_position('right')
ax4.yaxis.tick_right()
ax4.set_ylabel(r'$(r - z)_{\rm obs}$')
ax4.set_xlabel(r'$(z - W1)_{\rm obs}$')
ax4.set_xlim(zW1obslim)
ax4.set_ylim(rzobslim)
ax4.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax4.yaxis.set_major_locator(ticker.MultipleLocator(1))
for aa in (ax1, ax2, ax3, ax4):
aa.grid(True)
plt.subplots_adjust(top=0.95, left=0.1, right=0.9, bottom=0.13, wspace=0.05, hspace=0.28)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
# make the plots!
if targetclass == 'lrg':
lrg_obs(phot, png=png)
elif targetclass == 'elg':
elg_obs(phot, png=png)
elif targetclass == 'bgs':
bgs_obs(phot, png=png)
else:
pass
def qa_photometry(targetclass, samplefile=None, png_obs=None, png_rest=None, png_rest_bins=None):
"""QA of the observed- and rest-frame photometry.
"""
from matplotlib.colors import LogNorm
from fastspecfit.templates.sample import read_parent_sample, stacking_bins
sns, _ = plot_style()
cmap = plt.cm.get_cmap('RdYlBu')
mincnt = 1
phot, spec, meta = read_parent_sample(samplefile)
bins = stacking_bins(targetclass, verbose=True)
def bgs_obs(phot, png=None):
robslim = (15, 21.0)
grobslim = (-0.2, 2.5)
rzobslim = (-0.5, 1.5)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharey=True)
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
hb = ax2.hexbin(phot['RMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((robslim, grobslim)))
ax2.set_xlabel(r'$r_{\rm obs}$')
ax2.set_ylim(grobslim)
ax2.set_xlim(robslim)
cax = fig.add_axes([0.88, 0.12, 0.02, 0.83])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, cax=cax, format=formatter, label='Number of Galaxies')
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.85, bottom=0.19, wspace=0.07)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def bgs_rest(phot, meta, bins=None, png=None):
zlim = (0.0, 0.6)
Mrlim = (-16, -25)
grlim = (-0.2, 1.2)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(meta['Z'], phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, Mrlim)))
ax1.set_ylim(Mrlim)
ax1.set_xlim(zlim)
ax1.set_xlabel('Redshift')
ax1.set_ylabel(r'$M_{0.0r}$')
#ax1.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0]
[ax1.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['ABSMAGMIN'])]
ax2.hexbin(meta['Z'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, grlim)))
ax2.set_xlim(zlim)
ax2.set_ylim(grlim)
ax2.set_xlabel('Redshift')
ax2.set_ylabel(r'$^{0.0}(g - r)$')#, labelpad=-10)
#ax2.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
#ax2.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax2.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['COLORMIN'])]
hb = ax3.hexbin(phot['ABSMAG_R'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((Mrlim, grlim)))
ax3.set_xlabel(r'$M_{0.0r}$')
ax3.set_ylabel(r'$^{0.0}(g - r)$')#, labelpad=-10)
ax3.set_xlim(Mrlim)
ax3.set_ylim(grlim)
#ax3.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax3.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ABSMAGMIN'], bins['COLORMIN'])]
ax4.axis('off')
cax = fig.add_axes([0.49, 0.12, 0.02, 0.36])
#cax = fig.add_axes([0.54, 0.4, 0.35, 0.03])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, format=formatter, label='Number of Galaxies',
cax=cax)#, orientation='horizontal')
for aa in (ax1, ax2, ax3):
aa.grid(True)
plt.subplots_adjust(left=0.1, top=0.95, wspace=0.3, hspace=0.3, right=0.88, bottom=0.13)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def elg_obs(phot, png=None):
gobslim = (19.5, 24.5)
grobslim = (-1.2, 1.2)
rzobslim = (-1.5, 2.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharey=True)
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
hb = ax2.hexbin(phot['GMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((gobslim, grobslim)))
ax2.set_xlabel(r'$g_{\rm obs}$')
ax2.set_ylim(grobslim)
ax2.set_xlim(gobslim)
cax = fig.add_axes([0.88, 0.12, 0.02, 0.83])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, cax=cax, format=formatter, label='Number of Galaxies')
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.85, bottom=0.19, wspace=0.07)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def elg_rest(phot, meta, bins=None, png=None):
zlim = (0.5, 1.6)
Mglim = (-18, -25)
grlim = (-0.5, 1.0)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(meta['Z'], phot['ABSMAG_G'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, Mglim)))
ax1.set_ylim(Mglim)
ax1.set_xlim(zlim)
ax1.set_xlabel('Redshift')
ax1.set_ylabel(r'$M_{0.0g}$')
ax1.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0]
[ax1.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['ABSMAGMIN'])]
ax2.hexbin(meta['Z'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, grlim)))
ax2.set_xlim(zlim)
ax2.set_ylim(grlim)
ax2.set_xlabel('Redshift')
ax2.set_ylabel(r'$^{0.0}(g - r)$', labelpad=-10)
ax2.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
ax2.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax2.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['COLORMIN'])]
hb = ax3.hexbin(phot['ABSMAG_G'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((Mglim, grlim)))
ax3.set_xlabel(r'$M_{0.0g}$')
ax3.set_ylabel(r'$^{0.0}(g - r)$', labelpad=-10)
ax3.set_xlim(Mglim)
ax3.set_ylim(grlim)
ax3.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax3.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ABSMAGMIN'], bins['COLORMIN'])]
ax4.axis('off')
cax = fig.add_axes([0.49, 0.12, 0.02, 0.36])
#cax = fig.add_axes([0.54, 0.4, 0.35, 0.03])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, format=formatter, label='Number of Galaxies',
cax=cax)#, orientation='horizontal')
for aa in (ax1, ax2, ax3):
aa.grid(True)
plt.subplots_adjust(left=0.1, top=0.95, wspace=0.3, hspace=0.3, right=0.88, bottom=0.13)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def lrg_obs(phot, png=None):
zobslim = (16, 22)
W1obslim = (16, 21)
grobslim = (0.0, 4)
rzobslim = (0.0, 3)
rW1obslim = (0.7, 4.5)
zW1obslim = (0, 2.7)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(phot['RMAG']-phot['W1MAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
#norm=LogNorm(vmin=1, vmax=100),
extent=np.hstack((rW1obslim, grobslim)))
ax1.set_xlabel(r'$(r - W1)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rW1obslim)
ax1.set_ylim(grobslim)
ax2.hexbin(phot['ZMAG']-phot['W1MAG'], phot['RMAG']-phot['ZMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent= | np.hstack((zW1obslim, rzobslim)) | numpy.hstack |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
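# AUTH holds the (API username, API token) pair from the user's OOINet profile
# (the token here is a redacted placeholder); the requests below pass it as HTTP
# basic auth.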
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            # elapsed polling time in minutes (one 3-second retry per iteration)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
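# Illustrative end-to-end sketch. The reference designator is one of the datasets
# wired up in M2M_URLs below; the date strings and the regex tag are assumptions
# about a typical request, not values taken from this module:
#
#   data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                   '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   nclist = M2M_Files(data, '.*METBK.*\.nc$')
#   var_list = structtype()
#   var_list[0].name = 'time'
#   var_list[1].name = 'sea_surface_temperature'
#   var_list, time_converted = M2M_Data(nclist, var_list)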
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The matching files can then be opened through the THREDDS OPeNDAP endpoints (see M2M_Data).
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF files in the THREDDS catalog that match the regex tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
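    # variables[0] is always the 'time' channel in this module, stored as seconds
    # since 1900-01-01; convert to days so pandas can build datetimes from that origin.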
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
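# structtype grows implicitly: asking for the element one past the end appends a
# fresh `var`, which is how M2M_URLs below fills var_list[0], var_list[1], ...
# without any explicit append. For example:
#   var_list = structtype()
#   var_list[0].name = 'time'   # creates element 0 on first access
#   len(var_list)               # -> 1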
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
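# FDCHP = direct-covariance flux package on the surface buoy; only the time
# coordinate is requested from this stream here.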
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
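# DOSTA = dissolved-oxygen optode. Note the two stream flavors below: the ISSM
# NSIF/MFN entries use the CTDBP-hosted dosta_abcdjm_ctdbp_dcl_instrument
# stream, while the SHSM/OSSM NSIF entries use the DCL-logged stream that also
# carries optode temperature.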
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
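# ADCP = acoustic Doppler current profiler: earth-referenced velocity
# components per depth bin, plus instrument attitude (heading/pitch/roll in
# deci-degrees) and the bin depths.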
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
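# ZPLSC = bio-acoustic sonar; only the time coordinate is requested for the
# telemetered and recovered_host streams below.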
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
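# WAVSS = surface wave spectra: bulk wave statistics (average/max/significant
# wave height, characteristic periods, mean direction and directional spread).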
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
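# VELPT = single-point velocity meter (typically a Nortek Aquadopp): velocity
# components in m/s, with attitude, temperature and pressure reported in the
# instrument's raw scaled units (deci-degrees, 0.01 degC, 0.001 dbar).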
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
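# PCO2W = seawater pCO2 sensor (SAMI-type): thermistor temperature in degC and
# pCO2 in microatmospheres.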
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
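# PHSEN = seawater pH sensor (SAMI-type): thermistor temperature in degC and
# pH (dimensionless).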
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
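# SPKIR = downwelling spectral irradiance (multispectral radiometer); the
# single downwelling_vector variable holds the per-wavelength channels, in
# uW cm-2 nm-1.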
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
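# PRESF = seafloor pressure (tide) recorder: absolute seafloor pressure in
# dbar and seawater temperature in degC.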
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
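# CTDBP = pumped, moored CTD: temperature, practical salinity, density,
# pressure and conductivity from the ctdbp_cdef_dcl_instrument stream.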
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
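# VEL3D (3-D single-point velocity meter) branches: turbulent east/north/up velocity
# components and seawater pressure from the MFN-mounted instruments.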
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
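# VEL3D series K on the CE09OSPM wire-following profiler: east/north/up velocities,
# instrument heading/pitch/roll, and the co-located CTD pressure; the profiler
# CTD (CTDPF-K) branch follows immediately after.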
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
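# PCO2A (air-sea pCO2) branches: surface-water and atmospheric partial pressure of CO2
# plus the derived air-sea CO2 flux from the surface buoys.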
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
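# PARAD (photosynthetically available radiation) branch on the CE09OSPM profiler:
# PAR and the co-located CTD pressure.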
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
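# OPTAA (absorption/attenuation spectrophotometer) branches: only the time coordinate
# is requested here; the spectral absorption and attenuation products are not listed.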
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
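# NUTNR (nitrate, SUNA) branches: raw and salinity-corrected nitrate concentrations
# from the NSIF packages.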
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## RecoveredHost (data recovered from the platform data logger) streams begin here
#MOPAK
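# MOPAK (3-axis motion package) branches: recovered-host buoy motion streams; only the
# time coordinate is requested.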
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
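# METBK (bulk meteorology package) branches: sea-surface temperature/conductivity/salinity,
# corrected wind components, barometric pressure, air temperature, relative humidity,
# long- and short-wave irradiance, precipitation, minute-averaged heat/latent/net-longwave/
# sensible fluxes, surface current components, and specific humidity.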
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
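# FLORT (three-wavelength fluorometer) branches: scattering coefficient, chlorophyll-a,
# CDOM, total volume scattering, and optical backscatter.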
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
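# FDCHP (direct-covariance flux package) branch on the CE02SHSM buoy: only the time
# coordinate is requested here.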
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
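# DOSTA (dissolved-oxygen optode) branches: NSIF streams carry dissolved oxygen, estimated
# oxygen concentration, optode temperature, and temperature-corrected oxygen; MFN streams
# carry oxygen from the CTDBP-hosted optode.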
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
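# ADCP (acoustic Doppler current profiler) branches: bin depths, instrument attitude
# (heading/pitch/roll), and earth-coordinate east/north/up seawater velocities.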
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
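# WAVSS (surface wave spectra) branches: bulk wave statistics (zero crossings, wave
# heights, periods, direction, and spread) from the surface buoys.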
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
        var_list[6].data = np.array([])
from torch.utils.data import Dataset
import numpy as np
import random
import os
import time
from tqdm import tqdm
import torch
import logging
import json
from SoccerNet.Downloader import getListGames
from SoccerNet.Downloader import SoccerNetDownloader
from SoccerNet.Evaluation.utils import AverageMeter, EVENT_DICTIONARY_V2, INVERSE_EVENT_DICTIONARY_V2
from SoccerNet.Evaluation.utils import EVENT_DICTIONARY_V1, INVERSE_EVENT_DICTIONARY_V1
def feats2clip(feats, stride, clip_length, padding = "replicate_last", off=0):
if padding =="zeropad":
print("beforepadding", feats.shape)
pad = feats.shape[0] - int(feats.shape[0]/stride)*stride
print("pad need to be", clip_length-pad)
m = torch.nn.ZeroPad2d((0, 0, clip_length-pad, 0))
feats = m(feats)
print("afterpadding", feats.shape)
# nn.ZeroPad2d(2)
idx = torch.arange(start=0, end=feats.shape[0]-1, step=stride)
idxs = []
for i in torch.arange(-off, clip_length-off):
idxs.append(idx+i)
idx = torch.stack(idxs, dim=1)
if padding=="replicate_last":
idx = idx.clamp(0, feats.shape[0]-1)
# print(idx)
return feats[idx,...]
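# A small sanity-check sketch for feats2clip (not part of the original module,
# added for illustration): with 10 frames of 4-dim features, stride 3 and
# clip_length 3, clips start at frames 0, 3 and 6, so the output has shape
# (3, 3, 4); indices past the last frame would be clamped by "replicate_last".
def _feats2clip_demo():
    feats = torch.arange(40, dtype=torch.float32).view(10, 4)
    clips = feats2clip(feats, stride=3, clip_length=3)
    assert clips.shape == (3, 3, 4)
    return clips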
class SoccerNetClips(Dataset):
def __init__(self, path, features="ResNET_PCA512.npy", split=["train"], version=1,
framerate=2, window_size=15):
self.path = path
self.listGames = getListGames(split)
self.features = features
self.window_size_frame = window_size*framerate
self.version = version
if version == 1:
self.num_classes = 3
self.labels="Labels.json"
elif version == 2:
self.dict_event = EVENT_DICTIONARY_V2
self.num_classes = 17
self.labels="Labels-v2.json"
logging.info("Checking/Download features and labels locally")
downloader = SoccerNetDownloader(path)
downloader.downloadGames(files=[self.labels, f"1_{self.features}", f"2_{self.features}"], split=split, verbose=False,randomized=True)
logging.info("Pre-compute clips")
self.game_feats = list()
self.game_labels = list()
# game_counter = 0
for game in tqdm(self.listGames):
# Load features
feat_half1 = np.load(os.path.join(self.path, game, "1_" + self.features))
feat_half1 = feat_half1.reshape(-1, feat_half1.shape[-1])
feat_half2 = np.load(os.path.join(self.path, game, "2_" + self.features))
feat_half2 = feat_half2.reshape(-1, feat_half2.shape[-1])
feat_half1 = feats2clip(torch.from_numpy(feat_half1), stride=self.window_size_frame, clip_length=self.window_size_frame)
feat_half2 = feats2clip(torch.from_numpy(feat_half2), stride=self.window_size_frame, clip_length=self.window_size_frame)
# Load labels
labels = json.load(open(os.path.join(self.path, game, self.labels)))
label_half1 = np.zeros((feat_half1.shape[0], self.num_classes+1))
label_half1[:,0]=1 # those are BG classes
label_half2 = np.zeros((feat_half2.shape[0], self.num_classes+1))
label_half2[:,0]=1 # those are BG classes
for annotation in labels["annotations"]:
time = annotation["gameTime"]
event = annotation["label"]
half = int(time[0])
minutes = int(time[-5:-3])
seconds = int(time[-2::])
frame = framerate * ( seconds + 60 * minutes )
if version == 1:
if "card" in event: label = 0
elif "subs" in event: label = 1
elif "soccer" in event: label = 2
else: continue
elif version == 2:
if event not in self.dict_event:
continue
label = self.dict_event[event]
# if label outside temporal of view
if half == 1 and frame//self.window_size_frame>=label_half1.shape[0]:
continue
if half == 2 and frame//self.window_size_frame>=label_half2.shape[0]:
continue
if half == 1:
label_half1[frame//self.window_size_frame][0] = 0 # not BG anymore
label_half1[frame//self.window_size_frame][label+1] = 1 # that's my class
if half == 2:
label_half2[frame//self.window_size_frame][0] = 0 # not BG anymore
label_half2[frame//self.window_size_frame][label+1] = 1 # that's my class
self.game_feats.append(feat_half1)
self.game_feats.append(feat_half2)
self.game_labels.append(label_half1)
self.game_labels.append(label_half2)
self.game_feats = np.concatenate(self.game_feats)
self.game_labels = np.concatenate(self.game_labels)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
            clip_feat (np.array): clip of features.
            clip_labels (np.array): clip labels (background + event classes).
"""
return self.game_feats[index,:,:], self.game_labels[index,:]
def __len__(self):
return len(self.game_feats)
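# A hedged usage sketch for the dataset above (the SoccerNet path below is a
# placeholder; constructing the dataset will check/download features and labels
# locally via SoccerNetDownloader):
# train_dataset = SoccerNetClips(path="/path/to/SoccerNet", features="ResNET_PCA512.npy",
#                                split=["train"], version=2, framerate=2, window_size=15)
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=256, shuffle=True)
# clip_feats, clip_labels = next(iter(train_loader))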
class SoccerNetClipsTesting(Dataset):
def __init__(self, path, features="ResNET_PCA512.npy", split=["test"], version=1,
framerate=2, window_size=15):
self.path = path
self.listGames = getListGames(split)
self.features = features
self.window_size_frame = window_size*framerate
self.framerate = framerate
self.version = version
self.split=split
if version == 1:
self.dict_event = EVENT_DICTIONARY_V1
self.num_classes = 3
self.labels="Labels.json"
elif version == 2:
self.dict_event = EVENT_DICTIONARY_V2
self.num_classes = 17
self.labels="Labels-v2.json"
logging.info("Checking/Download features and labels locally")
downloader = SoccerNetDownloader(path)
for s in split:
if s == "challenge":
downloader.downloadGames(files=[f"1_{self.features}", f"2_{self.features}"], split=[s], verbose=False,randomized=True)
else:
downloader.downloadGames(files=[self.labels, f"1_{self.features}", f"2_{self.features}"], split=[s], verbose=False,randomized=True)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
feat_half1 (np.array): features for the 1st half.
feat_half2 (np.array): features for the 2nd half.
label_half1 (np.array): labels (one-hot) for the 1st half.
label_half2 (np.array): labels (one-hot) for the 2nd half.
"""
# Load features
feat_half1 = np.load(os.path.join(self.path, self.listGames[index], "1_" + self.features))
feat_half1 = feat_half1.reshape(-1, feat_half1.shape[-1])
feat_half2 = np.load(os.path.join(self.path, self.listGames[index], "2_" + self.features))
feat_half2 = feat_half2.reshape(-1, feat_half2.shape[-1])
# Load labels
        label_half1 = np.zeros((feat_half1.shape[0], self.num_classes))
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_Winding_num.ipynb (unless otherwise specified).
__all__ = ['Fukui_Kitaev_LR_QP_wn', 'Fukui_Kitaev_LR_QP_inf_wn', 'Real_space_Kitaev_LR_QP_wn', 'w',
'Chiral_Kitaev_QP_LR_wn', 'd_num_h_Kitaev_LR_QP_inf', 'd_num_h_Kitaev_LR_QP_inf', 'Fukui_Kitaev_AA_2D_chern',
'Fukui_Kitaev_AA_2D_chern_inf', 'd_num_k_H_Kitaev_LR_QP', 'd_num_phase_H_Kitaev_LR_QP',
'compute_Kitaev_AA_wn_TKNN', 'd_num_k_H_Kitaev_LR_QP_inf', 'd_num_phase_H_Kitaev_LR_QP_inf',
'compute_Kitaev_AA_wn_TKNN_inf']
# Cell
from .Hamiltonians import *
import numpy as np
from mpmath import *
# Cell
def Fukui_Kitaev_LR_QP_wn(params, k_l, mu, L, rot = True, close_loop = True):
'''Compute the winding number using Fukui for the finite AAH-LR Kitaev chain.'''
Fn = params['Fn']
U = np.zeros(len(k_l), dtype=complex)
F = 0
if close_loop == False:
k_l_range = len(k_l)-1
else:
k_l_range = len(k_l)
for i in range(k_l_range):
evals, evecs = np.linalg.eigh(H_Kitaev_LR_QP(params, k_l[i], mu, L, rot))
evecs1 = evecs[:,:Fn]
if i == range(len(k_l))[-1]:
k = k_l[0]
else:
k = k_l[i+1]
evals, evecs = np.linalg.eigh(H_Kitaev_LR_QP(params, k, mu, L, rot))
evecs2 = evecs[:,:Fn]
Ui=np.dot(np.conjugate(np.transpose(evecs2)),evecs1)
Ui=np.linalg.det(Ui)
#Ui=Ui/np.abs(Ui)
U[i]=Ui
F = np.imag(np.log(np.prod(U)))
return F, U
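# A hedged usage sketch (illustration only): the contents of `params` are set by
# the Hamiltonians module and are not repeated here; we only rely on it carrying
# 'Fn', the number of occupied bands used above. F is the phase of the product
# of the link overlaps U accumulated around the k-loop.
# k_l = np.linspace(0, 2 * np.pi, 101)
# F, U = Fukui_Kitaev_LR_QP_wn(params, k_l, mu=0.0, L=55)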
def Fukui_Kitaev_LR_QP_inf_wn(params, k_l, mu, rot = True, close_loop = True):
'''Compute the winding number using Fukui for the infinite AAH-LR Kitaev chain.'''
Fn = params['Fn']
U = np.zeros(len(k_l), dtype=complex)
F = 0
if close_loop == False:
k_l_range = len(k_l)-1
else:
k_l_range = len(k_l)
for i in range(k_l_range):
evals, evecs = np.linalg.eigh(H_Kitaev_LR_QP_inf(params, k_l[i], mu, rot))
evecs1 = evecs[:,:Fn]
if i == range(len(k_l))[-1]:
k = k_l[0]
else:
k = k_l[i+1]
evals, evecs = np.linalg.eigh(H_Kitaev_LR_QP_inf(params, k, mu, rot))
evecs2 = evecs[:,:Fn]
Ui=np.dot(np.conjugate(np.transpose(evecs2)),evecs1)
        Ui=np.linalg.det(Ui)
import os
import numpy as np
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
from time import sleep
def warm_up_exercise():
A = np.eye(5)
return A
def plot_data(x, y):
figure = pyplot.figure()
pyplot.plot(x, y, 'ro', ms=10, mec='k')
pyplot.ylabel('Profit in $10,000s')
pyplot.xlabel('Population of City in 10,000s')
def plot_cost_hist(J_hist, rate):
pyplot.plot(np.arange(len(J_hist)), J_hist, '-', ms=10, mec='k')
pyplot.ylabel('Cost J')
pyplot.xlabel('Iterations')
pyplot.plot(np.arange(len(J_hist)), J_hist, '-')
def compute_cost(X, y, theta):
m = y.shape[0]
J = (1/(2*m)) * np.sum( np.square((np.dot(X, theta) - y)) )
return J
def gradient_descent(X, y, theta, alpha, num_iters):
m = y.shape[0]
adjusted_theta = theta.copy()
J_hist = []
for i in range(0, num_iters):
adjusted_theta -= (alpha / m) * (np.dot(X, adjusted_theta) - y).dot(X)
J_hist.append(compute_cost(X, y, adjusted_theta))
return J_hist, adjusted_theta
def predict(X, theta):
pred = np.dot(X, theta)
print("For population =", X[1]*10000, ' we predict a profit of {:.2f}\n'.format(pred*10000))
def visualize_cost(X, y, trained_theta):
# Step over theta0 values in range -10,10 with 100 steps
# theta0_vals => (100 x 1)
theta0_vals = np.linspace(-10,10,100)
    # Step over theta1 values in range -1,4 with 100 steps
    # theta1_vals => (100 x 1)
    theta1_vals = np.linspace(-1,4,100)
# Create a matrix of costs at different values of theta0 and theta1
# J_vals => (100 x 100)
    J_vals = np.zeros((theta0_vals.shape[0], theta1_vals.shape[0]))
import transform
import unittest
import numpy as np
import sys
sys.path.insert(0, '../')
import pdb
import rotateCorrection as rc
# another crude transformation computer
def _angle(x1, y1, x2, y2):
# inputs are in angle
dx = x2 - x1
dy = y2 - y1
if dy < 0:
return np.arctan(abs(dy)/dx)
else:
return -np.arctan(abs(dy)/dx)
def simple_angle_converter(pointpx, top_right, top_left, bottom_left, bottom_right, imagesize):
"""
All coordinate tuples are in (x, y) i.e. (lon, lat) convention.
pointpx (x, y): pixel coordinates counted from top left corner.
top_right, top_left, bottom_left, bottom_right: (lon, lat) pairs
imagesize: (width, height) tuple
"""
# first wrangle inputs
image_width, image_height = imagesize
px, py = pointpx
tr, tl, bl, br = top_right, top_left, bottom_left, bottom_right
# now start converting
image_width_in_lon = (tr[0] - tl[0] + br[0] - bl[0])/2
image_height_in_lat = (tl[1] - bl[1] + tr[1] - br[1])/2
top_left_lon, top_left_lat = tl
# 2. now convert (px, py) -> (dlon, dlat)
dlon = px*image_width_in_lon/image_width
dlat = py*image_height_in_lat/image_height
# compute the angle via simple trig.
angle_est1 = _angle(tl[0], tl[1], tr[0], tr[1])
angle_est2 = _angle(bl[0], bl[1], br[0], br[1])
angle = (angle_est1+angle_est2)/2
print(f"angle: {angle}")
rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
# apply reverse rotation: x2, y2 (unit: meter)
x2, y2 = np.dot(rot_matrix, np.array([dlon, dlat]))
# convert x2, y2 to lon, lat
actual_lon = top_left_lon + x2
actual_lat = top_left_lat - y2
return actual_lon, actual_lat
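# A quick axis-aligned sanity check for simple_angle_converter (illustration
# only): with an unrotated bounding box, pixel (0, 0) should map back to the
# top-left corner and (width, height) to the bottom-right corner.
def _simple_angle_converter_check():
    tl, tr = (20.0, 70.0), (30.0, 70.0)
    bl, br = (20.0, 10.0), (30.0, 10.0)
    size = (10, 60)
    assert np.allclose(simple_angle_converter((0, 0), tr, tl, bl, br, size), tl)
    assert np.allclose(simple_angle_converter((10, 60), tr, tl, bl, br, size), br)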
class TestTransformLukas(unittest.TestCase):
def test_unrotated_picture(self):
width = 10
height = 60
lon_min = 20
lon_max = 30
lat_min = 10
lat_max = 70
top_left = np.array([lon_min, lat_max])
top_right = np.array([lon_max, lat_max])
bottom_left = np.array([lon_min, lat_min])
bottom_right = np.array([lon_max, lat_min])
# check that top left is sane
res = transform.transform(np.array([0, 0]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, top_left))
# check that top right is sane
res = transform.transform(np.array([width,0]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, top_right))
# check that bottom left is sane
res = transform.transform(np.array([0,height]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, bottom_left))
# check that bottom right is sane
res = transform.transform(np.array([width,height]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, bottom_right))
class TestBasicTransform(unittest.TestCase):
def test_unrotated_picture(self):
width = 10
height = 60
lon_min = 20
lon_max = 30
lat_min = 10
lat_max = 70
top_left = np.array([lon_min, lat_max])
top_right = np.array([lon_max, lat_max])
        bottom_left = np.array([lon_min, lat_min])
# -*- coding: utf-8 -*-
import numpy as np
import multiprocessing
import torch
from torch import nn, Tensor
from ctp.kernels import GaussianKernel
from ctp.clutrr.models import BatchNeuralKB, BatchHoppy, BatchUnary, BatchMulti
from ctp.reformulators import SymbolicReformulator
from typing import List, Dict, Tuple, Optional
import pytest
torch.set_num_threads(multiprocessing.cpu_count())
def encode_relation(facts: List[Tuple[str, str, str]],
relation_embeddings: nn.Embedding,
relation_to_idx: Dict[str, int],
device: Optional[torch.device] = None) -> Tensor:
indices_np = np.array([relation_to_idx[r] for _, r, _ in facts], dtype=np.int64)
indices = torch.from_numpy(indices_np)
if device is not None:
indices = indices.to(device)
return relation_embeddings(indices)
def encode_arguments(facts: List[Tuple[str, str, str]],
entity_embeddings: nn.Embedding,
entity_to_idx: Dict[str, int],
device: Optional[torch.device] = None) -> Tuple[Tensor, Tensor]:
indices_np = np.array([[entity_to_idx[s], entity_to_idx[o]] for s, _, o in facts], dtype=np.int64)
indices = torch.from_numpy(indices_np)
if device is not None:
indices = indices.to(device)
emb = entity_embeddings(indices)
return emb[:, 0, :], emb[:, 1, :]
@pytest.mark.light
def test_adv_v1():
embedding_size = 20
torch.manual_seed(0)
rs = np.random.RandomState(0)
triples = [
('a', 'p', 'b'),
('b', 'q', 'c'),
('c', 'p', 'd'),
('d', 'q', 'e'),
('e', 'p', 'f'),
('f', 'q', 'g'),
('g', 'p', 'h'),
('h', 'q', 'i'),
('i', 'p', 'l'),
('l', 'q', 'm'),
('m', 'p', 'n'),
('n', 'q', 'o'),
('o', 'p', 'p'),
('p', 'q', 'q'),
('q', 'p', 'r'),
('r', 'q', 's'),
('s', 'p', 't'),
('t', 'q', 'u'),
('u', 'p', 'v'),
('v', 'q', 'w'),
('x', 'r', 'y'),
('x', 's', 'y')
]
entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
predicate_lst = sorted({p for (_, p, _) in triples})
nb_entities = len(entity_lst)
nb_predicates = len(predicate_lst)
entity_to_index = {e: i for i, e in enumerate(entity_lst)}
predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
with torch.no_grad():
kernel = GaussianKernel()
entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
rel_emb = encode_relation(facts=triples,
relation_embeddings=predicate_embeddings,
relation_to_idx=predicate_to_index)
arg1_emb, arg2_emb = encode_arguments(facts=triples,
entity_embeddings=entity_embeddings,
entity_to_idx=entity_to_index)
batch_size = 12
fact_size = rel_emb.shape[0]
entity_size = entity_embeddings.weight.shape[0]
rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)
emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
_nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)
facts = [rel_emb, arg1_emb, arg2_emb]
model = BatchNeuralKB(kernel=kernel)
indices = torch.from_numpy(np.array([predicate_to_index['p'], predicate_to_index['q']]))
reformulator = SymbolicReformulator(predicate_embeddings, indices)
hoppy0 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=0)
hoppy1 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=1)
hoppy2 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=2)
hoppy3 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=3)
xs_np = rs.randint(nb_entities, size=batch_size)
xp_np = rs.randint(nb_predicates, size=batch_size)
xo_np = rs.randint(nb_entities, size=batch_size)
xs_np[0] = entity_to_index['a']
xp_np[0] = predicate_to_index['r']
xo_np[0] = entity_to_index['c']
xs_np[1] = entity_to_index['a']
xp_np[1] = predicate_to_index['r']
xo_np[1] = entity_to_index['e']
xs_np[2] = entity_to_index['a']
xp_np[2] = predicate_to_index['r']
xo_np[2] = entity_to_index['g']
xs_np[3] = entity_to_index['a']
xp_np[3] = predicate_to_index['r']
xo_np[3] = entity_to_index['i']
xs_np[4] = entity_to_index['a']
xp_np[4] = predicate_to_index['r']
xo_np[4] = entity_to_index['m']
xs_np[5] = entity_to_index['a']
xp_np[5] = predicate_to_index['r']
xo_np[5] = entity_to_index['o']
xs_np[6] = entity_to_index['a']
xp_np[6] = predicate_to_index['r']
xo_np[6] = entity_to_index['q']
xs_np[7] = entity_to_index['a']
xp_np[7] = predicate_to_index['r']
xo_np[7] = entity_to_index['s']
xs_np[8] = entity_to_index['a']
xp_np[8] = predicate_to_index['r']
xo_np[8] = entity_to_index['u']
xs = torch.from_numpy(xs_np)
xp = torch.from_numpy(xp_np)
xo = torch.from_numpy(xo_np)
xs_emb = entity_embeddings(xs)
xp_emb = predicate_embeddings(xp)
xo_emb = entity_embeddings(xo)
inf0 = hoppy0.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
entity_embeddings=emb, nb_entities=_nb_entities)
inf1 = hoppy1.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
entity_embeddings=emb, nb_entities=_nb_entities)
inf2 = hoppy2.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
entity_embeddings=emb, nb_entities=_nb_entities)
inf3 = hoppy3.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
entity_embeddings=emb, nb_entities=_nb_entities)
inf0_np = inf0.cpu().numpy()
inf1_np = inf1.cpu().numpy()
inf2_np = inf2.cpu().numpy()
inf3_np = inf3.cpu().numpy()
np.testing.assert_allclose(inf0_np, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
np.testing.assert_allclose(inf1_np, [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
np.testing.assert_allclose(inf2_np, [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
np.testing.assert_allclose(inf3_np, [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
print(inf3_np)
@pytest.mark.light
def test_adv_v2():
embedding_size = 20
torch.manual_seed(0)
rs = np.random.RandomState(0)
triples = [
('a', 'p', 'b'),
('a', 'p', 'd'),
('c', 'p', 'd'),
('e', 'q', 'f'),
('f', 'p', 'c'),
('x', 'r', 'y')
]
entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
predicate_lst = sorted({p for (_, p, _) in triples})
nb_entities = len(entity_lst)
nb_predicates = len(predicate_lst)
entity_to_index = {e: i for i, e in enumerate(entity_lst)}
predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
with torch.no_grad():
kernel = GaussianKernel()
entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
rel_emb = encode_relation(facts=triples,
relation_embeddings=predicate_embeddings,
relation_to_idx=predicate_to_index)
arg1_emb, arg2_emb = encode_arguments(facts=triples,
entity_embeddings=entity_embeddings,
entity_to_idx=entity_to_index)
batch_size = 6
fact_size = rel_emb.shape[0]
entity_size = entity_embeddings.weight.shape[0]
rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)
emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
_nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)
facts = [rel_emb, arg1_emb, arg2_emb]
model = BatchNeuralKB(kernel=kernel)
indices = torch.from_numpy(np.array([predicate_to_index['p']]))
reformulator = SymbolicReformulator(predicate_embeddings, indices)
unary = BatchUnary(model, hops_lst=[(reformulator, False)])
xs_np = rs.randint(nb_entities, size=batch_size)
xp_np = rs.randint(nb_predicates, size=batch_size)
xo_np = rs.randint(nb_entities, size=batch_size)
xs_np[0] = entity_to_index['a']
xp_np[0] = predicate_to_index['r']
xo_np[0] = entity_to_index['a']
xs_np[1] = entity_to_index['a']
xp_np[1] = predicate_to_index['r']
xo_np[1] = entity_to_index['b']
xs_np[2] = entity_to_index['a']
xp_np[2] = predicate_to_index['r']
xo_np[2] = entity_to_index['c']
xs_np[3] = entity_to_index['a']
xp_np[3] = predicate_to_index['r']
xo_np[3] = entity_to_index['d']
xs_np[4] = entity_to_index['a']
xp_np[4] = predicate_to_index['r']
xo_np[4] = entity_to_index['e']
xs_np[5] = entity_to_index['a']
xp_np[5] = predicate_to_index['r']
xo_np[5] = entity_to_index['f']
xs = torch.from_numpy(xs_np)
xp = torch.from_numpy(xp_np)
xo = torch.from_numpy(xo_np)
xs_emb = entity_embeddings(xs)
xp_emb = predicate_embeddings(xp)
xo_emb = entity_embeddings(xo)
inf = unary.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
entity_embeddings=emb, nb_entities=_nb_entities)
inf_np = inf.cpu().numpy()
print(inf_np)
np.testing.assert_allclose(inf_np, [1] * batch_size, rtol=1e-2, atol=1e-2)
@pytest.mark.light
def test_adv_v3():
embedding_size = 20
torch.manual_seed(0)
rs = np.random.RandomState(0)
triples = [
('a', 'p', 'b'),
('a', 'p', 'd'),
('c', 'p', 'd'),
('e', 'q', 'f'),
('f', 'p', 'c'),
('x', 'r', 'y')
]
entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
predicate_lst = sorted({p for (_, p, _) in triples})
nb_entities = len(entity_lst)
nb_predicates = len(predicate_lst)
entity_to_index = {e: i for i, e in enumerate(entity_lst)}
predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
with torch.no_grad():
kernel = GaussianKernel()
entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
rel_emb = encode_relation(facts=triples,
relation_embeddings=predicate_embeddings,
relation_to_idx=predicate_to_index)
arg1_emb, arg2_emb = encode_arguments(facts=triples,
entity_embeddings=entity_embeddings,
entity_to_idx=entity_to_index)
batch_size = 6
fact_size = rel_emb.shape[0]
entity_size = entity_embeddings.weight.shape[0]
rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)
emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
_nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)
facts = [rel_emb, arg1_emb, arg2_emb]
model = BatchNeuralKB(kernel=kernel)
        indices = torch.from_numpy(np.array([predicate_to_index['p']]))
import unittest
import yaml
import numpy as np
from AlphaZero.env.go import GameState
from AlphaZero.processing.state_converter import TensorActionConverter, StateTensorConverter, ReverseTransformer
with open('tests/go_test.yaml') as f:
config = yaml.load(f)
def simple_board():
"""
"""
gs = GameState(size=7)
# make a tiny board for the sake of testing and hand-coding expected results
#
# X
# 0 1 2 3 4 5 6
# B W . . . . . 0
# B W . . . . . 1
# B . . . B . . 2
# Y . . . B k B . 3
# . . . W B W . 4
# . . . . W . . 5
# . . . . . . . 6
#
# where k is a ko position (white was just captured)
# ladder-looking thing in the top-left
gs.do_move((0, 0)) # B
gs.do_move((1, 0)) # W
gs.do_move((0, 1)) # B
gs.do_move((1, 1)) # W
gs.do_move((0, 2)) # B
# ko position in the middle
gs.do_move((3, 4)) # W
gs.do_move((3, 3)) # B
gs.do_move((4, 5)) # W
gs.do_move((4, 2)) # B
gs.do_move((5, 4)) # W
gs.do_move((5, 3)) # B
gs.do_move((4, 3)) # W - the ko position
gs.do_move((4, 4)) # B - does the capture
return gs
class TestTransformers(unittest.TestCase):
def test_forward_transformer_id_0(self):
gs = simple_board()
gs.transform(0)
target = np.asarray([
[1, 1, 1, 0, 0, 0, 0],
[-1, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0, 0],
[0, 0, 1, 0, 1, -1, 0],
[0, 0, 0, 1, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
])
self.assertTrue(np.array_equal(gs.board, target))
def test_forward_transformer_id_1_and_history(self):
gs = simple_board()
gs.transform(1)
target = np.asarray([
[1, 1, 1, 0, 0, 0, 0],
[-1, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0, 0],
[0, 0, 1, 0, 1, -1, 0],
[0, 0, 0, 1, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
])
target_history_last = np.asarray([
[1, 1, 1, 0, 0, 0, 0],
[-1, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0, 0],
[0, 0, 1, -1, 0, -1, 0],
[0, 0, 0, 1, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
])
        self.assertTrue(np.array_equal(gs.board, np.rot90(target, 1)))
'''
Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
If this code is useful to you, please cite the following paper:
<NAME>, <NAME>, and <NAME>. Learning topology from synthetic data for unsupervised depth completion.
In the Robotics and Automation Letters (RA-L) 2021 and Proceedings of International Conference on Robotics and Automation (ICRA) 2021
@article{wong2021learning,
title={Learning topology from synthetic data for unsupervised depth completion},
author={<NAME> and <NAME> and <NAME>},
journal={IEEE Robotics and Automation Letters},
volume={6},
number={2},
pages={1495--1502},
year={2021},
publisher={IEEE}
}
'''
import os, sys, glob
import multiprocessing as mp
import numpy as np
import cv2
sys.path.insert(0, 'src')
import data_utils
VOID_ROOT_DIRPATH = os.path.join('data', 'void_release')
VOID_DATA_150_DIRPATH = os.path.join(VOID_ROOT_DIRPATH, 'void_150')
VOID_DATA_500_DIRPATH = os.path.join(VOID_ROOT_DIRPATH, 'void_500')
VOID_DATA_1500_DIRPATH = os.path.join(VOID_ROOT_DIRPATH, 'void_1500')
VOID_OUTPUT_DIRPATH = os.path.join('data', 'void_learning_topology')
VOID_TRAIN_IMAGE_FILENAME = 'train_image.txt'
VOID_TRAIN_SPARSE_DEPTH_FILENAME = 'train_sparse_depth.txt'
VOID_TRAIN_VALIDITY_MAP_FILENAME = 'train_validity_map.txt'
VOID_TRAIN_GROUND_TRUTH_FILENAME = 'train_ground_truth.txt'
VOID_TRAIN_INTRINSICS_FILENAME = 'train_intrinsics.txt'
VOID_TEST_IMAGE_FILENAME = 'test_image.txt'
VOID_TEST_SPARSE_DEPTH_FILENAME = 'test_sparse_depth.txt'
VOID_TEST_VALIDITY_MAP_FILENAME = 'test_validity_map.txt'
VOID_TEST_GROUND_TRUTH_FILENAME = 'test_ground_truth.txt'
VOID_TEST_INTRINSICS_FILENAME = 'test_intrinsics.txt'
TRAIN_REFS_DIRPATH = os.path.join('training', 'void')
TEST_REFS_DIRPATH = os.path.join('testing', 'void')
# VOID training set 150 density
VOID_TRAIN_IMAGE_150_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_image_150.txt')
VOID_TRAIN_SPARSE_DEPTH_150_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_sparse_depth_150.txt')
VOID_TRAIN_VALIDITY_MAP_150_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_validity_map_150.txt')
VOID_TRAIN_GROUND_TRUTH_150_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_ground_truth_150.txt')
VOID_TRAIN_INTRINSICS_150_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_intrinsics_150.txt')
# VOID training set 500 density
VOID_TRAIN_IMAGE_500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_image_500.txt')
VOID_TRAIN_SPARSE_DEPTH_500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_sparse_depth_500.txt')
VOID_TRAIN_VALIDITY_MAP_500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_validity_map_500.txt')
VOID_TRAIN_GROUND_TRUTH_500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_ground_truth_500.txt')
VOID_TRAIN_INTRINSICS_500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_intrinsics_500.txt')
# VOID training set 1500 density
VOID_TRAIN_IMAGE_1500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_image_1500.txt')
VOID_TRAIN_SPARSE_DEPTH_1500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_sparse_depth_1500.txt')
VOID_TRAIN_VALIDITY_MAP_1500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_validity_map_1500.txt')
VOID_TRAIN_GROUND_TRUTH_1500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_ground_truth_1500.txt')
VOID_TRAIN_INTRINSICS_1500_FILEPATH = os.path.join(TRAIN_REFS_DIRPATH, 'void_train_intrinsics_1500.txt')
# VOID testing set 150 density
VOID_TEST_IMAGE_150_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_image_150.txt')
VOID_TEST_SPARSE_DEPTH_150_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_sparse_depth_150.txt')
VOID_TEST_VALIDITY_MAP_150_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_validity_map_150.txt')
VOID_TEST_GROUND_TRUTH_150_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_ground_truth_150.txt')
VOID_TEST_INTRINSICS_150_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_intrinsics_150.txt')
# VOID testing set 500 density
VOID_TEST_IMAGE_500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_image_500.txt')
VOID_TEST_SPARSE_DEPTH_500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_sparse_depth_500.txt')
VOID_TEST_VALIDITY_MAP_500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_validity_map_500.txt')
VOID_TEST_GROUND_TRUTH_500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_ground_truth_500.txt')
VOID_TEST_INTRINSICS_500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_intrinsics_500.txt')
# VOID testing set 1500 density
VOID_TEST_IMAGE_1500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_image_1500.txt')
VOID_TEST_SPARSE_DEPTH_1500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_sparse_depth_1500.txt')
VOID_TEST_VALIDITY_MAP_1500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_validity_map_1500.txt')
VOID_TEST_GROUND_TRUTH_1500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_ground_truth_1500.txt')
VOID_TEST_INTRINSICS_1500_FILEPATH = os.path.join(TEST_REFS_DIRPATH, 'void_test_intrinsics_1500.txt')
def process_frame(inputs):
'''
Processes a single depth frame
Args:
inputs : tuple
image path at time t=0,
image path at time t=1,
image path at time t=-1,
sparse depth path at time t=0,
validity map path at time t=0,
ground truth path at time t=0
Returns:
str : image reference directory path
str : output concatenated image path at time t=0
str : output sparse depth path at time t=0
str : output validity map path at time t=0
str : output ground truth path at time t=0
'''
image_path1, \
image_path0, \
image_path2, \
sparse_depth_path, \
validity_map_path, \
ground_truth_path = inputs
# Create image composite of triplets
image1 = cv2.imread(image_path1)
image0 = cv2.imread(image_path0)
image2 = cv2.imread(image_path2)
imagec = np.concatenate([image1, image0, image2], axis=1)
image_refpath = os.path.join(*image_path0.split(os.sep)[2:])
# Set output paths
image_outpath = os.path.join(VOID_OUTPUT_DIRPATH, image_refpath)
sparse_depth_outpath = sparse_depth_path
validity_map_outpath = validity_map_path
ground_truth_outpath = ground_truth_path
# Verify that all filenames match
image_out_dirpath, image_filename = os.path.split(image_outpath)
sparse_depth_filename = os.path.basename(sparse_depth_outpath)
validity_map_filename = os.path.basename(validity_map_outpath)
ground_truth_filename = os.path.basename(ground_truth_outpath)
assert image_filename == sparse_depth_filename
assert image_filename == validity_map_filename
assert image_filename == ground_truth_filename
cv2.imwrite(image_outpath, imagec)
return (image_refpath,
image_outpath,
sparse_depth_outpath,
validity_map_outpath,
ground_truth_outpath)
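# process_frame is written so it can be mapped over frame tuples in parallel,
# e.g. with a multiprocessing pool (sketch; `frame_inputs` is a hypothetical
# list built from the image/sparse-depth/validity-map/ground-truth path lists
# loaded below):
# with mp.Pool(processes=8) as pool:
#     results = pool.map(process_frame, frame_inputs)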
if not os.path.exists(TRAIN_REFS_DIRPATH):
os.makedirs(TRAIN_REFS_DIRPATH)
if not os.path.exists(TEST_REFS_DIRPATH):
os.makedirs(TEST_REFS_DIRPATH)
data_dirpaths = [
VOID_DATA_150_DIRPATH,
VOID_DATA_500_DIRPATH,
VOID_DATA_1500_DIRPATH
]
train_output_filepaths = [
[
VOID_TRAIN_IMAGE_150_FILEPATH,
VOID_TRAIN_SPARSE_DEPTH_150_FILEPATH,
VOID_TRAIN_VALIDITY_MAP_150_FILEPATH,
VOID_TRAIN_GROUND_TRUTH_150_FILEPATH,
VOID_TRAIN_INTRINSICS_150_FILEPATH
],
[
VOID_TRAIN_IMAGE_500_FILEPATH,
VOID_TRAIN_SPARSE_DEPTH_500_FILEPATH,
VOID_TRAIN_VALIDITY_MAP_500_FILEPATH,
VOID_TRAIN_GROUND_TRUTH_500_FILEPATH,
VOID_TRAIN_INTRINSICS_500_FILEPATH
],
[
VOID_TRAIN_IMAGE_1500_FILEPATH,
VOID_TRAIN_SPARSE_DEPTH_1500_FILEPATH,
VOID_TRAIN_VALIDITY_MAP_1500_FILEPATH,
VOID_TRAIN_GROUND_TRUTH_1500_FILEPATH,
VOID_TRAIN_INTRINSICS_1500_FILEPATH
]
]
test_output_filepaths = [
[
VOID_TEST_IMAGE_150_FILEPATH,
VOID_TEST_SPARSE_DEPTH_150_FILEPATH,
VOID_TEST_VALIDITY_MAP_150_FILEPATH,
VOID_TEST_GROUND_TRUTH_150_FILEPATH,
VOID_TEST_INTRINSICS_150_FILEPATH
],
[
VOID_TEST_IMAGE_500_FILEPATH,
VOID_TEST_SPARSE_DEPTH_500_FILEPATH,
VOID_TEST_VALIDITY_MAP_500_FILEPATH,
VOID_TEST_GROUND_TRUTH_500_FILEPATH,
VOID_TEST_INTRINSICS_500_FILEPATH
],
[
VOID_TEST_IMAGE_1500_FILEPATH,
VOID_TEST_SPARSE_DEPTH_1500_FILEPATH,
VOID_TEST_VALIDITY_MAP_1500_FILEPATH,
VOID_TEST_GROUND_TRUTH_1500_FILEPATH,
VOID_TEST_INTRINSICS_1500_FILEPATH
]
]
data_filepaths = \
zip(data_dirpaths, train_output_filepaths, test_output_filepaths)
for data_dirpath, train_filepaths, test_filepaths in data_filepaths:
# Training set
train_image_filepath = os.path.join(data_dirpath, VOID_TRAIN_IMAGE_FILENAME)
train_sparse_depth_filepath = os.path.join(data_dirpath, VOID_TRAIN_SPARSE_DEPTH_FILENAME)
train_validity_map_filepath = os.path.join(data_dirpath, VOID_TRAIN_VALIDITY_MAP_FILENAME)
train_ground_truth_filepath = os.path.join(data_dirpath, VOID_TRAIN_GROUND_TRUTH_FILENAME)
train_intrinsics_filepath = os.path.join(data_dirpath, VOID_TRAIN_INTRINSICS_FILENAME)
# Read training paths
train_image_paths = data_utils.read_paths(train_image_filepath)
train_sparse_depth_paths = data_utils.read_paths(train_sparse_depth_filepath)
train_validity_map_paths = data_utils.read_paths(train_validity_map_filepath)
train_ground_truth_paths = data_utils.read_paths(train_ground_truth_filepath)
train_intrinsics_paths = data_utils.read_paths(train_intrinsics_filepath)
assert len(train_image_paths) == len(train_sparse_depth_paths)
assert len(train_image_paths) == len(train_validity_map_paths)
assert len(train_image_paths) == len(train_ground_truth_paths)
assert len(train_image_paths) == len(train_intrinsics_paths)
# Testing set
test_image_filepath = os.path.join(data_dirpath, VOID_TEST_IMAGE_FILENAME)
test_sparse_depth_filepath = os.path.join(data_dirpath, VOID_TEST_SPARSE_DEPTH_FILENAME)
test_validity_map_filepath = os.path.join(data_dirpath, VOID_TEST_VALIDITY_MAP_FILENAME)
test_ground_truth_filepath = os.path.join(data_dirpath, VOID_TEST_GROUND_TRUTH_FILENAME)
test_intrinsics_filepath = os.path.join(data_dirpath, VOID_TEST_INTRINSICS_FILENAME)
# Read testing paths
test_image_paths = data_utils.read_paths(test_image_filepath)
test_sparse_depth_paths = data_utils.read_paths(test_sparse_depth_filepath)
test_validity_map_paths = data_utils.read_paths(test_validity_map_filepath)
test_ground_truth_paths = data_utils.read_paths(test_ground_truth_filepath)
test_intrinsics_paths = data_utils.read_paths(test_intrinsics_filepath)
assert len(test_image_paths) == len(test_sparse_depth_paths)
assert len(test_image_paths) == len(test_validity_map_paths)
assert len(test_image_paths) == len(test_ground_truth_paths)
assert len(test_image_paths) == len(test_intrinsics_paths)
# Get test set directories
test_seq_dirpaths = set(
[test_image_paths[idx].split(os.sep)[-3] for idx in range(len(test_image_paths))])
# Initialize placeholders for training output paths
train_image_outpaths = []
train_sparse_depth_outpaths = []
train_validity_map_outpaths = []
train_ground_truth_outpaths = []
train_intrinsics_outpaths = []
# Initialize placeholders for testing output paths
test_image_outpaths = []
test_sparse_depth_outpaths = []
test_validity_map_outpaths = []
test_ground_truth_outpaths = []
test_intrinsics_outpaths = []
# For each dataset density, grab the sequences
seq_dirpaths = glob.glob(os.path.join(data_dirpath, 'data', '*'))
n_sample = 0
for seq_dirpath in seq_dirpaths:
# For each sequence, grab the images, sparse depths and valid maps
image_paths = \
sorted(glob.glob(os.path.join(seq_dirpath, 'image', '*.png')))
sparse_depth_paths = \
sorted(glob.glob(os.path.join(seq_dirpath, 'sparse_depth', '*.png')))
validity_map_paths = \
sorted(glob.glob(os.path.join(seq_dirpath, 'validity_map', '*.png')))
ground_truth_paths = \
sorted(glob.glob(os.path.join(seq_dirpath, 'ground_truth', '*.png')))
intrinsics_path = os.path.join(seq_dirpath, 'K.txt')
assert len(image_paths) == len(sparse_depth_paths)
assert len(image_paths) == len(validity_map_paths)
# Load intrinsics
        kin = np.loadtxt(intrinsics_path)
# This script is used to produce fitting and confidence interval for results in python.
#
#%%
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats.distributions import t
#%%
from numpy import cos, sin, exp, pi, meshgrid
def KentFunc(Xin, theta, phi, psi, kappa, beta, A):
# Assume theta_z, phi_z are column vectors ([0,2 pi]), theta, phi, psi are
# rotational scaler ([0,2 pi])
theta_z, phi_z = Xin[:, 0], Xin[:, 1]
Z = np.array([cos(theta_z) * cos(phi_z), sin(theta_z) * cos(phi_z), sin(phi_z)]).T # M by 3 finally
coord = SO3(theta, phi, psi)
mu1 = coord[:, 0:1] # col vector
# mu23 = coord[:, 1:3] # 2 col vectors, 3 by 2
mu2 = coord[:, 1:2] # 2 col vectors, 3 by 2
mu3 = coord[:, 2:3] # 2 col vectors, 3 by 2
fval = A * exp(kappa * Z @ mu1 + beta * ((Z @ mu2) ** 2 - (Z @ mu3) ** 2))
return fval[:, 0]
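# For reference, the expression above is the (unnormalised) Kent / FB5 density
# evaluated at the unit vectors Z:
#     f(z) = A * exp( kappa * <z, mu1> + beta * ( <z, mu2>^2 - <z, mu3>^2 ) ),
# where mu1 is the mean direction and mu2, mu3 (the remaining columns of
# SO3(theta, phi, psi)) span the major and minor axes of the elliptical contours.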
def KentFunc_bsl(Xin, theta, phi, psi, kappa, beta, A, bsl):
# Assume theta_z, phi_z are column vectors ([0,2 pi]), theta, phi, psi are
# rotational scaler ([0,2 pi])
theta_z, phi_z = Xin[:, 0], Xin[:, 1]
    Z = np.array([cos(theta_z) * cos(phi_z), sin(theta_z) * cos(phi_z), sin(phi_z)]).T  # M by 3 finally
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 09:12:14 2018
@author: lenovo
"""
# Create
import h5py
# To read the file instead, change "w" to "r"
f=h5py.File("myh5py.hdf5","w")
d1=f.create_dataset("dset1", (20,), 'i')
for key in f.keys():
print(key)
print(f[key].name)
print(f[key].shape)
print(f[key].value)
## Assignment
import h5py
import numpy as np
f=h5py.File("myh5py.hdf5","w")
d1=f.create_dataset("dset1",(20,),'i')
# Assign values
d1[...]=np.arange(20)
# Alternatively, we can create a dataset and assign values to it directly, as below
f["dset2"]=np.arange(15)
for key in f.keys():
print(f[key].name)
print(f[key].value)
# When a numpy array already exists
import h5py
import numpy as np
f=h5py.File("myh5py.hdf5","w")
a=np.arange(20)
d1=f.create_dataset("dset3",data=a)
for key in f.keys():
print(f[key].name)
print(f[key].value)
# Create a group
import h5py
import numpy as np
f=h5py.File("myh5py.hdf5","w")
# Create a group named "bar"
g1=f.create_group("bar")
# Inside the "bar" group, create datasets named dset1 and dset2 and assign values to them.
g1["dset1"]=np.arange(10)
g1["dset2"]=np.arange(12).reshape((3,4))
for key in g1.keys():
print(g1[key].name)
print(g1[key].value)
# group and datasets
import h5py
import numpy as np
f=h5py.File("myh5py.hdf5","w")
# Create group bar1, group bar2, and dataset dset
g1=f.create_group("bar1")
g2=f.create_group("bar2")
d=f.create_dataset("dset",data=np.arange(10))
# Inside group bar1, create a group car1 and a dataset dset1.
c1=g1.create_group("car1")
d1=g1.create_dataset("dset1",data=np.arange(10))
# Inside group bar2, create a group car2 and a dataset dset2
c2=g2.create_group("car2")
d2=g2.create_dataset("dset2",data= | np.arange(10) | numpy.arange |
import numpy as np
from autograd_cls import AutoGrad
from compute_utils import compute_lcv_lambda_gradient, compute_epsilon_lambda_gradient, compute_hjj, compute_z, compute_z_gradient, compute_eps_t, compute_hjj_gradient, get_discounted_return, calculate_batch_loss, invert_matrix, calculate_batch_mspbe_msbe_mse_losses
from lstd import LSTD, MiniBatchLSTDLambda
from adam import ADAM
import copy
from pprint import pprint
import pdb
import pudb
def minibatch_LSTD(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config
):
lambda_ = config.default_lambda
gamma = config.gamma
LSTD_lambda = MiniBatchLSTDLambda(gamma, lambda_, Phi)
G = {}
running_loss = []
num_episodes = len(trajectories)
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
ep_rewards = []
ep_states = []
cur_state, reward, next_state, done = traj[0]
LSTD_lambda.update(None, 0 , cur_state)
cur_state = next_state
#LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
LSTD_lambda.update(cur_state, reward, next_state)
ep_rewards.append(reward)
ep_states.append(cur_state)
if done:
LSTD_lambda.update(next_state, 0, None)
theta = LSTD_lambda.theta
ep_discountedrewards = get_discounted_return(ep_rewards, gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
#loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
ms_loss, rms_loss = calculate_batch_mspbe_msbe_mse_losses(trajectories, G, theta, Phi, R, D, P, config)
return LSTD_lambda, theta, G, rms_loss, ms_loss
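# Hedged usage sketch (illustration only): `trajectories` is a list of episodes
# of (state, reward, next_state, done) tuples, `Phi` maps a state index to its
# feature row, and `config` carries gamma / default_lambda; the remaining
# matrices and the Monte-Carlo returns Gs come from the environment setup
# elsewhere in this project.
# lstd, theta, G, rms_loss, ms_loss = minibatch_LSTD(trajectories, Phi, P, V, D, R,
#                                                    Gs, logger=None, config=config)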
def minibatch_LSTD_withCV(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config,
trajectories_test,
Gs_test
):
lambda_ = config.default_lambda
gamma = config.gamma
LSTD_lambda = MiniBatchLSTDLambda(gamma, lambda_, Phi)
G = {}
running_loss = []
num_episodes = len(trajectories)
valid_episode_counter = 0
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
ep_rewards = []
ep_states = []
cur_state, reward, next_state, done = traj[0]
LSTD_lambda.update(None, 0 , cur_state)
cur_state = next_state
#LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
LSTD_lambda.update(cur_state, reward, next_state)
ep_rewards.append(reward)
ep_states.append(cur_state)
if done:
LSTD_lambda.update(next_state, 0, None)
theta = LSTD_lambda.theta
ep_discountedrewards = get_discounted_return(ep_rewards, gamma)
#pdb.set_trace()
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
if valid_episode_counter % config.compute_cv_iterations == 0 and valid_episode_counter > 0:
#pudb.set_trace()
new_config = copy.deepcopy(config)
new_config.default_lambda = 0
current_cv_loss = compute_CV_loss(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger = None,
config =new_config)
losses, avg_losses = calculate_batch_mspbe_msbe_mse_losses(trajectories_test, Gs_test, theta, Phi, R, D, P, new_config)
print('current_cv_loss:{0}'.format(current_cv_loss))
if logger:
#pudb.set_trace()
logger.log_scalar('Train mean loto cv', current_cv_loss, valid_episode_counter)
logger.log_scalar('Test RMSPBE', avg_losses['RMSPBE'], valid_episode_counter)
logger.log_scalar('Test RMSBE', avg_losses['RMSBE'], valid_episode_counter)
logger.log_scalar('Test RMSBE', avg_losses['RMSE'], valid_episode_counter)
logger.writer.flush()
valid_episode_counter += 1
#loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
ms_loss, rms_loss = calculate_batch_mspbe_msbe_mse_losses(trajectories, G, theta, Phi, R, D, P, config)
return LSTD_lambda, theta, G, rms_loss, ms_loss
def LSTD_algorithm(trajectories, Phi, num_features, gamma=0.4, lambda_=0.2):
# LSTD operator:
LSTD_lambda = LSTD(num_features)
G = {}
running_loss = []
num_episodes = len(trajectories)
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
if len(traj) <= 4:
continue
ep_rewards = []
ep_states = []
cur_state = traj[0][0]
LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
LSTD_lambda.update_boyan(Phi[cur_state, :], reward, Phi[next_state, :], gamma, lambda_, timestep)
ep_rewards.append(reward)
ep_states.append(cur_state)
theta = LSTD_lambda.theta
ep_discountedrewards = get_discounted_return(ep_rewards, gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
# print('Episode {0} loss is {1}'.format(ep, ep_loss))
# print('Episode {0} rewards are {1}'.format(ep, ep_rewards))
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
# print('episode loss:{0}'.format(loss))
# print(LSTD_lambda.A, LSTD_lambda.b)
ms_loss,rms_loss = calculate_batch_mspbe_msbe_mse_losses(trajectories, G, theta, Phi, R, D, P, config)
# print("average running loss in training: ", sum(running_loss) / num_episodes)
# print("average loss after training: ", sum(loss) / num_episodes)
average_loss = rmse
return LSTD_lambda, theta, average_loss, G, rmspbe
def Adaptive_LSTD_algorithm(trajectories,
Phi,
P,
V,
D,
R,
Gs,
config
):
# LSTD operator:
Auto_grad = AutoGrad(compute_CV_loss, 4)
Auto_grad.gradient_fun()
adaptive_LSTD_lambda = LSTD(config.num_features)
G = {}
loss = []
running_loss = []
num_episodes = len(trajectories)
adam_optimizer = ADAM(x_init = config.default_lambda, alpha=config.lr)
lambda_ = config.default_lambda
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
if len(traj) <= 4:
continue
ep_rewards = []
ep_states = []
Z = np.zeros((config.num_features, config.num_states))
Z_gradient = np.zeros((config.num_features, config.num_states))
H_diag = np.zeros(config.num_states) # n
eps = np.zeros(config.num_states)
states_count = np.zeros(config.num_states)
epsilon_lambda_gradient = np.zeros(config.num_states)
H_diag_gradient = np.zeros(config.num_states)
episode_loss = 0
cur_state = traj[0][0]
adaptive_LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
adaptive_LSTD_lambda.update_boyan(Phi[cur_state, :], reward, Phi[next_state, :], config.gamma, lambda_, timestep)
ep_rewards.append(reward)
ep_states.append(cur_state)
theta = adaptive_LSTD_lambda.theta
print(theta)
A = adaptive_LSTD_lambda.A
b = adaptive_LSTD_lambda.b
A_inv = np.linalg.pinv(A + np.eye(A.shape[0]) * config.A_inv_epsilon, rcond=.1)
for timestep in range(len(traj)-1):
cur_state, reward, next_state, done = traj[timestep]
# To-do : change the following update to running average
states_count[cur_state] += 1
ct = states_count[cur_state]
Z[:,cur_state] = (ct-1)/ct *Z[:,cur_state]+ 1/ct * compute_z(lambda_, config.gamma, Phi, ep_states, timestep )
Z_gradient[:, cur_state] = (ct-1)/ct * Z_gradient[:, cur_state] + 1/ct * compute_z_gradient(lambda_, config.gamma, Phi, ep_states, timestep)
H_diag[cur_state] = (ct-1)/ct * H_diag[cur_state] + 1/ct * compute_hjj(Phi, lambda_, config.gamma, ep_states, timestep, A_inv)
eps[cur_state] = (ct-1)/ct * eps[cur_state] + 1/ct * compute_eps_t(Phi, theta, config.gamma, reward, ep_states, timestep)
epsilon_lambda_gradient[cur_state] = (ct-1)/ct * epsilon_lambda_gradient[cur_state] + \
1/ct * compute_epsilon_lambda_gradient(Phi,
lambda_,
config.gamma,
A,
b,
A_inv,
Z,
timestep,
ep_states,
ep_rewards
)
H_diag_gradient[cur_state] = (ct-1)/ct * H_diag_gradient[cur_state] + 1/ct * compute_hjj_gradient(Phi,
lambda_,
config.gamma,
ep_states,
timestep,
A,
b,
A_inv
)
#grad = compute_cv_gradient(Phi, theta, gamma, lambda_, P, V, D, R)
# Replaced the above update with:
grad = compute_lcv_lambda_gradient(eps,
H_diag,
ep_states,
epsilon_lambda_gradient,
H_diag_gradient,
grad_clip_max_norm = config.grad_clip_norm)
if config.compute_autograd:
auto_grad = Auto_grad.loss_autograd_fun(trajectories, Phi, config.num_features, config.gamma, lambda_, Gs)
print('gradient diff:{0}'.format(abs(grad-auto_grad)))
# if ep > 1000 :
# new_lambda = lambda_ - lr * compute_cv_gradient(Phi, theta, gamma, lambda_, P, V, D)
# print(new_lambda)
# if new_lambda >= 0 and new_lambda <= 1:
# lambda_ = new_lambda
# print('current lambda:{0}'.format(lambda_))
# grad = compute_cv_gradient2(Phi, theta, gamma, lambda_, R, A, b, z)
if config.use_adam_optimizer:
adam_optimizer.update(grad, ep)
new_lambda = adam_optimizer.x
else:
new_lambda = lambda_ - config.lr * grad
if new_lambda >= 0 and new_lambda <= 1:
lambda_ = new_lambda
print('current lambda:{0}'.format(lambda_))
ep_discountedrewards = get_discounted_return(ep_rewards, config.gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
# print('Episode {0} loss is {1}'.format(ep, ep_loss))
# print('Episode {0} rewards are {1}'.format(ep, ep_rewards))
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
# print('episode loss:{0}'.format(loss))
# print(LSTD_lambda.A, LSTD_lambda.b)
#print("Final Lambda: {0}".format(lambda_))
#print("average running loss in training: ", np.mean(running_loss))
#print("average loss after training: ", np.mean(loss))
average_loss = rmse
return adaptive_LSTD_lambda, theta, average_loss, G, lambda_
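# Usage sketch for Adaptive_LSTD_algorithm (illustrative only; the argument
# shapes are assumptions inferred from how they are indexed above, not from a
# documented API): trajectories is a list of episodes of
# (state, reward, next_state, done) tuples, Phi is a
# (num_states x num_features) feature matrix, and config carries gamma, lr,
# default_lambda, grad_clip_norm, etc.
#
#   model, theta, avg_loss, G, lambda_ = Adaptive_LSTD_algorithm(
#       trajectories, Phi, P, V, D, R, Gs, config)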
def Adaptive_LSTD_algorithm_batch(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config
):
# LSTD operator:
Auto_grad = AutoGrad(compute_CV_loss, 4)
Auto_grad.gradient_fun()
#adaptive_LSTD_lambda = LSTD(config.num_features)
adaptive_LSTD_lambda = MiniBatchLSTDLambda(config.gamma, config.default_lambda, Phi)
G = {}
loss = []
running_loss = []
num_episodes = len(trajectories)
adam_optimizer = ADAM(x_init = config.default_lambda, alpha=config.lr)
lambda_ = config.default_lambda
valid_episode_counter = 0
for ep in range(num_episodes):
traj = trajectories[ep]
G[ep] = []
if len(traj) <= 4:
continue
cur_state, reward, next_state, done = traj[0]
adaptive_LSTD_lambda.update(None, 0 , cur_state)
if valid_episode_counter % config.batch_size == 0:
ep_rewards = []
ep_states = []
Z = np.zeros((config.num_features, config.num_states))
Z_gradient = np.zeros((config.num_features, config.num_states))
H_diag = np.zeros(config.num_states) # n
eps = np.zeros(config.num_states)
states_count = np.zeros(config.num_states)
epsilon_lambda_gradient = np.zeros(config.num_states)
H_diag_gradient = np.zeros(config.num_states)
episode_loss = 0
#cur_state = traj[0][0]
#adaptive_LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
adaptive_LSTD_lambda.update(cur_state, reward, next_state)
if done:
adaptive_LSTD_lambda.update(next_state, 0, None)
#adaptive_LSTD_lambda.update_boyan(Phi[cur_state, :], reward, Phi[next_state, :], config.gamma, lambda_, timestep)
ep_rewards.append(reward)
ep_states.append(cur_state)
if logger:
logger.log_scalar('average trajectories reward', np.mean(ep_rewards), valid_episode_counter)
logger.writer.flush()
theta = adaptive_LSTD_lambda.theta
A = adaptive_LSTD_lambda.A
b = adaptive_LSTD_lambda.b.reshape((-1,1))
#A_inv = np.linalg.pinv(A + np.eye(A.shape[0]) * config.A_inv_epsilon, rcond=.1)
A_inv = invert_matrix(A)
for timestep in range(len(traj)-1):
cur_state, reward, next_state, done = traj[timestep]
states_count[cur_state] += 1
ct = states_count[cur_state]
Z[:,cur_state] = (ct-1)/ct *Z[:,cur_state]+ 1/ct * compute_z(lambda_, config.gamma, Phi, ep_states, timestep )
Z_gradient[:, cur_state] = (ct-1)/ct * Z_gradient[:, cur_state] + \
1/ct * compute_z_gradient(lambda_, config.gamma, Phi, ep_states, timestep)
H_diag[cur_state] = (ct-1)/ct * H_diag[cur_state] + \
1/ct * compute_hjj(Phi, lambda_, config.gamma, ep_states, timestep, A_inv)
eps[cur_state] = (ct-1)/ct * eps[cur_state] + \
1/ct * compute_eps_t(Phi, theta, config.gamma, reward, ep_states, timestep)
epsilon_lambda_gradient[cur_state] = (ct-1)/ct * epsilon_lambda_gradient[cur_state] + \
1/ct * compute_epsilon_lambda_gradient(Phi,
lambda_,
config.gamma,
A,
b,
A_inv,
Z,
timestep,
ep_states,
ep_rewards
)
H_diag_gradient[cur_state] = (ct-1)/ct * H_diag_gradient[cur_state] + 1/ct * compute_hjj_gradient(Phi,
lambda_,
config.gamma,
ep_states,
timestep,
A,
b,
A_inv
)
# update the gradients of the batch:
if valid_episode_counter % config.batch_size == 0:
grad = compute_lcv_lambda_gradient(eps,
H_diag,
ep_states,
epsilon_lambda_gradient,
H_diag_gradient,
grad_clip_max_norm = config.grad_clip_norm)
if logger:
logger.log_scalar('CV loss lambda gradients per batch', grad, valid_episode_counter/config.batch_size)
logger.writer.flush()
if config.compute_autograd:
auto_grad = Auto_grad.loss_autograd_fun(trajectories, Phi, config.num_features, config.gamma, lambda_, Gs)
print('gradient diff:{0}'.format(abs(grad-auto_grad)))
if config.use_adam_optimizer:
adam_optimizer.update(grad, valid_episode_counter+1)
new_lambda = adam_optimizer.x
else:
new_lambda = lambda_ - config.lr * grad
if new_lambda >= 0 and new_lambda <= 1:
lambda_ = new_lambda
print('gradient: {0}'.format(grad))
print('current lambda:{0}'.format(lambda_))
print('current theta:{0}'.format(theta))
ep_discountedrewards = get_discounted_return(ep_rewards, config.gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
valid_episode_counter += 1
# After we calculated the Theta parameter from the training data
#loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
new_config = copy.deepcopy(config)
new_config.lambda_ = lambda_
ms_loss, rms_loss = calculate_batch_mspbe_msbe_mse_losses(trajectories, G, theta, Phi, R, D, P, new_config)
print('Theta values: {0}'.format(theta))
print('episode RMSPBE :{0}'.format(rms_loss))
# print('episode loss:{0}'.format(loss))
# print(LSTD_lambda.A, LSTD_lambda.b)
#print("Final Lambda: {0}".format(lambda_))
#print("average running loss in training: ", np.mean(running_loss))
#print("average loss after training: ", np.mean(loss))
return adaptive_LSTD_lambda, theta, G, lambda_, ms_loss, rms_loss
'''
The same as Adaptive_LSTD_algorithm_batch, except A and b
are calculated based on all the episodes.
'''
def Adaptive_LSTD_algorithm_batch_type2(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config
):
# LSTD operator:
Auto_grad = AutoGrad(compute_CV_loss, 4)
Auto_grad.gradient_fun()
adaptive_LSTD_lambda = LSTD(config.num_features)
G = {}
loss = []
running_loss = []
num_episodes = len(trajectories)
adam_optimizer = ADAM(x_init = config.default_lambda, alpha=config.lr)
lambda_ = config.default_lambda
##### First go over all the trajectories and calculate estimate A and b:
for ep in range(num_episodes):
traj = trajectories[ep]
if len(traj) <= 4:
continue
cur_state = traj[0][0]
adaptive_LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
adaptive_LSTD_lambda.update_boyan(Phi[cur_state, :],
reward,
Phi[next_state, :],
config.gamma,
lambda_,
timestep
)
# theta = adaptive_LSTD_lambda.theta
# A = adaptive_LSTD_lambda.A
# b = adaptive_LSTD_lambda.b
# A_inv = np.linalg.pinv(A + np.eye(A.shape[0]) * config.A_inv_epsilon)
#pudb.set_trace()
######## Now use the above A and b to calculate optimal lambda:
valid_episode_counter = 0
for ep in range(num_episodes):
traj = trajectories[ep]
G[ep] = []
# if len(traj) <= 4:
# continue
if valid_episode_counter % config.batch_size == 0:
ep_rewards = []
ep_states = []
Z = np.zeros((config.num_features, config.num_states))
Z_gradient = np.zeros((config.num_features, config.num_states))
H_diag = np.zeros(config.num_states) # n
eps = np.zeros(config.num_states)
states_count = np.zeros(config.num_states)
epsilon_lambda_gradient = np.zeros(config.num_states)
H_diag_gradient = np.zeros(config.num_states)
episode_loss = 0
cur_state = traj[0][0]
adaptive_LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
adaptive_LSTD_lambda.update_boyan(Phi[cur_state, :], reward, Phi[next_state, :], config.gamma, lambda_, timestep)
ep_rewards.append(reward)
ep_states.append(cur_state)
#pudb.set_trace()
theta = adaptive_LSTD_lambda.theta
A = adaptive_LSTD_lambda.A
b = adaptive_LSTD_lambda.b
A_inv = np.linalg.pinv(A + np.eye(A.shape[0]) * config.A_inv_epsilon, rcond=.1)
for timestep in range(len(traj)-1):
cur_state, reward, next_state, done = traj[timestep]
states_count[cur_state] += 1
ct = states_count[cur_state]
Z[:,cur_state] = (ct-1)/ct *Z[:,cur_state]+ 1/ct * compute_z(lambda_, config.gamma, Phi, ep_states, timestep )
Z_gradient[:, cur_state] = (ct-1)/ct * Z_gradient[:, cur_state] + \
1/ct * compute_z_gradient(lambda_, config.gamma, Phi, ep_states, timestep)
H_diag[cur_state] = (ct-1)/ct * H_diag[cur_state] + \
1/ct * compute_hjj(Phi, lambda_, config.gamma, ep_states, timestep, A_inv)
eps[cur_state] = (ct-1)/ct * eps[cur_state] + \
1/ct * compute_eps_t(Phi, theta, config.gamma, reward, ep_states, timestep)
epsilon_lambda_gradient[cur_state] = (ct-1)/ct * epsilon_lambda_gradient[cur_state] + \
1/ct * compute_epsilon_lambda_gradient(Phi,
lambda_,
config.gamma,
A,
b,
A_inv,
Z,
timestep,
ep_states,
ep_rewards
)
H_diag_gradient[cur_state] = (ct-1)/ct * H_diag_gradient[cur_state] + 1/ct * compute_hjj_gradient(Phi,
lambda_,
config.gamma,
ep_states,
timestep,
A,
b,
A_inv
)
# update the gradients of the batch:
if valid_episode_counter % config.batch_size == 0:
grad = compute_lcv_lambda_gradient(eps,
H_diag,
ep_states,
epsilon_lambda_gradient,
H_diag_gradient,
grad_clip_max_norm = config.grad_clip_norm)
if config.compute_autograd:
auto_grad = Auto_grad.loss_autograd_fun(trajectories, Phi, config.num_features, config.gamma, lambda_, Gs)
print('gradient diff:{0}'.format(abs(grad-auto_grad)))
if config.use_adam_optimizer:
adam_optimizer.update(grad, valid_episode_counter+1)
new_lambda = adam_optimizer.x
else:
new_lambda = lambda_ - config.lr * grad
#pudb.set_trace()
if new_lambda >= 0 and new_lambda <= 1:
lambda_ = new_lambda
print('gradient: {0}'.format(grad))
print('current lambda:{0}'.format(lambda_))
ep_discountedrewards = get_discounted_return(ep_rewards, config.gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
valid_episode_counter += 1
# After we calculated the Theta parameter from the training data
loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
# print('episode loss:{0}'.format(loss))
# print(LSTD_lambda.A, LSTD_lambda.b)
#print("Final Lambda: {0}".format(lambda_))
#print("average running loss in training: ", np.mean(running_loss))
#print("average loss after training: ", np.mean(loss))
return adaptive_LSTD_lambda, theta, rmse, G, lambda_
'''
This needs to be here to avoid having circular dependencies.
'''
def compute_CV_loss(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config):
'''
Leave-one-tuple-out cross-validation loss for the LSTD(lambda) estimator.
:param trajectories: list of episodes, each a list of (state, reward, next_state, done) tuples
:param Phi: feature matrix, indexed by state
:param P, V, D, R: MDP model quantities passed through to the inner solver
:param Gs: per-episode discounted returns
:param logger: optional scalar logger (may be None)
:param config: run configuration (gamma, batch size, etc.)
:return: square root of the mean leave-one-tuple-out loss
'''
total_num_tuples = sum([len(traj) for traj in trajectories])
num_episodes = len(trajectories)
loto_loss = []
step = 0
for i in range(min(1000,num_episodes)):
traj = trajectories[i]
if len(traj) <= 2:
continue
for j in range(len(traj)):
# leave one tuple out ("loto")
loto_trajectories = copy.deepcopy(trajectories)
del loto_trajectories[i][j]
model, _ , _ , rms_loss, ms_loss = minibatch_LSTD(loto_trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config)
theta = model.theta
BE = R + config.gamma * P @ Phi @ theta
#theta = [-24, -16, -8,0]
# pdb.set_trace()
#tuple_loss = (np.dot(Phi[trajectories[i][j][0], :], theta) - Gs[i][j]) ** 2
tuple_loss = (np.dot(Phi[trajectories[i][j][0], :], theta) - BE[trajectories[i][j][0]]) ** 2
loto_loss.append(tuple_loss)
if logger:
logger.log_scalar('average trajectories loss', rms_loss, step)
logger.log_scalar('current tuple loto cv', tuple_loss, step)
logger.log_scalar('mean loto cv', np.mean(loto_loss)**.5, step)
logger.writer.flush()
step += 1
print('trajectory :{0}, current mean loto loss:{1}'.format(i, np.mean(loto_loss)**.5))
cv_loss = np.mean(loto_loss)
return cv_loss ** 0.5
def Adaptive_LSTD_algorithm_batch_type3(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config,
trajectories_test,
Gs_test
):
# LSTD operator:
Auto_grad = AutoGrad(compute_CV_loss, 4)
Auto_grad.gradient_fun()
#adaptive_LSTD_lambda = LSTD(config.num_features)
adaptive_LSTD_lambda = MiniBatchLSTDLambda(config.gamma, config.default_lambda, Phi)
G = {}
loss = []
running_loss = []
num_episodes = len(trajectories)
adam_optimizer = ADAM(x_init = config.default_lambda, alpha=config.lr)
lambda_ = config.default_lambda
valid_episode_counter = 0
for ep in range(config.num_train_episodes):
traj = trajectories[ep]
G[ep] = []
# if len(traj) <= 4:
# continue
cur_state, reward, next_state, done = traj[0]
adaptive_LSTD_lambda.update(None, 0 , cur_state)
if valid_episode_counter % config.batch_size == 0:
ep_rewards = []
ep_states = []
Z = np.zeros((config.num_features, config.num_states))
Z_gradient = np.zeros((config.num_features, config.num_states))
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Contents loader functions for DataSource.
'''
from scipy.misc import imresize, imread
from shutil import rmtree
from six import StringIO
from six.moves.urllib.parse import urljoin
from tqdm import tqdm
import contextlib
import csv
import h5py
import numpy
import os
import six.moves.urllib.request as request
import six
import tempfile
from nnabla.logger import logger
pypng_available = False
try:
import png
pypng_available = True
except ImportError:
pass
cv2_available = False
try:
import cv2
# TODO: Currently cv2 image reader doesn't work.
# cv2_available = True
except ImportError:
pass
class FileReader:
'''FileReader
Read dataset from sevaral data sources.
Supported data sources are,
* Local file (file or directory name)
* HTTP/HTTPS (URI)
* S3 (URI with s3:// prefix)
Currently the HTTP/HTTPS source does not support CACHE input because
there is no standard way to get a directory listing over the
HTTP/HTTPS protocol.
To access S3 data, you must specify credentials with environment
variable.
For example,
::
$ export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
$ export AWS_SECRET_ACCESS_KEY=<KEY>
Or, you can specify PROFILE with following.
::
$ export AWS_DEFAULT_PROFILE=my_profile
'''
def __init__(self, base_uri):
self._base_uri = base_uri
if base_uri[0:5].lower() == 's3://':
self._file_type = 's3'
uri_header, uri_body = self._base_uri.split('://', 1)
us = uri_body.split('/')
bucketname = us.pop(0)
self._s3_base_key = '/'.join(us)
logger.info('Creating session for S3 bucket {}'.format(bucketname))
import boto3
self._s3_bucket = boto3.session.Session().resource('s3').Bucket(bucketname)
elif base_uri[0:7].lower() == 'http://' or base_uri[0:8].lower() == 'https://':
self._file_type = 'http'
else:
self._file_type = 'file'
@contextlib.contextmanager
def open(self, filename=None):
if filename is None:
filename = self._base_uri
else:
if self._file_type == 's3':
filename = urljoin(self._base_uri.replace(
's3://', 'http://'), filename.replace('\\', '/')).replace('http://', 's3://')
elif self._file_type == 'http':
filename = urljoin(self._base_uri, filename.replace('\\', '/'))
else:
filename = os.path.abspath(os.path.join(os.path.dirname(
self._base_uri.replace('\\', '/')), filename.replace('\\', '/')))
f = None
if self._file_type == 's3':
uri_header, uri_body = filename.split('://', 1)
us = uri_body.split('/')
bucketname = us.pop(0)
key = '/'.join(us)
logger.info('Opening {}'.format(key))
f = StringIO(self._s3_bucket.Object(key).get()['Body'].read())
elif self._file_type == 'http':
f = request.urlopen(filename)
else:
f = open(filename, 'rb')
yield f
f.close()
@contextlib.contextmanager
def open_cache(self, cache_name):
if self._file_type == 's3':
tmpdir = tempfile.mkdtemp()
filename = urljoin((self._base_uri + '/').replace('s3://', 'http://'),
cache_name.replace('\\', '/')).replace('http://', 's3://')
key = '/'.join(filename.split('/')[3:])
fn = '{}/{}'.format(tmpdir, os.path.basename(filename))
with open(fn, 'wb') as f:
f.write(self._s3_bucket.Object(key).get()['Body'].read())
with h5py.File(fn, 'r') as h5:
yield h5
rmtree(tmpdir, ignore_errors=True)
elif self._file_type == 'http':
pass
else:
filename = os.path.abspath(os.path.join(os.path.dirname(
(self._base_uri + '/').replace('\\', '/')), cache_name.replace('\\', '/')))
with h5py.File(filename, 'r') as h5:
yield h5
def listdir(self):
if self._file_type == 's3':
list = []
for fn in self._s3_bucket.objects.filter(Prefix=self._s3_base_key + '/', Delimiter='/'):
list.append(os.path.basename(fn.key))
return sorted(list)
elif self._file_type == 'http':
return None
return sorted(os.listdir(self._base_uri))
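# Usage sketch for FileReader (the bucket and file names below are
# hypothetical, for illustration only; credentials for S3 are taken from the
# environment variables described in the class docstring):
#
#   reader = FileReader('s3://my-bucket/datasets/train')
#   for name in reader.listdir():
#       with reader.open(name) as f:
#           data = f.read()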
def load_image_imread(file, shape=None, max_range=1.0):
'''
Load image from file like object.
:param file: Image contents
:type file: file like object.
:param shape: shape of output array
e.g. (3, 128, 192) : n_color, height, width.
:type shape: tuple of int
:param float max_range: the value of return array ranges from 0 to `max_range`.
:return: numpy array
'''
img255 = imread(
file)  # return value is from zero to 255 (even if the image has 16-bit depth).
if len(img255.shape) == 2: # gray image
height, width = img255.shape
if shape is None:
out_height, out_width, out_n_color = height, width, 1
else:
out_n_color, out_height, out_width = shape
assert(out_n_color == 1)
if out_height != height or out_width != width:
# imresize returns 0 to 255 image.
img255 = imresize(img255, (out_height, out_width))
img255 = img255.reshape((out_n_color, out_height, out_width))
elif len(img255.shape) == 3: # RGB image
height, width, n_color = img255.shape
if shape is None:
out_height, out_width, out_n_color = height, width, n_color
else:
out_n_color, out_height, out_width = shape
assert(out_n_color == n_color)
if out_height != height or out_width != width or out_n_color != n_color:
# imresize returns 0 to 255 image.
img255 = imresize(img255, (out_height, out_width, out_n_color))
img255 = img255.transpose(2, 0, 1)
if max_range < 0 or max_range == 255.0:
return img255
else:
return img255 * (max_range / 255.0)
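# Usage sketch (the file name is hypothetical): load an RGB image into a
# (channels, height, width) float array scaled to [0, 1]:
#
#   with open('sample.png', 'rb') as f:
#       img = load_image_imread(f, shape=(3, 128, 192), max_range=1.0)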
def load_image_pypng(file, shape=None, max_range=1.0):
import png
r = png.Reader(file=file)
width, height, pixels, metadata = r.read()
bitscale = 2 ** metadata['bitdepth'] - 1
img = numpy.array(list(pixels), dtype=numpy.float32).reshape(
(height, width, -1)) / bitscale # (height, width, n_channel)
if metadata['alpha'] and metadata['planes'] == 4: # RGBA
# TODO: this case is not tested well
try:
bg = numpy.array(metadata['background']) / bitscale
except KeyError:
bg = numpy.array([1.0, 1.0, 1.0])
import numpy as np
import matplotlib.pyplot as plt
# Uses central difference with 4 points.
# I know the error (truncation and machine precision) is bounded by
# M*h^4/30+4*MachineEpsilon/h where M is the magnitude bound on the 5th derivative of f
# This implies the best h value should be h=(30*MachineEpsilon/M)^(1/5)
# However without a good idea of what M actually is I can't use this.
# If M=1 this corresponds to about 10^-3.
def derivative(f, comp, *args, h=1.0E-3):
coef = [-1.0/12, 2.0/3, 0, -2.0/3, 1.0/12]
m = (len(coef)-1)//2
diff = np.zeros(len(args))
diff[comp] = h
tot = 0
for i in range(m):
tot += coef[i]*f(*(np.array(args)+(m-i)*diff))
tot += coef[-1-i]*f(*(np.array(args)-(m-i)*diff))
return tot/h
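# Example sketch: for f(x) = sin(x), derivative(np.sin, 0, 0.0) approximates
# cos(0) = 1. With the default h = 1e-3, the truncation term M*h^4/30 (M ~ 1
# for sin) plus the round-off term 4*MachineEpsilon/h keeps the error around
# 1e-12, consistent with the error bound discussed above.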
def testDeriv(f, df, comp, *args):
hvals = np.logspace(-12, 0, 1000)
diff = []
minDiff = 10**5
bestH = 0
for h in hvals:
numDf = derivative(f, comp, *args, h=h)
currDiff = abs(df(*args) - numDf)
diff.append(currDiff)
if currDiff < minDiff:
minDiff = currDiff
bestH = h
print(bestH)
print(minDiff)
plt.loglog(hvals, diff)
plt.show()
# y is the function we would like to approximate (can be a vector) and f is the
# function which given t and y calculates the derivative of y with respect to t
# This function takes one step of size h using the RK4 method.
def RK4(t_0, y_0, f, h=1.0E-3):
t = t_0
y = np.copy(y_0)
k1 = f(t, y)
k2 = f(t+h/2.0, y+h*k1/2.0)
k3 = f(t+h/2.0, y+h*k2/2.0)
k4 = f(t+h, y+h*k3)
t += h
y += (h/6.0)*(k1+2*k2+2*k3+k4)
return t, y
# Given initial value problem returns the value of y evolved from times t_0
# until stop(t, y) return True.
# Uses adaptive time steps (Taken from Wikipedia page on adaptive step size)
def initialValueSolution(t_0, y_0, f, stop, tol=1.0E-5):
t = t_0
y = np.array(y_0).astype(float)
h = 1.0E-2
tvals = [t]
yvals = [y]
numStepsTaken = 0
while not stop(t, y, numStepsTaken):
tFull, yFull = RK4(t, y, f, h=h)
tHalf, yHalf = RK4(t, y, f, h=h/2.0)
tHalf, yHalf = RK4(tHalf, yHalf, f, h=h/2.0)
error = max(abs(yFull-yHalf))
if error < tol:
tvals.append(tFull)
yvals.append(yFull)
t = tFull
y = yFull
numStepsTaken += 1
if error == 0:
# print("0 error?")
h *= 2
else:
# .9 is safety factor to make sure we get desired accuracy,
# .3 is minimum decrease in h, 2 is maximum increase
h = .9*h*min(max(tol/error, .3), 2)
tvals = np.array(tvals)
yvals = np.array(yvals)
return tvals, yvals
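# Usage sketch: solve dy/dt = -y with y(0) = 1 until t reaches 1.0; the
# returned yvals should track exp(-t) at the returned tvals to within tol:
#
#   tvals, yvals = initialValueSolution(0.0, [1.0],
#                                       lambda t, y: -y,
#                                       lambda t, y, steps: t >= 1.0)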
import numpy as np
import pandas as pd
import pytest
from pydantic import ValidationError
from napari.layers.utils.string_encoding import (
ConstantStringEncoding,
FormatStringEncoding,
ManualStringEncoding,
)
from napari.layers.utils.text_manager import TextManager
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_empty_text_manager_property():
"""Test creating an empty text manager in property mode.
This is for creating an empty layer with text initialized.
"""
properties = {'confidence': np.empty(0, dtype=float)}
text_manager = TextManager(
text='confidence', n_text=0, properties=properties
)
assert text_manager.values.size == 0
# add a text element
new_properties = {'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
np.testing.assert_equal(text_manager.values, ['0.5'])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_add_many_text_property():
properties = {'confidence': np.empty(0, dtype=float)}
text_manager = TextManager(
text='confidence',
n_text=0,
properties=properties,
)
text_manager.add({'confidence': np.array([0.5])}, 2)
np.testing.assert_equal(text_manager.values, ['0.5'] * 2)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_empty_text_manager_format():
"""Test creating an empty text manager in formatted mode.
This is for creating an empty layer with text initialized.
"""
properties = {'confidence': np.empty(0, dtype=float)}
text = 'confidence: {confidence:.2f}'
text_manager = TextManager(text=text, n_text=0, properties=properties)
assert text_manager.values.size == 0
# add a text element
new_properties = {'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
np.testing.assert_equal(text_manager.values, ['confidence: 0.50'])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_add_many_text_formatted():
properties = {'confidence': np.empty(0, dtype=float)}
text_manager = TextManager(
text='confidence: {confidence:.2f}',
n_text=0,
properties=properties,
)
text_manager.add({'confidence': np.array([0.5])}, 2)
np.testing.assert_equal(text_manager.values, ['confidence: 0.50'] * 2)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_manager_property():
n_text = 3
text = 'class'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
text_manager = TextManager(text=text, n_text=n_text, properties=properties)
np.testing.assert_equal(text_manager.values, classes)
# add new text with properties
new_properties = {'class': np.array(['A']), 'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
expected_text_2 = np.concatenate([classes, ['A']])
np.testing.assert_equal(text_manager.values, expected_text_2)
# remove the first text element
text_manager.remove({0})
np.testing.assert_equal(text_manager.values, expected_text_2[1::])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_manager_format():
n_text = 3
text = 'confidence: {confidence:.2f}'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
expected_text = np.array(
['confidence: 0.50', 'confidence: 0.30', 'confidence: 1.00']
)
text_manager = TextManager(text=text, n_text=n_text, properties=properties)
np.testing.assert_equal(text_manager.values, expected_text)
# add new text with properties
new_properties = {'class': np.array(['A']), 'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
expected_text_2 = np.concatenate([expected_text, ['confidence: 0.50']])
np.testing.assert_equal(text_manager.values, expected_text_2)
# test getting the text elements when there are none in view
text_view = text_manager.view_text([])
np.testing.assert_equal(text_view, np.empty((0,), dtype=str))
# test getting the text elements when the first two elements are in view
text_view = text_manager.view_text([0, 1])
np.testing.assert_equal(text_view, expected_text_2[0:2])
text_manager.anchor = 'center'
coords = np.array([[0, 0], [10, 10], [20, 20]])
text_coords = text_manager.compute_text_coords(coords, ndisplay=3)
np.testing.assert_equal(text_coords, (coords, 'center', 'center'))
# remove the first text element
text_manager.remove({0})
np.testing.assert_equal(text_manager.values, expected_text_2[1::])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_refresh_text():
n_text = 3
text = 'class'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
text_manager = TextManager(text=text, n_text=n_text, properties=properties)
new_classes = np.array(['D', 'E', 'F'])
new_properties = {
'class': new_classes,
'confidence': np.array([0.5, 0.3, 1]),
}
text_manager.refresh_text(new_properties)
np.testing.assert_equal(new_classes, text_manager.values)