prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59)
---|---|---|
import os
import sys
import numpy as np
import json
import random
import trimesh
from sklearn.decomposition import PCA
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '..', 'code'))
from pyquaternion import Quaternion
def load_obj(fn):
fin = open(fn, 'r')
lines = [line.rstrip() for line in fin]
fin.close()
vertices = []; faces = [];
for line in lines:
if line.startswith('v '):
vertices.append(np.float32(line.split()[1:4]))
elif line.startswith('f '):
faces.append(np.int32([item.split('/')[0] for item in line.split()[1:4]]))
f_arr = np.vstack(faces)
v_arr = np.vstack(vertices)
mesh = dict()
mesh['v'] = v_arr
mesh['f'] = f_arr
return mesh
def export_obj(out, mesh):
v = mesh['v']; f = mesh['f'];
with open(out, 'w') as fout:
for i in range(v.shape[0]):
fout.write('v %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
for i in range(f.shape[0]):
fout.write('f %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2]))
def get_quaternion_from_axis_angle(axis, angle):
return Quaternion(axis=axis, angle=angle)
def get_quaternion_from_xy_axes(x, y):
x /= np.linalg.norm(x)
y /= np.linalg.norm(y)
z = np.cross(x, y)
z /= np.linalg.norm(z)
y = np.cross(z, x)
y /= np.linalg.norm(y)
R = np.vstack([x, y, z]).T
return Quaternion(matrix=R)
def get_rot_mat_from_quaternion(q):
return np.array(q.transformation_matrix, dtype=np.float32)
# center: numpy array of length 3
# size: numpy array of length 3
# q: numpy array of length 4 for quaternion
# output: mesh
# v --> vertices
# f --> faces
# setting --> 4 x 4 numpy array containing the world coordinates for the cube center (0, 0, 0, 1)
# and three local axes (1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1)
def gen_cuboid(center, size, q):
cube_mesh = load_obj('cube.obj')
cube_v = cube_mesh['v']
cube_f = cube_mesh['f']
n_vert = cube_v.shape[0]
n_face = cube_f.shape[0]
cube_v = np.concatenate([cube_v, np.ones((n_vert, 1))], axis=1)
cube_control_v = np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]], dtype=np.float32)
S = np.array([[size[0], 0, 0, 0], [0, size[1], 0, 0], [0, 0, size[2], 0], [0, 0, 0, 1]], dtype=np.float32)
R = q.transformation_matrix
T = np.array([[1, 0, 0, center[0]], [0, 1, 0, center[1]], [0, 0, 1, center[2]], [0, 0, 0, 1]], dtype=np.float32)
rot = T.dot(R).dot(S)
cube_v = rot.dot(cube_v.T).T
cube_control_v = rot.dot(cube_control_v.T).T
mesh = dict()
mesh['v'] = cube_v
mesh['f'] = cube_f
mesh['setting'] = cube_control_v
return mesh
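# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Ties together the helpers above, following the comment block over gen_cuboid:
# build one oriented cuboid and write it to disk. The center/size/axis values and
# the output path 'demo_cuboid.obj' are made up; gen_cuboid still expects a
# 'cube.obj' file to exist at runtime.
def _demo_gen_cuboid():
    center = np.array([0.0, 0.5, 0.0], dtype=np.float32)
    size = np.array([1.0, 2.0, 0.5], dtype=np.float32)
    q = get_quaternion_from_axis_angle(np.array([0.0, 1.0, 0.0]), np.pi / 4)
    cuboid = gen_cuboid(center, size, q)
    # export_obj only reads the first three columns of 'v', so the homogeneous
    # coordinate returned by gen_cuboid does not need to be stripped first
    export_obj('demo_cuboid.obj', cuboid)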
def assemble_meshes(mesh_list):
n_vert = 0
verts = []; faces = [];
for mesh in mesh_list:
verts.append(mesh['v'])
faces.append(mesh['f']+n_vert)
n_vert += mesh['v'].shape[0]
vert_arr = np.vstack(verts)
face_arr = np.vstack(faces)
mesh = dict()
mesh['v'] = vert_arr
mesh['f'] = face_arr
return mesh
def export_settings(out_fn, setting_list):
with open(out_fn, 'w') as fout:
for setting in setting_list:
for i in range(4):
for j in range(4):
fout.write('%f ' % setting[i, j])
fout.write('\n')
def export_csg(out_fn, csg):
with open(out_fn, 'w') as fout:
json.dump(csg, fout)
def export_meshes(out, mesh_list):
with open(out, 'w') as fout:
n_vert = 0
verts = []; faces = [];
for idx, mesh in enumerate(mesh_list):
fout.write('\ng %d\n' % idx)
for i in range(mesh['v'].shape[0]):
fout.write('v %f %f %f\n' % (mesh['v'][i, 0], mesh['v'][i, 1], mesh['v'][i, 2]))
for i in range(mesh['f'].shape[0]):
fout.write('f %d %d %d\n' % (mesh['f'][i, 0]+n_vert, mesh['f'][i, 1]+n_vert, mesh['f'][i, 2]+n_vert))
n_vert += mesh['v'].shape[0]
def gen_cuboid_from_setting(setting):
R = np.array([setting[1] - setting[0],
setting[2] - setting[0],
setting[3] - setting[0],
setting[0]], dtype=np.float32).T
cube_mesh = load_obj('cube.obj')
cube_v = cube_mesh['v']
cube_f = cube_mesh['f']
n_vert = cube_v.shape[0]
cube_v = np.concatenate([cube_v, np.ones((n_vert, 1))], axis=1)
mesh = dict()
mesh['v'] = R.dot(cube_v.T).T
mesh['f'] = cube_f
mesh['setting'] = setting
return mesh
def settings_to_meshes(settings):
meshes = []
for setting in settings:
meshes.append(gen_cuboid_from_setting(setting))
return meshes
def create_axis_aligned_setting(x_min, x_max, y_min, y_max, z_min, z_max):
setting = np.array([[(x_min+x_max)/2, (y_min+y_max)/2, (z_min+z_max)/2, 1],
[x_max, (y_min+y_max)/2, (z_min+z_max)/2, 1],
[(x_min+x_max)/2, y_max, (z_min+z_max)/2, 1],
[(x_min+x_max)/2, (y_min+y_max)/2, z_max, 1]], dtype=np.float32)
return setting
def create_rotate_45_setting(x_min, x_max, y_min, y_max, z_min, z_max):
l1 = (x_max - x_min) / 2 / np.sqrt(2)
l2 = (z_max - z_min) / 2 / np.sqrt(2)
setting = np.array([[(x_min+x_max)/2, (y_min+y_max)/2, (z_min+z_max)/2, 1],
[(x_min+x_max)/2+l1, (y_min+y_max)/2, (z_min+z_max)/2+l1, 1],
[(x_min+x_max)/2, y_max, (z_min+z_max)/2, 1],
[(x_min+x_max)/2-l2, (y_min+y_max)/2, (z_min+z_max)/2+l2, 1]], dtype=np.float32)
return setting
def normalize_shape(settings):
mesh = assemble_meshes(settings_to_meshes(settings))
pts = sample_pc(mesh['v'][:, :3], mesh['f'], n_points=200)
center = np.mean(pts, axis=0)
pts -= center
scale = np.sqrt(np.max(np.sum(pts**2, axis=1)))
T = np.array([[1, 0, 0, -center[0]],
[0, 1, 0, -center[1]],
[0, 0, 1, -center[2]],
[0, 0, 0, 1]], dtype=np.float32)
S = np.array([[1.0/scale, 0, 0, 0],
[0, 1.0/scale, 0, 0],
[0, 0, 1.0/scale, 0],
[0, 0, 0, 1]], dtype=np.float32)
rot_mat = S.dot(T)
new_settings = []
for setting in settings:
new_settings.append(rot_mat.dot(setting.T).T)
return new_settings
def random_rotate(settings):
rotation_angle = random.random() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval, 0],
[0, 1, 0, 0],
[-sinval, 0, cosval, 0],
[0, 0, 0, 1]], dtype=np.float32)
new_settings = []
for setting in settings:
new_settings.append(rotation_matrix.dot(setting.T).T)
return new_settings
def gen_obb_mesh(obbs):
# load cube
cube_mesh = load_obj('cube.obj')
cube_v = cube_mesh['v']
cube_f = cube_mesh['f']
all_v = []; all_f = []; vid = 0;
for pid in range(obbs.shape[0]):
p = obbs[pid, :]
center = p[0: 3]
lengths = p[3: 6]
dir_1 = p[6: 9]
dir_2 = p[9: ]
dir_1 = dir_1/np.linalg.norm(dir_1)
dir_2 = dir_2/np.linalg.norm(dir_2)
dir_3 = np.cross(dir_1, dir_2)
dir_3 = dir_3/np.linalg.norm(dir_3)
v = np.array(cube_v, dtype=np.float32)
import sys
import os
import pickle
import argparse
import copy
import tqdm
import fairseq
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.colors as mcolors
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
from mplot3d_dragger import Dragger3D
import torch
import seaborn
import numpy as np
def plot_trigram_with_proj(plt_scores, plt_labels_x, plt_labels_y, plt_labels_z, plt_scores_xy=None, plt_scores_yz=None, save_path=None, beam_size=5, animate=False, use_ax=None, setaxis=True, use_norm=None):
# invert x and z
plt_labels_x = reversed(copy.deepcopy(plt_labels_x))
plt_labels_z = reversed(copy.deepcopy(plt_labels_z))
plt_scores = torch.flip(plt_scores.data.clone(), [0, 2])
if plt_scores_xy is not None:
plt_scores_xy = torch.flip(plt_scores_xy.data.clone(), [1])
if plt_scores_yz is not None:
plt_scores_yz = torch.flip(plt_scores_yz.data.clone(), [0])
x_label_pad = 22
y_label_pad = 30
z_label_pad = 47
proj_alpha = 1
cube_alpha = 1
mask_cube = plt_scores.ne(-float('inf'))
mask_yz = mask_cube.any(0)
mask_xy = mask_cube.any(-1)
if not animate:
seaborn.set_context("poster", font_scale=0.85)
fig = plt.figure(figsize=(10,12), dpi=200)
plt.rcParams['grid.linewidth'] = 0.
plt.rcParams['figure.constrained_layout.use'] = False
ax = fig.gca(projection='3d')
ax.set_xticks(np.arange(0, beam_size, 1))
ax.set_yticks(np.arange(0, beam_size, 1))
ax.set_zticks(np.arange(0, beam_size, 1))
ax.set_xlim(0, beam_size)
ax.set_ylim(0, beam_size)
ax.set_zlim(0, beam_size)
else:
assert use_ax is not None
ax = use_ax
if plt_scores_xy is not None:
X = np.arange(0, beam_size+1, 1)
Y = np.arange(0, beam_size+1, 1)
X, Y = np.meshgrid(X, Y)
Z = X * 0
plt_mask = plt_scores_xy.eq(-float('inf')) | (~mask_xy.transpose(-1, -2))
#if (~plt_mask).float().sum().item() < 10:
# sys.exit(1)
plt_scores_xy[plt_mask] = plt_scores_xy.max()
v = plt_scores_xy.cpu().numpy()
norm = plt.Normalize()
colors = plt.cm.cool(norm(v))
colors = torch.Tensor(colors)
colors[:, :, -1] = proj_alpha
colors[plt_mask] = 0
colors = colors.numpy()
colors[:,:,0]=0.173
colors[:,:,1]=0.153
colors[:,:,2]=0.118
# Plot the surface.
#surf = ax.plot_surface(X, Y, Z, facecolors=colors, #cmap=cm.coolwarm,
# linewidth=0, zorder=-1e3, shade=True, edgecolors='none')
for i in range(beam_size):
for j in range(beam_size):
if not plt_mask[j,i]:
#print (j,i)
color = np.zeros((1,1,4))
color = color + colors[j, i] * 0
color[:,:,-1]=1
color[:,:,0]=0.5
color[:,:,1]=0.5
color[:,:,2]=0.5
ddd = 0
if j == 0:
linew = 0
#if i == 9:
# ddd= 0.5
# print ('here')
else:
linew = 0.5
X2 = np.arange(i-ddd, i+2, 1+ddd)
Y2 = np.arange(j, j+2, 1)
#!/usr/bin/env python
import os
import glob
import numpy as np
from astropy.io import fits
from astropy.time import Time
from astropy.table import Column, MaskedColumn
import matplotlib.pyplot as plt
from iminuit import Minuit
from probfit import Chi2Regression, linear
TessTimeBin_sec = 120.0 # sec
TessTimeBin_day = TessTimeBin_sec / 24. / 60. / 60.
MISSING_VALUE = -9999
class Hist1D(object):
def __init__(self, edges):
self.edges = edges
self.hist, edges = np.histogram([], bins=self.edges)
self.bins = (edges[:-1] + edges[1:]) / 2.
def fill(self, arr):
hist, edges = np.histogram(arr, bins=self.edges)
self.hist += hist
@property
def data(self):
return self.bins, self.hist
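# Hedged usage note for Hist1D above (illustrative values, not from the original script):
#   h = Hist1D(edges=np.array([0., 1., 2.]))
#   h.fill([0.5, 1.5, 1.6])
#   h.data  ->  (array([0.5, 1.5]), array([1, 2]))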
def get_count_rate(cnt_array,exp_array):
rate_list = []
error_list = []
for i in range(len(cnt_array.data[1])):
cnt = cnt_array.data[1][i]
exp = exp_array.data[1][i]
if exp > 0:
rate = float(cnt) / float(exp)
error = float(np.sqrt(cnt)) / float(exp)
#print(cnt,exp,rate)
else:
rate = 0
error = 0
rate_list.append(rate)
error_list.append(error)
return np.array(rate_list), np.array(error_list)
class TessLightCurve():
def __init__(self,fitsfile):
self.fitsfile = fitsfile
print(self.fitsfile)
self.hdu = fits.open(self.fitsfile)
self.basename = os.path.splitext(os.path.basename(self.fitsfile))[0]
print(self.basename)
self.time_mjd = self.get_mjd()
self.lc_orig_table = self.hdu['LIGHTCURVE'].data
self.lc_orig_cols = self.lc_orig_table.columns
self.edges = self.time_mjd+TessTimeBin_day/2.0
self.edges = np.insert(self.edges,0,self.time_mjd[0]-TessTimeBin_day/2.0)
self.lc_list = {}
def get_mjd(self):
"""
TUNIT1 = 'BJD - 2457000, days' / column units: Barycenter corrected TESS Julian
TESS : BJD = TIME + 2457000 days
# MJD = BJD - 2400000.5
# https://en.wikipedia.org/wiki/Julian_day
"""
return self.hdu['LIGHTCURVE'].data['TIME'] + self.hdu['LIGHTCURVE'].header['BJDREFI'] + self.hdu['LIGHTCURVE'].header['BJDREFF'] - 2400000.5
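    # Worked example of the conversion above (illustrative numbers only):
    #   TIME = 1331.0 (BJD - 2457000), BJDREFI = 2457000, BJDREFF = 0.0
    #   BJD  = 1331.0 + 2457000 + 0.0 = 2458331.0
    #   MJD  = 2458331.0 - 2400000.5  = 58330.5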
def cadence2mjd(self,a=0.00138893,b=58226.94810026):
return a * self.hdu['LIGHTCURVE'].data['CADENCENO'] + b
def append_nicer_gti(self,input_niobs_list):
self.niobsid_list = []
self.nigti_list = []
self.nimask = []
for mjd in self.time_mjd:
#out_gtinum = np.nan
out_gtinum = MISSING_VALUE
out_niobs = MISSING_VALUE
out_mask = True
for niobs in input_niobs_list:
out_gtinum = niobs.get_mjdnum(mjd)
#if not np.isnan(out_gtinum):
if out_gtinum != MISSING_VALUE:
out_niobs = niobs.obsid
out_mask = False
break
#if not np.isnan(out_gtinum):
#if out_gtinum != MISSING_VALUE:
# print(mjd,out_niobs,out_gtinum)
self.niobsid_list.append(out_niobs)
self.nigti_list.append(out_gtinum)
self.nimask.append(out_mask)
def append_nicer_count_rate(self,input_niobs_list,emin_keV,emax_keV):
print(emin_keV)
print(emax_keV)
name_cnt = 'cnt_%s_%skeV' % (emin_keV,emax_keV)
name_exp = 'exp_%s_%skeV' % (emin_keV,emax_keV)
name_rate = 'cps_%s_%skeV' % (emin_keV,emax_keV)
name_error = 'err_%s_%skeV' % (emin_keV,emax_keV)
lc_hist_cnt = Hist1D(edges=self.edges)
lc_hist_exp = Hist1D(edges=self.edges)
for niobs in input_niobs_list:
#print(niobs.obsid)
mask_energy = np.logical_and(niobs.keV>=emin_keV, niobs.keV<=emax_keV)
from datetime import timedelta
from astropy import units as u
import numpy as np
from sunpy.time import parse_time
def get_sky_position(time, offset):
"""Code for converting solar offsets to pointing position.
Parameters
----------
time: Date that is parsable by sunpy.time.parse_time()
i.e.,
time='2016-07-26T19:53:15.00'
offset: Offset from the center of the Sun. Must have units from astropy:
i.e.: offset = np.array([1000, 150]) * u.arcsec
Returns
----------
sky_position: Two-element array giving the [RA, Dec] coordinates of the target location.
Notes
----------
Syntax:
sky_position = get_sky_position(time, offset)
"""
from astropy.coordinates import get_sun
from astropy.time import Time
# Replaced with newer sunpy v1 function
# from sunpy import sun
from sunpy.coordinates import sun
# Convert the date into something that's usable by astropy.
start_date = parse_time(time)
astro_time = Time(start_date)
# Use astropy get_sun for Sun sky position.
# sunpy has a similar function, but it may be giving a different
# epoch for the RA and dec. We need them in J2000 RA and dec.
astro_sun_pos = get_sun(astro_time)
# Get the solar north pole angle. cgs --> radians
# Update for sunpy v1.0+
# sun_np=sun.solar_north(t=time).cgs
sun_np=sun.P(time).cgs
# Get the center of the Sun, and assign it degrees.
# Doing it this way is necessary to do the vector math below.
sun_pos = np.array([astro_sun_pos.ra.deg, astro_sun_pos.dec.deg])* u.deg
# Rotation matrix for a counter-clockwise rotation since we're going
# back to celestial north from solar north
rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
[-np.sin(sun_np), np.cos(sun_np)]])
# Project the offset onto the Sun
delta_offset = np.dot(offset, rotMatrix)
# Scale to RA based on the declination.
delta_offset = delta_offset * np.array([1. / np.cos(sun_pos[1]), 1.])
# Account for the fact that +Ra == East and we have defined +X = West
delta_offset = delta_offset * [-1.0, 1.0]
# Apply the offset and return the sky position.
sky_position = sun_pos + delta_offset
return sky_position
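# --- Hedged usage sketch (not in the original module) ---
# Mirrors the docstring of get_sky_position; the time and offset are the
# docstring's own example values.
#   offset = np.array([1000, 150]) * u.arcsec
#   radec = get_sky_position('2016-07-26T19:53:15.00', offset)
#   # radec -> [RA, Dec] in degrees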
def get_skyfield_position(time, offset, load_path=None, parallax_correction=False):
"""Code for converting solar coordinates to astrometric (J200) RA/Dec coordinates.
Parameters
----------
time: Date that is parsable by sunpy.time.parse_time()
i.e.,
time='2016-07-26T19:53:15.00'
offset: Offset from the center of the Sun. Must have units from astropy:
i.e.: offset = np.array([1000, 150]) * u.arcsec
load_path (optional): Relative path from currently location to store bsp files
parallax_correction: Use the NuSTAR TLE to correct for orbital parallax
Returns
----------
sky_position: Two-element array giving the [RA, Dec] coordinates of the
target location. Note this is given in astrometric (J2000) RA/Dec, which is what
we need for the NuSTAR planning system.
Notes
----------
Syntax:
skyfield_position = get_skyfield_position(time, offset)
"""
from astropy.time import Time
# Replaced with newer sunpy v1 function
# from sunpy import sun
from sunpy.coordinates import sun
from nustar_pysolar.utils import skyfield_ephem
start_date = parse_time(time)
utc = Time(start_date)
observer, sunephem, ts = skyfield_ephem(load_path=load_path,
parallax_correction=parallax_correction,
utc=utc)
tcheck = ts.from_astropy(utc)
geocentric = observer.at(tcheck).observe(sunephem)
this_ra_geo, this_dec_geo, dist = geocentric.radec()
# Get the solar north pole angle. cgs --> radians
# sun_np = sunpy.sun.solar_north(t=time).cgs
# Update for sunpy v1.0+
sun_np=sun.P(time).cgs
# Get the center of the Sun, and assign it degrees.
# Doing it this way is necessary to do the vector math below.
sun_pos = np.array([this_ra_geo.to(u.deg).value, this_dec_geo.to(u.deg).value])*u.deg
# Rotation matrix for a counter-clockwise rotation since we're going
# back to celestial north from solar north
rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
[-np.sin(sun_np), np.cos(sun_np)]])
# Project the offset onto the Sun
delta_offset = np.dot(offset, rotMatrix)
# Scale to RA based on the declination.
delta_offset = delta_offset * np.array([1. / np.cos(sun_pos[1]), 1.])
# Account for the fact that +Ra == East and we have defined +X = West
delta_offset = delta_offset * [-1.0, 1.0]
# Apply the offset and return the sky position.
sky_position = sun_pos + delta_offset
return sky_position
def get_nustar_roll(time, angle):
"""Code to determine the NuSTAR roll angle for a given field-of-view on the
Sun for a given time.
Parameters
----------
time: Date that is parsable by sunpy.time.parse_time()
i.e.
time='2016-07-26T19:53:15.00'
angle: Desired roll offset from solar north in degrees.
For a "square" field of view, use angle=0 / 90 / 180 / 270 to have DET0
at the NE / SE / SW / NW corners of a square field of view.
For a "diamond" with DET0 to the south, use angle = 45.
Returns
----------
nustar_roll: NuSTAR PA angle with respect to celestial north.
"""
# Replaced with newer sunpy v1 function
# from sunpy import sun
from sunpy.coordinates import sun
# Get the solar north pole angle. cgs --> radians
# sun_np=sun.solar_north(t=time).deg * u.deg
# Update for sunpy v1.0+
sun_np=sun.P(time).deg*u.deg
nustar_roll = np.mod(sun_np + angle, 360*u.deg)
return nustar_roll;
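# Hedged note on get_nustar_roll: with angle = 0 * u.deg the returned roll is just
# the solar P angle (mod 360 deg), e.g.
#   roll = get_nustar_roll('2016-07-26T19:53:15.00', 0. * u.deg)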
def _parse_timestamp(tstamp):
"""Convenience function for turning the SOC timestamp into a datetime object.
"""
date1 = tstamp.split('/')
year=date1[0].strip()
day, time=(date1[1].split())
stub = (year.strip()+'-01-01T00:00:00')
year = parse_time(stub)
hr, min, sec = time.split(':')
dt = timedelta(int(day)-1, int(sec), 0, 0, int(min), int(hr))
return year+dt;
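# Hedged example of the shadow-file timestamp layout _parse_timestamp expects
# (inferred from the parsing code above, not from documentation):
#   '2017/070 12:34:56'  ->  2017-03-11T12:34:56  (day-of-year 70)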
def _parse_SOC_timestamp(tstamp):
"""Convenience function for turning the timestamp into a datetime object.
"""
date1 = tstamp.split(':')
year = date1[0]
day = date1[1]
hr = date1[2]
min = date1[3]
sec = date1[4]
stub = (year.strip()+'-01-01T00:00:00')
year = parse_time(stub)
# hr, min, sec = date1[2:4]
dt = timedelta(int(day)-1, int(sec), 0, 0, int(min), int(hr))
return year+dt;
def parse_occultations(infile):
"""Parse the shadow analysis file to determine the 'in Sun' times.
Parameters
----------
infile: Input file to be parsed.
Returns
----------
Returns a list of [ [start, stop], [start, stop] ] times where start means
you egress from Earth shadow into the sunlight, while stop means you
re-enter Earth shadow.
Notes
---------
"""
f = open(infile)
all_pairs = []
start = 0
for ind,line in enumerate(f):
# Little parser here to find the right place to start reading in...
if (line.find("Shadow Begin") != -1):
start=start+1
# Skips over additional lines of whitespace.
if(start == 0):
continue
if(start <3):
start+=1
continue
# Get the first date string:
fields = line.split('-')
first = fields[0]
dtfirst = _parse_timestamp(first)
second = (fields[1].split('UTC'))[0].strip()
dtsecond=_parse_timestamp(second)
# Since the file actually gives the start/stop times of going into
# earthshadow, we actually want the "In Sun" times, which is the egress
# from earthshadow and the entry into the next earthshadow.
# Note that this skips the first row.
if(start == 3):
start+=1
else:
all_pairs.append([last, dtfirst])
# Store the last entry to add in the next time around...
last=dtsecond
f.close()
return all_pairs
def sunlight_periods(infile, tstart, tend):
"""Return the periods when NuSTAR is in Sunlight in the given timerange.
Parameters
----------
tstart, tend: ISO formatted times or something else that
sunpy.time.parse_time() can read.
i.e.
tstart='2017-03-11T23:09:10'
infile: Input file to be parsed. This should the value returned by
nustar_pysolar.download_occultation_times()
Returns
----------
Returns a list of [ [start, stop], [start, stop] ] times where start means
you egress from Earth shadow into the sunlight, while stop means you
re-enter Earth shadow.
The list has been filtered to only include those epochs that span the given
time range.
Notes
---------
"""
import os.path
if not(os.path.isfile(infile)):
print('Error in nustar_pysolar.sunlight_periods.')
print('Input file: '+infile+' does not exist.')
return -1;
all_pairs = parse_occultations(infile)
checkstart = parse_time(tstart)
checkend = parse_time(tend)
in_range = []
set=0
for pair in all_pairs:
dtmin = (pair[0] - checkstart)
dtmax = (pair[1] - checkstart)
if ( (pair[1] > checkstart) ):
set=1
if (set == 0):
continue
if ( pair[1] > checkend ):
break
in_range.append(pair)
if len(in_range) == 0:
print('Error in function: '+sunlight_periods.__name__)
print('No dates found in range. Pick a different occultation file.')
return -1
else:
return in_range
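# Hedged usage sketch following the docstring above (the file name is made up):
#   windows = sunlight_periods('shadow_analysis.txt',
#                              '2017-03-11T23:09:10', '2017-03-12T23:09:10')
#   # -> list of [egress, ingress] pairs overlapping the requested range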
def make_mosaic(orbit, outfile='mosaic.txt', write_output=False, make_regions=False,
reg_pref='testbox', extra_roll=0.*u.deg, write_sun=False):
'''
Code to make a mosaic for a 5x5 tiled array on the Sun.
Input:
tstart = '2018-05-28T15:37:00'
tend = '2018-05-28T23:10:00'
positions = make_mosaic([tstart, tend], write_output=True)
Optional flags:
write_output = [False] / True
Write the output pointing positions in NuSTAR SOC readable formats in 'outfile' for all of the pointings.
outfile = ['mosaic.txt']
Output file if write_output is used.
make_regions: [False] / True
Make ds9 region files for each tile so that you can see how the FoV moves with each mosaic location.
reg_pref: 'testbox'
The prefix for the region files. Useful if you want to make this meaningful.
Output mosaic file has columns of:
"Arrive By Time" RA DEC RA_SUN DEC_SUN
'''
import numpy as np
box_pa = get_nustar_roll(orbit[0], extra_roll)
sun_pa = get_nustar_roll(orbit[0], 0.)
pa = box_pa + 90*u.deg
base = np.array([-1.45, -0.725, 0, 0.725, 1.45])
xsteps = np.append(base, np.flip(base, 0))
xsteps = np.append(xsteps, base)
xsteps = np.append(xsteps, np.flip(base, 0))
xsteps = np.append(xsteps, base)
ysteps = np.array(np.zeros(5) + 1.45)
ysteps = np.append(ysteps, np.zeros(5) + 0.725)
ysteps = np.append(ysteps, np.zeros(5))
ysteps = np.append(ysteps, np.zeros(5)-0.725)
ysteps = np.append(ysteps, np.zeros(5)-1.45)
# Rotation matrix for a clockwise rotation on the solar disk:
rotMatrix = np.array([[np.cos(extra_roll), np.sin(extra_roll)],
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
from torchvision.models.vgg import vgg19
import numpy as np

# 'device' is used by the loss classes below but was never defined in this
# snippet; assume the usual CUDA-if-available default here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class GANLoss(nn.Module):
def __init__(self, real_label=1.0, fake_label=0.0):
super(GANLoss, self).__init__()
self.real_label = real_label
self.fake_label = fake_label
#self.loss = nn.BCELoss().to(device)
self.lsgan = nn.MSELoss().to(device)
def convert_tensor(self, input, is_real):
if is_real:
return Variable(torch.FloatTensor(input.size()).fill_(self.real_label)).to(device)
else:
return Variable(torch.FloatTensor(input.size()).fill_(self.fake_label)).to(device)
def __call__(self, input, is_real):
return self.lsgan(input, self.convert_tensor(input,is_real).to(device))
class AttentionLoss(nn.Module):
def __init__(self, theta = 0.9, iteration = 5):
super(AttentionLoss, self).__init__()
self.theta = theta
self.iteration = iteration
self.loss = nn.MSELoss().to(device)
def __call__(self, A_, M_):
loss_ATT = None
for i in range(1, self.iteration+1):
if i == 1:
loss_ATT = pow(self.theta, float(self.iteration-i)) * self.loss(A_[i-1],M_)
else:
loss_ATT += pow(self.theta, float(self.iteration-i)) * self.loss(A_[i-1],M_)
return loss_ATT
# VGG19 pretrained on Imagenet
def trainable_(net, trainable):
for param in net.parameters():
param.requires_grad = trainable
class PerceptualLoss(nn.Module):
def __init__(self):
super(PerceptualLoss, self).__init__()
self.model = (vgg19(pretrained = True).to(device))
trainable_(self.model, False)
self.loss = nn.MSELoss().to(device)
self.vgg_layers = self.model.features
self.layer_names = { '0' : 'conv1_1', '3' : 'relu1_2', '6' : 'relu2_1', '8' : 'relu2_2', '11' : 'relu3_1' }
def get_layer_output(self, x):
output = []
for name, module in self.vgg_layers._modules.items():
if isinstance(module, nn.ReLU):
module = nn.ReLU(inplace=False)
x = module(x)
if name in self.layer_names:
output.append(x)
return output
def get_GTlayer_output(self, x):
with torch.no_grad():
output = []
for name, module in self.vgg_layers._modules.items():
if isinstance(module, nn.ReLU):
module = nn.ReLU(inplace=False)
x = module(x)
if name in self.layer_names:
output.append(x)
return output
def __call__(self, O_, T_):
o = self.get_layer_output(O_)
t = self.get_GTlayer_output(T_)
loss_PL = 0
for i in range(len(t)):
if i ==0:
loss_PL = self.loss(o[i],t[i])
else:
loss_PL += self.loss(o[i],t[i])
loss_PL=loss_PL/float(len(t))
loss_PL = Variable(loss_PL,requires_grad=True)
return loss_PL
class MultiscaleLoss(nn.Module):
def __init__(self, ld=[0.6, 0.8, 8], batch=1):
super(MultiscaleLoss, self).__init__()
self.loss = nn.L1Loss().to(device)
self.ld = ld
self.batch=batch
def __call__(self, S_, gt):
T_ = []
for i in range(S_[0].shape[0]):
temp = []
x = (np.array(gt[i])
from pprint import pprint
import json
import numpy as np
import statistics
import tensorflow as tf
with open("/data/input", "r") as file:
data = json.load(file)
features = []
labels = []
for album_uri, album in data.items():
def aggregate_mean():
aggregated = {
"anthony_score": float(album["anthony_score"]) / 10,
"acousticness": 0,
"danceability": 0,
"duration_ms": 0,
"energy": 0,
"instrumentalness": 0,
"key": 0,
"liveness": 0,
"loudness": 0,
"mode": 0,
"speechiness": 0,
"tempo": 0,
"time_signature": 0,
"valence": 0,
}
for track_uri, track in album["tracks"].items():
if track is not None:
aggregated["acousticness"] += track["acousticness"]
aggregated["danceability"] += track["danceability"]
aggregated["duration_ms"] += track["duration_ms"]
aggregated["energy"] += track["energy"]
aggregated["instrumentalness"] += track["instrumentalness"]
aggregated["key"] += track["key"]
aggregated["liveness"] += track["liveness"]
aggregated["loudness"] += track["loudness"]
aggregated["mode"] += track["mode"]
aggregated["speechiness"] += track["speechiness"]
aggregated["tempo"] += track["tempo"]
aggregated["time_signature"] += track["time_signature"]
aggregated["valence"] += track["valence"]
aggregated["acousticness"] /= len(album["tracks"])
aggregated["danceability"] /= len(album["tracks"])
aggregated["duration_ms"] /= len(album["tracks"])
aggregated["energy"] /= len(album["tracks"])
aggregated["instrumentalness"] /= len(album["tracks"])
aggregated["key"] /= len(album["tracks"])
aggregated["liveness"] /= len(album["tracks"])
aggregated["loudness"] /= len(album["tracks"])
aggregated["mode"] /= len(album["tracks"])
aggregated["speechiness"] /= len(album["tracks"])
aggregated["tempo"] /= len(album["tracks"])
aggregated["time_signature"] /= len(album["tracks"])
aggregated["valence"] /= len(album["tracks"])
return aggregated
def aggregate_median():
aggregated = {
"anthony_score": float(album["anthony_score"]) / 10,
"acousticness": [],
"danceability": [],
"duration_ms": [],
"energy": [],
"instrumentalness": [],
"key": [],
"liveness": [],
"loudness": [],
"mode": [],
"speechiness": [],
"tempo": [],
"time_signature": [],
"valence": [],
}
for track_uri, track in album["tracks"].items():
if track is not None:
aggregated["acousticness"].append(track["acousticness"])
aggregated["danceability"].append(track["danceability"])
aggregated["duration_ms"].append(track["duration_ms"])
aggregated["energy"].append(track["energy"])
aggregated["instrumentalness"].append(track["instrumentalness"])
aggregated["key"].append(track["key"])
aggregated["liveness"].append(track["liveness"])
aggregated["loudness"].append(track["loudness"])
aggregated["mode"].append(track["mode"])
aggregated["speechiness"].append(track["speechiness"])
aggregated["tempo"].append(track["tempo"])
aggregated["time_signature"].append(track["time_signature"])
aggregated["valence"].append(track["valence"])
aggregated["acousticness"] = statistics.median(aggregated["acousticness"])
aggregated["danceability"] = statistics.median(aggregated["danceability"])
aggregated["duration_ms"] = statistics.median(aggregated["duration_ms"])
aggregated["energy"] = statistics.median(aggregated["energy"])
aggregated["instrumentalness"] = statistics.median(
aggregated["instrumentalness"]
)
aggregated["key"] = statistics.median(aggregated["key"])
aggregated["liveness"] = statistics.median(aggregated["liveness"])
aggregated["loudness"] = statistics.median(aggregated["loudness"])
aggregated["mode"] = statistics.median(aggregated["mode"])
aggregated["speechiness"] = statistics.median(aggregated["speechiness"])
aggregated["tempo"] = statistics.median(aggregated["tempo"])
aggregated["time_signature"] = statistics.median(aggregated["time_signature"])
aggregated["valence"] = statistics.median(aggregated["valence"])
return aggregated
# aggregated = aggregate_mean()
aggregated = aggregate_median()
features.append([aggregated["danceability"], aggregated["instrumentalness"]])
labels.append([aggregated["anthony_score"]])
features = np.array(features)
labels = np.array(labels)
import os
import copy
import numpy as np
from astropy.io import fits
import astropy.units as u
import astropy.constants as const
from specutils import Spectrum1D
from astropy.table import Table
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from spectres import spectres
from paintbox.utils import broad2res, disp2vel
import context
def get_muse_fwhm():
""" Returns the FWHM of the MUSE spectrograph as a function of the
wavelength. """
wave, R = np.loadtxt(os.path.join(os.path.dirname(
os.path.abspath(__file__)), "muse_wave_R.dat")).T
wave = wave * u.nm
fwhm = wave.to("angstrom") / R
# First interpolation to obtain extrapolated values
f1 = interp1d(wave.to("angstrom"), fwhm, kind="linear", bounds_error=False,
fill_value="extrapolate")
# Second interpolation using spline
wave = np.hstack((4000, wave.to("angstrom").value, 10000))
f = interp1d(wave, f1(wave), kind="cubic", bounds_error=False)
return f
def plot_muse_fwhm():
f = get_muse_fwhm()
wave = np.linspace(4000, 10000, 1000)
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
plt.figure(1)
plt.minorticks_on()
plt.plot(wave, f(wave), "-")
plt.xlabel("$\lambda$ ($\AA$)")
plt.ylabel(r"Spectral resolution $\alpha$ FWHM (Angstrom)")
plt.show()
def plot_vel_resolution():
c = const.c
f = get_muse_fwhm()
wave = np.linspace(4000, 10000, 1000)
plt.style.use("seaborn-paper")
plt.figure(1)
plt.minorticks_on()
plt.plot(wave, c.to("km/s") * f(wave) / wave, "-")
plt.xlabel("$\lambda$ ($\AA$)")
plt.ylabel(r"Resolution FWHM (km/s)")
plt.show()
def review_masks(target_sigma=300):
wdir = os.path.join(context.home_dir, f"paintbox/dr1_sig{target_sigma}")
filenames = [_ for _ in os.listdir(wdir) if
_.endswith(f"sig{target_sigma}.fits")]
plt.figure(figsize=(20, 5))
plt.ion()
plt.show()
for filename in filenames:
galaxy = filename.split("_")[0]
table = Table.read(os.path.join(wdir, filename))
norm = fits.getval(os.path.join(wdir, filename), "NORM", ext=1)
wave = table["wave"].data
flux = table["flux"].data
mask = table["mask"].data
fluxerr = table["fluxerr"].data
while True:
plt.clf()
flux_plot = flux.copy()  # copy so masking for display does not overwrite the data
flux_plot[mask == 1] = np.nan
plt.plot(wave, flux_plot)
plt.title(galaxy)
plt.tight_layout()
plt.draw()
plt.pause(0.01)
process = input("Update mask? (N/y): ")
if process.lower() in ["", "n", "no"]:
break
plt.waitforbuttonpress()
pts = np.asarray(plt.ginput(2, timeout=-1))
wmin = pts[:, 0].min()
wmax = pts[:, 0].max()
idx = np.where((wave >= wmin) & (wave <= wmax))
import numpy as np
import pytest
from ndsys.features import VolterraFeatures, prepare_data
def test_prepare_data():
x = np.vstack([1, 2, 3])
y = np.vstack([10, 11, 12])
x_out, y_out = prepare_data(x, y, (1, 1), None)
assert (x_out == np.vstack([1, 2, 3])).all()
assert (y_out == np.vstack([10, 11, 12])).all()
x_out, y_out = prepare_data(x, y, (2, 1), None)
assert (x_out == np.vstack([1, 2, 3])).all()
assert (y_out == np.vstack([11, 12])).all()
x_out, y_out = prepare_data(x, y, (3, 1), None)
assert (x_out == np.vstack([1, 2, 3])).all()
assert (y_out == np.vstack([12])).all()
x_out, y_out = prepare_data(x, y, (1, 1), 'zeros')
assert (x_out == np.vstack([1, 2, 3])).all()
assert (y_out == np.vstack([10, 11, 12])).all()
x_out, y_out = prepare_data(x, y, (2, 1), 'zeros')
assert (x_out == np.vstack([0, 1, 2, 3])).all()
assert (y_out == np.vstack([10, 11, 12])).all()
x_out, y_out = prepare_data(x, y, (3, 1), 'zeros')
assert (x_out == np.vstack([0, 0, 1, 2, 3])).all()
assert (y_out == np.vstack([10, 11, 12])).all()
x_out, y_out = prepare_data(x, y, (2, 1), np.vstack([-1]))
assert (x_out == np.vstack([-1, 1, 2, 3])).all()
assert (y_out == np.vstack([10, 11, 12])).all()
x_out, y_out = prepare_data(x, y, (3, 1), np.vstack([-2, -1]))
assert (x_out == np.vstack([-2, -1, 1, 2, 3])).all()
import numpy as np
import pandas as pd
import pytest
import numpy.testing as npt
import matplotlib.pyplot as plt
from pulse2percept.viz import scatter_correlation, correlation_matrix
def test_scatter_correlation():
x = np.arange(100)
_, ax = plt.subplots()
ax = scatter_correlation(x, x, ax=ax)
npt.assert_equal(len(ax.texts), 1)
print(ax.texts)
npt.assert_equal('$r$=1.000' in ax.texts[0].get_text(), True)
# Ignore NaN:
ax = scatter_correlation([0, 1, np.nan, 3], [0, 1, 2, 3])
npt.assert_equal('$r$=1.000' in ax.texts[0].get_text(), True)
with pytest.raises(ValueError):
scatter_correlation(np.arange(10), np.arange(11))
with pytest.raises(ValueError):
scatter_correlation([1], [2])
def test_correlation_matrix():
df = pd.DataFrame()
df['a'] = pd.Series(np.arange(100))
df['b'] = pd.Series(list(df['a'][::-1]))
_, ax = plt.subplots()
ax = correlation_matrix(df, ax=ax)
with pytest.raises(TypeError):
correlation_matrix(np.zeros((10, 20)))
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from numpy.core.fromnumeric import argmin
from startkit import *
#Ridge regression constants
# polyOrder = [ 2*i for i in range(1,7)] # for ridge regression
# polyOrder = [ i for i in range(1,17)] # for kernelized ridge regression
polyOrder = [ i for i in range(1,25)] # for kernelized ridge regression of circle
batch_size = int(n_train/4)
X_batches = [ X_train[k*batch_size:(k+1)*batch_size,:] for k in range(4)]
y_batches = [ y_train[k*batch_size:(k+1)*batch_size] for k in range(4)]
def plot():
pos = y[:] == +1.0
neg = y[:] == -1.0
plt.figure()
plt.xlabel("$x_0$")
plt.ylabel("$x_1$")
plt.scatter(X[pos,0].ravel(), X[pos,1].ravel(),c='red',marker="v",label="$y=+1$")
plt.scatter(X[neg,0].ravel(), X[neg,1].ravel(),c='blue',marker="+",label="$y=-1$")
plt.legend()
plt.show()
def ridge_reg():
Train_Errors = []
Validate_Errors = []
for d in polyOrder:
Ridges = []
Phi = assemble_feature(X_train,d)
w_ridge = lstsq(Phi,y_train,LAMBDA)
error = np.linalg.norm(Phi @ w_ridge - y_train)
Train_Errors.append(error)
Ridges.append(w_ridge)
Phi = assemble_feature(X_valid,d)
validate_error = np.linalg.norm(Phi @ w_ridge - y_valid)
Validate_Errors.append(validate_error)
heatmap(lambda x0,x1: assemble_feature(np.array([[x0,x1]]),d)@w_ridge,clip=1)
Averaged_Train_Errors = [np.mean(Train_Errors[i]) for i in range(len(Train_Errors))]
print("Averaged_Train_Errors: ",Averaged_Train_Errors)
print("Validate_Errors: ",Validate_Errors)
plt.figure()
plt.plot(polyOrder,Averaged_Train_Errors,label="Training errors")
plt.plot(polyOrder,Validate_Errors,label="Validation errors")
plt.xlabel("Polynomial order")
plt.ylabel("Errors")
plt.legend()
plt.show()
def k(x1,x2,p):
return((1+x1@x2)**p)
def k_matrix(X_1data,X_2data,p):
k_m = np.zeros((np.shape(X_1data)[0],np.shape(X_2data)[0]))
for i in range(np.shape(k_m)[0]):
import numpy as np
class Struct(dict):
def __init__(self,**kw):
dict.__init__(self,kw)
self.__dict__ = self
def load(name, ref, prec=np.float32):
p0 = np.fromfile("output/%s_%d_p0.bin" % (name, ref), dtype=prec)
p1 = np.fromfile("output/%s_%d_p1.bin" % (name, ref), dtype=prec)
t = np.fromfile("output/%s_%d_t.bin" % (name, ref), dtype=prec)
return p0, p1, t
def parse_log(name, ref):
import re
lines = open("%s_%d.txt" % (name ,ref), "r").readlines()
pattern = "Simulation took:\s([\d\.s]+)"
for line in lines:
match = re.findall(pattern, line)
if match:
return float(match[0])
def convergence_rates(errors):
"""
Compute convergence rates assuming factor of two refinement and that the
error on the finest grid should be discarded.
"""
return ["-"] + list(np.log2(np.array(errors[:-2]) /
np.array(errors[1:-1])))
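# Hedged example: errors that halve with each refinement, e.g.
#   convergence_rates([0.4, 0.2, 0.1, 0.05])  ->  ['-', 1.0, 1.0]
# (the finest-grid error is discarded, as the docstring states).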
def error(u, v, dt):
"""
l2 error
"""
return np.linalg.norm(u - v)
""" Code for loading and manipulating the arithmetic expression data """
import os
import h5py
import numpy as np
from pathlib import Path
from numpy import exp, sin
from tqdm import tqdm
from weighted_retraining.utils import print_flush
def load_data_str(data_dir):
""" load the arithmetic expression data in string format """
fname = 'equation2_15_dataset.txt'
with open(data_dir / fname) as f:
eqs = f.readlines()
for i in range(len(eqs)):
eqs[i] = eqs[i].strip().replace(' ', '')
return eqs
def load_data_enc(data_dir):
""" load the arithmetic expression dataset in one-hot encoded format """
fname = 'eq2_grammar_dataset.h5'
h5f = h5py.File(data_dir / fname, 'r')
data = h5f['data'][:]
h5f.close()
return data
def get_initial_dataset_and_weights(data_dir, ignore_percentile, n_data):
""" get the initial dataset (with corresponding scores) and the sample weights """
# load equation dataset, both one-hot encoded and as plain strings, and compute corresponding scores
data_str = load_data_str(data_dir)
data_enc = load_data_enc(data_dir)
data_scores = score_function(data_str)
# subsample data based on the desired percentile and # of datapoints
perc = np.percentile(data_scores, ignore_percentile)
perc_idx = data_scores >= perc
data_idx = np.random.choice(sum(perc_idx), min(n_data, sum(perc_idx)), replace=False)
data_str = list(np.array(data_str)[perc_idx][data_idx])
data_enc = data_enc[perc_idx][data_idx]
data_scores = data_scores[perc_idx][data_idx]
return data_str, data_enc, data_scores
def update_dataset_and_weights(new_inputs, new_scores, data_str, data_enc, data_scores, model):
""" update the dataet and the sample weights """
# discard invalid (None) inputs and their corresponding scores
valid_idx = np.array(new_inputs) != None
valid_inputs = list(new_inputs[valid_idx])
valid_scores = new_scores[valid_idx]
print_flush("\tDiscarding {}/{} new inputs that are invalid!".format(len(new_inputs) - len(valid_inputs), len(new_inputs)))
# add new inputs and scores to dataset, both as plain string and one-hot vector
print_flush("\tAppending new valid inputs to dataset...")
data_str += valid_inputs
new_inputs_one_hot = model.smiles_to_one_hot(valid_inputs)
data_enc = np.append(data_enc, new_inputs_one_hot, axis=0)
import numpy as np
import torch
import torch.nn.functional as F
import skimage.measure as sk
import time
import pyrender
import pymesh
import trimesh
from pyemd import emd_samples
import chamfer_python
import binvox_rw
from glob import glob
D2R = np.pi/180.0
voxsize = 32
sample_size = 2048
def RotatePhi(phi):
return np.array([[1, 0, 0, 0],
[0, np.cos(D2R*phi), np.sin(D2R*phi), 0],
[0, -np.sin(D2R*phi), np.cos(D2R*phi), 0],
[0, 0, 0, 1]])
def RotateAzimuth(phi):
return np.array([[np.cos(D2R*phi),
import os
import json
import pickle
from pathlib import Path
import numpy as np
from transformers import AutoTokenizer
from torch.utils.data import Dataset, DataLoader
class Vocab:
def __init__(self, vocab_list, add_pad=True, add_unk=True):
self._vocab_dict = dict()
self._reverse_vocab_dict = dict()
self._length = 0
if add_pad: # pad_id should be zero (for mask)
self.pad_word = '<pad>'
self.pad_id = self._length
self._vocab_dict[self.pad_word] = self.pad_id
self._length += 1
if add_unk:
self.unk_word = '<unk>'
self.unk_id = self._length
self._vocab_dict[self.unk_word] = self.unk_id
self._length += 1
for w in vocab_list:
self._vocab_dict[w] = self._length
self._length += 1
for w, i in self._vocab_dict.items():
self._reverse_vocab_dict[i] = w
def word_to_id(self, word):
if hasattr(self, 'unk_id'):
return self._vocab_dict.get(word, self.unk_id)
return self._vocab_dict[word]
def id_to_word(self, idx):
if hasattr(self, 'unk_word'):
return self._reverse_vocab_dict.get(idx, self.unk_word)
return self._reverse_vocab_dict[idx]
def has_word(self, word):
return word in self._vocab_dict
def __len__(self):
return self._length
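# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the Vocab mapping; the token list is made up.
#   v = Vocab(['hello', 'world'])
#   v.word_to_id('hello')                # -> 2 (ids 0 and 1 are <pad>/<unk>)
#   v.word_to_id('missing') == v.unk_id  # -> True
#   len(v)                               # -> 4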
class Tokenizer:
def __init__(self, vocab, lower, bert_name):
self.vocab = vocab
self.maxlen = 256
self.lower = lower
self.bert_tokenizer = AutoTokenizer.from_pretrained(bert_name) if bert_name else None
@classmethod
def from_files(cls, fnames, lower=True):
all_tokens = set()
for fname in fnames:
fdata = json.load(open(os.path.join('data', fname), 'r', encoding='utf-8'))
for data in fdata:
all_tokens.update([token.lower() if lower else token for token in Tokenizer.split_text(data['text'])])
return cls(vocab=Vocab(all_tokens), lower=lower, bert_name=None)
@staticmethod
def pad_sequence(sequence, pad_id, maxlen, dtype='int64', padding='post', truncating='post'):
x = (np.zeros(maxlen)
import math
import numpy as np
import multiprocessing
"""
Stochastic bit-streams are represented as NumPy arrays. Multi-dimensional arrays
are used to encode numerical vectors and matrices.
"""
def scalar_next_int_power_of_2(x):
"""
Return the next integer power of 2 of x
"""
# return 1<<(int(math.ceil(abs(x))-1)).bit_length()
return 2**(math.ceil(math.log(abs(x), 2)))
def next_int_power_of_2(x):
"""
Vectorized form of scalar_next_int_power_of_2
"""
return np.vectorize(pyfunc=scalar_next_int_power_of_2,
otypes=[np.float32])(x)
def sng(x,no_stoch_samples):
"""
Bipolar stochastic number generator
Parameters
----------
x: Floating-point input value
no_stoch_samples: Bit-stream length (int)
Returns
-------
1D numpy array holding the bit-stream representing the input x
"""
# Draw samples from a uniform distribution
r = np.random.uniform(-1.0,1.0,size=no_stoch_samples)
# Bit-wise comparison
y = x > r
return y.astype(np.int8)
def vec_sng(x,no_stoch_samples):
"""
Vectorised form of sng
Parameters
----------
x: 1D numpy array of floating-point input values
no_stoch_samples: Bit-stream length
Returns
-------
2D numpy array holding the bit-streams representing the values in x
"""
# Initialise the output
y = np.empty((x.size,no_stoch_samples),dtype=np.int8)
for idx in range(x.size):
y[idx,:] = sng(x[idx],no_stoch_samples)
return y.astype(np.int8)
def mat_sng(x,no_stoch_samples):
"""
Vectorised form of sng
Parameters
----------
x: 2D numpy array of floating-point input values
no_stoch_samples: Bit-stream length
Returns
-------
2D numpy array holding the bit-streams representing the values in x
"""
rows = x.shape[0]
cols = x.shape[1]
y = np.empty((rows,cols,no_stoch_samples),dtype=np.int8)
for row_idx in range(rows):
for col_idx in range(cols):
y[row_idx,col_idx,:] = sng(x[row_idx,col_idx],no_stoch_samples)
return y.astype(np.int8)
def get_value(x):
"""
Estimate the value of a stochastic bit-stream
Parameters
----------
x: 1D numpy array holding the input bit-stream
Returns
-------
The value encoded by the bit-stream
"""
y = 2*np.mean(x) - 1
return np.array(y.astype(np.float32))
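# --- Hedged usage sketch (added for illustration; not in the original module) ---
# Round-trip of the bipolar encoding described in the module docstring: encode a
# value with sng() and recover an estimate with get_value(). The value 0.25 and
# the bit-stream length 4096 are arbitrary.
def _demo_bipolar_roundtrip():
    bits = sng(0.25, 4096)
    estimate = get_value(bits)  # close to 0.25, up to stochastic noise
    return estimate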
def vec_sc_value(x):
"""
Vectorised version of the get_value function
Parameters
----------
x: 2D numpy array with stochastic bit-streams
"""
no_inputs = x.shape[0]
# Initialise the output variable
y = np.empty(no_inputs,dtype=np.float32)
for idx in range(no_inputs):
y[idx] = get_value(x[idx,:])
return y
def mat_sc_value(x):
"""
Vectorised version of the get_value function
Parameters
----------
x: 3D numpy array with stochastic bit-streams
"""
rows = x.shape[0]
cols = x.shape[1]
# Initialise the output variable
y = np.empty((rows,cols),dtype=np.float32)
for row_idx in range(rows):
for col_idx in range(cols):
y[row_idx,col_idx] = get_value(x[row_idx,col_idx,:])
return y
def multiply(x,w):
"""
Stochastic XNOR multiplier
Parameters
----------
x: Stochastic bit-stream
w: Floating point weight
Returns
-------
A bit-stream representing the product of x and w
"""
w_seq = sng(w,x.size)
product = np.logical_not(np.logical_xor(x,w_seq))
return np.array(product.astype(np.int8))
def square(x):
# Stochastic squarring function
x_shifted = np.roll(x,shift=1)
sq = np.logical_not(np.logical_xor(x,x_shifted))
return np.array(sq.astype(np.int8))
def invert(x):
# Change of sign
return np.logical_not(x).astype(np.int8)
def add(x,y,s_in,upscale=False,s_out=None):
"""
Scaled addition of two stochastic bit-streams
Parameters
----------
x,y: 1D numpy arrays holding bit-streams to be added
s_in: 1D numpy array holding the scalings of x and y respectively
upsale: Boolean indicating whether saturation arithmetic should be applied
Returns
-------
A 1D numpy array holding the output bit-stream
"""
x_rescaled = np.empty(x.size, dtype=np.int8)
y_rescaled = np.empty(y.size, dtype=np.int8)
# Rescaling of the input bit-streams
if (s_in[0] > s_in[1]):
""" Rescale y """
max_scaling = s_in[0]
s_ratio = s_in[1]/s_in[0]
x_rescaled = x
y_rescaled = multiply(y,s_ratio)
elif (s_in[0] < s_in[1]):
""" Rescale x """
max_scaling = s_in[1]
s_ratio = s_in[0]/s_in[1]
x_rescaled = multiply(x,s_ratio)
y_rescaled = y
else:
""" Inputs have the same scaling """
max_scaling = s_in[0]
x_rescaled = x
y_rescaled = y
# Perform addition with the re-scaled bit-streams
select_val = 0
select_line = sng(select_val,x.size)
x_plus_y = np.empty(x.size, dtype=np.int8)
#!/usr/bin/env python
###############################################################################
# README
#
# This program reads PDB structures and prepares topology and coordinate files
# for CG MD simulations in Genesis.
#
# PDB format:
# 1. Atoms startswith "ATOM "
# 2. Chains should end with "TER" and have different IDs
#
###############################################################################
import numpy as np
import argparse
from tqdm import tqdm
###########################################################################
# Force Field Parameters #
###########################################################################
# ____ _ ____ _ __ __ _____ _____ _____ ____ ____
# | _ \ / \ | _ \ / \ | \/ | ____|_ _| ____| _ \/ ___|
# | |_) / _ \ | |_) | / _ \ | |\/| | _| | | | _| | |_) \___ \
# | __/ ___ \| _ < / ___ \| | | | |___ | | | |___| _ < ___) |
# |_| /_/ \_\_| \_\/_/ \_\_| |_|_____| |_| |_____|_| \_\____/
#
###########################################################################
# ==================
# Physical Constants
# ==================
CAL2JOU = 4.184
# =====================================
# General Parameters: Mass, Charge, ...
# =====================================
ATOM_MASS_DICT = {
'C' : 12.011,
'N' : 14.001,
'O' : 15.999,
'P' : 30.974,
'S' : 32.065,
'H' : 1.008
}
RES_MASS_DICT = {
"ALA" : 71.09,
"ARG" : 156.19,
"ASN" : 114.11,
"ASP" : 115.09,
"CYS" : 103.15,
"CYM" : 103.15,
"CYT" : 103.15,
"GLN" : 128.14,
"GLU" : 129.12,
"GLY" : 57.05,
"HIS" : 137.14,
"ILE" : 113.16,
"LEU" : 113.16,
"LYS" : 128.17,
"MET" : 131.19,
"PHE" : 147.18,
"PRO" : 97.12,
"SER" : 87.08,
"THR" : 101.11,
"TRP" : 186.21,
"TYR" : 163.18,
"VAL" : 99.14,
"DA" : 134.10,
"DC" : 110.10,
"DG" : 150.10,
"DT" : 125.10,
"DP" : 94.97,
"DS" : 83.11,
"RA" : 134.10,
"RC" : 110.10,
"RG" : 150.10,
"RU" : 111.10,
"RP" : 62.97,
"RS" : 131.11
}
RES_CHARGE_DICT = {
"ALA" : 0.0,
"ARG" : 1.0,
"ASN" : 0.0,
"ASP" : -1.0,
"CYS" : 0.0,
"CYM" : 0.0,
"CYT" : 0.0,
"GLN" : 0.0,
"GLU" : -1.0,
"GLY" : 0.0,
"HIS" : 0.0,
"ILE" : 0.0,
"LEU" : 0.0,
"LYS" : 1.0,
"MET" : 0.0,
"PHE" : 0.0,
"PRO" : 0.0,
"SER" : 0.0,
"THR" : 0.0,
"TRP" : 0.0,
"TYR" : 0.0,
"VAL" : 0.0,
"DA" : 0.0,
"DC" : 0.0,
"DG" : 0.0,
"DT" : 0.0,
"DP" : -0.6,
"DS" : 0.0,
"RA" : 0.0,
"RC" : 0.0,
"RG" : 0.0,
"RU" : 0.0,
"RP" : -1.0,
"RS" : 0.0
}
RES_SHORTNAME_DICT = {
"ALA" : "A",
"ARG" : "R",
"ASN" : "N",
"ASP" : "D",
"CYS" : "C",
"CYM" : "C",
"CYT" : "C",
"GLN" : "Q",
"GLU" : "E",
"GLY" : "G",
"HIS" : "H",
"ILE" : "I",
"LEU" : "L",
"LYS" : "K",
"MET" : "M",
"PHE" : "F",
"PRO" : "P",
"SER" : "S",
"THR" : "T",
"TRP" : "W",
"TYR" : "Y",
"VAL" : "V",
"DA" : "A",
"DC" : "C",
"DG" : "G",
"DT" : "T",
"RA" : "A",
"RC" : "C",
"RG" : "G",
"RU" : "U"
}
RES_NAME_SET_PROTEIN = (
"ALA", "ARG", "ASN", "ASP",
"CYS", "GLN", "GLU", "GLY",
"HIS", "ILE", "LEU", "LYS",
"MET", "PHE", "PRO", "SER",
"THR", "TRP", "TYR", "VAL",
"CYM", "CYT")
RES_NAME_SET_DNA = ("DA", "DC", "DG", "DT")
RES_NAME_SET_RNA = ("RA", "RC", "RG", "RU")
# DNA CG residue atom names
ATOM_NAME_SET_DP = ("P", "OP1", "OP2", "O5'", "O1P", "O2P")
ATOM_NAME_SET_DS = ("C5'", "C4'", "C3'", "C2'", "C1'", "O4'", "O2'")
# RNA CG residue atom names
ATOM_NAME_SET_RP = ("P", "OP1", "OP2", "O1P", "O2P")
ATOM_NAME_SET_RS = ("C5'", "C4'", "C3'", "C2'", "C1'", "O5'", "O4'", "O3'", "O2'")
# ==============
# Molecule Types
# ==============
MOL_DNA = 0
MOL_RNA = 1
MOL_PROTEIN = 2
MOL_OTHER = 3
MOL_TYPE_LIST = ["DNA", "RNA", "protein", "other", "unknown"]
# ===============================
# Protein AICG2+ Model Parameters
# ===============================
# AICG2+ bond force constant
AICG_BOND_K = 110.40 * CAL2JOU * 100.0 * 2.0
# AICG2+ sigma for Gaussian angle
AICG_13_SIGMA = 0.15 * 0.1 # nm
# AICG2+ sigma for Gaussian dihedral
AICG_14_SIGMA = 0.15 # Rad ??
# AICG2+ atomistic contact cutoff
AICG_GO_ATOMIC_CUTOFF = 6.5
# AICG2+ pairwise interaction cutoff
AICG_ATOMIC_CUTOFF = 5.0
# AICG2+ hydrogen bond cutoff
AICG_HYDROGEN_BOND_CUTOFF = 3.2
# AICG2+ salt bridge cutoff
AICG_SALT_BRIDGE_CUTOFF = 3.5
# AICG2+ energy cutoffs
AICG_ENE_UPPER_LIM = -0.5
AICG_ENE_LOWER_LIM = -5.0
# average and general AICG2+ energy values
AICG_13_AVE = 1.72
AICG_14_AVE = 1.23
AICG_CONTACT_AVE = 0.55
AICG_13_GEN = 1.11
AICG_14_GEN = 0.87
AICG_CONTACT_GEN = 0.32
# AICG2+ pairwise interaction pairs
AICG_ITYPE_BB_HB = 1 # B-B hydrogen bonds
AICG_ITYPE_BB_DA = 2 # B-B donor-acceptor contacts
AICG_ITYPE_BB_CX = 3 # B-B carbon-X contacts
AICG_ITYPE_BB_XX = 4 # B-B other
AICG_ITYPE_SS_HB = 5 # S-S hydrogen bonds
AICG_ITYPE_SS_SB = 6 # S-S salt bridge
AICG_ITYPE_SS_DA = 7 # S-S donor-acceptor contacts
AICG_ITYPE_SS_CX = 8 # S-S carbon-X contacts
AICG_ITYPE_SS_QX = 9 # S-S charge-X contacts
AICG_ITYPE_SS_XX = 10 # S-S other
AICG_ITYPE_SB_HB = 11 # S-B hydrogen bonds
AICG_ITYPE_SB_DA = 12 # S-B donor-acceptor contacts
AICG_ITYPE_SB_CX = 13 # S-B carbon-X contacts
AICG_ITYPE_SB_QX = 14 # S-B charge-X contacts
AICG_ITYPE_SB_XX = 15 # S-B other
AICG_ITYPE_LR_CT = 16 # long range contacts
AICG_ITYPE_OFFST = 0 # offset
AICG_PAIRWISE_ENERGY = np.zeros(17)
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_HB] = - 1.4247 # B-B hydrogen bonds
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_DA] = - 0.4921 # B-B donor-acceptor contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_CX] = - 0.2404 # B-B carbon-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_XX] = - 0.1035 # B-B other
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_HB] = - 5.7267 # S-S hydrogen bonds
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_SB] = -12.4878 # S-S salt bridge
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_DA] = - 0.0308 # S-S donor-acceptor contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_CX] = - 0.1113 # S-S carbon-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_QX] = - 0.2168 # S-S charge-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_XX] = 0.2306 # S-S other
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_HB] = - 3.4819 # S-B hydrogen bonds
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_DA] = - 0.1809 # S-B donor-acceptor contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_CX] = - 0.1209 # S-B carbon-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_QX] = - 0.2984 # S-B charge-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_XX] = - 0.0487 # S-B other
AICG_PAIRWISE_ENERGY[AICG_ITYPE_LR_CT] = - 0.0395 # long range contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_OFFST] = - 0.1051 # offset
# ============================
# DNA 3SPN.2C Model Parameters
# ============================
# 3SPN.2C bond force constant
DNA3SPN_BOND_K_2 = 60.0 * 2
# 3SPN.2C force constant for Gaussian dihedral
DNA3SPN_DIH_G_K = 7.0
# 3SPN.2C sigma for Gaussian dihedral
DNA3SPN_DIH_G_SIGMA = 0.3
# 3SPN.2C force constant for Gaussian dihedral
DNA3SPN_DIH_P_K = 2.0
# ====================================
# RNA Structure-based Model Parameters
# ====================================
# RNA atomistic contact cutoff
RNA_GO_ATOMIC_CUTOFF = 5.5
# RNA stacking interaction dihedral cutoff
RNA_STACK_DIH_CUTOFF = 40.0
# RNA stacking interaction distance cutoff
RNA_STACK_DIST_CUTOFF = 6.0
# RNA stacking interaction epsilon
RNA_STACK_EPSILON = 2.06
# RNA base pairing epsilon
RNA_BPAIR_EPSILON_2HB = 2.94
RNA_BPAIR_EPSILON_3HB = 5.37
RNA_BOND_K_LIST = {
"PS" : 26.5,
"SR" : 40.3,
"SY" : 62.9,
"SP" : 84.1
}
RNA_ANGLE_K_LIST = {
"PSR" : 18.0,
"PSY" : 22.8,
"PSP" : 22.1,
"SPS" : 47.8
}
RNA_DIHEDRAL_K_LIST = {
"PSPS" : 1.64,
"SPSR" : 1.88,
"SPSY" : 2.82,
"SPSP" : 2.98
}
RNA_PAIR_EPSILON_OTHER = {
"SS" : 1.48,
"BS" : 0.98,
"SB" : 0.98,
"BB" : 0.93
}
# =================
# PWMcos parameters
# =================
# PWMcos atomistic contact cutoff
PWMCOS_ATOMIC_CUTOFF = 4.0
# ======================
# Protein-RNA parameters
# ======================
# protein-RNA Go-term coefficient
PRO_RNA_GO_EPSILON_B = 0.62
PRO_RNA_GO_EPSILON_S = 0.74
# ====================
# GRO TOP File Options
# ====================
# "NREXCL" in "[moleculetype]"
MOL_NR_EXCL = 3
# "CGNR" in "[atoms]"
AICG_ATOM_FUNC_NR = 1
DNA3SPN_ATOM_FUNC_NR = 1
RNA_ATOM_FUNC_NR = 1
# "f" in "[bonds]"
AICG_BOND_FUNC_TYPE = 1
DNA3SPN_BOND_FUNC2_TYPE = 1
DNA3SPN_BOND_FUNC4_TYPE = 21
RNA_BOND_FUNC_TYPE = 1
# "f" in AICG-type "[angles]"
AICG_ANG_G_FUNC_TYPE = 21
# "f" in Flexible-type "[angles]"
AICG_ANG_F_FUNC_TYPE = 22
# "f" in DNA "[angles]"
DNA3SPN_ANG_FUNC_TYPE = 1
# "f" in RNA "[angles]"
RNA_ANG_FUNC_TYPE = 1
# "f" in AICG-type "[dihedral]"
AICG_DIH_G_FUNC_TYPE = 21
# "f" in Flexible-type "[dihedral]"
AICG_DIH_F_FUNC_TYPE = 22
# "f" in DNA Gaussian "[dihedral]"
DNA3SPN_DIH_G_FUNC_TYPE = 21
# "f" in DNA Periodic "[dihedral]"
DNA3SPN_DIH_P_FUNC_TYPE = 1
DNA3SPN_DIH_P_FUNC_PERI = 1
# "f" in RNA Periodic "[dihedral]"
RNA_DIH_FUNC_TYPE = 1
# "f" in Go-contacts "[pairs]"
AICG_CONTACT_FUNC_TYPE = 2
# "f" in RNA Go-contacts "[pairs]"
RNA_CONTACT_FUNC_TYPE = 2
# "f" in pro-RNA Go-contacts "[pairs]"
RNP_CONTACT_FUNC_TYPE = 2
# "f" in protein-DNA PWMcos "[pwmcos]"
PWMCOS_FUNC_TYPE = 1
###############################################################################
# Functions #
###############################################################################
# ____ _ ____ ___ ____ _____ _ _ _ _ ____
# | __ ) / \ / ___|_ _/ ___| | ___| | | | \ | |/ ___|
# | _ \ / _ \ \___ \| | | | |_ | | | | \| | |
# | |_) / ___ \ ___) | | |___ | _| | |_| | |\ | |___
# |____/_/ \_\____/___\____| |_| \___/|_| \_|\____|
#
###############################################################################
# ===================
# Geometric Functions
# ===================
# --------
# Distance
# --------
def compute_distance(coor1, coor2):
# d = coor1 - coor2
# return np.linalg.norm(d)
dx = coor1[0] - coor2[0]
dy = coor1[1] - coor2[1]
dz = coor1[2] - coor2[2]
dist = (dx * dx + dy * dy + dz * dz) ** 0.5
return dist
# -----
# Angle
# -----
def compute_angle(coor1, coor2, coor3):
v1 = coor1 - coor2
v2 = coor3 - coor2
n1 = np.linalg.norm(v1)
n2 = np.linalg.norm(v2)
return np.arccos( np.dot(v1, v2) / n1 / n2) / np.pi * 180.0
def compute_vec_angle(vec1, vec2):
n1 = np.linalg.norm(vec1)
n2 = np.linalg.norm(vec2)
return np.arccos( np.dot(vec1, vec2) / n1 / n2) / np.pi * 180.0
# --------
# Dihedral
# --------
def compute_dihedral(coor1, coor2, coor3, coor4):
v12 = coor2 - coor1
v23 = coor3 - coor2
v34 = coor4 - coor3
c123 = np.cross(v12, v23)
c234 = np.cross(v23, v34)
nc123 = np.linalg.norm(c123)
nc234 = np.linalg.norm(c234)
dih = np.arccos( np.dot(c123, c234) / nc123 / nc234)
c1234 = np.cross(c123, c234)
judge = np.dot(c1234, v23)
dih = dih if judge > 0 else -dih
return dih / np.pi * 180.0
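# Hedged sanity check (illustrative coordinates, not from the original file): for
# p1=(1,0,0), p2=(0,0,0), p3=(0,1,0), p4=(0,1,1) passed as numpy arrays,
# compute_dihedral(p1, p2, p3, p4) returns -90.0 under the sign convention above.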
# --------------
# Center of mass
# --------------
def compute_center_of_mass(atom_indices, atom_names, atom_coors):
total_mass = 0
tmp_coor = np.zeros(3)
for i in atom_indices:
a_mass = ATOM_MASS_DICT[atom_names[i][1]]
a_coor = atom_coors[i, :]
total_mass += a_mass
tmp_coor += a_coor * a_mass
com = tmp_coor / total_mass
return com
# ===============================
# Structural Biological Functions
# ===============================
# --------------------
# AICG2+ Protein Model
# --------------------
def is_protein_backbone(atom_name):
if atom_name in ("N", "C", "O", "OXT", "CA"):
return True
return False
def is_protein_hb_donor(atom_name, res_name):
if atom_name[0] == 'N':
return True
elif atom_name[0] == 'S' and res_name == "CYS":
return True
elif atom_name[0] == 'O':
if ( res_name == "SER" and atom_name == "OG" ) or \
( res_name == "THR" and atom_name == "OG1" ) or \
( res_name == "TYR" and atom_name == "OH" ):
return True
return False
def is_protein_hb_acceptor(atom_name):
if atom_name[0] == 'O' or atom_name[0] == 'S':
return True
return False
def is_protein_cation(atom_name, res_name):
if atom_name[0] == 'N':
if ( res_name == "ARG" and atom_name == "NH1" ) or \
( res_name == "ARG" and atom_name == "NH2" ) or \
( res_name == "LYS" and atom_name == "NZ" ):
return True
return False
def is_protein_anion(atom_name, res_name):
if atom_name[0] == 'O':
if ( res_name == "GLU" and atom_name == "OE1" ) or \
( res_name == "GLU" and atom_name == "OE2" ) or \
( res_name == "ASP" and atom_name == "OD1" ) or \
( res_name == "ASP" and atom_name == "OD2" ):
return True
return False
def is_protein_hb_pair(atom_name_1, res_name_1, atom_name_2, res_name_2):
if is_protein_hb_acceptor (atom_name_1) and \
is_protein_hb_donor (atom_name_2, res_name_2):
return True
elif is_protein_hb_acceptor (atom_name_2) and \
is_protein_hb_donor (atom_name_1, res_name_1):
return True
return False
def is_protein_sb_pair(atom_name_1, res_name_1, atom_name_2, res_name_2):
if is_protein_cation (atom_name_1, res_name_1) and \
is_protein_anion (atom_name_2, res_name_2):
return True
elif is_protein_cation (atom_name_2, res_name_2) and \
is_protein_anion (atom_name_1, res_name_1):
return True
return False
def is_protein_nonsb_charge_pair(atom_name_1, res_name_1, atom_name_2, res_name_2):
if is_protein_cation (atom_name_1, res_name_1 ) or \
is_protein_anion (atom_name_1, res_name_1 ) or \
is_protein_cation (atom_name_2, res_name_2 ) or \
is_protein_anion (atom_name_2, res_name_2 ):
return True
return False
def is_protein_go_contact(resid1, resid2, atom_names, atom_coors):
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < AICG_GO_ATOMIC_CUTOFF:
return True
return False
def count_aicg_atomic_contact(resid1, resid2, res_name_1, res_name_2, atom_names, atom_coors):
contact_count = np.zeros(( 17, ), dtype=int)
contact_count[AICG_ITYPE_OFFST] = 1
num_short_range_contact = 0
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
is_hb = is_protein_hb_pair (atom_name_1, res_name_1, atom_name_2, res_name_2)
is_sb = is_protein_sb_pair (atom_name_1, res_name_1, atom_name_2, res_name_2)
is_nonsb_charge = is_protein_nonsb_charge_pair (atom_name_1, res_name_1, atom_name_2, res_name_2)
is_1_backbone = is_protein_backbone (atom_name_1)
is_2_backbone = is_protein_backbone (atom_name_2)
if dist_12 < AICG_GO_ATOMIC_CUTOFF:
contact_count[AICG_ITYPE_LR_CT] += 1
if dist_12 < AICG_ATOMIC_CUTOFF:
num_short_range_contact += 1
if is_1_backbone and is_2_backbone:
if is_hb:
if dist_12 < AICG_HYDROGEN_BOND_CUTOFF:
contact_count[AICG_ITYPE_BB_HB] += 1
else:
contact_count[AICG_ITYPE_BB_DA] += 1
elif atom_name_1[0] == 'C' or atom_name_2[0] == 'C':
contact_count[AICG_ITYPE_BB_CX] += 1
else:
contact_count[AICG_ITYPE_BB_XX] += 1
elif ( not is_1_backbone ) and ( not is_2_backbone ):
if is_hb:
if is_sb:
if dist_12 < AICG_SALT_BRIDGE_CUTOFF:
contact_count[AICG_ITYPE_SS_SB] += 1
else:
contact_count[AICG_ITYPE_SS_QX] += 1
elif dist_12 < AICG_HYDROGEN_BOND_CUTOFF:
contact_count[AICG_ITYPE_SS_HB] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SS_QX] += 1
else:
contact_count[AICG_ITYPE_SS_DA] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SS_QX] += 1
elif atom_name_1[0] == 'C' or atom_name_2[0] == 'C':
contact_count[AICG_ITYPE_SS_CX] += 1
else:
contact_count[AICG_ITYPE_SS_XX] += 1
elif ( is_1_backbone and ( not is_2_backbone ) ) or \
( is_2_backbone and ( not is_1_backbone ) ):
if is_hb:
if dist_12 < AICG_HYDROGEN_BOND_CUTOFF:
contact_count[AICG_ITYPE_SB_HB] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SB_QX] += 1
else:
contact_count[AICG_ITYPE_SB_DA] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SB_QX] += 1
elif atom_name_1[0] == 'C' or atom_name_2[0] == 'C':
contact_count[AICG_ITYPE_SB_CX] += 1
else:
contact_count[AICG_ITYPE_SB_XX] += 1
# control the number of long-range contacts
if AICG_GO_ATOMIC_CUTOFF > AICG_ATOMIC_CUTOFF:
contact_count[AICG_ITYPE_LR_CT] -= num_short_range_contact
else:
contact_count[AICG_ITYPE_LR_CT] = 0
    # limit the number of salt bridges to one; extra counts are moved to the sidechain charge category
if contact_count[AICG_ITYPE_SS_SB] >= 2:
contact_count[AICG_ITYPE_SS_QX] += contact_count[AICG_ITYPE_SS_SB] - 1
contact_count[AICG_ITYPE_SS_SB] = 1
return contact_count
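# Note on usage (a sketch of how these counts are consumed later in this script):
# the 17-component count vector is contracted with AICG_PAIRWISE_ENERGY and the
# result is clipped to [AICG_ENE_LOWER_LIM, AICG_ENE_UPPER_LIM], e.g.
#   counts  = count_aicg_atomic_contact(res_i, res_j, "ALA", "GLY", aa_atom_name, aa_coor)
#   e_local = np.dot(AICG_PAIRWISE_ENERGY, counts)
#   e_local = min(max(e_local, AICG_ENE_LOWER_LIM), AICG_ENE_UPPER_LIM)
# where res_i / res_j stand for CGResidue-like objects (hypothetical names).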
# -----------------
# 3SPN.2C DNA model
# -----------------
def get_DNA3SPN_angle_param(angle_type, base_step):
# Base-Sugar-Phosphate
BSP_params = {
"AA" : 460, "AT" : 370, "AC" : 442, "AG" : 358,
"TA" : 120, "TT" : 460, "TC" : 383, "TG" : 206,
"CA" : 206, "CT" : 358, "CC" : 278, "CG" : 278,
"GA" : 383, "GT" : 442, "GC" : 336, "GG" : 278
}
# Phosphate-Sugar-Base
PSB_params = {
"AA" : 460, "TA" : 120, "CA" : 206, "GA" : 383,
"AT" : 370, "TT" : 460, "CT" : 358, "GT" : 442,
"AC" : 442, "TC" : 383, "CC" : 278, "GC" : 336,
"AG" : 358, "TG" : 206, "CG" : 278, "GG" : 278
}
# Phosphate-Sugar-Phosphate
PSP_params = {
"all" : 300
}
# Sugar-Phosphate-Sugar
SPS_params = {
"AA" : 355, "AT" : 147, "AC" : 464, "AG" : 368,
"TA" : 230, "TT" : 355, "TC" : 442, "TG" : 273,
"CA" : 273, "CT" : 368, "CC" : 165, "CG" : 478,
"GA" : 442, "GT" : 464, "GC" : 228, "GG" : 165
}
angle_params = {
"BSP" : BSP_params,
"PSB" : PSB_params,
"PSP" : PSP_params,
"SPS" : SPS_params
}
return angle_params[angle_type][base_step]
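# Example lookups (values taken directly from the tables above):
#   get_DNA3SPN_angle_param("SPS", "AT")   # -> 147
#   get_DNA3SPN_angle_param("PSP", "all")  # -> 300
# The returned force constants are doubled ("k * 2") when appended to the angle
# list in the 3SPN.2C topology section below.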
# -------------------------
# RNA structure-based model
# -------------------------
def is_RNA_hydrogen_bond(atom_name_1, atom_name_2):
special_atom_list = ['F', 'O', 'N']
if atom_name_1 in special_atom_list and atom_name_2 in special_atom_list:
return True
return False
def compute_RNA_Go_contact(resid1, resid2, atom_names, atom_coors):
hb_count = 0
min_dist = 1e50
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < RNA_GO_ATOMIC_CUTOFF and is_RNA_hydrogen_bond(atom_name_1[0], atom_name_2[0]):
hb_count += 1
if dist_12 < min_dist:
min_dist = dist_12
return (min_dist, hb_count)
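# Typical call pattern used further below (sketch):
#   adist, nhb = compute_RNA_Go_contact(cg_residues[i], cg_residues[j], aa_atom_name, aa_coor)
# adist is the minimum heavy-atom distance between the two residues and nhb the
# number of O/N/F atom pairs within RNA_GO_ATOMIC_CUTOFF; downstream, nhb == 2
# versus nhb >= 3 selects the 2HB / 3HB base-pair epsilons.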
# ------------------------
# protein-DNA interactions
# ------------------------
def is_PWMcos_contact(resid1, resid2, atom_names, atom_coors):
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < PWMCOS_ATOMIC_CUTOFF:
return True
return False
# ------------------------
# protein-RNA interactions
# ------------------------
def is_protein_RNA_go_contact(resid1, resid2, atom_names, atom_coors):
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < AICG_GO_ATOMIC_CUTOFF:
return True
return False
# ------------------
# Other file formats
# ------------------
def read_modified_pfm(pfm_filename):
pfm = {}
with open(pfm_filename, 'r') as fin:
for line in fin:
            words = line.split()
if len(words) < 1:
continue
w1 = words[0]
if w1 in "ACGT":
local_list = []
for p in words[1:]:
                    local_list.append(float(p))
pfm[w1] = local_list
elif w1 in ["CHAIN_A", "CHAIN_B"]:
local_list = []
for dna_id in words[1:]:
                    local_list.append(int(dna_id))
pfm[w1] = local_list
pfmat = np.array([pfm["A"], pfm["C"], pfm["G"], pfm["T"]])
ppmat = pfmat / pfmat.sum(axis=0)
pwmat0 = -np.log(ppmat)
pwmat = pwmat0 - pwmat0.sum(axis=0) / 4
return (pwmat, pfm["CHAIN_A"], pfm["CHAIN_B"])
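# A minimal sketch of the PFM file layout this parser expects (inferred from the
# code above; field spacing is free-form since each line is split on whitespace):
#   A        3.0  0.0 12.0 ...
#   C        1.0  9.0  0.0 ...
#   G        2.0  1.0  0.5 ...
#   T        6.0  2.0  0.5 ...
#   CHAIN_A  7 8 9 10 ...
#   CHAIN_B  25 26 27 28 ...
# The A/C/G/T rows are per-position base counts (converted to a position weight
# matrix), and CHAIN_A/CHAIN_B list the DNA residue indices the columns map to.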
# =============================
# Coarse-Graining Structures!!!
# =============================
class AAResidue:
def __init__(self, name, atoms):
self.name = name
self.atoms = atoms
class AAChain:
def __init__(self, chain_id, residues):
self.chain_id = chain_id
self.residues = residues
class CGResidue:
def __init__(self, residue_index, residue_name, atom_name, atoms):
self.res_idx = residue_index
self.res_name = residue_name
self.atm_name = atom_name
self.atoms = atoms
class CGChain:
def __init__(self, first, last, moltype):
self.first = first
self.last = last
self.moltype = moltype
###############################################################################
# ____ ___ ____ _____
# / ___/ _ \| _ \| ____|
# | | | | | | |_) | _|
# | |__| |_| | _ <| |___
# \____\___/|_| \_\_____|
#
###############################################################################
# core function
def pdb_2_top(args):
# -----------------
# Parsing arguments
# -----------------
pdb_name = args.pdb
protein_charge_filename = args.respac
scale_scheme = args.aicg_scale
gen_3spn_itp = args.dna_3spn_param
gen_pwmcos_itp = args.pwmcos
pwmcos_gamma = args.pwmcos_scale
pwmcos_epsil = args.pwmcos_shift
pfm_filename = args.pfm
appendto_filename = args.patch
do_output_psf = args.psf
do_output_cgpdb = args.cgpdb
do_debug = args.debug
do_output_sequence = args.show_sequence
# ===============
# Step 0: numbers
# ===============
aa_num_atom = 0
aa_num_residue = 0
aa_num_chain = 0
num_chain_pro = 0
num_chain_DNA = 0
num_chain_RNA = 0
i_step = 0
# ================
# Step 1: open PDB
# ================
i_step += 1
print("============================================================")
print("> Step {0:>2d}: open PDB file.".format(i_step))
aa_pdb_lines = []
with open(pdb_name, "r") as fin_pdb:
for line in fin_pdb:
if line.startswith("ATOM"):
aa_pdb_lines.append(line.ljust(80))
aa_num_atom += 1
elif line.startswith("TER") or line.startswith("END"):
aa_pdb_lines.append(line.ljust(80))
aa_atom_name = [" " for _ in range(aa_num_atom)]
aa_coor = np.zeros((aa_num_atom, 3))
aa_residues = []
aa_chains = []
i_atom = 0
i_resid = 0
curr_resid = None
curr_chain = None
curr_rname = " "
residue_name = " "
chain_id = '?'
tmp_res_atoms = []
tmp_chain_res = []
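    # The loop below parses the fixed-width PDB columns directly:
    # atom name = cols 13-16, residue name = cols 18-21, chain ID = col 22,
    # atom serial = cols 7-11, residue serial = cols 23-26, x/y/z = cols 31-54.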
for line in aa_pdb_lines:
if line.startswith("TER") or line.startswith("END"):
if len(tmp_res_atoms) > 0:
aa_residues.append(AAResidue(residue_name, tmp_res_atoms[:]))
tmp_res_atoms = []
if len(tmp_chain_res) > 0:
aa_chains.append(AAChain(chain_id, tmp_chain_res[:]))
tmp_chain_res = []
continue
i_atom += 1
atom_name = line[12:16].strip()
residue_name = line[17:21].strip()
chain_id = line[21]
atom_serial = int (line[6 :11])
residue_serial = int (line[22:26])
coor_x = float (line[30:38])
coor_y = float (line[38:46])
coor_z = float (line[46:54])
aa_atom_name [i_atom - 1 ] = atom_name
aa_coor [i_atom - 1 ] = [ coor_x, coor_y, coor_z ]
if residue_serial != curr_resid:
i_resid += 1
tmp_chain_res.append(i_resid - 1)
curr_resid = residue_serial
if len(tmp_res_atoms) > 0:
aa_residues.append(AAResidue(curr_rname, tmp_res_atoms[:]))
tmp_res_atoms = []
curr_rname = residue_name
tmp_res_atoms.append(i_atom - 1)
aa_num_residue = len(aa_residues)
aa_num_chain = len(aa_chains)
print(" > Number of atoms : {0:>10d}".format(aa_num_atom))
print(" > Number of residues: {0:>10d}".format(aa_num_residue))
print(" > Number of chains : {0:>10d}".format(aa_num_chain))
# ===============================
# Step 2: find out molecule types
# ===============================
i_step += 1
print("============================================================")
print("> Step {0:>2d}: set molecular types for every chain.".format(i_step))
cg_num_particles = 0
cg_chain_mol_types = np.zeros(aa_num_chain, dtype=int)
cg_chain_length = np.zeros(aa_num_chain, dtype=int)
for i_chain in range( aa_num_chain ):
chain = aa_chains[i_chain]
mol_type = -1
for i_res in chain.residues:
res_name = aa_residues[i_res].name
if res_name in RES_NAME_SET_PROTEIN:
tmp_mol_type = MOL_PROTEIN
elif res_name in RES_NAME_SET_DNA:
tmp_mol_type = MOL_DNA
elif res_name in RES_NAME_SET_RNA:
tmp_mol_type = MOL_RNA
else:
tmp_mol_type = MOL_OTHER
if mol_type == -1:
mol_type = tmp_mol_type
elif tmp_mol_type != mol_type:
errmsg = "BUG: Inconsistent residue types in chain {} ID - {} residue - {} : {} "
print(errmsg.format(i_chain, chain.chain_id, i_res, res_name))
exit()
cg_chain_mol_types[i_chain] = mol_type
n_res = len(chain.residues)
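        # Bead counting: DNA/RNA use three beads per nucleotide (phosphate, sugar,
        # base) with no phosphate bead on the 5'-terminal residue, hence 3n - 1;
        # proteins use a single CA bead per residue.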
if mol_type == MOL_DNA:
n_particles = 3 * n_res - 1
num_chain_DNA += 1
elif mol_type == MOL_RNA:
n_particles = 3 * n_res - 1
num_chain_RNA += 1
elif mol_type == MOL_PROTEIN:
n_particles = n_res
num_chain_pro += 1
else:
n_particles = 0
cg_chain_length[i_chain] = n_particles
cg_num_particles += n_particles
print(" > Chain {0:>3d} | {1:>7}".format( i_chain + 1, MOL_TYPE_LIST[ mol_type ] ))
print("------------------------------------------------------------")
print(" In total: {0:>5d} protein chains,".format(num_chain_pro))
print(" {0:>5d} DNA strands,".format(num_chain_DNA))
print(" {0:>5d} RNA strands.".format(num_chain_RNA))
# ===========================
# Step 3: Assign CG particles
# ===========================
i_step += 1
print("============================================================")
print("> Step {0:>2d}: assign coarse-grained particles.".format(i_step))
cg_residues = []
cg_chains = []
i_offset_cg_particle = 0
i_offset_cg_residue = 0
for i_chain in range(aa_num_chain):
chain = aa_chains [i_chain]
mol_type = cg_chain_mol_types [i_chain]
i_bead = i_offset_cg_particle
i_resi = i_offset_cg_residue
if mol_type == MOL_PROTEIN:
for i_res in chain.residues:
cg_idx = []
res_name = aa_residues[i_res].name
for i_atom in aa_residues[i_res].atoms:
atom_name = aa_atom_name[i_atom]
if atom_name[0] == 'H':
continue
else:
cg_idx.append(i_atom)
i_bead += 1
i_resi += 1
cg_residues.append(CGResidue(i_resi, res_name, "CA", cg_idx[:]))
elif mol_type == MOL_DNA:
tmp_atom_index_O3p = 0
for i_local_index, i_res in enumerate( chain.residues ):
res_name = aa_residues[i_res].name
cg_DP_idx = [tmp_atom_index_O3p]
cg_DS_idx = []
cg_DB_idx = []
for i_atom in aa_residues[i_res].atoms:
atom_name = aa_atom_name[i_atom]
if atom_name[0] == 'H':
continue
elif atom_name in ATOM_NAME_SET_DP:
cg_DP_idx.append(i_atom)
elif atom_name in ATOM_NAME_SET_DS:
cg_DS_idx.append(i_atom)
elif atom_name == "O3'":
tmp_atom_index_O3p = i_atom
else:
cg_DB_idx.append(i_atom)
i_resi += 1
if i_local_index > 0:
i_bead += 1
cg_residues.append(CGResidue(i_resi, res_name, "DP", cg_DP_idx[:]))
i_bead += 1
cg_residues.append( CGResidue(i_resi, res_name, "DS", cg_DS_idx[:]))
i_bead += 1
cg_residues.append( CGResidue(i_resi, res_name, "DB", cg_DB_idx[:]))
elif mol_type == MOL_RNA:
for i_local_index, i_res in enumerate( chain.residues ):
res_name = aa_residues[i_res].name
cg_RP_idx = []
cg_RS_idx = []
cg_RB_idx = []
for i_atom in aa_residues[i_res].atoms:
atom_name = aa_atom_name[i_atom]
if atom_name[0] == 'H':
continue
elif atom_name in ATOM_NAME_SET_RP:
cg_RP_idx.append(i_atom)
elif atom_name in ATOM_NAME_SET_RS:
cg_RS_idx.append(i_atom)
else:
cg_RB_idx.append(i_atom)
i_resi += 1
if i_local_index > 0:
i_bead += 1
cg_residues.append( CGResidue(i_resi, res_name, "RP", cg_RP_idx[:]))
i_bead += 1
cg_residues.append( CGResidue(i_resi, res_name, "RS", cg_RS_idx[:]))
i_bead += 1
cg_residues.append( CGResidue(i_resi, res_name, "RB", cg_RB_idx[:]))
cg_chains.append(CGChain(i_offset_cg_particle, i_bead - 1, mol_type))
i_offset_cg_particle += cg_chain_length[i_chain]
i_offset_cg_residue += len(chain.residues)
chain_info_str = " > Chain {0:>3d} | # particles: {1:>5d} | {2:>5d} -- {3:>5d} "
for i_chain in range(aa_num_chain):
print(chain_info_str.format(i_chain + 1,
cg_chain_length[i_chain],
cg_chains[i_chain].first + 1,
cg_chains[i_chain].last + 1))
print("------------------------------------------------------------")
print(" In total: {0} CG particles.".format(cg_num_particles))
# =========================================================================
# ____ ____ _____ ___ ____ ___ _ ___ ______ __
# / ___/ ___| |_ _/ _ \| _ \ / _ \| | / _ \ / ___\ \ / /
# | | | | _ | || | | | |_) | | | | | | | | | | _ \ V /
# | |__| |_| | | || |_| | __/| |_| | |__| |_| | |_| | | |
# \____\____| |_| \___/|_| \___/|_____\___/ \____| |_|
#
# =========================================================================
cg_resid_name = [" " for _ in range(cg_num_particles)]
cg_resid_index = np.zeros(cg_num_particles, dtype=int)
cg_bead_name = [" " for _ in range(cg_num_particles)]
cg_bead_type = [" " for _ in range(cg_num_particles)]
cg_bead_charge = np.zeros(cg_num_particles)
cg_bead_mass = np.zeros(cg_num_particles)
cg_bead_coor = np.zeros((cg_num_particles, 3))
cg_chain_id = np.zeros(cg_num_particles, dtype=int)
# protein
top_cg_pro_bonds = []
top_cg_pro_angles = []
top_cg_pro_dihedrals = []
top_cg_pro_aicg13 = []
top_cg_pro_aicg14 = []
top_cg_pro_aicg_contact = []
param_cg_pro_e_13 = []
param_cg_pro_e_14 = []
param_cg_pro_e_contact = []
# DNA
top_cg_DNA_bonds = []
top_cg_DNA_angles = []
top_cg_DNA_dih_Gaussian = []
top_cg_DNA_dih_periodic = []
# RNA
top_cg_RNA_bonds = []
top_cg_RNA_angles = []
top_cg_RNA_dihedrals = []
top_cg_RNA_base_stack = []
top_cg_RNA_base_pair = []
top_cg_RNA_other_contact = []
# protein-DNA
top_cg_pro_DNA_pwmcos = []
# protein-RNA
top_cg_pro_RNA_contact = []
# =================================
# Step 4: AICG2+ model for proteins
# =================================
# _ _
# _ __ _ __ ___ | |_ ___(_)_ __
# | '_ \| '__/ _ \| __/ _ \ | '_ \
# | |_) | | | (_) | || __/ | | | |
# | .__/|_| \___/ \__\___|_|_| |_|
# |_|
#
# =================================
if num_chain_pro > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: processing proteins.".format(i_step))
# --------------------------------
# Step 4.1: find out C-alpha atoms
# --------------------------------
print("------------------------------------------------------------")
print("> {0}.1: determine CA mass, charge, and coordinates.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range( chain.first, chain.last + 1 ):
res_name = cg_residues[i_res].res_name
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom] == "CA":
cg_resid_name [i_res] = res_name
cg_resid_index [i_res] = cg_residues [i_res].res_idx
cg_bead_name [i_res] = "CA"
cg_bead_type [i_res] = res_name
cg_bead_charge [i_res] = RES_CHARGE_DICT [res_name]
cg_bead_mass [i_res] = RES_MASS_DICT [res_name]
cg_bead_coor [i_res] = aa_coor [i_atom]
cg_chain_id [i_res] = i_chain
break
if len(protein_charge_filename) > 0:
try:
with open(protein_charge_filename, 'r') as pro_c_fin:
for line in pro_c_fin:
charge_data = line.split()
if len(charge_data) < 1:
continue
i = int(charge_data[0])
c = float(charge_data[1])
cg_bead_charge[i - 1] = c
except:
print("ERROR in user-defined charge distribution.\n")
exit()
print("> ... DONE!")
# -------------------------
# Step 4.2: AICG2+ topology
# -------------------------
print("------------------------------------------------------------")
print("> {0}.2: AICG2+ topology.".format(i_step))
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.1: AICG2+ local interactions.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range( chain.first, chain.last ):
coor1 = cg_bead_coor[i_res]
coor2 = cg_bead_coor[i_res + 1]
dist12 = compute_distance(coor1, coor2)
top_cg_pro_bonds.append((i_res, dist12))
print("> ... Bond: DONE!")
e_ground_local = 0.0
e_ground_13 = 0.0
num_angle = 0
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range(chain.first, chain.last - 1):
coor1 = cg_bead_coor[i_res ]
coor3 = cg_bead_coor[i_res + 2 ]
dist13 = compute_distance (coor1, coor3)
top_cg_pro_angles.append (i_res)
top_cg_pro_aicg13.append ( (i_res, dist13))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res ],
cg_residues [i_res + 2 ],
cg_resid_name [i_res ],
cg_resid_name [i_res + 2 ],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = np.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_local += e_local
e_ground_13 += e_local
num_angle += 1
param_cg_pro_e_13.append( e_local)
print("> ... Angle: DONE!")
e_ground_14 = 0.0
num_dih = 0
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range(chain.first, chain.last - 2):
coor1 = cg_bead_coor[i_res]
coor2 = cg_bead_coor[i_res + 1]
coor3 = cg_bead_coor[i_res + 2]
coor4 = cg_bead_coor[i_res + 3]
dihed = compute_dihedral(coor1, coor2, coor3, coor4)
top_cg_pro_dihedrals.append(i_res)
top_cg_pro_aicg14.append((i_res, dihed))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res ],
cg_residues [i_res + 3 ],
cg_resid_name [i_res ],
cg_resid_name [i_res + 3 ],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = np.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_local += e_local
e_ground_14 += e_local
num_dih += 1
param_cg_pro_e_14.append( e_local)
print("> ... Dihedral: DONE!")
# ------------------------
# Normalize local energies
# ------------------------
e_ground_local /= (num_angle + num_dih)
e_ground_13 /= num_angle
e_ground_14 /= num_dih
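        # Scaling of the local AICG2+ energies: scale_scheme 0 rescales each term so
        # that the average matches AICG_13_AVE / AICG_14_AVE, while scale_scheme 1
        # simply applies the generic -AICG_13_GEN / -AICG_14_GEN factors.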
if scale_scheme == 0:
for i in range(len(param_cg_pro_e_13)):
param_cg_pro_e_13[i] *= AICG_13_AVE / e_ground_13
for i in range(len(param_cg_pro_e_14)):
param_cg_pro_e_14[i] *= AICG_14_AVE / e_ground_14
elif scale_scheme == 1:
for i in range(len(param_cg_pro_e_13)):
param_cg_pro_e_13[i] *= -AICG_13_GEN
for i in range(len(param_cg_pro_e_14)):
param_cg_pro_e_14[i] *= -AICG_14_GEN
# -----------------------
# Go type native contacts
# -----------------------
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.2: AICG2+ Go-type native contacts.".format(i_step))
e_ground_contact = 0.0
num_contact = 0
# intra-molecular contacts
print(" Calculating intra-molecular contacts...")
for i_chain in tqdm( range(aa_num_chain) ):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range(chain.first, chain.last - 3):
coor_cai = cg_bead_coor[i_res]
for j_res in range(i_res + 4, chain.last + 1):
coor_caj = cg_bead_coor[j_res]
if is_protein_go_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
native_dist = compute_distance(coor_cai, coor_caj)
num_contact += 1
top_cg_pro_aicg_contact.append((i_res, j_res, native_dist))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res],
cg_residues [j_res],
cg_resid_name [i_res],
cg_resid_name [j_res],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = np.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_contact += e_local
num_contact += 1
param_cg_pro_e_contact.append( e_local)
print("> ... intra-molecular contacts: DONE!")
# inter-molecular ( protein-protein ) contacts
if num_chain_pro > 1:
print(" Calculating inter-molecular contacts...")
for i_chain in tqdm( range(aa_num_chain - 1) ):
chain1 = cg_chains[i_chain]
if chain1.moltype != MOL_PROTEIN:
continue
for j_chain in range(i_chain + 1, aa_num_chain):
chain2 = cg_chains[j_chain]
if chain2.moltype != MOL_PROTEIN:
continue
for i_res in range(chain1.first, chain1.last + 1):
coor_cai = cg_bead_coor[i_res]
for j_res in range(chain2.first, chain2.last + 1):
coor_caj = cg_bead_coor[j_res]
if is_protein_go_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
native_dist = compute_distance(coor_cai, coor_caj)
num_contact += 1
top_cg_pro_aicg_contact.append((i_res, j_res, native_dist))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res],
cg_residues [j_res],
cg_resid_name [i_res],
cg_resid_name [j_res],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = np.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_contact += e_local
num_contact += 1
param_cg_pro_e_contact.append( e_local)
print("> ... inter-molecular contacts: DONE!")
# normalize
e_ground_contact /= num_contact
if scale_scheme == 0:
for i in range(len(param_cg_pro_e_contact)):
param_cg_pro_e_contact[i] *= AICG_CONTACT_AVE / e_ground_contact
elif scale_scheme == 1:
for i in range(len(param_cg_pro_e_contact)):
param_cg_pro_e_contact[i] *= -AICG_CONTACT_GEN
print("------------------------------------------------------------")
print(" > Total number of protein contacts: {0:>12d}".format(len( top_cg_pro_aicg_contact )))
# =============================
# Step 5: 3SPN.2C model for DNA
# =============================
# _
# __| |_ __ __ _
# / _` | '_ \ / _` |
# | (_| | | | | (_| |
# \__,_|_| |_|\__,_|
#
# =============================
if num_chain_DNA > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: processing DNA.".format(i_step))
# ----------------------------------
# Step 5.1: determine P, S, B
# ----------------------------------
print("------------------------------------------------------------")
print("> {0}.1: determine P, S, B mass, charge, and coordinates.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_DNA:
continue
for i_res in range(chain.first, chain.last + 1):
res_name = cg_residues[i_res].res_name
bead_name = cg_residues[i_res].atm_name
bead_type = bead_name if bead_name == "DP" or bead_name == "DS" else res_name
bead_coor = compute_center_of_mass(cg_residues[i_res].atoms, aa_atom_name, aa_coor)
cg_resid_name [i_res] = res_name
cg_resid_index [i_res] = cg_residues[i_res].res_idx
cg_bead_name [i_res] = bead_name
cg_bead_type [i_res] = bead_type
cg_bead_charge [i_res] = RES_CHARGE_DICT [bead_type]
cg_bead_mass [i_res] = RES_MASS_DICT [bead_type]
cg_bead_coor [i_res] = bead_coor
cg_chain_id [i_res] = i_chain
print("> ... DONE!")
# ---------------------------------
# Step 5.2: 3SPN.2C topology
# ---------------------------------
if gen_3spn_itp:
print("------------------------------------------------------------")
print("> {0}.2: 3SPN.2C topology.".format(i_step))
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.1: 3SPN.2C local interactions.".format(i_step))
print(" Calculating intra-molecular contacts...")
for i_chain in tqdm( range(aa_num_chain) ):
chain = cg_chains[i_chain]
if chain.moltype != MOL_DNA:
continue
for i_res in range(chain.first, chain.last + 1):
if cg_bead_name[i_res] == "DS":
# bond S--B
coor_s = cg_bead_coor[i_res]
coor_b = cg_bead_coor[i_res + 1]
r_sb = compute_distance(coor_s, coor_b)
top_cg_DNA_bonds.append(( i_res, i_res + 1, r_sb ))
if i_res + 3 < chain.last:
# bond S--P+1
coor_p3 = cg_bead_coor[i_res + 2]
r_sp3 = compute_distance(coor_s, coor_p3)
top_cg_DNA_bonds.append(( i_res, i_res + 2, r_sp3 ))
# Angle S--P+1--S+1
resname5 = cg_resid_name [i_res] [-1]
resname3 = cg_resid_name [i_res + 3] [-1]
coor_s3 = cg_bead_coor [i_res + 3]
ang_sp3s3 = compute_angle(coor_s, coor_p3, coor_s3)
k = get_DNA3SPN_angle_param("SPS", resname5 + resname3)
top_cg_DNA_angles.append(( i_res, i_res + 2, i_res + 3, ang_sp3s3, k * 2 ))
# Dihedral S--P+1--S+1--B+1
coor_b3 = cg_bead_coor[i_res + 4]
dih_sp3s3b3 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_b3)
top_cg_DNA_dih_periodic.append(( i_res, i_res + 2, i_res + 3, i_res + 4, dih_sp3s3b3 -180.0))
# Dihedral S--P+1--S+1--P+2
if i_res + 6 < chain.last:
coor_p33 = cg_bead_coor[i_res + 5]
dih_sp3s3p33 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_p33)
top_cg_DNA_dih_periodic.append(( i_res, i_res + 2, i_res + 3, i_res + 5, dih_sp3s3p33 - 180.0))
top_cg_DNA_dih_Gaussian.append(( i_res, i_res + 2, i_res + 3, i_res + 5, dih_sp3s3p33 ))
elif cg_bead_name[i_res] == "DP":
# bond P--S
coor_p = cg_bead_coor[i_res]
coor_s = cg_bead_coor[i_res + 1]
r_ps = compute_distance(coor_p, coor_s)
top_cg_DNA_bonds.append(( i_res, i_res + 1, r_ps ))
# angle P--S--B
resname5 = cg_resid_name [i_res - 1] [-1]
resname3 = cg_resid_name [i_res + 2] [-1]
coor_b = cg_bead_coor [i_res + 2]
ang_psb = compute_angle(coor_p, coor_s, coor_b)
k = get_DNA3SPN_angle_param("PSB", resname5 + resname3)
top_cg_DNA_angles.append(( i_res, i_res + 1, i_res + 2, ang_psb, k * 2 ))
if i_res + 4 < chain.last:
# angle P--S--P+1
coor_p3 = cg_bead_coor[i_res + 3]
ang_psp3 = compute_angle(coor_p, coor_s, coor_p3)
k = get_DNA3SPN_angle_param("PSP", "all")
top_cg_DNA_angles.append(( i_res, i_res + 1, i_res + 3, ang_psp3, k * 2 ))
# Dihedral P--S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 4]
dih_psp3s3 = compute_dihedral(coor_p, coor_s, coor_p3, coor_s3)
top_cg_DNA_dih_periodic.append(( i_res, i_res + 1, i_res + 3, i_res + 4, dih_psp3s3 - 180.0))
top_cg_DNA_dih_Gaussian.append(( i_res, i_res + 1, i_res + 3, i_res + 4, dih_psp3s3 ))
elif cg_bead_name[i_res] == "DB":
if i_res + 2 < chain.last:
# angle B--S--P+1
resname5 = cg_resid_name [i_res] [-1]
resname3 = cg_resid_name [i_res + 1] [-1]
coor_b = cg_bead_coor [i_res]
coor_s = cg_bead_coor [i_res - 1]
coor_p3 = cg_bead_coor [i_res + 1]
ang_bsp3 = compute_angle(coor_b, coor_s, coor_p3)
k = get_DNA3SPN_angle_param("BSP", resname5 + resname3)
top_cg_DNA_angles.append(( i_res, i_res - 1, i_res + 1, ang_bsp3, k * 2 ))
# Dihedral B--S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 2]
dih_bsp3s3 = compute_dihedral(coor_b, coor_s, coor_p3, coor_s3)
top_cg_DNA_dih_periodic.append(( i_res, i_res - 1, i_res + 1, i_res + 2, dih_bsp3s3 - 180.0))
else:
errmsg = "BUG: Wrong DNA particle type in chain {}, residue {} : {} "
                        print(errmsg.format(i_chain, i_res, cg_bead_name[i_res]))
exit()
print("> ... Bond, Angle, Dihedral: DONE!")
# =========================
# RNA structure based model
# =========================
# ____ _ _ _
# | _ \| \ | | / \
# | |_) | \| | / _ \
# | _ <| |\ |/ ___ \
# |_| \_\_| \_/_/ \_\
#
# =========================
if num_chain_RNA > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: processing RNA.".format(i_step))
# ----------------------------------
# determine P, S, B
# ----------------------------------
print("------------------------------------------------------------")
print("> {0}.1: determine P, S, B mass, charge, and coordinates.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_RNA:
continue
for i_res in range(chain.first, chain.last + 1):
res_name = cg_residues[i_res].res_name
bead_name = cg_residues[i_res].atm_name
bead_type = bead_name if bead_name == "RP" or bead_name == "RS" else res_name
cg_resid_name [i_res] = res_name
cg_resid_index [i_res] = cg_residues [i_res].res_idx
cg_bead_name [i_res] = bead_name
cg_bead_type [i_res] = bead_type
cg_bead_charge [i_res] = RES_CHARGE_DICT [bead_type]
cg_bead_mass [i_res] = RES_MASS_DICT [bead_type]
cg_chain_id [i_res] = i_chain
if bead_name == "RP":
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom][0] == 'P':
bead_coor = aa_coor[i_atom]
elif bead_name == "RS":
total_mass = 0
tmp_coor = np.zeros(3)
for i_atom in cg_residues[i_res].atoms:
a_name = aa_atom_name[i_atom]
if a_name in ["C1'", "C2'", "C3'", "C4'", "O4'"]:
a_mass = ATOM_MASS_DICT[a_name[0]]
a_coor = aa_coor[i_atom]
total_mass += a_mass
tmp_coor += a_coor * a_mass
bead_coor = tmp_coor / total_mass
elif bead_name == "RB":
if res_name[-1] == 'A' or res_name[-1] == 'G':
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom] == "N1":
bead_coor = aa_coor[i_atom]
else:
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom] == "N3":
bead_coor = aa_coor[i_atom]
cg_bead_coor[i_res] = bead_coor
print("> ... DONE!")
# -------------------------
# Step 6.2: RNA topology
# -------------------------
print("------------------------------------------------------------")
print("> {0}.2: RNA topology.".format(i_step))
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.1: RNA local interactions.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_RNA:
continue
print(" Calculating intra-molecular contacts...")
for i_res in tqdm( range(chain.first, chain.last + 1) ):
if cg_bead_name[i_res] == "RS":
# bond S--B
coor_s = cg_bead_coor[i_res]
coor_b = cg_bead_coor[i_res + 1]
r_sb = compute_distance(coor_s, coor_b)
base_type = "R" if cg_resid_name[i_res] in ["RA", "RG"] else "Y"
bond_type = "S" + base_type
k = RNA_BOND_K_LIST[bond_type] * CAL2JOU
top_cg_RNA_bonds.append((i_res, i_res + 1, r_sb , k * 2 * 100.0))
# bond S--P+1
if i_res + 2 < chain.last:
coor_p3 = cg_bead_coor[i_res + 2]
r_sp3 = compute_distance(coor_s, coor_p3)
k = RNA_BOND_K_LIST["SP"] * CAL2JOU
top_cg_RNA_bonds.append((i_res, i_res + 2, r_sp3 , k * 2 * 100.0))
if i_res + 4 <= chain.last:
# Angle S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 3]
ang_sp3s3 = compute_angle(coor_s, coor_p3, coor_s3)
k = RNA_ANGLE_K_LIST["SPS"] * CAL2JOU
top_cg_RNA_angles.append((i_res, i_res + 2, i_res + 3, ang_sp3s3, k * 2))
# Dihedral S--P+1--S+1--B+1
coor_b3 = cg_bead_coor[i_res + 4]
dih_sp3s3b3 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_b3)
base_type = "R" if cg_resid_name[i_res + 4] in ["RA", "RG"] else "Y"
dihe_type = "SPS" + base_type
k = RNA_DIHEDRAL_K_LIST[dihe_type] * CAL2JOU
top_cg_RNA_dihedrals.append((i_res, i_res + 2, i_res + 3, i_res + 4, dih_sp3s3b3, k))
# Dihedral S--P+1--S+1--P+2
if i_res + 5 < chain.last:
coor_p33 = cg_bead_coor[i_res + 5]
dih_sp3s3p33 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_p33)
k = RNA_DIHEDRAL_K_LIST["SPSP"] * CAL2JOU
top_cg_RNA_dihedrals.append((i_res, i_res + 2, i_res + 3, i_res + 5, dih_sp3s3p33, k))
elif cg_bead_name[i_res] == "RP":
# bond P--S
coor_p = cg_bead_coor[i_res]
coor_s = cg_bead_coor[i_res + 1]
r_ps = compute_distance(coor_p, coor_s)
k = RNA_BOND_K_LIST["PS"] * CAL2JOU
top_cg_RNA_bonds.append((i_res, i_res + 1, r_ps , k * 2 * 100.0))
# angle P--S--B
coor_b = cg_bead_coor[i_res + 2]
ang_psb = compute_angle(coor_p, coor_s, coor_b)
base_type = "R" if cg_resid_name[i_res + 2] in ["RA", "RG"] else "Y"
angl_type = "PS" + base_type
k = RNA_ANGLE_K_LIST[angl_type] * CAL2JOU
top_cg_RNA_angles.append((i_res, i_res + 1, i_res + 2, ang_psb, k * 2))
if i_res + 4 < chain.last:
# angle P--S--P+1
coor_p3 = cg_bead_coor[i_res + 3]
ang_psp3 = compute_angle(coor_p, coor_s, coor_p3)
k = RNA_ANGLE_K_LIST["PSP"] * CAL2JOU
top_cg_RNA_angles.append((i_res, i_res + 1, i_res + 3, ang_psp3, k * 2))
# Dihedral P--S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 4]
dih_psp3s3 = compute_dihedral(coor_p, coor_s, coor_p3, coor_s3)
k = RNA_DIHEDRAL_K_LIST["PSPS"] * CAL2JOU
top_cg_RNA_dihedrals.append((i_res, i_res + 1, i_res + 3, i_res + 4, dih_psp3s3, k))
elif cg_bead_name[i_res] == "RB":
# do nothing...
pass
# -----------------------
# Go type native contacts
# -----------------------
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.2: RNA Go-type native contacts.".format(i_step))
print( " Calculating intra-molecular contacts..." )
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_RNA:
continue
for i_res in range(chain.first, chain.last - 2):
if cg_bead_name[i_res] == "RP":
continue
coor_i = cg_bead_coor[i_res]
for j_res in range(i_res + 3, chain.last + 1):
if cg_bead_name[j_res] == "RP":
continue
if cg_bead_name[i_res] == "RS" or cg_bead_name[j_res] == "RS":
if j_res < i_res + 6:
continue
coor_j = cg_bead_coor[j_res]
native_dist = compute_distance(coor_i, coor_j)
adist, nhb = compute_RNA_Go_contact(cg_residues[i_res],
cg_residues[j_res],
aa_atom_name,
aa_coor)
if adist > RNA_GO_ATOMIC_CUTOFF:
continue
if j_res == i_res + 3 and cg_bead_name[i_res] == "RB":
coor_i_sug = cg_bead_coor[i_res - 1]
coor_j_sug = cg_bead_coor[j_res - 1]
st_dih = compute_dihedral(coor_i, coor_i_sug, coor_j_sug, coor_j)
if abs( st_dih ) < RNA_STACK_DIH_CUTOFF and adist < RNA_STACK_DIST_CUTOFF:
top_cg_RNA_base_stack.append((i_res, j_res, native_dist, RNA_STACK_EPSILON))
else:
top_cg_RNA_other_contact.append((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER["BB"]))
elif cg_bead_name[i_res] == "RB" and cg_bead_name[j_res] == "RB":
if nhb == 2:
top_cg_RNA_base_pair.append((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_2HB))
elif nhb >= 3:
top_cg_RNA_base_pair.append((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_3HB))
else:
top_cg_RNA_other_contact.append((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER["BB"]))
else:
                        contact_type = cg_bead_name[i_res][-1] + cg_bead_name[j_res][-1]
top_cg_RNA_other_contact.append((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER[contact_type]))
if num_chain_RNA > 1:
print( " Calculating inter-molecular contacts..." )
for i_chain in tqdm( range(aa_num_chain) ):
chain_1 = cg_chains[i_chain]
if chain_1.moltype != MOL_RNA:
continue
for i_res in range(chain_1.first, chain_1.last + 1):
if cg_bead_name[i_res] == "RP":
continue
coor_i = cg_bead_coor[i_res]
for j_chain in range(i_chain + 1, aa_num_chain):
chain_2 = cg_chains[j_chain]
if chain_2.moltype != MOL_RNA:
continue
for j_res in range(chain_2.first, chain_2.last + 1):
if cg_bead_name[j_res] == "RP":
continue
coor_j = cg_bead_coor[j_res]
native_dist = compute_distance(coor_i, coor_j)
adist, nhb = compute_RNA_Go_contact(cg_residues[i_res],
cg_residues[j_res],
aa_atom_name,
aa_coor)
if adist > RNA_GO_ATOMIC_CUTOFF:
continue
if cg_bead_name[i_res] == "RB" and cg_bead_name[j_res] == "RB":
if nhb == 2:
top_cg_RNA_base_pair.append((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_2HB))
elif nhb >= 3:
top_cg_RNA_base_pair.append((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_3HB))
else:
top_cg_RNA_other_contact.append((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER["BB"]))
else:
                                contact_type = cg_bead_name[i_res][-1] + cg_bead_name[j_res][-1]
top_cg_RNA_other_contact.append((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER[contact_type]))
print("> ... DONE!")
print("------------------------------------------------------------")
num_rna_contacts = len(top_cg_RNA_base_stack) + len(top_cg_RNA_base_pair) + len(top_cg_RNA_other_contact)
print(" > Total number of RNA contacts: {0:>12d}".format(num_rna_contacts))
# ===========================================================
# Protein-RNA structure-based interactions: Go-like potential
# ===========================================================
# _ _ ____ _ _ _
# _ __ _ __ ___ | |_ ___(_)_ __ | _ \| \ | | / \
# | '_ \| '__/ _ \| __/ _ \ | '_ \ _____| |_) | \| | / _ \
    # | |_) | | | (_) | || __/ | | | |_____| _ <| |\ |/ ___ \
# | .__/|_| \___/ \__\___|_|_| |_| |_| \_\_| \_/_/ \_\
# |_|
#
# ============================================================
if num_chain_RNA > 0 and num_chain_pro > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: Generating protein-RNA native contacts.".format(i_step))
print(" Calculating protein-RNA contacts...")
for i_chain in tqdm( range(aa_num_chain) ):
chain_pro = cg_chains[i_chain]
if chain_pro.moltype != MOL_PROTEIN:
continue
for i_res in range(chain_pro.first, chain_pro.last + 1):
coor_i = cg_bead_coor[i_res]
                for j_chain in range(aa_num_chain):
chain_RNA = cg_chains[j_chain]
if chain_RNA.moltype != MOL_RNA:
continue
for j_res in range(chain_RNA.first, chain_RNA.last + 1):
if cg_bead_name[j_res] == "RP":
continue
if not is_protein_RNA_go_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
continue
coor_j = cg_bead_coor[j_res]
native_dist = compute_distance(coor_i, coor_j)
if cg_bead_name[j_res] == "RS":
top_cg_pro_RNA_contact.append((i_res, j_res, native_dist, PRO_RNA_GO_EPSILON_S))
elif cg_bead_name[j_res] == "RB":
top_cg_pro_RNA_contact.append((i_res, j_res, native_dist, PRO_RNA_GO_EPSILON_B))
print("> ... DONE!")
print("------------------------------------------------------------")
print(" > Total number of protein-RNA contacts: {0:>8d} \n".format( len(top_cg_pro_RNA_contact)))
# ============================================================
# PWMcos parameters: protein-DNA sequence-specific interaction
# ============================================================
# ______ ____ __
# | _ \ \ / / \/ | ___ ___ ___
# | |_) \ \ /\ / /| |\/| |/ __/ _ \/ __|
# | __/ \ V V / | | | | (_| (_) \__ \
# |_| \_/\_/ |_| |_|\___\___/|___/
#
# ============================================================
if gen_pwmcos_itp:
pwmcos_native_contacts = []
if num_chain_pro == 0:
error("Cannot generate PWMcos parameters without protein...")
if num_chain_DNA != 2:
error("Cannot generate PWMcos parameters from more or less than two DNA chains...")
i_step += 1
print("============================================================")
print("> Step {0:>2d}: Generating PWMcos parameters.".format(i_step))
# ----------------------------------
# Step 7.1: determine P, S, B
# ----------------------------------
print("------------------------------------------------------------")
print("> {0}.1: determine contacts between protein and DNA.".format(i_step))
i_count_DNA = 0
for i_chain in range(aa_num_chain):
chain_pro = cg_chains[i_chain]
if chain_pro.moltype != MOL_PROTEIN:
continue
for i_res in range(chain_pro.first, chain_pro.last + 1):
i_res_N = i_res if i_res == chain_pro.first else i_res - 1
i_res_C = i_res if i_res == chain_pro.last else i_res + 1
coor_pro_i = cg_bead_coor [ i_res ]
coor_pro_N = cg_bead_coor [ i_res_N ]
coor_pro_C = cg_bead_coor [ i_res_C ]
for j_chain in range(aa_num_chain):
chain_DNA = cg_chains[j_chain]
if chain_DNA.moltype != MOL_DNA:
continue
for j_res in range(chain_DNA.first + 3, chain_DNA.last - 2):
if cg_bead_name[j_res] != "DB":
continue
if not is_PWMcos_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
continue
j_res_5, j_res_3 = j_res - 3, j_res + 3
coor_dna_j = cg_bead_coor[ j_res ]
coor_dna_5 = cg_bead_coor[ j_res_5 ]
coor_dna_3 = cg_bead_coor[ j_res_3 ]
coor_dna_S = cg_bead_coor[ j_res - 1 ]
vec0 = coor_pro_i - coor_dna_j
vec1 = coor_dna_S - coor_dna_j
vec2 = coor_dna_3 - coor_dna_5
vec3 = coor_pro_N - coor_pro_C
                        r0 = np.linalg.norm(vec0)
from matplotlib import pyplot as plt
from astropy.io.fits import getheader, getdata
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from spectral_cube import SpectralCube
import astropy
import pickle
import astropy.units as u
from astropy.wcs.utils import skycoord_to_pixel
import numpy as np
def yso_asgn_structures(cube_location, YSO_RA, YSO_DEC,name):
cube,header = (getdata(cube_location,header=True))
wcs = WCS(header)
pixel_ra,pixel_dec = skycoord_to_pixel(SkyCoord(YSO_RA,YSO_DEC), wcs)
pixel_ra = pixel_ra.data.tolist()
pixel_dec = pixel_dec.data.tolist()
p1 = [cube[i][int(np.floor(pixel_dec))][int(np.floor(pixel_ra))] for i in range(len(cube))]
p2 = [cube[i][int(np.ceil(pixel_dec))][int(np.ceil(pixel_ra))] for i in range(len(cube))]
p3 = [cube[i][int(np.ceil(pixel_dec))][int(np.floor(pixel_ra))] for i in range(len(cube))]
    p4 = [cube[i][int(np.floor(pixel_dec))][int(np.ceil(pixel_ra))] for i in range(len(cube))]
from __future__ import print_function, division
import os, sys, warnings, platform
from time import time
import numpy as np
#if "PyPy" not in platform.python_implementation():
# from scipy.io import loadmat, savemat
from Kuru.Tensor import unique2d, itemfreq, in2d, makezero
#from Florence.Utils import insensitive
#from .vtk_writer import write_vtu
#try:
# import meshpy.triangle as triangle
# has_meshpy = True
#except ImportError:
# has_meshpy = False
from .HigherOrderMeshing import *
from .NodeArrangement import *
#from .GeometricPath import *
from warnings import warn
from copy import deepcopy
"""
Mesh class providing most of the pre-processing functionalities of the Core module
<NAME> - 13/06/2015
"""
class Mesh(object):
"""Mesh class provides the following functionalities:
1. Generating higher order meshes based on a linear mesh, for tris, tets, quads and hexes
2. Generating linear tri and tet meshes based on meshpy back-end
3. Generating linear tri meshes based on distmesh back-end
    4. Finding boundary edges and faces for tris and tets, in case they are not provided by the mesh generator
5. Reading Salome meshes in binary (.dat/.txt/etc) format
6. Reading gmsh files .msh
7. Checking for node numbering order of elements and fixing it if desired
8. Writing meshes to unstructured vtk file format (.vtu) in xml and binary formats,
including high order elements
"""
def __init__(self, element_type=None):
super(Mesh, self).__init__()
# self.faces and self.edges ARE BOUNDARY FACES
# AND BOUNDARY EDGES, RESPECTIVELY
self.degree = None
self.ndim = None
self.edim = None
self.nelem = None
self.nnode = None
self.elements = None
self.points = None
self.corners = None
self.edges = None
self.faces = None
self.element_type = element_type
self.face_to_element = None
self.edge_to_element = None
self.boundary_edge_to_element = None
self.boundary_face_to_element = None
self.all_faces = None
self.all_edges = None
self.interior_faces = None
self.interior_edges = None
# TYPE OF BOUNDARY FACES/EDGES
self.boundary_element_type = None
# FOR GEOMETRICAL CURVES/SURFACES
self.edge_to_curve = None
self.face_to_surface = None
self.spatial_dimension = None
self.reader_type = None
self.reader_type_format = None
self.reader_type_version = None
self.writer_type = None
self.filename = None
self.element_to_set = None
def GetEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetEdgesTri()
elif self.element_type == "quad":
self.GetEdgesQuad()
elif self.element_type == "pent":
self.GetEdgesPent()
elif self.element_type == "tet":
self.GetEdgesTet()
elif self.element_type == "hex":
self.GetEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.all_edges
def GetBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetBoundaryEdgesTri()
elif self.element_type == "quad":
self.GetBoundaryEdgesQuad()
elif self.element_type == "pent":
self.GetBoundaryEdgesPent()
elif self.element_type == "tet":
self.GetBoundaryEdgesTet()
elif self.element_type == "hex":
self.GetBoundaryEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.edges
def GetEdgesQuad(self):
"""Find the all edges of a quadrilateral mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesHex
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesHex":
self.all_edges = edges
return edges
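    # Hypothetical usage sketch for GetEdgesQuad (a 2 x 2 structured quad patch with
    # 9 nodes and 4 elements; the numbers below are illustrative only):
    #   mesh = Mesh(element_type="quad")
    #   mesh.elements = np.array([[0, 1, 4, 3], [1, 2, 5, 4], [3, 4, 7, 6], [4, 5, 8, 7]])
    #   mesh.nelem = 4
    #   edges = mesh.GetEdgesQuad()   # -> 12 unique edges, shape (12, 2)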
def GetBoundaryEdgesQuad(self):
"""Find boundary edges (lines) of a quadrilateral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
        # FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
        # IN ELEMENT CONNECTIVITY
        # THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetBoundaryEdgesHex(self):
"""Find boundary edges (lines) of hexahedral mesh.
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "quad"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesQuad()
@property
def Bounds(self):
"""Returns bounds of a mesh i.e. the minimum and maximum coordinate values
in every direction
"""
assert self.points is not None
if self.points.shape[1] == 3:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1]),
np.min(self.points[:,2])],
[np.max(self.points[:,0]),
np.max(self.points[:,1]),
np.max(self.points[:,2])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 2:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1])],
[np.max(self.points[:,0]),
np.max(self.points[:,1])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 1:
bounds = np.array([[np.min(self.points[:,0])],
[np.max(self.points[:,0])]])
makezero(bounds)
return bounds
else:
raise ValueError("Invalid dimension for mesh coordinates")
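    # Hypothetical usage sketch for Bounds (illustrative values only):
    #   mesh.points = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    #   mesh.Bounds   # -> array([[0., 0.], [1., 1.]])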
def GetElementsEdgeNumberingQuad(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2,3].
At most a quad can have all its four edges on the boundary.
output:
            edge_elements: [2D array] array mapping every unique edge to the element
                it belongs to [column 0] and its local edge number [column 1]
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesQuad()
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
# edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetFacesTet()
elif self.element_type == "hex":
self.GetFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.all_faces
def GetBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetBoundaryFacesTet()
elif self.element_type == "hex":
self.GetBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.faces
def GetBoundaryFacesHex(self):
"""Find boundary faces (surfaces) of a hexahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.faces.shape[1] == 4 and p > 1:
pass
else:
return
node_arranger = NodeArrangementHex(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
        # FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
        # IN ELEMENT CONNECTIVITY
        # THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetElementsWithBoundaryEdgesQuad(self):
"""Finds elements which have edges on the boundary.
At most a quad can have all its four edges on the boundary.
output:
            boundary_edge_to_element: [2D array] array containing elements which have edges
                on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(self.edges.dtype)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.boundary_edge_to_element = boundary_edge_to_element
return self.boundary_edge_to_element
def GetElementsWithBoundaryFacesHex(self):
"""Finds elements which have faces on the boundary.
At most a hexahedron can have all its six faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# IF NOT ALREADY AVAILABLE, THIS METHOD COMPUTES THE FACE TO ELEMENT ARRAY. NOTE
# THAT THE FACES CAN COME FROM AN EXTERNAL SOURCE WHOSE ARRANGEMENT WOULD NOT
# CORRESPOND TO THE ONE USED INTERNALLY, HENCE THIS MAPPING BECOMES NECESSARY
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate([self.elements[:,node_arranger[i,:]] for i in range(6)],axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:4],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:4].astype(np.int64) - self.faces[:,:4].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Kuru.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:4],self.faces[:,:4],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetFacesHex(self):
"""Find all faces (surfaces) in the hexahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF ONLY THE LINEAR VERSION HAS BEEN COMPUTED, COMPUTE THE HIGHER ORDER VERSION
if self.all_faces.shape[1] == 4 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementHex(p-1)[0]
fsize = int((p+1)**3)
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.concatenate([self.elements[:,node_arranger[i,:]] for i in range(6)],axis=0).astype(np.int64)
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
def GetHighOrderMesh(self,p=1, silent=True, **kwargs):
"""Given a linear tri, tet, quad or hex mesh compute high order mesh based on it.
This is a static method linked to the HigherOrderMeshing module"""
if not isinstance(p,int):
raise ValueError("p must be an integer")
else:
if p < 1:
raise ValueError("Value of p={} is not acceptable. Provide p>=1.".format(p))
if self.degree is None:
self.InferPolynomialDegree()
C = p-1
if 'C' in kwargs.keys():
if kwargs['C'] != p - 1:
raise ValueError("Did not understand the specified interpolation degree of the mesh")
del kwargs['C']
# DO NOT COMPUTE IF ALREADY COMPUTED FOR THE SAME ORDER
if self.degree is None:
self.degree = self.InferPolynomialDegree()
if self.degree == p:
return
# SITUATIONS WHEN ANOTHER HIGH ORDER MESH IS REQUIRED, WITH ONE HIGH
# ORDER MESH ALREADY AVAILABLE
if self.degree != 1 and self.degree - 1 != C:
dum = self.GetLinearMesh(remap=True)
self.__dict__.update(dum.__dict__)
if not silent:
print('Generating p = '+str(C+1)+' mesh based on the linear mesh...')
t_mesh = time()
# BUILD A NEW MESH BASED ON THE LINEAR MESH
if self.element_type == 'line':
nmesh = HighOrderMeshLine(C,self,**kwargs)
if self.element_type == 'tri':
if self.edges is None:
self.GetBoundaryEdgesTri()
# nmesh = HighOrderMeshTri(C,self,**kwargs)
nmesh = HighOrderMeshTri_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'tet':
# nmesh = HighOrderMeshTet(C,self,**kwargs)
nmesh = HighOrderMeshTet_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'quad':
if self.edges is None:
self.GetBoundaryEdgesQuad()
nmesh = HighOrderMeshQuad(C,self,**kwargs)
elif self.element_type == 'hex':
nmesh = HighOrderMeshHex(C,self,**kwargs)
self.points = nmesh.points
self.elements = nmesh.elements.astype(np.uint64)
if isinstance(self.corners,np.ndarray):
# NOT NECESSARY BUT GENERIC
self.corners = nmesh.corners.astype(np.uint64)
if isinstance(self.edges,np.ndarray):
self.edges = nmesh.edges.astype(np.uint64)
if isinstance(self.faces,np.ndarray):
if isinstance(nmesh.faces,np.ndarray):
self.faces = nmesh.faces.astype(np.uint64)
self.nelem = nmesh.nelem
self.nnode = self.points.shape[0]
self.element_type = nmesh.info
self.degree = C+1
self.ChangeType()
if not silent:
print('Finished generating the high order mesh. Time taken', time()-t_mesh,'sec')
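# Hedged usage sketch: the calls below assume the enclosing class is instantiable as
# Mesh(); that name and the argument values are illustrative, not taken from this file.
#
# mesh = Mesh()
# mesh.Rectangle(element_type="quad", nx=4, ny=4)
# mesh.GetHighOrderMesh(p=2) # upgrades the linear mesh in place to degree 2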
def Line(self, left_point=0., right_point=1., n=10, p=1):
"""Creates a mesh of on a line for 1D rods/beams"""
self.__reset__()
assert p > 0
if not isinstance(left_point,float):
if not isinstance(left_point,int):
raise ValueError("left_point must be a number")
if not isinstance(right_point,float):
if not isinstance(right_point,int):
raise ValueError("right_point must be a number")
left_point = float(left_point)
right_point = float(right_point)
n = int(n)
if n <= 0:
raise ValueError("Number of discretisation cannot be zero or negative: n={}".format(n))
self.element_type = "line"
self.points = np.linspace(left_point,right_point,p*n+1)[:,None]
self.elements = np.zeros((n,p+1),dtype=np.int64)
for i in range(p+1):
self.elements[:,i] = p*np.arange(0,n)+i
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
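# Hedged example (constructor name Mesh() is an assumption): a p = 2 line mesh with
# n = 3 elements has p*n+1 = 7 nodes and connectivity rows [0 1 2], [2 3 4], [4 5 6].
#
# mesh = Mesh()
# mesh.Line(left_point=0., right_point=1., n=3, p=2)
# print(mesh.elements.shape) # expected (3, 3)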
def Rectangle(self,lower_left_point=(0,0), upper_right_point=(2,1),
nx=5, ny=5, element_type="tri"):
"""Creates a quad/tri mesh of a rectangle"""
if element_type != "tri" and element_type != "quad":
raise ValueError("Element type should either be tri or quad")
if self.elements is not None and self.points is not None:
self.__reset__()
if (lower_left_point[0] > upper_right_point[0]) or \
(lower_left_point[1] > upper_right_point[1]):
raise ValueError("Incorrect coordinate for lower left and upper right vertices")
nx, ny = int(nx), int(ny)
if nx <= 0 or ny <= 0:
raise ValueError("Number of discretisation cannot be zero or negative: nx={} ny={}".format(nx,ny))
from scipy.spatial import Delaunay
x=np.linspace(lower_left_point[0],upper_right_point[0],nx+1)
y=np.linspace(lower_left_point[1],upper_right_point[1],ny+1)
X,Y = np.meshgrid(x,y)
coordinates = np.dstack((X.ravel(),Y.ravel()))[0,:,:]
if element_type == "tri":
tri_func = Delaunay(coordinates)
self.element_type = "tri"
self.elements = tri_func.simplices
self.nelem = self.elements.shape[0]
self.points = tri_func.points
self.nnode = self.points.shape[0]
self.GetBoundaryEdgesTri()
elif element_type == "quad":
self.nelem = int(nx*ny)
elements = np.zeros((self.nelem,4),dtype=np.int64)
dum_0 = np.arange((nx+1)*ny)
dum_1 = np.array([(nx+1)*i+nx for i in range(ny)])
col0 = np.delete(dum_0,dum_1)
elements[:,0] = col0
elements[:,1] = col0 + 1
elements[:,2] = col0 + nx + 2
elements[:,3] = col0 + nx + 1
self.nnode = int((nx+1)*(ny+1))
self.element_type = "quad"
self.elements = elements
self.points = coordinates
self.nnode = self.points.shape[0]
self.GetBoundaryEdgesQuad()
self.GetEdgesQuad()
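# Hedged example (constructor name Mesh() is an assumption): for nx = ny = 2 the quad
# numbering below runs row by row with x varying fastest, so the first element is the
# counter-clockwise quad [0, 1, 4, 3].
#
# mesh = Mesh()
# mesh.Rectangle(lower_left_point=(0, 0), upper_right_point=(1, 1), nx=2, ny=2, element_type="quad")
# print(mesh.elements[0]) # expected [0 1 4 3]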
def GetNodeCommonality(self):
"""Finds the elements sharing a node.
The return values are linked lists [lists of numpy arrays].
Each numpy array within the list gives the elements that contain a given node.
As a result the size of the linked list is nnode
outputs:
els: [list of numpy arrays] element numbers containing nodes
pos: [list of numpy arrays] elemental positions of the nodes
flat_pos: [list of numpy arrays] position of nodes in the
flattened element connectivity.
"""
self.__do_essential_memebers_exist__()
elements = self.elements.ravel()
idx_sort = np.argsort(elements)
sorted_elements = elements[idx_sort]
vals, idx_start = np.unique(sorted_elements, return_index=True)
# Sets of indices
flat_pos = np.split(idx_sort, idx_start[1:])
els = np.split(idx_sort // int(self.elements.shape[1]), idx_start[1:])
pos = np.split(idx_sort % int(self.elements.shape[1]), idx_start[1:])
# In case one wants to return only the duplicates i.e. filter keeping only items occurring more than once
# vals, idx_start, count = np.unique(sorted_elements, return_counts=True, return_index=True)
# vals = vals[count > 1]
# res = filter(lambda x: x.size > 1, res)
return els, pos, flat_pos
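# Illustrative sketch of the argsort/unique/split trick used above, on a toy
# connectivity of two triangles sharing an edge (values chosen for illustration):
#
# elements = np.array([[0, 1, 2], [1, 3, 2]])
# flat = elements.ravel() # [0 1 2 1 3 2]
# order = np.argsort(flat) # flat positions grouped by node id
# _, starts = np.unique(flat[order], return_index=True)
# els = np.split(order // 3, starts[1:]) # elements touching nodes 0, 1, 2, 3
# # -> [array([0]), array([0, 1]), array([0, 1]), array([1])]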
def Read(self, filename=None, element_type="tri", reader_type=None, reader_type_format=None,
reader_type_version=None, order=0, read_surface_info=False, read_curve_info=False, **kwargs):
"""Convenience mesh reader method to dispatch call to subsequent apporpriate methods"""
if not isinstance(filename,str):
raise ValueError("filename must be a string")
if reader_type is not None:
if not isinstance(reader_type,str):
raise ValueError("reader_type must be a string")
if reader_type is None:
if filename.split('.')[-1] == "msh":
reader_type = "gmsh"
elif filename.split('.')[-1] == "obj":
reader_type = "obj"
elif filename.split('.')[-1] == "unv":
reader_type = "unv"
elif filename.split('.')[-1] == "fro":
reader_type = "fro"
elif filename.split('.')[-1] == "dat":
for key in kwargs.keys():
inkey = insensitive(key)
if "connectivity" in inkey and "delimiter" not in inkey:
reader_type = "read_separate"
break
if reader_type is None:
raise ValueError("Mesh file format was not undertood. Please specify it using reader_type keyword")
self.filename = filename
self.reader_type = reader_type
self.reader_type_format = reader_type_format
self.reader_type_version = reader_type_version
if self.reader_type == 'salome':
#self.ReadSalome(filename, element_type=element_type, read_surface_info=read_surface_info)
raise ValueError("Reader not implemented yet")
elif reader_type == 'GID':
#self.ReadGIDMesh(filename, element_type, order)
raise ValueError("Reader not implemented yet")
elif self.reader_type == 'gmsh':
self.ReadGmsh(filename, element_type=element_type, read_surface_info=read_surface_info, read_curve_info=read_curve_info)
elif self.reader_type == 'obj':
self.ReadOBJ(filename, element_type=element_type, read_surface_info=read_surface_info)
elif self.reader_type == 'fenics':
#self.ReadFenics(filename, element_type)
raise ValueError("Reader not implemented yet")
elif self.reader_type == 'vtu':
self.ReadVTK(filename)
elif self.reader_type == 'abaqus':
self.ReadAbaqus(filename)
elif self.reader_type == 'unv':
#self.ReadUNV(filename, element_type)
raise ValueError("Reader not implemented yet")
elif self.reader_type == 'fro':
#self.ReadFRO(filename, element_type)
raise ValueError("Reader not implemented yet")
elif self.reader_type == 'read_separate':
# READ MESH FROM SEPARATE FILES FOR CONNECTIVITY AND COORDINATES
raise ValueError("Reader not implemented yet")
from Kuru.Utils import insensitive
# return insensitive(kwargs.keys())
#for key in kwargs.keys():
# inkey = insensitive(key)
# if "connectivity" in inkey and "delimiter" not in inkey:
# connectivity_file = kwargs.get(key)
# if "coordinate" in insensitive(key) and "delimiter" not in inkey:
# coordinates_file = kwargs.get(key)
#self.ReadSeparate(connectivity_file,coordinates_file,element_type,
# delimiter_connectivity=',',delimiter_coordinates=',')
elif self.reader_type == 'ReadHDF5':
#self.ReadHDF5(filename)
raise ValueError("Reader not implemented yet")
self.nnode = self.points.shape[0]
# MAKE SURE MESH DATA IS CONTIGUOUS
self.points = np.ascontiguousarray(self.points)
self.elements = np.ascontiguousarray(self.elements)
return
def ReadVTK(self, filename, element_type=None):
"""Read mesh from a vtu file"""
try:
import vtk
except IOError:
raise IOError("vtk is not installed. Please install it first using 'pip install vtk'")
self.__reset__()
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
vmesh = reader.GetOutput()
npieces = vmesh.GetNumberOfPieces()
if npieces > 1:
raise IOError("VTK reader is not prepare to read more than one piece.")
piece = vmesh.GetPiece()
flat_elements, celltypes, element_to_set = [], [], []
for cellid in range(vmesh.GetNumberOfCells()):
cell = vmesh.GetCell(cellid)
celltypes.append(vmesh.GetCellType(cellid))
element_to_set.append(piece)
for ptid in range(cell.GetNumberOfPoints()):
flat_elements.append(cell.GetPointId(ptid))
celltypes = np.array(celltypes, copy=True)
flat_elements = np.array(flat_elements, copy=True)
if not np.all(celltypes == celltypes[0]):
raise IOError("Cannot read VTK files with hybrid elements")
cellflag = celltypes[0]
if cellflag == 5:
self.element_type = "tri"
divider = 3
elif cellflag == 9:
self.element_type = "quad"
divider = 4
elif cellflag == 10:
self.element_type = "tet"
divider = 4
elif cellflag == 12:
self.element_type = "hex"
divider = 8
elif cellflag == 3:
self.element_type = "line"
divider = 2
else:
raise IOError("VTK element type not understood")
if element_type is not None:
if self.element_type != element_type:
raise ValueError("VTK file does not contain {} elements".format(element_type))
points = np.array([vmesh.GetPoint(ptid) for ptid in range(vmesh.GetNumberOfPoints())])
self.elements = np.ascontiguousarray(flat_elements.reshape(int(flat_elements.shape[0]/divider),divider), dtype=np.uint64)
self.points = np.ascontiguousarray(points, dtype=np.float64)
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
# ASSIGN A SET TO EACH ELEMENT
element_to_set = np.array(element_to_set, dtype=np.int64, copy=True)
import numpy as np
import pf_dynamic_sph as pfs
import pf_dynamic_cart as pfc
class LDA_PolaronHamiltonian:
# """ This is a class that stores information about the Hamiltonian"""
def __init__(self, coherent_state, Params, LDA_funcs, fParams, trapParams, toggleDict):
# Params = [aIBi, mI, mB, n0, gBB]
self.Params = Params
self.LDA_funcs = LDA_funcs
self.fParams = fParams
self.trapParams = trapParams
self.grid = coherent_state.kgrid
self.coordinate_system = coherent_state.coordinate_system
self.kz = coherent_state.kzg_flat
self.k0mask = coherent_state.k0mask
self.k2 = coherent_state.k2_flat
self.dynamicsType = toggleDict['Dynamics']
self.couplingType = toggleDict['Coupling']
self.BEC_density_var = toggleDict['BEC_density']
self.BEC_density_osc = toggleDict['BEC_density_osc']
self.CS_Dyn = toggleDict['CS_Dyn']
self.Pol_Potential = toggleDict['Polaron_Potential']
self.a_osc = trapParams['a_osc']
if self.couplingType == 'frohlich':
[aIBi, mI, mB, n0, gBB] = self.Params
self.gnum = (2 * np.pi / pfs.ur(mI, mB)) * (1 / aIBi)
if(self.coordinate_system == "SPHERICAL_2D"):
self.gnum = pfs.g(self.grid, *Params[0:])
self.Omega0_grid = pfs.Omega(self.grid, 0, *Params[1:])
self.Wk_grid = pfs.Wk(self.grid, *Params[2:])
# self.Wk_grid[self.k0mask] = 1 # k0mask should be all False in the Spherical case so the second line shouldn't do anything
self.Wki_grid = 1 / self.Wk_grid
if(self.coordinate_system == "CARTESIAN_3D"):
self.kxg, self.kyg, self.kzg = coherent_state.kxg, coherent_state.kyg, coherent_state.kzg
self.gnum = pfc.g(self.kxg, self.kyg, self.kzg, coherent_state.dVk[0], *Params[0:])
self.Omega0_grid = pfc.Omega(self.kxg, self.kyg, self.kzg, 0, *Params[1:]).flatten()
self.Wk_grid = pfc.Wk(self.kxg, self.kyg, self.kzg, *Params[2:]).flatten(); self.Wk_grid[self.k0mask] = 1 # this is where |k| = 0 -> changing this value to 1 arbitrarily shouldn't affect the actual calculation as we are setting Beta_k = 0 here too
self.Wki_grid = 1 / self.Wk_grid
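# Hedged note: the keys read above suggest a toggleDict of roughly the following form;
# the values shown are illustrative assumptions, not taken from this file.
#
# toggleDict = {'Dynamics': 'real', 'Coupling': 'frohlich', 'BEC_density': 'on',
# 'BEC_density_osc': 'on', 'CS_Dyn': 'on', 'Polaron_Potential': 'off'}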
# @profile
def update(self, t, system_vars, coherent_state):
amplitude = system_vars[0:-3]
phase = system_vars[-3].real.astype(float)
P = system_vars[-2].real.astype(float)
X = system_vars[-1].real.astype(float)
XLab = X + pfs.x_BEC_osc(t, self.trapParams['omega_BEC_osc'], self.trapParams['RTF_BEC_X'], self.trapParams['a_osc'])
[aIBi, mI, mB, n0, gBB] = self.Params
F_ext_func = self.LDA_funcs['F_ext']; F_pol_func = self.LDA_funcs['F_pol']; F_BEC_osc_func = self.LDA_funcs['F_BEC_osc']; F_Imp_trap_func = self.LDA_funcs['F_Imp_trap']
dP = self.fParams['dP_ext']; F = self.fParams['Fext_mag']
RTF_X = self.trapParams['RTF_BEC_X']; RTF_Y = self.trapParams['RTF_BEC_Y']; RTF_Z = self.trapParams['RTF_BEC_Z']; RG_X = self.trapParams['RG_BEC_X']; RG_Y = self.trapParams['RG_BEC_Y']; RG_Z = self.trapParams['RG_BEC_Z']
n0_TF = self.trapParams['n0_TF_BEC']; n0_thermal = self.trapParams['n0_thermal_BEC']
# omega_BEC_osc = self.trapParams['omega_BEC_osc']
# if self.BEC_density_osc == 'on':
# Xeff = X + pfs.x_BEC_osc(t, omega_BEC_osc, RTF_X, self.a_osc)
# else:
# Xeff = X
# Update BEC density dependent quantities
if self.BEC_density_var == 'on':
# n = pfs.n_BEC(Xeff, 0, 0, n0_TF, n0_thermal, RTF_X, RTF_Y, RTF_Z, RG_X, RG_Y, RG_Z) # ASSUMING PARTICLE IS IN CENTER OF TRAP IN Y AND Z DIRECTIONS
n = pfs.n_BEC(X, 0, 0, n0_TF, n0_thermal, RTF_X, RTF_Y, RTF_Z, RG_X, RG_Y, RG_Z) # ASSUMING PARTICLE IS IN CENTER OF TRAP IN Y AND Z DIRECTIONS
if np.abs(X) >= RTF_X:
n = 0
if(self.coordinate_system == "SPHERICAL_2D"):
self.Omega0_grid = pfs.Omega(self.grid, 0, mI, mB, n, gBB)
self.Wk_grid = pfs.Wk(self.grid, mB, n, gBB)
self.Wki_grid = 1 / self.Wk_grid
if(self.coordinate_system == "CARTESIAN_3D"):
self.Omega0_grid = pfc.Omega(self.kxg, self.kyg, self.kzg, 0, mI, mB, n, gBB).flatten()
self.Wk_grid = pfc.Wk(self.kxg, self.kyg, self.kzg, mB, n, gBB).flatten(); self.Wk_grid[self.k0mask] = 1
self.Wki_grid = 1 / self.Wk_grid
else:
n = n0
# Calculate updates
amplitude[self.k0mask] = 0 # set Beta_k = 0 where |k| = 0 to avoid numerical issues (this is an unphysical point)
system_vars_new = np.zeros(system_vars.size, dtype=complex)
dVk = coherent_state.dVk
betaSum = amplitude + np.conjugate(amplitude)
xp = 0.5 * np.dot(self.Wk_grid, betaSum * dVk)
betaDiff = amplitude - np.conjugate(amplitude)
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy.optimize as opt # curve_fit, fmin, fmin_tnc
import jams.functions as functions # from jams
from jams.mad import mad # from jams
import warnings
# import pdb
# ----------------------------------------------------------------------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
method='reichstein', shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data.
It uses either
1. a fit of Reco vs. temperature to all nighttime data, or
2. several fits over the season of Reco vs. temperature as in Reichstein et al. (2005), or
3. the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
method='reichstein', shape=False, masked=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Optional Input
--------------
If method = 'day' | 'lasslop', extra inputs are
rg global radiation, i.e. shortwave down [W m-2]
vpd vapour pressure deficit [Pa]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
method if 'global' | 'falge': fit of Reco vs. temperature to all nighttime data
if 'local' | 'reichstein': method of Reichstein et al. (2005)
if 'day' | 'lasslop': method of Lasslop et al. (2010)
shape if False then outputs are 1D arrays;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef
if True: return masked arrays where outputs would be undef
If method = 'night' | 'reichstein', extra parameters are
nogppnight if True: Resp=NEE, GPP=0 at night, GPP always positive
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
Negative respiration possible at night when gpp is forced to 0 with nogppnight=True
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual sums of net ecosystem exchange
Agricultural and Forest Meteorology 107, 43-69
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(np.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
>>> VPD = np.squeeze(dat[8,:])
>>> vpd = np.where(VPD == undef, undef, VPD*100.)
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2014 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=np.nan
MC, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
MC, Feb 2013 - ported to Python 3
MC, May 2013 - replaced cost functions by general cost function cost_abs if possible
AP, Aug 2014 - replaced fmin with fmin_tnc to permit params<0,
permit gpp<0 at any time if nogppnight=True
"""
# Global relationship in Reichstein et al. (2005)
if ((method.lower() == 'global') | (method.lower() == 'falge')):
return nee2gpp_falge(dates, nee, t, isday, undef=undef, shape=shape, masked=masked)
# Local relationship = Reichstein et al. (2005)
elif ((method.lower() == 'local') | (method.lower() == 'reichstein')):
return nee2gpp_reichstein(dates, nee, t, isday, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Lasslop et al. (2010) method
elif ((method.lower() == 'day') | (method.lower() == 'lasslop')):
return nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Include new methods here
else:
raise ValueError('Error nee2gpp: method not implemented yet.')
# ----------------------------------------------------------------------
def nee2gpp_falge(dates, nee, t, isday, undef=np.nan,
shape=False, masked=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using a fit of Reco vs. temperature to all nighttime data,
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_falge(dates, nee, t, isday, undef=np.nan, shape=False, masked=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
shape if False then outputs are 1D arrays;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef
if True: return masked arrays where outputs would be undef
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual sums of net ecosystem exchange
Agricultural and Forest Meteorology 107, 43-69
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=np.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any
inshape = nee.shape
dates = np.squeeze(dates)
nee = np.squeeze(nee)
t = np.squeeze(t)
isday = np.squeeze(isday)
# Check squeezed shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed dates must be 1D array.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed nee must be 1D array.')
if t.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed t must be 1D array.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed isday must be 1D array.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_falge: inputs must have the same size.')
# Transform to masked array with 1D mask
nee = np.ma.array(nee, mask=False)
t = np.ma.array(t, mask=False)
isday = np.ma.array(isday, mask=False)
# mask also undef
if np.isnan(undef):
if np.ma.any(np.isnan(nee)): nee[np.isnan(nee)] = np.ma.masked
if np.ma.any(np.isnan(t)): t[np.isnan(t)] = np.ma.masked
if np.ma.any(np.isnan(isday)): isday[np.isnan(isday)] = np.ma.masked
else:
if np.ma.any(nee==undef): nee[nee==undef] = np.ma.masked
if np.ma.any(t==undef): t[t==undef] = np.ma.masked
if np.ma.any(isday==undef): isday[isday==undef] = np.ma.masked
# Partition - Global relationship as in Falge et al. (2001)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = np.where(~mask)[0]
tt = np.ma.compressed(t[ii])
net = np.ma.compressed(nee[ii])
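# The fitted model (functions.lloyd_fix) is assumed here to be the Lloyd & Taylor (1994)
# respiration curve, Reco(T) = Rref * exp(E0 * (1/(Tref-T0) - 1/(T-T0))) with
# Tref = 283.15 K and T0 = 227.13 K, so that p[0] is the reference respiration Rref
# and p[1] is the temperature sensitivity E0.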
# p, c = opt.curve_fit(functions.lloyd_fix, tt, net, p0=[2.,200.]) # global parameter, global cov matrix
#p = opt.fmin(functions.cost_lloyd_fix, [2.,200.], args=(tt, net), disp=False)
p = opt.fmin(functions.cost_abs, [2.,200.], args=(functions.lloyd_fix_p, tt, net), disp=False)
Reco = np.ones(ndata)*undef
ii = np.where(~t.mask)[0]
Reco[ii] = functions.lloyd_fix(t[ii], p[0], p[1])
# GPP
GPP = np.ones(ndata)*undef
ii = np.where(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# Return
if masked:
if np.isnan(undef):
GPP = np.ma.array(GPP, mask=np.isnan(GPP))
Reco = np.ma.array(Reco, mask=np.isnan(Reco))
else:
GPP = np.ma.array(GPP, mask=(GPP == undef))
Reco = np.ma.array(Reco, mask=(Reco == undef))
if shape != False:
if shape != True:
return np.reshape(GPP,shape), np.reshape(Reco,shape)
else:
return np.reshape(GPP,inshape), np.reshape(Reco,inshape)
else:
return GPP, Reco
# ----------------------------------------------------------------------
def nee2gpp_reichstein(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using several fits of Reco vs. temperature of nighttime data
over the season, as in Reichstein et al. (2005), in order to calculate Reco
and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_reichstein(dates, nee, t, isday, undef=np.nan, shape=None, masked=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
shape if False then outputs are 1D arrays (default)
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef (default)
if True: return masked arrays where outputs would be undef
nogppnight if True: Resp=NEE, GPP=0 at night
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(np.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=np.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any
if shape != False:
if shape != True:
inshape = shape
else:
inshape = nee.shape
dates = np.squeeze(dates)
nee = np.squeeze(nee)
t = np.squeeze(t)
isday = np.squeeze(isday)
if shape == False: inshape = nee.shape
# Check squeezed shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed dates must be 1D array.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed nee must be 1D array.')
if t.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed t must be 1D array.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed isday must be 1D array.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_reichstein: inputs must have the same size.')
# Transform to masked array with 1D mask
nee = np.ma.array(nee, mask=False)
t = np.ma.array(t, mask=False)
isday = np.ma.array(isday, mask=False)
# mask also undef
if np.isnan(undef):
if np.ma.any(np.isnan(nee)): nee[np.isnan(nee)] = np.ma.masked
if np.ma.any(np.isnan(t)): t[np.isnan(t)] = np.ma.masked
if np.ma.any(np.isnan(isday)): isday[np.isnan(isday)] = np.ma.masked
else:
if np.ma.any(nee==undef): nee[nee==undef] = np.ma.masked
if np.ma.any(t==undef): t[t==undef] = np.ma.masked
if np.ma.any(isday==undef): isday[isday==undef] = np.ma.masked
# Partition - Local relationship = Reichstein et al. (2005)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = np.where(~mask)[0]
if (ii.size==0):
print('Warning nee2gpp_reichstein: no valid nighttime data.')
if masked:
GPP = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
Reco = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
else:
GPP = np.ones(inshape)*undef
Reco = np.ones(inshape)*undef
return GPP, Reco
jul = dates[ii]
tt = np.ma.compressed(t[ii])
net = np.ma.compressed(nee[ii])
# 1. each 5 days, in 15 day period, fit if range of T > 5
locp = [] # local param
locs = [] # local err
dmin = np.floor(np.amin(jul)).astype(int) # be aware that julian days starts at noon, i.e. 1.0 is 12h
dmax = np.ceil(np.amax(jul)).astype(int) # so the search will be from noon to noon and thus includes all nights
for i in range(dmin,dmax,5):
iii = np.where((jul>=i) & (jul<(i+14)))[0]
niii = iii.size
if niii > 6:
tt1 = tt[iii]
net1 = net[iii]
mm = ~mad(net1, z=4.5) # make fit more robust by removing outliers
if (np.ptp(tt[iii]) >= 5.) & (np.sum(mm) > 6):
# print(i)
#p = opt.fmin(functions.cost_lloyd_fix, [2.,200.], args=(tt1[mm], net1[mm]), disp=False) # robust params
p, temp1, temp2 = opt.fmin_tnc(functions.cost_lloyd_fix, [2.,200.], bounds=[[0.,None],[0.,None]],
args=(tt1[mm], net1[mm]),
approx_grad=True, disp=False)
try:
p1, c = opt.curve_fit(functions.lloyd_fix, tt1[mm], net1[mm], p0=p, maxfev=10000) # params, covariance
if np.all(np.isfinite(c)): # possible return of curvefit: c=inf
s = np.sqrt(np.diag(c))
else:
s = 10.*np.abs(p)
except:
s = 10.*np.abs(p)
locp += [p]
locs += [s]
# if ((s[1]/p[1])<0.5) & (p[1] > 0.): pdb.set_trace()
if len(locp) == 0:
# raise ValueError('Error nee2gpp_reichstein: No local relationship found.')
print('Warning nee2gpp_reichstein: No local relationship found.')
if masked:
GPP = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
Reco = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
else:
GPP = np.ones(inshape)*undef
Reco = np.ones(inshape)*undef
return GPP, Reco
locp = np.squeeze(np.array(locp).astype(float))
locs = np.squeeze(np.array(locs).astype(float))
# 2. E0 = avg of best 3
# Reichstein et al. (2005), p. 1430, 1st paragraph.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
iii = np.where((locp[:,1] > 0.) & (locp[:,1] < 450.) & (np.abs(locs[:,1]/locp[:,1]) < 0.5))[0]
niii = iii.size
if niii==0:
# raise ValueError('Error nee2gpp_reichstein: No good local relationship found.')
# loosen the criteria: take the best three estimates anyway
iii = np.where((locp[:,1] > 0.))[0]
niii = iii.size
if niii<1:
# raise ValueError('Error nee2gpp_reichstein: No E0>0 found.')
print('Warning nee2gpp_reichstein: No E0>0 found.')
if masked:
GPP = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
Reco = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
else:
GPP = np.ones(inshape)*undef
Reco = np.ones(inshape)*undef
return GPP, Reco
lp = locp[iii,:]
ls = locs[iii,:]
iis = np.argsort(ls[:,1])
bestp = np.mean(lp[iis[0:np.minimum(3,niii)],:],axis=0)
bests = np.mean(ls[iis[0:np.minimum(3,niii)],:],axis=0)
elif niii==1:
bestp = np.squeeze(locp[iii,:])
bests = np.squeeze(locs[iii,:])
elif niii==2:
bestp = np.mean(locp[iii,:],axis=0)
bests = np.mean(locs[iii,:],axis=0)
# ls = locs[iii,:]
# iis = np.argsort(ls[:,1])
else:
lp = locp[iii,:]
ls = locs[iii,:]
iis = np.argsort(ls[:,1])
bestp = np.mean(lp[iis[0:3],:],axis=0)
bests = np.mean(ls[iis[0:3],:],axis=0)
# 3. Refit Rref with fixed E0, each 4 days
refp = [] # Rref param
refii = [] # mean index of data points
E0 = bestp[1]
et = functions.lloyd_fix(tt, 1., E0)
for i in range(dmin,dmax,4):
iii = np.where((jul>=i) & (jul<(i+4)))[0]
niii = iii.size
if niii > 3:
# Calculate directly the minimisation of (nee-p*et)**2
# p = np.sum(net[iii]*et[iii])/np.sum(et[iii]**2)
# p, c = opt.curve_fit(functions.lloyd_only_rref, et[iii], net[iii], p0=[2.])
#p = opt.fmin(functions.cost_lloyd_only_rref, [2.], args=(et[iii], net[iii]), disp=False)
#p = opt.fmin(functions.cost_abs, [2.], args=(functions.lloyd_only_rref_p, et[iii], net[iii]), disp=False)
p, temp1, temp2 = opt.fmin_tnc(functions.cost_abs, [2.], bounds=[[0.,None]],
args=(functions.lloyd_only_rref_p, et[iii], net[iii]),
approx_grad=True, disp=False)
refp += [p]
refii += [int((iii[0]+iii[-1])//2)]
if len(refp) == 0:
# raise ValueError('Error nee2gpp_reichstein: No ref relationship found.')
print('Warning nee2gpp_reichstein: No ref relationship found.')
if masked:
GPP = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
Reco = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
else:
GPP = np.ones(inshape)*undef
Reco = np.ones(inshape)*undef
return GPP, Reco
refp = np.squeeze(np.array(refp))
refii = np.squeeze(np.array(refii))
# 4. Interpol Rref
Rref = np.interp(dates, jul[refii], refp)
# 5. Calc Reco
Reco = np.ones(ndata)*undef
ii = np.where(~t.mask)[0]
Reco[ii] = functions.lloyd_fix(t[ii], Rref[ii], E0)
# 6. Calc GPP
GPP = np.ones(ndata)*undef
ii = np.where(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# 7. Set GPP=0 at night, if wanted
if nogppnight:
mask = isday | nee.mask | t.mask | isday.mask # night
ii = np.where(~mask)[0]
Reco[ii] = nee[ii]
GPP[ii] = 0.
# and prohibit negative gpp at any time
mask = nee.mask | t.mask | (GPP>0.)
ii = np.where(~mask)[0]
Reco[ii] -= GPP[ii]
GPP[ii] = 0.
if masked:
if np.isnan(undef):
GPP = np.ma.array(GPP, mask=np.isnan(GPP))
Reco = np.ma.array(Reco, mask=np.isnan(Reco))
else:
GPP = np.ma.array(GPP, mask=(GPP==undef))
Reco = np.ma.array(Reco, mask=(Reco==undef))
return GPP.reshape(inshape), Reco.reshape(inshape)
# ----------------------------------------------------------------------
def nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=np.nan,
shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=np.nan,
shape=False, masked=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
rg global radiation, i.e. shortwave down [W m-2]
vpd vapour pressure deficit [Pa]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
shape if False then outputs are 1D arrays;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef
if True: return masked arrays where outputs would be undef
nogppnight if True: Resp=NEE, GPP=0 at night
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> VPD = np.squeeze(dat[8,:])
>>> vpd = np.where(VPD == undef, undef, VPD*100.)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=np.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any
inshape = nee.shape
dates = np.squeeze(dates)
nee = np.squeeze(nee)
t = np.squeeze(t)
isday = np.squeeze(isday)
# Check squeezed shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_lasslop: squeezed dates must be 1D array.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_lasslop: squeezed nee must be 1D array.')
if t.ndim != 1: raise ValueError('Error nee2gpp_lasslop: squeezed t must be 1D array.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_lasslop: squeezed isday must be 1D array.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_lasslop: inputs must have the same size.')
if rg.ndim != 1: raise ValueError('Error nee2gpp_lasslop: squeezed rg must be 1D array.')
if vpd.ndim != 1: raise ValueError('Error nee2gpp_lasslop: squeezed vpd must be 1D array.')
if ((rg.size != ndata) | (vpd.size != ndata)):
raise ValueError('Error nee2gpp_lasslop: lasslop inputs must have the same size as other inputs.')
# Transform to masked array with 1D mask
nee = np.ma.array(nee, mask=False)
t = np.ma.array(t, mask=False)
isday = np.ma.array(isday, mask=False)
rg = np.ma.array(rg, mask=False)
vpd = np.ma.array(vpd, mask=False)
# mask also undef
if np.isnan(undef):
if np.ma.any(np.isnan(nee)): nee[np.isnan(nee)] = np.ma.masked
if np.ma.any(np.isnan(t)): t[np.isnan(t)] = np.ma.masked
if np.ma.any(np.isnan(isday)): isday[np.isnan(isday)] = np.ma.masked
# -*- coding: utf-8 -*-
import numpy as np
import skimage.data
import unittest
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_almost_equal)
from Sandbox.jpeg.jpeg import JpegCompressor
class TestImageFormatTransforms(unittest.TestCase):
"""Test conversions between RGB and other image array formats"""
def setUp(self):
self.data = skimage.data.astronaut()
def test_rgb_to_ypbpr(self):
"""Test RGB to Y'PbPr converter"""
jpeg = JpegCompressor()
ypbpr_out = jpeg.rgb_to_ypbpr(self.data)
# Shape should be the same
self.assertEqual(ypbpr_out.shape, self.data.shape)
# Test Y'PbPr range of values
self.assertGreaterEqual(np.min(ypbpr_out[:, :, 0]), 0)
self.assertLessEqual(np.max(ypbpr_out[:, :, 0]), 1)
self.assertGreaterEqual(np.min(ypbpr_out[:, :, 1:3]), -0.5)
self.assertLessEqual(np.max(ypbpr_out[:, :, 1:3]), 0.5)
k_r = jpeg._k_r
k_g = jpeg._k_g
k_b = jpeg._k_b
# Test data correctness (Red)
red_rgb = np.array([[[1, 0, 0]]])
jpeg = JpegCompressor()
red_ycbcr = jpeg.rgb_to_ypbpr(red_rgb)
assert_allclose(red_ycbcr, [[[k_r, -0.5 * k_r / (1 - k_b), 0.5]]])
# Test data correctness (Green)
green_rgb = np.array([[[0, 1, 0]]])
jpeg = JpegCompressor()
green_ycbcr = jpeg.rgb_to_ypbpr(green_rgb)
assert_allclose(green_ycbcr, [[[k_g, -0.5 * k_g / (1 - k_b),
-0.5 * k_g / (1 - k_r)]]])
# Test data correctness (Blue)
blue_rgb = np.array([[[0, 0, 1]]])
jpeg = JpegCompressor()
blue_ycbcr = jpeg.rgb_to_ypbpr(blue_rgb)
assert_allclose(blue_ycbcr, [[[k_b, 0.5, -0.5 * k_b / (1 - k_r)]]])
# Test data correctness (White)
white_rgb = np.array([[[1, 1, 1]]])
jpeg = JpegCompressor()
white_ycbcr = jpeg.rgb_to_ypbpr(white_rgb)
assert_allclose(white_ycbcr, [[[1, 0, 0]]], atol=1e-10)
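# The expected values above are consistent with the standard Y'PbPr transform
# (stated as an assumption about JpegCompressor, not read from its source):
# Y' = k_r*R' + k_g*G' + k_b*B', Pb = 0.5*(B' - Y')/(1 - k_b), Pr = 0.5*(R' - Y')/(1 - k_r)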
def test_gamma_correction(self):
"""Test gamma correction function"""
jpeg = JpegCompressor()
rgb_prime = jpeg.gamma_correct(self.data)
self.assertEqual(rgb_prime.shape, self.data.shape)
self.assertGreaterEqual(np.min(rgb_prime), 0)
self.assertLessEqual(np.max(rgb_prime), 1)
# Test different values of gamma
test_gammas = [.25, .5, .75, 1, 1.25]
for gamma in test_gammas:
y = jpeg.gamma_correct(127, gamma=gamma)
self.assertAlmostEqual(y, (127 / 255)**gamma)
def test_gamma_expansion(self):
"""Test that gamma_expand inverts gamma correct"""
jpeg = JpegCompressor()
rgb_prime = jpeg.gamma_correct(self.data)
rgb_image = jpeg.gamma_expand(rgb_prime)
rms_error = np.sqrt(np.mean((rgb_image - self.data)**2))
# Check that RMS error after decompression is arbitrarily small
self.assertLess(rms_error, 1)
def test_rgb_to_ycbcr(self):
jpeg = JpegCompressor()
ycbcr_image = jpeg.rgb_to_ycbcr(self.data)
# Test size, value ranges, and type
self.assertEqual(ycbcr_image.shape, self.data.shape)
self.assertGreaterEqual(np.min(ycbcr_image), 0)
self.assertLessEqual(np.max(ycbcr_image), 255)
""" Produces figure 2, the distribution of mReasoner parameters.
"""
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Parse command line arguments
if len(sys.argv) != 2:
print('usage: python3 plot_fig2_mReasoner_params.py <mReasoner-fit-out>')
sys.exit(99)
output_filename = sys.argv[1]
# Load the output content
mreasoner_data = []
with open(output_filename) as out_file:
modelname = None
mreas_params = None
for line in out_file.readlines():
line = line.strip()
# Identify current model
if line.startswith('Evaluating '):
if 'ccobra_mreasoner.py' in line:
modelname = 'mReasoner'
elif 'phm' in line:
modelname = 'PHM'
continue
# Accommodate for PHM output (which is discarded later on)
line = line.replace('p_entailm', '\'p_entailm\'').replace('direction', '\'direction\'').replace('max_confi', '\'max_confi\'')
line = line.split()
if line[0] == 'Fit':
# Read the fit result line
content = [x.split('=') for x in line if '=' in x]
content = dict([(x, eval(y)) for x, y in content])
content['params'] = dict(content['params'])
# Populate result data
if modelname == 'mReasoner':
mreas_params = content['params']
mreasoner_data.append({
'id': content['id'],
'model': modelname,
'score': content['score'],
'epsilon': content['params']['epsilon'],
'lambda': content['params']['lambda'],
'omega': content['params']['omega'],
'sigma': content['params']['sigma']
})
elif line[0].startswith('[('):
assert len(line) == 1 and modelname == 'mReasoner'
content = dict(eval(line[0]))
# Omit the duplicate parameterization
if content == mreas_params:
continue
# Read the alternative parameter line
mreasoner_data.append({
'id': mreasoner_data[-1]['id'],
'model': modelname,
'score': mreasoner_data[-1]['score'],
'epsilon': content['epsilon'],
'lambda': content['lambda'],
'omega': content['omega'],
'sigma': content['sigma']
})
# Create dataframe from mReasoner data
mreasoner_df = pd.DataFrame(mreasoner_data)[[
'id', 'model', 'score', 'epsilon', 'lambda', 'omega', 'sigma']]
# Initialize plotting
sns.set(style='whitegrid', palette='colorblind')
fig, axs = plt.subplots(2, 2, figsize=(9, 4.5))
# Bin definition
bins01 = np.arange(0, 1.2, 0.1) - 0.05
space08 = (8 - 0.1) / 11
bins08 = np.array(list(np.linspace(0.1, 8, 11)) + [8 + space08]) - (0.5 * space08)
# Plot epsilon
sns.distplot(mreasoner_df['epsilon'], hist=True, bins=bins01, color='C0', ax=axs[0,0])
axs[0,0].set_title(r'Parameter $\epsilon$')
axs[0,0].set_xlabel('')
axs[0,0].set_ylabel('Density')
axs[0,0].set_xticks(np.linspace(0, 1, 11))
import numpy as np
def CP(x,deg,d=0):
N = np.size(x)
One = np.ones((N,1))
Zero = np.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = np.hstack((Zero,Zero))
elif d > 0:
F = np.hstack((Zero,One))
else:
F = np.hstack((One,x))
return F
else:
F = np.hstack((One,x,np.zeros((N,deg-1))))
for k in range(2,deg+1):
F[:,k:k+1] = 2.*x*F[:,k-1:k]-F[:,k-2:k-1]
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = np.hstack((Zero,One,np.zeros((N,deg-1))))
else:
dark2 = np.zeros((N,deg+1))
for k in range(2,deg+1):
dark2[:,k:k+1] = (2.+2.*dCurr)*dark[:,k-1:k]+2.*x*dark2[:,k-1:k]-dark2[:,k-2:k-1]
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
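# Example (sketch): the basis functions in this module expect x as an (N,1)
# column vector; values shown assume the usual Chebyshev domain [-1, 1].
#   x = np.linspace(-1, 1, 100).reshape(-1, 1)
#   F = CP(x, 5)         # (100, 6) Chebyshev values T_0..T_5
#   dF = CP(x, 5, d=1)   # first derivatives of the same columns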
def LeP(x,deg,d=0):
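    """Legendre polynomials up to degree `deg` via the three-term recurrence
    (k+1) P_{k+1} = (2k+1) x P_k - k P_{k-1}; `d` selects the d-th derivative.
    Returns an (N, deg+1) array."""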
N = np.size(x)
One = np.ones((N,1))
Zero = np.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = np.hstack((Zero,Zero))
elif d > 0:
F = np.hstack((Zero,One))
else:
F = np.hstack((One,x))
return F
else:
F = np.hstack((One,x,np.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = ((2.*k+1.)*x*F[:,k:k+1]-k*F[:,k-1:k])/(k+1.)
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = np.hstack((Zero,One,np.zeros((N,deg-1))))
else:
dark2 = np.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = ((2.*k+1.)*((dCurr+1.)*dark[:,k:k+1]+x*dark2[:,k:k+1])-k*dark2[:,k-1:k])/(k+1.)
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def LaP(x,deg,d=0):
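    """Laguerre polynomials up to degree `deg` via the recurrence
    (k+1) L_{k+1} = (2k+1-x) L_k - k L_{k-1}; `d` selects the d-th derivative.
    Returns an (N, deg+1) array."""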
N = np.size(x)
One = np.ones((N,1))
Zero = np.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = np.hstack((Zero,Zero))
elif d > 0:
F = np.hstack((Zero,-One))
else:
F = np.hstack((One,1.-x))
return F
else:
F = np.hstack((One,1.-x,np.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = ((2.*k+1.-x)*F[:,k:k+1]-k*F[:,k-1:k])/(k+1.)
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = np.hstack((Zero,-One,np.zeros((N,deg-1))))
else:
dark2 = np.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = ((2.*k+1.-x)*dark2[:,k:k+1]-(dCurr+1.)*dark[:,k:k+1]-k*dark2[:,k-1:k])/(k+1.)
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def HoPpro(x,deg,d=0):
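    """Probabilists' Hermite polynomials (He_{k+1} = x He_k - k He_{k-1}) up to
    degree `deg`; `d` selects the d-th derivative. Returns an (N, deg+1) array."""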
N = np.size(x)
One = np.ones((N,1))
Zero = np.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = np.hstack((Zero,Zero))
elif d > 0:
F = np.hstack((Zero,One))
else:
F = np.hstack((One,x))
return F
else:
F = np.hstack((One,x,np.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = x*F[:,k:k+1]-k*F[:,k-1:k]
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = np.hstack((Zero,One,np.zeros((N,deg-1))))
else:
dark2 = np.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = (dCurr+1.)*dark[:,k:k+1]+x*dark2[:,k:k+1]-k*dark2[:,k-1:k]
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def HoPphy(x,deg,d=0):
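    """Physicists' Hermite polynomials (H_{k+1} = 2x H_k - 2k H_{k-1}) up to
    degree `deg`; `d` selects the d-th derivative. Returns an (N, deg+1) array."""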
N = np.size(x)
One = np.ones((N,1))
Zero = np.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = np.hstack((Zero,Zero))
elif d > 0:
F = np.hstack((Zero,2.*One))
else:
F = np.hstack((One,2.*x))
return F
else:
F = np.hstack((One,2.*x,np.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = 2.*x*F[:,k:k+1]-2.*k*F[:,k-1:k]
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = np.hstack((Zero,2.*One,np.zeros((N,deg-1))))
else:
dark2 = np.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = 2.*(dCurr+1.)*dark[:,k:k+1]+2.*x*dark2[:,k:k+1]-2.*k*dark2[:,k-1:k]
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def FS(x,deg,d=0):
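    """Fourier basis [1, sin(x), cos(x), sin(2x), cos(2x), ...] truncated to
    deg+1 columns; `d` selects the d-th derivative, handled via the period-4
    derivative pattern of sine and cosine."""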
N = np.size(x)
F = np.zeros((N,deg+1))
if d == 0:
F[:,0] = 1.
for k in range(1,deg+1):
g = np.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = np.cos(g*x)
else:
F[:,k:k+1] = np.sin(g*x)
else:
F[:,0] = 0.
if d%4 == 0:
for k in range(1,deg+1):
g = np.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = g**d*np.cos(g*x)
else:
F[:,k:k+1] = g**d*np.sin(g*x)
elif d%4 == 1:
for k in range(1,deg+1):
g = np.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = -g**d*np.sin(g*x)
else:
F[:,k:k+1] = g**d*np.cos(g*x)
elif d%4 == 2:
for k in range(1,deg+1):
g = np.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = -g**d*np.cos(g*x)
else:
F[:,k:k+1] = -g**d*np.sin(g*x)
else:
for k in range(1,deg+1):
g = np.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = g**d*np.sin(g*x)
else:
F[:,k:k+1] = -g**d*np.cos(g*x)
return F
def nCP(X,deg,d,nC):
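    """Multivariate Chebyshev basis: each term is a product of 1-D CP columns,
    one factor per input dimension, enumerated over degree multi-indices; nC
    appears to control which low-order (constrained) terms are admitted."""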
# Define functions for use in generating the CP sheet
def MultT(vec):
tout = np.ones((N,1))
for k in range(dim):
tout *= T[:,vec[k]:vec[k]+1,k]
return tout
def Recurse(nC,deg,dim,out,vec,n=0):
if dim > 0:
for x in range(deg+1):
vec[dim] = x
out,n = Recurse(nC,deg,dim-1,out,vec,n=n)
else:
for x in range(deg+1):
vec[dim] = x
                if (any(vec>=nC) and np.sum(vec)
import datetime
import numpy as np
import matplotlib.pyplot as plt
from numpy.lib.function_base import append
import sympy as sp
from multiprocessing import Pool
import os
import cppsolver as cs
from tqdm import tqdm
from ..filter import Magnet_UKF, Magnet_KF
from ..solver import Solver, Solver_jac
class Simu_Data:
def __init__(self, gt, snr, result):
self.gt = gt
self.snr = snr
self.result = result
def __len__(self):
return self.gt.shape[0]
def store(self):
np.savez('result/test.npz', gt=self.gt, data=self.result)
class expression:
def __init__(self, mag_count=1):
if mag_count == 1:
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
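            # Dipole field plus ambient field G: B = 3 r (m.r)/|r|^5 - m/|r|^3 + G,
            # where the 1e-7 factor in VecM is mu0/(4*pi) and the magnet moment
            # magnitude is exp(M); the 1e6 factor below converts Tesla to microtesla.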
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy],
VecB, 'numpy')
elif mag_count == 2:
x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs = sp.symbols(
'x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
x = [x0, x1]
y = [y0, y1]
z = [z0, z1]
M = [M0, M1]
theta = [theta0, theta1]
phy = [phy0, phy1]
VecB = G
for i in range(mag_count):
vecR = sp.Matrix(
[xs - x[i], ys - y[i], zs - z[i]]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0] ** 2 + vecR[1] ** 2 + vecR[2] ** 2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecMi = 1e-7 * sp.exp(M[i]) * sp.Matrix([sp.sin(theta[i]) * sp.cos(
phy[i]), sp.sin(theta[i]) * sp.sin(phy[i]), sp.cos(theta[i])])
VecBi = 3 * vecR * (VecMi.T * vecR) / \
dis ** 5 - VecMi / dis ** 3
VecB += VecBi
VecB = 1e6 * VecB
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x0, y0, z0, M0, theta0, phy0, x1, y1,
z1, M1, theta1, phy1],
VecB, 'numpy')
class Result_Handler:
def __init__(self, simu_data, scale):
self.track_result = []
self.simu_data = simu_data
self.scale = scale
def __add__(self, new):
self.track_result.append(new)
return self
def get_gt_result(self):
a = self.simu_data.gt
b = []
for i in range(len(self.track_result)):
b.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
b = np.stack(b)
return [a, b]
def cal_loss(self):
dist = []
loss = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
point_estimate = np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
])
dist.append(np.linalg.norm(point_gt, 2))
loss.append(np.linalg.norm(point_gt - point_estimate, 2))
dist = 1e2 * np.array(dist)
loss = 1e2 * np.array(loss)
return [self.scale, dist, loss]
def gt_and_route(self):
dist = []
route = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
dist.append(np.linalg.norm(point_gt, 2))
route.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
dist = np.array(dist)
route = np.stack(route, axis=0)
idx = np.argsort(dist)
gt = self.simu_data.gt[idx]
route = route[idx]
return [gt, route]
# plt.plot(dist, loss, label='scale = {}'.format(self.scale))
# plt.legend()
# print('debug')
class Simu_Test:
def __init__(self, start, stop, scales, pSensor=None, resolution=100):
self.scales = scales
self.M = 2.7
self.build_route(start, stop, resolution)
if pSensor is None:
self.build_psensor()
else:
self.pSensor = pSensor
# self.build_expression()
self.params = {
'm': np.log(self.M),
'theta': 0,
'phy': 0,
'gx': 50 / np.sqrt(2) * 1e-6,
'gy': 50 / np.sqrt(2) * 1e-6,
'gz': 0,
}
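        # gx/gy encode a ~50 uT ambient field (presumably geomagnetic), split
        # evenly between the x and y axes; values are in Tesla.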
def build_expression(self):
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy], VecB, 'numpy')
def build_route(self, start, stop, resolution):
# linear route
theta = 90 / 180.0 * np.pi
route = np.linspace(start, stop, resolution)
route = np.stack([route * np.cos(theta), route * np.sin(theta)]).T
route = np.pad(route, ((0, 0), (1, 0)),
mode='constant',
constant_values=0)
self.route = 1e-2 * route
# curvy route
tmp = np.linspace(start, stop, resolution)
route = np.stack([np.sin((tmp-start)/(stop-start) * np.pi * 5),
np.cos((tmp-start)/(stop-start) * np.pi * 5), tmp], axis=0).T
self.route = 1e-2 * route
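        # NOTE: the sinusoidal route above overwrites the linear route built first,
        # so only the curvy 3-D route is actually used.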
def build_psensor(self):
self.pSensor = 1e-2 * np.array([
[1, 1, 1],
[-1, 1, 1],
[-1, -1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
[1, -1, -1],
])
def simulate_process(self, scale):
print(scale)
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.cal_loss()
def gt_and_result(self):
pSensori = 1 * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.get_gt_result()
def compare_noise_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
if choice == 1:
simu = self.estimate_B(pSensori)
elif choice == 0:
simu = self.estimate_B_even_noise(pSensori)
elif choice == 2:
simu = self.estimate_B_singular_noise(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def compare_3_noise(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['Even Noise', 'Raw Noise', 'Single Noise']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/butterfly.jpg', dpi=900)
def compare_noise_type(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['ALL Noise', 'Only Noise', 'Only Precision']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/compare_noise_type.jpg', dpi=900)
def compare_noise_type_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori, choice)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def simulate(self, loop=1):
results = []
pool = Pool()
for scale in self.scales:
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.simulate_process(scale)
results.append(
pool.apply_async(self.simulate_process, args=(scale, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label='scale = {} cm'.format(int(key) * 2))
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
plt.savefig('result/compare_scale/{}.jpg'.format(name), dpi=900)
def simu_readings(self, pSensor):
simu = self.estimate_B(pSensor, noise_type=3)
simu.store()
def simu_gt_and_result(self, pSensor, route, path, name):
pSensori = pSensor
simu = self.estimate_B(pSensori, route=route)
# simu.store()
# params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
# self.M), 1e-2 * route[0, 0], 1e-2 * (route[0, 1]), 1e-2 * (route[0,
# 2]), 0, 0])
model = Solver_jac(1, route[0, 0], route[0, 1], route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
gt_ang = []
rec_ang = []
results = Result_Handler(simu, 1)
for i in tqdm(range(simu.result.shape[0])):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
gt_ang.append(np.array([0, 0, 1]))
t1 = result['theta0'].value
t2 = result['phy0'].value
rec_ang.append(
np.array(
[np.sin(t1) * np.cos(t2),
np.sin(t1) * np.sin(t2),
np.cos(t1)]))
[gt, route] = results.gt_and_route()
gt_ang = np.stack(gt_ang)
rec_ang = np.stack(rec_ang)
if not os.path.exists(path):
os.makedirs(path)
np.savez(os.path.join(path, name), gt=gt * 1e2, result=route *
1e2, gt_ang=gt_ang, result_ang=rec_ang)
def compare_layout_thread(self, index, pSensori):
overall_noise = np.random.randn(3)
simu = self.estimate_B(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [index, dist, loss]
def compare_layouts(self, pSensors, loop=1):
results = []
pool = Pool()
for index, pSensor in enumerate(pSensors):
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.calculate_process(scale)
# self.compare_layout_thread(index, pSensor)
results.append(
pool.apply_async(self.compare_layout_thread,
args=(index, pSensor)))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
# msg = ['Plane Layout(MIT)', 'Our Current Layout', 'Cube Layout']
msg = ['Best Layout', 'Current Layout']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# plt.savefig('result/compare_layout/{}.jpg'.format(name), dpi=900)
plt.show()
def estimate_B(
self,
pSensor,
route=None,
noise_type=0,
overall_noise=None):
# noise type: 0: noise+precision, 1:only noise, 2: only precision
# 3:none
result = []
exp = expression()
if route is None:
route = self.route
for i in range(route.shape[0]):
routei = route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = 0.8 * np.random.randn(result.shape[0])
Noise_y = 0.8 * np.random.randn(result.shape[0])
Noise_z = 1.2 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
if noise_type != 3:
if noise_type != 2:
result += Noise
if overall_noise is not None:
result += overall_noise
# add sensor resolution
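        # (readings are quantized to 0.15 uT steps: scale to 0.01 uT units,
        #  drop the remainder modulo 15, then scale back)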
if noise_type != 1:
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
        SNR = 10 * np.log10(signal_power / noise_power)  # dB, hence log10
result = result.reshape(-1, pSensor.size)
SNR = SNR.reshape(-1, pSensor.shape[0])
# print('Debug')
return Simu_Data(route, SNR, result)
def estimate_B_even_noise(self, pSensor):
result = []
exp = expression()
for i in range(self.route.shape[0]):
routei = self.route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = np.sqrt(2) / 2 * np.random.randn(result.shape[0])
Noise_y = np.sqrt(2) / 2 * np.random.randn(result.shape[0])
Noise_z = np.sqrt(2) / 2 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
result += Noise
# add sensor resolution
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
        SNR = 10 * np.log10(signal_power / noise_power)  # dB, hence log10
result = result.reshape(-1, pSensor.size)
SNR = SNR.reshape(-1, pSensor.shape[0])
# print('Debug')
return Simu_Data(self.route, SNR, result)
def compare_method_thread(self, choice):
pSensori = 5 * self.pSensor
simu = self.estimate_B(pSensori)
if choice == 0:
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
if choice == 1:
sensor_count = pSensori.shape[0]
my_filter = Magnet_UKF(
1, pSensori, R_std=[0.8, 0.8, 1.5] * sensor_count)
my_filter.lm_model.fit_params['m0'].value = np.log(self.M)
my_filter.lm_model.fit_params['m0'].vary = False
my_filter.lm_model.fit_params['X0'].value = self.route[0, 0]
my_filter.lm_model.fit_params['Y0'].value = self.route[0, 1]
my_filter.lm_model.fit_params['Z0'].value = self.route[0, 2]
my_filter.ukf.x[0] = self.params['gx']
my_filter.ukf.x[1] = self.params['gy']
my_filter.ukf.x[2] = self.params['gz']
my_filter.kf.x[0] = self.params['gx']
my_filter.kf.x[1] = self.params['gy']
my_filter.kf.x[2] = self.params['gz']
my_filter.kf.x[3] = self.route[0, 0]
my_filter.ukf.x[3] = self.route[0, 0]
my_filter.kf.x[5] = self.route[0, 1]
my_filter.ukf.x[5] = self.route[0, 1]
my_filter.kf.x[7] = self.route[0, 2]
my_filter.ukf.x[7] = self.route[0, 2]
my_filter.kf.x[9] = self.params['theta']
my_filter.ukf.x[9] = self.params['theta']
my_filter.kf.x[11] = self.params['phy']
my_filter.ukf.x[11] = self.params['phy']
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
my_filter.predict()
datai = simu.result[i].reshape(-1)
result = my_filter.update(datai)
results += result
if choice == 2: # simple kf
sensor_count = pSensori.shape[0]
my_filter = Magnet_KF(1, pSensori, R_std=[
0.8, 0.8, 1.5] * sensor_count)
my_filter.lm_model.fit_params['m0'].value = np.log(self.M)
my_filter.lm_model.fit_params['m0'].vary = False
my_filter.lm_model.fit_params['X0'].value = self.route[0, 0]
my_filter.lm_model.fit_params['Y0'].value = self.route[0, 1]
my_filter.lm_model.fit_params['Z0'].value = self.route[0, 2]
my_filter.kf.x[0] = self.params['gx']
my_filter.kf.x[1] = self.params['gy']
my_filter.kf.x[2] = self.params['gz']
my_filter.kf.x[3] = self.route[0, 0]
my_filter.kf.x[5] = self.route[0, 1]
my_filter.kf.x[7] = self.route[0, 2]
my_filter.kf.x[9] = self.params['theta']
my_filter.kf.x[11] = self.params['phy']
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
my_filter.predict()
datai = simu.result[i].reshape(-1, 3)
result = my_filter.update(datai)
results += result
if choice == 3: # simple kf
sensor_count = pSensori.shape[0]
my_filter = Magnet_KF(
1, pSensori, R_std=[0.8, 0.8, 1.5] * sensor_count, ord=3)
my_filter.lm_model.fit_params['m0'].value = np.log(self.M)
my_filter.lm_model.fit_params['m0'].vary = False
my_filter.lm_model.fit_params['X0'].value = self.route[0, 0]
my_filter.lm_model.fit_params['Y0'].value = self.route[0, 1]
my_filter.lm_model.fit_params['Z0'].value = self.route[0, 2]
my_filter.kf.x[0] = self.params['gx']
my_filter.kf.x[1] = self.params['gy']
my_filter.kf.x[2] = self.params['gz']
my_filter.kf.x[3] = self.route[0, 0]
my_filter.kf.x[6] = self.route[0, 1]
my_filter.kf.x[9] = self.route[0, 2]
my_filter.kf.x[12] = self.params['theta']
my_filter.kf.x[15] = self.params['phy']
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
my_filter.predict()
datai = simu.result[i].reshape(-1, 3)
result = my_filter.update(datai)
results += result
return results.cal_loss()
def compare_method(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.compare_method_thread(1)
results.append(
pool.apply_async(self.compare_method_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(3, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['LM', 'MY UKF', "KF on LM results", "KF on LM results ord=3"]
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
plt.savefig('result/compare_method/{}.jpg'.format(name), dpi=600)
def compare_softiron(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.compare_method_thread(1)
results.append(
pool.apply_async(self.compare_softiron_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_softiron_thread, args=(1, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(3, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['origin', 'Add softiron', ]
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
root = 'result/compare_softiron'
if not os.path.exists(root):
os.makedirs(root)
plt.savefig(os.path.join(root, '{}.jpg'.format(name)), dpi=600)
def compare_softiron_thread(self, choice):
pSensori = 5 * self.pSensor
simu = self.estimate_B(pSensori)
if choice == 0:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
if choice == 1:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
soft_iron_param = 0.05 * np.random.randn(
simu.result.size//simu.result.shape[0])+1
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1)
datai *= soft_iron_param
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
return results.cal_loss()
def compare_hardiron(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.compare_method_thread(1)
results.append(
pool.apply_async(self.compare_hardiron_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_hardiron_thread, args=(1, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(3, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['origin', 'Add hardiron', ]
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
root = 'result/compare_hardiron'
if not os.path.exists(root):
os.makedirs(root)
plt.savefig(os.path.join(root, '{}.jpg'.format(name)), dpi=600)
def compare_hardiron_thread(self, choice):
pSensori = 5 * self.pSensor
simu = self.estimate_B(pSensori, noise_type=0)
if choice == 0:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
if choice == 1:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
            hard_iron_param = 5.0 * np.random.randn(
                simu.result.size//simu.result.shape[0])+1
            for i in range(simu.result.shape[0]):
                datai = simu.result[i].reshape(-1)
                datai += hard_iron_param
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
return results.cal_loss()
def estimate_B_singular_noise(self, pSensor):
result = []
exp = expression()
for i in range(self.route.shape[0]):
routei = self.route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = np.sqrt(1.5) * np.random.randn(result.shape[0])
Noise_y = 0 * np.random.randn(result.shape[0])
Noise_z = 0 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
result += Noise
# add sensor resolution
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
        SNR = 10 * np.log10(signal_power / noise_power)  # dB, hence log10
result = result.reshape(-1, pSensor.size)
SNR = SNR.reshape(-1, pSensor.shape[0])
# print('Debug')
return Simu_Data(self.route, SNR, result)
def simulate_2mag_3type_thread(pSensor, params, typ, i):
tmp = []
for j in range(pSensor.shape[0]):
param = [
params['gx'], params['gy'], params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'],
params['Y0'], params['Z0'], params['m'],
params['theta0'], params['phy0'], params['X1'],
params['Y1'], params['Z1'], params['m'],
params['theta1'], params['phy1'],
]
tmp.append(simulate_2mag_3type.exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0)
tmp = tmp.reshape(-1)
print(i, ' finished ')
return [tmp, typ]
def simulate_2mag_3type_delta_thread(pSensor, params, typ, i):
tmp = []
for j in range(pSensor.shape[0]):
param = [
params['gx'], params['gy'], params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'],
params['Y0'], params['Z0'], params['m'],
params['theta0'], params['phy0'], params['X1'],
params['Y1'], params['Z1'], params['m'],
params['theta1'], params['phy1'],
]
# the result after a short period of time
r = 1 * 1e-2 * np.random.rand()
theta = np.random.rand() * np.pi
phy = np.random.rand() * 2 * np.pi
dx0 = r * np.sin(theta) * np.cos(phy)
dy0 = r * np.sin(theta) * np.sin(phy)
dz0 = r * np.cos(theta)
r = 1 * 1e-2 * np.random.rand()
theta = np.random.rand() * np.pi
phy = np.random.rand() * 2 * np.pi
dx1 = r * np.sin(theta) * np.cos(phy)
dy1 = r * np.sin(theta) * np.sin(phy)
dz1 = r * np.cos(theta)
param2 = [
params['gx'], params['gy'], params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'] + dx0,
params['Y0'] + dy0, params['Z0'] + dz0, params['m'],
params['theta0'], params['phy0'], params['X1'] + dx1,
params['Y1'] + dy1, params['Z1'] + dz1, params['m'],
params['theta1'], params['phy1'],
]
aaa = np.concatenate(
[simulate_2mag_3type.exp.VecB(*param).squeeze(),
simulate_2mag_3type.exp.VecB(*param2).squeeze() -
simulate_2mag_3type.exp.VecB(*param).squeeze()],
axis=0)
tmp.append(aaa)
print(aaa.shape)
tmp = np.concatenate(tmp, axis=0)
tmp = tmp.reshape(-1)
print(i, ' finished ')
return [tmp, typ]
def simulate_2mag_3type(pSensor, size=1000, cls=3, edge=20):
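    """Simulate two-magnet readings labelled into `cls` distance classes: with
    cls == 3, type 0 places both magnets beyond `edge` cm from the origin,
    type 1 places one inside and one outside, and type 2 places both inside.
    Returns (readings, types)."""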
size = int(size)
results = []
types = []
simulate_2mag_3type.exp = expression(2)
pool = Pool()
pool_results = []
i = 0
# for i in range(size * cls):
while(i < size * cls):
# G's Spherical Coordinates
t1 = np.pi * np.random.rand()
t2 = 2 * np.pi * np.random.rand()
# P1's Spherical Coordinates
tt1 = np.pi * np.random.rand()
pp1 = 2 * np.pi * np.random.rand()
# P2's Spherical Coordinates
tt2 = np.pi * np.random.rand()
pp2 = 2 * np.pi * np.random.rand()
typ = i % cls
G = 38.6600
# G = 0.0
if cls == 3:
if typ == 0:
r1 = np.random.rand() * 20 + edge
r2 = np.random.rand() * 20 + edge
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif typ == 1:
r1 = np.random.rand() * 20 + edge
r2 = np.random.rand() * (edge - 5) + 5
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif typ == 2:
r1 = np.random.rand() * (edge - 5) + 5
r2 = np.random.rand() * (edge - 5) + 5
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif cls == 2:
if typ == 0:
r1 = np.random.rand() * 20 + 30
r2 = np.random.rand() * 20 + 10
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif typ == 1:
r1 = np.random.rand() * 20 + 10
r2 = np.random.rand() * 20 + 10
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
# check G and R
# GG = np.linalg.norm(np.array([params['gx'],params['gy'],params['gz']]), ord=2)
# print(GG)
# check if two point are too close to each other
dis = np.linalg.norm(
np.array(
[params['X0'] - params['X1'],
params['Y0'] - params['Y1'],
params['Z0'] - params['Z1']]),
ord=2)
# if dis < 5*1e-2:
# print(dis)
# continue
i += 1
# [tmp, typ] = simulate_2mag_3type_thread(pSensor, params, typ, i)
pool_results.append(pool.apply_async(
simulate_2mag_3type_thread, args=(pSensor, params, typ, i)))
pool.close()
pool.join()
for pool_result in pool_results:
[tmp, typ] = pool_result.get()
results.append(tmp)
types.append(typ)
results = np.concatenate(results, axis=0).reshape(-1, 3)
Noise_x = 0.7 * np.random.randn(results.shape[0])
Noise_y = 0.7 * np.random.randn(results.shape[0])
Noise_z = 1.2 * np.random.randn(results.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
    # TODO: Decide whether to use the noise
# results += Noise
# results = np.floor(results * 100.0)
# results = results - np.mod(results, 15)
# results = 1e-2 * results
# compute SNR
G = 1e6 * np.array(
[params['gx'], params['gy'], params['gz']])
signal_power = np.sum(np.power(results - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
    SNR = 10 * np.log10(signal_power / noise_power)  # dB, hence log10
results = results.reshape(size * cls, -1)
# np.save('result/3types.npy', result)
types = np.array(types)
return results, types
def simulate_2mag_3type_test(pSensor, size=1000, cls=3):
size = int(size)
results = []
types = []
simulate_2mag_3type.exp = expression(2)
pool = Pool()
pool_results = []
for i in range(size * cls):
# G's Spherical Coordinates
        t1 = np.pi * np.random.rand()
from __future__ import division
import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import bpy
### Plot the red and blue circular pulses.
##### PARAMETERS
c = 299792458
def Efield(times=np.linspace(-30e-15, 30e-15, 5000), pdur=20e-15, A1=1, lambda1=7.90e-7, ellip=1):
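    """x/y components of an elliptically polarized pulse: a Gaussian envelope
    of FWHM `pdur` on a carrier at wavelength `lambda1`, with the y component
    scaled by `ellip`. Returns (times, x, y)."""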
def gauss(x, A=1, x0=0, width=1):
return A*np.exp(-(x-x0)**2/(2*(width/2.35482)**2))
w1 = c/lambda1 * 2 * np.pi
x = A1 * np.sin(w1 * times) * gauss(times, width=pdur)
y = ellip * A1 * np.cos(w1 * times) * gauss(times, width=pdur)
return times, x, y
def make_field(x, y, z, bevel_obj='', name=''):
# Create the data block for the curve.
curveD = bpy.data.curves.new(name, type='CURVE')
curveD.dimensions = '3D'
curveD.resolution_u = 2
    coords = np.vstack((x,y,z))
"""
Created on 10:25 at 08/07/2021/
@author: bo
"""
import argparse
import os
import numpy as np
import pickle
import data.rruff as rruff
from sklearn.metrics import roc_curve, auc
from scipy.special import expit, softmax
import const
import test
import vis_utils as vis_utils
import data.prepare_data as pdd
import matplotlib
import matplotlib.ticker as ticker
# matplotlib.use("pgf")
# matplotlib.rcParams.update({
# "pgf.texsystem": "pdflatex",
# 'text.usetex': True,
# })
matplotlib.rcParams.update({
'font.family': 'serif',
"font.size": 7,
"legend.fontsize": 7,
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"legend.title_fontsize": 7,
"axes.titlesize": 7,
})
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, StrMethodFormatter, NullFormatter
import matplotlib.ticker as mticker
TEXTWIDTH = 6.75133
def give_args():
"""This function is used to give the argument"""
parser = argparse.ArgumentParser(description='Reproduce figures in the paper')
parser.add_argument('--dir2read_exp', type=str, default="../exp_data/exp_group/")
parser.add_argument('--dir2read_data', type=str, default="../data_group/")
parser.add_argument('--dir2save', type=str, default="figures/")
parser.add_argument('--index', type=str, default="figure_1", help="which figure or table do you want to produce?")
parser.add_argument("--save", type=const.str2bool, default=False, help="whether to save the image or not")
parser.add_argument("--pdf_pgf", type=str, default="pgf", help="in what kind of format will I save the image?")
return parser.parse_args()
# ------------------------------------------------------------------------------------
def set_size(width, fraction=1, enlarge=0):
    """Return a (width, height) figure size in inches.
    Args:
        width: full text width in inches
        fraction: fraction of the text width to use for the figure
        enlarge: if non-zero, scale the golden-ratio height by this factor
    """
    # Width of figure (in inches)
    fig_width_in = width * fraction
golden_ratio = (5 ** .5 - 1) / 2
if enlarge != 0:
golden_ratio *= enlarge
fig_height_in = fig_width_in * golden_ratio
fig_dim = (fig_width_in, fig_height_in)
return fig_dim
def give_figure_specify_size(fraction, enlarge=0):
fig = plt.figure()
fig.set_size_inches(set_size(TEXTWIDTH, fraction, enlarge))
return fig
# -------------- First figure --------------------#
def give_data_augmentation_example(tds_dir_use="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pgf", data_path="../data_group/"):
args = const.give_args_test(raman_type="excellent_unoriented")
args["pre_define_tt_filenames"] = False
tr_data, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
show_data_augmentation_example(args, tr_data[0], tr_data[1], label_name_tr,
tds_dir_use, save, pdf_pgf)
def show_data_augmentation_example(args, tr_spectrum, tr_label, label_name_tr,
tds_dir_use="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pdf"):
"""Illustrate the data augmentation process
Args:
args: the arguments that can tell me the maximum and minimum wavenumber
tr_spectrum: [num_spectra, wavenumbers]
tr_label: [num_spectra]
label_name_tr: corresponding names for each class in the tr label
tds_dir_use: the directory to save the data.
save: bool, whether to save the figure
"""
select_index = np.where(label_name_tr == "AlumNa")[0] #AlumNa
tr_select = tr_spectrum[np.where(tr_label == select_index)[0]]
u_spectrum = tr_select[np.random.choice(len(tr_select), 1)[0]]
std_s_spectrum = rruff.calc_std(u_spectrum, 10)
rand_noise = np.random.normal(0, 3, [3, len(u_spectrum)]) # 5 before
generate = abs(np.expand_dims(u_spectrum, 0) + rand_noise * np.expand_dims(std_s_spectrum, 0))
generate = generate / np.max(generate, axis=-1, keepdims=True)
wavenumber = np.arange(args["max_wave"])[args["min_wave"]:]
text_use = ["%s" % label_name_tr[select_index][0], "Synthetic"]
fig = give_figure_specify_size(0.5, 1.1)
ax = fig.add_subplot(111)
for i, s_c in enumerate(["r", "g"]):
ax.plot([], [], color=s_c)
ax.plot(wavenumber, u_spectrum, 'r', lw=0.8)
ax.text(250, 0.5, text_use[0])
for i, s in enumerate(generate):
ax.plot(wavenumber, s + i + 1, 'g', lw=0.8)
ax.text(250, 0.5 + i + 1, text_use[-1])
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlabel("Wavenumber (cm" + r"$^{-1})$")
ax.set_ylabel("Intensity (a.u.)")
if save:
plt.savefig(
tds_dir_use + "/augmentation_example_on_RRUFF_%s.%s" % (label_name_tr[select_index][0],
pdf_pgf),
pad_inches=0, bbox_inches='tight')
# --------------------------- second & third figure ------------------------------#
def show_example_spectra(tds_dir="../exp_data/eerst_paper_figures/", save=False, pdf_pgf="pgf",
data_path="../data_group/"):
"""This function shows the example spectra from each dataset. It should also show the distribution of the classes
"""
dataset = ["RRUFF", "RRUFF", "ORGANIC", "ORGANIC", "BACTERIA"]
raman_type = ["raw", "excellent_unoriented", "organic_target_raw", "organic_target", "bacteria_reference_finetune"]
color_group = ['r', 'g']
fig = give_figure_specify_size(0.5, 3.0)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
im_index = 0
title_group = ["Mineral (r)", "Mineral (p)", "Organic (r)", "Organic (p)", "Bacteria"]
tr_frequency_count = []
for s_data, s_raman in zip(dataset, raman_type):
ax = fig.add_subplot(5, 1, im_index + 1)
args = const.give_args_test(raman_type=s_raman)
args["pre_define_tt_filenames"] = False
if s_data == "RRUFF" or s_data == "ORGANIC":
tr_data, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
else:
tr_data, _, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
tr_spectra, tr_label = tr_data
unique_label, unique_count = np.unique(tr_label, return_counts=True)
if s_data == "RRUFF":
tr_frequency_count.append(unique_count)
if s_data == "RRUFF":
class_name = "Beryl"
select_label = np.where(label_name_tr == class_name)[0]
index = np.where(tr_label == select_label)[0]
else:
select_label = unique_label[np.argmax(unique_count)]
if s_data == "ORGANIC":
select_label = 1
class_name = label_name_tr[select_label]
if s_data == "ORGANIC":
class_name = "Benzidine"
index = np.where(tr_label == select_label)[0]
if len(index) > 15:
index = np.random.choice(index, 5, replace=False)
_spectra = tr_spectra[index]
if s_data == "RRUFF":
wavenumber = np.arange(args["max_wave"])[args["min_wave"]:]
ax.set_xlim((0, 1500))
elif s_data == "BACTERIA":
wavenumber = np.load("../bacteria/wavenumbers.npy")
elif s_data == "ORGANIC":
wavenumber = np.linspace(106.62457839661, 3416.04065695651, np.shape(tr_spectra)[1])
for j, s in enumerate(_spectra):
ax.plot(wavenumber, s, alpha=0.8, lw=0.8)
ax.set_title(title_group[im_index] + ": " + class_name)
im_index += 1
if s_raman == "bacteria_finetune":
ax.set_xlabel("Wavenumber (cm" + r"$^{-1})$")
ax_global.set_ylabel("Intensity (a.u.)\n\n")
plt.subplots_adjust(hspace=0.47)
if save:
plt.savefig(tds_dir + "/example_spectra.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
title_group = ["Mineral (r)", "Mineral (p)"]
fig = give_figure_specify_size(0.5, 0.8)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
max_count = np.max([np.max(np.unique(v, return_counts=True)[1]) for v in tr_frequency_count])
for i, s in enumerate(tr_frequency_count):
ax = fig.add_subplot(1, 2, i + 1)
ax.hist(s, bins=np.max(s), ec="white", lw=0.4)
ax.set_yscale("symlog")
ax.set_ylim((0, max_count))
if i == 1:
ax.yaxis.set_ticks_position('none')
ax.yaxis.set_major_formatter(plt.NullFormatter())
else:
ax.yaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.set_title(title_group[i])
plt.subplots_adjust(wspace=0.04)
ax_global.set_xlabel("\n\n Number of spectra per class")
ax_global.set_ylabel("Number of classes \n\n")
if save:
plt.savefig(tds_dir + "/class_distribution_on_RRUFF.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
# -------------------- figure 4 --------------------------
def give_uncertainty_distribution_figure_with_confidence_interval(tds_dir="../exp_data/eerst_paper_figures/",
save=False,
pdf_pgf="pgf",
path_init="../", use_nll_or_prob="prob",
data_path="../data_group/", strategy="sigmoid"):
_, rruff_raw_avg, rruff_raw_std = get_multiple_rruff_uncertainty("raw", path_init,
use_nll_or_prob=use_nll_or_prob,
data_path=data_path, strategy=strategy)
_, rruff_pre_avg, rruff_pre_std = get_multiple_rruff_uncertainty("excellent_unoriented", path_init,
use_nll_or_prob=use_nll_or_prob,
data_path=data_path, strategy=strategy)
_, organic_raw_avg, organic_raw_std = get_multiple_organic_uncertainty("organic_target_raw", data_path=data_path, path_init=path_init,
use_nll_or_prob="prob", strategy=strategy)
_, organic_pre_avg, organic_pre_std = get_multiple_organic_uncertainty("organic_target", data_path=data_path, path_init=path_init,
use_nll_or_prob="prob", strategy=strategy)
_, bacteria_avg, bacteria_std = get_multiple_bacteria_uncertainty(path_init,
use_nll_or_prob=use_nll_or_prob,
data_path=data_path, strategy=strategy)
color_use = ["r", "g", "b", "orange", "m"]
title_group = "Correct match (%)"
dataset = ["Mineral (r)", "Mineral (p)", "Organic (r)", "Organic (p)", "Bacteria"]
fig = give_figure_specify_size(0.5, 1.25)
ax = fig.add_subplot(111)
for j, stat in enumerate([[rruff_raw_avg, rruff_raw_std],
[rruff_pre_avg, rruff_pre_std],
[organic_raw_avg, organic_raw_std],
[organic_pre_avg, organic_pre_std],
[bacteria_avg, bacteria_std]]):
if strategy != "none":
plot_fillx_filly(stat[0][0]*100, stat[1][0],
stat[0][1]*100, stat[1][1], ax, color_use=color_use[j])
else:
plot_fillx_filly(stat[0][0], stat[1][0], stat[0][1]*100, stat[1][1],
ax, color_use=color_use[j])
ax.legend(dataset, loc='best', handlelength=1.1, handletextpad=0.5,
borderpad=0.25) # bbox_to_anchor=(1.0, 0.8), loc="upper left",
if strategy == "softmax" or strategy == "sigmoid":
ax.plot([0, 100], [0, 100], ls='--', color='black')
ax.set_xlim((0, 100))
ax.set_ylim((0, 100))
ax.set_ylabel(title_group)
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.set_xlabel("Similarity score")
if save:
plt.savefig(tds_dir + "/uncertainty_distribution_for_the_test_dataset_with_confidence_interval_%s.%s" % (strategy, pdf_pgf),
pad_inches=0, bbox_inches='tight')
def motivation_for_conformal_prediction_bacteria(save=False, pdf_pgf="pgf",
path_init="../exp_data/exp_group/",
path2save="../exp_data/eerst_paper_figures/",
data_path="../data_group/"):
dataset = ["BACTERIA"]
output_bacteria = motivation_for_conformal_prediction(dataset[0], select_length=1, show=False, path_init=path_init,
data_path=data_path)
two_select_index = np.where(np.array([len(v) for v in output_bacteria[4]]) == 2)[0]
fig = give_figure_specify_size(1.1, 0.8)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
ax = fig.add_subplot(2, 2, 1)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=579, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(2, 2, 3)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=two_select_index[5], ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(1, 2, 2)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=463, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
plt.subplots_adjust(wspace=0.04)
ax_global.set_xlabel("\nWavenumber (cm" + r"$^{-1}$" + ")")
ax_global.set_ylabel("Intensity (a.u.) \n")
return output_bacteria
def motivation_for_conformal_prediction_multiple_datasets(save=False, pdf_pgf="pgf",
path_init="../exp_data/exp_group/",
path2save="../exp_data/eerst_paper_figures/",
data_path="../data_group/"):
dataset = ["RRUFF_excellent_unoriented",
"RRUFF_raw",
"BACTERIA"]
fig = give_figure_specify_size(1.1, 0.8)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
output_rruff_r = motivation_for_conformal_prediction(dataset[1], select_length=1, show=False, path_init=path_init,
data_path=data_path)
output_rruff_p = motivation_for_conformal_prediction(dataset[0], select_length=1, show=False, path_init=path_init,
data_path=data_path)
output_bacteria = motivation_for_conformal_prediction(dataset[2], select_length=1, show=False, path_init=path_init,
data_path=data_path)
ax = fig.add_subplot(2, 3, 1)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=579, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(2, 3, 4)
_show_motivation_for_conformal_prediction(*output_rruff_p, select_index=25, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(1, 3, 2)
_show_motivation_for_conformal_prediction(*output_rruff_r, select_index=145, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(1, 3, 3)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=463, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
plt.subplots_adjust(wspace=0.04)
ax_global.set_xlabel("\nWavenumber (cm" + r"$^{-1}$" + ")")
ax_global.set_ylabel("Intensity (a.u.) \n")
if save:
plt.savefig(path2save + "conformal_motivation.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
def _calc_motivation_for_conformal_prediction(alpha_use=0.05, use_original_weight="original",
dataset="BACTERIA",
path_init="../exp_data/exp_group/",
data_path="../data_group/"):
if dataset == "BACTERIA":
wavenumbers = np.load("../bacteria/wavenumbers.npy")
raman_type = "bacteria_random_reference_finetune"
args = const.give_args_test(raman_type=raman_type)
args["pre_define_tt_filenames"] = False
tr_data, val_data, tt_data, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls",
print_info=False, dir2read=data_path)
tr_spectra, tt_spectra = tr_data[0], tt_data[0]
tr_label_group = [tr_data[1], tr_data[1]]
val_label, tt_label = val_data[1], tt_data[1]
path2load = path_init + "bacteria_reference_finetune/tds/"
s_split = 1
path = path2load + [v for v in os.listdir(path2load) if "split_%d" % s_split in v and ".txt" not in v][0] + "/"
val_prediction = pickle.load(open(path + "validation_prediction.obj", "rb"))
tt_prediction = pickle.load(open(path + "test_prediction.obj", "rb"))
elif "RRUFF" in dataset:
raman_type = dataset.split("RRUFF_")[1]
dataset = "RRUFF"
args = const.give_args_test(raman_type=raman_type)
wavenumbers = np.arange(args["max_wave"])[args["min_wave"]:]
args["pre_define_tt_filenames"] = False
tr_data, tt_data, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls",
print_info=False, dir2read=data_path)
[_, reference_val_label], [_, val_label] = pdd.get_fake_reference_and_test_data(tr_data, 1, data=dataset)
tr_label_group = [reference_val_label, tr_data[1]]
tr_spectra, tt_spectra = tr_data[0], tt_data[0]
tt_label = tt_data[1]
path2load = path_init + "%s/tds/" % raman_type
s_split = 1
path = path2load + [v for v in os.listdir(path2load) if "split_%d" % s_split in v and '.txt' not in v][0] + "/"
val_prediction = pickle.load(open(path + "validation_prediction.obj", "rb"))
tt_prediction = pickle.load(open(path + "test_prediction.obj", "rb"))
if use_original_weight == "original":
val_pred_en, tt_pred_en = val_prediction[0]["ensemble_avg"], tt_prediction[0]["ensemble_avg"]
else:
val_pred_en, tt_pred_en = val_prediction[1]["ensemble_avg"], tt_prediction[1]["ensemble_avg"]
val_pred_baseon_cls, _ = test.reorganize_similarity_score(val_pred_en, tr_label_group[0])
tt_pred_baseon_cls, tt_corr_tr_index = test.reorganize_similarity_score(tt_pred_en, tr_label_group[1])
val_prediction_score = give_calibration_single_score_prediction(val_pred_baseon_cls, True, val_label)
threshold = np.quantile(val_prediction_score, alpha_use)
tt_top1 = np.argmax(tt_pred_baseon_cls, axis=-1)
accu = [v == q for v, q in zip(tt_top1, tt_label)]
tt_prediction, \
tt_accuracy = give_test_prediction_baseon_single_score_threshold(tt_pred_baseon_cls,
True, tt_label,
threshold)
tt_pred_softmax = softmax(tt_pred_baseon_cls, axis=-1)
tt_correct_or_wrong = [1 if tt_label[i] in v else 0 for i, v in enumerate(tt_prediction)]
return tr_label_group, [val_label, tt_label], [tr_spectra, tt_spectra], \
tt_pred_softmax, tt_prediction, tt_correct_or_wrong, tt_corr_tr_index, label_name_tr, wavenumbers
def _show_motivation_for_conformal_prediction(tr_label_group, tt_label,
tr_spectra, tt_spectra,
tt_prediction, tt_pred_baseon_cls_softmax,
tt_corr_tr_index,
label_name,
wavenumbers, select_index, ax, save, pdf_pgf, path2save):
"""Args
select_index: a single index
save: bool variable
"""
_tr_corr_index = np.where(tr_label_group[1] == tt_label[select_index])[0]
if len(tt_prediction[select_index]) >= 3:
height = 1.5
elif len(tt_prediction[select_index]) == 2:
height = 1.2
else:
height = 1.0
if not ax:
fig = give_figure_specify_size(0.5, height)
ax = fig.add_subplot(111)
color_input = 'r'
color_group = ['g', 'b', 'orange', "c", "tab:blue"]
select_prediction = tt_prediction[select_index]
score = tt_pred_baseon_cls_softmax[select_index]
score_select = score[select_prediction]
score_select_sort_index = np.argsort(score_select)[::-1]
select_prediction = select_prediction[score_select_sort_index]
score_select_sorted = score_select[score_select_sort_index]
input_name = "Input: %s" % label_name[tt_label[select_index]]
scale = 1.4
ax.plot(wavenumbers, tt_spectra[select_index] + len(select_prediction) * scale, color=color_input)
if len(label_name) == 30:
x_loc = 450
else:
x_loc = 100
ax.text(x_loc, len(select_prediction) * scale + 0.95, input_name, color=color_input)
for i, s in enumerate(select_prediction):
if s == tt_label[select_index]:
color_use = color_input
else:
color_use = color_group[i]
_tr_corr_index = tt_corr_tr_index[select_index][s]
match_name = "Match: %s (p=%.2f)" % (label_name[s], score_select_sorted[i])
ax.plot(wavenumbers, tr_spectra[_tr_corr_index] + (len(select_prediction) - i - 1) * scale,
color=color_use)
ax.text(x_loc, (len(select_prediction) - i - 1) * scale + 1, match_name, color=color_use)
ax.yaxis.set_major_formatter(plt.NullFormatter())
if save:
_name = label_name[tt_label[select_index]]
plt.savefig(path2save + "conformal_motivation_%s_%d.%s" % (_name, select_index, pdf_pgf),
pad_inches=0, bbox_inches='tight')
def motivation_for_conformal_prediction(dataset="RRUFF_excellent_unoriented",
select_length=3, path_init="../", show=False, save=False,
pdf_pgf="pgf", data_path="../data_group/"):
if dataset == "RRUFF_excellent_unoriented":
alpha_use = 0.01
elif dataset == "RRUFF_raw":
alpha_use = 0.0005
elif dataset == "BACTERIA":
alpha_use = 0.05
tr_label_group, [val_label, tt_label], [tr_spectra, tt_spectra], \
tt_pred_softmax, tt_prediction, tt_correct_or_wrong, \
tt_corr_tr_index, label_name, wavenumbers = _calc_motivation_for_conformal_prediction(alpha_use=alpha_use,
dataset=dataset,
path_init=path_init,
data_path=data_path)
def filter_index(select_length):
tt_index = []
for i, v in enumerate(tt_prediction):
prob_subset = tt_pred_softmax[i, v]
prob_subset_sort_index = np.argsort(prob_subset)[::-1]
_pred_label = np.array(v)[prob_subset_sort_index]
if len(v) == select_length and tt_correct_or_wrong[i] == 1 and _pred_label[-1] == tt_label[i]:
tt_index.append(i)
return tt_index
if select_length != 0:
tt_index = filter_index(select_length)
select_index = np.random.choice(tt_index, 1)
else:
if dataset == "RRUFF_raw":
select_index = [191, 182, 145]
elif dataset == "RRUFF_excellent_unoriented":
select_index = [25, 594, 312, 1213, 53]
elif dataset == "BACTERIA":
select_index = [463]
if show:
for _select_index in select_index:
_show_motivation_for_conformal_prediction(tr_label_group, tt_label,
tr_spectra, tt_spectra,
tt_prediction, tt_pred_softmax,
tt_corr_tr_index,
label_name, wavenumbers, _select_index, ax=None, save=save,
pdf_pgf=pdf_pgf, path2save=None)
return tr_label_group, tt_label, tr_spectra, tt_spectra, tt_prediction, tt_pred_softmax, tt_corr_tr_index, \
label_name, wavenumbers
def give_conformal_prediction_for_bacteria_paper(path_init="../",
use_original_weight="original",
tds_dir=None, save=False, pdf_pgf="pdf",
data_path="../data_group/",
apply_softmax="none"):
alpha_group = np.linspace(0, 0.20, 10)
path2load, split_version = get_path_for_conformal(path_init, "bacteria_reference_finetune")
stat_bacteria = main_plot_for_scoring_rule(path2load, split_version,
"bacteria_random_reference_finetune",
"BACTERIA", use_original_weight,
alpha_group, show=False, data_path=data_path, apply_softmax=apply_softmax)
fig = give_figure_specify_size(1.0, 0)
title_group = ["Bacteria: 82.71"]
loc = [[0.80, 0.92]]
orig_perf = [82.71]
orig_perf = [v - 1 for v in orig_perf]
for i, stat in enumerate([stat_bacteria]):
stat_avg = np.mean(stat, axis=0)
ax = fig.add_subplot(2, 2, 1)
x_axis = 100 - alpha_group * 100
ax.plot(x_axis, stat_avg[:, 0] * 100, color='r', marker='.')
ax.plot(x_axis, x_axis, color='g', ls=':')
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.set_xlim(np.min(x_axis), np.max(x_axis))
ax.set_ylim(np.min(x_axis), np.max(x_axis))
ax.set_ylabel("Empirical coverage (%)")
ax.xaxis.set_major_formatter(plt.NullFormatter())
# plt.axis('square')
ax.set_title(title_group[i])
ax = fig.add_subplot(2, 2, 3)
ax.plot(x_axis, stat_avg[:, 1], color='b', marker='.')
# plt.axis('square')
# ax.set_yscale("symlog")
ax.set_ylabel("Average set size")
ax.set_xlabel("Theoretical coverage (1 - " + r'$\alpha$' + ")" + "(%)")
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.xaxis.set_major_formatter(FuncFormatter(form3))
dataset = ["BACTERIA"]
output_bacteria = motivation_for_conformal_prediction(dataset[0], select_length=1, show=False, path_init=path_init,
data_path=data_path)
two_select_index = np.where(np.array([len(v) for v in output_bacteria[4]]) == 2)[0]
# fig = give_figure_specify_size(1.1, 0.8)
# ax_global = vis_utils.ax_global_get(fig)
# ax_global.set_xticks([])
# ax_global.set_yticks([])
# ax = fig.add_subplot(3, 2, 2)
# _show_motivation_for_conformal_prediction(*output_bacteria, select_index=579, ax=ax, save=False, pdf_pgf="None",
# path2save=None)
# ax.xaxis.set_major_formatter(plt.NullFormatter())
ax = fig.add_subplot(2, 2, 2)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=two_select_index[-4], ax=ax, save=False, pdf_pgf="None",
path2save=None)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_title("Example prediction set")
ax.set_ylabel("Intensity (a.u.)")
ax = fig.add_subplot(2, 2, 4)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=463, ax=ax, save=False, pdf_pgf="None",
path2save=None)
ax.set_ylabel("Intensity (a.u.)")
ax.set_xlabel("Wavenumber")
# plt.subplots_adjust(wspace=0.23)
# ax_global.set_xlabel("\nWavenumber (cm" + r"$^{-1}$" + ")")
# ax_global.set_ylabel("Intensity (a.u.) \n")
plt.subplots_adjust(hspace=0.1, wspace=0.2)
if save:
if pdf_pgf == "pdf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size_%s.pdf" % apply_softmax,
pad_inches=0, bbox_inches='tight')
elif pdf_pgf == "pgf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size.pgf",
pad_inches=0, bbox_inches='tight')
def give_conformal_prediction_for_multiple_datasets(path_init="../",
use_original_weight="weighted",
tds_dir=None, save=False, pdf_pgf="pdf",
data_path="../data_group/"):
# rruff raw
alpha_group_group = []
alpha_group = np.linspace(0, 0.03, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "raw")
stat_rruff_raw = main_plot_for_scoring_rule(path2load, split_version,
"raw", "RRUFF", use_original_weight,
alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.05, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "excellent_unoriented")
stat_rruff_preprocess = main_plot_for_scoring_rule(path2load, split_version,
"excellent_unoriented", "RRUFF",
"original", alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.011, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "organic_target_raw")
stat_organic_raw = main_plot_for_scoring_rule(path2load, split_version, "organic_target_raw", "ORGANIC",
"original", alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.04, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "organic_target")
stat_organic = main_plot_for_scoring_rule(path2load, split_version, "organic_target", "ORGANIC",
"original", alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.20, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "bacteria_reference_finetune")
stat_bacteria = main_plot_for_scoring_rule(path2load, split_version,
"bacteria_random_reference_finetune",
"BACTERIA", use_original_weight,
alpha_group, show=False, data_path=data_path)
fig = give_figure_specify_size(0.5, 4.0)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
ax_global.spines['top'].set_visible(False)
ax_global.spines['right'].set_visible(False)
ax_global.spines['bottom'].set_visible(False)
ax_global.spines['left'].set_visible(False)
title_group = ["Mineral (r): 94.48", "Mineral (p): 91.86", "Organic (r): 98.26", "Organic (p): 98.26",
"Bacteria: 82.71"]
loc = [[0.97, 0.958], [0.95, 0.95], [0.989, 0.987], [0.96, 0.987], [0.80, 0.92]]
orig_perf = [94.48, 91.86, 98.26, 98.26, 82.71]
orig_perf = [v - 1 for v in orig_perf]
for i, stat in enumerate([stat_rruff_raw, stat_rruff_preprocess,
stat_organic_raw, stat_organic, stat_bacteria]):
stat_avg = np.mean(stat, axis=0)
ax = fig.add_subplot(len(title_group), 1, i + 1)
vis_utils.show_twinx(alpha_group_group[i] * 100, stat_avg[:, 0] * 100, stat_avg[:, 1],
ax=ax)
ax.set_title(title_group[i])
ax.set_ylim(bottom=orig_perf[i])
ax.set_yticks(np.linspace(orig_perf[i], 100, 4))
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.xaxis.set_major_formatter(FuncFormatter(form3))
ax_global.set_ylabel("Empirical coverage (%) \n\n\n", color='r')
ax_global_t = ax_global.twinx()
ax_global_t.set_yticks([])
ax_global_t.spines['top'].set_visible(False)
ax_global_t.spines['right'].set_visible(False)
ax_global_t.spines['bottom'].set_visible(False)
ax_global_t.spines['left'].set_visible(False)
# ax_global_t.grid(None)
ax_global_t.set_ylabel("\n\n\n Average set size", color='g')
ax_global.set_xlabel("\n \n Theoretical coverage (1 - " + r'$\alpha$' + ")" + "(%)")
plt.subplots_adjust(hspace=0.47)
if save:
if pdf_pgf == "pdf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size.pdf",
pad_inches=0, bbox_inches='tight')
elif pdf_pgf == "pgf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size.pgf",
pad_inches=0, bbox_inches='tight')
def give_qualitative_result_allinone(path_init, tds_dir="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pdf", data_path="../data_group/"):
fig = give_figure_specify_size(1.2, 0.5)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
dataset_names = ["Mineral (r)", "Mineral (p)", "Organic", "Bacteria"]
for i in range(4):
ax_g_0 = fig.add_subplot(2, 4, i + 1)
ax_g_1 = fig.add_subplot(2, 4, i + 1 + 4)
if i == 0:
give_qualitative_result_rruff_raw(path_init, [ax_g_0, ax_g_1], data_path=data_path)
elif i == 1:
give_qualitative_result_rruff_preprocess(path_init, [ax_g_0, ax_g_1], data_path=data_path)
elif i == 2:
give_qualitative_result_organic(path_init, [ax_g_0, ax_g_1], data_path=data_path)
elif i == 3:
give_qualitative_result_bacteria(path_init, [ax_g_0, ax_g_1], data_path=data_path)
if i == 0:
ax_g_0.set_ylabel("Correct")
ax_g_1.set_ylabel("Wrong")
ax_g_0.set_title(dataset_names[i])
ax_global.set_xlabel("\n Wavenumber (cm" + r"$^{-1})$")
ax_global.set_ylabel("Intensity (a.u.)\n\n")
plt.subplots_adjust(wspace=0.05, hspace=0.05)
if save:
plt.savefig(tds_dir + "/qualitative_result.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
def form3(x, pos):
""" This function returns a string with 3 decimal places, given the input x"""
return '%.1f' % x
def find_the_best_threshold_and_evaluate_accuracy(val_prediction, tt_ensemble,
selected_index,
reference_label_val,
val_label, reference_label_tt, tt_label, predicted_label_tt,
voting_number):
"""This function finds the best threshold (uncertainty) based on the validation dataset. Then we group
the test predictions to low-uncertainty and high-uncertainty group and evaluate the matching accuracy under
each group
Args:
val_prediction: [original_val, weighted_val]
tt_ensemble: [original_tt_ensemble, weighted_tt_ensemble]
selected_index: [selected index for original, selected index for the weighted]
reference_label_val: the ground truth for the validation dataset
val_label: the ground truth for the validation dataset
reference_label_tt: the ground truth for the test dataset
tt_label: the ground truth for the test data
predicted_label_tt: the predicted label (it needs to be result after
applying majority voting for the bacteria dataset)
voting_number: the majority voting numbers
"""
keys = list(val_prediction[0].keys())
val_original_ensemble, \
val_weighted_ensemble = np.zeros_like(val_prediction[0][keys[0]]), np.zeros_like(val_prediction[0][keys[0]])
val_ensemble = [val_original_ensemble, val_weighted_ensemble]
for i, s_stat in enumerate(val_prediction):
for j, key in enumerate(s_stat.keys()):
if j in selected_index[i]:
val_ensemble[i] += s_stat[key]
val_ensemble = [v / len(selected_index[0]) for v in val_ensemble]
val_pred_baseon_class = [test.reorganize_similarity_score(v, reference_label_val)[0] for v in
val_ensemble]
if len(voting_number) == 0:
val_prediction = [reference_label_val[np.argmax(v, axis=-1)] for v in val_ensemble]
else:
val_prediction = []
for i, s_val_pred in enumerate(val_ensemble):
_, _pred_label = vis_utils.majority_voting(s_val_pred, reference_label_val,
val_label, voting_number[i])
val_prediction.append(_pred_label)
val_threshold = []
for i in range(2):
correct_or_wrong = np.array([0 if v == q else 1 for v, q in zip(val_prediction[i], val_label)])
if i == 0:
norm_pred = softmax(val_pred_baseon_class[i], axis=-1)
else:
norm_pred = val_pred_baseon_class[i]
selected_predict = norm_pred[np.arange(len(val_label)), val_prediction[i]]
_nll = -np.log(selected_predict)
fpr, tpr, thresholds = roc_curve(correct_or_wrong, _nll)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = thresholds[optimal_idx]
val_threshold.append(optimal_threshold)
stat_baseon_uncertainty = np.zeros([2, 4])
for i in range(2):
tt_pred_baseon_class, _ = test.reorganize_similarity_score(tt_ensemble[i],
reference_label_tt)
if i == 0:
tt_pred_baseon_class = softmax(tt_pred_baseon_class, axis=-1)
select_predict = tt_pred_baseon_class[np.arange(len(tt_label)), predicted_label_tt[i]]
_nll = -np.log(select_predict)
correct_or_wrong = np.array([0 if v == q else 1 for v, q in zip(predicted_label_tt[i], tt_label)])
high_uncertainty_index = np.where(_nll >= val_threshold[i])[0]
high_uncertainty_correct = len(high_uncertainty_index) - np.sum(correct_or_wrong[high_uncertainty_index])
low_uncertainty_index = np.where(_nll < val_threshold[i])[0]
low_uncertainty_correct = len(low_uncertainty_index) - np.sum(correct_or_wrong[low_uncertainty_index])
stat_baseon_uncertainty[i, :] = [low_uncertainty_correct, len(low_uncertainty_index),
high_uncertainty_correct, len(high_uncertainty_index)]
return stat_baseon_uncertainty, val_threshold
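# Hedged usage sketch (hypothetical variable names, not from the original pipeline):
# the returned stat_baseon_uncertainty is a 2x4 array, one row per weighting scheme
# ("original", "weighted"), holding
#   [low-uncertainty correct, low-uncertainty total, high-uncertainty correct, high-uncertainty total],
# while val_threshold holds the NLL cut-offs maximising Youden's J (tpr - fpr) on the validation set:
#
#   stat, thresholds = find_the_best_threshold_and_evaluate_accuracy(
#       [original_val, weighted_val], [original_tt_ens, weighted_tt_ens],
#       [orig_select, weight_select], reference_label_val, val_label,
#       reference_label_tt, tt_label, predicted_label_tt, voting_number=[])
#   low_uncertainty_accuracy = stat[0, 0] / stat[0, 1]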
def _give_uncertainty_distribution_for_single_dataset(dataset, raman_type,
num_select, voting_number, uncertainty, prediction_status,
split_version=100, qualitative_study=False, path_init="../",
get_similarity=False, data_path="../data_group/", strategy="sigmoid"):
path2load = path_init + "/%s/" % raman_type + "/tds/"
folder2read = [v for v in os.listdir(path2load) if os.path.isdir(path2load + v) and "split_%d" % split_version in v]
dir2load_data = path_init + "/%s/" % raman_type
dir2load_data = [dir2load_data + "/" + v + "/data_splitting/" for v in os.listdir(dir2load_data) if
"tds" not in v and "version_%d" % split_version in v][0]
folder2read = folder2read[0]
original_weight_stat = ["original", "weighted"]
folder2read = path2load + folder2read
val_prediction = pickle.load(open(folder2read + "/validation_prediction.obj", "rb"))
tt_prediction = pickle.load(open(folder2read + "/test_prediction.obj", "rb"))
original_val, weighted_val = val_prediction
original_tt, weighted_tt = tt_prediction
args = const.give_args_test(raman_type=raman_type)
args["pre_define_tt_filenames"] = True
validation_accuracy = np.zeros([len(list(original_val.keys())) - 1, 2])
if dataset == "RRUFF" or dataset == "ORGANIC":
if dataset == "RRUFF":
tr_data, tt_data, _, label_name_tr = test.get_data(args, dir2load_data, read_twin_triple="cls",
print_info=False, dir2read=data_path)
else:
tr_data, tt_data, _, label_name_tr = test.get_data(args, dir2load_data, read_twin_triple="cls",
print_info=False, dir2read=data_path)
fake_val_reference, fake_val_data = pdd.get_fake_reference_and_test_data(tr_data, 1, data=dataset)
reference_val_label, val_label = fake_val_reference[1], fake_val_data[1]
for j, key in enumerate(list(original_val.keys())[:-1]):
_val_pred = original_val[key]
if strategy == "sigmoid" or strategy == "sigmoid_softmax":
_val_pred = expit(_val_pred)
_correct = np.sum(fake_val_reference[1][np.argmax(_val_pred, axis=-1)] == fake_val_data[1]) / len(
fake_val_data[0])
validation_accuracy[j, 0] = _correct
for j, key in enumerate(list(weighted_val.keys())[:-1]):
_val_pred = weighted_val[key]
_correct = np.sum(fake_val_reference[1][np.argmax(_val_pred, axis=-1)] == fake_val_data[1]) / len(
fake_val_data[0])
validation_accuracy[j, 1] = _correct
else:
tr_data, val_data, tt_data, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls",
print_info=False, dir2read=data_path)
reference_val_label, val_label = tr_data[1], val_data[1]
for m, stat in enumerate([original_val, weighted_val]):
for j, key in enumerate(list(stat.keys())[:-1]):
if m == 0:
if strategy == "sigmoid" or strategy == "sigmoid_softmax":
_val_pred = expit(stat[key])
else:
_val_pred = stat[key]
_correct = np.sum(tr_data[1][np.argmax(_val_pred, axis=-1)] == val_data[1]) / len(val_data[1])
validation_accuracy[j, m] = _correct
original_select = np.argsort(validation_accuracy[:, 0])[-num_select:]
weighted_select = np.argsort(validation_accuracy[:, 1])[-num_select:]
for j, key in enumerate(list(original_tt.keys())):
if j == 0:
original_tt_ensemble = np.zeros_like(original_tt[key])
if strategy == "sigmoid" or strategy == "sigmoid_softmax":
original_tt[key] = expit(original_tt[key])
if j in original_select:
original_tt_ensemble += original_tt[key]
original_tt_ensemble /= len(original_select)
for j, key in enumerate(list(weighted_tt.keys())):
if j == 0:
weighted_tt_ensemble = np.zeros_like(weighted_tt[key])
if j in weighted_select:
weighted_tt_ensemble += weighted_tt[key]
weighted_tt_ensemble /= len(weighted_select)
predicted_label_on_test_data = []
correspond_tr_index = []
for j, single_stat in enumerate([original_tt_ensemble, weighted_tt_ensemble]):
if dataset != "BACTERIA":
_pred_label = tr_data[1][np.argmax(single_stat, axis=-1)]
accuracy = np.sum(_pred_label == np.array(tt_data[1])) / len(tt_data[0])
else:
accuracy, _pred_label = vis_utils.majority_voting(single_stat, tr_data[1],
tt_data[1], voting_number[j])
pred_baseon_class, corr_tr_index = test.reorganize_similarity_score(single_stat,
tr_data[1])
if strategy == "softmax":
pred_baseon_class = softmax(pred_baseon_class, axis=-1)
_nll_prediction = pred_baseon_class[np.arange(len(tt_data[0])), _pred_label]
print("NLL prediction", np.max(_nll_prediction), np.min(_nll_prediction))
_nll_score = _nll_prediction
if split_version == 100:
uncertainty.update({"%s_%s_%s" % (dataset, raman_type, original_weight_stat[j]): _nll_score})
else:
uncertainty.update({"%s_%s_%s_version_%d" % (dataset, raman_type, original_weight_stat[j],
split_version): _nll_score})
_pred_stat = np.concatenate([np.expand_dims(tt_data[1], axis=-1),
np.expand_dims(_pred_label, axis=-1)], axis=-1)
if split_version == 100:
prediction_status.update({"%s_%s_%s" % (dataset, raman_type, original_weight_stat[j]): _pred_stat})
else:
prediction_status.update({"%s_%s_%s_version_%d" % (dataset, raman_type, original_weight_stat[j],
split_version): _pred_stat})
print("%s + %s + %s : %.4f" % (dataset, raman_type, original_weight_stat[j], accuracy))
predicted_label_on_test_data.append(_pred_label)
correspond_tr_index.append(corr_tr_index)
accuracy_baseon_uncertainty, \
optimal_threshold = find_the_best_threshold_and_evaluate_accuracy([original_val, weighted_val],
[original_tt_ensemble, weighted_tt_ensemble],
[original_select, weighted_select],
reference_val_label,
val_label,
tr_data[1], tt_data[1],
predicted_label_on_test_data, voting_number)
if not qualitative_study:
return uncertainty, prediction_status, accuracy_baseon_uncertainty, optimal_threshold
else:
if not get_similarity:
return uncertainty, prediction_status, correspond_tr_index, \
optimal_threshold, tr_data, tt_data, label_name_tr, np.arange(args["max_wave"])[args["min_wave"]:]
else:
return original_val, original_tt_ensemble, original_select, \
reference_val_label, val_label, tr_data[1], tt_data[1]
def give_original_weight_uncertainty(uncertainty, prediction_status, dataset, use_nll_or_prob="nll"):
stat_orig, stat_weight = {}, {}
min_value = 0
high_value = [6 if use_nll_or_prob == "nll" else 1][0]
if dataset == "RRUFF_R":
num_bins = 5 # 8
elif dataset == "RRUFF_P":
num_bins = 5
elif dataset == "ORGANIC":
num_bins=3
else:
num_bins = 7
uncertainty_array, prediction_array = [], []
for key in uncertainty.keys():
predict_prob = uncertainty[key]
print(key, np.max(predict_prob), np.min(predict_prob))
_stat = group_uncertainty_and_prediction(predict_prob,
prediction_status[key],
min_value, high_value, num_bins, False)
if "weight" in key:
stat_weight[key] = _stat
else:
stat_orig[key] = _stat
prediction_array.append(prediction_status[key])
uncertainty_array.append(predict_prob)
if dataset == "RRUFF_Rs" or dataset == "RRUFF_Ps" or dataset == "ORGANICs":
return stat_weight
else:
return stat_orig, prediction_array, uncertainty_array
def give_avg_std_for_uncertainty(stat_weight):
stat = [[] for _ in range(3)]
max_dim = np.max([np.shape(stat_weight[key])[1] for key in stat_weight.keys()])
for key in stat_weight.keys():
_value = stat_weight[key]
if np.shape(_value)[1] < max_dim:
_value = np.concatenate([_value, np.zeros([len(_value), max_dim - np.shape(_value)[1]])],
axis=-1)
for j in range(3):
stat[j].append(_value[j])
for j, v in enumerate(stat):
stat[j] = np.array(v)
tot = stat[1] + stat[2]
tot[tot == 0] = 1
stat_c_percent = stat[1] / tot
stat_w_percent = stat[2] / tot
percent_stat = [stat_c_percent, stat_w_percent]
stat_avg, stat_std = [], []
for j in range(3):
if j == 0:
x_avg = np.sum(stat[0], axis=0) / np.sum(stat[0] != 0, axis=0)
else:
_divide = np.sum(percent_stat[j - 1] != 0, axis=0)
_divide[_divide == 0] = 1
x_avg = np.sum(percent_stat[j - 1], axis=0) / _divide
stat_avg.append(x_avg)
x_std = np.zeros_like(x_avg)
for m in range(np.shape(stat[0])[1]):
if j == 0:
v = stat[j][:, m]
else:
v = percent_stat[j - 1][:, m]
if len(v[v != 0]) > 0:
if np.sum(v[v != 0]) != 0:
x_std[m] = 1.95 * np.std(v[v != 0]) / np.sqrt(np.sum(v != 0))
stat_std.append(x_std)
return stat_avg, stat_std
def give_calibration_single_score_prediction(prediction, apply_softmax, label):
if apply_softmax == "softmax":
prediction = softmax(prediction, axis=-1)
elif apply_softmax == "sigmoid":
prediction = expit(prediction)
prediction_score = prediction[np.arange(len(prediction)), label]
return prediction_score
def give_test_prediction_baseon_single_score_threshold(prediction, apply_softmax, label, threshold, show=False):
if apply_softmax == "softmax":
prediction = softmax(prediction, axis=-1)
elif apply_softmax == "sigmoid":
prediction = expit(prediction)
prediction_select = [np.where(v >= threshold)[0] for v in prediction]
prediction_select = [v if len(v) > 0 else [np.argmax(prediction[i])] for i, v in enumerate(prediction_select)]
accuracy = [1 for i, v in enumerate(prediction_select) if label[i] in v]
if show:
print("Matching accuracy %.2f" % (np.sum(accuracy) / len(label)))
return prediction_select, np.sum(accuracy) / len(label)
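# Worked example (illustrative values only): with threshold = 0.2 and a class-wise score
# vector prediction[i] = [0.05, 0.55, 0.25, 0.15], the prediction set for sample i is
# {1, 2} (all classes scoring >= 0.2); if no class reaches the threshold, the set falls
# back to the argmax class, so every sample always receives at least one candidate label.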
def main_plot_for_scoring_rule(path2load, data_split, raman_type, dataset, use_original_or_weighted,
alpha_group, show=False, data_path="../data_group/", apply_softmax=True):
statistics_group = np.zeros([len(data_split), len(alpha_group), 2])
if dataset == "RRUFF":
args = const.give_args_test(raman_type=raman_type)
args["pre_define_tt_filenames"] = False
tr_data, tt_data, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls",
print_info=False, dir2read=data_path)
[_, reference_val_label], [_, val_label] = pdd.get_fake_reference_and_test_data(tr_data, 1, data=dataset)
tr_label_group = [reference_val_label, tr_data[1]]
tt_label = tt_data[1]
elif dataset == "BACTERIA":
args = const.give_args_test(raman_type="bacteria_random_reference_finetune")
args["pre_define_tt_filenames"] = False
tr_data, val_data, tt_data, _, _ = test.get_data(args, None, read_twin_triple="cls", print_info=False,
dir2read=data_path)
tr_label_group = [tr_data[1], tr_data[1]]
val_label, tt_label = val_data[1], tt_data[1]
else:
args = const.give_args_test(raman_type=raman_type)
args["pre_define_tt_filenames"] = True
for split_index, s_split in enumerate(data_split):
path = path2load + [v for v in os.listdir(path2load) if "split_%d" % s_split in v and ".txt" not in v][0] + "/"
# path = path2load + "split_%d/" % s_split
val_prediction = pickle.load(open(path + "validation_prediction.obj", "rb"))
tt_prediction = pickle.load(open(path + "test_prediction.obj", "rb"))
if use_original_or_weighted == "original":
val_pred_en, tt_pred_en = val_prediction[0]["ensemble_avg"], tt_prediction[0]["ensemble_avg"]
elif use_original_or_weighted == "weighted":
val_pred_en, tt_pred_en = val_prediction[1]["ensemble_avg"], tt_prediction[1]["ensemble_avg"]
if dataset == "ORGANIC":
_fake_reference_label, val_label, _tr_label, tt_label = _get_dir_update(path2load, s_split, args,
data_path=data_path)
tr_label_group = [_fake_reference_label, _tr_label]
val_pred_baseon_cls, _ = test.reorganize_similarity_score(val_pred_en, tr_label_group[0])
tt_pred_baseon_cls, _ = test.reorganize_similarity_score(tt_pred_en, tr_label_group[1])
# Theoretical accuracy
_tt_top1 = np.argmax(tt_pred_baseon_cls, axis=1)
_compare = np.sum([v == q for v, q in zip(_tt_top1, tt_label)])
# print("Theoretical accuracy", _compare / len(_tt_top1))
print(np.max(val_pred_baseon_cls), np.min(val_pred_baseon_cls))
_statistics = give_plot_for_first_scoring_rule(val_pred_baseon_cls,
tt_pred_baseon_cls,
val_label, tt_label,
alpha_group=alpha_group, ax=None, show=show, apply_softmax=apply_softmax)
statistics_group[split_index] = _statistics
if show:
statistics_avg = np.mean(statistics_group, axis=0)
statistics_conf = np.std(statistics_group, axis=0) # / np.sqrt(len(data_split))
vis_utils.show_twinx(alpha_group, statistics_avg[:, 0], statistics_avg[:, 1], None)
return statistics_group
def get_path_for_conformal(path_init, raman_type):
path2load = [path_init + v for v in os.listdir(path_init) if v == raman_type][0] + "/tds/"
split_version = [int(v.split("split_")[1].split(".txt")[0]) for v in os.listdir(path2load) if
"split_" in v and '.txt' in v]
return path2load, split_version
def give_plot_for_first_scoring_rule(val_prediction, test_prediction, val_label, tt_label,
alpha_group, ax=None, show=False, apply_softmax=True):
val_prediction_score = give_calibration_single_score_prediction(val_prediction, apply_softmax, val_label)
stat_group = np.zeros([len(alpha_group), 2])
for i, s_alpha in enumerate(alpha_group):
        threshold = np.quantile(val_prediction_score, s_alpha)
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from HParams import HParams
from models_AAE import DecoderAAE, EncoderAAE, InnerDecoder, InnerEncoder
TINY = 1e-15
class SketchAAE():
def __init__(self, hp: HParams):
self.hp = hp
self.device = torch.device("cuda" if torch.cuda.is_available() and not hp.fast_debug else "cpu") #and os.name != 'nt' #and not hp.fast_debug
self.innerEncoder = InnerEncoder(self.hp).to(self.device)
self.innerDecoder = InnerDecoder(self.hp).to(self.device)
self.encoder = EncoderAAE(self.innerEncoder, self.hp).to(self.device)
self.decoder = DecoderAAE(self.innerDecoder, self.hp).to(self.device)
self.create_optims()
def create_optims(self):
self.train(inner=False, outer=True, skip_zero_grad=True)
self.encoder_params =filter(lambda p: p.requires_grad, self.encoder.parameters())
self.decoder_params =filter(lambda p: p.requires_grad, self.decoder.parameters())
self.fw_encoder_optimizer = optim.Adam(self.encoder_params, self.hp.lr)
self.fw_decoder_optimizer = optim.Adam(self.decoder_params, self.hp.lr)
self.inner_encoder_optimizer = optim.Adam(self.innerEncoder.parameters(), self.hp.inner_lr)
self.inner_decoder_optimizer = optim.Adam(self.innerDecoder.parameters(), self.hp.inner_lr)
def zero_grad_models(self):
self.encoder.zero_grad()
self.decoder.zero_grad()
self.innerEncoder.zero_grad()
self.innerDecoder.zero_grad()
def zero_grad_optims(self):
self.fw_encoder_optimizer.zero_grad()
self.fw_decoder_optimizer.zero_grad()
self.inner_encoder_optimizer.zero_grad()
self.inner_decoder_optimizer.zero_grad()
def reset_inner(self):
self.innerEncoder.reset_grads()
self.innerDecoder.reset_grads()
def lr_decay(self, outer=False, inner=False):
def opt_decay(optimizer, hp: HParams):
"""Decay learning rate by a factor of lr_decay"""
for param_group in optimizer.param_groups:
if param_group['lr']>hp.min_lr:
param_group['lr'] *= hp.lr_decay
if outer:
opt_decay(self.fw_encoder_optimizer, self.hp)
opt_decay(self.fw_decoder_optimizer, self.hp)
if inner:
opt_decay(self.inner_encoder_optimizer, self.hp)
opt_decay(self.inner_decoder_optimizer, self.hp)
def train(self, outer=True, inner=True, skip_zero_grad=False):
self.encoder.train(outer)
self.decoder.train(outer)
self.innerEncoder.train(inner)
self.innerDecoder.train(inner)
for param in self.encoder.parameters():
param.requires_grad = outer
for param in self.decoder.parameters():
param.requires_grad = outer
for param in self.innerEncoder.parameters():
param.requires_grad = inner
for param in self.innerDecoder.parameters():
param.requires_grad = inner
if not skip_zero_grad:
self.zero_grad_models()
self.zero_grad_optims()
def eval(self):
self.train(outer=False, inner=False)
def optim_step(self, inner=True, outer=True):
if inner:
nn.utils.clip_grad_norm_(self.innerEncoder.parameters(), self.hp.grad_clip)
nn.utils.clip_grad_norm_(self.innerDecoder.parameters(), self.hp.grad_clip)
self.inner_encoder_optimizer.step()
self.inner_decoder_optimizer.step()
if outer:
nn.utils.clip_grad_norm_(self.encoder_params, self.hp.grad_clip)
nn.utils.clip_grad_norm_(self.decoder_params, self.hp.grad_clip)
self.fw_encoder_optimizer.step()
self.fw_decoder_optimizer.step()
self.zero_grad_models()
self.zero_grad_optims()
def train_inner(self, train=True):
def sample_categorical(batch_size, n_classes):
cat = np.random.randint(0, n_classes, batch_size)
onehot = np.eye(n_classes)[cat].astype('float32')
return torch.from_numpy(onehot)
def CELoss(x_pred,x_target,use_mean=True):
assert x_target.size() == x_pred.size(), "size fail ! "+str(x_target.size()) + " " + str(x_pred.size())
logged_x_pred = torch.log(TINY+x_pred)
if use_mean:
cost_value = torch.mean(-torch.sum(x_target * logged_x_pred, dim=1))
else:
cost_value = -torch.sum(x_target * logged_x_pred)
return cost_value
self.train(inner=train, outer=False)
b_size = self.hp.batch_size
input_onehot = sample_categorical(b_size, n_classes=self.hp.cat_dims).to(self.device)
input_style = torch.rand(b_size, self.hp.style_dims, device=self.device) #
#not sure if we should do a softmax on input_onehot...
cat_style = torch.cat([input_onehot, input_style],1).requires_grad_()
z = self.innerDecoder(cat_style)
out_onehot, out_style = self.innerEncoder(z)
LC = CELoss(out_onehot, input_onehot)
LS = F.mse_loss(out_style, input_style)
L3 = MapLoss(out_style, varPct=0.01)
L1 = LC+LS+L3
if train:
L1.backward()
self.optim_step(inner=train, outer=False)
input_encoder = F.softsign(torch.randn(b_size, self.innerEncoder.input_size, device=self.device)).requires_grad_()
out_cat, out_style2 = self.innerEncoder(input_encoder)
cat_style2 = torch.cat([out_cat, out_style2],1)
output_decoder = self.innerDecoder(cat_style2)
LF = F.mse_loss(output_decoder, input_encoder)
L2 = LF
if train:
L2.backward()
self.optim_step(inner=train, outer=False)
loss = L1+L2
return val(loss), val(LF), val(LC), val(LS)
def train_reconstruction(self, batch, lengths):
self.train(inner=False, outer=True)
LS, LP, map_loss = self._forward(batch, lengths)
loss = LS+LP+map_loss
loss.backward()
self.optim_step(inner=False, outer=True)
return val(loss), val(LS), val(LP), val(map_loss)
def _forward(self, batch, lengths):
batch_len = batch.size()[1]
cat, style = self.encoder(batch, batch_len)
sos = torch.stack([torch.tensor([0,0,1,0,0], device=self.device, dtype=torch.float)]*batch_len).unsqueeze(0)
inputs = torch.cat([sos, batch],0)
z = torch.cat([cat, style],1)
self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, self.rho_xy, self.q, _, _ = self.decoder(inputs, z, self.Nmax, hidden_cell=None)
mask,dx,dy,p = self.make_target(batch, lengths)
LS, LP = self.reconstruction_loss(mask,dx,dy,p)
map_loss = MapLoss(style, cntPct=0.00000001, varPct=0.000000001) #moving center, leave var at 0.000000001
return LS, LP, map_loss
def make_target(self, batch, lengths):
eos = torch.stack([torch.tensor([0,0,0,0,1], device=self.device, dtype=torch.float)]*batch.size()[1]).unsqueeze(0)
batch = torch.cat([batch, eos], 0)
mask = torch.zeros(self.Nmax+1, batch.size()[1], device=self.device)
for indice,length in enumerate(lengths):
mask[:length,indice] = 1
dx = torch.stack([batch.data[:,:,0]]*self.hp.M,2)
dy = torch.stack([batch.data[:,:,1]]*self.hp.M,2)
p1 = batch.data[:,:,2]
p2 = batch.data[:,:,3]
p3 = batch.data[:,:,4]
p = torch.stack([p1,p2,p3],2)
return mask,dx,dy,p
def bivariate_normal_pdf(self, dx, dy):
z_x = ((dx-self.mu_x)/self.sigma_x)**2
z_y = ((dy-self.mu_y)/self.sigma_y)**2
z_xy = (dx-self.mu_x)*(dy-self.mu_y)/(self.sigma_x*self.sigma_y)
z = z_x + z_y -2*self.rho_xy*z_xy
exp = torch.exp(-z/(2*(1-self.rho_xy**2)))
norm = 2*np.pi*self.sigma_x*self.sigma_y*torch.sqrt(1-self.rho_xy**2)
return exp/norm
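    # For reference, the expression above evaluates the bivariate Gaussian density
    #   N(dx, dy) = exp(-z / (2 (1 - rho_xy^2))) / (2 pi sigma_x sigma_y sqrt(1 - rho_xy^2)),
    # with
    #   z = ((dx - mu_x) / sigma_x)^2 + ((dy - mu_y) / sigma_y)^2
    #       - 2 rho_xy (dx - mu_x)(dy - mu_y) / (sigma_x sigma_y),
    # i.e. the mixture-density formulation used in Sketch-RNN style decoders.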
def reconstruction_loss(self, mask, dx, dy, p):
        batch_len = mask.size()[1]
        pdf = self.bivariate_normal_pdf(dx, dy)
        ls_all = mask*torch.log(TINY+torch.sum(self.pi * pdf, 2))
        #not checking min because it performs worse
        LS = -torch.sum(ls_all)/float(self.Nmax*batch_len)
        lp_all = p*torch.log(TINY+self.q)
        #lp_zeros = torch.zeros_like(lp_all).detach()
        #lp_all = torch.min(lp_all, lp_zeros)
        LP = -torch.sum(lp_all)/float(self.Nmax*batch_len)
return LS, LP
def generation_for_category(self, category, x_count=10, y_count=10, x_offset=8, y_offset=8):
with torch.no_grad():
self.eval()
cat = torch.zeros((self.hp.cat_dims), device=self.device)
cat[category] = 1
cat = F.softmax(cat, dim=0)
out = []
style = torch.zeros((self.hp.style_dims), device=self.device)
xedges = np.linspace(0, 1, num=x_count)
yedges = np.linspace(0, 1, num=y_count)
for y in range(y_count):
for x in range(x_count):
style[0] = xedges[x]
style[1] = yedges[y]
z = torch.cat([cat, style]).unsqueeze(0)
s = torch.tensor([0,0,1,0,0], device=self.device, dtype=torch.float).view(1,1,-1)
seq_x = []
seq_y = []
seq_z = []
hidden_cell = None
for i in range(self.hp.max_seq_length):
self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, \
self.rho_xy, self.q, hidden, cell = \
self.decoder(s, z, 0, hidden_cell=hidden_cell)
hidden_cell = (hidden, cell)
# sample from parameters:
s, dx, dy, pen_down, eos = self.sample_next_state()
#------
if i == self.hp.max_seq_length - 1:
eos = True
seq_x.append(dx)
seq_y.append(dy)
seq_z.append(pen_down or eos)
if eos:
break
# visualize result:
x_sample = np.cumsum(seq_x, 0)
y_sample = np.cumsum(seq_y, 0)
if x_sample.min() < 0:
x_sample[:] += -x_sample.min()
if y_sample.min() < 0:
y_sample[:] += -y_sample.min()
x_sample = np.abs(x_sample)
y_sample = np.abs(y_sample)
width = x_sample.max()
height = y_sample.max()
v_max = max(width, height)
if v_max > (x_offset-1) or v_max > (y_offset-1):
x_scale = ((x_offset-1) / v_max)
y_scale = ((y_offset-1) / v_max)
x_sample[:] *= x_scale
y_sample[:] *= y_scale
#switching x/y here is intentional
x_sample[:] += (y * y_offset)
y_sample[:] -= (x * x_offset)
z_sample = np.array(seq_z)
sequence = np.stack([x_sample,y_sample,z_sample]).T
out.append(sequence)
return np.concatenate(out)
def conditional_generation(self, batch, lengths, Nmax):
# should remove dropouts:
self.eval()
with torch.no_grad():
# encode:
batch = torch.unsqueeze(batch, 1)
cat, style = self.encoder(batch, 1)
#cat = F.softmax(cat, dim=0)
z = torch.cat([cat, style],1)
s = torch.tensor([0,0,1,0,0], device=self.device, dtype=torch.float).view(1,1,-1)
seq_x = []
seq_y = []
seq_z = []
hidden_cell = None
for i in range(Nmax):
# decode:
self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, \
self.rho_xy, self.q, hidden, cell = \
self.decoder(s, z, Nmax, hidden_cell=hidden_cell)
hidden_cell = (hidden, cell)
# sample from parameters:
s, dx, dy, pen_down, eos = self.sample_next_state()
#------
seq_x.append(dx)
seq_y.append(dy)
seq_z.append(pen_down)
if eos:
print("count: ", i)
break
# visualize result:
x_sample = np.cumsum(seq_x, 0)
y_sample = np.cumsum(seq_y, 0)
z_sample = np.array(seq_z)
sequence = np.stack([x_sample,y_sample,z_sample]).T
return sequence
def sample_next_state(self):
# get mixture indice:
pi = self.adjust_temp_tensor(self.pi.data[0,0,:]).cpu().numpy()
pi_idx = np.random.choice(self.hp.M, p=pi)
# get pen state:
q = self.adjust_temp_tensor(self.q.data[0,0,:]).cpu().numpy()
        q_idx = np.random.choice(3, p=q)
# graph utility for warehouse optimisation
#%% import packages
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import math
#%% import packages from other folders
import logproj.ml_graphs as dg
from logproj.ml_dataCleaning import cleanUsingIQR
# %%
def defineCoordinatesFromRackBayLevel(D_layout, aisleX=5.0, bayY=0.9):
    # defines the x and y coordinates for each location based on
    # the aisle number (rack) and
    # the bay number (bay)
    # to be used when cartesian coordinates have not been mapped
    # writes the output to the loccodex and loccodey columns of the D_layout dataframe
print(f"Assuming aisle width of {aisleX} meters and bay width (pallet) of {bayY} meters")
    # identify aisles
D_layout['loccodex']=-1
D_layout['loccodey']=-1
allAisles=list(set(D_layout.rack))
allAisles.sort()
j=0
    # loop over all aisles
for x in allAisles:
        # assign the x coordinate based on the spacing between aisles
idx_x=D_layout.rack==x
D_layout['loccodex'].loc[idx_x]=aisleX*j
j=j+1
        # identify all the bays in this aisle
allBays=list(set(D_layout['bay'].loc[idx_x]))
i=0
for y in allBays:
#assegno la coordinata y in base al passo fra una campata e l'altra
# per ipotesi tutte le corsie iniziano sul fronte
idx_y=(D_layout.rack==x) & (D_layout.bay==y)
D_layout['loccodey'].loc[idx_y]=bayY*i
i=i+1
return D_layout
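# Worked example (illustrative, not from the original data): with aisleX=5.0 and bayY=0.9,
# a location in the third rack (j=2) and fourth bay of that rack (i=3) is placed at
# loccodex = 5.0 * 2 = 10.0 m and loccodey = 0.9 * 3 = 2.7 m; all aisles share y = 0 at the front.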
# %%
def estimateMissingAislecoordX(D_layout,draw=False):
    # save the initial dataset
'''
if draw:
msgn.matrix(D_layout)
plt.title("Initial Layout Data")
plt.savefig("01InitialDataset.png")
'''
    # estimate the aisle coordinate values when they have not been mapped (aislecodex column of the D_layout dataframe)
    #####################################################
    #### replace nulls in loccodex and loccodey #########
    #####################################################
D_layout = D_layout.reset_index()
    # if rack information is available
if 'rack' in D_layout.columns:
D_layout=D_layout.sort_values(['rack', 'bay'], ascending=[True, True])
allRacks=list(set(D_layout.rack.dropna()))
for rack in allRacks:
D_rack=D_layout[D_layout.rack==rack]
            # try to compute a mean x value for the aisle
avgXCoord=np.mean(D_rack.loccodex)
            if not(math.isnan(avgXCoord)): # if a value was found
D_rack['loccodex'].fillna(avgXCoord, inplace=True)
            else: # if all values are null, look in the neighbourhood and interpolate
D_rack_null = D_layout[['rack','loccodex']].drop_duplicates()
D_rack_null=D_rack_null.sort_values('rack')
D_rack_null['loccodex'].fillna(method='backfill', inplace=True)
fillValue=float(D_rack_null[D_rack_null.rack==rack].loccodex)
                # at this point substitute the value
D_rack['loccodex'].fillna(fillValue, inplace=True)
            # at this point set the aisle values based on nearest neighbour
D_rack['loccodey'].interpolate(method ='linear', limit_direction ='forward', inplace=True)
            # update D_layout
D_layout.loc[D_rack.index] = D_rack
        # remove any remaining nulls
D_layout=D_layout.sort_values(by=['rack','bay'])
print(f"====={len(D_layout[D_layout.loccodex.isnull()])} x coordinates have been randomly interpolated")
        D_layout['loccodex'].fillna(method='ffill', inplace=True) # forward-fill if there are further nulls
        D_layout['loccodex'].fillna(method='bfill', inplace=True) # backward-fill if there are further nulls
else:
print("No rack information")
'''
if draw:
msgn.matrix(D_layout)
plt.title("Fill LoccodeX and LoccodeY")
plt.savefig("02FillXY.png")
'''
#####################################################
    ###### estimate coordinates of missing aisles #######
    #####################################################
    # identify the coordinates of the mapped aisles (aislecodex)
D_givAisl=D_layout[D_layout['aislecodex'].notna()]
D_givAisl=D_givAisl[['loccodex','aislecodex']]
D_givAisl=D_givAisl.drop_duplicates()
    # identify the coordinates of the aisles still to be mapped
D_estAisl=D_layout[D_layout['loccodex'].notna()].loccodex
allXcoords=list(set(D_estAisl))
allXcoords.sort()
    # pair the coordinates, putting the farthest apart on the same aisle
dist=0
for j in range(1,len(allXcoords)):
dist=dist+np.abs(allXcoords[j]-allXcoords[j-1])
if len(allXcoords)>1:
avg_dist=dist/(len(allXcoords)-1)
else:
avg_dist=0
    # if the distance is greater than the average, pair them on the same aisle
D_estAisl=pd.DataFrame(columns=D_givAisl.columns)
j=0
while j<len(allXcoords):
        if j < len(allXcoords)-1: # for every aisle except the last
dist=np.abs(allXcoords[j+1]-allXcoords[j])
            if dist>=avg_dist: # if they are farther apart than the average they face the same aisle (also holds for ties, so the equally-spaced case is covered)
aisle=min(allXcoords[j+1],allXcoords[j]) + dist/2
D_estAisl=D_estAisl.append(pd.DataFrame([[allXcoords[j],aisle]],columns=D_estAisl.columns))
D_estAisl=D_estAisl.append(pd.DataFrame([[allXcoords[j+1],aisle]],columns=D_estAisl.columns))
                j=j+2 # paired two, skip ahead by two
            else: # otherwise it forms an aisle on its own
D_estAisl=D_estAisl.append(pd.DataFrame([[allXcoords[j],allXcoords[j]]],columns=D_estAisl.columns))
                j=j+1 # paired one, skip ahead by one
        elif j == len(allXcoords)-1: # if this is the last aisle
D_estAisl=D_estAisl.append(pd.DataFrame([[allXcoords[j],allXcoords[j]]],columns=D_estAisl.columns))
            j=j+1 # paired one, skip ahead by one
#plt.scatter(allXcoords, np.ones(len(allXcoords)))
#plt.scatter(D_estAisl.loccodex, np.ones(len(allXcoords)))
#plt.scatter(D_estAisl.aislecodex, np.ones(len(allXcoords)), c='r', marker='*', s=2)
# data cleaning
#replace None with nan
D_layout.replace(to_replace=[None], value=np.nan, inplace=True)
#check null aisle values
index = D_layout['aislecodex'].index[D_layout['aislecodex'].apply(np.isnan)]
for rows in index:
loccodex=D_layout.loc[rows].loccodex
#if the value is known
if loccodex in D_givAisl.loccodex:
D_layout['aislecodex'].loc[rows]=float(D_givAisl[D_givAisl['loccodex']==loccodex].aislecodex)
else:
D_layout['aislecodex'].loc[rows]=float(D_estAisl[D_estAisl['loccodex']==loccodex].aislecodex)
'''
if draw:
msgn.matrix(D_layout)
plt.title("Fill aislecodeX")
plt.savefig("03FillaislecodeX.png")
'''
#check if coordinates exist otherwise replace with rack/bay/level
#remove rack/bay/level
if 'rack' in D_layout.columns:
D_layout=D_layout.sort_values(by=['rack','bay'])
else:
D_layout=D_layout.sort_values(by=['aislecodex'])
D_layout=D_layout[['idlocation', 'aislecodex', 'loccodex', 'loccodey']]
    # interpolate any y coordinates still missing (last resort)
print(f"====={len(D_layout[D_layout.loccodey.isnull()])} y coordinates have been randomly interpolated")
D_layout['loccodey'].interpolate(method ='linear', limit_direction ='forward', inplace=True)
    D_layout['loccodey'].fillna(method='ffill', inplace=True) # forward-fill if there are further nulls
    D_layout['loccodey'].fillna(method='bfill', inplace=True) # backward-fill if there are further nulls
'''
if draw:
msgn.matrix(D_layout)
plt.title("Final dataset")
plt.savefig("04Fill nan loccodey.png")
plt.close('all')
'''
#remove null
#D_layout=D_layout.dropna()
    # round the x and y coordinates to reduce errors in the mapping
D_layout['aislecodex']=np.round(D_layout['aislecodex'],0)
D_layout['loccodey']=np.round(D_layout['loccodey'],0)
return D_layout
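# Worked example of the aisle-pairing step (illustrative numbers): for location x-coordinates
# [0, 5, 10, 15] the average spacing is 5, so (0, 5) are paired into a shared aisle at
# aislecodex = 2.5 and (10, 15) into one at 12.5; an x-coordinate left without a partner
# (or closer than the average to its neighbour) keeps its own value as aislecodex.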
# %%
def defineGraphNodes(D_layout, D_IO):
    # the function defines the mapping between idlocation and graph nodes
    # mappings are defined for both physical locations and I/O points
    # (fictitious locations have already been assigned the I/O coordinates)
    # the function returns a table D_nodes with the node coordinates,
    # a dictionary D_res with the mapping between idlocation (key) and idNode (value),
    # and a dataframe D_IO with the input/output coordinates
    # define all the nodes of the graph
D_nodes=D_layout[['aislecodex','loccodey']].drop_duplicates().reset_index(drop=True)
#plt.scatter(D_nodes.aislecodex, D_nodes.loccodey)
    # add the mapping between D_layout and D_nodes
D_layout['idNode']=None
for index, node in D_nodes.iterrows():
idx_node=(D_layout.aislecodex==node.aislecodex) & (D_layout.loccodey==node.loccodey)
D_layout.idNode.loc[idx_node]=index
    # add the I/O nodes
#redefine index of D_IO to avoid overlaps with D_nodes
D_IO.index = np.arange(max(D_nodes.index.values)+1, max(D_nodes.index.values) + 1 + len(D_IO))
for index, node in D_IO.iterrows():
        idx_node=node.idlocation # take the idlocation of the fake location
temp = pd.DataFrame([[idx_node, node.loccodex, node.loccodex, node.loccodey, index]],columns=D_layout.columns)
D_layout=D_layout.append(temp)
D_res=D_layout[['idlocation','idNode']]
D_res=D_res.drop_duplicates()
#D_res.set_index('idlocation',drop=True)
#D_res=D_res['idNode'].to_dict()
D_res_dict = dict(zip(D_res.idlocation, D_res.idNode))
return D_nodes, D_res_dict, D_IO
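# Note on the return values (hedged summary of the code above): D_nodes indexes the distinct
# (aislecodex, loccodey) pairs, D_res_dict maps each idlocation to its idNode (an index into
# D_nodes / D_IO), and D_IO is returned with its index shifted so it never overlaps D_nodes;
# e.g. D_res_dict[some_idlocation] can be used directly as a graph node id.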
def addtraversaledges(D_nodes,list_aisles,edgeTable,columns_edgeTable, index_source, index_target):
    D_Aisle1=D_nodes[D_nodes.aislecodex==list_aisles[index_source]] # identify the coordinates of the first aisle
    D_Aisle2=D_nodes[D_nodes.aislecodex==list_aisles[index_target]] # identify the coordinates of the second aisle
    # if we are connecting two "traditional" aisles (both with more than one storage location)
if (len(D_Aisle1)>1) & (len(D_Aisle2)>1):
        # identify the two locations at the back
node1_front_index=D_Aisle1['loccodey'].idxmax()
node2_front_index=D_Aisle2['loccodey'].idxmax()
        # add the edge
#nodeFrom=D_Aisle1.index[node1_front_index]
#nodeTo=D_Aisle2.index[node2_front_index]
length=np.round(np.abs(D_Aisle1.aislecodex.loc[node1_front_index]-D_Aisle2.aislecodex.loc[node2_front_index]),1)
temp=pd.DataFrame([[node1_front_index,node2_front_index,length]],columns=columns_edgeTable)
edgeTable=edgeTable.append(temp)
#print([node1_front_index,node2_front_index])
        # identify the two locations at the front
node1_front_index=D_Aisle1['loccodey'].idxmin()
node2_front_index=D_Aisle2['loccodey'].idxmin()
        # add the edge
#nodeFrom=D_Aisle1.index[node1_front_index]
#nodeTo=D_Aisle2.index[node2_front_index]
length=np.round(np.abs(D_Aisle1.aislecodex.loc[node1_front_index]-D_Aisle2.aislecodex.loc[node2_front_index]),1)
temp=pd.DataFrame([[node1_front_index,node2_front_index,length]],columns=columns_edgeTable)
edgeTable=edgeTable.append(temp)
    else: # here we are connecting single locations (e.g. floor storage areas)
        if len(D_Aisle1)>1: # if the first one is a traditional aisle
            # identify the two coordinates of the first aisle
node1_back_index=D_Aisle1['loccodey'].idxmax()
node1_front_index=D_Aisle1['loccodey'].idxmin()
            node2_front_index=D_Aisle2['loccodey'].idxmax() # returns the index of the only location
            # make only one connection to the closest one (compute both distances)
length_back=np.round(np.abs(D_Aisle1.aislecodex.loc[node1_back_index]-D_Aisle2.aislecodex.loc[node2_front_index]) + np.abs(D_Aisle1.loccodey.loc[node1_back_index]-D_Aisle2.loccodey.loc[node2_front_index]),1)
            length_front=np.round(np.abs(D_Aisle1.aislecodex.loc[node1_front_index]-D_Aisle2.aislecodex.loc[node2_front_index]) + np.abs(D_Aisle1.loccodey.loc[node1_front_index]-D_Aisle2.loccodey.loc[node2_front_index]),1)
#!/usr/bin/env python
"""
Until the 18 Apr 2017 there is no version of abcpmc that allows for np.random seeds within multiprocessing processes!
The alternative astroABC that can handle this. One simple workaround is to set the np.seed(...) within the function that has to be called.
"""
from __future__ import division,print_function
import argparse
import time
import datetime
import os
import copy
import traceback
""" In the local folder ... """
import surveysim.music2.mockobs as mockobs
import surveysim.music2.loadsnap as loadsnap
import surveysim.music2.radiomodel as radiomodel
""" ==="""
import multiprocessing as mupro
import numpy as np
import pandas as pd
import clusterbuster.surveyclasses as cbclass
import clusterbuster.iout.misc as iom
import clusterbuster.surveyut as suut
import clusterbuster.constants as myu
import time
import timeit
import random
from random import uniform
from astropy.cosmology import FlatLambdaCDM
#====
par = argparse.ArgumentParser()
par.add_argument('--par', default='MUSIC2COOL_NVSS', help='parset file to run' )
""" "--par 'MUSIC_NVSS01.parset" """
args, unknown = par.parse_known_args()
#=========================================================================================================================================================
POISON_PILL = "STOP"
"""Originally main() was designed as independent functions because I wanted to prevent the buffer to overflow.
Now I use these two functions as the one that controls the ABC (main) and the one that does the computation (shedule task)"""
def main(parfile, workdir=None, ABC=None, verbose=False, survey=None, index=None, Clfile='clusterCSV/MUSIC2-AGN',
processTasks=True):
"""
    input: parameter file
    ABC   : None or a list of parameters (what a pity that a dictionary is not possible with the current abcpmc or ABCpmc module)
    output: some strings to track down the output directories
    Once survey is not None all the default survey values will be used;
    i.e. the parfile will not be considered much.
    For future: - delete survey findings (or add them for a different detection parameter)
                  or make the Rmodel changeable
    If pase['default'] is False then a run will be made with one shell and one single cluster realisation per snapshot
    For abcpmc MUSIC-2 vanilla use MUSIC2-AGN for clusterCSV and MUSIC2_NVSS02_SSD.parset for parset
    For MUSIC-2 cooling use MUSIC2-AGN for clusterCSV and MUSIC2COOL_NVSS.parset for parset
"""
RModelID = os.getpid() # get process id
seed = random.randrange(4294967295)
np.random.seed(seed=seed)
"""also possible : np.random.seed(seed=None)
... this here is only to save the seed, which is not needed, because all the random stuff can be reconstructed from the cluster statistics
processes from random are well seeded even without this, somehow numpy needs this kind of seed
but only in abcpmc and not ABC
import random
random.randrange(sys.maxsize)
"""
    # === Read parset; then extract fundamental parameters ... The parset MUST be saved as 'parsets/*.parset'
if workdir is None: workdir = os.path.dirname(__file__) + '/'
pase, Z = suut.interpret_parset(parfile, repository=workdir + '/parsets/')
# TODO: workdir should be the same as pase['miscdir'] yet the issue is that it is unknown before miscdir is known.
if survey is None:
surveyN = parfile.replace('.parset', '')
savefolder = pase['outf'] + surveyN
logfolder = pase['outf'] + surveyN
else:
surveyN = survey.name
savefolder = survey.outfolder
logfolder = survey.logfolder
# Constants
# ==== Cosmological Parameter - MUSIC-2
Omega_M = 0.27 # Matter density parameter
# === Create folder if needed
iom.check_mkdir(savefolder)
iom.check_mkdir(logfolder)
smt = iom.SmartTiming(rate=pase['smarttime_sub'], logf=savefolder + '/smt');
smt(task='Prepare_Clusters')
# === Create detection information
dinfo = cbclass.DetInfo(beam=[float(pase['S_beam']), float(pase['S_beam']), 0], spixel=float(pase['S_pixel']),
rms=float(pase['RMSnoise']) * 1e-6,
limit=float(pase['RMSnoise']) * float(pase['Detthresh']) * 1e-6,
nucen=float(pase['nu_obs']), center=(0, 0), survey='UVcoverage')
if survey is None:
"""Create all galaxy clusters:
All these steps are to decide which clusters to use. Better placement: in SurveyUtils"""
""" Read the cluster lists from MUSIC-2 for all snapshots """
all_clusters = pd.read_csv('%s%s_allclusters.csv' % (pase['miscdata'], Clfile))
if verbose:
all_clusters.info()
zsnap_list = pd.Series(all_clusters['redshift'].unique())
snapidlist = pd.Series(all_clusters['snapID'].unique())
clusterIDs = list(all_clusters['clID'].unique())
NclusterIDs = [len(all_clusters[all_clusters['redshift'] == z]) for z in zsnap_list]
misslist = np.loadtxt(pase['miscdata'] + pase['missFile'])
""" e.g. cluster 10# was not (re)simulated, in neither of the MUSIC-2 simulations (also 7 has some issues?)"""
GClList = []
if pase['snaplistz'] != 'None':
snaplistz = [float(z) for z in iom.str2list(pase['snaplistz'])]
snapidlist = [float(z) for z in iom.str2list(pase['snapidlist'].replace(' ', ''))]
zsnap_list = (snaplistz[::-1])[0:11] # 0:17 List of available sn [0:11]
snapidlist = (snapidlist[::-1])[0:11] # 0:17 [0:11]
NclusterIDs = [len(all_clusters['clID'].unique().tolist())] * len(snaplistz)
if verbose: print('NclusterIDs', NclusterIDs[0])
use_list = [True] * len(zsnap_list) # Also put some False, you don't really want to use the z=4.0 snapshots!
Vsimu = (1.0 / (myu.H0 / 100.)) ** 3 # Gpc**3 comoving volume
""" Iterate trough each shell of your z-onion and attribute clusters to them
with z-range and percentage of covered sky, we have z=0.1
"""
if not suut.TestPar(pase['default']):
N_shells = 1
else:
N_shells = float(pase['N_shells'])
shells_z, delta_z = np.linspace(Z[0], Z[1], num=N_shells + 1, retstep=True)
cosmo = FlatLambdaCDM(H0=myu.H0, Om0=Omega_M)
DCMRs = cosmo.comoving_volume(shells_z).value / 1e9
count = 0
for (zlow, zhigh, VCMlow, VCMhigh) in zip(shells_z[0:-1], shells_z[1:], DCMRs[0:-1], DCMRs[1:]):
""" Iterate through each shell of the observed volume
and assign clusters
"""
boundaries_z = (zlow, zhigh)
VCM = (VCMlow, VCMhigh)
z_central = np.mean(boundaries_z)
if not suut.TestPar(pase['default']):
z_central = 0.051
choosen = suut.assign_snaps(zsnap_list, boundaries_z, VCM[1] - VCM[0], NclusterIDs,
sigma_z=float(pase['sigma_z']),
skycoverage=float(pase['surv_compl']), Vsimu=Vsimu, use_list=use_list,
fake=(not suut.TestPar(pase['default'])), logmode=None)
for (snap, kk) in choosen:
l = all_clusters[(all_clusters["clID"] == clusterIDs[kk])
& (all_clusters["snapID"] == snapidlist[snap])]
""" It would be good if you could directly access the element like in an ordered list,
as this would dramatically speed up the process
"""
""" Skips missing snapshots --> they will also miss in the .csv"""
if len(l) == 0:
if verbose:
print('__ Missing snapshot:', clusterIDs[kk], snapidlist[snap])
continue
ids = int(l["clID"])
M200 = float(l["M200"])
# Filter for the cluster masses. Please mind that this filtering step is also redshift dependent
if suut.TestPar(pase['empicut']) and np.log10(M200) < (13.6 + 2 * z_central):
continue
count += 1
# Decide on the projection of the cluster
# it would be great if a random initializer between 0 and 1 could have been saved,
if suut.TestPar(pase['rotation']):
theta = np.arccos(uniform(0, 2) - 1)
phi = uniform(0, 2 * np.pi)
psi = uniform(0, 2 * np.pi)
else:
theta = 0
phi = 0
psi = 0
# Create mockObs and the galaxyCluster_simulation
mockObs = cbclass.MockObs(count, theta=theta, phi=phi, psi=psi, snap=snapidlist[snap],
z_snap=zsnap_list[snap], clid=ids,
snapfolder=pase['xrayfolder'], xrayfolder=pase['xrayfolder'],
headerc=pase['headerC'])
GClList.append(
cbclass.Galaxycluster_simulation("MUSIC2%05i-%06i-%06i" % (ids, snapidlist[snap], count), count,
z=z_central, M200=M200, dinfo=dinfo,
mockobs=mockObs)) # , **addargs
# Also create a list of the chosen clusters for later loockup
GClList = sorted(GClList, key=lambda x: (x.mockobs.clid, -x.mockobs.snap))
if verbose: print('Length of GClList:', len(GClList))
""" New approach: Create a list of modes for the radio emission (Rmodels)
"""
surmodel = None
if ABC is None:
"""CAVEAT: The issue with the currently used ABC routines is that you have to give them arrays. Which is why
the corresponding model associated has to be defined at this layer.
Giving the procedure a function which would create this array would allow all of this to be defined
in the top layer of ABC
"""
RModel = cbclass.RModel(RModelID, effList=[float(pase['eff'])], B0=float(pase['B0']),
kappa=float(pase['kappa']), compress=float(pase['compress']))
if suut.TestPar(pase['redSnap']):
RModel.effList = RModel.effList[0]
elif len(ABC) == 1:
""" Vary only efficiency """
(lgeff) = ABC
RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=1, kappa=0.5, compress=float(pase['compress']))
elif len(ABC) == 2:
""" Vary efficiency and B0"""
(lgeff, lgB0) = ABC
RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=0.5,
compress=float(pase['compress']))
print('#== Begin Processing task')
elif len(ABC) == 3:
""" Varies the standard model """
(lgeff, lgB0, kappa) = ABC
RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
compress=float(pase['compress']))
elif len(ABC) == 4:
""" Varies the standard model + detection probability """
(lgeff, lgB0, kappa, survey_filter_pca_b) = ABC
RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
compress=float(pase['compress']))
surmodel = cbclass.SurModel(b=survey_filter_pca_b)
elif len(ABC) == 6:
(lgeff, lgB0, kappa, lgt0, lgt1, lgratio) = ABC
RModel = cbclass.PreModel_Hoeft(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
compress=float(pase['compress']), t0=10**lgt0, t1=10**lgt1, ratio=10**lgratio)
Rm = RModel
elif len(ABC) == 7:
(lgeff, lgB0, kappa, survey_filter_pca_b, lgratio, lgt0, lgt1) = ABC
RModel = cbclass.PreModel_Hoeft(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
compress=float(pase['compress']), t0=10 ** lgt0, t1=10 ** lgt1,
ratio=10 ** lgratio)
Rm = RModel
surmodel = cbclass.SurModel(b=survey_filter_pca_b)
else:
print('RunSurvey::main: model unknown')
return
""" Create survey """
outfolder = '%s_%05i/' % (logfolder, RModelID)
survey = cbclass.Survey(GClList, survey='%s' % (parfile.replace('.parset', '')),
emi_max=float(pase['RMSnoise']) * 1e-3 * 200,
cnt_levels=[float(pase['RMSnoise']) * 2 ** i for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]],
saveFITS=(ABC is None), savewodetect=suut.TestPar(pase['savewodetect']), dinfo=dinfo,
surshort='MUSIC2', Rmodel=RModel, outfolder=outfolder, logfolder=logfolder)
survey.set_surmodel(surmodel)
survey.set_seed_dropout()
else:
""" If you directly loaded a survey, just use its internal Rmodel """
RModel = survey.Rmodel
if verbose: print('Outfolder:', survey.outfolder)
"""=== Create a Task Cube, modify it & save it
    The function of this Taskcube formerly was to keep track of all the computations that were already done in my multiprocessing version of ClusterBuster.
    Now it is outdated and isn't maintained anymore. After a BUG I didn't want to fix, I decommissioned this functionality.
"""
Taskcube = np.zeros((len(survey.GCls), len([RModel])))
    # A cube of all possible entries; the efficiency is always fully computed and thus not part of the Taskcube
if suut.TestPar(pase['reCube']):
Taskcube = np.load(logfolder + '/TaskCube.npy')
smt.MergeSMT_simple(iom.unpickleObject(logfolder + '/smt'))
if int(pase['reCL']) + int(pase['reRM']) > 0:
for (GCl_ii, RModelID), status in np.ndenumerate(Taskcube[:, :]):
if verbose: print('GCl_ii, RModelID:', GCl_ii, RModelID)
if GClList[GCl_ii].mockobs.clid > int(pase['reCL']) and int(pase['reRM']):
break
else:
Taskcube[GCl_ii, RModelID] = 1
np.save(logfolder + '/TaskCube',
Taskcube) # also to be pickled/saved: Levels Cluster&TaskID --> B0, kappa, (z) --> eff0
""""""
if verbose: print('#== Begin Processing task')
""" This is the most important task! """
while processTasks:
processTasks, smt = DoRun((pase, survey), smt, verbose=verbose)
print('RModelID %i of run %s finished' % (RModelID, surveyN))
return survey
def main_ABC(params, parfile='MUSIC2_NVSS02_SSD.parset', Clfile='clusterCSV/MUSIC2', verbose=False):
survey = main(parfile, ABC=params, Clfile=Clfile, verbose=verbose)
""" MUSIC-2 """
survey.dinfo = survey.GCls[0].dinfo
survey.scatterkwargs = {"alpha": 0.15, "fmt": "^", "markersize": 7}
survey.histkwargs = {"alpha": 0.15}
survey = suut.AddFilesToSurvey(survey, savefolder=survey.outfolder, verbose=False, clusterwise=False)
return survey
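# Illustrative (hypothetical) call of main_ABC: a length-3 parameter vector is
# unpacked in main() as (lgeff, lgB0, kappa), i.e. log10 of the efficiency,
# log10 of B0 and the model parameter kappa, so a single forward simulation
# could be launched roughly like
#   survey = main_ABC([-4.5, 0.0, 0.5], parfile='MUSIC2_NVSS02_SSD.parset')
# The numerical values above are placeholders, not calibrated settings.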
# from http://stackoverflow.com/questions/2553354/how-to-get-a-variable-name-as-a-string-in-python
def LoadSnap_multiprocessing(pase, realisations, Rmodel, getstring=False, verbose=False):
smt = iom.SmartTiming()
smt(task='LoadSnaps')
gcl = realisations[0]
""" We load the radio cubes """
strSn = (pase['snapfolder'] + 'SHOCKS_%05i/cluster.%05i.snap.%03i.shocks') % (gcl.mockobs.clid, gcl.mockobs.clid, gcl.mockobs.snap)
# if suut.TestPar(pase['useMiniCube']): # subset of snapshot, pickled
# strSn = strSn.replace('cluster.', 'clusterSUBSET.')
# if verbose: print('Loading snapshot:',strSn)
# print('___ RunSurvey::LoadSnap_multiprocessing::',strSn,pase['useMiniCube'],suut.TestPar(pase['useMiniCube']))
# snap = iom.unpickleObject(strSn)
# try:
# snap = iom.unpickleObject(strSn)
# except:
# with open('/data/ErrorLog.txt',"a") as f:
# for gcl in realisations:
# f.write(strSn+'\n')
#
# else: # original snapshot
# if verbose: print('Loading snapshot:',strSn)
# snap = loadsnap.Loadsnap(strSn,headerc=pase['headerC']) # original snapshot
if suut.TestPar(pase['useMiniCube']): # asks: original or modified snapshot?
strSn = strSn.replace('cluster.', 'clusterSUBSET.')
if verbose: print('Loading snapshot:',strSn)
snap = loadsnap.Loadsnap(strSn,headerc=pase['headerC'])
"""psi and machfiles could become sharred (or global) arrays, but as they are quite small < 1MB, the impact on performance should be snmall"""
PreSnap = radiomodel.PrepareRadioCube(snap, psiFile=pase['miscdata']+pase['PSItable'], machFile=pase['miscdata']+pase['DSAtable'])
PreSnap = ( radiomodel.PiggyBagSnap(PreSnap[0]), PreSnap[1] )
if getstring:
return PreSnap, strSn, smt
else:
return PreSnap, smt
def varname(var):
    for k, v in list(locals().items()):
if v is var:
a_as_str = k
return a_as_str #[ k for k,v in locals().iteritems() if v is var][0]
def pool_wait(queues, limit, affix='', tmax = 1e3):
tsleep = 0
waitT = 0.3
    while sum([q.qsize() for q in queues]) > limit and tsleep < tmax:  # stage 3 neglected
if tsleep == 0:
message = "[%s] Gonna sleep, because of " % (affix)
stringlist = ['+%s(%i)' % (varname(q),q.qsize()) for q in queues]
string = ' '.join(stringlist)
message += string
message += " > %i" %(limit)
print(message)
time.sleep(waitT)
tsleep += waitT
print('pool_wait:', sum([q.qsize() for q in queues]))
if tsleep > 0:
print("[%s] Slept for %.1f seconds. We don't want to shedule our memory to dead, do we?" % (affix, tsleep) )
return
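# Sketch of the intended use of pool_wait (queue names are hypothetical): given
# multiprocessing queues q1, q2 feeding downstream worker stages, a producer can
# throttle itself via
#   pool_wait([q1, q2], limit=20, affix='producer')
# which sleeps in 0.3 s steps until the summed queue size drops to (or below)
# the limit, or tmax seconds have passed.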
def RadioAndMock_loaded(val, verbose=True):
smt = iom.SmartTiming()
smt(task='RadioAndMock_initialization')
(snapMF, pase, realisations, survey) = val
Rmodel = survey.Rmodel
##=== Stage II - DoMockObs
if verbose:
print('Start compiling MockObservations for further models of cluster #%5i snap #%3i with in total %i realisations.'
% (realisations[0].mockobs.clid, realisations[0].mockobs.snap, len(realisations)))
GClrealisations_return = []
smt(task='Shed_DoMockObs_misc')
    # The result of this computation is independent of rotation, which is why it was put here
for kk, realisation in enumerate(realisations): # update information on the total radio power (at the rest frame frequency) in the simulational volume
""" This is wrong and has to be fixed in the future!!!!
Currently, we make this code really SMELLY and hard to understand
"""
(radiosnap, subsmt, poisson_factor) = radiomodel.CreateRadioCube(snapMF, Rmodel, realisation.mockobs.z_snap,
nuobs=pase['nu_obs'], logging=False)[0:3]
smt.MergeSMT_simple(subsmt, silent=True)
""" also possible: realisations[kk].Rvir = radiocube[0].head['Rvir']"""
if realisations[kk].M200.value == 0:
try:
realisations[kk].M200.value = radiosnap.head['M200']
except:
realisations[kk].Mvir.value = radiosnap.head['Mvir']
realisations[kk].updateInformation(massproxis=True)
""" Here we add the radio emission due to pre-existing electrons """
if isinstance(Rmodel, cbclass.PreModel_Gelszinnis):
randfactor = 10**np.random.normal(0, Rmodel.p_sigma, 1)
realisation.PreNorm = randfactor*Rmodel.p0
            radiosnap.radiPre += realisation.PreNorm * radiosnap.radiPre
elif isinstance(Rmodel, cbclass.PreModel_Hoeft):
realisation.poisson_factor = poisson_factor
""" Here we compute the volume weighted radio emission """
radiosum = np.sum(radiosnap.radi)
        borders = 2*realisations[0].R200*radiosnap.head['hubble']/radiosnap.head['aexpan']
        whereR200 = np.where(np.sqrt(np.power(radiosnap.pos[:, 0], 2) + np.power(radiosnap.pos[:, 1], 2)
                                     + np.power(radiosnap.pos[:, 2], 2)) < borders/2)
radiosum_R200 = np.sum(radiosnap.radi[whereR200])
# update information on the total radio power (at the rest frame frequency) in the simulational volume
realisations[kk].P_rest.value = radiosum # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction
realisations[kk].Prest_vol.value = radiosum_R200 # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction
if not suut.TestPar(pase['cutradio']):
radiosnapUse = copy.deepcopy(radiosnap)
if hasattr(radiosnapUse, 'radiPre'):
radiosnapUse.radi += radiosnapUse.radiPre
#print('Run_MockObs:: Ratio of PREs to total emission', (np.sum(radiosnapUse.radiPre)) / (np.sum(radiosnapUse.radi) + np.sum(radiosnapUse.radiPre)))
radiocube = (radiosnapUse, Rmodel, survey) # Rmodel is added to the tuple
else:
            print('Beware: this is slow and should not be parallelised! This part is not implemented.')
radiocube = (radiosnap, Rmodel, survey) # Rmodel is added to the tuple
smt(task='Shed_DoMockObs')
(nouse, subsmt, GClrealisation_used, Rmodel) = mockobs.Run_MockObs(radiocube, [realisation],
saveFITS=survey.saveFITS, savewodetect=survey.savewodetect,
side_effects=True)
GClrealisations_return += GClrealisation_used
smt.MergeSMT_simple(subsmt, silent=True)
return (GClrealisations_return, Rmodel), smt
def RadioAndMock(val, verbose=True):
smt = iom.SmartTiming()
smt(task='RadioAndMock_initialization')
(pase, realisations, survey) = val
Rmodel = survey.Rmodel
PreSnap, smt_add = LoadSnap_multiprocessing(pase, realisations, Rmodel)
if len(PreSnap) > 0:
snapMF = PreSnap
(radiosnap, subsmt) = radiomodel.CreateRadioCube(snapMF, Rmodel, realisations[0].mockobs.z_snap, nuobs=pase['nu_obs'])[0:2]
smt.MergeSMT_simple(subsmt, silent=True)
""" This weird interresult comes from
output.put( outp + (Rmodel,)) #Rmodel is added to the tuple
(radiocube, subsmt, Rmodel) = stage1_out.get()
stage1_list.append( ( radiocube, Rmodel, survey) )
"""
radiocube = (radiosnap, Rmodel, survey) #Rmodel is added to the tuple
##=== Stage II - DoMockObs
if verbose: print('Start compiling MockObservations for further models of cluster #%5i and snap #%3i with in total %i realisations.' % (realisations[0].mockobs.clid , realisations[0].mockobs.snap, len(realisations)))
smt(task='Shed_DoMockObs')
        # The result of this computation is independent of rotation, which is why it was put here
for kk, real in enumerate(realisations): # update information on the total radio power (at the rest frame frequency) in the simulational volume
""" This is wrong and has to be fixed in the future!!!! """
""" also possible: realisations[kk].Rvir = radiocube[0].head['Rvir']"""
if realisations[kk].M200.value == 0:
try:
realisations[kk].M200.value = radiocube[0].head['M200']
except:
realisations[kk].Mvir.value = radiocube[0].head['Mvir']
realisations[kk].updateInformation(massproxis=True)
""" Here we compute the volume weighted radio emission """
radiosum = np.sum(radiocube[0].radi)
        borders = 2*realisations[0].R200*radiocube[0].head['hubble']/radiocube[0].head['aexpan']
        whereR200 = np.where(np.sqrt(np.power(radiocube[0].pos[:, 0], 2) + np.power(radiocube[0].pos[:, 1], 2)
                                     + np.power(radiocube[0].pos[:, 2], 2)) < borders/2)
radiosum_R200 = np.sum(radiocube[0].radi[whereR200])
# update information on the total radio power (at the rest frame frequency) in the simulational volume
for kk,real in enumerate(realisations):
realisations[kk].P_rest.value = radiosum # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction
realisations[kk].Prest_vol.value = radiosum_R200 # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction
smt(task='Shed_DoMockObs_misc')
locations = [survey.outfolder]
if not suut.TestPar(pase['cutradio']):
radiocubeUse = radiocube
else:
            print('Beware: this is slow and should not be parallelised! This part is not implemented.')
radiocubeUse = None #radiomodel.PiggyBagSnap_cut(snap, radiocube[0], float(pase['cutradio'])),Rmodel,survey)]
(nouse, subsmt, GClrealisations_used, Rmodel) = mockobs.Run_MockObs(radiocubeUse, realisations, saveFITS=survey.saveFITS, savewodetect=survey.savewodetect, writeClusters=True) #Mach=pase['Mach'], Dens=pase['Dens'],
smt.MergeSMT_simple(subsmt, silent=True)
return (GClrealisations_used, Rmodel), smt
def RadioCuts(val, compradio=False):
smt = iom.SmartTiming()
smt(task='RadioAndMock_initialization')
(pase, realisations, survey) = val
Rmodel = survey.Rmodel
snapMF, strSn, smt_add = LoadSnap_multiprocessing(pase,realisations,Rmodel, getstring=True)
if compradio:
(snap,subsmt) = radiomodel.CreateRadioCube(snapMF, Rmodel, realisations[0].mockobs.z_snap, nuobs=pase['nu_obs'])[0:2]
smt.MergeSMT_simple(subsmt, silent=True)
smt(task='UpdateHeader')
realisations[0].z.value = realisations[0].mockobs.z_snap
realisations[0].Mvir.value = snap.head['Mvir']*snap.head['hubble']
realisations[0].updateInformation()
snap.head['M200'] = realisations[0].M200.value
""" Here we compute the volume weighted radio emission """
radiosum = np.sum(snap.radi)
        borders = 2*realisations[0].R200*snap.head['hubble']/snap.head['aexpan']
        whereR200 = np.where(np.sqrt(np.power(snap.pos[:, 0], 2) + np.power(snap.pos[:, 1], 2)
                                     + np.power(snap.pos[:, 2], 2)) < borders/2)
radiosum_R200 = np.sum(snap.radi[whereR200])
# update information on the total radio power (at the rest frame frequency) in the simulational volume
for kk,real in enumerate(realisations):
realisations[kk].P_rest.value = radiosum # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction
realisations[kk].Prest_vol.value = radiosum_R200 # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction
else:
(snap, MF) = snapMF
smt(task='Shed_DoMockObs_misc')
if suut.TestPar(pase['redSnap']):
        # This is all part of reducing the data load. A shrunk version of the snapshot with fewer particles is saved in the custom format that is a fork of the gadget format
redsnap = radiomodel.PiggyBagSnap_cut(snap, snap, float(pase['cutradio']), cutmask = MF )
strSn = strSn.replace('cluster.', 'clusterSUBSET.')
strSn = strSn.replace(pase['snapfolder'], pase['outf'])
print(strSn)
redsnap.savedata(strSn) # original snapshot
if compradio:
print('np.sum(radiocube[0].radi)', np.sum(snap.radi), '-->', np.sum(redsnap.radi), 'i.e.', radiosum, radiosum_R200, 'pickled to', strSn)
return (realisations, Rmodel), smt
def mupro_Output_NicePickleClusters( in_queue, output):
    # Just pickles the relic for the highest efficiency; mechanism: it asks for the image
for well in in_queue:
(Clrealisations, Rmodel) = well
outputMod = output #+ '_%05i/' % (Rmodel.id)
iom.check_mkdir(outputMod + '/pickled')
for GCl in Clrealisations:
filename = 'MonsterPickle-Snap%02i' % (GCl.mockobs.snap)
iom.pickleObject( (GCl, Rmodel), outputMod + '/pickled/', filename, append = True)
return
def DoRun(inputs, smt, verbose=False, countmax=500, countmax_relics=1500):
""" Please mind that this procedure determines early if the number of detected relics becomes to large!"""
(pase, survey) = inputs
count = 0
count_relics = 0
realisations = []
realisations_list = [] # rotated realisations of one and the same cluster
survey.GCls = sorted(survey.GCls, key= iom.Object_natural_keys)
    # This checks if the snapshot has to be loaded ... I would use a pool of loaded snapshots ... but this is cumbersome
    # Each snapshot is loaded only once ... If the number of models & rotations to be tested is high enough, the produced overhead is low
    # This requires that the clusters are ordered by cluster number and snapshot
for gcl in survey.GCls:
if (len(realisations) == 0 or gcl.mockobs.clid != realisations[0].mockobs.clid or gcl.mockobs.snap != realisations[0].mockobs.snap):
if len(realisations) > 0:
realisations_list.append(realisations)
realisations = [gcl]
else:
realisations.append(gcl)
realisations_list.append(realisations)
for realisations in realisations_list:
gcl = realisations[0]
if verbose:
print('Recognized ID %5i, snap %3i, #%i in cluster file' % (gcl.mockobs.clid, gcl.mockobs.snap, gcl.mockobs.id) )
smt(task='LoadSnaps')
""" We load the radio cubes """
strSn = (pase['snapfolder'] + 'SHOCKS_%05i/cluster.%05i.snap.%03i.shocks') % (gcl.mockobs.clid, gcl.mockobs.clid, gcl.mockobs.snap)
""" SMELLY: This causes some issues, as there are two different 'load' versions for one and the same task """
if suut.TestPar(pase['useMiniCube']): # original snapshot
strSn = strSn.replace('cluster.', 'clusterSUBSET.')
if verbose:
print('Loading snapshot:', strSn)
snap = loadsnap.Loadsnap(strSn, headerc=pase['headerC'])
smt(task='PrepareRadioCubes')
PreSnap = radiomodel.PrepareRadioCube(snap, psiFile=pase['miscdata']+pase['PSItable'],
machFile=pase['miscdata']+pase['DSAtable'])
if 1==2:
snap_phd = radiomodel.PrepareRadioCube(snap, psiFile=pase['miscdata'] + pase['PSItable'],
machFile=pase['miscdata'] + pase['DSAtable'], machmin=0.0)[0]
print(type(snap_phd.mach))
Mstat = np.asarray(snap_phd.mach)/1.045+1e-5
Astat = np.asarray(snap_phd.hsml)/np.asarray(loadsnap.comH_to_phys(snap_phd.head))**2
Rhostat = np.asarray(snap.rdow)
Tstat = np.asarray(snap.udow)
Psistat = np.asarray(snap.DSAPsi)
            bins = np.linspace(-1, 3, num=1300, endpoint=True)
# python 3.7
"""Contains the generator class of StyleGAN2.
This class is derived from the `BaseGenerator` class defined in
`base_generator.py`.
"""
import numpy as np
import torch
from . import model_settings
from .base_generator import BaseGenerator
from .stylegan2_generator_network import StyleGAN2GeneratorNet
__all__ = ['StyleGAN2Generator']
class StyleGAN2Generator(BaseGenerator):
"""Defines the generator class of StyleGAN2.
Same as StyleGAN, StyleGAN2 also has Z space, W space, and W+ (WP) space.
"""
def __init__(self, model_name, logger=None):
super().__init__(model_name, logger)
assert self.gan_type == 'stylegan2'
def build(self):
self.check_attr('w_space_dim')
self.check_attr('g_architecture_type')
self.check_attr('fused_modulate')
self.truncation_psi = model_settings.STYLEGAN2_TRUNCATION_PSI
self.truncation_layers = model_settings.STYLEGAN2_TRUNCATION_LAYERS
self.randomize_noise = model_settings.STYLEGAN2_RANDOMIZE_NOISE
self.net = StyleGAN2GeneratorNet(
resolution=self.resolution,
z_space_dim=self.z_space_dim,
w_space_dim=self.w_space_dim,
image_channels=self.image_channels,
architecture_type=self.g_architecture_type,
fused_modulate=self.fused_modulate,
truncation_psi=self.truncation_psi,
truncation_layers=self.truncation_layers,
randomize_noise=self.randomize_noise)
self.num_layers = self.net.num_layers
self.model_specific_vars = ['truncation.truncation']
def convert_tf_weights(self, test_num=10):
# pylint: disable=import-outside-toplevel
import sys
import pickle
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# pylint: enable=import-outside-toplevel
sess = tf.compat.v1.InteractiveSession()
self.logger.info(f'Loading tf weights from `{self.tf_weight_path}`.')
self.check_attr('tf_code_path')
sys.path.insert(0, self.tf_code_path)
with open(self.tf_weight_path, 'rb') as f:
_, _, tf_net = pickle.load(f) # G, D, Gs
sys.path.pop(0)
self.logger.info(f'Successfully loaded!')
self.logger.info(f'Converting tf weights to pytorch version.')
tf_vars = dict(tf_net.__getstate__()['variables'])
tf_vars.update(
dict(tf_net.components.mapping.__getstate__()['variables']))
tf_vars.update(
dict(tf_net.components.synthesis.__getstate__()['variables']))
state_dict = self.net.state_dict()
for pth_var_name, tf_var_name in self.net.pth_to_tf_var_mapping.items():
assert tf_var_name in tf_vars
assert pth_var_name in state_dict
self.logger.debug(f' Converting `{tf_var_name}` to `{pth_var_name}`.')
var = torch.from_numpy(np.array(tf_vars[tf_var_name]))
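      # TF stores fully-connected weights as [in, out] and conv kernels as
      # [height, width, in, out]; the permutes below reorder them to PyTorch's
      # [out, in] and [out, in, height, width] layouts.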
if 'weight' in pth_var_name:
if 'fc' in pth_var_name:
var = var.permute(1, 0)
elif 'conv' in pth_var_name:
var = var.permute(3, 2, 0, 1)
state_dict[pth_var_name] = var
self.logger.info(f'Successfully converted!')
self.logger.info(f'Saving pytorch weights to `{self.weight_path}`.')
for var_name in self.model_specific_vars:
del state_dict[var_name]
torch.save(state_dict, self.weight_path)
self.logger.info(f'Successfully saved!')
self.load()
# Start testing if needed.
if test_num <= 0 or not tf.test.is_built_with_cuda():
self.logger.warning(f'Skip testing the weights converted from tf model!')
sess.close()
return
self.logger.info(f'Testing conversion results.')
self.net.eval().to(self.run_device)
total_distance = 0.0
for i in range(test_num):
latent_code = self.easy_sample(1)
tf_output = tf_net.run(latent_code, # latents_in
None, # labels_in
truncation_psi=self.truncation_psi,
truncation_cutoff=self.truncation_layers,
randomize_noise=self.randomize_noise)
pth_output = self.synthesize(latent_code)['image']
distance = np.average(np.abs(tf_output - pth_output))
self.logger.debug(f' Test {i:03d}: distance {distance:.6e}.')
total_distance += distance
self.logger.info(f'Average distance is {total_distance / test_num:.6e}.')
sess.close()
def sample(self, num, latent_space_type='z', **kwargs):
"""Samples latent codes randomly.
Args:
num: Number of latent codes to sample. Should be positive.
latent_space_type: Type of latent space from which to sample latent code.
Only [`z`, `w`, `wp`] are supported. Case insensitive. (default: `z`)
Returns:
      A `numpy.ndarray` as sampled latent codes.
Raises:
ValueError: If the given `latent_space_type` is not supported.
"""
latent_space_type = latent_space_type.lower()
if latent_space_type == 'z':
latent_codes = np.random.randn(num, self.z_space_dim)
elif latent_space_type in ['w', 'wp']:
z = self.easy_sample(num, latent_space_type='z')
latent_codes = []
for inputs in self.get_batch_inputs(z, self.ram_size):
outputs = self.easy_synthesize(latent_codes=inputs,
latent_space_type='z',
generate_style=False,
generate_image=False)
latent_codes.append(outputs[latent_space_type])
latent_codes = np.concatenate(latent_codes, axis=0)
if latent_space_type == 'w':
assert latent_codes.shape == (num, self.w_space_dim)
elif latent_space_type == 'wp':
assert latent_codes.shape == (num, self.num_layers, self.w_space_dim)
else:
raise ValueError(f'Latent space type `{latent_space_type}` is invalid!')
return latent_codes.astype(np.float32)
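  # Illustrative (hypothetical) usage of the sampling modes above, for an
  # instance `generator` of this class:
  #   z = generator.sample(4, latent_space_type='z')    # shape (4, z_space_dim)
  #   wp = generator.sample(4, latent_space_type='wp')  # shape (4, num_layers, w_space_dim)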
def preprocess(self, latent_codes, latent_space_type='z', **kwargs):
"""Preprocesses the input latent code if needed.
Args:
latent_codes: The input latent codes for preprocessing.
latent_space_type: Type of latent space to which the latent codes belong.
Only [`z`, `w`, `wp`] are supported. Case insensitive. (default: `z`)
Returns:
The preprocessed latent codes which can be used as final input for the
generator.
Raises:
ValueError: If the given `latent_space_type` is not supported.
"""
if not isinstance(latent_codes, np.ndarray):
raise ValueError(f'Latent codes should be with type `numpy.ndarray`!')
latent_space_type = latent_space_type.lower()
if latent_space_type == 'z':
latent_codes = latent_codes.reshape(-1, self.z_space_dim)
norm = np.linalg.norm(latent_codes, axis=1, keepdims=True)
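      # Rescale each z code so its L2 norm equals sqrt(z_space_dim); the
      # preprocessed codes then all lie on a hypersphere of that radius.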
      latent_codes = latent_codes / norm * np.sqrt(self.z_space_dim)
# MIT License
#
# Copyright (c) 2020 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the python API for tsdate.
"""
import unittest
import collections
import json
import warnings
import math
import numpy as np
import scipy
import msprime
import tsinfer
import tskit
import tsdate
from tsdate.base import NodeGridValues
from tsdate.prior import (SpansBySamples, PriorParams, ConditionalCoalescentTimes,
fill_priors, gamma_approx)
from tsdate.date import (Likelihoods, LogLikelihoods, LogLikelihoodsStreaming,
InOutAlgorithms, posterior_mean_var, constrain_ages_topo,
get_dates, date)
from tsdate.util import nodes_time
import utility_functions
class TestBasicFunctions(unittest.TestCase):
"""
Test for some of the basic functions used in tsdate
"""
def test_alpha_prob(self):
self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 3), 1.)
self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 4), 0.25)
def test_tau_expect(self):
self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 10), 1.8)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 100), 0.09)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(100, 100), 1.98)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(5, 10), 0.4)
def test_tau_squared_conditional(self):
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_squared_conditional(1, 10), 4.3981418)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_squared_conditional(100, 100),
-4.87890977e-18)
def test_tau_var(self):
self.assertEqual(
ConditionalCoalescentTimes.tau_var(2, 2), 1)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_var(10, 20), 0.0922995960)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_var(50, 50), 1.15946186)
def test_gamma_approx(self):
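        # gamma_approx moment-matches a gamma distribution: (alpha, beta) =
        # (mean**2 / variance, mean / variance), hence (2, 1) -> (4., 2.)
        # and (0.5, 0.1) -> (2.5, 5.0).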
self.assertEqual(gamma_approx(2, 1), (4., 2.))
self.assertEqual(gamma_approx(0.5, 0.1), (2.5, 5.0))
class TestNodeTipWeights(unittest.TestCase):
def verify_weights(self, ts):
span_data = SpansBySamples(ts)
# Check all non-sample nodes in a tree are represented
nonsample_nodes = collections.defaultdict(float)
for tree in ts.trees():
for n in tree.nodes():
if not tree.is_sample(n):
# do not count a span of a node where there are no sample descendants
nonsample_nodes[n] += (tree.span if tree.num_samples(n) > 0 else 0)
self.assertEqual(set(span_data.nodes_to_date), set(nonsample_nodes.keys()))
for id, span in nonsample_nodes.items():
self.assertAlmostEqual(span, span_data.node_spans[id])
for focal_node in span_data.nodes_to_date:
wt = 0
for _, weights in span_data.get_weights(focal_node).items():
self.assertTrue(0 <= focal_node < ts.num_nodes)
wt += np.sum(weights['weight'])
self.assertLessEqual(max(weights['descendant_tips']), ts.num_samples)
if not np.isnan(wt):
# Dangling nodes will have wt=nan
self.assertAlmostEqual(wt, 1.0)
return span_data
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
span_data = self.verify_weights(ts)
# with a single tree there should only be one weight
for node in span_data.nodes_to_date:
self.assertTrue(len(span_data.get_weights(node)), 1)
self.assertTrue(2 in span_data.get_weights(2)[ts.num_samples]['descendant_tips'])
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
n = ts.num_samples
span_data = self.verify_weights(ts)
# with a single tree there should only be one weight
for node in span_data.nodes_to_date:
self.assertTrue(len(span_data.get_weights(node)), 1)
for nd, expd_tips in [
(4, 3), # Node 4 (root) expected to have 3 descendant tips
(3, 2)]: # Node 3 (1st internal node) expected to have 2 descendant tips
self.assertTrue(
np.isin(span_data.get_weights(nd)[n]['descendant_tips'], expd_tips))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
n = ts.num_samples
span_data = self.verify_weights(ts)
# with a single tree there should only be one weight
for node in span_data.nodes_to_date:
self.assertTrue(len(span_data.get_weights(node)), 1)
for nd, expd_tips in [
(6, 4), # Node 6 (root) expected to have 4 descendant tips
(5, 3), # Node 5 (1st internal node) expected to have 3 descendant tips
                (4, 2)]:  # Node 4 (2nd internal node) expected to have 2 descendant tips
self.assertTrue(
np.isin(span_data.get_weights(nd)[n]['descendant_tips'], expd_tips))
def test_two_trees(self):
ts = utility_functions.two_tree_ts()
n = ts.num_samples
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(5, n, 3), 1.0) # Root on R tree
self.assertEqual(span_data.lookup_weight(4, n, 3), 0.2) # Root on L tree ...
# ... but internal node on R tree
self.assertEqual(span_data.lookup_weight(4, n, 2), 0.8)
self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0) # Internal nd on L tree
def test_missing_tree(self):
ts = utility_functions.two_tree_ts().keep_intervals(
[(0, 0.2)], simplify=False)
n = ts.num_samples
# Here we have no reference in the trees to node 5
with self.assertLogs(level="WARNING") as log:
SpansBySamples(ts)
self.assertGreater(len(log.output), 0)
self.assertIn("5", log.output[-1]) # Should mention the node number
self.assertIn("simplify", log.output[-1]) # Should advise to simplify
ts = ts.simplify()
span_data = self.verify_weights(ts)
# Root on (deleted) R tree is missing
self.assertTrue(5 not in span_data.nodes_to_date)
self.assertEqual(span_data.lookup_weight(4, n, 3), 1.0) # Root on L tree ...
# ... but internal on (deleted) R tree
self.assertFalse(np.isin(span_data.get_weights(4)[n]['descendant_tips'], 2))
self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0) # Internal nd on L tree
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
n = ts.num_samples
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(7, n, 3), 1.0)
self.assertEqual(span_data.lookup_weight(6, n, 1), 0.5)
self.assertEqual(span_data.lookup_weight(6, n, 3), 0.5)
self.assertEqual(span_data.lookup_weight(5, n, 2), 0.5)
self.assertEqual(span_data.lookup_weight(5, n, 3), 0.5)
self.assertEqual(span_data.lookup_weight(4, n, 2), 0.75)
self.assertEqual(span_data.lookup_weight(4, n, 3), 0.25)
self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0)
@unittest.skip("Unary node is internal then the oldest node")
def test_tree_with_unary_nodes_oldest(self):
ts = utility_functions.two_tree_ts_with_unary_n3()
n = ts.num_samples
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(9, n, 4), 0.5)
self.assertEqual(span_data.lookup_weight(8, n, 4), 1.0)
self.assertEqual(span_data.lookup_weight(7, n, 1), 0.5)
self.assertEqual(span_data.lookup_weight(7, n, 4), 0.5)
self.assertEqual(span_data.lookup_weight(6, n, 2), 0.5)
self.assertEqual(span_data.lookup_weight(6, n, 4), 0.5)
self.assertEqual(span_data.lookup_weight(5, n, 2), 0.5)
self.assertEqual(span_data.lookup_weight(4, n, 2), 1.0)
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(3, ts.num_samples, 3), 1.0)
def test_larger_find_node_tip_weights(self):
ts = msprime.simulate(10, recombination_rate=5,
mutation_rate=5, random_seed=123)
self.assertGreater(ts.num_trees, 1)
self.verify_weights(ts)
def test_dangling_nodes_warn(self):
ts = utility_functions.single_tree_ts_n2_dangling()
with self.assertLogs(level="WARNING") as log:
self.verify_weights(ts)
self.assertGreater(len(log.output), 0)
self.assertIn("dangling", log.output[0])
def test_single_tree_n2_delete_intervals(self):
ts = utility_functions.single_tree_ts_n2()
deleted_interval_ts = ts.delete_intervals([[0.5, 0.6]])
n = deleted_interval_ts.num_samples
span_data = self.verify_weights(ts)
span_data_deleted = self.verify_weights(deleted_interval_ts)
self.assertEqual(span_data.lookup_weight(2, n, 2),
span_data_deleted.lookup_weight(2, n, 2))
def test_single_tree_n4_delete_intervals(self):
ts = utility_functions.single_tree_ts_n4()
deleted_interval_ts = ts.delete_intervals([[0.5, 0.6]])
n = deleted_interval_ts.num_samples
span_data = self.verify_weights(ts)
span_data_deleted = self.verify_weights(deleted_interval_ts)
self.assertEqual(span_data.lookup_weight(4, n, 2),
span_data_deleted.lookup_weight(4, n, 2))
self.assertEqual(span_data.lookup_weight(5, n, 3),
span_data_deleted.lookup_weight(5, n, 3))
self.assertEqual(span_data.lookup_weight(6, n, 4),
span_data_deleted.lookup_weight(6, n, 4))
def test_two_tree_ts_delete_intervals(self):
ts = utility_functions.two_tree_ts()
deleted_interval_ts = ts.delete_intervals([[0.5, 0.6]])
n = deleted_interval_ts.num_samples
span_data = self.verify_weights(ts)
span_data_deleted = self.verify_weights(deleted_interval_ts)
self.assertEqual(span_data.lookup_weight(3, n, 2),
span_data_deleted.lookup_weight(3, n, 2))
self.assertAlmostEqual(
span_data_deleted.lookup_weight(4, n, 2)[0], 0.7 / 0.9)
self.assertAlmostEqual(
span_data_deleted.lookup_weight(4, n, 3)[0], 0.2 / 0.9)
self.assertEqual(span_data.lookup_weight(5, n, 3),
span_data_deleted.lookup_weight(3, n, 2))
@unittest.skip("YAN to fix")
def test_truncated_nodes(self):
Ne = 1e2
ts = msprime.simulate(
10, Ne=Ne, length=400, recombination_rate=1e-4, random_seed=12)
truncated_ts = utility_functions.truncate_ts_samples(
ts, average_span=200, random_seed=123)
span_data = self.verify_weights(truncated_ts)
raise NotImplementedError(str(span_data))
class TestMakePrior(unittest.TestCase):
# We only test make_prior() on single trees
def verify_priors(self, ts, prior_distr):
# Check prior contains all possible tips
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add(ts.num_samples)
priors_df = priors[ts.num_samples]
self.assertEqual(priors_df.shape[0], ts.num_samples + 1)
return(priors_df)
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=1., beta=1., mean=1., var=1.)))
priors = self.verify_priors(ts, 'lognorm')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=-0.34657359, beta=0.69314718, mean=1., var=1.)))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
prior2mv = {'mean': 1/3, 'var': 1/9}
prior3mv = {'mean': 1+1/3, 'var': 1+1/9}
priors = self.verify_priors(ts, 'lognorm')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=-1.44518588, beta=0.69314718, **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=0.04492816, beta=0.48550782, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
self.skipTest("Fill in values instead of np.nan")
prior2mv = {'mean': np.nan, 'var': np.nan}
prior3mv = {'mean': np.nan, 'var': np.nan}
prior4mv = {'mean': np.nan, 'var': np.nan}
priors = self.verify_priors(ts, 'lognorm')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
self.assertTrue(np.allclose(
priors[4], PriorParams(alpha=np.nan, beta=np.nan, **prior4mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
self.assertTrue(np.allclose(
priors[4], PriorParams(alpha=np.nan, beta=np.nan, **prior4mv)))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
self.skipTest("Fill in values instead of np.nan")
prior3mv = {'mean': np.nan, 'var': np.nan}
priors = self.verify_priors(ts, 'lognorm')
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
        priors = self.verify_priors(ts, 'gamma')
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
self.skipTest("Fill in values instead of np.nan")
prior2mv = {'mean': np.nan, 'var': np.nan}
prior3mv = {'mean': np.nan, 'var': np.nan}
priors = self.verify_priors(ts, 'lognorm')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
def test_single_tree_ts_with_unary(self):
ts = utility_functions.single_tree_ts_with_unary()
self.skipTest("Fill in values instead of np.nan")
prior2mv = {'mean': np.nan, 'var': np.nan}
prior3mv = {'mean': np.nan, 'var': np.nan}
priors = self.verify_priors(ts, 'lognorm')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))
def test_two_tree_mutation_ts(self):
ts = utility_functions.two_tree_mutation_ts()
self.skipTest("Fill in values instead of np.nan")
prior2mv = {'mean': np.nan, 'var': np.nan}
prior3mv = {'mean': np.nan, 'var': np.nan}
priors = self.verify_priors(ts, 'lognorm')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=np.nan, beta=np.nan, **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=np.nan, beta=np.nan, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(np.allclose(
priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
self.assertTrue(np.allclose(
priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))
class TestMixturePrior(unittest.TestCase):
alpha_beta = [PriorParams.field_index('alpha'), PriorParams.field_index('beta')]
def get_mixture_prior_params(self, ts, prior_distr):
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add(ts.num_samples, approximate=False)
mixture_priors = priors.get_mixture_prior_params(span_data)
return(mixture_priors)
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
np.allclose(mixture_priors[2, self.alpha_beta], [1., 1.]))
mixture_priors = self.get_mixture_prior_params(ts, 'lognorm')
self.assertTrue(
np.allclose(mixture_priors[2, self.alpha_beta], [-0.34657359, 0.69314718]))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
self.assertTrue(
np.allclose(mixture_priors[4, self.alpha_beta], [1.6, 1.2]))
mixture_priors = self.get_mixture_prior_params(ts, 'lognorm')
self.assertTrue(
np.allclose(mixture_priors[3, self.alpha_beta], [-1.44518588, 0.69314718]))
self.assertTrue(
np.allclose(mixture_priors[4, self.alpha_beta], [0.04492816, 0.48550782]))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
np.allclose(mixture_priors[4, self.alpha_beta], [0.81818182, 3.27272727]))
self.assertTrue(
np.allclose(mixture_priors[5, self.alpha_beta], [1.8, 3.6]))
self.assertTrue(
np.allclose(mixture_priors[6, self.alpha_beta], [1.97560976, 1.31707317]))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
np.allclose(mixture_priors[3, self.alpha_beta], [1.6, 1.2]))
def test_two_trees(self):
ts = utility_functions.two_tree_ts()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
# Node 4 should be a mixture between 2 and 3 tips
self.assertTrue(
np.allclose(mixture_priors[4, self.alpha_beta], [0.60377, 1.13207]))
self.assertTrue(
np.allclose(mixture_priors[5, self.alpha_beta], [1.6, 1.2]))
def test_single_tree_ts_with_unary(self):
ts = utility_functions.single_tree_ts_with_unary()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
# Root is a 3 tip prior
self.assertTrue(
np.allclose(mixture_priors[7, self.alpha_beta], [1.6, 1.2]))
# Node 6 should be a 50:50 mixture between 1 and 3 tips
self.assertTrue(
np.allclose(mixture_priors[6, self.alpha_beta], [0.44444, 0.66666]))
# Node 5 should be a 50:50 mixture of 2 and 3 tips
self.assertTrue(
np.allclose(mixture_priors[5, self.alpha_beta], [0.80645, 0.96774]))
# Node 4 should be a 75:25 mixture of 2 and 3 tips
self.assertTrue(
np.allclose(mixture_priors[4, self.alpha_beta], [0.62025, 1.06329]))
# Node 3 is a 2 tip prior
self.assertTrue(
np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
def test_two_tree_mutation_ts(self):
ts = utility_functions.two_tree_mutation_ts()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
np.allclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
# Node 4 should be a mixture between 2 and 3 tips
self.assertTrue(
np.allclose(mixture_priors[4, self.alpha_beta], [0.60377, 1.13207]))
self.assertTrue(
np.allclose(mixture_priors[5, self.alpha_beta], [1.6, 1.2]))
def check_intervals(self, ts, delete_interval_ts, keep_interval_ts):
tests = list()
for distr in ['gamma', 'lognorm']:
mix_priors = self.get_mixture_prior_params(ts, distr)
for interval_ts in [delete_interval_ts, keep_interval_ts]:
mix_priors_ints = self.get_mixture_prior_params(interval_ts, distr)
for internal_node in range(ts.num_samples, ts.num_nodes):
tests.append(np.allclose(
mix_priors[internal_node, self.alpha_beta],
mix_priors_ints[internal_node, self.alpha_beta]))
return tests
def test_one_tree_n2_intervals(self):
ts = utility_functions.single_tree_ts_n2()
delete_interval_ts = ts.delete_intervals([[0.5, 0.6]])
keep_interval_ts = ts.keep_intervals([[0, 0.1]])
tests = self.check_intervals(ts, delete_interval_ts, keep_interval_ts)
self.assertTrue(np.all(tests))
def test_two_tree_mutation_ts_intervals(self):
ts = utility_functions.two_tree_mutation_ts()
ts_extra_length = utility_functions.two_tree_ts_extra_length()
delete_interval_ts = ts_extra_length.delete_intervals([[0.75, 1.25]])
keep_interval_ts = ts_extra_length.keep_intervals([[0, 1.]])
tests = self.check_intervals(ts, delete_interval_ts, keep_interval_ts)
self.assertTrue(np.all(tests))
class TestPriorVals(unittest.TestCase):
def verify_prior_vals(self, ts, prior_distr):
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add(ts.num_samples, approximate=False)
grid = np.linspace(0, 3, 3)
mixture_priors = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
return prior_vals
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(np.allclose(prior_vals[2], [0, 1, 0.22313016]))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.011109]))
self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.3973851]))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.00467134]))
self.assertTrue(np.allclose(prior_vals[5], [0, 1, 0.02167806]))
self.assertTrue(np.allclose(prior_vals[6], [0, 1, 0.52637529]))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.3973851]))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.011109]))
self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.080002]))
self.assertTrue(np.allclose(prior_vals[5], [0, 1, 0.3973851]))
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(np.allclose(prior_vals[7], [0, 1, 0.397385]))
self.assertTrue(np.allclose(prior_vals[6], [0, 1, 0.113122]))
self.assertTrue(np.allclose(prior_vals[5], [0, 1, 0.164433]))
self.assertTrue(np.allclose(prior_vals[4], [0, 1, 0.093389]))
self.assertTrue(np.allclose(prior_vals[3], [0, 1, 0.011109]))
def test_one_tree_n2_intervals(self):
ts = utility_functions.single_tree_ts_n2()
delete_interval_ts = ts.delete_intervals([[0.1, 0.3]])
keep_interval_ts = ts.keep_intervals([[0.4, 0.6]])
prior_vals = self.verify_prior_vals(ts, 'gamma')
prior_vals_keep = self.verify_prior_vals(keep_interval_ts, 'gamma')
prior_vals_delete = self.verify_prior_vals(delete_interval_ts, 'gamma')
self.assertTrue(np.allclose(prior_vals[2], prior_vals_keep[2]))
self.assertTrue(np.allclose(prior_vals[2], prior_vals_delete[2]))
class TestLikelihoodClass(unittest.TestCase):
def poisson(self, param, x, normalize=True):
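        # Reference Poisson pmf exp(-param) * param**x / x!; with normalize=True
        # it is rescaled by its maximum over the evaluated grid, mirroring the
        # normalisation used by the Likelihoods class under test.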
ll = np.exp(-param) * param ** x / scipy.special.factorial(x)
if normalize:
return ll / np.max(ll)
else:
return ll
def log_poisson(self, param, x, normalize=True):
with np.errstate(divide='ignore'):
ll = np.log(np.exp(-param) * param ** x / scipy.special.factorial(x))
if normalize:
return ll - np.max(ll)
else:
return ll
def test_get_mut_edges(self):
ts = utility_functions.two_tree_mutation_ts()
mutations_per_edge = Likelihoods.get_mut_edges(ts)
for e in ts.edges():
if e.child == 3 and e.parent == 4:
self.assertEqual(mutations_per_edge[e.id], 2)
elif e.child == 0 and e.parent == 5:
self.assertEqual(mutations_per_edge[e.id], 1)
else:
self.assertEqual(mutations_per_edge[e.id], 0)
def test_create_class(self):
ts = utility_functions.two_tree_mutation_ts()
grid = np.array([0, 1, 2])
lik = Likelihoods(ts, grid)
loglik = LogLikelihoods(ts, grid)
self.assertRaises(AssertionError, lik.get_mut_lik_fixed_node, ts.edge(0))
self.assertRaises(AssertionError, lik.get_mut_lik_lower_tri, ts.edge(0))
self.assertRaises(AssertionError, lik.get_mut_lik_upper_tri, ts.edge(0))
self.assertRaises(AssertionError, loglik.get_mut_lik_fixed_node, ts.edge(0))
self.assertRaises(AssertionError, loglik.get_mut_lik_lower_tri, ts.edge(0))
self.assertRaises(AssertionError, loglik.get_mut_lik_upper_tri, ts.edge(0))
def test_no_theta_class(self):
ts = utility_functions.two_tree_mutation_ts()
grid = np.array([0, 1, 2])
lik = Likelihoods(ts, grid, theta=None)
self.assertRaises(RuntimeError, lik.precalculate_mutation_likelihoods)
def test_precalc_lik_lower(self):
ts = utility_functions.single_tree_ts_n3()
grid = np.array([0, 1, 2])
eps = 0
theta = 1
lik = Likelihoods(ts, grid, theta, eps)
for method in (0, 1, 2):
# TODO: Remove this loop and hard-code one of the methods after perf testing
lik.precalculate_mutation_likelihoods(unique_method=method)
self.assertEquals(ts.num_trees, 1)
span = ts.first().span
dt = grid
num_muts = 0
n_internal_edges = 0
expected_lik_dt = self.poisson(dt * (theta / 2 * span), num_muts)
for edge in ts.edges():
if ts.node(edge.child).is_sample():
self.assertRaises(AssertionError, lik.get_mut_lik_lower_tri, edge)
self.assertRaises(AssertionError, lik.get_mut_lik_upper_tri, edge)
fixed_edge_lik = lik.get_mut_lik_fixed_node(edge)
self.assertTrue(np.allclose(fixed_edge_lik, expected_lik_dt))
else:
n_internal_edges += 1 # only one internal edge in this tree
self.assertLessEqual(n_internal_edges, 1)
self.assertRaises(AssertionError, lik.get_mut_lik_fixed_node, edge)
lower_tri = lik.get_mut_lik_lower_tri(edge)
self.assertAlmostEqual(lower_tri[0], expected_lik_dt[0])
self.assertAlmostEqual(lower_tri[1], expected_lik_dt[1])
self.assertAlmostEqual(lower_tri[2], expected_lik_dt[0])
self.assertAlmostEqual(lower_tri[3], expected_lik_dt[2])
self.assertAlmostEqual(lower_tri[4], expected_lik_dt[1])
self.assertAlmostEqual(lower_tri[5], expected_lik_dt[0])
def test_precalc_lik_upper_multithread(self):
ts = utility_functions.two_tree_mutation_ts()
grid = np.array([0, 1, 2])
eps = 0
theta = 1
for L, pois in [(Likelihoods, self.poisson), (LogLikelihoods, self.log_poisson)]:
for normalize in (True, False):
lik = L(ts, grid, theta, eps, normalize=normalize)
dt = grid
for num_threads in (None, 1, 2):
n_internal_edges = 0
lik.precalculate_mutation_likelihoods(num_threads=num_threads)
for edge in ts.edges():
if not ts.node(edge.child).is_sample():
n_internal_edges += 1 # only two internal edges in this tree
self.assertLessEqual(n_internal_edges, 2)
if edge.parent == 4 and edge.child == 3:
num_muts = 2
elif edge.parent == 5 and edge.child == 4:
num_muts = 0
else:
self.fail("Unexpected edge")
span = edge.right - edge.left
expected_lik_dt = pois(
dt * (theta / 2 * span), num_muts, normalize=normalize)
upper_tri = lik.get_mut_lik_upper_tri(edge)
self.assertAlmostEqual(upper_tri[0], expected_lik_dt[0])
self.assertAlmostEqual(upper_tri[1], expected_lik_dt[1])
self.assertAlmostEqual(upper_tri[2], expected_lik_dt[2])
self.assertAlmostEqual(upper_tri[3], expected_lik_dt[0])
self.assertAlmostEqual(upper_tri[4], expected_lik_dt[1])
self.assertAlmostEqual(upper_tri[5], expected_lik_dt[0])
def test_tri_functions(self):
ts = utility_functions.two_tree_mutation_ts()
grid = np.array([0, 1, 2])
eps = 0
theta = 1
lik = Likelihoods(ts, grid, theta, eps)
lik.precalculate_mutation_likelihoods()
for e in ts.edges():
if e.child == 3 and e.parent == 4:
exp_branch_muts = 2
exp_span = 0.2
self.assertEqual(e.right - e.left, exp_span)
self.assertEqual(lik.mut_edges[e.id], exp_branch_muts)
pois_lambda = grid * theta / 2 * exp_span
cumul_pois = np.cumsum(self.poisson(pois_lambda, exp_branch_muts))
lower_tri = lik.get_mut_lik_lower_tri(e)
self.assertTrue(
np.allclose(lik.rowsum_lower_tri(lower_tri), cumul_pois))
upper_tri = lik.get_mut_lik_upper_tri(e)
self.assertTrue(
np.allclose(
lik.rowsum_upper_tri(upper_tri)[::-1],
cumul_pois))
def test_no_theta_class_loglikelihood(self):
ts = utility_functions.two_tree_mutation_ts()
grid = np.array([0, 1, 2])
lik = LogLikelihoods(ts, grid, theta=None)
self.assertRaises(RuntimeError, lik.precalculate_mutation_likelihoods)
def test_logsumexp(self):
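        # logsumexp(x) = log(sum_i exp(x_i)); with x = log(lls) this reduces to
        # log(sum(lls)), which is exactly what is asserted below.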
lls = np.array([0.1, 0.2, 0.5])
ll_sum = np.sum(lls)
log_lls = np.log(lls)
self.assertEqual(LogLikelihoods.logsumexp(log_lls), np.log(ll_sum))
def test_log_tri_functions(self):
ts = utility_functions.two_tree_mutation_ts()
grid = np.array([0, 1, 2])
eps = 0
theta = 1
lik = Likelihoods(ts, grid, theta, eps)
loglik = LogLikelihoods(ts, grid, theta=theta, eps=eps)
lik.precalculate_mutation_likelihoods()
loglik.precalculate_mutation_likelihoods()
for e in ts.edges():
if e.child == 3 and e.parent == 4:
exp_branch_muts = 2
exp_span = 0.2
self.assertEqual(e.right - e.left, exp_span)
self.assertEqual(lik.mut_edges[e.id], exp_branch_muts)
self.assertEqual(loglik.mut_edges[e.id], exp_branch_muts)
pois_lambda = grid * theta / 2 * exp_span
cumul_pois = np.cumsum(self.poisson(pois_lambda, exp_branch_muts))
lower_tri = lik.get_mut_lik_lower_tri(e)
lower_tri_log = loglik.get_mut_lik_lower_tri(e)
self.assertTrue(
np.allclose(lik.rowsum_lower_tri(lower_tri), cumul_pois))
with np.errstate(divide='ignore'):
self.assertTrue(
np.allclose(loglik.rowsum_lower_tri(lower_tri_log),
np.log(cumul_pois)))
upper_tri = lik.get_mut_lik_upper_tri(e)
upper_tri_log = loglik.get_mut_lik_upper_tri(e)
self.assertTrue(
np.allclose(
lik.rowsum_upper_tri(upper_tri)[::-1],
cumul_pois))
with np.errstate(divide='ignore'):
self.assertTrue(
np.allclose(
loglik.rowsum_upper_tri(upper_tri_log)[::-1],
np.log(cumul_pois)))
def test_logsumexp_streaming(self):
lls = np.array([0.1, 0.2, 0.5])
ll_sum = np.sum(lls)
log_lls = np.log(lls)
self.assertTrue(np.allclose(LogLikelihoodsStreaming.logsumexp(log_lls),
np.log(ll_sum)))
class TestNodeGridValuesClass(unittest.TestCase):
# TODO - needs a few more tests in here
def test_init(self):
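        # NodeGridValues stores a full row of timepoint values (grid_data) only
        # for the node ids passed in; every other node gets a single scalar slot
        # in fixed_data, which is what the shape checks below verify.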
num_nodes = 5
ids = np.array([3, 4])
timepoints = np.array(range(10))
store = NodeGridValues(num_nodes, ids, timepoints, fill_value=6)
self.assertEquals(store.grid_data.shape, (len(ids), len(timepoints)))
self.assertEquals(len(store.fixed_data), (num_nodes-len(ids)))
self.assertTrue(np.all(store.grid_data == 6))
self.assertTrue(np.all(store.fixed_data == 6))
ids = np.array([3, 4], dtype=np.int32)
store = NodeGridValues(num_nodes, ids, timepoints, fill_value=5)
self.assertEquals(store.grid_data.shape, (len(ids), len(timepoints)))
self.assertEquals(len(store.fixed_data), num_nodes-len(ids))
self.assertTrue(np.all(store.fixed_data == 5))
def test_set_and_get(self):
num_nodes = 5
grid_size = 2
fill = {}
for ids in ([3, 4], []):
np.random.seed(1)
store = NodeGridValues(
num_nodes, np.array(ids, dtype=np.int32), np.array(range(grid_size)))
for i in range(num_nodes):
fill[i] = np.random.random(grid_size if i in ids else None)
store[i] = fill[i]
for i in range(num_nodes):
self.assertTrue(np.all(fill[i] == store[i]))
self.assertRaises(IndexError, store.__getitem__, num_nodes)
def test_bad_init(self):
ids = [3, 4]
self.assertRaises(ValueError, NodeGridValues, 3, np.array(ids),
np.array([0, 1.2, 2]))
self.assertRaises(AttributeError, NodeGridValues, 5, np.array(ids), -1)
self.assertRaises(ValueError, NodeGridValues, 5, np.array([-1]),
np.array([0, 1.2, 2]))
def test_clone(self):
num_nodes = 10
grid_size = 2
ids = [3, 4]
orig = NodeGridValues(num_nodes, np.array(ids), np.array(range(grid_size)))
orig[3] = np.array([1, 2])
orig[4] = np.array([4, 3])
orig[0] = 1.5
orig[9] = 2.5
# test with np.zeros
clone = NodeGridValues.clone_with_new_data(orig, 0)
self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
self.assertTrue(np.all(clone.grid_data == 0))
self.assertTrue(np.all(clone.fixed_data == 0))
# test with something else
clone = NodeGridValues.clone_with_new_data(orig, 5)
self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
self.assertTrue(np.all(clone.grid_data == 5))
self.assertTrue(np.all(clone.fixed_data == 5))
# test with different
scalars = np.arange(num_nodes - len(ids))
clone = NodeGridValues.clone_with_new_data(orig, 0, scalars)
self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
self.assertTrue(np.all(clone.grid_data == 0))
self.assertTrue(np.all(clone.fixed_data == scalars))
clone = NodeGridValues.clone_with_new_data(
orig, np.array([[1, 2], [4, 3]]))
for i in range(num_nodes):
if i in ids:
self.assertTrue(np.all(clone[i] == orig[i]))
else:
self.assertTrue(np.isnan(clone[i]))
clone = NodeGridValues.clone_with_new_data(
orig, np.array([[1, 2], [4, 3]]), 0)
for i in range(num_nodes):
if i in ids:
self.assertTrue(np.all(clone[i] == orig[i]))
else:
self.assertEquals(clone[i], 0)
def test_bad_clone(self):
num_nodes = 10
ids = [3, 4]
orig = NodeGridValues(num_nodes, np.array(ids), np.array([0, 1.2]))
self.assertRaises(
ValueError,
NodeGridValues.clone_with_new_data,
orig, np.array([[1, 2, 3], [4, 5, 6]]))
self.assertRaises(
ValueError,
NodeGridValues.clone_with_new_data,
orig, 0, np.array([[1, 2], [4, 5]]))
class TestAlgorithmClass(unittest.TestCase):
def test_nonmatching_prior_vs_lik_timepoints(self):
ts = utility_functions.single_tree_ts_n3()
timepoints1 = np.array([0, 1.2, 2])
timepoints2 = np.array([0, 1.1, 2])
priors = tsdate.build_prior_grid(ts, timepoints1)
lls = Likelihoods(ts, timepoints2)
self.assertRaisesRegexp(ValueError, "timepoints", InOutAlgorithms, priors, lls)
def test_nonmatching_prior_vs_lik_fixednodes(self):
ts1 = utility_functions.single_tree_ts_n3()
ts2 = utility_functions.single_tree_ts_n2_dangling()
timepoints = np.array([0, 1.2, 2])
priors = tsdate.build_prior_grid(ts1, timepoints)
lls = Likelihoods(ts2, priors.timepoints)
self.assertRaisesRegexp(ValueError, "fixed", InOutAlgorithms, priors, lls)
class TestInsideAlgorithm(unittest.TestCase):
def run_inside_algorithm(self, ts, prior_distr, normalize=True):
priors = tsdate.build_prior_grid(ts, timepoints=np.array([0, 1.2, 2]),
approximate_priors=False,
prior_distribution=prior_distr)
theta = 1
eps = 1e-6
lls = Likelihoods(ts, priors.timepoints, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(priors, lls)
algo.inside_pass(normalize=normalize)
return algo, priors
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(np.allclose(algo.inside[2], np.array([0, 1, 0.10664654])))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.0114771635])))
self.assertTrue(np.allclose(algo.inside[4], np.array([0, 1, 0.1941815518])))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(np.allclose(algo.inside[4], np.array([0, 1, 0.00548801])))
self.assertTrue(np.allclose(algo.inside[5], np.array([0, 1, 0.0239174])))
self.assertTrue(np.allclose(algo.inside[6], np.array([0, 1, 0.26222197])))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.12797265])))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
algo, priors = self.run_inside_algorithm(ts, 'gamma', normalize=False)
# priors[3][1] * Ll_(0->3)(1.2 - 0 + eps) ** 2
node3_t1 = priors[3][1] * scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 0.2) ** 2
# priors[3][2] * sum(Ll_(0->3)(2 - t + eps))
node3_t2 = priors[3][2] * scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 0.2) ** 2
self.assertTrue(np.allclose(algo.inside[3],
np.array([0, node3_t1, node3_t2])))
"""
priors[4][1] * (Ll_(2->4)(1.2 - 0 + eps) * (Ll_(1->4)(1.2 - 0 + eps)) *
(Ll_(3->4)(1.2-1.2+eps) * node3_t1)
"""
node4_t1 = priors[4][1] * (scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 1) * scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 0.8) *
((scipy.stats.poisson.pmf(0, (1e-6) * 0.5 * 0.2) * node3_t1)))
"""
priors[4][2] * (Ll_(2->4)(2 - 0 + eps) * Ll_(1->4)(2 - 0 + eps) *
(sum_(t'<2)(Ll_(3->4)(2-t'+eps) * node3_t))
"""
node4_t2 = priors[4][2] * (scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 1) * scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 0.8) * ((scipy.stats.poisson.pmf(
0, (0.8 + 1e-6) * 0.5 * 0.2) * node3_t1) +
(scipy.stats.poisson.pmf(0, (1e-6 + 1e-6) * 0.5 * 0.2) * node3_t2)))
self.assertTrue(np.allclose(algo.inside[4], np.array([0, node4_t1, node4_t2])))
"""
priors[5][1] * (Ll_(4->5)(1.2 - 1.2 + eps) * (node3_t ** 0.8)) *
(Ll_(0->5)(1.2 - 0 + eps) * 1)
raising node4_t to 0.8 is geometric scaling
"""
node5_t1 = priors[5][1] * (scipy.stats.poisson.pmf(
0, (1e-6) * 0.5 * 0.8) * (node4_t1 ** 0.8)) * (scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 0.8))
"""
prior[5][2] * (sum_(t'<1.2)(Ll_(4->5)(1.2 - 0 + eps) * (node3_t ** 0.8)) *
(Ll_(0->5)(1.2 - 0 + eps) * 1)
"""
node5_t2 = priors[5][2] * ((scipy.stats.poisson.pmf(
0, (0.8 + 1e-6) * 0.5 * 0.8) * (node4_t1 ** 0.8)) +
(scipy.stats.poisson.pmf(0, (1e-6 + 1e-6) * 0.5 * 0.8) *
(node4_t2 ** 0.8))) * (scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 0.8))
self.assertTrue(np.allclose(algo.inside[5], np.array([0, node5_t1, node5_t2])))
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(np.allclose(algo.inside[7], np.array([0, 1, 0.25406637])))
self.assertTrue(np.allclose(algo.inside[6], np.array([0, 1, 0.07506923])))
self.assertTrue(np.allclose(algo.inside[5], np.array([0, 1, 0.13189998])))
self.assertTrue(np.allclose(algo.inside[4], np.array([0, 1, 0.07370801])))
self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.01147716])))
def test_two_tree_mutation_ts(self):
ts = utility_functions.two_tree_mutation_ts()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(np.allclose(algo.inside[3], np.array([0, 1, 0.02176622])))
# self.assertTrue(np.allclose(upward[4], np.array([0, 2.90560754e-05, 1])))
# NB the replacement below has not been hand-calculated
self.assertTrue(np.allclose(algo.inside[4], np.array([0, 3.63200499e-11, 1])))
# self.assertTrue(np.allclose(upward[5], np.array([0, 5.65044738e-05, 1])))
# NB the replacement below has not been hand-calculated
self.assertTrue(np.allclose(algo.inside[5], np.array([0, 7.06320034e-11, 1])))
def test_dangling_fails(self):
ts = utility_functions.single_tree_ts_n2_dangling()
print(ts.draw_text())
print("Samples:", ts.samples())
priors = tsdate.build_prior_grid(ts, timepoints=np.array([0, 1.2, 2]))
theta = 1
eps = 1e-6
lls = Likelihoods(ts, priors.timepoints, theta, eps)
algo = InOutAlgorithms(priors, lls)
self.assertRaisesRegexp(ValueError, "dangling", algo.inside_pass)
class TestOutsideAlgorithm(unittest.TestCase):
def run_outside_algorithm(
self, ts, prior_distr="lognorm", normalize=False,
ignore_oldest_root=False):
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr)
priors.add(ts.num_samples, approximate=False)
grid = np.array([0, 1.2, 2])
mixture_priors = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
theta = 1
eps = 1e-6
lls = Likelihoods(ts, grid, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(prior_vals, lls)
algo.inside_pass()
algo.outside_pass(normalize=normalize, ignore_oldest_root=ignore_oldest_root)
return algo
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
for prior_distr in ('lognorm', 'gamma'):
algo = self.run_outside_algorithm(ts, prior_distr)
# Root, should this be 0,1,1 or 1,1,1
self.assertTrue(np.array_equal(
algo.outside[2], np.array([1, 1, 1])))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
for prior_distr in ('lognorm', 'gamma'):
algo = self.run_outside_algorithm(ts, prior_distr)
# self.assertTrue(np.allclose(
# downward[3], np.array([0, 1, 0.33508884])))
self.assertTrue(np.allclose(algo.outside[4], np.array([1, 1, 1])))
# self.assertTrue(np.allclose(
# posterior[3], np.array([0, 0.99616886, 0.00383114])))
# self.assertTrue(np.allclose(
# posterior[4], np.array([0, 0.83739361, 0.16260639])))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
for prior_distr in ('lognorm', 'gamma'):
algo = self.run_outside_algorithm(ts, prior_distr)
# self.assertTrue(np.allclose(
# downward[4], np.array([0, 1, 0.02187283])))
# self.assertTrue(np.allclose(
# downward[5], np.array([0, 1, 0.41703272])))
# Root, should this be 0,1,1 or 1,1,1
self.assertTrue(np.allclose(
algo.outside[6], np.array([1, 1, 1])))
def test_outside_before_inside_fails(self):
ts = utility_functions.single_tree_ts_n2()
priors = tsdate.build_prior_grid(ts)
theta = 1
lls = Likelihoods(ts, priors.timepoints, theta)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(priors, lls)
self.assertRaises(RuntimeError, algo.outside_pass)
def test_normalize_outside(self):
ts = msprime.simulate(50, Ne=10000, mutation_rate=1e-8, recombination_rate=1e-8)
normalize = self.run_outside_algorithm(ts, normalize=True)
no_normalize = self.run_outside_algorithm(ts, normalize=False)
self.assertTrue(
np.allclose(
normalize.outside.grid_data[:],
(no_normalize.outside.grid_data[:] /
np.max(
no_normalize.outside.grid_data[:], axis=1)[:, np.newaxis])))
def test_ignore_oldest_root(self):
ts = utility_functions.single_tree_ts_mutation_n3()
ignore_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=True)
use_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=False)
self.assertTrue(~np.array_equal(
ignore_oldest.outside[3], use_oldest.outside[3]))
# When node is not used in outside algorithm, all values should be equal
self.assertTrue(np.all(ignore_oldest.outside[3] == ignore_oldest.outside[3][0]))
self.assertTrue(np.all(use_oldest.outside[4] == use_oldest.outside[4][0]))
def test_ignore_oldest_root_two_mrcas(self):
ts = utility_functions.two_tree_two_mrcas()
ignore_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=True)
use_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=False)
self.assertTrue(~np.array_equal(
ignore_oldest.outside[7], use_oldest.outside[7]))
self.assertTrue(~np.array_equal(
ignore_oldest.outside[6], use_oldest.outside[6]))
# In this example, if the oldest root is *not* used in the outside algorithm,
# nodes 4 and 5 should have the same outside values. If it is used, node 5
# should seem younger than 4
self.assertTrue(np.array_equal(
ignore_oldest.outside[4], ignore_oldest.outside[5]))
self.assertTrue(~np.array_equal(
use_oldest.outside[4], use_oldest.outside[5]))
class TestTotalFunctionalValueTree(unittest.TestCase):
"""
Tests to ensure that we recover the total functional value of the tree.
We can also recover this property in the tree sequence in the special case where
all node times are known (or all bar one).
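Concretely, the checks below assert that sum_t inside[n, t] * outside[n, t]
is the same for every node and equals the inside values summed over the
final (root) row of the grid.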
"""
def find_posterior(self, ts, prior_distr):
grid = np.array([0, 1.2, 2])
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add(ts.num_samples, approximate=False)
mixture_priors = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
theta = 1
eps = 1e-6
lls = Likelihoods(ts, grid, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(prior_vals, lls)
algo.inside_pass()
posterior = algo.outside_pass(normalize=False)
self.assertTrue(np.array_equal(np.sum(
algo.inside.grid_data * algo.outside.grid_data, axis=1),
np.sum(algo.inside.grid_data * algo.outside.grid_data, axis=1)))
self.assertTrue(np.allclose(np.sum(
algo.inside.grid_data * algo.outside.grid_data, axis=1),
np.sum(algo.inside.grid_data[-1])))
return posterior, algo
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
for distr in ('gamma', 'lognorm'):
posterior, algo = self.find_posterior(ts, distr)
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
for distr in ('gamma', 'lognorm'):
posterior, algo = self.find_posterior(ts, distr)
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
for distr in ('gamma', 'lognorm'):
posterior, algo = self.find_posterior(ts, distr)
def test_one_tree_n3_mutation(self):
ts = utility_functions.single_tree_ts_mutation_n3()
for distr in ('gamma', 'lognorm'):
posterior, algo = self.find_posterior(ts, distr)
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
for distr in ('gamma', 'lognorm'):
posterior, algo = self.find_posterior(ts, distr)
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
for distr in ('gamma', 'lognorm'):
posterior, algo = self.find_posterior(ts, distr)
class TestGilTree(unittest.TestCase):
"""
Test results against hardcoded values Gil independently worked out
"""
def test_gil_tree(self):
for cache_inside in [False, True]:
ts = utility_functions.gils_example_tree()
span_data = SpansBySamples(ts)
prior_distr = 'lognorm'
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add(ts.num_samples, approximate=False)
grid = np.array([0, 0.1, 0.2, 0.5, 1, 2, 5])
mixture_prior = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_prior, grid, ts, prior_distr=prior_distr)
prior_vals.grid_data[0] = [0, 0.5, 0.3, 0.1, 0.05, 0.02, 0.03]
prior_vals.grid_data[1] = [0, 0.05, 0.1, 0.2, 0.45, 0.1, 0.1]
theta = 2
eps = 0.01
lls = Likelihoods(ts, grid, theta, eps=eps, normalize=False)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(prior_vals, lls)
algo.inside_pass(normalize=False, cache_inside=cache_inside)
algo.outside_pass(normalize=False)
self.assertTrue(
np.allclose(np.sum(algo.inside.grid_data * algo.outside.grid_data,
axis=1), [7.44449E-05, 7.44449E-05]))
self.assertTrue(
np.allclose(np.sum(algo.inside.grid_data * algo.outside.grid_data,
axis=1), np.sum(algo.inside.grid_data[-1])))
class TestOutsideEdgesOrdering(unittest.TestCase):
"""
Test that edges_by_child_desc() and edges_by_child_then_parent_desc() order edges
correctly.
"""
def edges_ordering(self, ts, fn):
fixed_nodes = set(ts.samples())
priors = tsdate.build_prior_grid(ts)
theta = None
liklhd = LogLikelihoods(ts, priors.timepoints, theta,
eps=1e-6, fixed_node_set=fixed_nodes, progress=False)
dynamic_prog = InOutAlgorithms(priors, liklhd, progress=False)
if fn == "outside_pass":
edges_by_child = dynamic_prog.edges_by_child_desc()
seen_children = list()
last_child_time = None
for child, edges in edges_by_child:
for edge in edges:
self.assertTrue(edge.child not in seen_children)
cur_child_time = ts.tables.nodes.time[child]
if last_child_time:
self.assertTrue(cur_child_time <= last_child_time)
seen_children.append(child)
last_child_time = ts.tables.nodes.time[child]
elif fn == "outside_maximization":
edges_by_child = dynamic_prog.edges_by_child_then_parent_desc()
seen_children = list()
last_child_time = None
for child, edges in edges_by_child:
last_parent_time = None
for edge in edges:
cur_parent_time = ts.tables.nodes.time[edge.parent]
if last_parent_time:
self.assertTrue(cur_parent_time >= last_parent_time)
last_parent_time = cur_parent_time
self.assertTrue(child not in seen_children)
cur_child_time = ts.tables.nodes.time[child]
if last_child_time:
self.assertTrue(cur_child_time <= last_child_time)
seen_children.append(child)
last_child_time = ts.tables.nodes.time[child]
def test_two_tree_outside_traversal(self):
"""
This is for the outside algorithm, where we simply want to traverse the ts
from oldest child nodes to youngest, grouping all child nodes of same id
together. In the outside maximization algorithm, we want to traverse the ts from
oldest child nodes to youngest, grouping all child nodes of same id together.
"""
ts = utility_functions.two_tree_two_mrcas()
self.edges_ordering(ts, "outside_pass")
self.edges_ordering(ts, "outside_maximization")
def test_simulated_inferred_outside_traversal(self):
ts = msprime.simulate(500, Ne=10000, length=5e4, mutation_rate=1e-8,
recombination_rate=1e-8, random_seed=12)
sample_data = tsinfer.SampleData.from_tree_sequence(ts, use_sites_time=False)
inferred_ts = tsinfer.infer(sample_data)
self.edges_ordering(inferred_ts, "outside_pass")
self.edges_ordering(inferred_ts, "outside_maximization")
class TestMaximization(unittest.TestCase):
"""
Test the outside maximization function
"""
def run_outside_maximization(self, ts, prior_distr="lognorm"):
priors = tsdate.build_prior_grid(ts, prior_distribution=prior_distr)
Ne = 0.5
theta = 1
eps = 1e-6
lls = Likelihoods(ts, priors.timepoints, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(priors, lls)
algo.inside_pass()
return lls, algo, algo.outside_maximization(Ne, eps=eps)
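# The tests below check that outside_maximization picks, for the root, the
# timepoint that maximizes its inside values, and for each child a timepoint
# no later than its parent's chosen time that maximizes the (normalized)
# mutation likelihood to the parent's time multiplied by the child's inside
# values.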
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
for prior_distr in ('lognorm', 'gamma'):
lls, algo, maximized_ages = self.run_outside_maximization(ts, prior_distr)
self.assertTrue(np.array_equal(
maximized_ages,
np.array([0, 0, lls.timepoints[np.argmax(algo.inside[2])]])))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
for prior_distr in ('lognorm', 'gamma'):
lls, algo, maximized_ages = self.run_outside_maximization(ts, prior_distr)
node_4 = lls.timepoints[np.argmax(algo.inside[4])]
ll_mut = scipy.stats.poisson.pmf(
0, (node_4 - lls.timepoints[:np.argmax(algo.inside[4]) + 1] + 1e-6) *
1 / 2 * 1)
result = ll_mut / np.max(ll_mut)
inside_val = algo.inside[3][:(np.argmax(algo.inside[4]) + 1)]
node_3 = lls.timepoints[np.argmax(
result[:np.argmax(algo.inside[4]) + 1] * inside_val)]
self.assertTrue(np.array_equal(
maximized_ages,
np.array([0, 0, 0, node_3, node_4])))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
for prior_distr in ('lognorm', 'gamma'):
lls, algo, maximized_ages = self.run_outside_maximization(ts, prior_distr)
node_5 = lls.timepoints[np.argmax(algo.inside[5])]
ll_mut = scipy.stats.poisson.pmf(
0, (node_5 - lls.timepoints[:np.argmax(algo.inside[5]) + 1] + 1e-6) *
1 / 2 * 0.8)
result = ll_mut / np.max(ll_mut)
inside_val = algo.inside[4][:(np.argmax(algo.inside[5]) + 1)]
node_4 = lls.timepoints[np.argmax(
result[:np.argmax(algo.inside[5]) + 1] * inside_val)]
ll_mut = scipy.stats.poisson.pmf(
0, (node_4 - lls.timepoints[:np.argmax(algo.inside[4]) + 1] + 1e-6) *
1 / 2 * 0.2)
result = ll_mut / np.max(ll_mut)
inside_val = algo.inside[3][:(np.argmax(algo.inside[4]) + 1)]
node_3 = lls.timepoints[np.argmax(
result[:np.argmax(algo.inside[4]) + 1] * inside_val)]
self.assertTrue(np.array_equal(
maximized_ages,
np.array([0, 0, 0, node_3, node_4, node_5])))
class TestDate(unittest.TestCase):
"""
Test inputs to tsdate.date()
"""
def test_date_input(self):
ts = utility_functions.single_tree_ts_n2()
self.assertRaises(ValueError, tsdate.date, ts, 1, method="foobar")
def test_sample_as_parent_fails(self):
ts = utility_functions.single_tree_ts_n3_sample_as_parent()
self.assertRaises(NotImplementedError, tsdate.date, ts, 1)
def test_recombination_not_implemented(self):
ts = utility_functions.single_tree_ts_n2()
self.assertRaises(NotImplementedError, tsdate.date, ts, 1,
recombination_rate=1e-8)
class TestBuildPriorGrid(unittest.TestCase):
"""
Test tsdate.build_prior_grid() works as expected
"""
def test_bad_timepoints(self):
ts = msprime.simulate(2, random_seed=123)
for bad in [-1, np.array([1]), np.array([-1, 2, 3]), np.array([1, 1, 1]),
"foobar"]:
self.assertRaises(ValueError, tsdate.build_prior_grid, ts, timepoints=bad)
for bad in [np.array(["hello", "there"])]:
self.assertRaises(TypeError, tsdate.build_prior_grid, ts, timepoints=bad)
def test_bad_prior_distr(self):
ts = msprime.simulate(2, random_seed=12)
self.assertRaises(ValueError, tsdate.build_prior_grid, ts,
prior_distribution="foobar")
class TestPosteriorMeanVar(unittest.TestCase):
"""
Test posterior_mean_var works as expected
"""
def test_posterior_mean_var(self):
ts = utility_functions.single_tree_ts_n2()
grid = np.array([0, 1.2, 2])
for distr in ('gamma', 'lognorm'):
posterior, algo = TestTotalFunctionalValueTree().find_posterior(ts, distr)
ts_node_metadata, mn_post, vr_post = posterior_mean_var(
ts, grid, posterior, 0.5)
self.assertTrue(np.array_equal(mn_post,
[0, 0, np.sum(grid * posterior[2]) /
np.sum(posterior[2])]))
def test_node_metadata_single_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
grid = np.array([0, 1.2, 2])
posterior, algo = TestTotalFunctionalValueTree().find_posterior(ts, "lognorm")
ts_node_metadata, mn_post, vr_post = posterior_mean_var(ts, grid, posterior, 0.5)
self.assertTrue(json.loads(
ts_node_metadata.node(2).metadata)["mn"] == mn_post[2])
self.assertTrue(json.loads(
ts_node_metadata.node(2).metadata)["vr"] == vr_post[2])
def test_node_metadata_simulated_tree(self):
larger_ts = msprime.simulate(
10, mutation_rate=1, recombination_rate=1, length=20)
_, mn_post, _, _, eps, _ = get_dates(larger_ts, 10000)
dated_ts = date(larger_ts, 10000)
metadata = dated_ts.tables.nodes.metadata
metadata_offset = dated_ts.tables.nodes.metadata_offset
unconstrained_mn = [
json.loads(met.decode())["mn"] for met in tskit.unpack_bytes(
metadata,
metadata_offset) if len(met.decode()) > 0]
self.assertTrue(np.array_equal(unconstrained_mn,
mn_post[larger_ts.num_samples:]))
self.assertTrue(np.all(
dated_ts.tables.nodes.time[larger_ts.num_samples:] >=
mn_post[larger_ts.num_samples:]))
class TestConstrainAgesTopo(unittest.TestCase):
"""
Test constrain_ages_topo works as expected
"""
def test_constrain_ages_topo(self):
"""
Set node 3 to be older than node 4 in two_tree_ts
"""
ts = utility_functions.two_tree_ts()
post_mn = np.array([0.0, 0.0, 0.0, 2.0, 1.0, 3.0])
eps = 1e-6
nodes_to_date = np.array([3, 4, 5])
constrained_ages = constrain_ages_topo(ts, post_mn, eps, nodes_to_date)
self.assertTrue(
np.array_equal(
np.array([0.0, 0.0, 0.0, 2.0, 2.000001, 3.0]), constrained_ages
)
)
def test_constrain_ages_topo_no_nodes_to_date(self):
ts = utility_functions.two_tree_ts()
post_mn = np.array([0.0, 0.0, 0.0, 2.0, 1.0, 3.0])
eps = 1e-6
nodes_to_date = None
constrained_ages = constrain_ages_topo(ts, post_mn, eps, nodes_to_date)
self.assertTrue(
np.array_equal(
np.array([0.0, 0.0, 0.0, 2.0, 2.000001, 3.0]), constrained_ages
)
)
def test_constrain_ages_topo_unary_nodes_unordered(self):
ts = utility_functions.single_tree_ts_with_unary()
post_mn = np.array([0.0, 0.0, 0.0, 2.0, 1.0, 0.5, 5.0, 1.0])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
np.allclose(
np.array([0.0, 0.0, 0.0, 2.0, 2.000001, 2.000002, 5.0, 5.000001]),
constrained_ages,
)
)
def test_constrain_ages_topo_part_dangling(self):
ts = utility_functions.two_tree_ts_n2_part_dangling()
post_mn = np.array([1.0, 0.0, 0.0, 0.1, 0.05])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
np.allclose(np.array([1.0, 0.0, 0.0, 1.000001, 1.000002]), constrained_ages)
)
def test_constrain_ages_topo_sample_as_parent(self):
ts = utility_functions.single_tree_ts_n3_sample_as_parent()
post_mn = np.array([0.0, 0.0, 0.0, 3.0, 1.0])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
np.allclose(np.array([0.0, 0.0, 0.0, 3.0, 3.000001]), constrained_ages)
)
def test_two_tree_ts_n3_non_contemporaneous(self):
ts = utility_functions.two_tree_ts_n3_non_contemporaneous()
post_mn = np.array([0.0, 0.0, 3.0, 4.0, 0.1, 4.1])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
np.allclose(np.array([0.0, 0.0, 3.0, 4.0, 4.000001, 4.1]), constrained_ages)
)
class TestPreprocessTs(unittest.TestCase):
"""
Test preprocess_ts works as expected
"""
def verify(self, ts, minimum_gap=None, remove_telomeres=None, **kwargs):
with self.assertLogs("tsdate.util", level="INFO") as logs:
if minimum_gap is not None and remove_telomeres is not None:
ts = tsdate.preprocess_ts(ts, minimum_gap=minimum_gap,
remove_telomeres=remove_telomeres)
elif minimum_gap is not None and remove_telomeres is None:
ts = tsdate.preprocess_ts(ts, minimum_gap=minimum_gap)
elif remove_telomeres is not None and minimum_gap is None:
ts = tsdate.preprocess_ts(ts, remove_telomeres=remove_telomeres)
else:
ts = tsdate.preprocess_ts(ts, **kwargs)
messages = [record.msg for record in logs.records]
self.assertIn("Beginning preprocessing", messages)
return ts
def test_no_sites(self):
ts = utility_functions.two_tree_ts()
self.assertRaises(ValueError, tsdate.preprocess_ts, ts)
def test_invariant_sites(self):
# Test that passing kwargs to simplify works as expected
ts = utility_functions.site_no_mutations()
with warnings.catch_warnings(record=True) as w:
removed = self.verify(ts)
self.assertTrue(removed.num_sites == 0)
self.assertTrue(len(w) == 1)
self.assertTrue(
tsdate.preprocess_ts(
ts, **{"filter_sites": False}).num_sites == ts.num_sites)
def test_no_intervals(self):
ts = utility_functions.two_tree_mutation_ts()
self.assertTrue(
ts.tables.edges == self.verify(ts, remove_telomeres=False).tables.edges)
self.assertTrue(
ts.tables.edges == self.verify(ts, minimum_gap=0.05).tables.edges)
def test_delete_interval(self):
ts = utility_functions.ts_w_data_desert(40, 60, 100)
trimmed = self.verify(ts, minimum_gap=20, remove_telomeres=False)
lefts = trimmed.tables.edges.left
rights = trimmed.tables.edges.right
self.assertTrue(
not np.any(np.logical_and(lefts > 41, lefts < 59)))
self.assertTrue(
not np.any(np.logical_and(rights > 41, rights < 59)))
def test_remove_telomeres(self):
ts = utility_functions.ts_w_data_desert(0, 5, 100)
removed = self.verify(ts, minimum_gap=ts.get_sequence_length())
lefts = removed.tables.edges.left
rights = removed.tables.edges.right
self.assertTrue(
not np.any(np.logical_and(lefts > 0, lefts < 4)))
from __future__ import division
import numpy as np
from numpy import newaxis as na
np.seterr(invalid='raise')
import scipy.stats as stats
import scipy.weave
import operator, copy
from ..basic.clustering import GammaCompoundDirichlet
from ..basic.util import rle
##################################################
# Misc #
##################################################
# TODO scaling by self.state_dim in concresampling is the confusing result of
# having a DirGamma object and not a WLDPGamma object! make one
# TODO reuse Multinomial/Categorical code
# TODO change concentrationresampling from mixin to metaprogramming?
# TODO add model ref, change trans_counts to cached property
class ConcentrationResampling(object):
def __init__(self,state_dim,alpha_a_0,alpha_b_0,gamma_a_0,gamma_b_0):
self.gamma_obj = GammaCompoundDirichlet(state_dim,gamma_a_0,gamma_b_0)
self.alpha_obj = GammaCompoundDirichlet(state_dim,alpha_a_0,alpha_b_0)
def resample(self):
# multiply by state_dim because the trans objects divide by it (since
# their parameters correspond to the DP parameters, and so they convert
# into weak limit scaling)
self.alpha_obj.resample(self.trans_counts,weighted_cols=self.beta)
self.alpha = self.alpha_obj.concentration
self.gamma_obj.resample(self.m)
self.gamma = self.gamma_obj.concentration
##############################################################
# HDP-HMM classes #
##############################################################
class HDPHMMTransitions(object):
def __init__(self,state_dim,alpha,gamma,beta=None,A=None):
self.state_dim = state_dim
self.alpha = alpha
self.gamma = gamma
if A is None or beta is None:
self.resample()
else:
self.A = A
self.beta = beta
### Gibbs sampling
def resample(self,states_list=[]):
trans_counts = self._count_transitions(states_list)
m = self._get_m(trans_counts)
self._resample_beta(m)
self._resample_A(trans_counts)
def copy_sample(self):
new = copy.deepcopy(self)
if hasattr(new,'trans_counts'):
del new.trans_counts
if hasattr(new,'m'):
del new.m
return new
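# The two steps below implement the auxiliary-variable sampler for the
# weak-limit HDP: beta is drawn from a Dirichlet whose parameters combine
# gamma / state_dim with the column sums of the table counts m, and each row
# of A is drawn from a Dirichlet (via row-normalized Gamma variates) with
# parameters alpha * beta + transition counts. The small 1e-2 offsets keep
# the parameters strictly positive.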
def _resample_beta(self,m):
self.beta = np.random.dirichlet(self.gamma / self.state_dim + m.sum(0) + 1e-2)
def _resample_A(self,trans_counts):
self.A = stats.gamma.rvs(self.alpha * self.beta + trans_counts + 1e-2)
self.A /= self.A.sum(1)[:,na]
def _count_transitions(self,states_list):
trans_counts = np.zeros((self.state_dim,self.state_dim),dtype=np.int32)
for states in states_list:
if len(states) >= 2:
for idx in xrange(len(states)-1):
trans_counts[states[idx],states[idx+1]] += 1
self.trans_counts = trans_counts
return trans_counts
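# m[i, j] holds the Chinese-restaurant-franchise "table count" auxiliary
# variables: for each of the trans_counts[i, j] transitions, a new table is
# opened with probability alpha * beta[j] / (k + alpha * beta[j]), and
# m[i, j] is the number of tables opened. _get_m_slow is the pure-NumPy
# version; _get_m inlines the same loop in C via scipy.weave, which is only
# available on Python 2-era SciPy.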
def _get_m_slow(self,trans_counts):
m = np.zeros((self.state_dim,self.state_dim),dtype=np.int32)
if not (0 == trans_counts).all():
for (rowidx, colidx), val in np.ndenumerate(trans_counts):
if val > 0:
m[rowidx,colidx] = (np.random.rand(val) < self.alpha * self.beta[colidx] \
/(np.arange(val) + self.alpha*self.beta[colidx])).sum()
self.m = m
return m
def _get_m(self,trans_counts):
N = trans_counts.shape[0]
m = np.zeros((N,N),dtype=np.int32)
if not (0 == trans_counts).all():
alpha, beta = self.alpha, self.beta
scipy.weave.inline(
'''
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
int tot = 0;
for (int k=0; k<trans_counts[N*i+j]; k++) {
tot += ((double)rand())/RAND_MAX < (alpha * beta[j])/(k+alpha*beta[j]);
}
m[N*i+j] = tot;
}
}
''',
['trans_counts','N','m','alpha','beta'],
extra_compile_args=['-O3'])
self.m = m
return m
### max likelihood
# TODO these methods shouldn't really be in this class... maybe put them in
# a base class
def max_likelihood(self,stateseqs,expectations_list=None):
if expectations_list is not None:
trans_counts = self._count_weighted_transitions(expectations_list,self.A)
else:
trans_counts = self._count_transitions(stateseqs)
errs = np.seterr(invalid='ignore',divide='ignore')
self.A = trans_counts / trans_counts.sum(1)[:,na]
np.seterr(**errs)
self.A[np.isnan(self.A)] = 0.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $File: test-feature.py
# $Date: Wed Dec 11 22:01:13 2013 +0800
# $Author: <NAME> <zxytim[at]gmail[dot]com>
import glob
import traceback
import sys
import random
import os
import time
import numpy as np
izip = zip  # itertools.izip was removed in Python 3; the builtin zip is lazy
import multiprocessing
import operator
from collections import defaultdict
# from sklearn.mixture import GMM
from sklearn.mixture import GaussianMixture as GMM
from .feature import BOB as bob_MFCC
from .feature import MFCC
from .sample import Sample
concurrency = multiprocessing.cpu_count()
class GMMSet:
def __init__(self, gmm_order=32):
self.gmms = []
self.gmm_order = gmm_order
self.y = []
def fit_new(self, x, label):
self.y.append(label)
gmm = GMM(self.gmm_order)
gmm.fit(x)
self.gmms.append(gmm)
def cluster_by_label(self, X, y):
Xtmp = defaultdict(list)
for ind, x in enumerate(X):
label = y[ind]
Xtmp[label].extend(x)
yp, Xp = zip(*Xtmp.items())
return Xp, yp
def fit(self, X, y):
X, y = self.cluster_by_label(X, y)
for ind, x in enumerate(X):
self.fit_new(x, y[ind])
def gmm_score(self, gmm, x):
return np.exp(np.sum(gmm.score(x)) / 1000)
def predict_one(self, x):
scores = [self.gmm_score(gmm, x) for gmm in self.gmms]
return self.y[max(enumerate(scores), key=operator.itemgetter(1))[0]]
def predict(self, X):
return map(self.predict_one, X)
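# Minimal usage sketch for GMMSet (X_train, y_train, X_test below are
# hypothetical lists of per-utterance feature matrices and speaker labels,
# as produced in main()):
#   gmmset = GMMSet()
#   gmmset.fit(X_train, y_train)
#   predicted_labels = list(gmmset.predict(X_test))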
def predict_task(gmmset, x_test):
return gmmset.predict_one(x_test)
def do_test(X_train, y_train, X_test, y_test):
start = time.time()
gmmset = GMMSet()
print('training ...')
gmmset.fit(X_train, y_train)
nr_correct = 0
print('time elapsed: ', time.time() - start)
print('predicting...')
start = time.time()
pool = multiprocessing.Pool(concurrency)
predictions = []
for x_test, label_true in izip(X_test, y_test):
predictions.append(pool.apply_async(predict_task, args=(gmmset, x_test)))
for ind, (x_test, label_true) in enumerate(zip(X_test, y_test)):
label_pred = predictions[ind].get()
is_wrong = '' if label_pred == label_true else ' wrong'
print("{} {}{}".format(label_pred, label_true, is_wrong))
if label_pred == label_true:
nr_correct += 1
print('time elapsed: ', time.time() - start)
print("{}/{} {:.2f}".format(nr_correct, len(y_test), float(nr_correct) / len(y_test)))
pool.close()
def main():
if len(sys.argv) == 1:
print("Usage: {} <dir_contains_feature_file>".format(sys.argv[0]))
sys.exit(1)
dirs = sys.argv[1]
print('reading data ...')
X_train, y_train = [], []
with open(os.path.join(dirs, 'enroll.lst')) as f:
for line in f:
line = line.split('=')
label = int(line[0])
fname = line[1].strip().rsplit('/')[1]
mat = []
with open(os.path.join(dirs, fname)) as feaf:
for line in feaf:
line = [float(x) for x in line.strip().split()]
line = np.array(line)
mat.append(line)
X_train.append(np.array(mat))
y_train.append(label)
print("length of X_train: ", len(X_train))
X_test, y_test = [], []
with open(os.path.join(dirs, 'test.lst')) as f:
for line in f:
line = line.split('=')
label = int(line[0])
fname = line[1].strip().rsplit('/')[1]
mat = []
with open(os.path.join(dirs, fname)) as feaf:
for line in feaf:
line = [float(x) for x in line.strip().split()]
mat.append(np.array(line))
import numpy as np
import time
import torch
from lietorch import SE3, LieGroupParameter
from scipy.spatial.transform import Rotation as R
import cv2
import configargparse
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from utils import *
from render_utils import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
np.random.seed(0)
"""
example usage:
- install any missing requirements from https://github.com/salykovaa/inerf/blob/main/requirements.txt
- cd to folder containing demo_est_rel_pose.py
- python demo_est_rel_pose.py --config relative_pose_estimation_configs/teddy_bear.txt
"""
def estimate_relative_pose(coarse_model, fine_model, renderer, sensor_image, start_pose, K, general_args, extra_arg_dict, obs_img_pose=None, obs_img=None):
b_print_comparison_metrics = obs_img_pose is not None
b_generate_overlaid_images = b_print_comparison_metrics and obs_img is not None
obs_img_noised = sensor_image
W_obs = sensor_image.shape[0]
H_obs = sensor_image.shape[1]
# find points of interest of the observed image
POI = find_POI(obs_img_noised, DEBUG) # xy pixel coordinates of points of interest (N x 2)
obs_img_noised = (np.array(obs_img_noised) / 255.).astype(np.float32)
# create meshgrid from the observed image
coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, W_obs - 1, W_obs), np.linspace(0, H_obs - 1, H_obs)), -1), dtype=int)
# create sampling mask for interest region sampling strategy
interest_regions = np.zeros((H_obs, W_obs, ), dtype=np.uint8)
interest_regions[POI[:,1], POI[:,0]] = 1
I = extra_arg_dict['dil_iter']
interest_regions = cv2.dilate(interest_regions, np.ones((extra_arg_dict['kernel_size'], extra_arg_dict['kernel_size']), np.uint8), iterations=I)
interest_regions = np.array(interest_regions, dtype=bool)
interest_regions = coords[interest_regions]
# not_POI contains all points except of POI
coords = coords.reshape(H_obs * W_obs, 2)
not_POI = set(tuple(point) for point in coords) - set(tuple(point) for point in POI)
not_POI = np.array([list(point) for point in not_POI]).astype(int)
# Create pose transformation model
start_pose = SE3_to_trans_and_quat(start_pose)
starting_pose = SE3(torch.from_numpy(start_pose).float().cuda())
starting_pose = LieGroupParameter(starting_pose)
optimizer = torch.optim.Adam(params=[starting_pose], lr=extra_arg_dict['lrate'], betas=(0.9, 0.999))
# calculate angles and translation of the observed image's pose
if b_print_comparison_metrics:
phi_ref = np.arctan2(obs_img_pose[1,0], obs_img_pose[0,0])*180/np.pi
theta_ref = np.arctan2(-obs_img_pose[2, 0], np.sqrt(obs_img_pose[2, 1]**2 + obs_img_pose[2, 2]**2))*180/np.pi
psi_ref = np.arctan2(obs_img_pose[2, 1], obs_img_pose[2, 2])*180/np.pi
translation_ref = np.sqrt(obs_img_pose[0,3]**2 + obs_img_pose[1,3]**2 + obs_img_pose[2,3]**2)
testsavedir = os.path.join(extra_arg_dict['output_dir'], extra_arg_dict['model_name'])
os.makedirs(testsavedir, exist_ok=True)
# imgs - array with images are used to create a video of optimization process
if b_generate_overlaid_images:
imgs = []
for k in range(300):
rand_inds = np.random.choice(interest_regions.shape[0], size=extra_arg_dict['batch_size'], replace=False)
batch = interest_regions[rand_inds]
target_s = obs_img_noised[batch[:, 1], batch[:, 0]]
target_s = torch.Tensor(target_s).to(device)
rays_o, rays_d = get_rays(H_obs, W_obs, K, starting_pose.retr().matrix()) # (H, W, 3), (H, W, 3)
rays_o = rays_o[batch[:, 1], batch[:, 0]] # (N_rand, 3)
rays_d = rays_d[batch[:, 1], batch[:, 0]]
batch_rays = torch.stack([rays_o, rays_d], 0)
rgb, _, _, _ = renderer.render_from_rays(H_obs,
W_obs,
K,
chunk=general_args.chunk,
rays=batch_rays,
coarse_model=coarse_model,
fine_model=fine_model,
retraw=True)
optimizer.zero_grad()
loss = img2mse(rgb, target_s)
loss.backward()
optimizer.step()
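# Exponential learning-rate decay: the rate is scaled by a factor of 0.8
# per 100 optimization steps.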
new_lrate = extra_arg_dict['lrate'] * (0.8 ** ((k + 1) / 100))
for param_group in optimizer.param_groups:
param_group['lr'] = new_lrate
# print results periodically
if b_print_comparison_metrics and ((k + 1) % 20 == 0 or k == 0):
print('Step: ', k)
print('Loss: ', loss)
with torch.no_grad():
pose_dummy = starting_pose.retr().matrix().cpu().detach().numpy()
# calculate angles and translation of the optimized pose
phi = np.arctan2(pose_dummy[1, 0], pose_dummy[0, 0]) * 180 / np.pi
theta = np.arctan2(-pose_dummy[2, 0], np.sqrt(pose_dummy[2, 1] ** 2 + pose_dummy[2, 2] ** 2)) * 180 / np.pi
psi = np.arctan2(pose_dummy[2, 1], pose_dummy[2, 2]) * 180 / np.pi
translation = np.sqrt(pose_dummy[0,3]**2 + pose_dummy[1,3]**2 + pose_dummy[2,3]**2)
#translation = pose_dummy[2, 3]
# calculate error between optimized and observed pose
phi_error = abs(phi_ref - phi) if abs(phi_ref - phi)<300 else abs(abs(phi_ref - phi)-360)
theta_error = abs(theta_ref - theta) if abs(theta_ref - theta)<300 else abs(abs(theta_ref - theta)-360)
psi_error = abs(psi_ref - psi) if abs(psi_ref - psi)<300 else abs(abs(psi_ref - psi)-360)
rot_error = phi_error + theta_error + psi_error
translation_error = abs(translation_ref - translation)
print('Rotation error: ', rot_error)
print('Translation error: ', translation_error)
print('-----------------------------------')
'''
if b_generate_overlaid_images:
with torch.no_grad():
rgb, _, _, _ = renderer.render_from_pose(H_obs,
W_obs,
K,
chunk=general_args.chunk,
c2w=starting_pose.retr().matrix()[:3, :4],
coarse_model=coarse_model,
fine_model=fine_model,
retraw=True)
rgb = rgb.cpu().detach().numpy()
rgb8 = to8b(rgb)
ref = to8b(obs_img)
filename = os.path.join(testsavedir, str(k)+'.png')
dst = cv2.addWeighted(rgb8, 0.7, ref, 0.3, 0)
imageio.imwrite(filename, dst)
imgs.append(dst)
if b_generate_overlaid_images:
imageio.mimwrite(os.path.join(testsavedir, 'video.gif'), imgs, fps=8) #quality = 8 for mp4 format
'''
print("Done with main relative_pose_estimation loop")
def SE3_to_trans_and_quat(data):
rot = data[:3, :3]
trans = data[:3, 3]
r = R.from_matrix(rot)
quat = r.as_quat()
return np.concatenate([trans, quat])
def find_POI(img_rgb, DEBUG=False): # img - RGB image in range 0...255
img = np.copy(img_rgb)
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sift = cv2.SIFT_create()
keypoints = sift.detect(img_gray, None)
if DEBUG:
img = cv2.drawKeypoints(img_gray, keypoints, img)
show_img("Detected points", img)
xy = [keypoint.pt for keypoint in keypoints]
xy = np.array(xy).astype(int)
# Remove duplicate points
xy_set = set(tuple(point) for point in xy)
xy = np.array([list(point) for point in xy_set]).astype(int)
return xy # pixel coordinates
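# The three helpers below build 4x4 homogeneous rotation matrices about the
# x-, y- and z-axes respectively (angles in radians).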
rot_psi = lambda phi: np.array([
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1]])
rot_theta = lambda th: np.array([
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1]])
rot_phi = lambda psi: np.array([
[np.cos(psi), -np.sin(psi), 0, 0],
[np.sin(psi), np.cos(psi), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
import os.path
from os import path
import sys
import numpy as np
from random import seed
from random import randrange
# FIX: OPTIMAL SOLUTION SHOWING
precision = 0.0001
# Prints a bar
def print_split():
print("--------------------------------------------------\n")
# Prints current vectors A, b, c
def print_current(A, b, c):
print("c:\n", c)
#print(c)
print("A:\n", A)
#print(A)
print("b:\n", b)
#print(b)
def print_system(A, b, c, F):
print("c:\n", c)
#print(c)
print("A:\n", A)
#print(A)
print("b:\n", b)
#print(b)
print("F:\n", F)
def swap_cols_2d(arr, frm, to):
arr[:,[frm, to]] = arr[:,[to, frm]]
def swap(arr, frm, to):
tmp = arr[to]
arr[to] = arr[frm]
arr[frm] = tmp
def append_horizontally(A, B):
if len(A) != len(B):
raise Exception('The matrices must have the same height!\n')
else:
return np.hstack((A, B))
def get_first_negative(vec):
vec_length = len(vec)
for i in range(vec_length):
if vec[i] < 0:
return i
return -1
def is_non_negative(b):
b_length = len(b)
for i in range(b_length):
if b[i] < 0:
return i
return -1
def is_non_negative_matrix_row(A, row):
A_width = len(A[0])
for i in range(A_width):
if A[row, i] < 0:
return i
return -1
def find_pivot(c, A, row):
pivot_index = 0
max_value = -np.Inf
A_width = len(A[0])
for i in range(A_width):
if A[row, i] < 0:
value = c[i] / A[row, i]
if value > max_value:
max_value = value
pivot_index = i
return pivot_index
def update_system(c, A, b, F, p_row, p_col):
n = len(A)
m = len(A[0])
# Row of pivot / pivot
coef = A[p_row, p_col]
for i in range(m):
A[p_row, i] /= coef
b[p_row] /= coef
# Clearing p_col columns
for i in range(n):
if i == p_row:
continue
coef = -A[i, p_col]
for j in range(m):
A[i, j] += coef * A[p_row, j]
b[i] += coef * b[p_row]
coef = -c[p_col]
for j in range(m):
c[j] += coef * A[p_row, j]
F += coef * b[p_row]
return c, A, b, F
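# update_system performs one simplex pivot: the pivot row (and its
# right-hand side) is scaled so the pivot element becomes 1, the pivot
# column is eliminated from every other row and from the objective row c,
# and the objective value F is updated by the same elimination step.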
def input_vars(option):
input_type = 0
#
# --- Two phase simplex ---
#
if option == 1:
while input_type != 1 and input_type != 2:
input_type = int(input("Select the type of input:\n\t1: Manual input\n\t2: Input from file\nSelected: "))
print_split()
# --- Manual input ---
if input_type == 1:
print("---- NOTE ----\nThe file must be in the following format:\nN M\t\t\t\tWhere N - number of equations and M - number of variables\n\"min\" or \"max\"\t\t\tGoal function minimisation or maximisation\nc1 c2 c3 ... cM\nA11 A12 ... A1M _ b1\nA21 A22 ... A2M _ b2\n................\nAN1 AN2 ... ANM _ bN\n for x1, x2, x3, ... xM >= 0\t\tWhere '_' should be '<', '>' or '='\n")
print_split()
n = int(input("Input the number of equations: "))
m = int(input("Input the number of variables: "))
# Reserving and initializing space for the Objective function
in_c = np.zeros(m, dtype=np.float64)
# Reserving and initializing space for the coeficients
in_A = np.zeros((n, m), dtype=np.float64)
# Reserving and initializing space for the solution values
in_b = np.zeros(n, dtype=np.float64)
otype = input("Input \"min\" or \"max\" for the Objective function: ")
if otype != "min" and otype != "max":
print("The Objective function type is wrong it should be either \"min\" or \"max\"!\n")
return -1
# Input the Objective function
tmp = input("Objective function: ").split()
i = 0
for c in tmp:
in_c[i] = float(c)
i += 1
# Converting the Objective function to minimise since the program is made with minimisation
if otype == "max":
print("Converting max to min.")
in_c *= -1
sign = np.empty(n, dtype=str)
unit_columns = []
# Inputing the variables and solutions to the arrays
for i in range(n):
# Input a line to a tmp variable
tmp = input(str(i) + ": ").split()
for j in range(m):
in_A[i, j] = float(tmp[j])
sign[i] = tmp[m]
in_b[i] = float(tmp[m + 1])
# No transformation required, since this program works with "<"
# And "=" can be represented both as ">=" and "<="
if sign[i] == "=":
continue
# If the entered sign is ">" than we transform it to "<"
if sign[i] == ">":
for j in range(m):
in_A[i, j] *= -1
in_b[i] *= -1
unit_columns.append(i)
# Add new variables to convert inequation to equation:
# x1 + x2 + x3 + ... + xn <= c
# => x1 + x2 + x3 + ... + xn + x = c
# Thus appending an identity matrix
unit_columns.sort()
A_tmp = np.empty((n, 0))
for i in unit_columns:
A_unit = np.zeros((n, 1))
for j in range(n):
if i == j:
A_unit[j] = 1
A_tmp = np.hstack((A_tmp, A_unit))
in_c = np.hstack((0, in_c))
in_A = np.hstack((A_tmp, in_A))
# Set b to be positive:
for i in range(n):
if(in_b[i] < 0):
if i in unit_columns:
unit_columns.remove(i)
in_A[i] *= -1
in_b[i] *= -1
elif input_type == 2:
# Read from file
print("---- NOTE ----\nEnter the relative path to the file, from under \"/examples/\" \ne.g. For file 1.txt, write \"1.txt\", that will load \"/examples/1.txt\"")
print_split()
print("---- NOTE ----\nThe file must be in the following format:\nN M\t\t\t\tWhere N - number of equations and M - number of variables\n\"min\" or \"max\"\t\t\tGoal function minimisation or maximisation\nc1 c2 c3 ... cM\nA11 A12 ... A1M _ b1\nA21 A22 ... A2M _ b2\n................\nAN1 AN2 ... ANM _ bN\n for x1, x2, x3, ... xM >= 0\t\tWhere '_' should be '<', '>' or '='\n")
print_split()
print_split()
print_split()
file_name = input("Enter the file name: ")
# Creating absolute path to the file in folder examples
file_dir = os.path.dirname(__file__)
rel_path = "examples/" + file_name
abs_file_path = os.path.join(file_dir, rel_path)
# Checking if the file exists
if os.path.exists(abs_file_path) == False:
# File not found, throw error
print("The file doesn't exist!")
raise Exception("The file didn't load because it doesn't exist")
# File found, opening
f = open(abs_file_path, 'r')
in_c = []
# Read 2 first lines
for i in range(3):
if i == 0:
n, m = [int(x) for x in next(f).split()] # Reads the dimensions
if i == 1:
otype = next(f).split()
otype = otype[0]
if otype != "min" and otype != "max":
f.close()
raise Exception("The Objective function type is wrong it should be either \"min\" or \"max\"!\n")
if i == 2:
in_c = np.append(in_c, [float(x) for x in next(f).split()]) # Reads the Objective functiom
# Converting the Objective function to minimise since the program is made with minimisation
if otype == "max":
print("Converting max to min.")
in_c *= -1
tmp_array = []
for line in f: # Read the next lines
tmp_array.append([str(x) for x in line.split()])
f.close() # File not needed, all is in tmp_array
# Formatting the input
in_A = []
sign = []
in_b = []
unit_columns = []
# print(tmp_array)
for i in range(n):
for j in range(m + 2):
if j < m:
in_A.append(float(tmp_array[i][j]))
elif j == m:
sign.append(tmp_array[i][j])
elif j == (m + 1):
in_b.append(float(tmp_array[i][j]))
# No transformation required, since this program works with "<"
# And "=" can be represented both as ">=" and "<="
if sign[i] == "=":
continue
# If the entered sign is ">" than we transform it to "<"
if sign[i] == ">":
in_A_length = len(in_A)
for j in range(i*m, in_A_length, 1):
in_A[j] *= -1
in_b[i] *= -1
unit_columns.append(i)
# Converting the final list to numpy array
in_A = np.array(in_A)
in_A = in_A.reshape(n, m)
# Add new variables to convert inequation to equation:
# x1 + x2 + x3 + ... + xn <= c
# => x1 + x2 + x3 + ... + xn + x = c
# Thus appending an identity matrix
unit_columns.sort()
A_tmp = np.empty((n, 0))
for i in unit_columns:
A_unit = np.zeros((n, 1))
for j in range(n):
if i == j:
A_unit[j] = 1
A_tmp = np.hstack((A_tmp, A_unit))
in_c = np.hstack((0, in_c))
in_A = np.hstack((A_tmp, in_A))
# Set b to be positive:
for i in range(n):
if(in_b[i] < 0):
if i in unit_columns:
unit_columns.remove(i)
in_A[i] *= -1
in_b[i] *= -1
return in_A, in_c, in_b, unit_columns
#
# --- Dual simplex ---
#
elif option == 2:
while input_type != 1 and input_type != 2:
input_type = int(input("Select the type of input:\n\t1: Manual input\n\t2: Input from file\nSelected: "))
print_split()
# --- Manual input ---
if input_type == 1:
print("---- NOTE ----\nThe file must be in the following format:\nN M\t\t\t\tWhere N - number of equations and M - number of variables\n\"min\" or \"max\"\t\t\tGoal function minimisation or maximisation\nc1 c2 c3 ... cM\nA11 A12 ... A1M _ b1\nA21 A22 ... A2M _ b2\n................\nAN1 AN2 ... ANM _ bN\n for x1, x2, x3, ... xM >= 0\t\tWhere '_' should be '<', '>' or '='\n")
print_split()
n = int(input("Input the number of equations: "))
m = int(input("Input the number of variables: "))
# Reserving and initializing space for the Objective function
in_c = np.zeros(m, dtype=np.float64)
"""
Test suite for the hits module.
<NAME> 2018
"""
import unittest
import numpy as np
import pandas as pd
import warnings
from array import array
from numba import NumbaWarning
import math
# Functions to run tests on
# Equivalent to from . import * but more verbose
try:
from hits.hitdetector import identify_through_magnitude,\
plot_anomaly, identify_through_gradient, Abuelmaatti, point_density, \
filter_through_response, anomaly_density, rms_diff, stdev_diff, rms, \
stdev
from hits.hitsimulator import hit_distribution, flux, p_distribution, \
freq, generate_event, generate_data, masses, tp_distribution, \
time_distribution, AOCSResponse
except(ImportError):
from .hitdetector import identify_through_magnitude, plot_anomaly, \
identify_through_gradient, Abuelmaatti, point_density, \
filter_through_response, anomaly_density, rms_diff, stdev_diff, rms, \
stdev
from .hitsimulator import hit_distribution, flux, p_distribution, freq, \
generate_event, generate_data, masses, tp_distribution, \
time_distribution, AOCSResponse
# -----------hitdetector.py tests----------------------------------------------
class TestHitDetectorIdentifyFuncs(unittest.TestCase):
def setUp(self):
# Create dummy data with anomalies to test for hits.
obmt = np.linspace(0, 10, 1000)
rate = np.zeros(1000)
# Generate a random number of hits between 4 and 25.
self.hits = np.random.randint(4, 25)
hit_loc = np.linspace(2, 900, self.hits)
from SemiSupervisedLearning import visualisations
import matplotlib.pyplot as plt
import numpy as np
import torch
import pathlib
def to_cuda(elements):
"""
Transfers every object in elements to GPU VRAM if available.
elements can be a object or list/tuple of objects
"""
if torch.cuda.is_available():
if type(elements) == tuple or type(elements) == list:
return [x.cuda() for x in elements]
return elements.cuda()
return elements
def get_data_to_tensors(data, batch_size):
train_data_subsample, test_data = [], []
for (images, classes) in data.batch_generator(training=True, batch_size=batch_size):
images = normalize_images(images)
images, classes = torch.from_numpy(images).float(), torch.from_numpy(classes).float()
images = images.permute(0, 3, 1, 2) # change axis from NHWC to NCHW
batch = (images, classes)
train_data_subsample.append(batch)
for (images, classes) in data.batch_generator(training=False, batch_size=batch_size):
images = normalize_images(images)
images, classes = torch.from_numpy(images).float(), torch.from_numpy(classes).float()
images = images.permute(0, 3, 1, 2) # change axis from NHWC to NCHW
batch = (images, classes)
test_data.append(batch)
return (train_data_subsample, test_data)
def normalize_images(images):
# Assuming pixel values are more or less the same for all images
# We pick the first image of the batch
image = images[0]
pixels = np.asarray(image)
means = pixels.mean(axis=(0, 1), dtype='float64')
stds = pixels.std(axis=(0, 1), dtype='float64')
pixels = (pixels - means) / stds
# Apply normalization to all images in the batch
norm_images = []
for i in range(len(images)):
norm_images.append((images[i] - means) / stds)
norm_images = np.array(norm_images)
return norm_images
def make_reconstructions(autoencoder, vis_data, num_images, batch_size, image_dimensions, title):
# Extremely inefficient way of doing this
# Forward all images, then selecting the ones i want to visualize
images = []
reconstructions = []
labels = []
for image_batch, label in vis_data:
#Make reconstruction
image_batch = to_cuda(image_batch)
reconstruction_batch, aux = autoencoder(image_batch)
# Convert from tensor to numpy
image_batch = image_batch.reshape(
image_batch.shape[0],
image_batch.shape[2],
image_batch.shape[3],
image_batch.shape[1]
)
image_batch = image_batch.cpu().detach().numpy()
label = label.cpu().detach().numpy()
reconstruction_batch = reconstruction_batch.reshape(
reconstruction_batch.shape[0],
reconstruction_batch.shape[2],
reconstruction_batch.shape[3],
reconstruction_batch.shape[1]
)
reconstruction_batch = reconstruction_batch.cpu().detach().numpy()
images.extend(image_batch)
labels.extend(label)
reconstructions.extend(reconstruction_batch)
vis_images = images[1000: 1000 + num_images]
vis_reconstructions = reconstructions[1000: 1000 +num_images]
vis_labels = labels[1000: 1000 + num_images]
#
visualisations.show_images_and_reconstructions(np.array(vis_images), title, vis_labels)
visualisations.show_images_and_reconstructions(np.array(vis_reconstructions),
f'{title}_reconstructions', vis_labels)
return np.array(images), np.array(reconstructions), np.array(labels)
# Author: <NAME> <<EMAIL>>
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
def global_optimization(objective_function, boundaries, optimizer, maxf,
x0=None, approx_grad=True, random=np.random,
*args, **kwargs):
"""Maximize objective_function within given boundaries.
This function optimizes an objective function in a search space with the
given boundaries. The optimizer may use up to maxf evaluations of the
objective function. The optimizer is specified by a string which may be
any of "direct", "direct+lbfgs", "random", "random+lbfgs", "cmaes", or
"cmaes+lbfgs".
"""
if optimizer in ["direct", "direct+lbfgs"]:
# Use DIRECT to perform approximate global optimization of
# objective_function
try:
import nlopt
except ImportError:
raise Exception("'direct' optimizer requires the package nlopt."
"You may install it using "
"'sudo apt-get install python-nlopt'")
nlopt.srand(0)
opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, boundaries.shape[0])
opt.set_lower_bounds(boundaries[:, 0])
opt.set_upper_bounds(boundaries[:, 1])
opt.set_maxeval(maxf)
def prox_func(params, grad):
# Note: nlopt minimizes function, hence the minus
func_value = -objective_function(params)
if np.iterable(func_value):
return func_value[0]
else:
return func_value
opt.set_min_objective(prox_func)
x0 = opt.optimize(boundaries.mean(1))
elif optimizer in ["random", "random+lbfgs"]:
# Sample maxf points uniform randomly from the search space and
# remember the one with maximal objective value
if x0 is not None:
f_opt = objective_function(x0)
else:
f_opt = -np.inf
for _ in range(maxf):
x0_trial = \
random.uniform(size=boundaries.shape[0]) \
* (boundaries[:, 1] - boundaries[:, 0]) \
+ boundaries[:, 0]
f_trial = objective_function(x0_trial)
if f_trial > f_opt:
f_opt = f_trial
x0 = x0_trial
elif optimizer in ["cmaes", "cmaes+lbfgs"]:
# Use CMAES to perform approximate global optimization of
# objective_function
if x0 is None:
x0 = boundaries.mean(1)
x0 = fmin_cma(lambda x, compute_gradient=False: -objective_function(x),
x0=x0, xL=boundaries[:, 0], xU=boundaries[:, 1],
sigma0=kwargs.get("sigma0", 0.01), maxfun=maxf)
elif x0 is None:
raise Exception("Unknown optimizer %s and x0 is None."
% optimizer)
if optimizer in ["direct", "random", "cmaes"]:
# return DIRECT/Random/CMAES solution without refinement
return x0
elif optimizer in ["lbfgs", "direct+lbfgs", "random+lbfgs", "cmaes+lbfgs"]:
# refine solution with L-BFGS
def proxy_function(x):
return -objective_function(x)
res = fmin_l_bfgs_b(proxy_function, x0,
approx_grad=True,
bounds=boundaries, disp=0)
return res[0]
else:
raise Exception("Unknown optimizer %s" % optimizer)
def fmin_cma(objective_function, x0, xL, xU, sigma0=0.01, maxfun=1000):
""" Minimize objective function in hypercube using CMA-ES.
This function optimizes an objective function in a search space bounded by
a hypercube. One corner of the hypercube is given by xL and the opposite by
xU. The initial mean of the search distribution is given by x0. The search
space is scaled internally to the unit hypercube to accommodate CMA-ES.
Parameters
----------
objective_function : callable
The objective function to be minimized. Must return a scalar value
x0 : array-like
Initial mean of the search distribution
xL: array-like
Lower, left corner of the bounding hypercube
xU: array-like
Upper, right corner of the bounding hypercube
sigma0: float, default=0.01
Initial variance of search distribution of CMA-ES
maxfun: int, default=1000
Maximum number of evaluations of the objective function after which the
optimization is stopped.
Returns
----------
x_opt : array-like
The minimum of objective function identified by CMA-ES
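Illustrative sketch (not part of the original docstring; requires the
optional bolero package, and mirrors the call pattern used in
global_optimization above):

    x_opt = fmin_cma(lambda x, compute_gradient=False: np.sum(np.asarray(x) ** 2),
                     x0=[0.5, 0.5], xL=[0.0, 0.0], xU=[1.0, 1.0],
                     sigma0=0.05, maxfun=200)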
"""
try:
from bolero.optimizer import fmin
except ImportError:
raise Exception("'cmaes' optimizer requires the package bolero.")
x0 = np.asarray(x0)
xL = np.asarray(xL)
from __future__ import absolute_import, division
import numpy as np
from ..logger import msg
from ..utils import area_of_polygon, getMid
def calc_kG(d, mesh, prop_from_node, silent=True):
"""Calculate the geometric stiffness matrix for a given input mesh
Parameters
----------
d : (N) array-like
Result from a static analysis, used to compute the current membrane
stress distribution
mesh : :class:`pyNastran.bdf.BDF` object
The object must have the proper edge references as those returned by
:func:`.read_mesh` or :func:`.read_delaunay`
prop_from_node : bool
True if the constitutive properties are assigned per node; otherwise they
are considered assigned per element
Returns
-------
kG : (N, N) array-like
The geometric stiffness matrix
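Notes
-----
For each edge the constitutive matrices A and B are either interpolated
from the surrounding nodes (``prop_from_node=True``, with weights
4/9, 4/9, 1/9 when the edge has one adjacent triangle and
5/12, 5/12, 1/12, 1/12 when it has two) or area-averaged from the one or
two adjacent triangles.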
"""
msg('Calculating KG...', silent=silent)
dof = 5
n = d.shape[0] // dof
#TODO allocate less memory here...
kG = np.zeros((n*dof, n*dof), dtype=np.float64)
for edge in mesh.edges.values():
tria1 = edge.trias[0]
Ac = edge.Ac
ipts = edge.ipts
mid1 = getMid(tria1)
tmp = np.array([mid1, edge.n2.xyz, edge.n1.xyz])
Ac1 = area_of_polygon(tmp[:, 0], tmp[:, 1])
if len(edge.trias) == 1:
tria2 = None
elif len(edge.trias) == 2:
tria2 = edge.trias[1]
mid2 = getMid(tria2)
tmp = np.array([mid2, edge.n1.xyz, edge.n2.xyz])
Ac2 = area_of_polygon(tmp[:, 0], tmp[:, 1])
else:
raise RuntimeError('Found %d trias for edge' % len(edge.trias))
indices = set()
for ipt in ipts:
indices.add(ipt.n1.index)
indices.add(ipt.n2.index)
indices.add(ipt.n3.index)
indices = sorted(list(indices))
if len(ipts) == 3:
indices.append(0) # fourth dummy index
indexpos = dict([[ind, i] for i, ind in enumerate(indices)])
i1, i2, i3, i4 = indices
f1 = np.array([0, 0, 0, 0], dtype=float)
f2 = np.array([0, 0, 0, 0], dtype=float)
f3 = np.array([0, 0, 0, 0], dtype=float)
f4 = np.array([0, 0, 0, 0], dtype=float)
nx1 = ipts[0].nx
ny1 = ipts[0].ny
le1 = ipts[0].le
f1[indexpos[ipts[0].n1.index]] = ipts[0].f1
f1[indexpos[ipts[0].n2.index]] = ipts[0].f2
f1[indexpos[ipts[0].n3.index]] = ipts[0].f3
nx2 = ipts[1].nx
ny2 = ipts[1].ny
le2 = ipts[1].le
f2[indexpos[ipts[1].n1.index]] = ipts[1].f1
f2[indexpos[ipts[1].n2.index]] = ipts[1].f2
f2[indexpos[ipts[1].n3.index]] = ipts[1].f3
nx3 = ipts[2].nx
ny3 = ipts[2].ny
le3 = ipts[2].le
f3[indexpos[ipts[2].n1.index]] = ipts[2].f1
f3[indexpos[ipts[2].n2.index]] = ipts[2].f2
f3[indexpos[ipts[2].n3.index]] = ipts[2].f3
if len(ipts) == 3:
nx4 = 0
ny4 = 0
le4 = 0
else:
nx4 = ipts[3].nx
ny4 = ipts[3].ny
le4 = ipts[3].le
f4[indexpos[ipts[3].n1.index]] = ipts[3].f1
f4[indexpos[ipts[3].n2.index]] = ipts[3].f2
f4[indexpos[ipts[3].n3.index]] = ipts[3].f3
f11, f12, f13, f14 = f1
f21, f22, f23, f24 = f2
f31, f32, f33, f34 = f3
f41, f42, f43, f44 = f4
if prop_from_node:
pn1 = edge.n1.prop
pn2 = edge.n2.prop
po1 = edge.othernode1.prop
if tria2 is None:
A = 4/9*pn1.A + 4/9*pn2.A + 1/9*po1.A
B = 4/9*pn1.B + 4/9*pn2.B + 1/9*po1.B
else:
po2 = edge.othernode2.prop
A = 5/12*pn1.A + 5/12*pn2.A + 1/12*po1.A + 1/12*po2.A
B = 5/12*pn1.B + 5/12*pn2.B + 1/12*po1.B + 1/12*po2.B
else:
prop1 = tria1.prop
if tria2 is None:
A = prop1.A
B = prop1.B
else:
prop2 = tria2.prop
A = (Ac1*prop1.A + Ac2*prop2.A)/Ac
B = (Ac1*prop1.B + Ac2*prop2.B)/Ac
d1 = d[i1*dof: i1*dof+5]
d2 = d[i2*dof: i2*dof+5]
d3 = d[i3*dof: i3*dof+5]
d4 = d[i4*dof: i4*dof+5]
# d1... are [4, 4] [4, 5]
dc = np.dot(np.array([f1, f2, f3, f4]), np.array([d1, d2, d3, d4]))
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for scoring functions."""
import logging
from makani.lib.python.batch_sim import flight_modes as flight_modes_module
import numpy as np
from scipy import signal as sp_signal
from scipy import stats
from scipy.interpolate import interp1d
# Define telemetry selectors.
# To expand the dictionary, add variable in the format:
# 'name':{
# 'source_a': lambda a: a[path to 'name' in source_a dictionary],
# 'source_b': lambda b: b[path to 'name' in source_b dictionary],
# 'method': 'interpolation method', default is 'linear',
# ...}
_TELEMETRY_SELECTORS = {
'time': {
'sim': lambda s: s['time'],
'control': lambda c: c['time']},
'flight_mode': {
'control': lambda c: c['flight_mode'],
'method': 'nearest'},
'flight_mode_time': {
'control': lambda c: c['flight_mode_time']},
'gs02_mode': {
'control': lambda c: c['control_input']['gs_sensors']['mode'],
'sim': lambda s: s['gs02']['mode']},
'gs02_transform_stage': {
'sim': lambda s: s['gs02']['transform_stage'],
'control':
lambda c: c['control_input']['gs_sensors']['transform_stage']},
'airspeed': {
'sim': lambda s: s['wing']['apparent_wind_b']['v'],
'control': lambda c: c['state_est']['apparent_wind']['sph_f']['v']},
'airspeed_cmd': {
'control': lambda c: c['crosswind']['airspeed_cmd']},
'apparent_wind_vector': {
'control': lambda c: c['state_est']['apparent_wind']['vector']},
'body_rates': {
'sim': lambda s: s['wing']['omega'],
'control': lambda c: c['state_est']['pqr']},
'alpha': {
'sim': lambda s: s['wing']['apparent_wind_b']['alpha'],
'control': lambda c: c['state_est']['apparent_wind']['sph_f']['alpha']},
'alpha_cmd': {
'control': lambda c: c['crosswind']['alpha_cmd']},
'beta': {
'sim': lambda s: s['wing']['apparent_wind_b']['beta'],
'control': lambda c: c['state_est']['apparent_wind']['sph_f']['beta']},
'beta_cmd': {
'control': lambda c: c['crosswind']['beta_cmd']},
'gs_azimuth_error': {
'sim': lambda s: s['gs02']['a_error']},
'platform_azi': {
'sim': lambda s: s['gs02']['azimuth'],
'control': lambda c: c['control_input']['perch']['perch_azi'][:, 0]},
'gs_detwist_cmd': {
'control': lambda c: c['control_output']['detwist_cmd']},
'gs_detwist_pos': {
'control': lambda c: c['control_input']['gs_sensors']['detwist_pos']},
'gsg_yoke': {
'sim': lambda s: s['gsg']['gsg_yoke'],
'control': lambda c: c['control_input']['gsg']['azi'][:, 0]},
'gsg_termination': {
'sim': lambda s: s['gsg']['gsg_termination'],
'control': lambda c: c['control_input']['gsg']['ele'][:, 0]},
'path_radius_target': {
'control': lambda c: c['crosswind']['path_radius_target']},
'payout': {
'control': lambda c: c['state_est']['winch']['payout']},
'wing_pos_cw': {
'control': lambda c: c['crosswind']['current_pos_cw']},
'wing_pos_g_cmd': {
'control': lambda c: c['hover']['wing_pos_g_cmd']},
'wing_xg': {
'sim': lambda s: s['wing']['Xg'],
'control': lambda c: c['state_est']['Xg']},
'wing_acc': {
'sim': lambda s: s['wing']['Ab'],
'control': lambda c: c['state_est']['Ab_f']},
'hover_angles': {
'control': lambda c: c['hover']['angles']},
'hover_angles_cmd': {
'control': lambda c: c['hover']['angles_cmd']},
'hover_gain_ramp_scale': {
'control': lambda c: c['hover']['gain_ramp_scale']},
'angular_acc': {
'sim': lambda s: s['wing']['domega']},
'tether_elevation': {
# Because the vessel and platform frames differ only by a rotation
# around the z-axis, elevations with respect to the two frames are
# numerically equal.
'sim': lambda s: s['tether']['Xv_start_elevation'],
'control':
lambda c: c['state_est']['tether_ground_angles']['elevation_p']},
'tether_elevation_valid': {
'control':
lambda c: c['state_est']['tether_ground_angles']['elevation_valid']
},
'tether_azimuth': {
'sim': lambda s: s['tether']['Xv_start_azimuth']},
'tether_tension': {
'sim': lambda s: s['wing']['tether_force_b']['tension'],
'control':
lambda c: c['state_est']['tether_force_b']['sph']['tension']},
'tether_tension_cmd': {
'control':
lambda c: c['hover']['tension_cmd']},
'tether_pitch': {
'sim': lambda s: s['wing']['tether_force_b']['pitch'],
'control': lambda c: c['state_est']['tether_force_b']['sph']['pitch']},
'tether_roll': {
'sim': lambda s: s['wing']['tether_force_b']['roll'],
'control': lambda c: c['state_est']['tether_force_b']['sph']['roll']},
'tether_moment': {
'sim': lambda s: s['wing']['fm_tether']['moment']},
'tether_xg_start': {
'sim': lambda s: s['tether']['Xg_start'],
'control': lambda c: c['state_est']['tether_anchor']['pos_g']},
'tether_xg_end': {
'sim': lambda s: s['tether']['Xg_end']},
'tether_xg_nodes': {
'sim': lambda s: s['tether']['Xg_nodes']},
'rotor_speeds': {
'sim': lambda s: abs(s['rotors']['omega']),
# The controller telemetry already reports back absolute values.
'control': lambda c: c['control_input']['rotors']},
'rotor_freestream_speeds': {
'sim': lambda s: s['rotors']['v_freestream'],
'control': lambda c: c['v_app_locals']},
'rotor_gyro_moments': {
'sim': lambda s: s['rotors']['gyro_moment']},
'rotor_thrusts': {
'sim': lambda s: s['rotors']['thrust']},
'motor_torques': {
'sim': lambda s: s['stacked_power_sys']['motor_torques']},
'thrust_moment': {
'control': lambda c: c['thrust_moment']},
'thrust_moment_avail': {
'control': lambda c: c['thrust_moment_avail']},
'electric_power': {
'sim': lambda s: s['power_sys']['P_elec']},
'aero_power': {
'sim': lambda s: s['rotors']['aero_power']},
'flaps': {
'sim': lambda s: s['wing']['flaps'],
'control': lambda c: c['control_input']['flaps']},
'servo_shaft_torques': {
'sim': lambda s: s['servo_sensor']['external_shaft_torques']},
'wing_vel_trans_in': {
'control': lambda c: c['trans_in']['wing_vel_ti']},
'wing_vel_trans_in_y_cmd': {
'control': lambda c: c['trans_in']['wing_vel_ti_y_cmd']},
'wind_g_vector_f_slow': {
'control': lambda c: c['state_est']['wind_g']['vector_f_slow']},
'ground_voltage': {
'sim': lambda s: s['stacked_power_sys']['ground_voltage']},
'tether_current': {
'sim': lambda s: s['stacked_power_sys']['tether_current']},
'block_voltages': {
'sim': lambda s: s['stacked_power_sys']['block_voltages']},
'loop_angle': {
'control': lambda c: c['crosswind']['loop_angle'],
'method': 'nearest'},
'dcm_g2b': {
'sim': lambda s: s['wing']['dcm_g2b']['d'],
'control': lambda c: c['state_est']['dcm_g2b']['d'],
'method': 'nearest'},
'dcm_g2v': {
'sim': lambda s: s['buoy']['dcm_g2v']['d'],
'control': lambda c: c['state_est']['vessel']['dcm_g2v']['d'],
'method': 'nearest'},
'buoy_xg': {
'sim': lambda s: s['buoy']['Xg'],
'control': lambda c: c['state_est']['vessel']['pos_g']},
'accum_kite_loops': {
'control': lambda c: c['crosswind']['loop_count']},
'accum_detwist_loops': {
'control': lambda c: c['detwist_loop_count']},
'water_line': {
'sim': lambda s: s['buoy']['water_line_pos_z_v']},
'buoy_yaw_angle_from_eq': {
'sim': lambda s: s['buoy']['yaw_angle_from_eq']},
'buoy_accel_g': {
'sim': lambda s: s['buoy']['vessel_origin_accel_g']},
}
def GetDistToWrappedLimits(value, start_limit, end_limit,
wrap_left, wrap_right):
"""Returns value for min distance from value to limits on wrapped scale.
Arguments:
value: Value to be evaluated. Can be list-like or single value. Values must
be between wrap_left and wrap_right.
start_limit: The beginning of a range on wrapped scale.
end_limit: The end of a range on wrapped scale.
wrap_left: Minimum value for wrapping scale.
wrap_right: Maximum value for wrapping scale.
Returns:
Minimum distance that value is from range limits.
Positive values indicate value is between range specified by start_limit
and end_limit. Negative values indicate value is outside of range.
"""
wrap_range = wrap_right - wrap_left
if not hasattr(value, '__iter__'):
value = [value]
# Unwrap end limit if needed so limits are in order.
if end_limit < start_limit:
end_limit_ordered = end_limit + wrap_range
else:
end_limit_ordered = end_limit
for ii, v in enumerate(value):
assert v >= wrap_left and v <= wrap_right, (
'Values must be between wrap_left and wrap_right.')
if end_limit < start_limit and v < end_limit:
# If limits go around wrap and value was in limits before wrap,
# unwrap value.
v += wrap_range
if v > start_limit and v < end_limit_ordered:
# If inside the bad range, give positive value
value[ii] = min(abs(v - start_limit),
abs(v - end_limit_ordered))
else:
# If outside bad range, give negative value.
value[ii] = -min(abs(v - start_limit),
abs(v - end_limit_ordered),
# Also check wrapped values to limits.
abs(v + wrap_range - end_limit_ordered),
abs(v - wrap_range - start_limit))
if len(value) == 1:
return value[0]
else:
return value
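# A worked example (hypothetical values, assuming angles wrapped to
# [-180, 180] deg) of the sign convention used above:
#   GetDistToWrappedLimits(10.0, 0.0, 45.0, -180.0, 180.0)  returns  10.0
#     (10 deg lies inside the [0, 45] range, 10 deg from the nearest limit)
#   GetDistToWrappedLimits(-30.0, 0.0, 45.0, -180.0, 180.0) returns -30.0
#     (-30 deg lies outside the range, 30 deg from the nearest limit)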
def _GetValueAndSource(sim, control, name, sources):
"""Returns value of specified telemetry 'name' and 'source'.
Arguments:
sim: Simulator telemetry dictionary.
control: Controller telemetry dictionary.
name: [string] Telemetry variable e.g. 'airspeed' or 'alpha'.
sources: [list of strings] The list of telemetry sources in their
priority order. Data is returned from the first source that's
available, and data from other sources are interpolated
accordingly.
Raises:
ValueError: Requested 'name' not available.
"""
if name not in _TELEMETRY_SELECTORS:
raise ValueError('Requested name "%s" not available.' % name)
all_sources = _TELEMETRY_SELECTORS[name].keys()
for source in sources:
if source in all_sources:
selector = _TELEMETRY_SELECTORS[name][source]
telemetry = None
if source == 'sim' and sim is not None:
telemetry = sim
elif source == 'control' and control is not None:
telemetry = control
if telemetry is not None:
try:
return selector(telemetry), source
except ValueError:
          logging.error('Cannot find "%s" in "%s".', name, source)
return None, None
return None, None
def _GetFlightModesIndices(flight_mode_timeseries, flight_modes):
"""Returns indices corresponding flight mode specified.
Arguments:
flight_mode_timeseries: 'flight_mode' timeseries data.
flight_modes: [string or list of strings] Optional flight mode.
For example, 'kFlightModeCrosswindNormal' or
['kFlightModeCrosswindNormal', 'kFlightModeCrosswindPrepTransOut'].
"""
if isinstance(flight_modes, str):
flight_modes = [flight_modes]
modes_indices = np.empty(0, dtype=int)
for flight_mode in flight_modes:
mode_indices = np.argwhere(
flight_mode_timeseries
== flight_modes_module.GetFlightModes()[flight_mode])
modes_indices = np.append(modes_indices, mode_indices)
return np.sort(modes_indices)
# Note: Since interpolation of integer array is performed here, there is a
# possibility of offsetting flight mode transitions by a cycle.
def _GetInterpolatedValue(sim_time, control_time, data_value, method):
"""Returns control telemetry data_value interpolated to simulator time."""
if not method:
method = 'linear'
def _Interpolate(sim_time, control_time, data_value): # pylint: disable=missing-docstring
assert data_value.shape
if len(data_value.shape) == 1:
if np.size(data_value) == 1:
return data_value.repeat(sim_time.size)
else:
return interp1d(control_time, data_value, kind=method,
bounds_error=False, axis=0,
fill_value=(data_value[0], data_value[-1]))(sim_time)
else:
      # If this is an N-D array where N > 1 (e.g., motor_voltages[:, 8]),
# each slice of this array needs to be interpolated.
new_shape = (sim_time.shape[0],) + data_value.shape[1:]
data_out = np.empty(new_shape, dtype=data_value.dtype)
      for i in np.ndindex(data_value.shape[1:]):
        slice_index = tuple([slice(None)] + list(i))
        source_value = data_value[slice_index]
data_out[slice_index] = interp1d(
control_time, source_value, kind=method, bounds_error=False,
axis=0, fill_value=(source_value[0], source_value[-1]))(
sim_time)
return data_out
if isinstance(data_value, dict):
all_fields = data_value.keys()
data_value_out = {}
elif isinstance(data_value, np.ndarray) and data_value.dtype.names:
all_fields = data_value.dtype.names
new_shape = (len(sim_time),) + data_value.shape[1:]
data_value_out = np.empty(new_shape, dtype=data_value.dtype)
else:
    # The source is truncated here; a minimal completion is assumed: warn on
    # NaNs and interpolate the plain array directly onto the simulator times.
    if np.isnan(data_value).any():
      logging.warning('NaN values found while interpolating telemetry data.')
    return _Interpolate(sim_time, control_time, data_value)
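# A minimal usage sketch (hypothetical `sim`/`control` telemetry dicts): pull
# 'airspeed' preferring the controller source, then resample it onto the
# simulator time base so both signals can be compared sample-by-sample.
#   value, source = _GetValueAndSource(sim, control, 'airspeed',
#                                      ['control', 'sim'])
#   if source == 'control':
#     value = _GetInterpolatedValue(sim['time'], control['time'], value,
#                                   _TELEMETRY_SELECTORS['airspeed'].get('method'))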
#!/usr/bin/env python
# coding: utf-8
from typing import Tuple
import numpy as np
import PathReducer.calculate_rmsd as rmsd
import pandas as pd
import math
import glob
import os
import sys
import ntpath
import MDAnalysis as mda
import PathReducer.plotting_functions as plotting_functions
from periodictable import *
from sklearn import *
from sympy import solve, Symbol
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def read_traj_file(*args, **kwargs) -> Tuple[str, np.ndarray, np.ndarray]:
"""
Reads in a trajectory using MDAnalysis' Universe class, documentation and information on parameters found here: (https://www.mdanalysis.org/docs/documentation_pages/core/universe.html#MDAnalysis.core.universe.Universe). A topology file is always required, however there are multiple ways of setting up a universe for a trajectory. Examples include:
u = Universe(topology, trajectory) # read system from file(s)
u = Universe(pdbfile) # read atoms and coordinates from PDB or GRO
u = Universe(topology, [traj1, traj2, ...]) # read from a list of trajectories
u = Universe(topology, traj1, traj2, ...) # read from multiple trajectories
    The trajectory being read in should already be pruned (of explicit solvent, backbone residues, and anything else that you don't want PCA to capture). The function then returns a numpy array of the atom types of the system and a numpy array of the Cartesian coordinates of each atom for every frame.
:param topology: str (.pdb, .top, .gro etc)
:param coordinates: str (.dcd, .nc, .xyz etc)
:return extensionless_system_name
atom_list
cartesians
"""
u = mda.Universe(*args, **kwargs)
system_name = path_leaf(u.filename)
extensionless_system_name = os.path.splitext(system_name)[0]
n_frames = len(u.trajectory)
n_atoms = len(u.atoms)
cartesians = np.ndarray((n_frames, n_atoms, 3))
try:
atom_list = u.atoms.elements
except AttributeError:
atom_list = u.atoms.types
for frame_index, ts in enumerate(u.trajectory):
cartesians[frame_index] = ts.positions
return extensionless_system_name, atom_list, cartesians
def read_xyz_file(path):
""" Reads in an xyz file from path as a DataFrame. This DataFrame is then turned into a 3D array such that the
dimensions are (number of points) X (number of atoms) X 3 (Cartesian coordinates). The system name (based on the
filename), list of atoms in the system, and Cartesian coordinates are output.
:param path: path to xyz file to be read
:return extensionless_system_name: str
atom_list: numpy array
cartesians: numpy array
"""
system_name = path_leaf(path)
print("File being read is: %s" % system_name)
extensionless_system_name = os.path.splitext(system_name)[0]
data = pd.read_csv(path, header=None, delim_whitespace=True, names=['atom', 'X', 'Y', 'Z'])
n_atoms = int(data.loc[0][0])
n_lines_per_frame = int(n_atoms + 2)
data_array = np.array(data)
data_reshape = np.reshape(data_array, (int(data_array.shape[0]/n_lines_per_frame), n_lines_per_frame,
data_array.shape[1]))
    cartesians = data_reshape[:, 2::, 1::].astype(np.float64)
atom_list = data_reshape[0, 2::, 0]
return extensionless_system_name, atom_list, cartesians
def remove_atoms_by_type(atom_types_to_remove, atom_list, cartesians):
"""
Removes specific atoms if they are not wanted for PCA
:param atom_list: list of atoms in the structure
:param cartesians: cartesian coordinates of each frame
:return: cartesian coordinates of each frame with specific atom types removed
"""
matches_indexes = [i for i, x in enumerate(atom_list) if x in atom_types_to_remove]
cartesians_sans_atoms = np.delete(cartesians, list(matches_indexes), axis=1)
atom_list_sans_atoms = np.delete(atom_list, list(matches_indexes), axis=0)
return atom_list_sans_atoms, cartesians_sans_atoms
def calculate_velocities(cartesians, timestep=1):
"""
Calculate velocities at each timestep given Cartesian coordinates. Velocities at the first and last point are
extrapolated.
:param cartesians: Cartesian coordinates along trajectory
:param timestep: time step between frames in units of fs, default=1
:return: velocities
"""
velocities = []
for i in range(0, len(cartesians)):
if i == 0:
velocity = (cartesians[i + 1] - cartesians[i]) / timestep
elif i == len(cartesians) - 1:
velocity = (cartesians[i] - cartesians[i - 1]) / timestep
else:
            velocity = (cartesians[i + 1] - cartesians[i - 1]) / (2 * timestep)
velocities.append(velocity)
return velocities
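# Example (single atom moving along x at t = 0, 1, 2 fs with positions 0, 1, 4):
# the forward, central and backward differences give x-velocities of 1, 2 and 3,
# with the central difference evaluated as (x[i+1] - x[i-1]) / (2 * timestep).
#   calculate_velocities([np.array([[0., 0., 0.]]),
#                         np.array([[1., 0., 0.]]),
#                         np.array([[4., 0., 0.]])], timestep=1)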
def calculate_momenta(velocities, atoms):
"""
    :param velocities: velocities of each atom at every frame of the trajectory
    :param atoms: list of atom symbols in the structure
    :return: momenta (velocities scaled by the corresponding atomic masses)
"""
velocities = np.array(velocities)
atoms = np.array(atoms)
atom_masses = np.array([formula(atom).mass for atom in atoms])
momenta = velocities * atom_masses[np.newaxis, :, np.newaxis]
return momenta
def set_atom_one_to_origin(coordinates):
coordinates_shifted = coordinates - coordinates[:, np.newaxis, 0]
return coordinates_shifted
def mass_weighting(atoms, cartesians):
cartesians = np.array(cartesians)
atoms = np.array(atoms)
atom_masses = [formula(atom).mass for atom in atoms]
weighting = np.sqrt(atom_masses)
mass_weighted_cartesians = cartesians * weighting[np.newaxis, :, np.newaxis]
return mass_weighted_cartesians
def remove_mass_weighting(atoms, coordinates):
coordinates = np.array(coordinates)
atoms = np.array(atoms)
atom_masses = [formula(atom).mass for atom in atoms]
weighting = np.sqrt(atom_masses)
unmass_weighted_coords = coordinates / weighting[np.newaxis, :, np.newaxis]
return unmass_weighted_coords
def generate_distance_matrices(coordinates):
""" Generates distance matrices for each structure.
"""
coordinates = np.array(coordinates)
d2 = np.sum((coordinates[:, :, None] - coordinates[:, None, :]) ** 2, axis=3)
return d2
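# Example: a single frame with two points one unit apart along x gives
#   generate_distance_matrices(np.array([[[0., 0., 0.], [1., 0., 0.]]]))
#   -> array([[[0., 1.], [1., 0.]]])   # squared pairwise distances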
def generate_dihedral_matrices(coordinates):
return coordinates
def generate_and_reshape_ds_big_structures(coordinates):
""" Generates matrix of pairwise distances, which includes pairwise distances for each structure.
:param coordinates:
"""
coordinates = np.array(coordinates)
atoms = int(coordinates.shape[1])
d_re = np.zeros((coordinates.shape[0], int(atoms * (atoms - 1) / 2)))
for i in range(coordinates.shape[0]):
d2 = np.square(metrics.pairwise.euclidean_distances(coordinates[i]))
x = d2[0].shape[0]
dint_re = d2[np.triu_indices(x, k=1)]
d_re[i] = dint_re
return d_re
def reshape_ds(d):
""" Takes only the upper triangle of the distance matrices and reshapes them into 1D arrays.
"""
d_re = []
x = d[0][0].shape[0]
for dint in d:
dint_re = dint[np.triu_indices(x, k=1)]
d_re.append(dint_re)
d_re = np.asarray(d_re)
return d_re
def vector_to_matrix(v):
""" Converts a representation from 1D vector to 2D square matrix. Slightly altered from rmsd package to disregard
zeroes along diagonal of matrix.
:param v: 1D input representation.
:type v: numpy array
:return: Square matrix representation.
:rtype: numpy array
"""
if not (np.sqrt(8 * v.shape[0] + 1) == int(np.sqrt(8 * v.shape[0] + 1))):
print("ERROR: Can not make a square matrix.")
exit(1)
n = v.shape[0]
w = ((-1 + int(np.sqrt(8 * n + 1))) // 2) + 1
m = np.zeros((w, w))
index = 0
for i in range(w):
for j in range(w):
if i > j - 1:
continue
m[i, j] = v[index]
m[j, i] = m[i, j]
index += 1
return m
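# Example: a length-3 vector fills the upper triangle of a 3 x 3 matrix,
# mirrored below the (zero) diagonal:
#   vector_to_matrix(np.array([1., 2., 3.]))
#   -> array([[0., 1., 2.],
#             [1., 0., 3.],
#             [2., 3., 0.]])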
def distance_matrix_to_coords(v):
""" Converts a (2D square) distance matrix representation of a structure to Cartesian coordinates (first 3 columns
correspond to 3D xyz coordinates) via a Gram matrix.
:param v: 1D vector, numpy array
:return: 3D Cartesian coordinates, numpy array
"""
d = vector_to_matrix(v)
d_one = np.reshape(d[:, 0], (d.shape[0], 1))
    m = (-0.5) * (d - np.matmul(np.ones((d.shape[0], 1)), np.transpose(d_one)) -
                  np.matmul(d_one, np.ones((1, d.shape[0]))))
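    # A sketch of the remaining classical-MDS step (assumed here, since the
    # snippet is truncated): recover 3D coordinates from the Gram matrix m by
    # keeping the three largest eigenvalues of its eigendecomposition.
    #   values, vectors = np.linalg.eigh(m)
    #   idx = np.argsort(values)[::-1][:3]
    #   coords = vectors[:, idx] * np.sqrt(values[idx])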
# coding=utf-8
import numpy as np
import pandas as pd
import pytest
from matplotlib import pyplot as plt
from pythonpic.algorithms.current_deposition import current_deposition
from pythonpic.classes import Particle, PeriodicTestGrid, NonperiodicTestGrid
from pythonpic.classes import TestSpecies as Species
from pythonpic.configs.run_laser import initial, npic, number_cells
from pythonpic.helper_functions.helpers import make_sure_path_exists
from pythonpic.helper_functions.physics import lightspeed, electric_charge, \
electron_rest_mass
@pytest.fixture(params=np.arange(3, 4, 0.2))
def _position(request):
return request.param
@pytest.fixture(params=np.arange(-0.9, 1, 0.2))
def _velocity(request):
return request.param
@pytest.fixture(params=(True, False))
def _truefalse(request):
return request.param
_truefalse2 = _truefalse
def error_table(investigated_density, target_density):
inexact_indices = investigated_density != target_density
error = investigated_density - target_density
error[inexact_indices] *= 100 / target_density[inexact_indices]
return error
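# Example: error_table(np.array([1., 2.]), np.array([1., 4.])) returns
# array([0., -50.]): exact entries stay 0, mismatches become percent errors.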
def test_single_particle_longitudinal_deposition(_position, _velocity):
g = NonperiodicTestGrid(T = 1, L=7, NG=7)
s = Particle(g, _position * g.dx, _velocity, )
dt = g.dt
g.current_density_x[...] = 0
g.current_density_yz[...] = 0
# current_deposition(g, s, dt)
current_deposition(g.current_density_x, g.current_density_yz, s.v,
s.x, g.dx, dt, s.q)
collected_longitudinal_weights = g.current_density_x.sum() / s.v[0, 0]
def plot_longitudinal():
fig, ax = plt.subplots()
ax.scatter(s.x, 0)
new_positions = s.x + s.v[:, 0] * dt
title = f"x0: {s.x[0]} v: {s.v[0,0]} x1: {new_positions[0]} w: {collected_longitudinal_weights}"
ax.annotate(title, xy=(new_positions[0], 0), xytext=(s.x[0], 0.3),
arrowprops=dict(facecolor='black', shrink=0.05, linewidth=0.5), horizontalalignment='right')
ax.set_xticks(g.x)
ax.set_xticks(np.arange(0, g.L, g.dx / 2), minor=True)
ax.grid(which='minor', alpha=0.3)
ax.grid(which='major', alpha=0.7)
ax.set_title(title)
ax.scatter(new_positions, 0)
ax.plot(g.x, g.current_density_x[1:-2], "go-", alpha=0.7, linewidth=3, label=f"jx")
ax.legend()
plt.show()
return title + " instead of 1"
assert np.isclose(collected_longitudinal_weights, 1), plot_longitudinal()
def test_single_particle_transversal_deposition(_position, _velocity):
g = PeriodicTestGrid(1, L=7, NG=7)
s = Particle(g, _position * g.dx, _velocity, 1, -1)
dt = g.dx / s.c
new_positions = s.x + s.v[:, 0] * dt
g.current_density_x[...] = 0
g.current_density_yz[...] = 0
print("\n\n=====Start run===")
# current_deposition(g, s, dt)
current_deposition(g.current_density_x, g.current_density_yz, s.v, s.x,
g.dx, dt, s.q)
total_currents = g.current_density_yz.sum(axis=0) / s.v[0, 1:]
total_sum_currents = g.current_density_yz.sum()
x_velocity = s.v[0, 0]
collected_transversal_weights = total_currents
print("total", total_currents)
print("x velocity", x_velocity)
print("weights", collected_transversal_weights)
def plot_transversal():
fig, ax = plt.subplots()
ax.scatter(s.x, 0)
title = f"x0: {s.x[0]} v: {s.v[0,0]} x1: {new_positions[0]:.3f} w: {collected_transversal_weights}"
ax.annotate(title, xy=(new_positions[0], 0), xytext=(s.x[0], 0.3),
arrowprops=dict(facecolor='black', shrink=0.05, linewidth=0.5), horizontalalignment='right')
ax.set_xticks(g.x)
ax.set_xticks(np.arange(0, g.L, g.dx / 2), minor=True)
ax.grid(which='minor', alpha=0.3)
ax.grid(which='major', alpha=0.7)
ax.set_title(title)
ax.scatter(new_positions, 0)
for i, label in {1: 'y', 2: 'z'}.items():
ax.plot(g.x + 0.5, g.current_density_yz[1:-1, i - 1], "o-", alpha=0.7, linewidth=i + 3, label=f"j{label}")
ax.legend()
plt.show()
return title + " instead of 1"
assert np.allclose(collected_transversal_weights, 1), plot_transversal()
    assert np.isclose(total_sum_currents, 0)
#
# RawIO
# Copyright (c) 2021 <NAME>.
#
from cv2 import getRotationMatrix2D, warpPerspective
from lsd import line_segment_detector
from numpy import abs, array, asarray, arctan2, pi, rad2deg, vstack, zeros_like
from PIL import Image
from sklearn.linear_model import RANSACRegressor
from .constrain import constrain_crop_transform
def align_level (image: Image.Image, constrain_crop: bool=True, max_theta: float=4., max_trials: int=2000) -> Image.Image:
"""
Level an image.
Parameters:
image (PIL.Image): Input image.
constrain_crop (bool): Apply a constrain crop to remove borders.
max_theta (float): Maximum angle that can be corrected in degrees.
max_trials (int): Maximum trials for fitting geometry model.
Returns:
PIL.Image: Result image.
"""
# Extract lines
scale = 1200. / image.width
min_length = image.width * 0.05
image_arr = asarray(image)
lines = line_segment_detector(image_arr, scale=scale, angle_tolerance=18.)
lines = lines[lines[:,6] > min_length,:4]
# Get vertical lines
MAX_ANGLE = 12.
    lines_t = rad2deg(arctan2(lines[:,3] - lines[:,1], lines[:,2] - lines[:,0]))
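    # The snippet ends here; a hedged sketch of the remaining steps, based only
    # on the imports above: keep near-vertical segments (within MAX_ANGLE of
    # 90 deg), fit the dominant tilt robustly with RANSACRegressor (max_trials),
    # clamp it to +/- max_theta, rotate about the image centre with
    # getRotationMatrix2D and warpPerspective, and optionally apply
    # constrain_crop_transform when constrain_crop is True.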
import cv2
import time
import numpy as np
from random import randint
import argparse
from predict import *
import glob
parser = argparse.ArgumentParser(description='Run keypoint detection')
parser.add_argument("--device", default="gpu", help="Device to inference on")
parser.add_argument("--image_file", default="group.jpg", help="Input image")
parser.add_argument("--image_folder", help="Input folder")
parser.add_argument("--video_file", help="Input video")
parser.add_argument("--protoFile", default="group.jpg", help="Input image")
parser.add_argument("--weightsFile", default="group.jpg", help="Input image")
args = parser.parse_args()
protoFile = args.protoFile
weightsFile = args.weightsFile
nPoints = 18
# COCO Output Format
keypointsMapping = ['Nose', 'Neck', 'R-Sho', 'R-Elb', 'R-Wr', 'L-Sho', 'L-Elb', 'L-Wr', 'R-Hip', 'R-Knee', 'R-Ank', 'L-Hip', 'L-Knee', 'L-Ank', 'R-Eye', 'L-Eye', 'R-Ear', 'L-Ear']
POSE_PAIRS = [[1,2], [1,5], [2,3], [3,4], [5,6], [6,7],
[1,8], [8,9], [9,10], [1,11], [11,12], [12,13],
[1,0], [0,14], [14,16], [0,15], [15,17],
[2,17], [5,16] ]
# index of pafs correspoding to the POSE_PAIRS
# e.g for POSE_PAIR(1,2), the PAFs are located at indices (31,32) of output, Similarly, (1,5) -> (39,40) and so on.
mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44],
[19,20], [21,22], [23,24], [25,26], [27,28], [29,30],
[47,48], [49,50], [53,54], [51,52], [55,56],
[37,38], [45,46]]
colors = [ [0,100,255], [0,100,255], [0,255,255], [0,100,255], [0,255,255], [0,100,255],
[0,255,0], [255,200,100], [255,0,255], [0,255,0], [255,200,100], [255,0,255],
[0,0,255], [255,0,0], [200,200,0], [255,0,0], [200,200,0], [0,0,0]]
def getKeypoints(probMap, threshold=0.1):
mapSmooth = cv2.GaussianBlur(probMap,(3,3),0,0)
mapMask = np.uint8(mapSmooth>threshold)
keypoints = []
#find the blobs
contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#for each blob find the maxima
for cnt in contours:
blobMask = np.zeros(mapMask.shape)
blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)
maskedProbMap = mapSmooth * blobMask
_, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)
keypoints.append(maxLoc + (probMap[maxLoc[1], maxLoc[0]],))
return keypoints
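# Usage sketch (assumed shapes): for the COCO model the network output has 57
# channels (18 keypoint heatmaps, 1 background, 38 PAF channels), so the nose
# heatmap would be read and thresholded as:
#   probMap = cv2.resize(output[0, 0, :, :], (frameWidth, frameHeight))
#   keypoints = getKeypoints(probMap, threshold=0.1)  # list of (x, y, score)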
# Find valid connections between the different joints of a all persons present
def getValidPairs(output):
valid_pairs = []
invalid_pairs = []
n_interp_samples = 10
paf_score_th = 0.1
conf_th = 0.7
# loop for every POSE_PAIR
for k in range(len(mapIdx)):
# A->B constitute a limb
pafA = output[0, mapIdx[k][0], :, :]
pafB = output[0, mapIdx[k][1], :, :]
pafA = cv2.resize(pafA, (frameWidth, frameHeight))
pafB = cv2.resize(pafB, (frameWidth, frameHeight))
# Find the keypoints for the first and second limb
candA = detected_keypoints[POSE_PAIRS[k][0]]
candB = detected_keypoints[POSE_PAIRS[k][1]]
nA = len(candA)
nB = len(candB)
# If keypoints for the joint-pair is detected
# check every joint in candA with every joint in candB
# Calculate the distance vector between the two joints
# Find the PAF values at a set of interpolated points between the joints
# Use the above formula to compute a score to mark the connection valid
if( nA != 0 and nB != 0):
valid_pair = np.zeros((0,3))
for i in range(nA):
max_j=-1
maxScore = -1
found = 0
for j in range(nB):
# Find d_ij
d_ij = np.subtract(candB[j][:2], candA[i][:2])
norm = np.linalg.norm(d_ij)
if norm:
d_ij = d_ij / norm
else:
continue
# Find p(u)
interp_coord = list(zip(np.linspace(candA[i][0], candB[j][0], num=n_interp_samples),
np.linspace(candA[i][1], candB[j][1], num=n_interp_samples)))
# Find L(p(u))
paf_interp = []
for k in range(len(interp_coord)):
paf_interp.append([pafA[int(round(interp_coord[k][1])), int(round(interp_coord[k][0]))],
pafB[int(round(interp_coord[k][1])), int(round(interp_coord[k][0]))] ])
# Find E
                    paf_scores = np.dot(paf_interp, d_ij)
# 对数据集中的点云,批量执行构建树和查找,包括kdtree和octree,并评测其运行时间
import random
import math
import numpy as np
import time
import os
import struct
from scipy.spatial import KDTree
import octree as octree
import kdtree as kdtree
from result_set import KNNResultSet, RadiusNNResultSet
np.seterr(all='raise')
def read_velodyne_bin(path):
'''
:param path:
:return: homography matrix of the point cloud, N*3
'''
pc_list = []
with open(path, 'rb') as f:
content = f.read()
pc_iter = struct.iter_unpack('ffff', content)
for idx, point in enumerate(pc_iter):
pc_list.append([point[0], point[1], point[2]])
return np.asarray(pc_list, dtype=np.float32)
def main():
# configuration
leaf_size = 32
min_extent = 0.0001
k = 8
radius = 1
# root_dir = '/Users/renqian/cloud_lesson/kitti' # 数据集路径
root_dir = './data' # 数据集路径
cat = os.listdir(root_dir)
iteration_num = len(cat)
print("scipy ---------------")
construction_time_sum = 0
knn_time_sum = 0
radius_time_sum = 0
brute_time_sum = 0
for i in range(iteration_num):
filename = os.path.join(root_dir, cat[i])
db_np = read_velodyne_bin(filename)
begin_t = time.time()
root = KDTree(db_np, leaf_size)
construction_time_sum += time.time() - begin_t
begin_t = time.time()
query = db_np[0, :]
result_set = KNNResultSet(capacity=k)
distance, indices = root.query(x=query, k=k)
output = ''
for i, item in enumerate(zip(indices, distance)):
output += '%d - %.2f\n' % (item[0], item[1])
# print(output)
knn_time_sum += time.time() - begin_t
begin_t = time.time()
indices = root.query_ball_point(query, radius)
output = ''
for i, index in enumerate(indices):
            output += '%d - %.2f\n' % (index, np.linalg.norm(db_np[index] - query))
""" This module contains a class GwGxg that calculates some
descriptive statistics from a series of groundwater head measurements
used by groundwater practitioners in the Netherlands
History: Created 16-08-2015, last updated 12-02-2016
Migrated to acequia on 15-06-2019
@author: <NAME>
"""
import math
from datetime import datetime
import datetime as dt
import warnings
import numpy as np
from pandas import Series, DataFrame
import pandas as pd
import acequia as aq
def stats_gxg(ts,reflev='datum'):
"""Return table with GxG statistics
Parameters
----------
ts : aq.GwSeries, pd.Series
Groundwater head time series
reflev : {'datum','surface'}, optional
Reference level for groundwater heads
Returns
-------
pd.DataFrame
"""
gxg = aq.GxgStats(ts)
return gxg.gxg(reflev=reflev)
class GxgStats:
"""Calculate descriptive statistics for time series of measured heads
Parameters
----------
gw : aq.GwSeries, pd.Series
timeseries of groundwater head measurements relative to datum level
srname : str, optional
name of groundwater head series
surface : float, optional
surface level height (if ref='datum' this option is ignored)
Notes
-----
In the Netherlands, traditionally groundwater head series are
summarized using decriptive statistics that characterise the mean
highest level (GHG), the mean lowest level (GLG) and the mean spring
level (GVG). These three measures together are reffered to as the GxG.
The definitions of GHG, GLG and GVG are based on time series with
measured heads on the 14th and 28th of each month. Therefore the time
series of measrued heads is internally resampled to values on the 14th
and 28yh before calculating the GxG statistics.
For further reference:
<NAME> and <NAME> (1985). 'Water table classes:
a method to decribe seasonal fluctuation and duration of water table
classes on Dutch soil maps.' Agricultural Water Management 10 (1985)
109 - 125. Elsevier Science Publishers, Amsterdam.
"""
N14 = 18
## REFERENCE = ['datum','surface']
APPROXIMATIONS = ['SLUIJS82','HEESEN74','SLUIJS76a','SLUIJS76b',
'SLUIJS89pol','SLUIJS89sto','RUNHAAR89','GAAST06',]
VGDATES = ['apr1','apr15','mar15']
VGREFDATE = 'apr1'
def __init__(self, gw, srname=None, surface=None):
"""Return GxG object"""
if isinstance(gw,aq.GwSeries):
self._ts = gw.heads(ref='datum')
self.srname = gw.name()
if surface is None:
self._surface = gw.surface()
else:
                self._surface = surface
self._gw = gw
elif isinstance(gw,pd.Series):
self._ts = gw
self.srname = self._ts.name
self._surface = surface
self._gw = None
else:
            raise TypeError(f'{gw} is not of type aq.GwSeries or pd.Series')
self._ts1428 = aq.ts1428(self._ts,maxlag=3,remove_nans=False)
self._xgnap = self._calculate_xg_nap()
def _yearseries(self,ts,dtype='float64'):
"""Return empty time series with years as index with all years
between min(year) and max(year) in index (no missing years)"""
if isinstance(ts,pd.Series):
years = set(ts.index.year)
elif isinstance(ts,(list,set,np.ndarray)):
years = set(ts)
else:
            raise TypeError(f'{ts} must be list-like')
minyear = min(years)
maxyear= max(years)
sr = Series(index=range(minyear,maxyear+1),dtype=dtype,name='year')
return sr
def vg3(self):
"""Return VG3 (Spring Level) for each year
VG3 is calculated as the mean of groundwater head
levels on 14 march, 28 march and 14 april
Return
------
pd.Series
Notes
-----
Calculation of GVG based on the average of three dates was
introduced by Finke et al. (1999)
References
----------
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>
& <NAME> (1999). Actuele grondwaterinformatie 1:10.000 in de
waterschappen Wold en Wieden en Meppelerdiep. Gebruik van digitale
maaiveldshoogtes bij de kartering van GHG, GVG en GLG. SC-rapport
633. (in Dutch).
"""
self._vg3 = self._yearseries(self._ts1428)
for i,year in enumerate(self._vg3.index):
v1 = self._ts1428[dt.datetime(year,3,14)]
v2 = self._ts1428[dt.datetime(year,3,28)]
v3 = self._ts1428[dt.datetime(year,4,14)]
with warnings.catch_warnings():
# numpy raises a silly warning with nanmean on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty slice')
self._vg3[year] = np.round(np.nanmean([v1,v2,v3]),2)
self._vg3.name = 'VG3'
return self._vg3
def vg1(self,refdate=VGREFDATE,maxlag=7):
"""Return VG (Spring Level) for each year as the measurement
closest to refdate
Parameters
----------
refdate : {'apr1','apr15','mar15'}, default 'apr1'
reference date for estimating VG
maxlag : number
maximum allowed difference between measurement date en refdate
Return
------
pd.Series
Notes
-----
The VG (Voorjaars Grondwaterstand, Spring Level) is estimated as
the single measurement closest to the reference date given by
refdate.
The reference date for calculation of the GVG was changed from
april 15 to april 1st in de early eighties. In 2000 the
Cultuurtechnisch Vademecum proposed march 15 as the new reference
date for the GVG but this proposal was not generally adopted.
In practice april 1st is allways used as reference date and this
is used as default for calculations.
References
----------
<NAME>, J.W.J., <NAME> & <NAME> (2009). Actuele
grondwaterstandsituatie in natuurgebieden. Rapport 94 WOT. Alterra,
Wageningen. (in Dutch).
"""
if refdate not in self.VGDATES:
warnings.warn((f'Reference date {refdate} for GVG is not '
f'recognised. Reference date \'{self.VGREFDATE}\' is '
f'assumed.'))
refdate = self.VGREFDATE
vg1 = self._yearseries(self._ts1428)
for i,year in enumerate(vg1.index):
if refdate=='apr1':
date = dt.datetime(year,4,1)
if refdate=='apr15':
date = dt.datetime(year,4,15)
if refdate=='mar15':
date = dt.datetime(year,3,15)
daydeltas = self._ts.index - date
mindelta = np.amin(np.abs(daydeltas))
sr_nearest = self._ts[np.abs(daydeltas) == mindelta]
maxdelta = pd.to_timedelta(f'{maxlag} days')
if (mindelta <= maxdelta):
vg1[year] = np.round(sr_nearest.iloc[0],2)
vg1.name = f'VG{refdate}'
return vg1
def _calculate_xg_nap(self):
"""Calculate xg statistics for eacht year and return table"""
hydroyears = aq.hydroyear(self._ts1428)
sr = self._yearseries(hydroyears)
xg = pd.DataFrame(index=sr.index)
xg.index.name = 'year'
for year in xg.index:
ts = self._ts1428[hydroyears==year]
ts = ts[ts.notnull()]
n1428 = len(ts)
if not np.isnan(n1428):
n1428 = math.floor(n1428)
hg3 = np.nan
lg3 = np.nan
if n1428 >= self.N14:
hg3 = ts.nlargest(n=3).mean()
lg3 = ts.nsmallest(n=3).mean()
hg3w = np.nan
lg3s = np.nan
if n1428 >= self.N14:
ts_win = ts[aq.season(ts)=='winter']
ts_sum = ts[aq.season(ts)=='summer']
hg3w = ts_win.nlargest(n=3).mean()
lg3s = ts_sum.nsmallest(n=3).mean()
xg.loc[year,'hg3'] = np.round(hg3,2)
xg.loc[year,'lg3'] = np.round(lg3,2)
xg.loc[year,'hg3w'] = np.round(hg3w,2)
xg.loc[year,'lg3s'] = np.round(lg3s,2)
xg['vg3'] = self.vg3()
for date in self.VGDATES:
xg[f'vg_{date}'] = self.vg1(refdate=date)
xg.loc[year,'n1428'] = n1428
return xg
def xg(self,reference='datum',name=True):
"""Return table of GxG groundwater statistics for each
hydrological year
Parameters
----------
reference : {'datum','surface'}, default 'datum'
reference level for gxg statistics
name : bool, default True
include series name in index
Return
------
pd.DataFrame"""
if reference not in ['datum','surface']:
warnings.warn((f'Reference level \'{reference}\' is not allowed. '
f'Reference level \'datum\' is assumed.'))
reference = 'datum'
xg = self._xgnap.copy()
if name==True:
xg = pd.concat({self.srname: xg}, names=['series'])
if reference=='datum':
return xg
for col in xg.columns:
if col in ['n1428']:
continue
xg[col] = (self._surface - xg[col])*100
xg[col] = xg[col].apply(lambda x:math.floor(x) if
not np.isnan(x) else x)
##if not np.isnan(xg[col]):
## xg[col] = math.floor(xg[col])
return xg
def gxg(self,reference='datum',minimal=False):
"""Return table with GxG for one head series
Parameters
----------
minimal : bool, default True
return minimal selection of stats
reference : {'datum','surface'}, default 'datum'
reference level for gxg statistics
Returns
-------
pd.DataFrame"""
"""
if hasattr(self,'_minimal'):
if self._minimal!=minimal:
self._reset()
self._minimal = minimal
if self._reflev==reflev:
if hasattr(self,'_gxg'):
return self._gxg
else:
self._reset()
self._validate_reflev (reflev)
"""
xg = self.xg(reference=reference,name=False)
gxg = pd.Series(name=self.srname,dtype='object')
for col in xg.columns:
sr = xg[col][xg[col].notnull()]
if reference=='datum':
gxg[col] = np.round(sr.mean(),2)
if reference=='surface':
##gxg[col] = np.round(sr.mean())
if not np.isnan(sr.mean()):
gxg[col] = math.floor(sr.mean())
else:
gxg[col] = np.nan
if col=='n1428':
gxg[col] = math.floor(sr.mean())
# calculate gt
gxg['gt'] = self.gt()
gxg['gxgref'] = reference
# calculate std
for col in xg.columns:
if col in ['n1428',]: #'measfrq']:
continue
if reference=='datum':
gxg[col+'_std'] = np.round(xg[col].std(
skipna=True),2)
elif reference=='surface':
sr = xg[col]
gxg[col+'_std'] = np.round(sr.std(skipna=True))
else:
                raise ValueError((f'Reference level {reference} is not valid. '
                    f'Valid reference levels are \'datum\' or \'surface\'.'))
# calculate standard error
for col in xg.columns:
if col in ['n1428',]:
continue
if reference=='datum':
sr = xg[col]
gxg[col+'_se'] = np.round(sr.std(skipna=True
)/np.sqrt(sr.count()),2)
if reference=='surface':
sr = xg[col]
gxg[col+'_se'] = np.round(sr.std(skipna=True
)/np.sqrt(sr.count()),0)
# count nyears
for col in xg.columns:
if col in ['n1428',]:
continue
sr = xg[col][xg[col].notnull()]
gxg[f'{col}_nyrs'] = np.round(sr.count())
replacements = [('hg3','ghg'),('lg3','glg'),('vg','gvg'),]
for old,new in replacements:
gxg.index = gxg.index.str.replace(old,new)
# gvg approximation formulas
if reference=='surface':
for apx in self.APPROXIMATIONS:
rowname = 'gvg_'+apx.lower()
gxg[rowname] = self.gvg_approximate(apx)
self._gxg = gxg
if minimal:
colnames = ['ghg','glg','gvg3','gvg_apr1','gt','gxgref',
'n1428',]
gxg = gxg[gxg.index.intersection(colnames)]
return gxg
def ghg(self):
"""Return mean highest level (GHG)"""
if not hasattr(self,'_gxg'):
self.gxg()
return self._gxg['ghg']
def glg(self):
"""Return mean highest level (GHG)"""
if not hasattr(self,'_gxg'):
self.gxg()
return self._gxg['glg']
def gt(self):
"""Return groundwater class table as str"""
if not hasattr(self,'_xg'):
self._calculate_xg_nap()
# do not call self._gxg to avoid recursion error because gt()
# is used in gxg()
with warnings.catch_warnings():
# numpy raises a silly warning with nanmean on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty slice')
ghg = (self._surface - np.nanmean(self._xgnap['hg3']))*100
glg = (self._surface - np.nanmean(self._xgnap['lg3']))*100
if (ghg<20) & (glg<50):
return 'I'
if (ghg<25) & (50<glg<80):
return 'II'
if (25<ghg<40) & (50<glg<80):
return 'II*'
if (ghg<25) & (80<glg<120):
return 'III'
if (25<ghg<40) & (80<glg<120):
return 'III*'
if (ghg>40) & (80<glg<120):
return 'IV'
if (ghg<25) & (glg>120):
return 'V'
if (25<ghg<40) & (glg>120):
return 'V*'
if (40<ghg<80) & (glg>120):
return 'VI'
if (80<ghg<140):
return 'VII'
if (ghg>140):
return 'VII*'
return np.nan
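    # Worked example: GHG = 30 cm and GLG = 90 cm below surface satisfy
    # (25 < ghg < 40) and (80 < glg < 120), so gt() returns 'III*'.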
# acer palmatum
def gvg_approximate(self,formula=None):
"""Return GVG calculated with approximation based on GHG and GLG
Parameters
----------
        formula : {'SLUIJS82','HEESEN74','SLUIJS76a','SLUIJS76b','SLUIJS89pol',
            'SLUIJS89sto','RUNHAAR89','GAAST06'}, default 'SLUIJS82'
Notes
-----
Values for GHG and GLG can be estimated from visual soil profile
characteristics, allowing mapping of groundwater classes on soil
maps. GVG unfortunately can not be estimeted is this way.
        Therefore, several regression formulas have been given in
        literature for estimating GVG from GHG and GLG estimates. The
        formulas listed in APPROXIMATIONS are implemented."""
if formula is None:
formula = self.APPROXIMATIONS[0]
if formula not in self.APPROXIMATIONS:
            warnings.warn(f'GVG approximation formula name {formula} not '
                f'recognised. {self.APPROXIMATIONS[0]} is assumed.')
            formula = self.APPROXIMATIONS[0]
if not hasattr(self,'_xgnap'):
self._calculate_xg_nap()
        if formula in ['SLUIJS89pol','SLUIJS89sto']:
with warnings.catch_warnings():
# numpy raises a silly warning with nanmean on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty slice')
GHG = np.nanmean(self._xgnap['hg3w'])
GLG = np.nanmean(self._xgnap['lg3s'])
else:
with warnings.catch_warnings():
# numpy raises a silly warning with nanmean on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty slice')
GHG = np.nanmean(self._xgnap['hg3'])
GLG = np.nanmean(self._xgnap['lg3'])
GHG = (self._surface-GHG)*100
GLG = (self._surface-GLG)*100
if formula=='HEESEN74': # april 15th
GVG = 0.2*(GLG-GHG)+GHG+12
elif formula=='SLUIJS76a': # april 14th
GVG = 0.15*(GLG-GHG)+(1.01*GHG)+14.3
elif formula=='SLUIJS76b': # april 14th
GVG = 1.03*GHG+27.3
elif formula=='SLUIJS82':
GVG = 5.4 + 1.02*GHG + 0.19*(GLG-GHG)
elif formula=='RUNHAAR89':
GVG = 0.5 + 0.85*GHG + 0.20*GLG # (+/-7,5cm)
elif formula=='SLUIJS89pol':
GVG = 12.0 + 0.96*GHG + 0.17*(GLG-GHG)
elif formula=='SLUIJS89sto':
GVG = 4.0 + 0.97*GHG + 0.15*(GLG-GHG)
elif formula=='GAAST06':
GVG = 13.7 + 0.70*GHG + 0.25*GLG
else:
raise ValueError((f'\'{formula}\' was not recognised as a gvg '
f'approximation formula. Valid names are '
f'{self.APPROXIMATIONS}'))
        # The source is truncated here; completion assumed: round down to whole
        # centimetres below surface, matching the convention used in gxg().
        if not np.isnan(GVG):
            GVG = math.floor(GVG)
        return GVG
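    # Worked example for the default formula: with GHG = 40 cm and GLG = 120 cm
    # below surface, SLUIJS82 gives GVG = 5.4 + 1.02*40 + 0.19*(120 - 40)
    # = 61.4 cm below surface.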
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import tempfile
import warnings
import numpy
from numpy import testing as npt
import tables
from tables import Atom, ClosedNodeError, NoSuchNodeError
from tables.utils import byteorders
from tables.tests import common
from tables.tests.common import allequal
from tables.tests.common import unittest, test_filename
from tables.tests.common import PyTablesTestCase as TestCase
from six.moves import range
#warnings.resetwarnings()
class BasicTestCase(TestCase):
"""Basic test for all the supported typecodes present in numpy.
All of them are included on pytables.
"""
endiancheck = False
def write_read(self, testarray):
a = testarray
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
a = b
fileh.create_array(root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = root.somearray.read()
# Compare them. They should be equal.
if common.verbose and not allequal(a, b):
print("Write and read arrays differ!")
# print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
# print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
if a.dtype.kind != "S":
print("Array written byteorder:", a.dtype.byteorder)
print("Array read byteorder:", b.dtype.byteorder)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(bbo, sys.byteorder)
if self.endiancheck:
self.assertNotEqual(bbo, abo)
obj = root.somearray
self.assertEqual(obj.flavor, 'numpy')
self.assertEqual(obj.shape, a.shape)
self.assertEqual(obj.ndim, a.ndim)
self.assertEqual(obj.chunkshape, None)
if a.shape:
nrows = a.shape[0]
else:
# scalar
nrows = 1
self.assertEqual(obj.nrows, nrows)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def write_read_out_arg(self, testarray):
a = testarray
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
a = b
fileh.create_array(root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = numpy.empty_like(a, dtype=a.dtype)
root.somearray.read(out=b)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(abo, bbo)
if self.endiancheck:
self.assertNotEqual(bbo, sys.byteorder)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def write_read_atom_shape_args(self, testarray):
a = testarray
atom = Atom.from_dtype(a.dtype)
shape = a.shape
byteorder = None
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
if b.dtype.byteorder in ('>', '<'):
byteorder = byteorders[b.dtype.byteorder]
a = b
ptarr = fileh.create_array(root, 'somearray',
atom=atom, shape=shape,
title="Some array",
# specify the byteorder explicitly
# since there is no way to deduce
# it in this case
byteorder=byteorder)
self.assertEqual(shape, ptarr.shape)
self.assertEqual(atom, ptarr.atom)
ptarr[...] = a
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = root.somearray.read()
# Compare them. They should be equal.
if common.verbose and not allequal(a, b):
print("Write and read arrays differ!")
# print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
# print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
if a.dtype.kind != "S":
print("Array written byteorder:", a.dtype.byteorder)
print("Array read byteorder:", b.dtype.byteorder)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(bbo, sys.byteorder)
if self.endiancheck:
self.assertNotEqual(bbo, abo)
obj = root.somearray
self.assertEqual(obj.flavor, 'numpy')
self.assertEqual(obj.shape, a.shape)
self.assertEqual(obj.ndim, a.ndim)
self.assertEqual(obj.chunkshape, None)
if a.shape:
nrows = a.shape[0]
else:
# scalar
nrows = 1
self.assertEqual(obj.nrows, nrows)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def setup00_char(self):
"""Data integrity during recovery (character objects)"""
if not isinstance(self.tupleChar, numpy.ndarray):
a = numpy.array(self.tupleChar, dtype="S")
else:
a = self.tupleChar
return a
def test00_char(self):
a = self.setup00_char()
self.write_read(a)
def test00_char_out_arg(self):
a = self.setup00_char()
self.write_read_out_arg(a)
def test00_char_atom_shape_args(self):
a = self.setup00_char()
self.write_read_atom_shape_args(a)
def test00b_char(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
fileh.create_array(fileh.root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = fileh.root.somearray.read()
if isinstance(a, bytes):
self.assertEqual(type(b), bytes)
self.assertEqual(a, b)
else:
# If a is not a python string, then it should be a list
# or ndarray
self.assertTrue(type(b) in [list, numpy.ndarray])
finally:
# Then, delete the file
os.remove(filename)
def test00b_char_out_arg(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
fileh.create_array(fileh.root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = numpy.empty_like(a)
if fileh.root.somearray.flavor != 'numpy':
self.assertRaises(TypeError,
lambda: fileh.root.somearray.read(out=b))
else:
fileh.root.somearray.read(out=b)
self.assertTrue(type(b), numpy.ndarray)
finally:
# Then, delete the file
os.remove(filename)
def test00b_char_atom_shape_args(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
nparr = numpy.asarray(a)
atom = Atom.from_dtype(nparr.dtype)
shape = nparr.shape
if nparr.dtype.byteorder in ('>', '<'):
byteorder = byteorders[nparr.dtype.byteorder]
else:
byteorder = None
ptarr = fileh.create_array(fileh.root, 'somearray',
atom=atom, shape=shape,
byteorder=byteorder,
title="Some array")
self.assertEqual(shape, ptarr.shape)
self.assertEqual(atom, ptarr.atom)
ptarr[...] = a
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = numpy.empty_like(a)
if fileh.root.somearray.flavor != 'numpy':
self.assertRaises(TypeError,
lambda: fileh.root.somearray.read(out=b))
else:
fileh.root.somearray.read(out=b)
self.assertTrue(type(b), numpy.ndarray)
finally:
# Then, delete the file
os.remove(filename)
def setup01_char_nc(self):
"""Data integrity during recovery (non-contiguous character objects)"""
if not isinstance(self.tupleChar, numpy.ndarray):
a = numpy.array(self.tupleChar, dtype="S")
else:
a = self.tupleChar
if a.ndim == 0:
b = a.copy()
else:
b = a[::2]
# Ensure that this numpy string is non-contiguous
if len(b) > 1:
self.assertEqual(b.flags.contiguous, False)
return b
def test01_char_nc(self):
b = self.setup01_char_nc()
self.write_read(b)
def test01_char_nc_out_arg(self):
b = self.setup01_char_nc()
self.write_read_out_arg(b)
def test01_char_nc_atom_shape_args(self):
b = self.setup01_char_nc()
self.write_read_atom_shape_args(b)
def test02_types(self):
"""Data integrity during recovery (numerical types)"""
typecodes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128']
for name in ('float16', 'float96', 'float128',
'complex192', 'complex256'):
atomname = name.capitalize() + 'Atom'
if hasattr(tables, atomname):
typecodes.append(name)
for typecode in typecodes:
a = numpy.array(self.tupleInt, typecode)
self.write_read(a)
b = numpy.array(self.tupleInt, typecode)
self.write_read_out_arg(b)
c = numpy.array(self.tupleInt, typecode)
self.write_read_atom_shape_args(c)
def test03_types_nc(self):
"""Data integrity during recovery (non-contiguous numerical types)"""
typecodes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128', ]
for name in ('float16', 'float96', 'float128',
'complex192', 'complex256'):
atomname = name.capitalize() + 'Atom'
if hasattr(tables, atomname):
typecodes.append(name)
for typecode in typecodes:
a = numpy.array(self.tupleInt, typecode)
if a.ndim == 0:
b1 = a.copy()
b2 = a.copy()
b3 = a.copy()
else:
b1 = a[::2]
b2 = a[::2]
b3 = a[::2]
# Ensure that this array is non-contiguous
if len(b1) > 1:
self.assertEqual(b1.flags.contiguous, False)
if len(b2) > 1:
self.assertEqual(b2.flags.contiguous, False)
if len(b3) > 1:
self.assertEqual(b3.flags.contiguous, False)
self.write_read(b1)
self.write_read_out_arg(b2)
self.write_read_atom_shape_args(b3)
class Basic0DOneTestCase(BasicTestCase):
# Scalar case
title = "Rank-0 case 1"
tupleInt = 3
tupleChar = b"3"
endiancheck = True
class Basic0DTwoTestCase(BasicTestCase):
# Scalar case
title = "Rank-0 case 2"
tupleInt = 33
tupleChar = b"33"
endiancheck = True
class Basic1DZeroTestCase(BasicTestCase):
# This test case is not supported by PyTables (HDF5 limitations)
# 1D case
title = "Rank-1 case 0"
tupleInt = ()
tupleChar = ()
endiancheck = False
class Basic1DOneTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 1"
tupleInt = (3,)
tupleChar = (b"a",)
endiancheck = True
class Basic1DTwoTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 2"
tupleInt = (3, 4)
tupleChar = (b"aaa",)
endiancheck = True
class Basic1DThreeTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 3"
tupleInt = (3, 4, 5)
tupleChar = (b"aaa", b"bbb",)
endiancheck = True
class Basic2DOneTestCase(BasicTestCase):
# 2D case
title = "Rank-2 case 1"
tupleInt = numpy.array(numpy.arange((4)**2))
tupleInt.shape = (4,)*2
tupleChar = numpy.array(["abc"]*3**2, dtype="S3")
tupleChar.shape = (3,)*2
endiancheck = True
class Basic2DTwoTestCase(BasicTestCase):
# 2D case, with a multidimensional dtype
title = "Rank-2 case 2"
tupleInt = numpy.array(numpy.arange((4)), dtype=(numpy.int_, (4,)))
tupleChar = numpy.array(["abc"]*3, dtype=("S3", (3,)))
endiancheck = True
class Basic10DTestCase(BasicTestCase):
# 10D case
title = "Rank-10 test"
tupleInt = numpy.array(numpy.arange((2)**10))
tupleInt.shape = (2,)*10
tupleChar = numpy.array(
["abc"]*2**10, dtype="S3")
tupleChar.shape = (2,)*10
endiancheck = True
class Basic32DTestCase(BasicTestCase):
# 32D case (maximum)
title = "Rank-32 test"
tupleInt = numpy.array((32,))
tupleInt.shape = (1,)*32
tupleChar = numpy.array(["121"], dtype="S3")
tupleChar.shape = (1,)*32
class ReadOutArgumentTests(common.TempFileMixin, TestCase):
def setUp(self):
super(ReadOutArgumentTests, self).setUp()
self.size = 1000
def create_array(self):
array = numpy.arange(self.size, dtype='f8')
disk_array = self.h5file.create_array('/', 'array', array)
return array, disk_array
def test_read_entire_array(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'f8')
disk_array.read(out=out_buffer)
numpy.testing.assert_equal(out_buffer, array)
def test_read_contiguous_slice1(self):
array, disk_array = self.create_array()
out_buffer = numpy.arange(self.size, dtype='f8')
out_buffer = numpy.random.permutation(out_buffer)
out_buffer_orig = out_buffer.copy()
start = self.size // 2
disk_array.read(start=start, stop=self.size, out=out_buffer[start:])
numpy.testing.assert_equal(out_buffer[start:], array[start:])
numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])
def test_read_contiguous_slice2(self):
array, disk_array = self.create_array()
out_buffer = numpy.arange(self.size, dtype='f8')
out_buffer = numpy.random.permutation(out_buffer)
out_buffer_orig = out_buffer.copy()
start = self.size // 4
stop = self.size - start
disk_array.read(start=start, stop=stop, out=out_buffer[start:stop])
numpy.testing.assert_equal(out_buffer[start:stop], array[start:stop])
numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])
numpy.testing.assert_equal(out_buffer[stop:], out_buffer_orig[stop:])
def test_read_non_contiguous_slice_contiguous_buffer(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size // 2, ), dtype='f8')
disk_array.read(start=0, stop=self.size, step=2, out=out_buffer)
numpy.testing.assert_equal(out_buffer, array[0:self.size:2])
def test_read_non_contiguous_buffer(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'f8')
out_buffer_slice = out_buffer[0:self.size:2]
# once Python 2.6 support is dropped, this could change
# to assertRaisesRegexp to check exception type and message at once
self.assertRaises(ValueError, disk_array.read, 0, self.size, 2,
out_buffer_slice)
try:
disk_array.read(0, self.size, 2, out_buffer_slice)
except ValueError as exc:
self.assertEqual('output array not C contiguous', str(exc))
def test_buffer_too_small(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size // 2, ), 'f8')
self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
out_buffer)
try:
disk_array.read(0, self.size, 1, out_buffer)
except ValueError as exc:
self.assertTrue('output array size invalid, got' in str(exc))
def test_buffer_too_large(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size + 1, ), 'f8')
self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
out_buffer)
try:
disk_array.read(0, self.size, 1, out_buffer)
except ValueError as exc:
self.assertTrue('output array size invalid, got' in str(exc))
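# Summary of the behaviour exercised above: Array.read(start, stop, step, out=...) fills a
# caller-supplied buffer, and that buffer must be C-contiguous and sized to match the
# requested selection, otherwise a ValueError is raised (as these tests assert).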
class SizeOnDiskInMemoryPropertyTestCase(common.TempFileMixin, TestCase):
def setUp(self):
super(SizeOnDiskInMemoryPropertyTestCase, self).setUp()
self.array_size = (10, 10)
self.array = self.h5file.create_array(
'/', 'somearray', numpy.zeros(self.array_size, 'i4'))
def test_all_zeros(self):
self.assertEqual(self.array.size_on_disk, 10 * 10 * 4)
self.assertEqual(self.array.size_in_memory, 10 * 10 * 4)
class UnalignedAndComplexTestCase(common.TempFileMixin, TestCase):
"""Basic test for all the supported typecodes present in numpy.
Most of them are included in PyTables.
"""
def setUp(self):
super(UnalignedAndComplexTestCase, self).setUp()
self.root = self.h5file.root
def write_read(self, testArray):
if common.verbose:
print('\n', '-=' * 30)
print("\nRunning test for array with type '%s'" %
testArray.dtype.type)
# Create the array under root and name 'somearray'
a = testArray
if self.endiancheck:
byteorder = {"little": "big", "big": "little"}[sys.byteorder]
else:
byteorder = sys.byteorder
self.h5file.create_array(self.root, 'somearray', a, "Some array",
byteorder=byteorder)
if self.reopen:
self._reopen()
self.root = self.h5file.root
# Read the saved array
b = self.root.somearray.read()
# Get an array to be compared in the correct byteorder
c = a.newbyteorder(byteorder)
# Compare them. They should be equal.
if not allequal(c, b) and common.verbose:
print("Write and read arrays differ!")
print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, self.root.somearray.shape)
if a.dtype.byteorder != "|":
self.assertEqual(a.dtype, b.dtype)
self.assertEqual(a.dtype, self.root.somearray.atom.dtype)
self.assertEqual(byteorders[b.dtype.byteorder], sys.byteorder)
self.assertEqual(self.root.somearray.byteorder, byteorder)
self.assertTrue(allequal(c, b))
def test01_signedShort_unaligned(self):
"""Checking an unaligned signed short integer array"""
r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
a = r["f2"]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.int16)
self.write_read(a)
def test02_float_unaligned(self):
"""Checking an unaligned single precision array"""
r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
a = r["f1"]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.float32)
self.write_read(a)
def test03_byte_offset(self):
"""Checking an offsetted byte array"""
r = numpy.arange(100, dtype=numpy.int8)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test04_short_offset(self):
"""Checking an offsetted unsigned short int precision array"""
r = numpy.arange(100, dtype=numpy.uint32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test05_int_offset(self):
"""Checking an offsetted integer array"""
r = numpy.arange(100, dtype=numpy.int32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test06_longlongint_offset(self):
"""Checking an offsetted long long integer array"""
r = numpy.arange(100, dtype=numpy.int64)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test07_float_offset(self):
"""Checking an offsetted single precision array"""
r = numpy.arange(100, dtype=numpy.float32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test08_double_offset(self):
"""Checking an offsetted double precision array"""
r = numpy.arange(100, dtype=numpy.float64)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test09_float_offset_unaligned(self):
"""Checking an unaligned and offsetted single precision array"""
r = numpy.rec.array(b'a'*200, formats='i1,3f4,i2', shape=10)
a = r["f1"][3]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.float32)
self.write_read(a)
def test10_double_offset_unaligned(self):
"""Checking an unaligned and offsetted double precision array"""
r = numpy.rec.array(b'a'*400, formats='i1,3f8,i2', shape=10)
a = r["f1"][3]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.float64)
self.write_read(a)
def test11_int_byteorder(self):
"""Checking setting data with different byteorder in a range
(integer)"""
# Save an array with the reversed byteorder on it
a = numpy.arange(25, dtype=numpy.int32).reshape(5, 5)
a = a.byteswap()
a = a.newbyteorder()
array = self.h5file.create_array(
self.h5file.root, 'array', a, "byteorder (int)")
# Read a subarray (got an array with the machine byteorder)
b = array[2:4, 3:5]
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
# Check that the array is back in the correct byteorder
c = array[...]
if common.verbose:
print("byteorder of array on disk-->", array.byteorder)
print("byteorder of subarray-->", b.dtype.byteorder)
print("subarray-->", b)
print("retrieved array-->", c)
self.assertTrue(allequal(a, c))
def test12_float_byteorder(self):
"""Checking setting data with different byteorder in a range (float)"""
# Save an array with the reversed byteorder on it
a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
a = a.byteswap()
a = a.newbyteorder()
array = self.h5file.create_array(
self.h5file.root, 'array', a, "byteorder (float)")
# Read a subarray (got an array with the machine byteorder)
b = array[2:4, 3:5]
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
# Check that the array is back in the correct byteorder
c = array[...]
if common.verbose:
print("byteorder of array on disk-->", array.byteorder)
print("byteorder of subarray-->", b.dtype.byteorder)
print("subarray-->", b)
print("retrieved array-->", c)
self.assertTrue(allequal(a, c))
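# Background for the two byteorder tests above: ndarray.byteswap() swaps the bytes of the
# data in memory, while ndarray.newbyteorder() only flips the byte order declared in the
# dtype; applying both keeps the logical values but tags them with the opposite byte order,
# which is what lets the subarray round-trip through the on-disk array.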
class ComplexNotReopenNotEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = False
reopen = False
class ComplexReopenNotEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = False
reopen = True
class ComplexNotReopenEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = True
reopen = False
class ComplexReopenEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = True
reopen = True
class GroupsArrayTestCase(common.TempFileMixin, TestCase):
"""This test class checks combinations of arrays with groups."""
def test00_iterativeGroups(self):
"""Checking combinations of arrays with groups."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_iterativeGroups..." %
self.__class__.__name__)
# Get the root group
group = self.h5file.root
# Set the type codes to test
# The typecodes below do expose an ambiguity that is reported in:
# http://projects.scipy.org/scipy/numpy/ticket/283 and
# http://projects.scipy.org/scipy/numpy/ticket/290
typecodes = ['b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'f', 'd',
'F', 'D']
if hasattr(tables, 'Float16Atom'):
typecodes.append('e')
if hasattr(tables, 'Float96Atom') or hasattr(tables, 'Float128Atom'):
typecodes.append('g')
if (hasattr(tables, 'Complex192Atom') or
hasattr(tables, 'Complex256Atom')):
typecodes.append('G')
for i, typecode in enumerate(typecodes):
a = numpy.ones((3,), typecode)
dsetname = 'array_' + typecode
if common.verbose:
print("Creating dataset:", group._g_join(dsetname))
self.h5file.create_array(group, dsetname, a, "Large array")
group = self.h5file.create_group(group, 'group' + str(i))
# Reopen the file
self._reopen()
# Get the root group
group = self.h5file.root
# Get the metadata on the previously saved arrays
for i in range(len(typecodes)):
# Create an array for later comparison
a = numpy.ones((3,), typecodes[i])
# Get the dset object hanging from group
dset = getattr(group, 'array_' + typecodes[i])
# Get the actual array
b = dset.read()
if common.verbose:
print("Info from dataset:", dset._v_pathname)
print(" shape ==>", dset.shape, end=' ')
print(" type ==> %s" % dset.atom.dtype)
print("Array b read from file. Shape: ==>", b.shape, end=' ')
print(". Type ==> %s" % b.dtype)
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.dtype, b.dtype)
self.assertTrue(allequal(a, b))
# Iterate over the next group
group = getattr(group, 'group' + str(i))
def test01_largeRankArrays(self):
"""Checking creation of large rank arrays (0 < rank <= 32)
It also uses array ranks ranging up to maxrank.
"""
# maximum level of recursion (deepest group level) achieved:
# maxrank = 32 (for an effective maximum rank of 32)
# This limit is due to HDF5 library limitations.
minrank = 1
maxrank = 32
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_largeRankArrays..." %
self.__class__.__name__)
print("Maximum rank for tested arrays:", maxrank)
group = self.h5file.root
if common.verbose:
print("Rank array writing progress: ", end=' ')
for rank in range(minrank, maxrank + 1):
# Create an array of integers, with incrementally bigger ranges
a = numpy.ones((1,) * rank, numpy.int32)
if common.verbose:
print("%3d," % (rank), end=' ')
self.h5file.create_array(group, "array", a, "Rank: %s" % rank)
group = self.h5file.create_group(group, 'group' + str(rank))
# Reopen the file
self._reopen()
group = self.h5file.root
if common.verbose:
print()
print("Rank array reading progress: ")
# Get the metadata on the previously saved arrays
for rank in range(minrank, maxrank + 1):
# Create an array for later comparison
a = numpy.ones((1,) * rank, numpy.int32)
# Get the actual array
b = group.array.read()
if common.verbose:
print("%3d," % (rank), end=' ')
if common.verbose and not allequal(a, b):
print("Info from dataset:", group.array._v_pathname)
print(" Shape: ==>", group.array.shape, end=' ')
print(" typecode ==> %c" % group.array.typecode)
print("Array b read from file. Shape: ==>", b.shape, end=' ')
print(". Type ==> %c" % b.dtype)
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.dtype, b.dtype)
self.assertTrue(allequal(a, b))
# print(self.h5file)
# Iterate over the next group
group = self.h5file.get_node(group, 'group' + str(rank))
if common.verbose:
print() # This flushes the stdout buffer
class CopyTestCase(common.TempFileMixin, TestCase):
def test01_copy(self):
"""Checking Array.copy() method."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Copy to another Array
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
# print("dirs-->", dir(array1), dir(array2))
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.title, array2.title)
def test02_copy(self):
"""Checking Array.copy() method (where specified)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Copy to another Array
group1 = self.h5file.create_group("/", "group1")
array2 = array1.copy(group1, 'array2')
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.group1.array2
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
# print("dirs-->", dir(array1), dir(array2))
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.title, array2.title)
def test03_copy(self):
"""Checking Array.copy() method (checking title copying)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
# Copy it to another Array
array2 = array1.copy('/', 'array2', title="title array2")
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
# Assert user attributes
if common.verbose:
print("title of destination array-->", array2.title)
self.assertEqual(array2.title, "title array2")
def test04_copy(self):
"""Checking Array.copy() method (user attributes copied)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test05_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=1)
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Assert user attributes
self.assertEqual(array2.attrs.attr1, "attr1")
self.assertEqual(array2.attrs.attr2, 2)
def test04b_copy(self):
"""Checking Array.copy() method (user attributes not copied)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test05b_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=0)
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Assert user attributes
self.assertEqual(hasattr(array2.attrs, "attr1"), 0)
self.assertEqual(hasattr(array2.attrs, "attr2"), 0)
class CloseCopyTestCase(CopyTestCase):
close = 1
class OpenCopyTestCase(CopyTestCase):
close = 0
class CopyIndexTestCase(common.TempFileMixin, TestCase):
def test01_index(self):
"""Checking Array.copy() method with indexes."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_index..." % self.__class__.__name__)
# Create a numpy
r = numpy.arange(200, dtype='int32')
r.shape = (100, 2)
# Save it in a array:
array1 = self.h5file.create_array(
self.h5file.root, 'array1', r, "title array1")
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print("nrows in array2-->", array2.nrows)
print("and it should be-->", r2.shape[0])
self.assertEqual(r2.shape[0], array2.nrows)
def test02_indexclosef(self):
"""Checking Array.copy() method with indexes (close file version)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02_indexclosef..." % self.__class__.__name__)
# Create a numpy
r = numpy.arange(200, dtype='int32')
r.shape = (100, 2)
# Save it in a array:
array1 = self.h5file.create_array(
self.h5file.root, 'array1', r, "title array1")
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
# Close and reopen the file
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print("nrows in array2-->", array2.nrows)
print("and it should be-->", r2.shape[0])
self.assertEqual(r2.shape[0], array2.nrows)
class CopyIndex1TestCase(CopyIndexTestCase):
start = 0
stop = 7
step = 1
class CopyIndex2TestCase(CopyIndexTestCase):
start = 0
stop = -1
step = 1
class CopyIndex3TestCase(CopyIndexTestCase):
start = 1
stop = 7
step = 1
class CopyIndex4TestCase(CopyIndexTestCase):
start = 0
stop = 6
step = 1
class CopyIndex5TestCase(CopyIndexTestCase):
start = 3
stop = 7
step = 1
class CopyIndex6TestCase(CopyIndexTestCase):
start = 3
stop = 6
step = 2
class CopyIndex7TestCase(CopyIndexTestCase):
start = 0
stop = 7
step = 10
class CopyIndex8TestCase(CopyIndexTestCase):
start = 6
stop = -1 # Negative values mean counting from the end
step = 1
class CopyIndex9TestCase(CopyIndexTestCase):
start = 3
stop = 4
step = 1
class CopyIndex10TestCase(CopyIndexTestCase):
start = 3
stop = 4
step = 2
class CopyIndex11TestCase(CopyIndexTestCase):
start = -3
stop = -1
step = 2
class CopyIndex12TestCase(CopyIndexTestCase):
start = -1 # Should point to the last element
stop = None # None should mean the last element (including it)
step = 1
class GetItemTestCase(common.TempFileMixin, TestCase):
def test00_single(self):
"""Single element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original first element:", a[0], type(a[0]))
print("Read first element:", arr[0], type(arr[0]))
self.assertTrue(allequal(a[0], arr[0]))
self.assertEqual(type(a[0]), type(arr[0]))
def test01_single(self):
"""Single element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original first element:", a[0], type(a[0]))
print("Read first element:", arr[0], type(arr[0]))
self.assertEqual(a[0], arr[0])
self.assertEqual(type(a[0]), type(arr[0]))
def test02_range(self):
"""Range element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test03_range(self):
"""Range element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test04_range(self):
"""Range element access, strided (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test05_range(self):
"""Range element access, strided (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test06_negativeIndex(self):
"""Negative Index element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original last element:", a[-1])
print("Read last element:", arr[-1])
self.assertTrue(allequal(a[-1], arr[-1]))
def test07_negativeIndex(self):
"""Negative Index element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original before last element:", a[-2])
print("Read before last element:", arr[-2])
if isinstance(a[-2], numpy.ndarray):
self.assertTrue(allequal(a[-2], arr[-2]))
else:
self.assertEqual(a[-2], arr[-2])
def test08_negativeRange(self):
"""Negative range element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
def test09_negativeRange(self):
"""Negative range element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
class GI1NATestCase(GetItemTestCase, TestCase):
title = "Rank-1 case 1"
numericalList = numpy.array([3])
numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
charList = numpy.array(["3"], 'S')
charListME = numpy.array(
["321", "221", "121", "021", "421", "521", "621"], 'S')
class GI1NAOpenTestCase(GI1NATestCase):
close = 0
class GI1NACloseTestCase(GI1NATestCase):
close = 1
class GI2NATestCase(GetItemTestCase):
# A more complex example
title = "Rank-1,2 case 2"
numericalList = numpy.array([3, 4])
numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
[2, 1, 0, 4, 5, 6, 7],
[4, 3, 2, 1, 0, 4, 5],
[3, 2, 1, 0, 4, 5, 6],
[3, 2, 1, 0, 4, 5, 6]])
charList = numpy.array(["a", "b"], 'S')
charListME = numpy.array(
[["321", "221", "121", "021", "421", "521", "621"],
["21", "21", "11", "02", "42", "21", "61"],
["31", "21", "12", "21", "41", "51", "621"],
["321", "221", "121", "021",
"421", "521", "621"],
["3241", "2321", "13216",
"0621", "4421", "5421", "a621"],
["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]], 'S')
class GI2NAOpenTestCase(GI2NATestCase):
close = 0
class GI2NACloseTestCase(GI2NATestCase):
close = 1
class SetItemTestCase(common.TempFileMixin, TestCase):
def test00_single(self):
"""Single element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify a single element of a and arr:
a[0] = b"b"
arr[0] = b"b"
# Get and compare an element
if common.verbose:
print("Original first element:", a[0])
print("Read first element:", arr[0])
self.assertTrue(allequal(a[0], arr[0]))
def test01_single(self):
"""Single element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
a[0] = 333
arr[0] = 333
# Get and compare an element
if common.verbose:
print("Original first element:", a[0])
print("Read first element:", arr[0])
self.assertEqual(a[0], arr[0])
def test02_range(self):
"""Range element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
a[1:3] = b"xXx"
arr[1:3] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test03_range(self):
"""Range element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(1, 3, None)
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
arr[s] = rng
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test04_range(self):
"""Range element update, strided (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(1, 4, 2)
a[s] = b"xXx"
arr[s] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test05_range(self):
"""Range element update, strided (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(1, 4, 2)
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
arr[s] = rng
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test06_negativeIndex(self):
"""Negative Index element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = -1
a[s] = b"xXx"
arr[s] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original last element:", a[-1])
print("Read last element:", arr[-1])
self.assertTrue(allequal(a[-1], arr[-1]))
def test07_negativeIndex(self):
"""Negative Index element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = -2
a[s] = a[s]*2 + 3
arr[s] = arr[s]*2 + 3
# Get and compare an element
if common.verbose:
print("Original before last element:", a[-2])
print("Read before last element:", arr[-2])
if isinstance(a[-2], numpy.ndarray):
self.assertTrue(allequal(a[-2], arr[-2]))
else:
self.assertEqual(a[-2], arr[-2])
def test08_negativeRange(self):
"""Negative range element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(-4, -1, None)
a[s] = b"xXx"
arr[s] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
def test09_negativeRange(self):
"""Negative range element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(-3, -1, None)
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
arr[s] = rng
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
def test10_outOfRange(self):
"""Out of range update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of arr that are out of range:
s = slice(1, a.shape[0]+1, None)
s2 = slice(1, 1000, None)
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
rng2 = numpy.arange(a[s2].size)*2 + 3
rng2.shape = a[s2].shape
arr[s2] = rng2
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
class SI1NATestCase(SetItemTestCase, TestCase):
title = "Rank-1 case 1"
numericalList = numpy.array([3])
numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
import numpy as np
import random
from scipy import interpolate as spi
from matplotlib import pyplot as plt
from matplotlib import animation
from memoize import memoized
class Results(object):
# TODO: improve docs
def __init__(self, shape=None, fname=None, nsigma=1.):
"""Blalbalba
Parameters
----------
shape : int 2-tuple
Shape of the lattice whose measures are stored.
fname : string
Name of a text file to be imported.
nsigma : float
The error in a measured magnitude will be nsigma
times the standard deviation.
"""
# Store parameters
self.nsigma = nsigma
if shape is not None:
self._shape = tuple(shape)
else:
self._shape = None
# If the filename is provided, read the data from there
if fname is not None:
self.readtxt(fname)
else:
# Store parameters
if self._shape is None:
raise ValueError("Lattice shape not given.")
# Initialize results lists
self.Ts = list()
self.mags = list()
self.mag2s = list()
self.mag4s = list()
self.corrmags = list()
self.hamilts = list()
self.hamilt2s = list()
self.hamilt4s = list()
self.corrhamilts = list()
self.nmeasures = list()
self.acceptprobs = list()
self.measureintervals = list()
# Calculate the number of spins
self.nspins = np.prod(self.shape())
def shape(self):
"""Return lattice shape.
"""
return self._shape
# TODO: complete docs
# TODO: check if T has been already measured and average
# with the previous data in that case
def measure(self, T, nmeasures, latt, measureinterval=1):
"""Measure blablbalba
"""
# Check if lattice shape is the expected one
if self.shape() != latt.shape():
raise ValueError(
"The lattice shape does not match the Results object one.")
# Store parameters
self.Ts.append(T)
self.nmeasures.append(nmeasures)
self.measureintervals.append(measureinterval)
# Initialize variables
mag_last = 0. # Magnetization in the last measure
hamilt_last = 0. # Hamiltonian in the last measure
mag_sum = 0.
mag2_sum = 0.
mag4_sum = 0.
corrmag_sum = 0.
hamilt_sum = 0.
hamilt2_sum = 0.
hamilt4_sum = 0.
corrhamilt_sum = 0.
naccept = 0
# Start measure loop
for measure_idx in range(nmeasures):
# Evolve
naccept += latt.evolve(measureinterval, T)
# Measure
mag = latt.magnetization()
mag2 = mag*mag
hamilt = latt.hamiltonian()
hamilt2 = hamilt*hamilt
mag_sum += np.abs(mag)
mag2_sum += mag2
mag4_sum += mag2*mag2
corrmag_sum += mag*mag_last
hamilt_sum += hamilt
hamilt2_sum += hamilt2
hamilt4_sum += hamilt2*hamilt2
corrhamilt_sum += hamilt*hamilt_last
# Store last measure
mag_last = mag
hamilt_last = hamilt
# Store measures and calculate means
self.mags.append(mag_sum/nmeasures)
self.mag2s.append(mag2_sum/nmeasures)
self.mag4s.append(mag4_sum/nmeasures)
self.corrmags.append(corrmag_sum/(nmeasures - 1))
self.hamilts.append(hamilt_sum/nmeasures)
self.hamilt2s.append(hamilt2_sum/nmeasures)
self.hamilt4s.append(hamilt4_sum/nmeasures)
self.corrhamilts.append(corrhamilt_sum/(nmeasures - 1))
self.acceptprobs.append(
float(naccept)/(nmeasures*measureinterval*latt.nspins))
return
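# Usage sketch (illustrative only; `latt` stands for any lattice object exposing the
# evolve(), magnetization(), hamiltonian(), shape() and nspins members used above):
#   res = Results(shape=latt.shape())
#   res.measure(T=2.3, nmeasures=1000, latt=latt, measureinterval=10)
#   res.savetxt("ising_run.dat")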
@property
@memoized
def L(self):
"""Return characteristic size of the system.
"""
return np.power(np.prod(self.shape()), 1./len(self.shape()))
# I/O
# ==============================
# TODO: add the data instead of overwriting it and check if the shape
# of the imported file is the same as the object attribute
def readtxt(self, filename):
"""Read data from file.
"""
filedata = np.loadtxt(filename).T
self.Ts = filedata[0].tolist()
self.mags = filedata[1].tolist()
self.mag2s = filedata[2].tolist()
self.mag4s = filedata[3].tolist()
self.corrmags = filedata[4].tolist()
self.hamilts = filedata[5].tolist()
self.hamilt2s = filedata[6].tolist()
self.hamilt4s = filedata[7].tolist()
self.corrhamilts = filedata[8].tolist()
self.acceptprobs = filedata[9].tolist()
self.nmeasures = filedata[10].tolist()
self.measureintervals = filedata[11].tolist()
# Read additional parameters from footer
with open(filename, "r") as f:
lines = f.readlines()
#self._shape = tuple(map(int, lines[-1].split()[2].split(",")))
footer = lines[-1]
# String list with the shape of the lattice
shape_str = footer[footer.find("(")+1:footer.find(")")].split(",")
# If the lattice is 1D, strip leaves an empty string in
# shape_str, for example "(10, )" -> ["10", ""].
# If that is the case, remove the last element.
if shape_str[-1] == "":
shape_str = shape_str[:-1]
self._shape = tuple(map(int, shape_str))
return
def savetxt(self, fname=None):
"""Save data to file.
Parameters
----------
fname : string
Name of the output file. Its default value is
"ising{0}.dat" with {0} the lattice shape tuple.
"""
if fname == None:
fname = "ising{0}.dat".format(self.shape())
headerstring = (
"Temperature\t "
"Mean mag.\t Mag. 2nd moment\t Mag. 4nd moment\t "
"Mag. time corr.\t "
"Mean hamilt.\t Hamilt. 2nd moment\t Hamilt. 4nd moment\t "
"Hamilt. time corr.\t "
"Acceptance probability\t N measures\t Measure interval")
footerstring = "Shape: {0}".format(self.shape())
np.savetxt(
fname,
np.vstack((
self.Ts, self.mags, self.mag2s, self.mag4s,
self.corrmags, self.hamilts, self.hamilt2s,
self.hamilt4s, self.corrhamilts, self.acceptprobs,
self.nmeasures, self.measureintervals)).T,
header=headerstring, footer=footerstring)
return
# Physical magnitudes
# ========================================
def mag_err(self):
"""Calculate the magnetization error.
"""
# Calculate correlation time
corrtime = corr_time(
self.mags, self.mag2s, self.corrmags, self.nmeasures)
return self.nsigma*samplemean_error(
self.mags, self.mag2s, corrtime, self.nmeasures)
def mag2_err(self):
"""Calculate the error of the squared magnetization mean.
"""
# Calculate correlation time. We are making the assumption
# that the correlation time of mag2 is the same as that of mag.
corrtime = corr_time(
self.mags, self.mag2s, self.corrmags, self.nmeasures)
return self.nsigma*samplemean_error(
self.mag2s, self.mag4s, corrtime, self.nmeasures)
def hamilt_err(self):
"""Calculate the Hamiltonian error.
"""
# Calculate correlation time
corrtime = corr_time(
self.hamilts, self.hamilt2s, self.corrhamilts, self.nmeasures)
return self.nsigma*samplemean_error(
self.hamilts, self.hamilt2s, corrtime, self.nmeasures)
def hamilt2_err(self):
"""Calculate the error of the squared Hamiltonian mean.
"""
# Calculate correlation time. We are making the assumption
# that the correlation time of hamilt2 is the same as hamilt's.
corrtime = corr_time(
self.hamilts, self.hamilt2s, self.corrhamilts, self.nmeasures)
return self.nsigma*samplemean_error(
self.hamilt2s, self.hamilt4s, corrtime, self.nmeasures)
def magsuscept(self):
"""Calculate the magnetic susceptibility.
"""
# Store data to numpy arrays
Ts_arr = np.array(self.Ts)
return self.nspins/Ts_arr*samplevariance(
self.mags, self.mag2s, self.nmeasures)
def magsuscept_err(self):
"""Calculate the magnetic susceptibility error.
"""
# Store data to numpy arrays
Ts_arr = np.array(self.Ts)
return self.nspins/Ts_arr*np.sqrt(
np.power(self.mag2_err(), 2)
+ 4.*np.power(self.mags*self.mag_err(), 2))
def specificheat(self):
"""Calculate the specific heat per spin of the lattice.
"""
# Store data to numpy arrays
Ts_arr = np.array(self.Ts)
return 1./(self.nspins*np.power(Ts_arr, 2))*samplevariance(
self.hamilts, self.hamilt2s, self.nmeasures)
def specificheat_err(self):
"""Calculate the specific heat per spin error.
"""
# Store data to numpy arrays
Ts_arr = np.array(self.Ts)
return 1./(self.nspins*np.power(Ts_arr, 2))*np.sqrt(
np.power(self.hamilt2_err(), 2)
+ 4.*np.power(self.hamilts*self.hamilt_err(), 2))
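# The two *_err methods above propagate uncertainties through the sample variance:
# for a quantity proportional to <x^2> - <x>^2 the error is approximately
# sqrt(err(<x^2>)^2 + (2*<x>*err(<x>))^2), which is where the factor 4 inside the
# square roots comes from.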
def binderratio(self):
"""Calculate the Binder ratio or fourth order cumulant.
"""
return (1. - self.mag4s/(3.*np.power(self.mag2s, 2)))
# Scaling
# ========================================
def T_scaled(self, Tcrit, corrlen_exp):
"""Return scaled temperature.
Parameters
----------
Tcrit : float
Critical temperature.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_T(self.Ts, self.L, Tcrit, corrlen_exp)
def mag_scaled(self, mag_exp, corrlen_exp):
"""Return the scaled magnetization.
Parameters
----------
mag_exp : float
Magnetization critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(self.mags, self.L, mag_exp, corrlen_exp)
def mag_scaled_err(self, mag_exp, corrlen_exp):
"""Return the scaled magnetization error.
Parameters
----------
mag_exp : float
Magnetization critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(self.mag_err(), self.L, mag_exp, corrlen_exp)
def magsuscept_scaled(self, magsuscept_exp, corrlen_exp):
"""Return the scaled magnetic susceptibility.
Parameters
----------
magsuscept_exp : float
Magnetic susceptibility critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(
self.magsuscept(), self.L, -magsuscept_exp, corrlen_exp)
def magsuscept_scaled_err(self, magsuscept_exp, corrlen_exp):
"""Return the scaled magnetic susceptibility error.
Parameters
----------
magsuscept_exp : float
Magnetic susceptibility critical scaling exponent.
corrlen_exp : float
Correlation length exponent.
"""
return scale_magnitude(
self.magsuscept_err(), self.L, -magsuscept_exp, corrlen_exp)
def specificheat_scaled(self, specheat_exp, corrlen_exp):
"""Return the scaled magnetization.
Parameters
----------
specheat_exp : float
Specific heat critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(
self.specificheat(), self.L, -specheat_exp, corrlen_exp)
def specificheat_scaled_err(self, specheat_exp, corrlen_exp):
"""Return the scaled magnetization error.
Parameters
----------
specheat_exp : float
Specific heat critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(
self.specificheat_err(), self.L, -specheat_exp, corrlen_exp)
# Scaling related functions
# ========================================
def scale_T(Ts, L, Tcrit, corrlen_exp):
"""Scale the given temperature array.
Parameters
----------
Ts : list
Temperature list to be scaled.
L : float
Lattice characteristic length.
Tcrit : float
Critical temperature.
corrlen_exp : float
Correlation length exponent on temperature.
"""
Ts_arr = np.array(Ts)
return (1 - Ts_arr/Tcrit)*np.power(L, 1./corrlen_exp)
def scale_magnitude(vals, L, exp, corrlen_exp):
"""Return the scaled value of the given magnitude.
Parameters
----------
vals: float list
Magnetization list to be scaled.
L : float
Lattice characteristic length.
exp : float
Critical scaling exponent of the magnitude.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
vals_arr = np.array(vals)
return vals_arr*np.power(L, exp/corrlen_exp)
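# Illustrative sketch (not part of the original API): scaling synthetic data with the
# exactly known 2D Ising exponents (Tcrit ~ 2.269, nu = 1, beta = 1/8). The numbers
# below are assumptions chosen only to show how scale_T and scale_magnitude are meant
# to be combined for a finite-size-scaling collapse.
def _example_fss_scaling():
    # Hypothetical raw data for a lattice of linear size L = 32
    Ts = [2.0, 2.2, 2.4]
    mags = [0.9, 0.7, 0.2]
    L = 32.
    ts = scale_T(Ts, L, Tcrit=2.269, corrlen_exp=1.0)
    ms = scale_magnitude(mags, L, exp=0.125, corrlen_exp=1.0)
    return ts, ms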
def collapse_metric(curves_x, curves_y):
"""Find the collapse metric in the x axis of the given data.
Calculates the collapse metric of the given curves as described
in (Sci Rep. 2016; 6: 38823).
Parameters
----------
curves_x : numpy array list
List with the x array of each curve
curves_y : numpy array list
List with the y array of each curve
Returns
-------
metricval : float
Value of the metric.
"""
# # Check that the two lists have the same number of curves
# if not len(curves_x)==len(curves_y):
# raise ValueError('The lists must have the same size')
# We calculate the span of the curves in the x axis, which will
# be used later to normalize the metric.
xmax = np.amax([np.amax(xs) for xs in curves_x])
xmin = np.amin([np.amin(xs) for xs in curves_x])
spanx = xmax - xmin
# Number of overlapping points and metric value initialization
metricval = 0.
N_ovl= 0
# Iteration over different reference curves
for j_ref, (refcurve_x, refcurve_y) in enumerate(zip(curves_x, curves_y)):
# Find the y limits of the reference curve
refymax = np.amax(refcurve_y)
refymin = np.amin(refcurve_y)
# Linearly interpolate the refcurve to get the x of the
# curve as a function of the y
refcurve_x_interp = spi.interp1d(
refcurve_y, refcurve_x, kind='linear')
for j_curve, (curve_x, curve_y) in enumerate(zip(curves_x, curves_y)):
# Ignore the ref curve
if j_curve == j_ref:
continue
# Extract the points overlapping the reference curve
condition = np.logical_and(curve_y>=refymin, curve_y<=refymax)
ovl_x = np.extract(condition, curve_x)
ovl_y = np.extract(condition, curve_y)
# Save the number of overlapping points
N_ovl += ovl_x.size
# Distance between curve points and interpolated ref curve
metricval += np.linalg.norm(
ovl_x - refcurve_x_interp(ovl_y), ord=1)
metricval = metricval/(N_ovl*spanx)
return metricval
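# Minimal sketch of how collapse_metric is meant to be used (synthetic data, for
# illustration only): two curves that lie exactly on top of each other should give a
# metric value of (nearly) zero.
def _example_collapse_metric():
    xs = np.linspace(0., 1., 50)
    ys = xs**2
    # Perfectly collapsed pair of curves -> metric close to 0
    return collapse_metric([xs, xs.copy()], [ys, ys.copy()])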
# Statistical functions
# ===================================
def variance(mean, momnt2):
"""Calculate the sample variance.
Parameters
----------
mean : float (scalar or array)
Mean value.
momnt2 : float (scalar or array)
Second raw moment (mean of the square).
Returns
-------
variance : float (scalar or array)
"""
momnt2_arr = np.array(momnt2)
return momnt2_arr - np.power(mean, 2)
def samplevariance(mean, momnt2, nmeasure):
"""Calculate the sample variance.
Parameters
----------
mean : float (scalar or array)
Mean value.
momnt2 : float (scalar or array)
Second raw moment (mean of the square).
Returns
-------
variance : float (scalar or array)
"""
nmeasure_arr = np.array(nmeasure)
return nmeasure_arr/(nmeasure_arr - 1.)*variance(mean, momnt2)
# TODO: improve docs
# TODO: ensure the units are right
def corr_time(mean, momnt2, corr, nmeasures):
"""Estimate the correlation time in a Markov chain (with rejection).
Estimates the correlation time using the mean value
of the product in consecutive steps and the variance
(it is assumed that the autocorrelation decays
exponentially).
Parameters
----------
mean : float (scalar or array)
Mean of the magnitued.
momnt2 : float (scalar or array)
Second moment of the magnitude.
corr : float (scalar or array)
Mean value of the product of the magnitude in
consecutive measures.
nmeasures: int (scalar or array)
Number of measures.
Returns
-------
corr_time : float (scalar or array)
Estimated correlation time.
"""
# Calculate the variance
var = samplevariance(mean, momnt2, nmeasures)
# Ensure the data is stored in arrays
var_arr = var*np.ones(1)
corr_arr = corr*np.ones(1)
mean_arr = mean*np.ones(1)
# Find the indexes where the variance is not zero
nonzero_idxs = np.argwhere(var_arr != 0)
# Initialize to -1
corr_norm = np.full(corr_arr.shape, -1., dtype=float)
# Calculate the normalized autocorrelation
corr_norm[nonzero_idxs] = (
(corr_arr[nonzero_idxs] - np.power(mean_arr[nonzero_idxs], 2))
/var_arr[nonzero_idxs])
return corr_norm/(1. - corr_norm)
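# Illustrative check (not part of the original module): for a synthetic AR(1) chain
# x_t = rho*x_{t-1} + noise, the autocorrelation decays exponentially and corr_time
# should return roughly rho/(1 - rho). The chain length and rho below are arbitrary.
def _example_corr_time(rho=0.5, n=50000):
    noise = np.random.normal(size=n)
    x = np.zeros(n)
    for i in range(1, n):
        x[i] = rho*x[i-1] + noise[i]
    mean = x.mean()
    momnt2 = np.mean(x**2)
    corr = np.mean(x[1:]*x[:-1])  # mean product of consecutive measures
    return corr_time(mean, momnt2, corr, n)  # expected to be close to rho/(1 - rho)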
def samplemean_error(mean, momnt2, corrtime, nmeasures):
"""Calculate the sample mean error in rejection with repetition.
Parameters
----------
mean : float (scalar or array)
Sample mean of the calculated magnitued.
momnt2 : float (scalar or array)
Sample second raw moment of the magnitude.
corrtime : float (scalar or array)
Correlation time of the magnitude.
nmeasures: int (scalar or array)
Number of measures.
Returns
-------
error : float (scalar or array)
"""
# Calculate the variance
var = samplevariance(mean, momnt2, nmeasures)
# If the variance is zero, the error is directly zero.
# If we use the formula in those cases a zero division is
# done, so we have to treat the zero values separately.
# Ensure the data is stored in arrays
mean_arr = mean*np.ones(1)
var_arr = var*np.ones(1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 20:17:53 2021
@author: <NAME>
<NAME>
<NAME>
<NAME>
"""
# AND
import numpy as np
def degrau(v):
if v >=0:
return 1
else:
return 0
def perceptron(x, w, b):
v = np.dot(w, x) + b
y = degrau(v)
return y
def AND(x):
w = np.array([1,1])
bAND = -1.5
return perceptron(x, w, bAND)
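# Geometric reading: perceptron() fires when w.x + b >= 0, so with w = [1, 1] and
# b = -1.5 the AND gate corresponds to the half-plane x1 + x2 >= 1.5, which among
# the four binary inputs is satisfied only by (1, 1).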
x0 = np.array([0,1])
x1 = np.array([1,1])
x2 = np.array([0,0])
x3 = np.array([1,0])
print("AND ({} + {}) = {}".format(0,1, AND(x0)))
print("AND ({} + {}) = {}".format(1,1, AND(x1)))
print("AND ({} + {}) = {}".format(0,0, AND(x2)))
print("AND ({} + {}) = {}".format(1,0, AND(x3)))
print("----------------------------------------------------------------------")
# OR
import numpy as np
def degrau(v):
if v >=0:
return 1
else:
return 0
def perceptron(x, w, b):
v = np.dot(w, x) + b
y = degrau(v)
return y
def OR(x):
w = np.array([1,1])
b = -0.5
return perceptron(x, w, b)
x0 = np.array([0,1])
x1 = np.array([1,1])
x2 = np.array([0,0])
x3 = np.array([1,0])
print("OR ({} + {}) = {}".format(0,1, OR(x0)))
print("OR ({} + {}) = {}".format(1,1, OR(x1)))
print("OR ({} + {}) = {}".format(0,0, OR(x2)))
print("OR ({} + {}) = {}".format(1,0, OR(x3)))
print("----------------------------------------------------------------------")
# NOR
import numpy as np
def degrau(v):
if v >=0:
return 1
else:
return 0
def perceptron(x, w, b):
v = np.dot(w, x) + b
y = degrau(v)
return y
def NOT(x):
wNOT = -1.0
bNOT = 0.5
return perceptron(x, wNOT, bNOT)
def OR(x):
w = np.array([1,1])
b = -0.5
return perceptron(x, w, b)
def NOR(x):
output_OR = OR(x)
output_NOT = NOT(output_OR)
return output_NOT
x0 = np.array([0,1])
x1 = np.array([1,1])
x2 = np.array([0,0])
x3 = np.array([1,0])
print("NOR ({} + {}) = {}".format(0,1, NOR(x0)))
print("NOR ({} + {}) = {}".format(1,1, NOR(x1)))
print("NOR ({} + {}) = {}".format(0,0, NOR(x2)))
print("NOR ({} + {}) = {}".format(1,0, NOR(x3)))
print("----------------------------------------------------------------------")
# NAND
import numpy as np
def degrau(v):
if v >=0:
return 1
else:
return 0
def perceptron(x, w, b):
v = np.dot(w, x) + b
y = degrau(v)
return y
def NOT(x):
wNOT = -1.0
bNOT = 0.5
return perceptron(x, wNOT, bNOT)
def AND(x):
w = np.array([1,1])
bAND = -1.5
return perceptron(x, w, bAND)
def NAND(x):
output_AND = AND(x)
output_NOT = NOT(output_AND)
return output_NOT
x0 = np.array([0,1])
from sacred import Experiment
import logging
import sys
import os
from os.path import join
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
import tqdm
from multiprocessing import Pool
from src.eval_utils import iou_numpy as iou
from configuration import CONFIG
from src.MetaSeg.functions.in_out import components_load, get_indices, probs_gt_load
from src.MetaSeg.functions.helper import load_data
from src.MetaSeg.functions.calculate import meta_nn_predict
from src.datasets.a2d2 import a2d2_to_cityscapes
from src.datasets.cityscapes import num_categories, id_to_catid, trainid_to_catid
from src.log_utils import log_config
ex = Experiment('eval_iou')
log = logging.getLogger()
log.handlers = []
log_format = logging.Formatter('%(asctime)s || %(name)s - [%(levelname)s] - %(message)s')
streamhandler = logging.StreamHandler(sys.stdout)
streamhandler.setFormatter(log_format)
log.addHandler(streamhandler)
log.setLevel('INFO')
ex.logger = log
# build mapping from a2d2 full ID set to cityscapes training ID set
label_mappings = dict(
a2d2={k: id_to_catid[a2d2_to_cityscapes[k]] for k in a2d2_to_cityscapes.keys()},
cityscapes_val=trainid_to_catid,
)
def pool_wrapper(inputs):
return get_ious_for_image(*inputs)
@ex.capture
def get_ious_for_image(image_index,
iou_pred,
thresholds,
args):
confusion_matrices_pos = {t: np.zeros((num_categories, num_categories)) for t in thresholds}
confusion_matrices_neg = {t: np.zeros((num_categories, num_categories)) for t in thresholds}
pred, gt, _ = probs_gt_load(image_index,
input_dir=join(CONFIG.metaseg_io_path, 'input', 'deeplabv3plus', args['dataset']),
preds=True)
# transform a2d2 labels to cityscapes category ids
gt = np.vectorize(label_mappings[args['dataset']].get)(gt)
# transform predictions to cityscapes category ids
pred = np.vectorize(trainid_to_catid.get)(pred)
# load components for constructing the iou mask based on different IoU thresholds
components = components_load(image_index,
components_dir=join(CONFIG.metaseg_io_path, 'components', 'deeplabv3plus',
args['dataset']))
# Border pixels of a component are labeled with the negative index of the component itself.
# We want to include the borders of the segments in the evaluation, so we take the absolute
# value to make those labels positive.
components = np.absolute(components)
# -1 because component indices start with 1
components = iou_pred[components - 1]
for t in thresholds:
# confusion_matrices_pos[t] = iou(pred,
# gt,
# n_classes=num_categories,
# update_matrix=confusion_matrices_pos[t],
# ignore_index=0,
# mask=(components >= t))[1]
confusion_matrices_neg[t] = iou(pred,
gt,
n_classes=num_categories,
update_matrix=confusion_matrices_neg[t],
ignore_index=0,
mask=(components < t))[1]
return confusion_matrices_pos, confusion_matrices_neg
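# Small illustration (synthetic values, not tied to any real MetaSeg output) of the
# component-to-IoU lookup used above: segment labels start at 1, border pixels carry
# the negative label, and fancy indexing broadcasts the per-segment IoU prediction
# onto the pixel grid.
def _example_component_lookup():
    components = np.array([[1, 1, -2], [1, 2, 2]])
    iou_pred = np.array([0.9, 0.4])  # hypothetical predicted IoU for segments 1 and 2
    per_pixel = iou_pred[np.absolute(components) - 1]
    return per_pixel  # [[0.9, 0.9, 0.4], [0.9, 0.4, 0.4]]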
@ex.config
def config():
args = dict(
meta_nn_path=join('.', 'src', 'meta_nn.pth'),
save_dir=CONFIG.metaseg_io_path,
load_file=None,
plot_dir=join('.', 'plots'),
steps=51,
gpu=CONFIG.GPU_ID,
dpi=400,
n_workers=CONFIG.NUM_CORES,
max_t=0.75,
dataset='a2d2',
)
if not os.path.exists(args['plot_dir']):
os.makedirs(os.path.abspath(args['plot_dir']), exist_ok=True)
def iou_wrapper(inputs):
return iou(*inputs)
@ex.automain
def main(args, _run, _log):
log_config(_run, _log)
if args['load_file'] is None:
_log.info('Loading data...')
_log.info('Cityscapes...')
# load cityscapes train data for normalization of out of domain data
_, _, _, _, xa_mean, xa_std, classes_mean, classes_std, *_ = load_data('cityscapes')
_log.info('{}...'.format(args['dataset']))
xa, *_, start, _ = load_data(args['dataset'],
xa_mean=xa_mean,
xa_std=xa_std,
classes_mean=classes_mean,
classes_std=classes_std)
# predict iou using MetaSeg metrics
iou_pred = meta_nn_predict(args['meta_nn_path'], xa, gpu=args['gpu'])
# get all available input file IDs
inds = get_indices(join(CONFIG.metaseg_io_path, 'metrics', 'deeplabv3plus', args['dataset']))
# construct thresholds and dictionary for saving
thresholds = np.linspace(0, 1, args['steps'])
#!/usr/bin/env python
# coding: utf-8
import math
import random
import warnings
import matplotlib
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from scipy.stats import norm, uniform, poisson
from statsmodels.distributions.empirical_distribution import ECDF
def discrete_weibull(shape, scale, N2):
x_pre_scale = np.random.weibull(shape, int(5e6))
x = scale * x_pre_scale
f = ECDF(x)
h = np.zeros(N2)
h[0] = f(1.5) - f(0)
for i in range(1, N2):
h[i] = (f(i+1.5) - f(i+0.5)) / (1-f(i+0.5))
s = np.zeros(N2)
s[0] = 1
for i in range(1, N2):
s[i] = s[i-1]*(1-h[i-1])
SI0 = s * h
SI1 = SI0[~np.isnan(SI0)]
SI = np.zeros(N2)
SI[0:len(SI1)] = SI1
return SI
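# Quick sanity check (illustrative only): the discretized probabilities should be
# non-negative and sum to approximately one when N2 covers most of the distribution.
# The shape/scale values are the defaults suggested in cal_dis() below.
def _example_check_discrete_weibull(shape=2.826, scale=5.665, N2=20):
    SI = discrete_weibull(shape, scale, N2)
    return SI.sum()  # expected to be close to 1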
def discrete_lognormal(logmean, logsd, N2):
x = np.random.lognormal(logmean, logsd, int(5e6))
f = ECDF(x)
h = np.zeros(N2)
h[0] = f(1.5) - f(0)
for i in range(1,N2):
h[i] = (f(i+1.5) - f(i+0.5)) / (1-f(i+0.5))
s = np.zeros(N2)
s[0] = 1
for i in range(1,N2):
s[i] = s[i-1]*(1-h[i-1])
SI0 = s * h
SI1 = SI0[~np.isnan(SI0)]
SI = np.zeros(N2)
SI[0:len(SI1)] = SI1
return SI
def cal_gt(shape, scale):
dis_gt = discrete_weibull(shape, scale, 20)
return dis_gt
def cal_inc(logmean, logsd):
dis_inc = discrete_lognormal(logmean, logsd, 20)
return dis_inc
def cal_rep(repmean, repsd, incmean, incsd):
N2 = 30
shape = (repmean**2) / (repsd**2)
scale = repmean / shape
x1 = np.random.gamma(shape, scale, int(5e6))
x2 = np.random.lognormal(incmean, incsd, int(5e6))
f = ECDF(x1+x2)
h = np.zeros(N2)
h[0] = f(1.5) - f(0)
for i in range(1,N2):
h[i] = (f(i+1.5) - f(i+0.5)) / (1-f(i+0.5))
s = np.zeros(N2)
s[0] = 1
for i in range(1,N2):
s[i] = s[i-1]*(1-h[i-1])
SI0 = s * h
SI1 = SI0[~np.isnan(SI0)]
SI = np.zeros(N2)
SI[0:len(SI1)] = SI1
dis_rep = SI
return dis_rep
def select_norm(SI, threshold):
which = lambda lst: list(np.where(lst)[0])
indexSI = which(SI > threshold)
SI0 = np.zeros(indexSI[0])
output = np.append(SI0, SI[indexSI])
output_norm = output / sum(output)
return output_norm
def cal_dis():
gt_mean, gt_sd = map(eval, input(
'Please input the shape and scale of the Weibull distribution for generation time, use blank as a separator \neg. 2.826 5.665, or press enter to use the default values: ').split() or ['2.826', '5.665'])
gt_dis = cal_gt(gt_mean, gt_sd)
gt_cutoff_threshold = eval(input('\nPlease input the threshold of generation distribution \neg. 0.1, or press enter to use the default value: ') or '0.1')
gt_dis_cut = select_norm(gt_dis, gt_cutoff_threshold)
observation_type = eval(
input('\nPlease choose the type of input observation:press 0 for ONSET, press 1 for REPORT ') or '1')
if observation_type == 0:
inc_mean, inc_sd = map(eval, input(
'\nPlease input the logmean and logsd of the lognormal distribution for incubation time, use blank as a separator \neg.1.644 0.33, or press enter to use the default values: ').split() or ['1.644', '0.33'])
inc_dis = cal_inc(inc_mean, inc_sd)
inc_cutoff_threshold = eval(input('\nPlease input the threshold of incubation distribution \neg. 0.1, or press enter to use the default value: ') or '0.1')
inc_dis_cut = select_norm(inc_dis, inc_cutoff_threshold)
return gt_dis_cut, inc_dis_cut
elif observation_type == 1:
inc_mean, inc_sd = map(eval, input(
'\nPlease input the logmean and logsd of the lognormal distribution for incubation time, use blank as a separator \neg.1.644 0.33, or press enter to use the default values: ').split() or ['1.644', '0.33'])
rep_mean, rep_sd = map(eval, input(
'\nPlease input the mean and sd of the Gamma distribution for report time, use blank as a separator \neg.4.9 3.3, or press enter to use the default values: ').split() or ['4.9', '3.3'])
rep_dis = cal_rep(rep_mean, rep_sd, inc_mean, inc_sd)
rep_cutoff_threshold = eval(input(
'\nPlease input the threshold of report delay distribution \neg. 0.09, or press enter to use the default value: ') or '0.09')
rep_dis_cut = select_norm(rep_dis, rep_cutoff_threshold)
return gt_dis_cut, rep_dis_cut
else:
        print('Wrong input. Please rerun DARt.')
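# Rough non-interactive sketch of what cal_dis() assembles, using the default values
# offered by the prompts above (illustration only):
#   gt_dis_cut = select_norm(cal_gt(2.826, 5.665), 0.1)              # generation time
#   rep_dis_cut = select_norm(cal_rep(4.9, 3.3, 1.644, 0.33), 0.09)  # report delay
# DARt below expects exactly such a pair (generation-time distribution, delay distribution).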
class DARt:
Num = 200
sigma_R = 0.1
MG = 1
def __init__(self, filename):
warnings.filterwarnings('ignore')
GT, D_s = cal_dis()
GT = list(GT)
D_s = list(D_s)
self.delay_start_day = next((i for (i, x) in enumerate(D_s) if x), None)
nonzero_delay = D_s[self.delay_start_day:]
if len(GT) >= len(nonzero_delay):
for i in range(len(GT) - len(nonzero_delay)):
nonzero_delay.append(0)
else:
for i in range(len(nonzero_delay) - len(GT)):
GT.append(0)
self.GT = GT
self.len_GT = len(self.GT)
self.delay0 = nonzero_delay
self.filename = filename
self.Ct = None
self.Ct_SD = None
self.date = None
self.N = 0
self.Rt_back = []
self.It_back = []
self.M_back = []
self.Rt_back_CI = []
self.It_back_CI = []
self.It_est = []
self.Rt_est = []
self.It_CI = []
self.Rt_CI = []
self.M_est = []
def readfile(self):
data = pd.read_csv(self.filename)
Ct = data.iloc[:, 1]
date = data.iloc[:, 0]
first_nonzero = []
for i in range(len(Ct)):
if Ct.values[i] == 0:
first_nonzero.append(i)
if first_nonzero:
start_index = first_nonzero[-1]
else:
start_index = 0
self.Ct = list(Ct[start_index:])
self.date = list(date[start_index:])
Ct_mean = np.convolve(self.Ct, np.ones((self.len_GT,)) / self.len_GT, mode='same')
ct_diff = Ct_mean - self.Ct
ct_diff2 = [ct ** 2 for ct in ct_diff]
Ct_Var = np.convolve(ct_diff2, np.ones((self.len_GT,)) / self.len_GT, mode='same')
self.Ct_SD = [math.sqrt(ct) for ct in Ct_Var]
self.N = len(self.Ct)
self.Rt_back = np.zeros(self.N - self.delay_start_day - 1)
self.It_back = np.zeros(self.N - self.delay_start_day - 1)
self.M_back = np.zeros(self.N - self.delay_start_day - 1)
self.Rt_back_CI = np.zeros((2, self.N - self.delay_start_day - 1))
self.It_back_CI = np.zeros((2, self.N - self.delay_start_day - 1))
self.It_est = np.zeros(self.N - self.delay_start_day - 1)
self.Rt_est = np.zeros(self.N - self.delay_start_day - 1)
self.It_CI = np.zeros((2, self.N - self.delay_start_day - 1))
self.Rt_CI = np.zeros((2, self.N - self.delay_start_day - 1))
self.M_est = np.zeros(self.N - self.delay_start_day - 1)
# filtering ============================
def renewal_fun(self, GT, Jt, i):
inc_i = 0
for j in range(len(GT)):
inc_i = inc_i + Jt[i - j - 1] * GT[j]
return inc_i
def independent_sample(self, fn_list):
def sample_fn(n):
return np.stack([fn(n) for fn in fn_list]).T
return sample_fn
def cal_CI(self, particle, weight):
order = np.argsort(particle)
order_particle = particle[order]
cdf = np.cumsum(weight[order])
low_id = np.where(cdf >= 0.025)[0][0]
high_id = np.where(cdf >= 0.975)[0][0]
CI = [order_particle[low_id], order_particle[high_id]]
return CI
def initialize_particle(self, Num):
independent_sample_list = [uniform(1, 5).rvs, uniform(0, 1).rvs]
for i in range(self.len_GT):
independent_sample_list.insert(0, uniform(1, max(self.Ct[0] * 2, 10)).rvs)
prior_fn = self.independent_sample(independent_sample_list)
# particle location
particle_previous_x = prior_fn(Num)
# particle weight
particle_previous_w = 1 / Num * np.ones(Num)
# mode
for n in range(len(particle_previous_x)):
if particle_previous_x[n][-1] < 0.95:
particle_previous_x[n][-1] = 0
else:
particle_previous_x[n][-1] = 1
return particle_previous_x, particle_previous_w
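    # Each particle is a vector of length len_GT + 2: entries [0:len_GT] hold the most
    # recent daily incidence values I_t, entry [len_GT] holds the reproduction number
    # R_t, and entry [len_GT+1] holds the binary mode M_t (a value of 1 triggers the
    # broader uniform proposal for R_t in the transition step). filtering() and
    # smoothing() below rely on this layout.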
def filtering(self, Num, delay_start_day, N, particle_previous_w, particle_previous_x):
# filtering
all_particle = []
all_weight = []
for t in tqdm(range(self.len_GT, N - delay_start_day - 1)):
# resampling-------------
particle_previous_selection = np.zeros(Num, dtype=int)
for s in range(Num):
u = random.uniform(0, 1)
particle_previous_selection[s] = np.where(np.cumsum(particle_previous_w) > u)[0][0]
particle_previous_selection = np.sort(particle_previous_selection)
particle_previous_resampled = np.zeros(particle_previous_x.shape)
for s in range(Num):
particle_previous_resampled[s, ] = particle_previous_x[particle_previous_selection[s], ]
# transition--------------
particle_current_x = np.zeros(particle_previous_x.shape)
for s in range(Num):
It_s = particle_previous_resampled[s, :self.len_GT]
Rt_s = particle_previous_resampled[s, self.len_GT]
rdn = uniform(0, 1).rvs(1)
if rdn < 0.95:
M_s_new = 0
else:
M_s_new = 1
Rt_s_new = abs(norm(loc=Rt_s, scale=DARt.sigma_R * DARt.MG).rvs(size=1)[0])
if M_s_new == 1:
Rt_s_new = uniform(0, Rt_s + 0.5).rvs(1)
It_end = max(Rt_s_new * sum(np.multiply(It_s, self.GT[::-1])), 0)
It_end_new = poisson.rvs(It_end, size=1)[0]
It_s_new = np.append(It_s[1:], It_end_new)
particle_current_x[s, :self.len_GT] = It_s_new
particle_current_x[s, self.len_GT] = Rt_s_new
particle_current_x[s, self.len_GT+1] = M_s_new
# weight------------------
particle_current_w = np.zeros(Num)
for s in range(Num):
It_s_new = particle_current_x[s, :self.len_GT]
Ct_s = sum(np.multiply(It_s_new, self.delay0[::-1]))
particle_current_w[s] = norm(Ct_s, self.Ct_SD[t + delay_start_day + 1]).pdf(self.Ct[t + delay_start_day + 1])
# normalize------------------
particle_current_w = particle_current_w / sum(particle_current_w)
particle_previous_x = particle_current_x
particle_previous_w = particle_current_w
# save mean
self.It_est[t] = sum(np.multiply(particle_current_x[:, self.len_GT-1], particle_current_w))
self.Rt_est[t] = sum(np.multiply(particle_current_x[:, self.len_GT], particle_current_w))
self.M_est[t] = sum(np.multiply(particle_current_x[:, self.len_GT+1], particle_current_w))
# save confidence interval
self.It_CI[:, t] = self.cal_CI(particle_current_x[:, self.len_GT-1], particle_current_w)
self.Rt_CI[:, t] = self.cal_CI(particle_current_x[:, self.len_GT], particle_current_w)
# save all particles
all_particle.append(particle_current_x)
all_weight.append(particle_current_w)
return all_particle, all_weight
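    # The loop above is a standard sequential importance resampling (SIR) filter:
    # (1) resample particles in proportion to the previous weights, (2) propagate each
    # particle through the renewal equation, drawing the newest incidence from
    # Poisson(R_t * sum(I * GT)) with R_t following a Gaussian random walk (or a wide
    # uniform redraw when the mode switches), and (3) reweight by a Gaussian likelihood
    # of the observed count around the delay-convolved incidence sum(I * delay0).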
# smoothing ========================
def prob_fun(self, X_tPlus, X_t):
R_tPlus = X_tPlus[self.len_GT]
R_t = X_t[self.len_GT]
M_Plus_t = X_tPlus[self.len_GT+1]
if M_Plus_t == 1:
Prob_Rt = 1 / 5
else:
Prob_Rt = norm.pdf(R_tPlus, loc=R_t, scale=DARt.sigma_R * DARt.MG)
I_tPlus = X_tPlus[:self.len_GT]
I_t = X_t[:self.len_GT]
mu = R_tPlus * sum(np.multiply(I_t, self.GT[::-1]))
Prob_It = poisson.pmf(I_tPlus[-1], mu)
prob = Prob_It * Prob_Rt
return prob
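    # prob_fun is the transition density p(X_{t+1} | X_t) used by the backward pass:
    # a Gaussian (or uniform, if the mode switched) term for R_t times a Poisson term
    # for the newest incidence value. smoothing() plugs it into the usual
    # forward-filtering backward-smoothing weight update
    #   w_t(i) ~ w_t_filter(i) * sum_k [ w_{t+1}(k) * p(X_{t+1}^k | X_t^i)
    #                                    / sum_n w_t_filter(n) * p(X_{t+1}^k | X_t^n) ]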
def smoothing(self, Num, N, all_particle, all_weight):
particle_next = all_particle[-1]
weight_next = all_weight[-1]
for i in range(self.len_GT):
self.It_back[-i-1] = sum(np.multiply(particle_next[:, self.len_GT-1-i], weight_next))
self.It_back_CI[:, -i-1] = self.cal_CI(particle_next[:, self.len_GT-1-i], weight_next)
self.Rt_back_CI[:, -1] = self.Rt_CI[:, -1]
self.Rt_back[-1] = self.Rt_est[-1]
for t in tqdm(range(N - self.delay_start_day-3, self.len_GT-1, -1)):
weight_t = all_weight[t-self.len_GT]
particle_t = all_particle[t-self.len_GT]
prob_mat = np.zeros((Num, Num))
for k in range(Num):
for n in range(Num):
prob_mat[k, n] = self.prob_fun(particle_next[k, ], particle_t[n, ])
weight_now = np.zeros(Num)
for i in range(Num):
sum_update = 0
for k in range(Num):
fs = prob_mat[k, i]
v = 0
for n in range(Num):
v = v + weight_t[n] * prob_mat[k, n]
sum_update = sum_update + weight_next[k] * fs / v
weight_now[i] = weight_t[i] * sum_update
weight_next = weight_now / sum(weight_now)
self.Rt_back[t] = sum(np.multiply(particle_t[:, self.len_GT], weight_next))
self.It_back[t] = sum(np.multiply(particle_t[:, self.len_GT-1], weight_next))
self.M_back[t] = sum(np.multiply(particle_t[:, self.len_GT+1], weight_next))
self.Rt_back_CI[:, t] = self.cal_CI(particle_t[:, self.len_GT], weight_next)
self.It_back_CI[:, t] = self.cal_CI(particle_t[:, self.len_GT-1], weight_next)
particle_next = particle_t
def cal_r(self):
self.readfile()
particle_previous_x, particle_previous_w = self.initialize_particle(DARt.Num)
all_particle, all_weight = self.filtering(DARt.Num, self.delay_start_day, self.N, particle_previous_w, particle_previous_x)
self.smoothing(DARt.Num, self.N, all_particle, all_weight)
PF_result = pd.DataFrame({'Date': self.date[self.len_GT:(-1 - self.delay_start_day)],
'Rt_smooth': self.Rt_back[self.len_GT:], 'Rt_smooth_5%': self.Rt_back_CI[0, self.len_GT:],
'Rt_smooth_95%': self.Rt_back_CI[1, self.len_GT:],
'Rt_filter': self.Rt_est[self.len_GT:], 'Rt_est_5%': self.Rt_CI[0, self.len_GT:], 'Rt_est_95%': self.Rt_CI[1, self.len_GT:],
'Mt_smooth': self.M_back[self.len_GT:], 'Mt_filter': self.M_est[self.len_GT:],
'Jt_smooth': self.It_back[self.len_GT:], 'Jt_smooth_5%': self.It_back_CI[0, self.len_GT:],
'Jt_smooth_95%': self.It_back_CI[1, self.len_GT:],
'Jt_filter': self.It_est[self.len_GT:], 'Jt_est_5%': self.It_CI[0, self.len_GT:], 'Jt_est_95%': self.It_CI[1, self.len_GT:],
                                  'Ct': np.array(self.Ct[self.len_GT:(-1 - self.delay_start_day)])})
from argparse import Namespace
import csv
import pickle
import numpy as np
from tqdm import tqdm
from chemprop.data.utils import get_data, get_task_names
from chemprop.features import get_features_generator
from chemprop.sklearn_train import predict
from chemprop.utils import makedirs
def predict_sklearn(args: Namespace):
print('Loading data')
data = get_data(path=args.test_path)
print('Computing morgan fingerprints')
morgan_fingerprint = get_features_generator('morgan')
for datapoint in tqdm(data, total=len(data)):
datapoint.set_features(morgan_fingerprint(mol=datapoint.smiles, radius=args.radius, num_bits=args.num_bits))
print(f'Predicting with an ensemble of {len(args.checkpoint_paths)} models')
sum_preds = np.zeros((len(data), args.num_tasks))
for checkpoint_path in tqdm(args.checkpoint_paths, total=len(args.checkpoint_paths)):
with open(checkpoint_path, 'rb') as f:
model = pickle.load(f)
model_preds = predict(
model=model,
model_type=args.model_type,
dataset_type=args.dataset_type,
features=data.features()
)
        sum_preds += np.array(model_preds)
# coding: utf8
########################################################################
# #
# Control law : tau = P(q*-q^) + D(v*-v^) + tau_ff #
# #
########################################################################
from matplotlib import pyplot as plt
import pinocchio as pin
import numpy as np
import numpy.matlib as matlib
import tsid
import FootTrajectoryGenerator as ftg
import FootstepPlanner
import pybullet as pyb
import utils
import time
pin.switchToNumpyMatrix()
########################################################################
# Class for a PD with feed-forward Controller #
########################################################################
class controller:
""" Inverse Dynamics controller that take into account the dynamics of the quadruped to generate
actuator torques to apply on the ground the contact forces computed by the MPC (for feet in stance
phase) and to perform the desired footsteps (for feet in swing phase)
Args:
N_similation (int): maximum number of Inverse Dynamics iterations for the simulation
"""
def __init__(self, N_simulation, k_mpc, n_periods):
self.q_ref = np.array([[0.0, 0.0, 0.2027682, 0.0, 0.0, 0.0, 1.0,
0.0, 0.8, -1.6, 0, 0.8, -1.6,
0, -0.8, 1.6, 0, -0.8, 1.6]]).transpose()
self.qtsid = self.q_ref.copy()
self.vtsid = np.zeros((18, 1))
self.ades = np.zeros((18, 1))
self.error = False
self.verbose = True
# List with the names of all feet frames
self.foot_frames = ['FL_FOOT', 'FR_FOOT', 'HL_FOOT', 'HR_FOOT']
# Constraining the contacts
mu = 0.9 # friction coefficient
fMin = 1.0 # minimum normal force
fMax = 25.0 # maximum normal force
contactNormal = np.matrix([0., 0., 1.]).T # direction of the normal to the contact surface
# Coefficients of the posture task
        kp_posture = 10.0  # proportional gain of the posture task
w_posture = 1.0 # weight of the posture task
# Coefficients of the contact tasks
        kp_contact = 100.0  # proportional gain for the contacts
self.w_forceRef = 50.0 # weight of the forces regularization
self.w_reg_f = 50.0
# Coefficients of the foot tracking task
        kp_foot = 100.0  # proportional gain for the tracking task
self.w_foot = 500.0 # weight of the tracking task
# Arrays to store logs
k_max_loop = N_simulation
self.f_pos = np.zeros((4, k_max_loop, 3))
self.f_vel = np.zeros((4, k_max_loop, 3))
self.f_acc = np.zeros((4, k_max_loop, 3))
self.f_pos_ref = np.zeros((4, k_max_loop, 3))
self.f_vel_ref = np.zeros((4, k_max_loop, 3))
self.f_acc_ref = np.zeros((4, k_max_loop, 3))
self.b_pos = np.zeros((k_max_loop, 6))
self.b_vel = np.zeros((k_max_loop, 6))
self.com_pos = np.zeros((k_max_loop, 3))
self.com_pos_ref = np.zeros((k_max_loop, 3))
self.c_forces = np.zeros((4, k_max_loop, 3))
self.h_ref_feet = np.zeros((k_max_loop, ))
self.goals = np.zeros((3, 4))
        self.vgoals = np.zeros((3, 4))
#!/usr/bin/env python3
# Author: <NAME>
# Copyright 2021
import os
import copy
import shutil
import argparse
import numpy as np
import isce
import isceobj
from isceobj.TopsProc.runMergeBursts import mergeBox
from isceobj.TopsProc.runMergeBursts import adjustValidWithLooks
from isceobj.TopsProc.runIon import cal_cross_ab_ramp
from Stack import ionParam
import s1a_isce_utils as ut
from mergeBurstsIon import updateValid
def createParser():
parser = argparse.ArgumentParser(description='merge swath ionosphere')
parser.add_argument('-c', '--reference', type=str, dest='reference', required=True,
help='directory with the reference image')
parser.add_argument('-s', '--stack', type=str, dest='stack', default = None,
help='directory with the stack xml files which includes the common valid region of the stack')
parser.add_argument('-i', '--input', dest='input', type=str, required=True,
help='directory with input swath ionosphere containing swath directories ion_cal_IW*')
parser.add_argument('-o', '--output', dest='output', type=str, required=True,
help='directory with output merged ionosphere')
parser.add_argument('-r', '--nrlks', type=int, dest='nrlks', default=1,
help = 'number of range looks. NOT number of range looks 0')
parser.add_argument('-a', '--nalks', type=int, dest='nalks', default=1,
help = 'number of azimuth looks. NOT number of azimuth looks 0')
parser.add_argument('-m', '--remove_ramp', type=int, dest='remove_ramp', default=0,
help = 'remove an empirical ramp as a result of different platforms. 0: no removal (default), 1: S1A-S1B, -1: S1B-S1A')
return parser
def cmdLineParse(iargs = None):
parser = createParser()
return parser.parse_args(args=iargs)
def main(iargs=None):
'''
'''
inps = cmdLineParse(iargs)
corThresholdSwathAdj = 0.85
numberRangeLooks = inps.nrlks
numberAzimuthLooks = inps.nalks
remove_ramp = inps.remove_ramp
ionParamObj=ionParam()
ionParamObj.configure()
#####################################################################
framesBox=[]
swathList = sorted(ut.getSwathList(inps.reference))
for swath in swathList:
frame = ut.loadProduct(os.path.join(inps.reference, 'IW{0}.xml'.format(swath)))
minBurst = frame.bursts[0].burstNumber
maxBurst = frame.bursts[-1].burstNumber
if minBurst==maxBurst:
print('Skipping processing of swath {0}'.format(swath))
continue
passDirection = frame.bursts[0].passDirection.lower()
if inps.stack is not None:
print('Updating the valid region of each burst to the common valid region of the stack')
frame_stack = ut.loadProduct(os.path.join(inps.stack, 'IW{0}.xml'.format(swath)))
updateValid(frame, frame_stack)
framesBox.append(frame)
box = mergeBox(framesBox)
#adjust valid with looks, 'frames' ARE CHANGED AFTER RUNNING THIS
#here numberRangeLooks, instead of numberRangeLooks0, is used, since we need to do next step multilooking after unwrapping. same for numberAzimuthLooks.
(burstValidBox, burstValidBox2, message) = adjustValidWithLooks(framesBox, box, numberAzimuthLooks, numberRangeLooks, edge=0, avalid='strict', rvalid='strict')
    #1. we use adjustValidWithLooks() to compute burstValidBox for extracting burst bounding boxes, use each burst's bounding box to retrieve
#the corresponding burst in merged swath image and then put the burst in the final merged image.
#so there is no need to use interferogram IW*.xml, reference IW*.xml is good enough. If there is no corresponding burst in interferogram
#IW*.xml, the burst in merged swath image is just zero, and we can put this zero burst in the final merged image.
#2. we use mergeBox() to compute box[1] to be used in cal_cross_ab_ramp()
#####################################################################
numValidSwaths = len(swathList)
if numValidSwaths == 1:
print('there is only one valid swath, simply copy the files')
os.makedirs(inps.output, exist_ok=True)
corName = os.path.join(inps.input, 'ion_cal_IW{}'.format(swathList[0]), 'raw_no_projection.cor')
ionName = os.path.join(inps.input, 'ion_cal_IW{}'.format(swathList[0]), 'raw_no_projection.ion')
corOutName = os.path.join(inps.output, 'raw_no_projection.cor')
ionOutName = os.path.join(inps.output, 'raw_no_projection.ion')
shutil.copy2(corName, corOutName)
shutil.copy2(ionName, ionOutName)
#os.symlink(os.path.abspath(corName), os.path.abspath(corOutName))
#os.symlink(os.path.abspath(ionName), os.path.abspath(ionOutName))
img = isceobj.createImage()
img.load(corName + '.xml')
img.setFilename(corOutName)
img.extraFilename = corOutName+'.vrt'
img.renderHdr()
img = isceobj.createImage()
img.load(ionName + '.xml')
img.setFilename(ionOutName)
img.extraFilename = ionOutName+'.vrt'
img.renderHdr()
return
print('merging swaths')
corList = []
ampList = []
ionosList = []
for swath in swathList:
corName = os.path.join(inps.input, 'ion_cal_IW{}'.format(swath), 'raw_no_projection.cor')
ionName = os.path.join(inps.input, 'ion_cal_IW{}'.format(swath), 'raw_no_projection.ion')
img = isceobj.createImage()
img.load(ionName + '.xml')
width = img.width
length = img.length
amp = (np.fromfile(corName, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
        cor = (np.fromfile(corName, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
import unittest
from context import modest as md
import numpy as np
from scipy.linalg import block_diag
from scipy.stats import multivariate_normal as mvn
class TestModularFilters(unittest.TestCase):
def setUp(self):
class simpleState(md.substates.SubState):
def __init__(self, dimension, stateVectorHistory, covarianceStorage='covariance'):
if not isinstance(stateVectorHistory['covariance'], md.utils.covarianceContainer):
stateVectorHistory['covariance'] = md.utils.covarianceContainer(
stateVectorHistory['covariance'],covarianceStorage
)
super().__init__(stateDimension=dimension, stateVectorHistory=stateVectorHistory)
self.simpleState = simpleState
class oneDPositionVelocity(md.substates.SubState):
def __init__(self, objectID, stateVectorHistory,covarianceStorage='covariance'):
if not isinstance(stateVectorHistory['covariance'], md.utils.covarianceContainer):
stateVectorHistory['covariance'] = md.utils.covarianceContainer(
stateVectorHistory['covariance'],covarianceStorage
)
super().__init__(stateDimension=2, stateVectorHistory=stateVectorHistory)
self.stateVector = stateVectorHistory['stateVector']
self.objectID = objectID
self.covarianceStorage = covarianceStorage
def storeStateVector(self, svDict):
xPlus = svDict['stateVector']
aPriori = svDict['aPriori']
if aPriori is False:
self.stateVector = xPlus
svDict['stateVector'] = self.stateVector
super().storeStateVector(svDict)
def timeUpdate(self, dT, dynamics=None):
F = np.array([[1, dT],[0, 1]])
dT2 = np.square(dT)
dT3 = np.power(dT, 3)
dT4 = np.power(dT, 4)
if self.covariance().form == 'covariance':
Q = np.array([[dT4/4, dT3/2],[dT3/2, dT2]])
elif self.covariance().form == 'cholesky':
Q = np.array([[dT2/2,0],[dT,0]])
accelKey = self.objectID + 'acceleration'
if dynamics is not None and accelKey in dynamics:
acceleration = dynamics[accelKey]['value']
accVar = dynamics[accelKey]['var']
else:
acceleration = 0
accVar = 0
self.stateVector = F.dot(self.stateVector) + np.array([0, acceleration])
if self.covariance().form == 'covariance':
Q = md.utils.covarianceContainer(Q * accVar, 'covariance')
elif self.covariance().form == 'cholesky':
Q = md.utils.covarianceContainer(Q * np.sqrt(accVar), 'cholesky')
else:
                    raise ValueError('unrecognized covariance')
return {'F': F, 'Q': Q}
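            # Note: F above is the constant-velocity transition [[1, dT], [0, 1]] and Q is
            # the matching white-noise-acceleration process noise, stored either as the
            # full covariance [[dT^4/4, dT^3/2], [dT^3/2, dT^2]] * accVar or as the
            # Cholesky-style factor [[dT^2/2, 0], [dT, 0]] * sqrt(accVar).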
def getMeasurementMatrices(self, measurement, source=None):
HDict = {}
RDict = {}
dyDict = {}
if isinstance(source, oneDObjectMeasurement) and source.objectID == self.objectID:
if 'position' in measurement:
H = np.array([[1, 0]])
dY = measurement['position']['value'] - H.dot(self.stateVector)
HDict['%s position' %self.objectID] = H
RDict['%s position' %self.objectID] = np.array(
[[measurement['position']['var']]]
)
dyDict['%s position' %self.objectID] = dY
if 'velocity' in measurement:
H = np.array([[0, 1]])
dY = measurement['velocity']['value'] - H.dot(self.stateVector)
HDict['%s velocity' %self.objectID] = H
RDict['%s velocity' %self.objectID] = np.array(
[[measurement['velocity']['var']]]
)
dyDict['%s velocity' %self.objectID] = dY
return {'H': HDict, 'R': RDict, 'dY': dyDict}
self.oneDPositionVelocity = oneDPositionVelocity
class oneDObjectMeasurement(md.signals.SignalSource):
def __init__(self, objectID):
self.objectID = objectID
return
def computeAssociationProbability(self, measurement, stateDict, validationThreshold=0):
myMeasMat = stateDict[self.objectID]['stateObject'].getMeasurementMatrices(measurement, source=self)
dY = None
R = None
H = None
for key in myMeasMat['dY']:
if H is None:
H = myMeasMat['H'][key]
R = myMeasMat['R'][key]
dY = myMeasMat['dY'][key]
else:
H = np.vstack([H, myMeasMat['H'][key]])
R = block_diag(R, myMeasMat['R'][key])
dY = np.append(dY, myMeasMat['dY'][key])
if dY is not None:
P = stateDict[self.objectID]['stateObject'].covariance()
Pval = P.convertCovariance('covariance').value
# if P.form == 'cholesky':
# Pval = P.value.dot(P.value.transpose())
# elif P.form == 'covariance':
# Pval = P.value
# else:
# raise ValueError('Unrecougnized covariance specifier %s' %P.form)
S = H.dot(Pval).dot(H.transpose()) + R
myProbability = mvn.pdf(dY, cov=S)
else:
myProbability = 0
return myProbability
self.oneDObjectMeasurement = oneDObjectMeasurement
def testAddStates(self):
# Create a simple state class to add to the filter
stateLength1 = np.random.randint(0, 10)
cov1 = np.random.randint(0, 10)
state1 = self.simpleState(
stateLength1,
{
't':0,
'stateVector':np.zeros(stateLength1),
'covariance':np.eye(stateLength1)*cov1,
'stateVectorID':0
}
)
stateLength2 = np.random.randint(0, 10)
cov2 = np.random.randint(1, 10)
state2 = self.simpleState(
stateLength2,
{
't':0,
'stateVector':np.zeros(stateLength2),
'covariance':np.eye(stateLength2)*cov2,
'stateVectorID':0
}
)
myFilter = md.ModularFilter()
myFilter.addStates('state1', state1)
myFilter.addStates('state2', state2)
self.assertEqual(stateLength1 + stateLength2, myFilter.totalDimension)
        stackedCov = block_diag(np.eye(stateLength1) * cov1, np.eye(stateLength2) * cov2)
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
def getStats(name):
ff = open('{}.pol_scores'.format(name),'r')
scores = []
for line in ff.readlines():
scores.append(float(line))
ff.close()
print('\n=== Politeness Scores in {} === '.format(name))
print('max : {}'.format(np.max(scores)))
print('min : {}'.format(np.min(scores)))
print('mean : {}'.format(np.mean(scores)))
print('median : {}'.format(np.median(scores)))
    print('std. dev. : {}'.format(np.std(scores)))
################################################################################
#
# Delaunay density diangostic for MSD and grad-MSD rates
# as described in the paper
# Data-driven geometric scale detection via Delaunay interpolation
# by <NAME> and <NAME>
# Version 1.0, March 2022
#
# For usage information, run:
# python delaunay_density_diagnostic.py --help
#
################################################################################
#==================================================================================================#
# Load packages. Set random state and validation split.
#==================================================================================================#
# from matplotlib.pyplot import legend
# import torch
import pandas as pd
# from torch.autograd import Variable
# import torch.nn.functional as F
# import torch.utils.data as Data
# from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
from numpy.random import rand, default_rng
from numpy import arccos, array, degrees, absolute
from numpy.linalg import norm
from optparse import OptionParser
# import numpy.ma as ma
# import xarray as xr
from sys import exit
import os.path
import copy
import delsparse
from delsparse import delaunaysparsep as dsp
#==================================================================================================#
# Define the test function (hard coded here as the Griewank function)
#==================================================================================================#
def tf(X): # Griewank function, arbitrary dimension input
X = X.T
term_1 = (1. / 4000.) * sum(X ** 2)
term_2 = 1.0
for i, x in enumerate(X):
term_2 *= np.cos(x) / np.sqrt(i + 1)
return 1. + term_1 - term_2
# use a paraboloid instead:
# return (7/20_000) * ( X[0]**2 + 0.5*(X[1]**2) )
#==================================================================================================#
# Make query point lattice in R^dim
#==================================================================================================#
def make_test_data_grid(rng, static_data=False):
num_samples_per_dim = options.numtestperdim
x = np.linspace(options.queryleftbound, options.queryrightbound, num_samples_per_dim)
print("===> Test coordinates for each dimension = ", x)
mg_in = []
for i in range(options.dim):
mg_in.append(x)
grid_pts = np.array(np.meshgrid(*mg_in))
grid_pts = grid_pts.reshape(options.dim, num_samples_per_dim ** options.dim)
grid_pts = grid_pts.T
outputs_on_grid = tf(grid_pts)
data_test_inputs = pd.DataFrame(grid_pts)
data_test_outputs = pd.DataFrame(outputs_on_grid)
return data_test_inputs, data_test_outputs
#==================================================================================================#
# Collect random sample from bounding box
#==================================================================================================#
def make_random_training_in_box(rng):
train_set_size = options.numtrainpts
# print("==> Generating ", train_set_size, " random points.")
rand_pts_n = rng.random((train_set_size, options.dim))
train_box_scale_vector = np.full(options.dim, (options.bboxrightbound - options.bboxleftbound) )
train_box_shift_vector = np.full(options.dim, options.bboxleftbound )
# do scaling in each dim first
for i in range(options.dim):
rand_pts_n[:,i] *= train_box_scale_vector[i]
# then do shifts
for i in range(options.dim):
rand_pts_n[:,i] += train_box_shift_vector[i]
outputs_on_rand_n = tf(rand_pts_n)
data_train_inputs = pd.DataFrame(rand_pts_n)
data_train_outputs = pd.DataFrame(outputs_on_rand_n)
return data_train_inputs, data_train_outputs
#==================================================================================================#
# Function to compute DelaunaySparse
#==================================================================================================#
def compute_DS_only(data_train_inputs, data_train_outputs, data_test_inputs, data_test_outputs):
# # note: data_test_outputs is only converted to numpy if needed, transposed,
# and returned as actual_test_vals
# # WARNING: deepcopy here may be inefficient at scale
pts_in = copy.deepcopy(data_train_inputs)
q = copy.deepcopy(data_test_inputs)
interp_in = data_train_outputs
actual_test_vals = data_test_outputs
if not isinstance(actual_test_vals, np.ndarray):
actual_test_vals = actual_test_vals.to_numpy()
actual_test_vals = actual_test_vals.T
actual_train_vals = data_train_outputs
if not isinstance(actual_train_vals, np.ndarray):
actual_train_vals = actual_train_vals.to_numpy()
actual_train_vals = actual_train_vals.T
interp_in_n = interp_in
if not isinstance(interp_in_n, np.ndarray):
interp_in_n = interp_in_n.to_numpy()
interp_in_n = interp_in_n.T
if not isinstance(pts_in, np.ndarray):
pts_in = pts_in.to_numpy()
pts_in = pts_in.T
pts_in = np.require(pts_in, dtype=np.float64, requirements=['F'])
if not isinstance(q, np.ndarray):
q = q.to_numpy()
p_in = np.asarray(q.T, dtype=np.float64, order="F")
ir=interp_in_n.shape[0]
interp_in_n = np.require(interp_in_n,
dtype=np.float64, requirements=['F'])
simp_out = np.ones(shape=(p_in.shape[0]+1, p_in.shape[1]),
dtype=np.int32, order="F")
weights_out = np.ones(shape=(p_in.shape[0]+1, p_in.shape[1]),
dtype=np.float64, order="F")
error_out = np.ones(shape=(p_in.shape[1],),
dtype=np.int32, order="F")
interp_out_n = np.zeros([interp_in_n.shape[0],p_in.shape[1]])
interp_out_n = np.require(interp_out_n,
dtype=np.float64, requirements=['F'])
rnorm_n = np.zeros(p_in.shape[1])
rnorm_n = np.require(rnorm_n, dtype=np.float64, requirements=['F'])
# From delsparse.py documenation:
# Setting EXTRAP=0 will cause all extrapolation points to be
# ignored without ever computing a projection. By default, EXTRAP=0.1
# (extrapolate by up to 10% of the diameter of PTS).
dsp(pts_in.shape[0], pts_in.shape[1],
pts_in, p_in.shape[1], p_in, simp_out,
weights_out, error_out,
extrap=options.extrap_thresh,
rnorm=rnorm_n,
pmode=1,
interp_in=interp_in_n, interp_out=interp_out_n)
if (options.computeGrad):
        # # arbitrary number of outputs, as determined by interp_in_n.shape[0]
grad_est_DS = np.zeros([interp_in_n.shape[0], simp_out.shape[1], options.dim])
grad_est_DS.fill(999)
for j in range(simp_out.shape[1]):
# note: the value of simp_out.shape[1] should equal the number of interpolation outputs
# extrapolation points don't get a simp_out entry, I think?
#
            # multiple test points may lie in the same simplex
            # but that just means you might duplicate effort
            # if you already saw a simplex and computed the gradient(s)
# this presumes pts_in was deep copied from data_train_inputs at start of compute_DS_only(...)
unscaled_inputs = data_train_inputs.to_numpy().T
# can try using scaled points instead:
# # unscaled_inputs = pts_in
for outputdim in range(interp_in_n.shape[0]):
matrixA = np.zeros([options.dim+1, options.dim+1])
for i in range(options.dim+1):
matrixA[i] = np.append(unscaled_inputs[:,simp_out[:,j][i]-1], interp_in_n[outputdim][simp_out[:,j][i]-1])
coords = matrixA
G = coords.sum(axis=0) / coords.shape[0]
# run SVD
u, s, vh = np.linalg.svd(coords - G)
                # unit normal vector to the fitted hyperplane
hyper_sfc_normal = vh[options.dim, :]
# approx grad as normal scaled by vertical component, times -1
grad_out = hyper_sfc_normal/hyper_sfc_normal[options.dim]
grad_out = -grad_out[:-1]
# print("grad out = ", grad_out)
grad_est_DS[outputdim][j] = grad_out
# end loop over output dimns
# end if computeGrad
else:
grad_est_DS = []
allow_extrapolation=True
print_errors=True
    # note: error code 1 = successful extrap; 2 = extrap beyond threshold
extrap_indices = np.where((error_out == 1) | (error_out == 2))
extrap_indices = np.array(extrap_indices[0])
# print("extrap indices = ", extrap_indices)
# print("rnorm = ", rnorm_n)
# print("rnorm[indices] = ", rnorm_n[extrap_indices])
# print("type = ", type(extrap_indices))
# print("e i [0]:",extrap_indices[0])
#==============================================================================#
# Check for errors in DelaunaySparse run
#==============================================================================#
if allow_extrapolation:
# print("Extrapolation occured at ", np.where(error_out == 1))
# Next line replaces error code 1 (successful extrapolation)
# with error code 0 (successful interpolation)
error_out = np.where(error_out == 1, 0, error_out)
else:
if 1 in error_out:
class Extrapolation(Exception): pass
raise(Extrapolation("Encountered extrapolation point (beyond threshold) when making Delaunay prediction."))
# Handle any errors that may have occurred.
if (sum(error_out) != 0):
if print_errors:
unique_errors = sorted(np.unique(error_out))
print(" [Delaunay errors:",end="")
for e in unique_errors:
if (e == 0): continue
indices = tuple(str(i) for i in range(len(error_out))
if (error_out[i] == e))
if (len(indices) > 5): indices = indices[:2] + ('...',) + indices[-2:]
print(" %3i"%e,"at","{"+",".join(indices)+"}", end=";")
print("] ")
# Reset the errors to simplex of 1s (to be 0) and weights of 0s.
bad_indices = (error_out > (1 if allow_extrapolation else 0))
simp_out[:,bad_indices] = 1
weights_out[:,bad_indices] = 0
return interp_out_n, actual_test_vals, actual_train_vals, extrap_indices, grad_est_DS
#==================================================================================================#
# Main section, includes some bad input checks
#==================================================================================================#
if __name__ == '__main__':
#==================================================================================================#
# Provide help screen documentation. Let the user define options. Also define defaults. #
#==================================================================================================#
usage = "%prog [options]"
parser = OptionParser(usage)
parser.add_option( "--jobid", help="Job ID.",
dest="jobid", type=int, default=999999)
parser.add_option( "--fn", help="Test function to use. Version 1.0 of the code only supports the Griewank function. " +
"It is possible to code in additional functions by modifying the definition of tf(X).",
dest="fn_name", type=str, default="griewank")
parser.add_option( "--dim", dest="dim", type=int, default=2,
help="Dimension of input space. Default 2.")
parser.add_option("--extrap", dest="extrap_thresh", type=float, default=0.0,
help="Extrapolation threshold parameter passed to DelaunaySparse. Default 0.0.")
parser.add_option("--maxsamp", dest="max_samp", type=int, default=20_000,
help="Max number of samples to draw. Default = 20,000.")
parser.add_option("--numtrainpts", dest="numtrainpts", type=int, default=850,
help="Initial number of samples points (n_0 in the paper). Default = 850.")
parser.add_option("--numtestperdim", dest="numtestperdim", type=int, default=20,
help="Number of test points per dimension. Default = 20.")
parser.add_option("--logbase", dest="log_base", type=float, default=1.4641,
help="Upsampling factor b; also the base of the logarithm in rate computation. Default 1.4641.")
parser.add_option("--zoomctr", dest="zoom_ctr", type=float, default=0.0,
help="Zoom modality: used only in conjunction with zoomexp option - see below. " +\
"Default=0.0. Use 999.0 in zoomctr or zoomexp to manually specify left/right bounds (not implemented in Version 1.0).")
parser.add_option("--zoomexp", dest="zoom_exp", type=float, default=1.0,
help="Zoom modality: set query bounds and bounding box such that (1) center is (x,x,...,x) where x=zoomctr"+\
" (2) length of query grid is 10e[zoomexp] in each dimension and (3) bounding box determined from testbdsc."+\
" Default=0.0. Use 999.0 in zoomctr or zoomexp to manually specify left/right bounds (not implemented in Version 1.0).")
parser.add_option("--queryleftbd", dest="queryleftbound", type=float, default=0.0,
help="Left bound of interval used to build query point domain [a, b]^dim. Overwritten if zoom modality is used (see above). Default 0.0")
parser.add_option("--queryrightbd", dest="queryrightbound", type=float, default=1.0,
help="Right bound of interval used to build query point domain [a, b]^dim. Overwritten if zoom modality is used (see above). Default 1.0")
parser.add_option("--bboxleftbd", dest="bboxleftbound", type=float, default=0.0,
help="Left bound of interval used to build bounding box [a, b]^dim. Overwritten if zoom modality is used (see above). Default 0.0")
parser.add_option("--bboxrightbd", dest="bboxrightbound", type=float, default=1.0,
help="Right bound of interval used to build bounding box [a, b]^dim. Overwritten if zoom modality is used (see above). Default 1.0")
parser.add_option("--testbdsc", dest="tb_scale", type=float, default=0.8,
help="Query points dimension fraction (qpdf), defined as the side length of the query lattice "
+ "divided by the side length of the bounding box. Default=0.8")
parser.add_option("--grad", dest="computeGrad", action="store_true", default=True,
help="Compute gradients within subroutine that calls DelaunaySparse. Default True.")
parser.add_option("--outc", dest="out_cor", type=int, default=-1,
help="Output coordinate to assess. Default -1 avoids this modality and takes the first output coordinate.")
parser.add_option("--seed", dest="spec_seed", type=int, default=0,
help="Value passed as global seed to random number generator. Default 0.")
parser.add_option("--itmax", dest="it_max", type=int, default=100,
help="Max number of iterations. More robust to use --maxsamp to set threshold. Default = 100.")
(options, args) = parser.parse_args()
def echo_options(options):
print("Selected options:")
print()
print("Job ID: ", options.jobid)
print("Function: ", options.fn_name)
print("Dimension: ", options.dim)
print()
print("Query points per dim:", options.numtestperdim)
print("Total number of query points:", options.numtestperdim ** options.dim)
# Set bounding box left/right bounds based on zoom center, zoom exponent, and scale factor qpdf
options.bboxleftbound = np.round(options.zoom_ctr - (10 ** (options.zoom_exp))/options.tb_scale,2)
options.bboxrightbound = np.round(options.zoom_ctr + (10 ** (options.zoom_exp))/options.tb_scale,2)
# Set query lattice left/right bounds based on bounding box bounds and scale factor qpdf
tg_scale_fac = (1.0-options.tb_scale)/2
interval_width = options.bboxrightbound - options.bboxleftbound
options.queryleftbound = options.bboxleftbound + tg_scale_fac * interval_width
options.queryrightbound = options.bboxrightbound - tg_scale_fac * interval_width
print("Query point bounds in each dim: ", "[", options.queryleftbound, ", ", options.queryrightbound, "]")
print("Query points dimension fraction (qpdf): ", options.tb_scale)
print("Bounding box bounds in each dim: ", "[", options.bboxleftbound, ", ", options.bboxrightbound, "]")
print()
print("Initial sample size:", options.numtrainpts)
print("Maximum sample size:", options.max_samp)
print("Upsampling factor b: ", options.log_base)
print()
print("Global seed for randomization: ", options.spec_seed)
print("Using gradients? : ", options.computeGrad)
print("Extrapolation threshold: ", options.extrap_thresh)
# print("Output cor : ", options.out_cor)
print()
if (options.bboxrightbound <= options.bboxleftbound):
print("Right bound must be larger than left bound")
exit()
if (options.tb_scale < 0.0001):
print("Test bound scale must be > 0")
exit()
if options.log_base <= 1:
print("Log base must be > 1. Default is 2.0.")
exit()
if (options.numtestperdim ** options.dim > 10000):
print()
print("==> WARNING: large number of query points = ", options.numtestperdim ** options.dim)
print()
if (options.extrap_thresh < 0 or options.extrap_thresh > 0.5):
print()
print("==> Set extrapolation threshold in [0,0.5]")
exit()
if (options.fn_name != 'griewank'):
print("==> ERROR: Requested function ", options.fn_name)
print("Only the function 'griewank' is supported by this version of the code.")
exit()
echo_options(options)
globalseed = options.spec_seed
rng = np.random.default_rng(globalseed)
# torch.manual_seed(globalseed)
data_train_inputs, data_train_outputs = make_random_training_in_box(rng)
data_test_inputs, data_test_outputs = make_test_data_grid(rng)
outfname = 'zz-' + str(options.jobid) + "-" + str(options.fn_name) + "-d" + str(options.dim) + "-tpd" + str(options.numtestperdim) + "-lb" + str(options.bboxleftbound) + "-rb" + str(options.bboxrightbound) + "-tb" + str(options.tb_scale) + "-log" + str(options.log_base) +".csv"
if (options.zoom_ctr != 999.0 and options.zoom_exp != 999.0): # add in -zoom[exponent value] before csv
outfname = outfname[:-4] + "-zoom" + str(options.zoom_exp) + ".csv"
if (options.spec_seed != 0): # add in -seed[seed value] before csv
outfname = outfname[:-4] + "-seed" + str(options.spec_seed) + ".csv"
print("===> Output will be stored in file ",outfname)
results_df = []
all_pts_in = copy.deepcopy(data_train_inputs)
all_pts_out = copy.deepcopy(data_train_outputs)
if (options.out_cor == -1): # default
out_coord = 0 # this means we will only measure error in 0th component of output; no problem if codomain is R^1
else:
out_coord = options.out_cor
print("")
print("=================================")
print("For output coordinate ", out_coord,": ")
print("=== results for ", options.fn_name, " ===")
print("samples | density | prop extrap | MSD diff | MSD rate | grad diff | grad rate | analytic diff | analytic rate ")
print("")
prev_error = 999999
prev_vals_at_test = []
prev_diff = 999999
########################################################################
# create list of number of samples for each update step;
# have to do in advance to avoid rounding issues
# can also help in future applications to know sampling rate calculation a priori
#######################################################################
quitloop = False
num_samples_to_add = np.zeros(options.it_max+1)
total_samples_so_far = np.zeros(options.it_max+1)
total_samples_so_far[0] = all_pts_in.shape[0]
for i in range(options.it_max): # i = number of "refinements" of interpolant
if quitloop:
break
#
# upsampling rule:
# update number of samples by replacing 2 points per unit per dimension with (logbase + 1) points per unit per dimension
# ALSO: round to the nearst integer and cast as an integer - this is essential for the static data case
# otherwise you may add the same sample point more than once, causing an error for DS
total_samples_so_far[i+1] = int(np.round(np.power((options.log_base*np.power(total_samples_so_far[i], 1/options.dim) - (options.log_base - 1)),options.dim)))
num_samples_to_add[i] = int(total_samples_so_far[i+1] - total_samples_so_far[i])
if (total_samples_so_far[i+1] > options.max_samp):
quitloop = True
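    # Illustrative worked example of the upsampling rule above: with dim = 2,
    # logbase b = 1.4641 and n_k = 850 samples, the next size is
    # round((b * sqrt(850) - (b - 1))**2) = round((1.4641 * 29.155 - 0.4641)**2) ~ 1783,
    # i.e. the per-dimension sample density is multiplied by roughly b at each iteration.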
########################################################################
# do iterative improvement according to upsampling schedule saved in num_samples_to_add
#######################################################################
quitloop = False
for i in range(options.it_max): # i = number of "refinements" of interpolant
if quitloop:
break
# use the accumulated sample points as the training and the fixed test data sets as the test data
interp_out_n, actual_test_vals, actual_train_vals, extrap_indices, grad_est_DS = compute_DS_only(all_pts_in, all_pts_out, data_test_inputs, data_test_outputs)
prop_extrap_iterate = len(extrap_indices)/interp_out_n.shape[1]
# print('====> proportion extrapolated = %1.2f' % prop_extrap_iterate)
density_of_sample = all_pts_in.shape[0] ** (1/options.dim)
# for analytical functions, we can compute the "actual" rate of convergence, for reference
ds_vs_actual_at_test = np.sqrt(((interp_out_n[out_coord,:]-actual_test_vals[out_coord,:]) ** 2).mean())
if (i == 0):
error_rate = 0
else:
error_rate = (np.log(prev_error/ds_vs_actual_at_test))/np.log(options.log_base)
# difference and rate computation steps for Algorithms 3.1 and 3.2
if (i == 0):
new_vs_prev_at_test = 0
diff_rate = 0
prev_diff = 0
if (options.computeGrad):
grad_new_vs_prev_at_test = 0
grad_diff_rate = 0
grad_prev_diff = 0
elif (i == 1):
new_vs_prev_at_test = np.sqrt(((interp_out_n[out_coord,:]-prev_vals_at_test[out_coord,:]) ** 2).mean())
diff_rate = 0
prev_diff = new_vs_prev_at_test
if (options.computeGrad):
grad_new_vs_prev_at_test = np.linalg.norm(grad_est_DS - grad_prev_vals_at_test)
grad_diff_rate = 0
grad_prev_diff = grad_new_vs_prev_at_test
else: # i > 1
new_vs_prev_at_test = np.sqrt(((interp_out_n[out_coord,:]-prev_vals_at_test[out_coord,:]) ** 2).mean())
# computation of r_k for MSD rate
diff_rate = np.log(prev_diff/new_vs_prev_at_test)/np.log(options.log_base)
prev_diff = new_vs_prev_at_test
if (options.computeGrad):
grad_new_vs_prev_at_test = np.linalg.norm(grad_est_DS - grad_prev_vals_at_test)
# computation of r_k for grad-MSD rate
                grad_diff_rate = - np.log(grad_new_vs_prev_at_test/grad_prev_diff) / np.log(options.log_base)
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from unittest.mock import Mock
from endochrone.classification import binary_tree as bdt
from endochrone.classification import BinaryDecisionTree
from endochrone.utils import lazy_test_runner as ltr
__author__ = "nickwood"
__copyright__ = "nickwood"
__license__ = "mit"
def test_entropy():
assert bdt.entropy([0]*14 + [1]*16) == pytest.approx(0.9967916319816366)
assert bdt.entropy([0]*16 + [1]*14) == pytest.approx(0.9967916319816366)
assert bdt.entropy([0]*12 + [1]*1) == pytest.approx(0.39124356362925566)
assert bdt.entropy([0]*1 + [1]*12) == pytest.approx(0.39124356362925566)
assert bdt.entropy([0]*4 + [1]*13) == pytest.approx(0.7871265862012691)
assert bdt.entropy([0]*13 + [1]*4) == pytest.approx(0.7871265862012691)
assert bdt.entropy([0]*8 + [1]*8) == pytest.approx(1.0)
assert bdt.entropy([0]*2 + [1]*3 + [3]*3) == pytest.approx(1.5612781244591)
assert bdt.entropy([0]*6 + [1]*8 + [3]*8) == pytest.approx(1.5726236638951)
assert bdt.entropy([0]*1 + [1]*2 + [3]*5) == pytest.approx(1.2987949406953)
assert bdt.entropy(['ca']*9 + ['bd']*4) == pytest.approx(0.890491640219491)
assert bdt.entropy(['ca']*4 + ['bd']*4) == pytest.approx(1.0)
assert bdt.entropy(['ca']*2 + ['bd']*9) == pytest.approx(0.684038435639041)
def test_generate_partitions():
x = np.transpose([[1, 2, 3, 7, 8, 9, 10],
[3, 4, 5, 1, 2, 3, 4],
[3, 1, 5, 1, 2, 3, 4]])
y_1 = np.array([0, 0, 0, 1, 1, 1, 1])
x_parts_0 = list(bdt.generate_partitions(x[:, 0], y_1))
assert len(x_parts_0) == 6
assert np.all(x_parts_0[0][0] == np.array([0]))
assert np.all(x_parts_0[0][1] == np.array([0, 0, 1, 1, 1, 1]))
assert np.all(x_parts_0[2][0] == np.array([0, 0, 0]))
assert np.all(x_parts_0[2][1] == np.array([1, 1, 1, 1]))
assert np.all(x_parts_0[4][0] == np.array([0, 0, 0, 1, 1]))
assert np.all(x_parts_0[4][1] == np.array([1, 1]))
x_parts_1 = list(bdt.generate_partitions(x[:, 1], y_1))
assert len(x_parts_1) == 4
assert np.all(x_parts_1[0][0] == np.array([1]))
assert np.all(x_parts_1[0][1] == np.array([1, 0, 1, 0, 1, 0]))
assert np.all(x_parts_1[2][0] == np.array([1, 1, 0, 1]))
assert np.all(x_parts_1[2][1] == np.array([0, 1, 0]))
assert np.all(x_parts_1[3][0] == np.array([1, 1, 0, 1, 0, 1]))
assert np.all(x_parts_1[3][1] == np.array([0]))
x_parts_2 = list(bdt.generate_partitions(x[:, 2], y_1))
assert len(x_parts_2) == 4
assert np.all(x_parts_2[0][0] == np.array([0, 1]))
assert np.all(x_parts_2[0][1] == np.array([1, 0, 1, 1, 0]))
assert np.all(x_parts_2[1][0] == np.array([0, 1, 1]))
assert np.all(x_parts_2[1][1] == np.array([0, 1, 1, 0]))
assert np.all(x_parts_2[3][0] == np.array([0, 1, 1, 0, 1, 1]))
assert np.all(x_parts_2[3][1] == np.array([0]))
y_2 = np.array([0, 0, 1, 1, 2, 2, 2])
x_parts_0 = list(bdt.generate_partitions(x[:, 0], y_2))
assert np.all(x_parts_0[0][0] == np.array([0]))
assert np.all(x_parts_0[0][1] == np.array([0, 1, 1, 2, 2, 2]))
x_parts_1 = list(bdt.generate_partitions(x[:, 1], y_2))
assert len(x_parts_1) == 4
assert np.all(x_parts_1[0][0] == np.array([1]))
    assert np.all(x_parts_1[2][0] == np.array([1, 2, 0, 2]))
import numpy as np
# Constant
def Constant_vit():
# (1,7)-RLL constraint, 4 states, 4 error propagations
# Encoder_Dict[a][b]: a stands for each state, b stands for (1 - input tags, 2 - output words, 3 - next state)
encoder_dict = {
1 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 0, 0]]),
'next_state' : np.array([[1], [2], [3], [3]])
},
2 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[1, 0, 0], [1, 0, 0], [1, 0, 1], [1, 0, 1]]),
'next_state' : np.array([[1], [2], [3], [4]])
},
3 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 1]]),
'next_state' : np.array([[1], [2], [3], [4]])
},
4 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0]]),
'next_state' : np.array([[1], [2], [3], [3]])
}
}
encoder_definite = {'m' : 0, 'a' : 2}
sbd_dict = {
'i1' : ['00x', '01x', '1xx', '20x', '21x', '40x', '41x', '5xx'],
'i2' : ['042', '044', '045', '05x', '101', '100', '242', '244', '245', '25x', '40x',
'41x', '442', '444', '445', '45x', '500', '501', '52x', '12x', '00x', '01x'],
'list' : {
0 : np.array([[0, 0, 0]]), 1 : np.array([[0, 0, 1]]), 2 : np.array([[0, 1, 0]]),
4 : np.array([[1, 0, 0]]), 5 : np.array([[1, 0, 1]])
},
'num_list' : np.array([[0, 1, 2, 4, 5]])
}
# channel state machine
channel_dict = {
'state_machine' : np.array([
[0, 0], [0, 1], [1, 2], [2, 3], [2, 4], [3, 7], [4, 8], [4, 9],
[5, 0], [5, 1], [6, 2], [7, 5], [7, 6], [8, 7], [9, 8], [9, 9]
]),
'in_out' : np.array([
[0, 0], [1, 1], [1, 3], [0, 2], [1, 3], [0, -2], [0, 0], [1, 1],
[0, -1], [1, 0], [1, 2], [0, -3], [1, -2], [0, -3], [0, -1], [1, 0]
]),
'state_label' : np.array([
[0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 1, 1, 2],
[0, 1, 1, 0, 3], [0, 1, 1, 1, 4], [1, 0, 0, 0, 5],
[1, 0, 0, 1, 6], [1, 1, 0, 0, 7], [1, 1, 1, 0, 8], [1, 1, 1, 1, 9]
]),
'num_state' : 10,
'ini_state' : 0
}
channel_in_out_norm = np.zeros(channel_dict['in_out'].shape)
channel_in_out_norm[:, 0] = channel_dict['in_out'][:, 0]
channel_in_out_norm[:, 1] = channel_dict['in_out'][:, 1] / np.sqrt(10)
channel_dict['in_out'] = channel_in_out_norm
# List of dummy bits paths
dummy_dict = {
0 : np.array([[0, 0, 0, 0, 0]]), 1 : np.array([[2, 3, 7, 5, 0]]),
2 : np.array([[3, 7, 5, 0, 0]]), 3 : np.array([[7, 5, 0, 0, 0]]),
4 : np.array([[8, 7, 5, 0, 0]]), 5 : np.array([[0, 0, 0, 0, 0]]),
        6 : np.array([[2, 3, 7, 5, 0]]), 7 : np.array([[5, 0, 0, 0, 0]])
    }
import os
import time
from pims import ImageSequence
import numpy as np
import pandas as pd
import scipy
import matplotlib as mpl
import matplotlib.pyplot as plt
from skimage import feature
import scipy.ndimage as ndimage
from skimage.feature import blob_log
import trackpy as tp
import os
from scipy.ndimage.filters import gaussian_filter
from timeit import default_timer as timer
from glob import glob
from tqdm import tqdm
lip_int_size = 30
lip_BG_size = 60
sep = 15  # distance between the centres of two liposomes must be 15
mean_multiplier = 1.5  # correlated with the number of liposomes; how intense a liposome must be to be accepted, should be 1.5
sigmas = 0.9
memory = 10  # frames # was 10 when treating
search_range = 10  # pixels
duration_min = 20  # min duration of track
appear_min = 50  # liposomes that appear after this frame are not counted # should be 5, was 50
#first reverse green videos
def image_loader_video(video):
from skimage import io
images_1 = io.imread(video)
    return np.asarray(images_1)  # remove frame 1
def green_video_reverser(vid_save_path):
print ('Fixing vid: ',str(vid_save_path))
vid = image_loader_video(vid_save_path)
vid = np.asarray(vid)
vid = vid[::-1]
from tifffile import imsave
imsave(str(vid_save_path), vid)
def ext_pir(x, y, frame):
x, y, frame = map(np.asarray, [x, y, frame])
mn, mx = frame.min(), frame.max() + 1
d = np.diff(np.append(frame, mx))
r = np.arange(len(frame))
i = r.repeat(d)
return x[i], y[i], np.arange(mn, mx)
def extend_arrs(x, y, frame):
# Convert to arrays
frame = np.asarray(frame)
x = np.asarray(x)
y = np.asarray(y)
l = frame[-1] - frame[0] + 1
id_ar = np.zeros(l, dtype=int)
id_ar[frame - frame[0]] = 1
idx = id_ar.cumsum() - 1
return np.r_[frame[0]:frame[-1] + 1], x[idx], y[idx]
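# extend_arrs forward-fills particle positions over missing frames: it builds a dense
# frame index from the first to the last observed frame and repeats the last known
# (x, y) until a new detection appears, so every returned track covers a contiguous
# range of frames.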
def position_extractor(tracked, max_length):
x_pos = []
y_pos = []
frames = []
names = []
group_all = tracked.groupby('particle')
for name, group in group_all:
frame = group.frame.tolist()
frame = frame[0:(len(frame) - 3)]
tmp = max_length - 1
frame.append(tmp)
# frame = [0,1,2,(max_length-1)]
x_tmp = group.x.tolist()
y_tmp = group.y.tolist()
frames_full = np.arange(min(frame), max(frame) + 1, 1)
frame, x, y = extend_arrs(x_tmp, y_tmp, frame)
# x,y,frame = ext_pir(x_tmp, y_tmp, frame)
x_pos.extend(x)
y_pos.extend(y)
frames.extend(frame)
names.extend([name] * len(x))
final_df = pd.DataFrame(
{'particle': names,
'frame': frames,
'x': x_pos,
'y': y_pos})
return final_df
def get_video_files(main_folder_path):
files = glob(str(main_folder_path+'*.tif'))
for file in files:
if file.find('green') != -1:
green = file
elif file.find('blue') != -1:
blue = file
elif file.find('red') != -1:
red = file
return [red,green,blue]
def tracker(video, mean_multiplier, sep):
mean = np.mean(video[0])
print ('tracking')
full = tp.batch(video, 11, minmass=mean * mean_multiplier, separation=sep,noise_size = 3);
print ('1000')
# check for subpixel accuracy
tp.subpx_bias(full)
full_tracked = tp.link_df(full, search_range, memory=memory)
full_tracked['particle'] = full_tracked['particle'].transform(int)
full_tracked['duration'] = full_tracked.groupby('particle')['particle'].transform(len)
full_tracked['t_appear'] = full_tracked.groupby('particle')['frame'].transform(min)
full_tracked = full_tracked[full_tracked.duration > duration_min]
full_tracked = full_tracked[full_tracked.t_appear < appear_min]
return full_tracked
def fix_green(green_vid):
new= []
for i in range(len(green_vid)):
for j in range(10):
new.append(green_vid[i])
return np.asarray(new, dtype=np.float32)
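# Note: fix_green simply repeats every green frame 10 times, presumably to bring the
# more slowly acquired green channel up to the same frame count as the tracked
# channel before signal extraction; the factor of 10 is hard-coded.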
def signal_extractor(video, final_df, red_blue, roi_size, bg_size):  # change so that red initial is after appearance timing
lip_int_size= roi_size
lip_BG_size = bg_size
def cmask(index, array, BG_size, int_size):
a, b = index
nx, ny = array.shape
y, x = np.ogrid[-a:nx - a, -b:ny - b]
mask = x * x + y * y <= lip_int_size # radius squared - but making sure we dont do the calculation in the function - slow
mask2 = x * x + y * y <= lip_int_size # to make a "gab" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = np.bitwise_xor(BG_mask, mask2)
return (sum((array[mask]))), np.median(((array[BG_mask])))
final_df = final_df.sort_values(['particle', 'frame'], ascending=True)
def df_extractor2(row):
b, a = row['x'], row['y'] #b,a
frame = int(row['frame'])
array = video[frame]
nx, ny = array.shape
y, x = np.ogrid[-a:nx - a, -b:ny - b]
mask = x * x + y * y <= lip_int_size # radius squared - but making sure we dont do the calculation in the function - slow
mask2 = x * x + y * y <= lip_int_size # to make a "gab" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = np.bitwise_xor(BG_mask, mask2)
return np.sum((array[mask])), np.median(((array[BG_mask]))) # added np in sum
size_maker = np.ones(video[0].shape)
ind = 25, 25 # dont ask - leave it here, it just makes sure the below runs
mask_size, BG_size = cmask(ind, size_maker, lip_BG_size, lip_int_size)
mask_size = np.sum(mask_size)
a = final_df.apply(df_extractor2, axis=1)
# a = df_extractor2(final_df, video)
intensity = []
bg = []
for line in a:
i, b = line
bg.append(b)
intensity.append(i)
if red_blue == 'blue' or red_blue == 'Blue':
final_df['np_int'] = intensity
final_df['np_bg'] = bg
final_df['np_int_corrected'] = (final_df['np_int']/mask_size) - (final_df['np_bg'])
elif red_blue == 'red' or red_blue == 'Red':
final_df['lip_int'] = intensity
final_df['lip_bg'] = bg
final_df['lip_int_corrected'] = (final_df['lip_int']/mask_size) - (final_df['lip_bg'])
else:
final_df['green_int'] = intensity
final_df['green_bg'] = bg
final_df['green_int_corrected'] = (final_df['green_int']/mask_size) - (final_df['green_bg'])
return final_df
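# Illustrative sketch (not from the original script): how the circular ROI mask and
# the background ring built in cmask / df_extractor2 above select pixels on a small
# synthetic frame. The sizes below are arbitrary placeholders; in the functions above
# they come from roi_size and bg_size, which are compared against x*x + y*y
# (i.e. the squared radius).
def _mask_demo(roi_size=9, bg_size=25):
    frame = np.ones((50, 50), dtype=np.float32)
    a, b = 25, 25                              # mask centre (row, column)
    nx, ny = frame.shape
    y, x = np.ogrid[-a:nx - a, -b:ny - b]
    roi = x * x + y * y <= roi_size            # disc of radius sqrt(roi_size)
    ring = np.bitwise_xor(x * x + y * y <= bg_size, roi)  # background ring around the disc
    return np.sum(frame[roi]), np.median(frame[ring])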
def signal_extractor_no_pos(video, final_df, red_blue,roi_size,bg_size): # change so that red initial is after appearance timing
lip_int_size= roi_size
lip_BG_size = bg_size
def cmask(index, array, BG_size, int_size):
a, b = index
nx, ny = array.shape
y, x = np.ogrid[-a:nx - a, -b:ny - b]
        mask = x * x + y * y <= lip_int_size # radius squared - but making sure we don't do the calculation in the function - slow
        mask2 = x * x + y * y <= lip_int_size # to make a "gap" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = np.bitwise_xor(BG_mask, mask2)
return (sum((array[mask]))), np.median(((array[BG_mask])))
final_df = final_df.sort_values(['frame'], ascending=True)
def df_extractor2(row):
b, a = row['x'], row['y'] #b,a
frame = int(row['frame'])
array = video[frame]
nx, ny = array.shape
y, x = np.ogrid[-a:nx - a, -b:ny - b]
        mask = x * x + y * y <= lip_int_size # radius squared - but making sure we don't do the calculation in the function - slow
        mask2 = x * x + y * y <= lip_int_size # to make a "gap" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = np.bitwise_xor(BG_mask, mask2)
return np.sum((array[mask])), np.median(((array[BG_mask]))) # added np in sum
size_maker = np.ones(video[0].shape)
ind = 25, 25 # dont ask - leave it here, it just makes sure the below runs
mask_size, BG_size = cmask(ind, size_maker, lip_BG_size, lip_int_size)
mask_size = np.sum(mask_size)
a = final_df.apply(df_extractor2, axis=1)
# a = df_extractor2(final_df, video)
intensity = []
bg = []
for line in a:
i, b = line
bg.append(b)
intensity.append(i)
if red_blue == 'blue' or red_blue == 'Blue':
final_df['blue_int'] = intensity
final_df['blue_bg'] = bg
final_df['blue_int_corrected'] = (final_df['blue_int']) - (final_df['blue_bg']*mask_size)
elif red_blue == 'red' or red_blue == 'Red':
final_df['red_int'] = intensity
final_df['red_bg'] = bg
final_df['red_int_corrected'] = (final_df['red_int']) - (final_df['red_bg']*mask_size)
else:
final_df['green_int'] = intensity
final_df['green_bg'] = bg
final_df['green_int_corrected'] = (final_df['green_int']) - (final_df['green_bg']*mask_size)
return final_df
def big_red_fix(red):
new = []
for i in range(len(red)):
if i %9 ==0:
new.append(red[i])
new.append(red[i])
else:
new.append(red[i])
return np.asarray(new)
def retreater(df,video,main_folder):
df = df.sort_values(['particle', 'frame'], ascending=True)
x_pos_final = np.asarray(df['x'].tolist())
y_pos_final = np.asarray(df['y'].tolist())
video_g = video
video_g,pos = cut_video(x_pos_final[0],y_pos_final[0],video_g)
from tifffile import imsave
video_g=np.asarray(video_g, dtype=np.float32)
video_g = fix_green(video_g)
imsave(str(main_folder+'green_vbig.tif'), video_g)
def cmask_plotter(index, array, BG_size, int_size):
a, b = index
nx, ny = array.shape
y, x = np.ogrid[-a:nx - a, -b:ny - b]
    mask = x * x + y * y <= lip_int_size # radius squared - but making sure we don't do the calculation in the function - slow
    mask2 = x * x + y * y <= lip_int_size # to make a "gap" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = np.bitwise_xor(BG_mask, mask2)
return mask,BG_mask
def step_tracker(df):
microns_per_pixel = 1
steps = []
msd = []
lag = []
df['x'] = df['x']* microns_per_pixel
df['y'] = df['y']* microns_per_pixel
group_all = df.groupby('particle')
x_step = []
y_step = []
# easiest: compute step in x, step in y and then steps
for name, group in group_all:
x_list = group.x.tolist()
x_tmp = [y - x for x,y in zip(x_list,x_list[1:])]
x_tmp.insert(0, 0.)
y_list = group.y.tolist()
y_tmp = [y - x for x,y in zip(y_list,y_list[1:])]
y_tmp.insert(0, 0.)
y_step.extend(y_tmp)
x_step.extend(x_tmp)
step_tmp = [np.sqrt(y**2+x**2) for y,x in zip(y_tmp,x_tmp)]
#msd_tmp,lag_tmp = msd_straight_forward(x_tmp,y_tmp)
#msd.extend(msd_tmp)
#lag.extend(lag_tmp)
steps.extend(step_tmp)
df['x_step'] = x_step
df['y_step'] = y_step
df['steplength'] = steps
#df['lag'] = lag
#df['msd'] = msd
return df
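# Small illustration (not from the original script) of the per-particle step-length
# computation performed in step_tracker above, on a toy three-point track:
# consecutive displacements of (3, 4) and (0, 0) give step lengths of 5 and 0, and
# the first frame gets a 0 step, as in the loop above.
def _steplength_demo():
    xs = [0.0, 3.0, 3.0]
    ys = [0.0, 4.0, 4.0]
    dx = [b - a for a, b in zip(xs, xs[1:])]
    dy = [b - a for a, b in zip(ys, ys[1:])]
    dx.insert(0, 0.)
    dy.insert(0, 0.)
    return [np.sqrt(x ** 2 + y ** 2) for x, y in zip(dx, dy)]   # [0.0, 5.0, 0.0]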
def get_meanx_y_df(df):
df = step_tracker(df)
df = df.sort_values(['frame'], ascending=True)
grp = df.groupby('frame')
x_list = []
y_list = []
for name,df_t in grp:
x_list.append(np.mean(df_t['x_step']))
        y_list.append(np.mean(df_t['y_step']))
def as_partitioning(power_plant_inputs):
from apcd_partitioning_dictionaries import as_dict
import numpy as np
arsenic_input = power_plant_inputs.Share_Arsenic
pm_control = power_plant_inputs.PM_Control
so2_control = power_plant_inputs.SO2_Control
nox_control = power_plant_inputs.NOx_Control
hg_control = power_plant_inputs.Hg_Control
sorbent = power_plant_inputs.DSI_Usage
#Boiler Partitioning
bottom_ash_solid = arsenic_input * np.mean(as_dict['Bottom_Ash']['solid'])
bottom_ash_liquid = arsenic_input * np.mean(as_dict['Bottom_Ash']['liquid'])
bottom_ash_gas = arsenic_input * np.mean(as_dict['Bottom_Ash']['gas'])
#SCR Partitioning
scr_solid = bottom_ash_gas * np.mean(as_dict[nox_control]['solid'])
scr_liquid = bottom_ash_gas * np.mean(as_dict[nox_control]['liquid'])
scr_gas = bottom_ash_gas * np.mean(as_dict[nox_control]['gas'])
#ACI Partitioning
aci_solid = scr_gas * np.mean(as_dict[hg_control]['solid'])
aci_liquid = scr_gas * np.mean(as_dict[hg_control]['liquid'])
aci_gas = scr_gas * np.mean(as_dict[hg_control]['gas'])
#DSI Partitioning
dsi_solid = aci_gas * np.mean(as_dict[sorbent]['solid'])
dsi_liquid = aci_gas * np.mean(as_dict[sorbent]['liquid'])
dsi_gas = aci_gas * np.mean(as_dict[sorbent]['gas'])
#Partitioning in PM Control Systems
pm_solid = dsi_gas * np.mean(as_dict[pm_control]['solid'])
pm_liquid = dsi_gas * np.mean(as_dict[pm_control]['liquid'])
pm_gas = dsi_gas * np.mean(as_dict[pm_control]['gas'])
#Partitioning in SO2 Control Systems
so2_solid = pm_gas * np.mean(as_dict[so2_control]['solid'])
so2_liquid = pm_gas * np.mean(as_dict[so2_control]['liquid'])
so2_gas = pm_gas * np.mean(as_dict[so2_control]['gas'])
    #Calculate total partitioning
as_solid = bottom_ash_solid + scr_solid + aci_solid + pm_solid + dsi_solid + so2_solid
as_liquid = bottom_ash_liquid + scr_liquid + aci_liquid + pm_liquid + dsi_liquid + so2_liquid
as_gas = so2_gas
return as_solid, as_liquid, as_gas
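# Hedged illustration of the partitioning pattern used above. Each APCD entry in the
# real apcd_partitioning_dictionaries maps a device to lists of solid/liquid/gas
# partitioning fractions, and the gas-phase output of one device feeds the next one
# downstream. The dictionary and device names below are invented for demonstration
# only and are not taken from that module.
def _partitioning_demo():
    import numpy as np
    toy_dict = {'Bottom_Ash': {'solid': [0.2, 0.3], 'liquid': [0.0], 'gas': [0.5, 0.7]},
                'Toy_ESP': {'solid': [0.8, 0.9], 'liquid': [0.0], 'gas': [0.1, 0.2]}}
    trace_in = 1.0
    bottom_ash_solid = trace_in * np.mean(toy_dict['Bottom_Ash']['solid'])
    bottom_ash_gas = trace_in * np.mean(toy_dict['Bottom_Ash']['gas'])
    pm_solid = bottom_ash_gas * np.mean(toy_dict['Toy_ESP']['solid'])
    pm_gas = bottom_ash_gas * np.mean(toy_dict['Toy_ESP']['gas'])
    return bottom_ash_solid + pm_solid, pm_gas   # (captured in solids, emitted as gas)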
def cl_partitioning(power_plant_inputs):
from apcd_partitioning_dictionaries import cl_dict
import numpy as np
chlorine_input = power_plant_inputs.Share_Chloride
pm_control = power_plant_inputs.PM_Control
so2_control = power_plant_inputs.SO2_Control
nox_control = power_plant_inputs.NOx_Control
hg_control = power_plant_inputs.Hg_Control
sorbent = power_plant_inputs.DSI_Usage
#Boiler Partitioning
bottom_ash_solid = chlorine_input * np.mean(cl_dict['Bottom_Ash']['solid'])
bottom_ash_liquid = chlorine_input * np.mean(cl_dict['Bottom_Ash']['liquid'])
bottom_ash_gas = chlorine_input * np.mean(cl_dict['Bottom_Ash']['gas'])
#SCR Partitioning
scr_solid = bottom_ash_gas * np.mean(cl_dict[nox_control]['solid'])
scr_liquid = bottom_ash_gas * np.mean(cl_dict[nox_control]['liquid'])
scr_gas = bottom_ash_gas * np.mean(cl_dict[nox_control]['gas'])
#ACI Partitioning
aci_solid = scr_gas * np.mean(cl_dict[hg_control]['solid'])
aci_liquid = scr_gas * np.mean(cl_dict[hg_control]['liquid'])
aci_gas = scr_gas * np.mean(cl_dict[hg_control]['gas'])
#DSI Partitioning
dsi_solid = aci_gas * np.mean(cl_dict[sorbent]['solid'])
dsi_liquid = aci_gas * np.mean(cl_dict[sorbent]['liquid'])
dsi_gas = aci_gas * np.mean(cl_dict[sorbent]['gas'])
#Partitioning in PM Control Systems
pm_solid = dsi_gas * np.mean(cl_dict[pm_control]['solid'])
    pm_liquid = dsi_gas * np.mean(cl_dict[pm_control]['liquid'])
    pm_gas = dsi_gas * np.mean(cl_dict[pm_control]['gas'])
    #Partitioning in SO2 Control Systems
    so2_solid = pm_gas * np.mean(cl_dict[so2_control]['solid'])
    so2_liquid = pm_gas * np.mean(cl_dict[so2_control]['liquid'])
    so2_gas = pm_gas * np.mean(cl_dict[so2_control]['gas'])
    #Calculate total partitioning
    cl_solid = bottom_ash_solid + scr_solid + aci_solid + pm_solid + dsi_solid + so2_solid
    cl_liquid = bottom_ash_liquid + scr_liquid + aci_liquid + pm_liquid + dsi_liquid + so2_liquid
    cl_gas = so2_gas
    return cl_solid, cl_liquid, cl_gas
"""Rhis module is meant for detecting clusters that occur within a projected
space. This uses one specific type of clustering that seems to work well for
3-dimensional projected data.
"""
import typing
import numpy as np
import sys
import os
try:
stdout, stderr = sys.stdout, sys.stderr
with open(os.devnull, 'w') as dnull:
sys.stdout = dnull
sys.stderr = dnull
import hdbscan # emits a warning that is very difficult to suppress
except:
sys.stdout = stdout
sys.stderr = stderr
raise
finally:
sys.stdout = stdout
sys.stderr = stderr
import pytypeutils as tus
class Clusters:
"""The data class which stores information about clusters generated from
particular samples.
Attributes:
samples (ndarray[n_samples, n_features]): the samples that the clusters were
selected from.
centers (ndarray[n_clusters, n_features]): where the cluster centers are located
labels (ndarray[n_samples]): each value is 0,1,...,n_clusters-1 and corresponds
to the nearest cluster to the corresponding sample in pc-space
calculate_params (dict[str, any]): the parameters that were used to generate these
clusters.
"""
def __init__(self, samples: np.ndarray, centers: np.ndarray, labels: np.ndarray,
calculate_params: typing.Dict[str, typing.Any]):
tus.check(samples=(samples, np.ndarray), centers=(centers, np.ndarray),
labels=(labels, np.ndarray), calculate_params=(calculate_params, dict))
tus.check_ndarrays(
samples=(samples, ('n_samples', 'n_features'),
(np.dtype('float32'), np.dtype('float64'))),
centers=(
centers,
('n_clusters',
('n_features', samples.shape[1] if len(samples.shape) > 1 else None)
),
samples.dtype
),
labels=(
labels,
(('n_samples', samples.shape[0] if bool(samples.shape) else None),),
(np.dtype('int32'), np.dtype('int64'))
)
)
self.samples = samples
self.centers = centers
self.labels = labels
self.calculate_params = calculate_params
@property
def num_samples(self):
"""Returns the number of samples used to generate these clusters"""
return self.samples.shape[0]
@property
def num_features(self):
"""Returns the number of features in the sample space"""
return self.samples.shape[1]
@property
def num_clusters(self):
"""Returns the number of clusters found. This may have been chosen"""
return self.centers.shape[0]
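# Minimal construction example (illustrative only, not part of the original module):
# three 2-D samples assigned to two clusters. The shapes and dtypes follow the
# checks enforced in Clusters.__init__ above.
def _clusters_example() -> Clusters:
    samples = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]], dtype='float64')
    centers = np.array([[0.5, 0.5], [5.0, 5.0]], dtype='float64')
    labels = np.array([0, 0, 1], dtype='int64')
    return Clusters(samples, centers, labels, {'method': 'manual example'})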
def find_clusters(samples: np.ndarray) -> Clusters:
"""Attempts to locate clusters in the given samples in the most generic
way possible."""
args = {
'min_cluster_size': int(0.2*samples.shape[0]),
'min_samples': 10
}
args_meta = {
'method': 'hdbscan.HDBSCAN'
}
clusts = hdbscan.HDBSCAN(**args)
clusts.fit(samples)
# first we determine how many clusters there are which actually
# have points belonging to them -1 is for unclustered points
labels = clusts.labels_
unique_labels = np.unique(labels)
if -1 in unique_labels:
        unique_labels = np.ascontiguousarray(unique_labels[unique_labels != -1])
"""
Code for creating ZIP load models. Note that this module follows Python
conventions: functions which start with an underscore are "private." So,
a user of this module should only use methods which do not start with an
underscore. At the time of writing (2020-06-17), the public methods are:
- cluster_and_fit
- get_best_fit_from_clustering
- zip_fit
A discussion of load modeling in PyVVO can be found in `this paper
<http://hdl.handle.net/10125/64115>`__.
Discussion of the ZIP modeling follows:
ZIP load models represent a load as part constant impedance (Z), part
constant current (I) and part constant power (P).
Since PyVVO uses GridLAB-D, we'll be formulating the problem the same
way GridLAB-D does.
.. math::
P_k \\! = \\! S_n \\! \\bigg[\\! \\frac{V_k^2}{V_n^2} Z_\\% \\cos(Z_\\theta) + \\frac{V_k}{V_n} I_\\% \\cos(I_\\theta) + P_\\% \\cos(P_\\theta) \\bigg]
Q_k \\! = \\! S_n \\! \\bigg[\\! \\frac{V_k^2}{V_n^2} Z_\\% \\sin(Z_\\theta) + \\frac{V_k}{V_n} I_\\% \\sin(I_\\theta) + P_\\% \\sin(P_\\theta) \\bigg]
1 = Z_\\% + I_\\% + P_\\%
Where:
:math:`P_k`: Predicted real power for time/interval :math:`k`
:math:`Q_k`: Predicted reactive power for time/interval :math:`k`
:math:`S_n`: Magnitude of nominal power
:math:`V_k`: Magnitude of input voltage for time/interval :math:`k`
:math:`V_n`: Nominal voltage
:math:`Z\\%`: Impedance fraction
:math:`Z_\\theta`: Impedance angle
:math:`I\\%`: Current fraction
:math:`I_\\theta`: Current angle
:math:`P\\%`: Power fraction
:math:`P_\\theta`: Power angle
To reduce computations during optimization, we'll make the following
variable substitutions:
.. math::
    \\bar{P}:=\\frac{P_k}{S_n}
    \\bar{Q}:=\\frac{Q_k}{S_n}
    \\bar{V}:=\\frac{V_k}{V_n}
In this module, a "zip_terms" parameter will be used frequently. This
parameter is a numpy array with six entries in the following order:
:math:`Z\\%`, :math:`Z_\\theta`, :math:`I\\%`, :math:`I_\\theta`,
:math:`P\\%`, :math:`P_\\theta`.
"""
# Standard library
import math
import logging
# Installed packages
import numpy as np
import pandas as pd
from scipy.optimize import minimize, Bounds
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
# pyvvo
from pyvvo import cluster
# Set up a log.
LOG = logging.getLogger(__name__)
# Make numpy error out for floating point errors.
np.seterr(all='raise')
# Constant for ZIP coefficients. ORDER MATTERS!
ZIP_TERMS = ['impedance', 'current', 'power']
# FTOL is for convergence tolerance. From scipy docs, once we no longer
# get FTOL improvement between iterations, we consider it converged.
# The value here is the default as documented here:
# https://docs.scipy.org/doc/scipy/reference/optimize.minimize-slsqp.html#optimize-minimize-slsqp
F_TOL = 1e-6
# Cap iterations.
MAX_ITER = 500
# Define bounds for our terms.
# Order: Z%, Z_theta, I%, I_theta, P%, P_theta
# Since we're dealing with power angles of loads, we'll bound them to
# the right-half-plane.
BOUNDS = Bounds([-np.inf, -np.pi/2, -np.inf, -np.pi/2, -np.inf, -np.pi/2],
[np.inf, np.pi/2, np.inf, np.pi/2, np.inf, np.pi/2])
# Use a simple starting point where each fraction is 1/3, and each angle
# is pi / 6 (resulting in a PF of ~0.9). This was set through some
# light-weight manual trial and error.
PAR_0 = (1/3, np.pi/6, 1/3, np.pi/6, 1/3, np.pi/6)
# Jacobian for our equality constraint is constant and not a function of
# the parameters.
EQ_JAC = np.array([1, 0, 1, 0, 1, 0])
# We'll be passing zip_terms around in the order Z%, Z_theta, I%,
# I_theta, P%, P_theta. Create masks to pull the fractions and angles.
FRACTION_MASK = np.array([True, False, True, False, True, False])
ANGLE_MASK = ~FRACTION_MASK
def zip_fit(vpq, v_n=240, s_n=None, par_0=PAR_0,
f_tol=F_TOL, max_iter=MAX_ITER, fit_data=True):
"""Given V, P, and Q data, perform ZIP fit and get coefficients.
:param vpq: pandas DataFrame with columns 'v' for voltage
magnitude, 'p' for real power, and 'q' for reactive power.
:param v_n: nominal voltage magnitude.
:param s_n: nominal apparent power magnitude. If None, it will be
estimated/inferred from the vpq data.
:param par_0: Initial guess/starting point for optimization. Should
be array in the order Z%, Z_theta, I%, I_theta, P%, P_theta.
:param f_tol: Precision goal for optimization. Terminates after
change between iterations is < f_tol
:param max_iter: Maximum number of iterations for optimization.
:param fit_data: Boolean flag. If true, include fitted p and q along
with the corresponding mean square error.
:return: dictionary with several fields:
- zip_gld: Dictionary with all the terms needed for GridLAB-D
modeling. These include:
- base_power: S_n
- impedance_fraction: Z%
- impedance_pf: Impedance "power factor," cos(Z_theta).
Will be negative if the power factor is leading for
GridLAB-D conventions
- current_fraction: I%
- current_pf: Current "power factor," cos(I_theta).
Negative if leading pf.
- power_fraction: P%
- power_pf: Power "power factor," cos(P_theta). Negative
if leading pf.
- sol: scipy.optimize.OptimizeResult object from performing
the ZIP fit (`docs
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html#scipy.optimize.OptimizeResult>`__).
Fields of note include 'x' (contains the zip_terms),
'success' (indicating if the optimizer exited successfully),
and 'message' (description of the cause of optimizer
termination).
If fit_data is true, the following fields will be included:
- p_pred: numpy array of predicted real power for the
resultant ZIP model.
- q_pred: numpy array of predicted reactive power for the
resultant ZIP model.
- mse_p: mean-squared error for real power.
- mse_q: mean-squared error for reactive power.
**IMPORTANT NOTE**: If the optimization fails, the only field
in the return will be 'sol.' So it's up to the caller to either
explicitly check sol.success or handle missing fields.
"""
# Estimate nominal power if not provided.
if s_n is None:
s_n = _estimate_nominal_power(vpq)
# Variable substitution to reduce multiplication/division during
# optimization.
vpq_bar = _get_vpq_bar(vpq=vpq, v_n=v_n, s_n=s_n)
# Solve.
sol = _zip_fit_slsqp(vpq_bar=vpq_bar, par_0=par_0, f_tol=f_tol,
max_iter=max_iter)
# Initialize return.
out = {'sol': sol}
# If this failed, log and return.
if not sol.success:
LOG.warning('Unable to solve. Message: {}'.format(sol.message))
return out
# Get ZIP terms in GridLAB-D format.
zip_gld = _zip_to_gld(sol.x)
# Add base power.
zip_gld['base_power'] = s_n
# Add zip_gld to our return.
out['zip_gld'] = zip_gld
# Compute the predicted values if asked to.
if fit_data:
p_pred, q_pred = _zip_model(v=vpq['v'].values, v_n=v_n, s_n=s_n,
zip_terms=sol.x)
out['p_pred'] = p_pred
out['q_pred'] = q_pred
# Compute mean squared error.
out['mse_p'] = \
mean_squared_error(y_true=vpq['p'].values, y_pred=p_pred)
out['mse_q'] = \
mean_squared_error(y_true=vpq['q'].values, y_pred=q_pred)
# Done.
return out
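# Hedged usage sketch (not part of the original pyvvo API): build a synthetic vpq
# DataFrame from a known ZIP model via _zip_model (defined further below in this
# module), then run zip_fit on it. The voltages, nominal values and ZIP terms are
# arbitrary illustrations.
def _zip_fit_example():
    rng = np.random.default_rng(42)
    v = 240 + rng.normal(scale=2.0, size=200)
    true_terms = np.array([0.5, 0.1, 0.3, 0.0, 0.2, -0.1])
    p, q = _zip_model(v=v, v_n=240, s_n=1000, zip_terms=true_terms)
    vpq = pd.DataFrame({'v': v, 'p': p, 'q': q})
    out = zip_fit(vpq, v_n=240, s_n=1000)
    # out['sol'].x holds the fitted terms (ZIP fits are generally not unique, so
    # they may differ from true_terms while still matching p and q closely), and
    # out['zip_gld'] holds the GridLAB-D style fractions and power factors.
    return out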
def _get_vpq_bar(vpq, v_n, s_n):
"""Helper to scale our v, p, and q. This helps reduce the amount
of floating point operations used during optimization.
:param vpq: Pandas DataFrame with columns v, p, and q.
:param v_n: scalar, nominal voltage.
:param s_n: scalar, nominal apparent power magnitude.
:returns: DataFrame with v_bar, p_bar, and q_bar, which are
scaled parameters to be used in optimization.
"""
return pd.DataFrame(data={'v_bar': vpq['v'] / v_n,
'p_bar': vpq['p'] / s_n,
'q_bar': vpq['q'] / s_n})
def _estimate_nominal_power(vpq):
"""Estimate nominal power from p and q.
:param vpq: pandas DataFrame with columns 'v' for voltage
magnitude, 'p' for real power, and 'q' for reactive power.
:return: s_n: our estimate for nominal power.
"""
# |S| = sqrt(P^2 + Q^2)
s_n = np.median(np.sqrt(np.square(vpq['p']) + np.square(vpq['q'])))
return s_n
def _zip_fit_slsqp(vpq_bar, par_0=PAR_0, f_tol=F_TOL,
max_iter=MAX_ITER):
"""Wrapper to call scipy.optimize.minimize.
:param vpq_bar: Pandas DataFrame with columns v_bar, p_bar, and
q_bar.
:param par_0: Initial guess of zip parameters. Should be in order
Z%, Z_theta, I%, I_theta, P%, P_theta.
:param f_tol: Precision goal for the value of f in the stopping
criterion of SLSQP.
:param max_iter: Maximum number of iterations to solve.
:return: scipy OptimizeResult object for this problem.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html#scipy.optimize.OptimizeResult
"""
# Simply call minimize and return the result.
return \
minimize(
# Call _zip_obj_and_jac for our objective function.
fun=_zip_obj_and_jac, x0=par_0,
# By setting jac=True, we indicate the objective function
# will also return the Jacobian. This is for efficiency.
jac=True,
# _zip_obj_and_jac takes additional args v_s, v_bar, p_bar,
# and q_bar.
args=(np.square(vpq_bar['v_bar'].values),
vpq_bar['v_bar'].values,
vpq_bar['p_bar'].values,
vpq_bar['q_bar'].values),
# Use sequential least squares programming, which is great if
# you're fitting to a known model.
method='SLSQP',
# We only have one constraint: Z% + I% + P% = 1.
# Note the Jacobian for that constraint is always the same.
# TODO: is initializing lambda functions each time adding
# overhead? Should there be be regular functions defined
# for 'fun' and 'jac'?
constraints={
'type': 'eq',
'fun': lambda x: np.array([np.sum(x[FRACTION_MASK]) - 1]),
'jac': lambda x: EQ_JAC},
# We don't have any bounds on our fractions, but we'll be
# keeping the angles within the right-half-plane.
bounds=BOUNDS,
# Pass in additional options. We don't want to print convergence
# messages, but will rather rely on the caller to check the
# solution.
# https://docs.scipy.org/doc/scipy/reference/optimize.minimize-slsqp.html#optimize-minimize-slsqp
options={'ftol': f_tol, 'maxiter': max_iter, 'disp': False}
)
def _zip_obj_and_jac(zip_terms, v_s, v_bar, p_bar, q_bar):
"""ZIP objective function and Jacobian calculations. The objective
function is the normalized sum of squared error, handling error for
P and Q separately.
:param zip_terms: numpy array with length 6. Should have terms Z%,
Z_theta, I%, I_theta, P%, and P_theta in that order.
:param v_s: numpy array, (v / v_nominal)^2
:param v_bar: numpy array, v / v_nominal
:param p_bar: numpy array, p / |s_n|
:param q_bar: numpy array, q / |s_n|
:returns: obj, jac. obj is the scalar value of the objective
function, and jac is a numpy array the same shape as zip_terms
with the partial derivative of the objective function with
respect to each term in zip_terms.
TODO: Should we normalize? Does it really matter? What helps the
numerical stability of SLSQP?
"""
# Pre-compute some terms we'll be using frequently.
cos_z_t = math.cos(zip_terms[1])
sin_z_t = math.sin(zip_terms[1])
cos_i_t = math.cos(zip_terms[3])
sin_i_t = math.sin(zip_terms[3])
cos_p_t = math.cos(zip_terms[5])
sin_p_t = math.sin(zip_terms[5])
# The objective function and every partial derivative will involve
# P - .... and Q - ... Pre-compute them here.
p_delta = (p_bar
- v_s * zip_terms[0] * cos_z_t
- v_bar * zip_terms[2] * cos_i_t
- zip_terms[4] * cos_p_t)
q_delta = (q_bar
- v_s * zip_terms[0] * sin_z_t
- v_bar * zip_terms[2] * sin_i_t
- zip_terms[4] * sin_p_t)
# Compute some other terms which will be used more than once. Using
# 'dot' as it's presumably faster than elementwise multiplication
# followed by a sum.
p_v_s_dot = np.dot(p_delta, v_s)
q_v_s_dot = np.dot(q_delta, v_s)
p_v_dot = np.dot(p_delta, v_bar)
q_v_dot = np.dot(q_delta, v_bar)
p_sum = np.sum(p_delta)
q_sum = np.sum(q_delta)
# Compute the value of the objective function.
obj = np.sum(np.square(p_delta) + np.square(q_delta))
# Initialize our Jacobian return.
jac = np.zeros_like(zip_terms)
# Compute the partial derivative w.r.t. Z% (0th term)
jac[0] = (2 *
(-p_v_s_dot * cos_z_t
+
-q_v_s_dot * sin_z_t)
)
# Compute the partial derivative w.r.t Z_theta (1st term)
jac[1] = (2 *
(-p_v_s_dot * zip_terms[0] * -sin_z_t
+
-q_v_s_dot * zip_terms[0] * cos_z_t)
)
# Compute the partial derivative w.r.t I% (2nd term)
jac[2] = (2 *
(-p_v_dot * cos_i_t
+
-q_v_dot * sin_i_t)
)
# Compute the partial derivative w.r.t. I_theta (3rd term)
jac[3] = (2 *
(-p_v_dot * zip_terms[2] * -sin_i_t
+
-q_v_dot * zip_terms[2] * cos_i_t)
)
# Compute the partial derivative w.r.t. P% (4th term)
jac[4] = (2 * (p_sum * -cos_p_t - q_sum * sin_p_t))
# Compute the partial derivative w.r.t. P_theta (5th term)
jac[5] = (2 *
(p_sum * -zip_terms[4] * -sin_p_t
+
q_sum * -zip_terms[4] * cos_p_t)
)
return obj, jac
def _zip_model(v, v_n, s_n, zip_terms):
"""Compute P and Q for a given ZIP model. This is generally used
for testing, not for the fitting/optimization itself.
:param v: numpy array of voltages.
:param v_n: scalar, nominal voltage.
:param s_n: scalar, nominal apparent power magnitude.
:param zip_terms: numpy array with the terms Z%, Z_theta, I%,
I_theta, P%, and P_theta, in that order.
"""
v_s = np.square(v / v_n)
p = s_n * (
v_s * zip_terms[0] * math.cos(zip_terms[1])
+ v / v_n * zip_terms[2] * math.cos(zip_terms[3])
+ zip_terms[4] * math.cos(zip_terms[5])
)
q = s_n * (
v_s * zip_terms[0] * math.sin(zip_terms[1])
+ v / v_n * zip_terms[2] * math.sin(zip_terms[3])
+ zip_terms[4] * math.sin(zip_terms[5])
)
return p, q
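# Quick sanity check of _zip_model (illustrative values only): with equal Z/I/P
# fractions and all power angles at zero, the model returns P = S_n and Q = 0 at
# nominal voltage.
def _zip_model_example():
    v = np.array([228.0, 240.0, 252.0])
    zip_terms = np.array([1 / 3, 0.0, 1 / 3, 0.0, 1 / 3, 0.0])
    p, q = _zip_model(v=v, v_n=240.0, s_n=1000.0, zip_terms=zip_terms)
    # p[1] == 1000.0 and q[1] == 0.0 at v = 240 V; p varies with voltage through
    # the constant-impedance and constant-current portions.
    return p, q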
def _zip_model_gld(v, v_n, s_n, gld_terms):
"""Wrapper to call _zip_model given a dictionary of GridLAB-D terms.
:param v: numpy array of voltages.
:param v_n: scalar, nominal voltage.
:param gld_terms: Dictionary of GridLAB-D terms for the ZIP model,
as would be output from _zip_to_gld.
"""
zip_terms = np.zeros(6)
# Dump the fractions into the right slots.
zip_terms[FRACTION_MASK] = np.array([
gld_terms['impedance_fraction'], gld_terms['current_fraction'],
gld_terms['power_fraction']
])
# Compute angles. Start by extracting the power factors.
pf = np.array([
gld_terms['impedance_pf'], gld_terms['current_pf'],
gld_terms['power_pf']])
angles = _angles_from_power_factors(pf=pf)
# Dump the angles into the correct slots.
zip_terms[ANGLE_MASK] = angles
return _zip_model(v=v, v_n=v_n, s_n=s_n, zip_terms=zip_terms)
def _power_factors_from_zip_terms(zip_terms):
"""Given the ZIP terms, compute the power factors related to them.
Note that the percentages are not relevant to this function, but
we'll take all the terms so the caller can be agnostic about which
terms are used.
:param zip_terms: numpy array, Z%, Z_theta, I%, I_theta, P%, and
P_theta, in that order.
:returns: numpy array, Z_pf, I_pf, and P_pf in that order.
"""
# Extract the angles.
angles = zip_terms[ANGLE_MASK]
# Initialize the power factors to be the cosine of the power angles.
pf = np.cos(angles)
# Negative angles result in negative (leading) power factors.
pf[angles < 0] *= -1
# All done.
return pf
def _angles_from_power_factors(pf):
"""Helper to convert a given power factor to a power angle.
:param pf: Numpy array of power factors. Negative means leading.
Note that all angles will be assumed to be in the right-half
plane.
:returns: angles. Numpy array with angles (radians) corresponding
to each pf.
"""
# The power angle is simply the inverse cosine of the power factor.
angles = np.arccos(np.abs(pf))
# Get the sign right.
angles[pf < 0] *= -1
# Done.
return angles
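# Round-trip check between the two helpers above (illustrative only): a leading
# power factor (negative by the convention used here) maps to a negative angle and
# back.
def _pf_angle_round_trip_example():
    pf = np.array([0.9, -0.9, 1.0])
    angles = _angles_from_power_factors(pf)
    zip_terms = np.zeros(6)
    zip_terms[ANGLE_MASK] = angles
    return _power_factors_from_zip_terms(zip_terms)  # approximately [0.9, -0.9, 1.0]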
def _zip_to_gld(zip_terms):
"""Given zip_terms, return a dictionary of GridLAB-D terms.
:param zip_terms: numpy array of our ZIP terms in the order Z%,
Z_theta, I%, I_theta, P%, P_theta.
:returns: dictionary with the following fields:
- impedance_fraction
- impedance_pf
- current_fraction
- current_pf
- power_fraction
- power_pf
"""
# Simply assign the fractions.
out = {'impedance_fraction': zip_terms[0],
'current_fraction': zip_terms[2],
'power_fraction': zip_terms[4]}
# Convert the angle to power factors.
pf = _power_factors_from_zip_terms(zip_terms)
# Assign.
out['impedance_pf'] = pf[0]
out['current_pf'] = pf[1]
out['power_pf'] = pf[2]
# All done.
return out
def cluster_and_fit(data, zip_fit_inputs, selection_data=None, n_clusters=1,
min_cluster_size=4, random_state=None):
"""Cluster data and perform ZIP fit.
Note that voltage will not be included in clustering.
:param data: pandas DataFrame containing all data needed to cluster
(optional) and fit data. At the minimum, columns must
include v (voltage magnitude), p (active power), and q
(reactive power).
:param zip_fit_inputs: dictionary of key word arguments to be passed
to the function zip_fit.
:param selection_data: pandas Series with data to be used for
cluster selection. Index can only contain
labels that exist in data. NOTE:
selection_data should not have 'v' in it.
Optional. If None, no clustering is
performed.
:param n_clusters: Integer of clusters to create for clustering.
Optional. Only required if selection_data is not
None.
:param min_cluster_size: Minimum allowed number of data points in
the selected cluster. Optional. Only
required if selection_data is not None.
:param random_state: Integer, numpy.random RandomState object
(preferred) or None. Used for random seeding of
K-Means clustering.
:return: fit_outputs: outputs from ZIP fit plus a 'data_len' field,
or None. None will be returned if we're clustering and the
min_cluster_size requirement is not met. The 'data_len' field
indicates the length/height of the data which went into fitting.
"""
# If we're clustering, do so.
if selection_data is not None:
# For K-Means, it's best to first standardize the data so that
# it looks Gaussian.
#
# Initialize a StandardScaler, and fit it to our data.
scaler = StandardScaler()
scaler.fit(data.values)
# TODO: MOVE THIS OUTSIDE OF THIS FUNCTION. THE DATA CAN BE
# SCALED ONCE IN THE CALLING FUNCTION, e.g.,
# get_best_fit_from_clustering. Likewise, the selection_data
# can be scaled outside this function.
# Create a DataFrame for holding scaled data.
# TODO: We're adding extra over-head to use a DataFrame, but
# this is a quick fix without messing with
# cluster.find_best_cluster.
scaled_data = pd.DataFrame(scaler.transform(data.values),
index=data.index, columns=data.columns)
# We also need to scale the selection data.
# Initialize a Series which has all the "columns" of our data.
tmp_series = pd.Series(0, index=data.columns)
# Fill the Series with our selection data values.
tmp_series[selection_data.index] = selection_data
# Now scale the temporary Series. Note the reshaping is for a
# single sample (1 row by X columns), and ravel puts the data
# back into a 1D array for Series creation.
scaled_selection = pd.Series(
scaler.transform(tmp_series.values.reshape(1, -1)).ravel(),
index=tmp_series.index)
# Note that 'v' is dropped from the cluster_data, and we're
# plucking the appropriate selection data.
data_out, best_bool, _ = cluster.find_best_cluster(
cluster_data=scaled_data.drop('v', axis=1),
selection_data=scaled_selection[selection_data.index],
n_clusters=n_clusters,
random_state=random_state)
# Re-associate voltage data.
data_out['v'] = scaled_data[best_bool]['v']
# "Un-scale" the fit_data.
# TODO: Again, we've got extra overhead by using DataFrames.
# TODO: This inverse_transform may be unnecessary. Could we
# instead use "best_bool" to index directly into "data"?
fit_data = pd.DataFrame(
scaler.inverse_transform(data_out[data.columns]),
index=data_out.index, columns=data.columns)
else:
# No clustering.
fit_data = data
# If we aren't clustering, or if we are and have enough data,
# perform the fit.
if (selection_data is None) or (fit_data.shape[0] >= min_cluster_size):
fit_outputs = zip_fit(fit_data[['v', 'p', 'q']], **zip_fit_inputs)
fit_outputs['data_len'] = fit_data.shape[0]
else:
# Otherwise,
fit_outputs = None
return fit_outputs
def get_best_fit_from_clustering(data, zip_fit_inputs, selection_data=None,
min_cluster_size=4, random_state=None):
"""Loop over different numbers of clusters to find the best ZIP fit.
For input descriptions, see ``cluster_and_fit`` function.
This calls cluster_and_fit function for each loop iteration.
NOTE: the 'fit_data' field of zip_fit_inputs will be overridden to
be true, as this function won't work otherwise.
:returns: best_fit. 'Best' output (smallest normalized mse_p
+ mse_q) from calling cluster_and_fit. It will also have a 'k'
field added, indicating the number of clusters used.
"""
# The length of our data must be larger than our minimum cluster
# size.
if len(data) < min_cluster_size:
raise ValueError('The given data has length {}, but the given '
'minimum cluster size is {}.'
.format(len(data), min_cluster_size))
# Override zip_fit_inputs
zip_fit_inputs['fit_data'] = True
# Track best coefficients and minimum normalized mean squared error.
# Note we normalize the MSE by dividing by the length of data used
# to come up with the ZIP coefficients. This normalization is
# important, because you're likely to have a large MSE with more
# data points.
best_fit = None
min_norm_mse = np.inf
# Compute maximum possible number of clusters.
    n = np.floor(data.shape[0] / min_cluster_size)
import numpy as np
class Base(object):
def __init__(self, index, feature=np.array([0.])):
self.index = index
self.feature = feature
def encode(self, dim=None,
index=True, feature=True,
vertical=False):
if not dim:
dim = self.index + 1
x = np.array([])
if index:
x = np.concatenate((x, self.index_one_hot(dim)))
if feature:
x = np.concatenate((x, self.feature))
        return x if not vertical else np.array([x])
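# Minimal usage sketch for encode(). Base.index_one_hot is not shown in this
# excerpt, so the subclass below assumes it returns a one-hot vector of length
# `dim` with a 1 at position self.index; that assumption is only for illustration.
class _ExampleItem(Base):
    def index_one_hot(self, dim):
        v = np.zeros(dim)
        v[self.index] = 1.
        return v
# _ExampleItem(2, feature=np.array([0.7])).encode(dim=4)
# -> array([0. , 0. , 1. , 0. , 0.7])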
# -*- coding: utf-8 -*-
"""
Level diagram calculations for atoms dressed by Rydberg levels.
The dressing is achieved by an AC electromagnetic field (laser).
Most of the code here is from the module calculations_atom_pairstate.py.
This one adds the AC field and the ground state to the Hamiltonian and diagonalizes it.
Example:
Calculation of the eigenstates when the laser light is near resonant with the
:math:`|~5~P_{3/2}~m_j=1/2\\rangle` -> :math:`|~60~S_{1/2}~m_j=1/2\\rangle` transition. Colour
highlights the admixture of the state :math:`|~5~P_{3/2}~m_j=1/2\\rangle`:
import arc as ARC
n0=5;l0=1;j0=1.5;mj0=0.5; #Ground State
nr=60;lr=0;jr=0.5;mjr=0.5; #Target rydberg State
theta=0; #Polar Angle [0-pi]
phi=0; #Azimuthal Angle [0-2pi]
dn = 3; #Range of n to consider (n0-dn:n0+dn)
dl = 3; #Range of l values
deltaMax = 20e9 #Max pair-state energy difference [Hz]
calc = ARC.DressedPairStateInteractions(ARC.Rubidium(), n0,l0,j0,nr,lr,jr, mj0,mjr,interactionsUpTo = 2, Omega0 = 8e-3,Delta0 = 30e-3)
#Omega0 is the Rabi frequency of the AC field and Delta0 is the detuning of the AC field from the transition.
rvdw = calc.getLeRoyRadius()
print("LeRoy radius = %.1f mum" % rvdw)
#R array (um)
r=np.linspace(1.5,10,1000)
#Generate pair-state interaction Hamiltonian
calc.defineBasis(theta,phi, dn,dl, deltaMax,progressOutput=True)
#Diagonalise
nEig=1 #Number of eigenstates to extract (we just want the ground state here)
calc.diagonalise(r,nEig,progressOutput=True,sortEigenvectors = True)
#Save data
calc.exportData('60S_dressed_pair_calculation', exportFormat='csv')
#Plot
calc.plotLevelDiagram(hlim = [0.95,1])
calc.ax.set_xlim(1.0,10.0)
calc.ax.set_ylim(-5,3)
calc.showPlot()
"""
from __future__ import division, print_function, absolute_import
from .wigner import Wigner6j, Wigner3j, CG, WignerDmatrix
from .alkali_atom_functions import _EFieldCoupling, _atomLightAtomCoupling
from scipy.constants import physical_constants, pi, epsilon_0, hbar
import gzip
import sys
import datetime
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
from .calculations_atom_single import StarkMap
from .alkali_atom_functions import *
from .divalent_atom_functions import DivalentAtom
from scipy.special import factorial
from scipy import floor
from scipy.special.specfun import fcoef
from scipy.sparse.linalg import eigsh
from scipy.sparse import csr_matrix, hstack, vstack
from numpy.lib.polynomial import real
from numpy.ma import conjugate
from scipy.optimize import curve_fit
from scipy.constants import e as C_e
from scipy.constants import h as C_h
from scipy.constants import c as C_c
from scipy.constants import k as C_k
import re
import numpy as np
from math import exp, log, sqrt
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['xtick.minor.visible'] = True
mpl.rcParams['ytick.minor.visible'] = True
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['xtick.minor.size'] = 4
mpl.rcParams['ytick.minor.size'] = 4
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['font.family'] = 'serif'
# for matrices
if sys.version_info > (2,):
xrange = range
DPATH = os.path.join(os.path.expanduser('~'), '.arc-data')
#
class DressedPairStateInteractions:
"""
Calculates level diagram (spaghetti) for levels of atoms dressed by rydberg state.
Initializes Rydberg level spaghetti calculation for the given atom
species (or for two atoms of different species) in the vicinity
of the given pair state to which a laser light. For details of calculation see
Ref. [?]_.
Args:
atom (:obj:`AlkaliAtom` or :obj:`DivalentAtom`): = {
:obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
Select the alkali metal for energy level
diagram calculation
n (int): principal quantum number for the ground state
l (int): orbital angular momentum for the ground state
j (float): total angular momentum for the ground state
nn (int): principal quantum number for the rydberg state
ll (int): orbital angular momentum for the rydberg state
jj (float): total angular momentum for the rydberg state
m1 (float): projection of the total angular momentum on z-axis
for the ground state
m2 (float): projection of the total angular momentum on z-axis
for the rydberg state
interactionsUpTo (int): Optional. If set to 1, includes only
dipole-dipole interactions. If set to 2 includes interactions
up to quadrupole-quadrupole. Default value is 1.
s (float): optional, spin state of the first atom. Default value
of 0.5 is correct for :obj:`AlkaliAtom` but for
:obj:`DivalentAtom` it has to be explicitly set to 0 or 1 for
singlet and triplet states respectively.
**If `s2` is not specified, it is assumed that the second
atom is in the same spin state.**
            s2 (float): optional, spin state of the second atom. If not
                specified (left to default value None) it will assume the spin
                state of the first atom.
            atom2 (:obj:`AlkaliAtom` or :obj:`DivalentAtom`): optional,
                specifies atomic species for the second atom, enabling
                calculation of **inter-species pair-state interactions**.
                If not specified (left to default value None) the second atom
                is assumed to be of the same species as the first atom.
References:
.. [1] Jorge et al.
Examples:
            **Advanced interfacing of pair-state interactions calculations
            (PairStateInteractions class).** This
is an advanced example intended for building up extensions to the
existing code. If you want to directly access the pair-state
interaction matrix, constructed by :obj:`defineBasis`,
you can assemble it easily from diagonal part
(stored in :obj:`matDiagonal` ) and off-diagonal matrices whose
spatial dependence is :math:`R^{-3},R^{-4},R^{-5}` stored in that
order in :obj:`matR`. Basis states are stored in :obj:`basisStates`
array.
>>> from arc import *
>>> calc = PairStateInteractions(Rubidium(), 60,0,0.5, \
60,0,0.5, 0.5,0.5,interactionsUpTo = 1)
>>> # theta=0, phi = 0, range of pqn, range of l, deltaE = 25e9
>>> calc.defineBasis(0 ,0 , 5, 5, 25e9, progressOutput=True)
>>> # now calc stores interaction matrix and relevant basis
>>> # we can access this directly and generate interaction matrix
>>> # at distance rval :
>>> rval = 4 # in mum
>>> matrix = calc.matDiagonal
>>> rX = (rval*1.e-6)**3
>>> for matRX in self.matR:
>>> matrix = matrix + matRX/rX
>>> rX *= (rval*1.e-6)
>>> # matrix variable now holds full interaction matrix for
>>> # interacting atoms at distance rval calculated in
>>> # pair-state basis states can be accessed as
>>> basisStates = calc.basisStates
"""
dataFolder = DPATH
# =============================== Methods ===============================
def __init__(self, atom, n, l, j, nn, ll, jj, m1, m2,
interactionsUpTo=1,
s=0.5,
s2=None, atom2=None, Omega0 = 0, Delta0 = 0):
# alkali atom type, principal quantum number, orbital angular momentum,
# total angular momentum projections of the angular momentum on z axis
self.atom1 = atom #: atom type
if atom2 is None:
self.atom2 = atom
else:
self.atom2 = atom2
self.n = n # : pair-state definition: principal quantum number of the ground state
self.l = l # : pair-state definition: orbital angular momentum of the ground state
self.j = j # : pair-state definition: total angular momentum of the ground state
self.nn = nn # : pair-state definition: principal quantum number of rydberg state
self.ll = ll # : pair-state definition: orbital angular momentum of rydberg state
        self.jj = jj # : pair-state definition: total angular momentum of the rydberg state
self.m1 = m1 # : pair-state definition: projection of the total ang. momentum for the ground state
self.m2 = m2 # : pair-state definition: projection of the total angular momentum for the rydberg state
self.interactionsUpTo = interactionsUpTo
""""
Specifies up to which approximation we include in pair-state interactions.
By default value is 1, corresponding to pair-state interactions up to
dipole-dipole coupling. Value of 2 is also supported, corresponding
to pair-state interactions up to quadrupole-quadrupole coupling.
"""
self.Omega0 = Omega0 #Rabi frequency of the dressing with the near resonant transition (nn, ll, jj, m2).
        self.Delta0 = Delta0 # Detuning from the near resonant transition (nn, ll, jj, m2)
if (issubclass(type(atom),DivalentAtom) and not (s == 0 or s == 1)):
raise ValueError("total angular spin s has to be defined explicitly "
"for calculations, and value has to be 0 or 1 "
"for singlet and tripplet states respectively.")
self.s1 = s #: total spin angular momentum, optional (default 0.5)
if s2 is None:
self.s2 = s
else:
self.s2 = s2
# check that values of spin states are valid for entered atomic species
if issubclass(type(self.atom1), DivalentAtom):
if (abs(self.s1) > 0.1 and abs(self.s1 - 1) > 0.1):
raise ValueError("atom1 is DivalentAtom and its spin has to be "
"s=0 or s=1 (for singlet and triplet states "
"respectively)")
elif (abs(self.s1 - 0.5) > 0.1):
raise ValueError("atom1 is AlkaliAtom and its spin has to be "
"s=0.5")
if issubclass(type(self.atom2), DivalentAtom):
if (abs(self.s2) > 0.1 and abs(self.s2 - 1) > 0.1):
raise ValueError("atom2 is DivalentAtom and its spin has to be "
"s=0 or s=1 (for singlet and triplet states "
"respectively)")
elif (abs(self.s2 - 0.5) > 0.1):
# we have divalent atom
raise ValueError("atom2 is AlkaliAtom and its spin has to be "
"s=0.5")
if (abs((self.s1-self.m1) % 1) > 0.1):
raise ValueError("atom1 with spin s = %.1d cannot have m1 = %.1d"
% (self.s1, self.m1))
if (abs((self.s2-self.m2) % 1) > 0.1):
raise ValueError("atom2 with spin s = %.1d cannot have m2 = %.1d"
% (self.s2, self.m2))
# ====================== J basis (not resolving mj) ===================
self.coupling = []
"""
List of matrices defineing coupling strengths between the states in
J basis (not resolving :math:`m_j` ). Basis is given by
:obj:`channel`. Used as intermediary for full interaction matrix
calculation by :obj:`defineBasis`.
"""
self.channel = []
"""
states relevant for calculation, defined in J basis (not resolving
:math:`m_j`. Used as intermediary for full interaction matrix
calculation by :obj:`defineBasis`.
"""
# ======================= Full basis (resolving mj) ===================
self.basisStates = []
"""
List of pair-states for calculation. In the form
[[n1,l1,j1,mj1,n2,l2,j2,mj2], ...].
Each state is an array [n1,l1,j1,mj1,n2,l2,j2,mj2] corresponding to
:math:`|n_1,l_1,j_1,m_{j1},n_2,l_2,j_2,m_{j2}\\rangle` state.
Calculated by :obj:`defineBasis`.
"""
self.matrixElement = []
"""
`matrixElement[i]` gives index of state in :obj:`channel` basis
(that doesn't resolve :obj:`m_j` states), for the given index `i`
of the state in :obj:`basisStates` ( :math:`m_j` resolving) basis.
"""
# variuos parts of interaction matrix in pair-state basis
self.matDiagonal = []
"""
Part of interaction matrix in pair-state basis that doesn't depend
on inter-atomic distance. E.g. diagonal elements of the interaction
matrix, that describe energies of the pair states in unperturbed
basis, will be stored here. Basis states are stored in
:obj:`basisStates`. Calculated by :obj:`defineBasis`.
"""
self.matR = []
"""
Stores interaction matrices in pair-state basis
that scale as :math:`1/R^3`, :math:`1/R^4` and :math:`1/R^5`
with distance in :obj:`matR[0]`, :obj:`matR[1]` and :obj:`matR[2]`
respectively. These matrices correspond to dipole-dipole
( :math:`C_3`), dipole-quadrupole ( :math:`C_4`) and
quadrupole-quadrupole ( :math:`C_5`) interactions
coefficients. Basis states are stored in :obj:`basisStates`.
Calculated by :obj:`defineBasis`.
"""
self.originalPairStateIndex = 0
"""
index of the original n,l,j,m1,nn,ll,jj,m2 pair-state in the
:obj:`basisStates` basis.
"""
self.matE = []
self.matB_1 = []
self.matB_2 = []
# ===================== Eigen states and plotting =====================
# finding perturbed energy levels
self.r = [] # detuning scale
self.y = [] # energy levels
self.highlight = []
# pointers towards figure
self.fig = 0
self.ax = 0
# for normalization of the maximum coupling later
self.maxCoupling = 0.
# n,l,j,mj, drive polarization q
self.drivingFromState = [0, 0, 0, 0, 0]
# sam = saved angular matrix metadata
self.angularMatrixFile = "angularMatrix.npy"
self.angularMatrixFile_meta = "angularMatrix_meta.npy"
#self.sam = []
self.savedAngularMatrix_matrix = []
# intialize precalculated values for factorial term
# in __getAngularMatrix_M
def fcoef(l1, l2, m):
return factorial(l1 + l2) / (factorial(l1 + m)
* factorial(l1 - m)
* factorial(l2 + m)
* factorial(l2 - m))**0.5
x = self.interactionsUpTo
self.fcp = np.zeros((x + 1, x + 1, 2 * x + 1))
for c1 in range(1, x + 1):
for c2 in range(1, x + 1):
for p in range(-min(c1, c2), min(c1, c2) + 1):
self.fcp[c1, c2, p + x] = fcoef(c1, c2, p)
self.conn = False
self.c = False
def __getAngularMatrix_M(self, l, j, ll, jj, l1, j1, l2, j2):
# did we already calculated this matrix?
self.c.execute('''SELECT ind FROM pair_angularMatrix WHERE
l1 = ? AND j1_x2 = ? AND
l2 = ? AND j2_x2 = ? AND
l3 = ? AND j3_x2 = ? AND
l4 = ? AND j4_x2 = ?
''', (l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2))
index = self.c.fetchone()
if (index):
return self.savedAngularMatrix_matrix[index[0]]
# determine coupling
dl = abs(l - l1)
dj = abs(j - j1)
c1 = 0
if dl == 1 and (dj < 1.1):
c1 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1):
c1 = 2 # quadrupole coupling
else:
raise ValueError("error in __getAngularMatrix_M")
exit()
dl = abs(ll - l2)
dj = abs(jj - j2)
c2 = 0
if dl == 1 and (dj < 1.1):
c2 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1):
c2 = 2 # quadrupole coupling
else:
raise ValueError("error in __getAngularMatrix_M")
exit()
am = np.zeros((int(round((2 * j1 + 1) * (2 * j2 + 1), 0)),
int(round((2 * j + 1) * (2 * jj + 1), 0))),
dtype=np.float64)
if (c1 > self.interactionsUpTo) or (c2 > self.interactionsUpTo):
return am
j1range = np.linspace(-j1, j1, round(2 * j1) + 1)
j2range = np.linspace(-j2, j2, round(2 * j2) + 1)
jrange = np.linspace(-j, j, int(2 * j) + 1)
jjrange = np.linspace(-jj, jj, int(2 * jj) + 1)
for m1 in j1range:
for m2 in j2range:
# we have chosen the first index
index1 = int(round(m1 * (2.0 * j2 + 1.0) + m2
+ (j1 * (2.0 * j2 + 1.0) + j2), 0))
for m in jrange:
for mm in jjrange:
# we have chosen the second index
index2 = int(round(m * (2.0 * jj + 1.0)
+ mm + (j * (2.0 * jj + 1.0) + jj),
0)
)
# angular matrix element from Sa??mannshausen, Heiner,
# Merkt, Fr??d??ric, Deiglmayr, Johannes
# PRA 92: 032505 (2015)
elem = (-1.0)**(j + jj + self.s1 + self.s2 + l1 + l2) * \
CG(l, 0, c1, 0, l1, 0) * CG(ll, 0, c2, 0, l2, 0)
elem = elem * \
sqrt((2.0 * l + 1.0) * (2.0 * ll + 1.0)) * \
sqrt((2.0 * j + 1.0) * (2.0 * jj + 1.0))
elem = elem * \
Wigner6j(l, self.s1, j, j1, c1, l1) * \
Wigner6j(ll, self.s2, jj, j2, c2, l2)
sumPol = 0.0 # sum over polarisations
limit = min(c1, c2)
for p in xrange(-limit, limit + 1):
sumPol = sumPol + \
self.fcp[c1, c2, p + self.interactionsUpTo] * \
CG(j, m, c1, p, j1, m1) *\
CG(jj, mm, c2, -p, j2, m2)
am[index1, index2] = elem * sumPol
index = len(self.savedAngularMatrix_matrix)
self.c.execute(''' INSERT INTO pair_angularMatrix
VALUES (?,?, ?,?, ?,?, ?,?, ?)''',
(l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2, index))
self.conn.commit()
self.savedAngularMatrix_matrix.append(am)
self.savedAngularMatrixChanged = True
return am
def __updateAngularMatrixElementsFile(self):
if not (self.savedAngularMatrixChanged):
return
try:
self.c.execute('''SELECT * FROM pair_angularMatrix ''')
data = []
for v in self.c.fetchall():
data.append(v)
data = np.array(data, dtype=np.float32)
data[:, 1] /= 2. # 2 r j1 -> j1
data[:, 3] /= 2. # 2 r j2 -> j2
data[:, 5] /= 2. # 2 r j3 -> j3
data[:, 7] /= 2. # 2 r j4 -> j4
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta),
'wb'
)
np.save(fileHandle, data)
fileHandle.close()
except IOError as e:
print("Error while updating angularMatrix \
data meta (description) File " + self.angularMatrixFile_meta)
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile),
'wb'
)
np.save(fileHandle, self.savedAngularMatrix_matrix)
fileHandle.close()
except IOError as e:
print("Error while updating angularMatrix \
data File " + self.angularMatrixFile)
print(e)
def __loadAngularMatrixElementsFile(self):
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta),
'rb'
)
data = np.load(fileHandle, encoding='latin1', allow_pickle=True)
fileHandle.close()
except:
print("Note: No saved angular matrix metadata files to be loaded.")
print(sys.exc_info())
return
data[:, 1] *= 2 # j1 -> 2 r j1
data[:, 3] *= 2 # j2 -> 2 r j2
data[:, 5] *= 2 # j3 -> 2 r j3
data[:, 7] *= 2 # j4 -> 2 r j4
        data = np.array(np.rint(data), dtype=int)
try:
self.c.executemany('''INSERT INTO pair_angularMatrix
(l1, j1_x2 ,
l2 , j2_x2 ,
l3, j3_x2,
l4 , j4_x2 ,
ind)
VALUES (?,?,?,?,?,?,?,?,?)''', data)
self.conn.commit()
except sqlite3.Error as e:
print("Error while loading precalculated values into the database!")
print(e)
exit()
if len(data) == 0:
print("error")
return
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile),
'rb'
)
self.savedAngularMatrix_matrix = np.load(
fileHandle,
encoding='latin1',
allow_pickle=True).tolist()
fileHandle.close()
except:
print("Note: No saved angular matrix files to be loaded.")
print(sys.exc_info())
def __isCoupled(self, n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2, limit):
if ((abs(self.__getEnergyDefect(n, l, j,
nn, ll, jj,
n1, l1, j1,
n2, l2, j2)
) / C_h < limit)
and not (n == n1 and nn == n2
and l == l1 and ll == l2
and j == j1 and jj == j2)
and not ((abs(l1 - l) != 1
and( (abs(j - 0.5) < 0.1
and abs(j1 - 0.5) < 0.1) # j = 1/2 and j'=1/2 forbidden
or
(abs(j) < 0.1
and abs(j1 - 1) < 0.1) # j = 0 and j'=1 forbidden
or
(abs(j-1) < 0.1
and abs(j1) < 0.1) # j = 1 and j'=0 forbidden
)
)
or (abs(l2 - ll) != 1
and( (abs(jj - 0.5) < 0.1
and abs(j2 - 0.5) < 0.1) # j = 1/2 and j'=1/2 forbidden
or
(abs(jj) < 0.1
and abs(j2 - 1) < 0.1) # j = 0 and j'=1 forbidden
or
(abs(jj-1) < 0.1
and abs(j2) < 0.1) # j = 1 and j'=0 forbidden
)
)
)
and not(abs(j)<0.1 and abs(j1)<0.1) # j = 0 and j'=0 forbiden
and not (abs(jj)<0.1 and abs(j2)<0.1)
and not (abs(l)<0.1 and abs(l1)<0.1) # l = 0 and l' = 0 is forbiden
and not (abs(ll)<0.1 and abs(l2)<0.1)
):
# determine coupling
dl = abs(l - l1)
dj = abs(j - j1)
c1 = 0
if dl == 1 and (dj < 1.1):
c1 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1)and (dj < 2.1) and \
(2 <= self.interactionsUpTo):
c1 = 2 # quadrupole coupling
else:
return False
dl = abs(ll - l2)
dj = abs(jj - j2)
c2 = 0
if dl == 1 and (dj < 1.1):
c2 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1) and (dj < 2.1) and \
(2 <= self.interactionsUpTo):
c2 = 2 # quadrupole coupling
else:
return False
return c1 + c2
else:
return False
def __getEnergyDefect(self,
n, l, j,
nn, ll, jj,
n1, l1, j1,
n2, l2, j2):
"""
Energy defect between |n,l,j>x|nn,ll,jj> state and |n1,l1,j1>x|n1,l1,j1>
state of atom1 and atom2 in respective spins states s1 and s2
Takes spin vales s1 and s2 as the one defined when defining calculation.
Args:
n (int): principal quantum number
l (int): orbital angular momentum
j (float): total angular momentum
nn (int): principal quantum number
ll (int): orbital angular momentum
jj (float): total angular momentum
n1 (int): principal quantum number
l1 (int): orbital angular momentum
j1 (float): total angular momentum
n2 (int): principal quantum number
l2 (int): orbital angular momentum
j2 (float): total angular momentum
Returns:
float: energy defect (SI units: J)
"""
return C_e * (self.atom1.getEnergy(n1, l1, j1, s=self.s1)
+ self.atom2.getEnergy(n2, l2, j2, s=self.s2)
- self.atom1.getEnergy(n, l, j, s=self.s1)
- self.atom2.getEnergy(nn, ll, jj, s=self.s2))
def __makeRawMatrix2(self,
nn, ll, jj,
k, lrange, limit, limitBasisToMj,
progressOutput=False, debugOutput=False):
n = nn
l = ll
j = jj
# limit = limit in Hz on energy defect
# k defines range of n' = [n-k, n+k]
dimension = 0
# which states/channels contribute significantly in the second order perturbation?
states = []
# original pairstate index
opi = 0
# this numbers are conserved if we use only dipole-dipole interactions
Lmod2 = ((l + ll) % 2)
l1start = l - 1
if l == 0:
l1start = 0
l2start = ll - 1
if ll == 0:
l2start = 0
if debugOutput:
print("\n ======= Relevant states =======\n")
for n1 in xrange(max(n - k, 1), n + k + 1):
for n2 in xrange(max(nn - k, 1), nn + k + 1):
l1max = max(l + self.interactionsUpTo, lrange) + 1
l1max = min(l1max, n1 - 1)
for l1 in xrange(l1start, l1max):
l2max = max(ll + self.interactionsUpTo, lrange) + 1
l2max = min(l2max, n2 - 1)
for l2 in xrange(l2start, l2max):
j1 = l1 - self.s1
while j1 < -0.1:
j1 += 2 * self.s1
while j1 <= l1 + self.s1 + 0.1:
j2 = l2 - self.s2
while j2 < -0.1:
j2 += 2 * self.s2
while j2 <= l2 + self.s2 + 0.1:
ed = self.__getEnergyDefect(n, l, j,
nn, ll, jj,
n1, l1, j1,
n2, l2, j2) / C_h
if (abs(ed) < limit
and (not (self.interactionsUpTo == 1)
or (Lmod2 == ((l1 + l2) % 2)))
and ((not limitBasisToMj)
or (j1 + j2 + 0.1
> self.m1 + self.m2))
and (n1 >= self.atom1.groundStateN
or [n1, l1, j1] in self.atom1.extraLevels)
and (n2 >= self.atom2.groundStateN
or [n2, l2, j2] in self.atom2.extraLevels)
):
if debugOutput:
pairState = (
"|"
+ printStateString(n1, l1, j1,
s=self.s1)
+ ","
+ printStateString(n2, l2, j2,
s=self.s2)
+ ">")
print(
pairState
+ ("\t EnergyDefect = %.3f GHz"
% (ed * 1.e-9)
)
)
states.append([n1, l1, j1, n2, l2, j2])
if (n == n1 and nn == n2
and l == l1 and ll == l2
and j == j1 and jj == j2
):
opi = dimension
dimension = dimension + 1
j2 = j2 + 1.0
j1 = j1 + 1.0
if debugOutput:
print("\tMatrix dimension\t=\t", dimension)
m = np.zeros((dimension, dimension), dtype=np.float64)
# mat_value, mat_row, mat_column for each sparce matrix describing
# dipole-dipole, dipole-quadrupole (and quad-dipole) and quadrupole-quadrupole
couplingMatConstructor = [[[], [], []]
for i in xrange(2 * self.interactionsUpTo - 1)]
# original pair-state (i.e. target pair state) Zeeman Shift
opZeemanShift = (self.atom1.getZeemanEnergyShift(
self.l, self.j, self.m1,
self.Bz,
s=self.s1)
+ self.atom2.getZeemanEnergyShift(
self.ll, self.jj, self.m2,
self.Bz,
s=self.s2)
) / C_h * 1.0e-9 # in GHz
if debugOutput:
print("\n ======= Coupling strengths (radial part only) =======\n")
maxCoupling = "quadrupole-quadrupole"
if (self.interactionsUpTo == 1):
maxCoupling = "dipole-dipole"
if debugOutput:
print("Calculating coupling (up to ",
maxCoupling, ") between the pair states")
for i in xrange(dimension):
ed = self.__getEnergyDefect(
states[opi][0], states[opi][1], states[opi][2],
states[opi][3], states[opi][4], states[opi][5],
states[i][0], states[i][1], states[i][2],
states[i][3], states[i][4], states[i][5]) / C_h * 1.0e-9\
- opZeemanShift
pairState1 = (
"|"
+ printStateString(states[i][0], states[i][1], states[i][2],
s=self.s1)
+ ","
+ printStateString(states[i][3], states[i][4], states[i][5],
s=self.s2)
+ ">"
)
states[i].append(ed) # energy defect of given state
for j in xrange(i + 1, dimension):
coupled = self.__isCoupled(
states[i][0], states[i][1], states[i][2],
states[i][3], states[i][4], states[i][5],
states[j][0], states[j][1], states[j][2],
states[j][3], states[j][4], states[j][5], limit)
if (states[i][0] == 24 and states[j][0] == 18):
print("\n")
print(states[i])
print(states[j])
print(coupled)
if coupled and (abs(states[i][0] - states[j][0]) <= k
and abs(states[i][3] - states[j][3]) <= k):
if debugOutput:
pairState2 = ("|"
+ printStateString(states[j][0],
states[j][1],
states[j][2],
s=self.s1)
+ ","
+ printStateString(states[j][3],
states[j][4],
states[j][5],
s=self.s2)
+ ">")
print(pairState1 + " <---> " + pairState2)
couplingStregth = _atomLightAtomCoupling(
states[i][0], states[i][1], states[i][2],
states[i][3], states[i][4], states[i][5],
states[j][0], states[j][1], states[j][2],
states[j][3], states[j][4], states[j][5],
self.atom1, atom2=self.atom2,
s=self.s1, s2=self.s2) / C_h * 1.0e-9
couplingMatConstructor[coupled - 2][0].append(
couplingStregth)
couplingMatConstructor[coupled - 2][1].append(i)
couplingMatConstructor[coupled - 2][2].append(j)
exponent = coupled + 1
if debugOutput:
print(("\tcoupling (C_%d/R^%d) = %.5f"
% (exponent, exponent,
couplingStregth * (1e6)**(exponent))),
"/R^", exponent, " GHz (mu m)^", exponent, "\n"
)
# coupling = [1,1] dipole-dipole, [2,1] quadrupole dipole, [2,2] quadrupole quadrupole
couplingMatArray = [
csr_matrix(
(couplingMatConstructor[i][0],
(couplingMatConstructor[i][1], couplingMatConstructor[i][2])
),
shape=(dimension, dimension)
)
for i in xrange(len(couplingMatConstructor))
]
return states, couplingMatArray
def __initializeDatabaseForMemoization(self):
# memoization of angular parts
self.conn = sqlite3.connect(os.path.join(self.dataFolder,
"precalculated_pair.db"))
self.c = self.conn.cursor()
# ANGULAR PARTS
self.c.execute('''DROP TABLE IF EXISTS pair_angularMatrix''')
self.c.execute('''SELECT COUNT(*) FROM sqlite_master
WHERE type='table' AND name='pair_angularMatrix';''')
if (self.c.fetchone()[0] == 0):
# create table
try:
self.c.execute('''CREATE TABLE IF NOT EXISTS pair_angularMatrix
(l1 TINYINT UNSIGNED, j1_x2 TINYINT UNSIGNED,
l2 TINYINT UNSIGNED, j2_x2 TINYINT UNSIGNED,
l3 TINYINT UNSIGNED, j3_x2 TINYINT UNSIGNED,
l4 TINYINT UNSIGNED, j4_x2 TINYINT UNSIGNED,
ind INTEGER,
PRIMARY KEY (l1,j1_x2, l2,j2_x2, l3,j3_x2, l4,j4_x2)
) ''')
except sqlite3.Error as e:
print(e)
self.conn.commit()
self.__loadAngularMatrixElementsFile()
self.savedAngularMatrixChanged = False
def __closeDatabaseForMemoization(self):
self.conn.commit()
self.conn.close()
self.conn = False
self.c = False
def getLeRoyRadius(self):
"""
Returns Le Roy radius for initial pair-state.
Le Roy radius [#leroy]_ is defined as
:math:`2(\\langle r_1^2 \\rangle^{1/2} + \\langle r_2^2 \\rangle^{1/2})`,
where :math:`r_1` and :math:`r_2` are electron coordinates for the
first and the second atom in the initial pair-state.
Below this radius, calculations are not valid since electron
wavefunctions start to overlap.
Returns:
float: LeRoy radius measured in :math:`\\mu m`
References:
.. [#leroy] <NAME>, <NAME>. Phys. **52**, 246 (1974)
http://www.nrcresearchpress.com/doi/abs/10.1139/p74-035
"""
step = 0.001
        r1, psi1_r1 = self.atom1.radialWavefunction(
            self.l, 0.5, self.j,
            self.atom1.getEnergy(self.n, self.l, self.j, s=self.s1) / 27.211,
            self.atom1.alphaC**(1 / 3.0),
            2.0 * self.n * (self.n + 15.0), step)
sqrt_r1_on2 = np.trapz(np.multiply(np.multiply(psi1_r1, psi1_r1),
np.multiply(r1, r1)),
x=r1)
r2, psi2_r2 = self.atom2.radialWavefunction(
self.ll, 0.5, self.jj,
self.atom2.getEnergy(self.nn, self.ll, self.jj, s=self.s2) / 27.211,
self.atom2.alphaC**(1 / 3.0),
2.0 * self.nn * (self.nn + 15.0), step)
sqrt_r2_on2 = np.trapz(np.multiply(np.multiply(psi2_r2, psi2_r2),
np.multiply(r2, r2)),
x=r2)
return 2. * (sqrt(sqrt_r1_on2) + sqrt(sqrt_r2_on2))\
* (physical_constants["Bohr radius"][0] * 1.e6)
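    # Usage sketch (hypothetical instance `calc` and parameters, only to
    # illustrate how this method is typically combined with the ones below):
    # the Le Roy radius is the smallest interatomic distance for which the
    # perturbative pair-state treatment is meaningful, so it is a natural
    # lower bound for the distance range passed to diagonalise().
    #
    #   rLeRoy = calc.getLeRoyRadius()                    # in mu m
    #   calc.defineBasis(0., 0., 5, 5, 25.e9)
    #   calc.diagonalise(np.linspace(rLeRoy, 15., 200), 100)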
def defineBasis(self, theta, phi, nRange, lrange, energyDelta,
Bz=0, progressOutput=False, debugOutput=False):
r"""
        Finds relevant states in the vicinity of the given Rydberg level.
        Finds the relevant Rydberg-level basis and calculates the interaction
        matrix. The Rydberg-level basis is saved in :obj:`basisStates`.
        The interaction matrix is saved in parts depending on the scaling with
        distance. Diagonal elements :obj:`matDiagonal`, corresponding to
        relative energy defects of the pair-states, don't change with
        interatomic separation. Off-diagonal elements can depend
        on distance as :math:`R^{-3}, R^{-4}` or :math:`R^{-5}`,
        corresponding to dipole-dipole (:math:`C_3` ), dipole-quadrupole
        (:math:`C_4` ) and quadrupole-quadrupole coupling (:math:`C_5` )
        respectively. These parts of the matrix are stored in :obj:`matR`
        in that order. I.e. :obj:`matR[0]` stores dipole-dipole coupling
        (:math:`\propto R^{-3}`), :obj:`matR[1]` stores dipole-quadrupole
        couplings etc.
Args:
theta (float): relative orientation of the two atoms
(see figure on top of the page), range 0 to :math:`\pi`
phi (float): relative orientation of the two atoms (see figure
on top of the page), range 0 to :math:`2\pi`
nRange (int): how much below and above the given principal
quantum number of the pair state we should be looking?
lrange (int): what is the maximum angular orbital momentum
state that we are including in calculation
energyDelta (float): what is maximum energy difference (
:math:`\Delta E/h` in Hz)
between the original pair state and the other pair states
that we are including in calculation
Bz (float): optional, magnetic field directed along z-axis in
units of Tesla. Calculation will be correct only for weak
                magnetic fields, where the paramagnetic term is much stronger
                than the diamagnetic term. The diamagnetic term is neglected.
progressOutput (bool): optional, False by default. If true,
prints information about the progress of the calculation.
debugOutput (bool): optional, False by default. If true,
similarly to progressOutput=True, this will print
information about the progress of calculations, but with
more verbose output.
See also:
:obj:`alkali_atom_functions.saveCalculation` and
:obj:`alkali_atom_functions.loadSavedCalculation` for
information on saving intermediate results of calculation for
later use.
"""
self.__initializeDatabaseForMemoization()
# save call parameters
self.theta = theta
self.phi = phi
self.nRange = nRange
self.lrange = lrange
self.energyDelta = energyDelta
self.Bz = Bz
self.basisStates = []
# wignerDmatrix
wgd = WignerDmatrix(theta, phi)
limitBasisToMj = False
if (theta < 0.001):
limitBasisToMj = True # Mj will be conserved in calculations
originalMj = self.m1 + self.m2
self.channel, self.coupling = self.__makeRawMatrix2(
self.nn, self.ll, self.jj,
nRange, lrange, energyDelta,
limitBasisToMj,
progressOutput=progressOutput,
debugOutput=debugOutput)
self.atom1.updateDipoleMatrixElementsFile()
self.atom2.updateDipoleMatrixElementsFile()
# generate all the states (with mj principal quantum number)
# opi = original pairstate index
opi = 0
# NEW FOR SPACE MATRIX
self.index = np.zeros(len(self.channel) + 1, dtype=np.int16)
for i in xrange(len(self.channel)):
self.index[i] = len(self.basisStates)
stateCoupled = self.channel[i]
for m1c in np.linspace(stateCoupled[2], -stateCoupled[2],
round(1 + 2 * stateCoupled[2])):
for m2c in np.linspace(stateCoupled[5], -stateCoupled[5],
round(1 + 2 * stateCoupled[5])):
if ((not limitBasisToMj) or (abs(originalMj
- m1c - m2c) < 0.1)):
self.basisStates.append(
[stateCoupled[0], stateCoupled[1], stateCoupled[2],
m1c,
stateCoupled[3], stateCoupled[4], stateCoupled[5],
m2c])
self.matrixElement.append(i)
if (abs(stateCoupled[0] - self.n) < 0.1
and abs(stateCoupled[1] - self.l) < 0.1
and abs(stateCoupled[2] - self.j) < 0.1
and abs(m1c - self.m1) < 0.1
and abs(stateCoupled[3] - self.nn) < 0.1
and abs(stateCoupled[4] - self.ll) < 0.1
and abs(stateCoupled[5] - self.jj) < 0.1
and abs(m2c - self.m2) < 0.1):
opi = len(self.basisStates) - 1
if (self.index[i] == len(self.basisStates)):
print(stateCoupled)
self.index[-1] = len(self.basisStates)
if progressOutput or debugOutput:
print("\nCalculating Hamiltonian matrix...\n")
dimension = len(self.basisStates)
if progressOutput or debugOutput:
print("\n\tmatrix (dimension ", dimension, ")\n")
        # INITIALIZING MATRICES
        # all (sparse) matrices will be saved in csr format
# value, row, column
matDiagonalConstructor = [[], [], []]
matRConstructor = [[[], [], []]
for i in xrange(self.interactionsUpTo * 2 - 1)]
matRIndex = 0
for c in self.coupling:
progress = 0.
for ii in xrange(len(self.channel)):
if progressOutput:
dim = len(self.channel)
progress += ((dim - ii) * 2 - 1)
sys.stdout.write(
"\rMatrix R%d %.1f %% (state %d of %d)"
% (matRIndex + 3,
float(progress) / float(dim**2) * 100.,
ii + 1,
len(self.channel)))
sys.stdout.flush()
ed = self.channel[ii][6]
# solves problems with exactly degenerate basisStates
degeneracyOffset = 0.00000001
i = self.index[ii]
dMatrix1 = wgd.get(self.basisStates[i][2])
dMatrix2 = wgd.get(self.basisStates[i][6])
for i in xrange(self.index[ii], self.index[ii + 1]):
statePart1 = singleAtomState(
self.basisStates[i][2], self.basisStates[i][3])
statePart2 = singleAtomState(
self.basisStates[i][6], self.basisStates[i][7])
# rotate individual states
statePart1 = dMatrix1.dot(statePart1)
statePart2 = dMatrix2.dot(statePart2)
stateCom = compositeState(statePart1, statePart2)
if (matRIndex == 0):
zeemanShift = (
self.atom1.getZeemanEnergyShift(
self.basisStates[i][1],
self.basisStates[i][2],
self.basisStates[i][3],
self.Bz,
s=self.s1)
+ self.atom2.getZeemanEnergyShift(
self.basisStates[i][5],
self.basisStates[i][6],
self.basisStates[i][7],
self.Bz,
s=self.s2)
) / C_h * 1.0e-9 # in GHz
matDiagonalConstructor[0].append(ed + zeemanShift
+ degeneracyOffset)
degeneracyOffset += 0.00000001
matDiagonalConstructor[1].append(i)
matDiagonalConstructor[2].append(i)
for dataIndex in xrange(c.indptr[ii], c.indptr[ii + 1]):
jj = c.indices[dataIndex]
radialPart = c.data[dataIndex]
j = self.index[jj]
dMatrix3 = wgd.get(self.basisStates[j][2])
dMatrix4 = wgd.get(self.basisStates[j][6])
if (self.index[jj] != self.index[jj + 1]):
d = self.__getAngularMatrix_M(
self.basisStates[i][1], self.basisStates[i][2],
self.basisStates[i][5], self.basisStates[i][6],
self.basisStates[j][1], self.basisStates[j][2],
self.basisStates[j][5], self.basisStates[j][6])
secondPart = d.dot(stateCom)
else:
print(" - - - ", self.channel[jj])
for j in xrange(self.index[jj], self.index[jj + 1]):
statePart1 = singleAtomState(
self.basisStates[j][2], self.basisStates[j][3])
statePart2 = singleAtomState(
self.basisStates[j][6], self.basisStates[j][7])
# rotate individual states
statePart1 = dMatrix3.dot(statePart1)
statePart2 = dMatrix4.dot(statePart2)
# composite state of two atoms
stateCom2 = compositeState(statePart1, statePart2)
angularFactor = conjugate(
stateCom2.T).dot(secondPart)
angularFactor = real(angularFactor[0, 0])
if (abs(angularFactor) > 1.e-5):
matRConstructor[matRIndex][0].append(
radialPart * angularFactor)
matRConstructor[matRIndex][1].append(i)
matRConstructor[matRIndex][2].append(j)
matRConstructor[matRIndex][0].append(
radialPart * angularFactor)
matRConstructor[matRIndex][1].append(j)
matRConstructor[matRIndex][2].append(i)
matRIndex += 1
if progressOutput or debugOutput:
print("\n")
self.matDiagonal = csr_matrix(
(matDiagonalConstructor[0],
(matDiagonalConstructor[1], matDiagonalConstructor[2])),
shape=(dimension, dimension)
)
self.matR = [
csr_matrix((matRConstructor[i][0],
(matRConstructor[i][1], matRConstructor[i][2])),
shape=(dimension, dimension)
) for i in xrange(self.interactionsUpTo * 2 - 1)
]
self.originalPairStateIndex = opi
self.__updateAngularMatrixElementsFile()
self.__closeDatabaseForMemoization()
def __getDressedMatrixElements(self,UNmat):
r"""
        This method is an addition to the functions defined in
        calculations_atom_pairstate.py. We add the ground state and the states
        in which one atom of the pair is excited to the target Rydberg state.
        See ref [?] for more details.
        Args:
            UNmat: Hamiltonian matrix with all Rydberg pair states and their
                interactions.
        Returns:
            Hamiltonian matrix extended by the ground state and the
            intermediate states that contain one Rydberg atom and one
            ground-state atom.
"""
UNmatdimension = len(UNmat.toarray())
#print(UNmat.toarray())
n0 = self.n
j0 = self.j
l0 = self.l
m0 = self.m1
state_main = [self.nn,self.ll,self.jj,self.m2]
d0 = self.atom1.getDipoleMatrixElement(n0,l0,j0,m0,self.nn,self.ll,self.jj,self.m2,0)
Omega_array = []
Omg0 = self.Omega0
for i in range(UNmatdimension):
d = 0
if (self.basisStates[i][:4] == state_main) or (self.basisStates[i][4:] == state_main):
d = self.atom1.getDipoleMatrixElement(n0,l0,j0,m0,self.nn,self.ll,self.jj,self.m2,0)
Omega_array = np.append(Omega_array,0.5*Omg0*d/d0)
row = np.zeros(UNmatdimension) + 1
col = np.arange(UNmatdimension)
mat = csr_matrix((Omega_array, (row, col)), shape=(2, UNmatdimension))
UNmat = vstack([mat,UNmat])
row = np.arange(UNmatdimension+2)
row = np.concatenate((np.array([1]), row))
col = np.zeros(UNmatdimension+2) + 1
col = np.concatenate((np.array([0]), col))
Omega_array = np.concatenate((np.array([Omg0*0.5,Omg0*0.5,self.Delta0]), Omega_array))
mat = csr_matrix((Omega_array, (row, col)), shape=(UNmatdimension+2, 2))
UNmat = hstack([mat,UNmat])
UNmat = csr_matrix(UNmat)
#print(UNmat.toarray())
return UNmat
def diagonalise(self, rangeR, noOfEigenvectors,
drivingFromState=[0, 0, 0, 0, 0],
eigenstateDetuning=0.,
sortEigenvectors=False,
progressOutput=False,
debugOutput=False):
r"""
Finds eigenstates.
ARPACK ( :obj:`scipy.sparse.linalg.eigsh`) calculation of the
`noOfEigenvectors` eigenvectors closest to the original state. If
`drivingFromState` is specified as `[n,l,j,mj,q]` coupling between
the pair-states and the situation where one of the atoms in the
pair state basis is in :math:`|n,l,j,m_j\rangle` state due to
driving with a laser field that drives :math:`q` transition
(+1,0,-1 for :math:`\sigma^-`, :math:`\pi` and :math:`\sigma^+`
        transitions respectively) is calculated and marked by
        colour mapping these values onto the obtained eigenvectors.
Args:
rangeR ( :obj:`array`): Array of values for distance between
the atoms (in :math:`\mu` m) for which we want to calculate
eigenstates.
noOfEigenvectors (int): number of eigen vectors closest to the
energy of the original (unperturbed) pair state. Has to be
                smaller than the total number of states.
eigenstateDetuning (float, optional): Default is 0. This
specifies detuning from the initial pair-state (in Hz)
around which we want to find `noOfEigenvectors`
                eigenvectors. This is useful when looking only for a couple
                of off-resonant features.
drivingFromState ([int,int,float,float,int]): Optional. State
of one of the atoms from the original pair-state basis
from which we try to drive to the excited pair-basis
manifold, **assuming that the first of the two atoms is
already excited to the specified Rydberg state**.
By default, program will calculate just
contribution of the original pair-state in the eigenstates
                obtained by diagonalization, and will highlight its
                admixture by colour mapping the obtained eigenstates plot.
State is specified as :math:`[n,\ell,j,mj, d]`
where :math:`d` is +1, 0 or
-1 for driving :math:`\sigma^-` , :math:`\pi`
and :math:`\sigma^+` transitions respectively.
sortEigenvectors(bool): optional, False by default. Tries to
                sort eigenvectors so that a given eigenvector index
                corresponds to an adiabatically changing eigenstate, as
                determined by maximising overlap between old and new
eigenvectors.
progressOutput (bool): optional, False by default. If true,
prints information about the progress of the calculation.
debugOutput (bool): optional, False by default. If true,
similarly to progressOutput=True, this will print
information about the progress of calculations, but with
more verbose output.
"""
self.r = np.sort(rangeR)
dimension = len(self.basisStates)
self.noOfEigenvectors = noOfEigenvectors
# energy of the state - to be calculated
self.y = []
# how much original state is contained in this eigenvector
self.highlight = []
# what are the dominant contributing states?
self.composition = []
if (noOfEigenvectors >= dimension - 1):
noOfEigenvectors = dimension - 1
print("Warning: Requested number of eigenvectors >=dimension-1\n \
ARPACK can only find up to dimension-1 eigenvectors, where\
dimension is matrix dimension.\n")
if noOfEigenvectors < 1:
return
coupling = []
self.maxCoupling = 0.
self.maxCoupledStateIndex = 0
if (drivingFromState[0] != 0):
self.drivingFromState = drivingFromState
if progressOutput:
print("Finding coupling strengths")
# get first what was the state we are calculating coupling with
state1 = drivingFromState
n1 = int(round(state1[0]))
l1 = int(round(state1[1]))
j1 = state1[2]
m1 = state1[3]
q = state1[4]
for i in xrange(dimension):
thisCoupling = 0.
if (int(abs(self.basisStates[i][5] - l1)) == 1
and abs(self.basisStates[i][0]
- self.basisStates[self.originalPairStateIndex][0])
< 0.1
and abs(self.basisStates[i][1]
- self.basisStates[self.originalPairStateIndex][1])
< 0.1
and abs(self.basisStates[i][2]
- self.basisStates[self.originalPairStateIndex][2])
< 0.1
and abs(self.basisStates[i][3]
- self.basisStates[self.originalPairStateIndex][3])
< 0.1
):
state2 = self.basisStates[i]
n2 = int(state2[0 + 4])
l2 = int(state2[1 + 4])
j2 = state2[2 + 4]
m2 = state2[3 + 4]
if debugOutput:
print(n1, " ", l1, " ", j1, " ", m1, " ", n2,
" ", l2, " ", j2, " ", m2, " q=", q)
print(self.basisStates[i])
dme = self.atom2.getDipoleMatrixElement(n1, l1, j1, m1,
n2, l2, j2, m2,
q, s=self.s2)
thisCoupling += dme
thisCoupling = abs(thisCoupling)**2
if thisCoupling > self.maxCoupling:
self.maxCoupling = thisCoupling
self.maxCoupledStateIndex = i
if (thisCoupling > 0.000001) and debugOutput:
print("original pairstate index = ",
self.originalPairStateIndex)
print("this pairstate index = ", i)
print("state itself ", self.basisStates[i])
print("coupling = ", thisCoupling)
coupling.append(thisCoupling)
print("Maximal coupling from a state")
print("is to a state ",
self.basisStates[self.maxCoupledStateIndex])
print("is equal to %.3e a_0 e" % self.maxCoupling)
if progressOutput:
print("\n\nDiagonalizing interaction matrix...\n")
rvalIndex = 0.
previousEigenvectors = []
for rval in self.r:
if progressOutput:
                sys.stdout.write("\r%d%%" %
                                 (rvalIndex / len(self.r) * 100.))
sys.stdout.flush()
rvalIndex += 1.
# calculate interaction matrix
m = (self.matDiagonal).toarray()
#print(m)
m[m!=0] += 2*self.Delta0
#print(m)
m = csr_matrix(m)
rX = (rval * 1.e-6)**3
for matRX in self.matR:
m = m + matRX / rX
rX *= (rval * 1.e-6)
#Get the dressed state basis.
m = self.__getDressedMatrixElements(m)
# uses ARPACK algorithm to find only noOfEigenvectors eigenvectors
# sigma specifies center frequency (in GHz)
ev, egvector = eigsh(
m, noOfEigenvectors,
sigma=eigenstateDetuning * 1.e-9,
which='LM',
tol=1E-8)
if sortEigenvectors:
# Find which eigenvectors overlap most with eigenvectors from
                # previous diagonalisation, in order to find "adiabatic"
# continuation for the respective states
if previousEigenvectors == []:
previousEigenvectors = np.copy(egvector)
                    previousEigenvalues = np.copy(ev)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train.py
import matplotlib
matplotlib.use('Agg')
import os
import argparse
import cv2
import shutil
import itertools
import tqdm
import math
import numpy as np
import json
import tensorflow as tf
import zipfile
import pickle
import six
from glob import glob
from tensorpack import *
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils import optimizer
from tensorpack.utils import logger
import tensorpack.utils.viz as tpviz
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.dataflow import (
DataFlow, RNGDataFlow, DataFromGenerator, MapData, imgaug, AugmentImageComponent, TestDataSpeed, MultiProcessMapData,
MapDataComponent, DataFromList, PrefetchDataZMQ, BatchData)
from resnet_model import (
preresnet_group, preresnet_basicblock, preresnet_bottleneck,
resnet_group, resnet_basicblock, resnet_bottleneck, se_resnet_bottleneck,
resnet_backbone)
from basemodel import (
image_preprocess, pretrained_resnet_conv4, resnet_conv5)
from model import *
import config
import collections
import ast
import pandas as pd
from utils import *
from tensorflow.python.keras.metrics import categorical_accuracy, top_k_categorical_accuracy
from tensorflow import keras
from custom_utils import ReduceLearningRateOnPlateau
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import f1_score
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold, MultilabelStratifiedShuffleSplit
import scipy.optimize as opt
def sigmoid_np(x):
return 1.0/(1.0 + np.exp(-x))
def tf_ce(x, z):
zeros = np.zeros_like(x)
cond = (x >= zeros)
relu_logits = np.where(cond, x, zeros)
neg_abs_logits = np.where(cond, -x, x)
return np.mean(relu_logits - x * z + np.log1p(np.exp(neg_abs_logits)))
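# tf_ce above mirrors the numerically stable form used by
# tf.nn.sigmoid_cross_entropy_with_logits, max(x, 0) - x*z + log(1 + exp(-|x|)),
# averaged over all elements. A quick sanity check (sketch, random data):
#
#   x = np.random.randn(4, 28)                              # logits
#   z = (np.random.rand(4, 28) > 0.5).astype(np.float64)    # binary labels
#   p = sigmoid_np(x)
#   ref = -np.mean(z * np.log(p) + (1 - z) * np.log(1 - p))
#   assert abs(tf_ce(x, z) - ref) < 1e-6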
def get_batch_factor():
nr_gpu = get_nr_gpu()
assert nr_gpu in [1, 2, 4, 8], nr_gpu
return 8 // nr_gpu
def get_resnet_model_output_names():
return ['final_probs']
def oversample_2(data):
low = np.array([8,9,10,15,17,20,26,24,27])
data_low = []
for d in data:
true_label = np.arange(config.NUM_CLASS)[d[1]>0]
if np.any([a in low for a in true_label]):
data_low.append(d)
data_low.append(d)
data_low.append(d)
return data + data_low
def oversample(df):
df_orig = df.copy()
lows = [15,15,15,8,9,10,8,9,10,8,9,10,17,20,24,26,15,27,15,20,24,17,8,15,27,27,27]
for i in lows:
target = str(i)
indicies = df_orig.loc[df_orig['Target'] == target].index
df = pd.concat([df,df_orig.loc[indicies]], ignore_index=True)
indicies = df_orig.loc[df_orig['Target'].str.startswith(target+" ")].index
df = pd.concat([df,df_orig.loc[indicies]], ignore_index=True)
indicies = df_orig.loc[df_orig['Target'].str.endswith(" "+target)].index
df = pd.concat([df,df_orig.loc[indicies]], ignore_index=True)
indicies = df_orig.loc[df_orig['Target'].str.contains(" "+target+" ")].index
df = pd.concat([df,df_orig.loc[indicies]], ignore_index=True)
return df
def get_dataflow(is_train=True):
train_df = pd.read_csv(os.path.join('/data/kaggle/HPA', 'train.csv'))
#train_df = oversample(train_df)
labels = [[int(i) for i in s.split()] for s in train_df['Target']]
fnames = train_df['Id'].tolist()
fnames = [os.path.join(config.TRAIN_DATASET, f) for f in fnames]
sprase_label = [np.eye(config.NUM_CLASS, dtype=np.float)[np.array(la)].sum(axis=0) for la in labels]
extra_df = pd.read_csv(os.path.join('/data/kaggle/HPA', 'HPAv18RGBY_WithoutUncertain_wodpl.csv'))
#extra_df = oversample(extra_df)
extra_labels = [[int(i) for i in s.split()] for s in extra_df['Target']]
extra_labels = [np.eye(config.NUM_CLASS, dtype=np.float)[np.array(la)].sum(axis=0) for la in extra_labels]
extra_fnames = extra_df['Id'].tolist()
extra_fnames = [os.path.join(config.EXTRA_DATASET, f) for f in extra_fnames]
fnames = fnames + extra_fnames
sprase_label = sprase_label + extra_labels
fnames = np.array(fnames)
sprase_label = np.array(sprase_label)
msss = MultilabelStratifiedShuffleSplit(n_splits=1, test_size=0.15, random_state=42)
for train_index, test_index in msss.split(fnames, sprase_label):
x_train, x_test = fnames[train_index], fnames[test_index]
y_train, y_test = sprase_label[train_index], sprase_label[test_index]
holdout_data = list(zip(x_test, y_test))
# 5 fold the rest
mskf = MultilabelStratifiedKFold(n_splits=5, random_state=1)
for fold_num, (train_index, test_index) in enumerate(mskf.split(x_train, y_train)):
if fold_num == config.FOLD:
foldx_train, foldx_test = x_train[train_index], x_train[test_index]
foldy_train, foldy_test = y_train[train_index], y_train[test_index]
break
train_data = list(zip(foldx_train, foldy_train))
val_data = list(zip(foldx_test, foldy_test))
train_data = oversample_2(train_data)
pseudo_df = pd.read_csv(os.path.join('/data/kaggle/HPA', 'LB623.csv'))
pseudo_fnames = pseudo_df['Id'].tolist()
pseudo_fnames = [os.path.join(config.TEST_DATASET, f) for f in pseudo_fnames]
#pseudo_labels = np.load("./SOTA.npy")
#pseudo_labels = [np.array(_) for _ in pseudo_labels]
pseudo_labels = [[int(i) for i in s.split()] for s in pseudo_df['Predicted']]
pseudo_labels = [np.eye(config.NUM_CLASS, dtype=np.float)[np.array(la)].sum(axis=0) for la in pseudo_labels]
pseudo_data = list(zip(pseudo_fnames, pseudo_labels))
train_data = train_data + pseudo_data
print("train: ", len(train_data), len(val_data))
if not is_train:
return val_data
ds = DataFromList(train_data, shuffle=True)
ds = BatchData(MapData(ds, preprocess), config.BATCH)
ds = PrefetchDataZMQ(ds, 6)
return ds
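# Dataflow note (descriptive only): DataFromList shuffles the (filename, label)
# pairs, MapData applies `preprocess` (imported via utils; assumed to load and
# augment a single sample) to each element, BatchData groups config.BATCH
# samples, and PrefetchDataZMQ runs 6 worker processes so data loading overlaps
# with GPU training.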
class ResnetModel(ModelDesc):
def _get_inputs(self):
if config.RGB:
ret = [
InputDesc(tf.float32, (None, None, None, 3), 'image'),
InputDesc(tf.float32, (None, config.NUM_CLASS), 'labels'),
]
else:
ret = [
InputDesc(tf.float32, (None, None, None, 4), 'image'),
InputDesc(tf.float32, (None, config.NUM_CLASS), 'labels'),
]
return ret
def _build_graph(self, inputs):
is_training = get_current_tower_context().is_training
image, label = inputs
#tf.summary.image('viz', image, max_outputs=10)
image = image_preprocess(image, bgr=False)
#image = image * (1.0 / 255)
image = tf.transpose(image, [0, 3, 1, 2])
depth = config.RESNET_DEPTH
basicblock = preresnet_basicblock if config.RESNET_MODE == 'preact' else resnet_basicblock
bottleneck = {
'resnet': resnet_bottleneck,
'preact': preresnet_bottleneck,
'se': se_resnet_bottleneck}[config.RESNET_MODE]
num_blocks, block_func = {
18: ([2, 2, 2, 2], basicblock),
26: ([2, 2, 2, 2], bottleneck),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}[depth]
logits = get_logit(image, num_blocks, block_func)
if is_training:
loss = cls_loss(logits, label)
#wd_cost = regularize_cost(
# '.*/W',
# l2_regularizer(1e-4), name='wd_cost')
self.cost = tf.add_n([
loss], 'total_cost')
#add_moving_summary(self.cost)
else:
final_probs = tf.nn.sigmoid(logits, name="final_probs")
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.01, trainable=False)
tf.summary.scalar('learning_rate', lr)
print("get_nr_gpu", get_nr_gpu())
if config.BIG:
if config.ACC:
factor = 4
lr = lr / float(factor)
opt = tf.train.AdamOptimizer(lr)
opt = optimizer.AccumGradOptimizer(opt, factor)
else:
opt = tf.train.AdamOptimizer(lr, 0.9)
else:
#opt = tf.train.MomentumOptimizer(lr, 0.9)
opt = tf.train.AdamOptimizer(lr)
return opt
class ResnetEvalCallbackSimple(Callback):
def _setup_graph(self):
self.pred = self.trainer.get_predictor(
['image'],
get_resnet_model_output_names())
self.valid_ds = get_dataflow(is_train=False)
def _eval(self):
from tensorpack.utils.utils import get_tqdm_kwargs
valid_predictions = []
valid_y = []
valid_logits = []
th = 0.15
total_run = len(self.valid_ds) // config.INFERENCE_BATCH
total_run = total_run + 1 if len(self.valid_ds) % config.INFERENCE_BATCH !=0 else total_run
with tqdm.tqdm(total=total_run, **get_tqdm_kwargs()) as pbar:
for i in range(total_run):
start = i * config.INFERENCE_BATCH
end = start + config.INFERENCE_BATCH if start + config.INFERENCE_BATCH < len(self.valid_ds) else len(self.valid_ds)
data = self.valid_ds[start:end]
data = [preprocess(d, is_training=False) for d in data]
x = np.array([_[0] for _ in data])
y = np.array([_[1] for _ in data])
if len(x) == 0:
break
final_probs = self.pred(x)
valid_predictions.extend(final_probs[0])
valid_logits.extend(final_probs[0])
valid_y.extend(y)
#score += mapk(la, final_labels)
pbar.update()
valid_predictions = np.array(valid_predictions)
valid_y = np.array(valid_y)
valid_logits = np.array(valid_logits)
val_loss = tf_ce(valid_logits, valid_y)
F1_score_05 = calc_macro_f1(valid_predictions, valid_y, 0.5)
F1_score_015 = calc_macro_f1(valid_predictions, valid_y, 0.15)
F1_score_02 = calc_macro_f1(valid_predictions, valid_y, 0.2)
print('F1_score: {:.5f} {:.5f} {:.5f}'.format(F1_score_05, F1_score_015, F1_score_02))
self.trainer.monitors.put_scalar("F1_score", F1_score_015)
return F1_score_015
def _trigger_epoch(self):
interval = 10 if config.BIG else 5
if self.epoch_num % interval == 0:
self._eval() # go to _get_value_to_s
def flip_trans(im):
im = np.fliplr(im)
im = np.transpose(im, [1,0,2])
im = np.fliplr(im)
return im
def inference(pred, x_test, tta=['fliplr', 'rot90'], mode='test'):
with tqdm.tqdm(total=(len(x_test)) // config.INFERENCE_BATCH + 1) as pbar:
start = 0
end = 0
predictions = []
final_probs_tta = {}
for i in range(len(x_test) // config.INFERENCE_BATCH + 1):
start = i * config.INFERENCE_BATCH
end = start + config.INFERENCE_BATCH if start + config.INFERENCE_BATCH < len(x_test) else len(x_test)
x = x_test[start:end]
if (len(x) == 0):
break
if mode == 'test':
if config.BIG:
x = np.array([open_rgby_2048(img_id) for img_id in x])
else:
x = np.array([open_rgby(config.TEST_DATASET, img_id) for img_id in x])
else:
x = [preprocess(d, is_training=False) for d in x]
x = np.array([_[0] for _ in x])
final_probs = pred(x)
predictions.extend(final_probs[0])
if not tta:
pbar.update()
continue
for k in tta:
if k not in final_probs_tta:
final_probs_tta[k] = []
if k == 'fliplr':
x_prime = np.array([np.fliplr(_x) for _x in x])
final_probs = pred(x_prime)
elif k == 'flipud':
x_prime = np.array([np.flipud(_x) for _x in x])
final_probs = pred(x_prime)
elif k == 'rot90':
x_prime = np.array([np.rot90(_x) for _x in x])
final_probs = pred(x_prime)
elif k == 'rot180':
x_prime = np.array([np.rot90(_x, 2) for _x in x])
final_probs = pred(x_prime)
elif k == 'rot270':
                    x_prime = np.array([np.rot90(_x, 3) for _x in x])
                    final_probs = pred(x_prime)
# Copyright (c) 2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
FastMNMF2
=========
Blind Source Separation using Fast Multichannel Nonnegative Matrix Factorization 2 (FastMNMF2)
"""
import numpy as np
def fastmnmf2(
X,
n_src=None,
n_iter=30,
n_components=8,
mic_index=0,
W0=None,
accelerate=True,
callback=None,
):
"""
Implementation of FastMNMF2 algorithm presented in
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, *Fast Multichannel Nonnegative
Matrix Factorization With Directivity-Aware Jointly-Diagonalizable Spatial
Covariance Matrices for Blind Source Separation*, IEEE/ACM TASLP, 2020.
[`IEEE <https://ieeexplore.ieee.org/abstract/document/9177266>`_]
The code of FastMNMF2 with GPU support and more sophisticated initialization
is available on https://github.com/sekiguchi92/SoundSourceSeparation
Parameters
----------
X: ndarray (nframes, nfrequencies, nchannels)
STFT representation of the observed signal
n_src: int, optional
The number of sound sources (default None).
If None, n_src is set to the number of microphones
n_iter: int, optional
The number of iterations (default 30)
n_components: int, optional
Number of components in the non-negative spectrum (default 8)
mic_index: int or 'all', optional
The index of microphone of which you want to get the source image (default 0).
If 'all', return the source images of all microphones
W0: ndarray (nfrequencies, nchannels, nchannels), optional
Initial value for diagonalizer Q (default None).
If None, identity matrices are used for all frequency bins.
accelerate: bool, optional
If true, the basis and activation of NMF are updated simultaneously (default True)
callback: func, optional
A callback function called every 10 iterations, allows to monitor convergence
Returns
-------
If mic_index is int, returns an (nframes, nfrequencies, nsources) array.
If mic_index is 'all', returns an (nchannels, nframes, nfrequencies, nsources) array.
"""
eps = 1e-10
g_eps = 5e-2
interval_update_Q = 1 # 2 may work as well and is faster
interval_normalize = 10
TYPE_FLOAT = X.real.dtype
TYPE_COMPLEX = X.dtype
# initialize parameter
X_FTM = X.transpose(1, 0, 2)
n_freq, n_frames, n_chan = X_FTM.shape
XX_FTMM = np.matmul(X_FTM[:, :, :, None], X_FTM[:, :, None, :].conj())
if n_src is None:
n_src = X_FTM.shape[2]
if W0 is not None:
Q_FMM = W0
else:
Q_FMM = np.tile(np.eye(n_chan).astype(TYPE_COMPLEX), [n_freq, 1, 1])
g_NM = np.ones([n_src, n_chan], dtype=TYPE_FLOAT) * g_eps
for m in range(n_chan):
g_NM[m % n_src, m] = 1
for m in range(n_chan):
mu_F = (Q_FMM[:, m] * Q_FMM[:, m].conj()).sum(axis=1).real
Q_FMM[:, m] /= np.sqrt(mu_F[:, None])
H_NKT = np.random.rand(n_src, n_components, n_frames).astype(TYPE_FLOAT)
W_NFK = np.random.rand(n_src, n_freq, n_components).astype(TYPE_FLOAT)
lambda_NFT = W_NFK @ H_NKT
Qx_power_FTM = np.abs(np.einsum("fij, ftj -> fti", Q_FMM, X_FTM)) ** 2
Y_FTM = np.einsum("nft, nm -> ftm", lambda_NFT, g_NM)
def separate():
Qx_FTM = np.einsum("fij, ftj -> fti", Q_FMM, X_FTM)
Qinv_FMM = np.linalg.inv(Q_FMM)
Y_NFTM = np.einsum("nft, nm -> nftm", lambda_NFT, g_NM)
if mic_index == "all":
return np.einsum(
"fij, ftj, nftj -> itfn", Qinv_FMM, Qx_FTM / Y_NFTM.sum(axis=0), Y_NFTM
)
elif type(mic_index) is int:
return np.einsum(
"fj, ftj, nftj -> tfn",
Qinv_FMM[:, mic_index],
Qx_FTM / Y_NFTM.sum(axis=0),
Y_NFTM,
)
else:
raise ValueError("mic_index should be int or 'all'")
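    # separate() above applies, per frequency f and frame t, the rank-1
    # diagonalized multichannel Wiener filter: the observation is mapped into
    # the diagonalized domain by Q, each source n is extracted with the
    # per-channel gain lambda_n(f,t) g_n / sum_k lambda_k(f,t) g_k, and the
    # result is mapped back with Q^{-1}; mic_index selects which channel of the
    # reconstructed source image is returned.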
# update parameters
for epoch in range(n_iter):
if callback is not None and epoch % 10 == 0:
callback(separate())
# update W and H (basis and activation of NMF)
tmp1_NFT = np.einsum("nm, ftm -> nft", g_NM, Qx_power_FTM / (Y_FTM**2))
tmp2_NFT = np.einsum("nm, ftm -> nft", g_NM, 1 / Y_FTM)
numerator = np.einsum("nkt, nft -> nfk", H_NKT, tmp1_NFT)
denominator = np.einsum("nkt, nft -> nfk", H_NKT, tmp2_NFT)
W_NFK *= np.sqrt(numerator / denominator)
if not accelerate:
tmp1_NFT = np.einsum("nm, ftm -> nft", g_NM, Qx_power_FTM / (Y_FTM**2))
tmp2_NFT = np.einsum("nm, ftm -> nft", g_NM, 1 / Y_FTM)
lambda_NFT = W_NFK @ H_NKT + eps
Y_FTM = np.einsum("nft, nm -> ftm", lambda_NFT, g_NM) + eps
numerator = np.einsum("nfk, nft -> nkt", W_NFK, tmp1_NFT)
denominator = np.einsum("nfk, nft -> nkt", W_NFK, tmp2_NFT)
H_NKT *= np.sqrt(numerator / denominator)
lambda_NFT = W_NFK @ H_NKT + eps
Y_FTM = np.einsum("nft, nm -> ftm", lambda_NFT, g_NM) + eps
# update g_NM (diagonal element of spatial covariance matrices)
numerator = np.einsum("nft, ftm -> nm", lambda_NFT, Qx_power_FTM / (Y_FTM**2))
denominator = np.einsum("nft, ftm -> nm", lambda_NFT, 1 / Y_FTM)
g_NM *= np.sqrt(numerator / denominator)
Y_FTM = np.einsum("nft, nm -> ftm", lambda_NFT, g_NM) + eps
        # update Q (joint diagonalizer)
if (interval_update_Q <= 0) or (epoch % interval_update_Q == 0):
for m in range(n_chan):
V_FMM = (
np.einsum("ftij, ft -> fij", XX_FTMM, 1 / Y_FTM[..., m]) / n_frames
)
tmp_FM = np.linalg.solve(
                    np.matmul(Q_FMM, V_FMM), np.eye(n_chan)[None, :, m]
                )
import numpy as np
import matplotlib.pyplot as plt
from multilayer_perceptron import MLP
from gradient_boosting_decision_tree import GBDT
from xgboost import XGBoost
from random_forest import RandomForest
from adaboost import AdaBoost
from factorization_machines import FactorizationMachines
from support_vector_machine import SVM
from k_nearest_neighbor import kNearestNeighbor
def gen_linear(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, (x.sum(axis=1) > 0) * 1
def gen_circle(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, (np.square(x).sum(axis=1) > 0.6) * 1
def gen_xor(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, np.array([(xi[0] * xi[1] > 0) for xi in x]) * 1
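# Usage sketch (the constructor/fit signatures of the from-scratch models
# imported above are assumptions, shown only to illustrate the intent of these
# toy generators): each gen_* function returns points in [-1, 1]^2 together
# with binary labels, suitable for fitting and plotting any of the classifiers.
#
#   x, y = gen_xor(200)
#   clf = MLP()        # hypothetical default constructor
#   clf.fit(x, y)      # hypothetical API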
def gen_spiral(train_num):
    r = 0.8 * np.arange(train_num)
import colorgame
import numpy as np
import torch.optim as optim
import torch.nn as nn
import torch
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class Network(nn.Module):
def __init__(self, num_in_positions, num_actions, num_cols, num_rows, batch_size):
super(Network, self).__init__()
self.fc1 = nn.Linear(num_in_positions, 128)
self.fc2 = nn.Linear(128, 350, bias = True)
self.fc3 = nn.Linear(350, 600, bias = True)
self.fc4 = nn.Linear(600, 350, bias = True)
self.fc5 = nn.Linear(350, num_actions)
def forward(self, x):
x = nn.functional.leaky_relu(self.fc1(x))
x = nn.functional.leaky_relu(self.fc2(x))
x = nn.functional.leaky_relu(self.fc3(x))
x = nn.functional.leaky_relu(self.fc4(x))
x = self.fc5(x)
return x
class ColorDQN:
def __init__(self, num_in_positions, num_actions, num_cols, num_rows):
self.num_actions = num_actions
self.num_in_positions = num_in_positions
self.model = Network(num_in_positions, num_actions, num_cols, num_rows,1)
self.target_model = Network(num_in_positions, num_actions, num_cols, num_rows, 32)
learning_rate = 0.000134
self.optimizer = optim.Adam(self.model.parameters() ,lr = learning_rate)
self.criterion = nn.MSELoss()
self.name = 0
self.batch_size = 32
self.experience = {'prev_obs' : [], 'a' : [], 'r': [], 'obs' : [], 'done': [] }
self.min_exp = 100
self.max_exp = 320
self.gamma = 0.90
def predict(self, inputs):
x = torch.from_numpy(inputs).float().flatten()
x = x[None,:]
return self.model(x)
def predict_batch(self, inputs):
x = torch.from_numpy(inputs).float().flatten()
x = x.view(self.batch_size, self.num_in_positions)
return self.model(x)
def target_predict(self, inputs):
x = torch.from_numpy(inputs).float()
x = x.view(self.batch_size, self.num_in_positions)
return self.target_model(x)
def get_action(self, state, epsilon):
        if np.random.random() < epsilon:
            # epsilon-greedy action selection (assumed completion): explore
            # with probability epsilon, otherwise act greedily on the network
            return np.random.randint(self.num_actions)
        return int(torch.argmax(self.predict(state)))
from abstract_esn import AbstractESN
import numpy as np
from pathlib import Path
import signalz
path = Path('./results/mackey/noisy')
def mean_squared_error(y_true, y_pred):
try:
return np.mean(np.abs((y_true - y_pred)**2))
except:
return -1
def mean_absolute_percentage_error(y_true, y_pred):
try:
        return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
    except:
        return -1
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from jax import vmap
class BinaryFA:
def __init__(self, input_dim, latent, max_iter, conv_tol=1e-4, compute_ll=True):
self.W = 0.1 * np.random.randn(latent, input_dim) # 2x16
self.b = 0.01 * np.random.randn(input_dim, 1) # 16x1
self.mu_prior = np.zeros((latent,1)) # 2x1
self.sigma_prior = np.eye(latent) # 2x2
self.input_dim = input_dim
self.latent = latent
self.max_iter = max_iter
        self.compute_ll = compute_ll
        self.conv_tol = conv_tol
if compute_ll :
self.ll_hist = np.zeros((max_iter + 1, 1)) # 51x1
def variational_em(self, data):
ll_hist = np.zeros((self.max_iter + 1, 1))
i = 0
while i < 3:
S1, S2, ll = self.estep(data)
ll_hist[i,0] = ll
self.mstep(S1, S2)
if i!=0:
delta_fval = abs(ll_hist[i] - ll_hist[i-1])
avg_fval = (abs(ll_hist[i]) + abs(ll_hist[i-1]) + np.finfo(float).eps)/2
                if (delta_fval / avg_fval) < self.conv_tol:
break
i += 1
return ll_hist[:i]
def estep(self, data):
S1 = np.zeros((self.latent + 1, self.input_dim)) # 3x16
S2 = np.zeros((self.latent + 1, self.latent + 1, self.input_dim)) # 3x3x16
W, b, mu_prior = self.W , self.b, self.mu_prior
ll = 0
for i in range(data.T.shape[1]):
mu_post, sigma_post, logZ, lambd = self.compute_latent_posterior_statistics(data.T[:,i], max_iter=3)
ll += logZ
            EZZ = np.zeros((self.latent+1, self.latent+1))
from copy import copy
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import linalg as LA
from scipy.sparse import linalg as las
from scipy.signal import lti
from scipy.signal import lsim
from opentorsion.disk_element import Disk
from opentorsion.shaft_element import Shaft
from opentorsion.gear_element import Gear
# from opentorsion.induction_motor import Induction_motor
from opentorsion.errors import DOF_mismatch_error
class Assembly:
"""Powertrain assembly"""
def __init__(
self,
shaft_elements,
disk_elements=None,
gear_elements=None,
motor_elements=None,
):
## Initiate shaft elements
if shaft_elements is None:
raise DOF_mismatch_error("Shaft elements == None")
self.shaft_elements = None
else:
self.shaft_elements = [
copy(shaft_element) for shaft_element in shaft_elements
]
## Initiate gear elements
if gear_elements is None:
self.gear_elements = None
else:
self.gear_elements = [copy(gear_element) for gear_element in gear_elements]
## Initiate motor elements
if motor_elements is None:
self.motor_elements = None
else:
self.motor_elements = [
copy(motor_element) for motor_element in motor_elements
]
self.disk_elements = disk_elements
self.dofs = self._check_dof()
def __repr__(self):
pass
def __str__(self):
return f"rotor"
def M(self):
"""Assembles the mass matrix"""
M = np.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dofs = np.array([element.nl, element.nr])
M[np.ix_(dofs, dofs)] += element.M()
if self.disk_elements is not None:
for element in self.disk_elements:
M[element.node, element.node] += element.M()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dof = np.array([element.nl, element.nr])
# M[np.ix_(dof, dof)] += element.M()
if self.gear_elements is not None:
for element in self.gear_elements:
M[element.node, element.node] += element.M()
# Build transformation matrix
E = self.E()
transform = self.T(E)
# Calculate transformed mass matrix
M = np.dot(np.dot(transform.T, M), transform)
return M
def K(self):
"""Assembles the stiffness matrix"""
K = np.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dofs = np.array([element.nl, element.nr])
K[np.ix_(dofs, dofs)] += element.K()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dofs = np.array([element.nl, element.nr])
# K[np.ix_(dofs, dofs)] += element.K()
if self.gear_elements is not None:
# Build transformation matrix
E = self.E()
transform = self.T(E)
            # Calculate transformed stiffness matrix
K = np.dot(np.dot(transform.T, K), transform)
# print(K)
return K
def C(self):
"""Assembles the damping matrix"""
C = np.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dof = np.array([element.nl, element.nr])
C[np.ix_(dof, dof)] += element.C()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dof = np.array([element.nl, element.nr])
# C[np.ix_(dof, dof)] += element.C()
if self.disk_elements is not None:
for element in self.disk_elements:
C[element.node, element.node] += element.C()
if self.gear_elements is not None:
for element in self.gear_elements:
C[element.node, element.node] += element.C()
# Build transformation matrix
E = self.E()
transform = self.T(E)
        # Calculate transformed damping matrix
C = np.dot(np.dot(transform.T, C), transform)
return C
def E(self):
"""Assembles the gear constraint matrix"""
stages = []
for gear in self.gear_elements:
if gear.stages is not None:
stages += gear.stages
E = np.zeros([self.dofs, len(stages)])
for i, stage in enumerate(stages):
E[stage[0][0]][i] += stage[0][1]
E[stage[1][0]][i] += stage[1][1]
try:
E[stage[2][0]][i] += stage[2][1]
except:
pass
return E
def state_matrix(self):
"""Assembles the state-space matrices"""
M, K, C = self.M(), self.K(), self.C()
Z = np.zeros(M.shape, dtype=np.float64)
if self.motor_elements is not None:
motor = self.motor_elements[0]
if motor.small_signal: # Different versions for linear and nonlinear models
R, L = motor.R_linear(), motor.L_linear()
else:
R, L = motor.R(), motor.L()
A = np.zeros((self.dofs * 2 + 4, self.dofs * 2 + 4))
B = np.zeros(A.shape)
dof = np.array([0, 1, 2, 3, 4])
A[np.ix_(dof, dof)] += R
B[np.ix_(dof, dof)] += L
K_m = np.vstack([np.hstack([C, K]), np.hstack([-M, Z])])
M_m = np.vstack([np.hstack([M, Z]), np.hstack([Z, M])])
dof = np.array(range(4, self.dofs * 2 + 4))
A[np.ix_(dof, dof)] += K_m
B[np.ix_(dof, dof)] += M_m
else:
A = np.vstack([np.hstack([C, K]), np.hstack([-M, Z])])
B = np.vstack([np.hstack([M, Z]), np.hstack([Z, M])])
# Solved versions
# A = np.vstack([
# np.hstack([LA.solve(-M, C), LA.solve(-M, K)]),
# np.hstack([I, Z]) # ])
# B = np.vstack([M_inv, Z])
# np.set_printoptions(suppress=True)
# print(A)
return A, B
def modal_analysis(self):
"""Calculates the eigenvalues and eigenfrequencies of the assembly"""
A, B = self.state_matrix()
lam, vec = self._eig(A, B)
# Sort and delete complex conjugates
omegas = np.sort(np.absolute(lam))
omegas_damped = np.sort(np.abs(np.imag(lam)))
freqs = omegas / (2 * np.pi)
damping_ratios = -np.real(lam) / (np.absolute(lam))
return omegas_damped, freqs, damping_ratios
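    # Usage sketch (the element parameters below are invented for illustration;
    # see the Shaft/Disk classes for their exact constructor arguments):
    #
    #   shafts = [Shaft(0, 1, L=400., odl=80.), Shaft(1, 2, L=500., odl=100.)]
    #   disks = [Disk(0, I=100.), Disk(2, I=80.)]
    #   assembly = Assembly(shafts, disk_elements=disks)
    #   omegas_damped, freqs, zetas = assembly.modal_analysis()
    #
    # modal_analysis returns damped angular frequencies (rad/s), natural
    # frequencies (Hz) and damping ratios, each sorted in ascending order.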
def _eig(self, A, B):
"""Solves the eigenvalues of the state space matrix using ARPACK"""
lam, vec = LA.eig(A, B)
return lam, vec
def _check_dof(self):
"""Returns the number of degrees of freedom in the model"""
nodes = set()
if self.shaft_elements is not None:
for element in self.shaft_elements:
nodes.add(element.nl)
nodes.add(element.nr)
if self.disk_elements is not None:
for element in self.disk_elements:
nodes.add(element.node)
if self.gear_elements is not None:
for element in self.gear_elements:
nodes.add(element.node)
if self.motor_elements is not None:
for element in self.motor_elements:
nodes.add(element.n)
return max(nodes) + 1
def T(self, E):
"""Method for determining gear constraint transformation matrix"""
r, c = E.shape
T = np.eye(r)
for i in range(c):
E_i = np.dot(T.T, E)
# (1) Set T_i = I(n+1) (The identity matrix of dimension (n_i + 1))
T_i = np.eye(r)
# (2) Define k as the position of the entry having the largest absolute value in the ith column of E_i-1
            k = np.argmax(np.abs(E_i[:, i]))
import numpy
import numpy.matlib
import copy
import pandas
import wave
import struct
import os
import math
import ctypes
import multiprocessing
import warnings
import scipy
from scipy import ndimage
import scipy.stats as stats
from scipy.fftpack import fft
from scipy.signal import decimate
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
def read_sph(input_file_name, mode='p'):
"""
Read a SPHERE audio file
:param input_file_name: name of the file to read
:param mode: specifies the following (\* =default)
.. note::
- Scaling:
- 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
- 'r' Raw unscaled data (integer values)
- 'p' Scaled to make +-1 equal full scale
- 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values,
can be combined with n+p,r,s modes)
- 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values,
can be combined with o+p,r,s modes)
- Format
- 'l' Little endian data (Intel,DEC) (overrides indication in file)
- 'b' Big endian data (non Intel/DEC) (overrides indication in file)
- File I/O
- 'f' Do not close file on exit
- 'd' Look in data directory: voicebox('dir_data')
- 'w' Also read the annotation file \*.wrd if present (as in TIMIT)
- 't' Also read the phonetic transcription file \*.phn if present (as in TIMIT)
- NMAX maximum number of samples to read (or -1 for unlimited [default])
- NSKIP number of samples to skip from start of file (or -1 to continue from previous read when FFX
is given instead of FILENAME [default])
:return: a tupple such that (Y, FS)
.. note::
- Y data matrix of dimension (samples,channels)
- FS sample frequency in Hz
- WRD{\*,2} cell array with word annotations: WRD{\*,:)={[t_start t_end],'text'} where times are in seconds
only present if 'w' option is given
- PHN{\*,2} cell array with phoneme annotations: PHN{\*,:)={[t_start t_end],'phoneme'} where times
are in seconds only present if 't' option is present
- FFX Cell array containing
1. filename
2. header information
1. first header field name
2. first header field value
3. format string (e.g. NIST_1A)
4.
1. file id
2. current position in file
3. dataoff byte offset in file to start of data
4. order byte order (l or b)
5. nsamp number of samples
6. number of channels
7. nbytes bytes per data value
8. bits number of bits of precision
9. fs sample frequency
10. min value
11. max value
12. coding 0=PCM,1=uLAW + 0=no compression, 0=shorten,20=wavpack,30=shortpack
13. file not yet decompressed
5. temporary filename
If no output parameters are specified,
header information will be printed.
The code to decode shorten-encoded files, is
not yet released with this toolkit.
"""
codings = dict([('pcm', 1), ('ulaw', 2)])
compressions = dict([(',embedded-shorten-', 1),
(',embedded-wavpack-', 2),
(',embedded-shortpack-', 3)])
byteorder = 'l'
endianess = dict([('l', '<'), ('b', '>')])
if not mode == 'p':
mode = [mode, 'p']
k = list((m >= 'p') & (m <= 's') for m in mode)
# scale to input limits not output limits
mno = all([m != 'o' for m in mode])
sc = ''
if k[0]:
sc = mode[0]
# Get byte order (little/big endian)
if any([m == 'l' for m in mode]):
byteorder = 'l'
elif any([m == 'b' for m in mode]):
byteorder = 'b'
ffx = ['', '', '', '', '']
if isinstance(input_file_name, str):
if os.path.exists(input_file_name):
fid = open(input_file_name, 'rb')
elif os.path.exists("".join((input_file_name, '.sph'))):
input_file_name = "".join((input_file_name, '.sph'))
fid = open(input_file_name, 'rb')
else:
raise Exception('Cannot find file {}'.format(input_file_name))
ffx[0] = input_file_name
elif not isinstance(input_file_name, str):
ffx = input_file_name
else:
fid = input_file_name
# Read the header
if ffx[3] == '':
fid.seek(0, 0) # go to the begining of the file
l1 = fid.readline().decode("utf-8")
l2 = fid.readline().decode("utf-8")
if not (l1 == 'NIST_1A\n') & (l2 == ' 1024\n'):
logging.warning('File does not begin with a SPHERE header')
ffx[2] = l1.rstrip()
hlen = int(l2[3:7])
hdr = {}
while True: # Read the header and fill a dictionary
st = fid.readline().decode("utf-8").rstrip()
if st[0] != ';':
elt = st.split(' ')
if elt[0] == 'end_head':
break
if elt[1][0] != '-':
logging.warning('Missing ''-'' in SPHERE header')
break
if elt[1][1] == 's':
hdr[elt[0]] = elt[2]
elif elt[1][1] == 'i':
hdr[elt[0]] = int(elt[2])
else:
hdr[elt[0]] = float(elt[2])
if 'sample_byte_format' in list(hdr.keys()):
if hdr['sample_byte_format'][0] == '0':
bord = 'l'
else:
bord = 'b'
if (bord != byteorder) & all([m != 'b' for m in mode]) \
& all([m != 'l' for m in mode]):
byteorder = bord
icode = 0 # Get encoding, default is PCM
if 'sample_coding' in list(hdr.keys()):
icode = -1 # unknown code
for coding in list(codings.keys()):
if hdr['sample_coding'].startswith(coding):
# is the signal compressed
# if len(hdr['sample_coding']) > codings[coding]:
if len(hdr['sample_coding']) > len(coding):
for compression in list(compressions.keys()):
if hdr['sample_coding'].endswith(compression):
icode = 10 * compressions[compression] \
+ codings[coding] - 1
break
else: # if the signal is not compressed
icode = codings[coding] - 1
break
# initialize info of the files with default values
info = [fid, 0, hlen, ord(byteorder), 0, 1, 2, 16, 1, 1, -1, icode]
# Get existing info from the header
if 'sample_count' in list(hdr.keys()):
info[4] = hdr['sample_count']
if not info[4]: # if no info sample_count or zero
# go to the end of the file
fid.seek(0, 2) # Go to te end of the file
# get the sample count
info[4] = int(math.floor((fid.tell() - info[2]) / (info[5] * info[6]))) # get the sample_count
if 'channel_count' in list(hdr.keys()):
info[5] = hdr['channel_count']
if 'sample_n_bytes' in list(hdr.keys()):
info[6] = hdr['sample_n_bytes']
if 'sample_sig_bits' in list(hdr.keys()):
info[7] = hdr['sample_sig_bits']
if 'sample_rate' in list(hdr.keys()):
info[8] = hdr['sample_rate']
if 'sample_min' in list(hdr.keys()):
info[9] = hdr['sample_min']
if 'sample_max' in list(hdr.keys()):
info[10] = hdr['sample_max']
ffx[1] = hdr
ffx[3] = info
info = ffx[3]
ksamples = info[4]
if ksamples > 0:
fid = info[0]
if (icode >= 10) & (ffx[4] == ''): # read compressed signal
# need to use a script with SHORTEN
raise Exception('compressed signal, need to unpack in a script with SHORTEN')
info[1] = ksamples
# use modes o and n to determine effective peak
pk = 2 ** (8 * info[6] - 1) * (1 + (float(mno) / 2 - int(all([m != 'b'
for m in
mode]))) / 2 **
info[7])
fid.seek(1024) # jump after the header
nsamples = info[5] * ksamples
if info[6] < 3:
if info[6] < 2:
logging.debug('Sphere i1 PCM')
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
if info[11] % 10 == 1:
if y.shape[0] % 2:
y = numpy.frombuffer(audioop.ulaw2lin(
numpy.concatenate((y, numpy.zeros(1, 'int8'))), 2),
numpy.int16)[:-1]/32768.
else:
y = numpy.frombuffer(audioop.ulaw2lin(y, 2), numpy.int16)/32768.
pk = 1.
else:
y = y - 128
else:
logging.debug('Sphere i2')
y = numpy.fromfile(fid, endianess[byteorder]+"i2", -1)
else: # non verifie
if info[6] < 4:
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
y = y.reshape(nsamples, 3).transpose()
y = (numpy.dot(numpy.array([1, 256, 65536]), y) - (numpy.dot(y[2, :], 2 ** (-7)).astype(int) * 2 ** 24))
else:
y = numpy.fromfile(fid, endianess[byteorder]+"i4", -1)
if sc != 'r':
if sc == 's':
if info[9] > info[10]:
info[9] = numpy.min(y)
info[10] = numpy.max(y)
sf = 1 / numpy.max(list(list(map(abs, info[9:11]))), axis=0)
else:
sf = 1 / pk
y = sf * y
if info[5] > 1:
y = y.reshape(ksamples, info[5])
else:
y = numpy.array([])
if mode != 'f':
fid.close()
info[0] = -1
if not ffx[4] == '':
pass # VERIFY SCRIPT, WHICH CASE IS HANDLED HERE
return y.astype(numpy.float32), int(info[8]), int(info[6])
def read_wav(input_file_name):
"""
:param input_file_name:
:return:
"""
wfh = wave.open(input_file_name, "r")
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wfh.getparams()
raw = wfh.readframes(nframes * nchannels)
out = struct.unpack_from("%dh" % nframes * nchannels, raw)
sig = numpy.reshape(numpy.array(out), (-1, nchannels)).squeeze()
wfh.close()
return sig.astype(numpy.float32), framerate, sampwidth
def read_pcm(input_file_name):
"""Read signal from single channel PCM 16 bits
:param input_file_name: name of the PCM file to read.
:return: the audio signal read from the file in a ndarray encoded on 16 bits, None and 2 (depth of the encoding in bytes)
"""
with open(input_file_name, 'rb') as f:
f.seek(0, 2) # Go to te end of the file
# get the sample count
sample_count = int(f.tell() / 2)
f.seek(0, 0) # got to the begining of the file
data = numpy.asarray(struct.unpack('<' + 'h' * sample_count, f.read()))
return data.astype(numpy.float32), None, 2
def read_audio(input_file_name, framerate=None):
""" Read a 1 or 2-channel audio file in SPHERE, WAVE or RAW PCM format.
The format is determined from the file extension.
If the sample rate read from the file is a multiple of the one given
as parameter, we apply a decimation function to subsample the signal.
:param input_file_name: name of the file to read from
:param framerate: frame rate, optional, if lower than the one read from the file, subsampling is applied
:return: the signal as a numpy array and the sampling frequency
"""
if framerate is None:
raise TypeError("Expected sampling frequency required in sidekit.frontend.io.read_audio")
ext = os.path.splitext(input_file_name)[-1]
if ext.lower() == '.sph':
sig, read_framerate, sampwidth = read_sph(input_file_name, 'p')
elif ext.lower() == '.wav' or ext.lower() == '.wave':
sig, read_framerate, sampwidth = read_wav(input_file_name)
elif ext.lower() == '.pcm' or ext.lower() == '.raw':
sig, read_framerate, sampwidth = read_pcm(input_file_name)
read_framerate = framerate
else:
raise TypeError("Unknown extension of audio file")
# Convert to 16 bit encoding if needed
sig *= (2**(15-sampwidth))
if framerate > read_framerate:
print("Warning in read_audio, up-sampling function is not implemented yet!")
elif read_framerate % float(framerate) == 0 and not framerate == read_framerate:
print("downsample")
sig = decimate(sig, int(read_framerate / float(framerate)), n=None, ftype='iir', axis=0)
return sig.astype(numpy.float32), framerate
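# Example (sketch; the file name is hypothetical):
#
#   sig, fs = read_audio("utterance.sph", framerate=8000)
#
# The extension selects the reader (.sph, .wav/.wave or .pcm/.raw), the signal
# is rescaled according to its sample width, and if the file's native rate is
# an integer multiple of the requested framerate the signal is decimated.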
def rasta_filt(x):
"""Apply RASTA filtering to the input signal.
:param x: the input audio signal to filter.
cols of x = critical bands, rows of x = frame
same for y but after filtering
default filter is single pole at 0.94
"""
x = x.T
numerator = numpy.arange(.2, -.3, -.1)
denominator = numpy.array([1, -0.94])
# Initialize the state. This avoids a big spike at the beginning
# resulting from the dc offset level in each band.
# (this is effectively what rasta/rasta_filt.c does).
# Because Matlab uses a DF2Trans implementation, we have to
# specify the FIR part to get the state right (but not the IIR part)
y = numpy.zeros(x.shape)
zf = numpy.zeros((x.shape[0], 4))
for i in range(y.shape[0]):
y[i, :4], zf[i, :4] = lfilter(numerator, 1, x[i, :4], axis=-1, zi=[0, 0, 0, 0])
# .. but don't keep any of these values, just output zero at the beginning
y = numpy.zeros(x.shape)
# Apply the full filter to the rest of the signal, append it
for i in range(y.shape[0]):
y[i, 4:] = lfilter(numerator, denominator, x[i, 4:], axis=-1, zi=zf[i, :])[0]
return y.T
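# The filtering above implements the standard RASTA band-pass filter with
# transfer function
#   H(z) = (0.2 + 0.1 z^-1 - 0.1 z^-3 - 0.2 z^-4) / (1 - 0.94 z^-1)
# applied to each critical band (row) independently; the first four samples are
# used only to initialise the filter state and the corresponding output is set
# to zero, avoiding a start-up spike from the DC offset of each band.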
def cms(features, label=None, global_mean=None):
"""Performs cepstral mean subtraction
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: a logical vector
:param global_mean: pre-computed mean to use for feature normalization if given
:return: a feature stream
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if label.sum() == 0:
mu = numpy.zeros((features.shape[1]))
if global_mean is not None:
mu = global_mean
else:
mu = numpy.mean(features[label, :], axis=0)
features -= mu
def cmvn(features, label=None, global_mean=None, global_std=None):
"""Performs mean and variance normalization
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param global_mean: pre-computed mean to use for feature normalization if given
:param global_std: pre-computed standard deviation to use for feature normalization if given
:param label: a logical verctor
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if global_mean is not None and global_std is not None:
mu = global_mean
stdev = global_std
features -= mu
features /= stdev
elif not label.sum() == 0:
mu = numpy.mean(features[label, :], axis=0)
stdev = numpy.std(features[label, :], axis=0)
features -= mu
features /= stdev
def stg(features, label=None, win=301):
"""Performs feature warping on a sliding window
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
    :param label: label of selected frames to compute the Short Term Gaussianization, by default, all frames are used
    :param win: size of the frame window to consider, must be an odd number to get a symmetric context on left and right
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
speech_features = features[label, :]
add_a_feature = False
if win % 2 == 1:
# one feature per line
nframes, dim = numpy.shape(speech_features)
# If the number of frames is not enough for one window
if nframes < win:
# if the number of frames is not odd, duplicate the last frame
# if nframes % 2 == 1:
if not nframes % 2 == 1:
nframes += 1
add_a_feature = True
speech_features = numpy.concatenate((speech_features, [speech_features[-1, ]]))
win = nframes
# create the output feature stream
stg_features = numpy.zeros(numpy.shape(speech_features))
# Process first window
r = numpy.argsort(speech_features[:win, ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[: (win - 1) // 2] + 0.5) / win
        stg_features[: (win - 1) // 2, :] = stats.norm.ppf(arg, 0, 1)
# process all following windows except the last one
for m in range(int((win - 1) / 2), int(nframes - (win - 1) / 2)):
idx = list(range(int(m - (win - 1) / 2), int(m + (win - 1) / 2 + 1)))
foo = speech_features[idx, :]
            r = numpy.sum(foo < foo[(win - 1) // 2], axis=0) + 1
arg = (r - 0.5) / win
stg_features[m, :] = stats.norm.ppf(arg, 0, 1)
# Process the last window
r = numpy.argsort(speech_features[list(range(nframes - win, nframes)), ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[(win + 1) // 2: win, :] + 0.5) / win
stg_features[list(range(int(nframes - (win - 1) / 2), nframes)), ] = stats.norm.ppf(arg, 0, 1)
else:
# Raise an exception
raise Exception('Sliding window should have an odd length')
# wrapFeatures = np.copy(features)
if add_a_feature:
stg_features = stg_features[:-1]
features[label, :] = stg_features
def cep_sliding_norm(features, win=301, label=None, center=True, reduce=False):
"""
    Performs cepstral mean subtraction and standard deviation normalization
    over a sliding window. The MFCC stream is modified in place.
:param features: the MFCC, a numpy array
:param win: the size of the sliding windows
:param label: vad label if available
:param center: performs mean subtraction
:param reduce: performs standard deviation division
"""
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if numpy.sum(label) <= win:
if reduce:
cmvn(features, label)
else:
cms(features, label)
else:
d_win = win // 2
df = pandas.DataFrame(features[label, :])
r = df.rolling(window=win, center=True)
mean = r.mean().values
std = r.std().values
mean[0:d_win, :] = mean[d_win, :]
mean[-d_win:, :] = mean[-d_win-1, :]
std[0:d_win, :] = std[d_win, :]
std[-d_win:, :] = std[-d_win-1, :]
if center:
features[label, :] -= mean
if reduce:
features[label, :] /= std
def pre_emphasis(input_sig, pre):
"""Pre-emphasis of an audio signal.
:param input_sig: the input vector of signal to pre emphasize
:param pre: value that defines the pre-emphasis filter.
"""
if input_sig.ndim == 1:
return (input_sig - numpy.c_[input_sig[numpy.newaxis, :][..., :1],
input_sig[numpy.newaxis, :][..., :-1]].squeeze() * pre)
else:
return input_sig - numpy.c_[input_sig[..., :1], input_sig[..., :-1]] * pre
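# Worked sketch (illustrative only): pre_emphasis computes y[n] = x[n] - pre * x[n-1], where the
# first sample is compared against itself, so y[0] = x[0] * (1 - pre).
def _example_pre_emphasis():
    import numpy
    x = numpy.array([1.0, 2.0, 3.0, 4.0])
    y = pre_emphasis(x, 0.97)
    # expected: [1 * 0.03, 2 - 0.97 * 1, 3 - 0.97 * 2, 4 - 0.97 * 3] = [0.03, 1.03, 1.06, 1.09]
    return numpy.allclose(y, [0.03, 1.03, 1.06, 1.09])  # True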
"""Generate a new array that chops the given array along the given axis
into overlapping frames.
This method has been implemented by <NAME>,
as part of the talk box toolkit
example::
segment_axis(arange(10), 4, 2)
array([[0, 1, 2, 3],
               [2, 3, 4, 5],
[4, 5, 6, 7],
[6, 7, 8, 9]])
:param a: the array to segment
:param length: the length of each frame
:param overlap: the number of array elements by which the frames should overlap
:param axis: the axis to operate on; if None, act on the flattened array
:param end: what to do with the last frame, if the array is not evenly
divisible into pieces. Options are:
- 'cut' Simply discard the extra values
- 'wrap' Copy values from the beginning of the array
- 'pad' Pad with a constant value
:param endvalue: the value to use for end='pad'
:return: a ndarray
The array is not copied unless necessary (either because it is unevenly
strided and being flattened or because end is set to 'pad' or 'wrap').
"""
if axis is None:
a = numpy.ravel(a) # may copy
axis = 0
l = a.shape[axis]
if overlap >= length:
raise ValueError("frames cannot overlap by more than 100%")
if overlap < 0 or length <= 0:
raise ValueError("overlap must be nonnegative and length must" +
"be positive")
if l < length or (l - length) % (length - overlap):
if l > length:
roundup = length + (1 + (l - length) // (length - overlap)) * (length - overlap)
rounddown = length + ((l - length) // (length - overlap)) * (length - overlap)
else:
roundup = length
rounddown = 0
assert rounddown < l < roundup
assert roundup == rounddown + (length - overlap) or (roundup == length and rounddown == 0)
a = a.swapaxes(-1, axis)
if end == 'cut':
a = a[..., :rounddown]
l = a.shape[0]
elif end in ['pad', 'wrap']: # copying will be necessary
s = list(a.shape)
s[-1] = roundup
b = numpy.empty(s, dtype=a.dtype)
b[..., :l] = a
if end == 'pad':
b[..., l:] = endvalue
elif end == 'wrap':
b[..., l:] = a[..., :roundup - l]
a = b
a = a.swapaxes(-1, axis)
if l == 0:
raise ValueError("Not enough data points to segment array " +
"in 'cut' mode; try 'pad' or 'wrap'")
assert l >= length
assert (l - length) % (length - overlap) == 0
n = 1 + (l - length) // (length - overlap)
s = a.strides[axis]
new_shape = a.shape[:axis] + (n, length) + a.shape[axis + 1:]
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
try:
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
except TypeError:
a = a.copy()
# Shape doesn't change but strides does
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
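# Usage sketch matching the docstring example above (assumption: illustrative only): frames of
# length 4 with an overlap of 2 advance by 2 samples each, giving 4 frames for a 10-sample input.
def _example_segment_axis():
    import numpy
    frames = segment_axis(numpy.arange(10), 4, 2)
    # frames == [[0 1 2 3], [2 3 4 5], [4 5 6 7], [6 7 8 9]]
    return frames.shape  # (4, 4)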
def speech_enhancement(X, Gain, NN):
"""This program is only to process the single file seperated by the silence
section if the silence section is detected, then a counter to number of
buffer is set and pre-processing is required.
Usage: SpeechENhance(wavefilename, Gain, Noise_floor)
:param X: input audio signal
:param Gain: default value is 0.9, suggestion range 0.6 to 1.4,
higher value means more subtraction or noise redcution
:param NN:
:return: a 1-dimensional array of boolean that
is True for high energy frames.
Copyright 2014 <NAME> and <NAME>
"""
if X.shape[0] < 512: # creer une exception
return X
num1 = 40 # dsiable buffer number
Alpha = 0.75 # original value is 0.9
FrameSize = 32 * 2 # 256*2
FrameShift = int(FrameSize / NN) # FrameSize/2=128
nfft = FrameSize # = FrameSize
Fmax = int(numpy.floor(nfft / 2) + 1) # 128+1 = 129
# arising hamming windows
Hamm = 1.08 * (0.54 - 0.46 * numpy.cos(2 * numpy.pi * numpy.arange(FrameSize) / (FrameSize - 1)))
y0 = numpy.zeros(FrameSize - FrameShift) # 128 zeros
Eabsn = numpy.zeros(Fmax)
Eta1 = Eabsn
###################################################################
# initial parameter for noise min
mb = numpy.ones((1 + FrameSize // 2, 4)) * FrameSize / 2 # 129x4 set four buffer * FrameSize/2
im = 0
Beta1 = 0.9024 # seems that small value is better;
pxn = numpy.zeros(1 + FrameSize // 2) # 1+FrameSize/2=129 zeros vector
###################################################################
old_absx = Eabsn
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[
numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
###################################################################
# add the pre-noise estimates
for i in range(200):
Frame += 1
fftn = fft(x * Hamm) # get its spectrum
absn = numpy.abs(fftn[0:Fmax]) # get its amplitude
# add the following part from noise estimation algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # Beta=0.9231 recursive pxn
im = (im + 1) % 40 # noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
# 0-2 vector shifted to 1 to 3
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
# over_sub_noise= oversubtraction factor
        # end of noise detection algorithm
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
if In_data.shape[0] < FrameShift: # to check file is out
EOF = 1
break
else:
x[FrameSize - FrameShift:FrameSize] = In_data # shift new 128 to position 129 to FrameSize location
# end of for loop for noise estimation
# end of prenoise estimation ************************
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
X1 = numpy.zeros(X.shape)
Frame = 0
while EOF == 0:
Frame += 1
xwin = x * Hamm
fftx = fft(xwin, nfft) # FrameSize FFT
absx = numpy.abs(fftx[0:Fmax]) # Fmax=129,get amplitude of x
argx = fftx[:Fmax] / (absx + numpy.spacing(1)) # normalize x spectrum phase
absn = absx
# add the following part from rainer algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # s Beta=0.9231 recursive pxn
im = int((im + 1) % (num1 * NN / 2)) # original =40 noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
Eabsn = pn
Gaina = Gain
temp1 = Eabsn * Gaina
Eta1 = Alpha * old_absx + (1 - Alpha) * numpy.maximum(absx - temp1, 0)
new_absx = (absx * Eta1) / (Eta1 + temp1) # wiener filter
old_absx = new_absx
ffty = new_absx * argx # multiply amplitude with its normalized spectrum
        y = numpy.real(numpy.fft.ifft(numpy.concatenate((ffty,
                                                         numpy.conj(ffty[numpy.arange(Fmax - 2, 0, -1)])))))
y[:FrameSize - FrameShift] = y[:FrameSize - FrameShift] + y0
y0 = y[FrameShift:FrameSize] # keep 129 to FrameSize point samples
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
z = 2 / NN * y[:FrameShift] # left channel is the original signal
z /= 1.15
z = numpy.minimum(z, 32767)
z = numpy.maximum(z, -32768)
index0 = numpy.arange(FrameShift * (Frame - 1), FrameShift * Frame)
if not all(index0 < X1.shape[0]):
idx = 0
while (index0[idx] < X1.shape[0]) & (idx < index0.shape[0]):
X1[index0[idx]] = z[idx]
idx += 1
else:
X1[index0] = z
if In_data.shape[0] == 0:
EOF = 1
else:
x[numpy.arange(FrameSize - FrameShift, FrameSize + In_data.shape[0] - FrameShift)] = In_data
X1 = X1[X1.shape[0] - X.shape[0]:]
return X1
def vad_percentil(log_energy, percent):
"""
:param log_energy:
:param percent:
:return:
"""
thr = numpy.percentile(log_energy, percent)
return log_energy > thr, thr
def vad_energy(log_energy,
distrib_nb=3,
nb_train_it=8,
flooring=0.0001, ceiling=1.0,
alpha=2):
# center and normalize the energy
log_energy = (log_energy - numpy.mean(log_energy)) / numpy.std(log_energy)
# Initialize a Mixture with 2 or 3 distributions
world = Mixture()
# set the covariance of each component to 1.0 and the mean to mu + meanIncrement
world.cst = numpy.ones(distrib_nb) / (numpy.pi / 2.0)
world.det = numpy.ones(distrib_nb)
world.mu = -2 + 4.0 * numpy.arange(distrib_nb) / (distrib_nb - 1)
world.mu = world.mu[:, numpy.newaxis]
world.invcov = numpy.ones((distrib_nb, 1))
# set equal weights for each component
world.w = numpy.ones(distrib_nb) / distrib_nb
world.cov_var_ctl = copy.deepcopy(world.invcov)
# Initialize the accumulator
accum = copy.deepcopy(world)
# Perform nbTrainIt iterations of EM
for it in range(nb_train_it):
accum._reset()
# E-step
world._expectation(accum, log_energy)
# M-step
world._maximization(accum, ceiling, flooring)
# Compute threshold
threshold = world.mu.max() - alpha * numpy.sqrt(1.0 / world.invcov[world.mu.argmax(), 0])
# Apply frame selection with the current threshold
label = log_energy > threshold
return label, threshold
def vad_snr(sig, snr, fs=16000, shift=0.01, nwin=256):
"""Select high energy frames based on the Signal to Noise Ratio
of the signal.
Input signal is expected encoded on 16 bits
:param sig: the input audio signal
:param snr: Signal to noise ratio to consider
:param fs: sampling frequency of the input signal in Hz. Default is 16000.
:param shift: shift between two frames in seconds. Default is 0.01
:param nwin: number of samples of the sliding window. Default is 256.
"""
overlap = nwin - int(shift * fs)
sig /= 32768.
sig = speech_enhancement(numpy.squeeze(sig), 1.2, 2)
# Compute Standard deviation
sig += 0.1 * numpy.random.randn(sig.shape[0])
std2 = segment_axis(sig, nwin, overlap, axis=None, end='cut', endvalue=0).T
std2 = numpy.std(std2, axis=0)
std2 = 20 * numpy.log10(std2) # convert the dB
# APPLY VAD
label = (std2 > numpy.max(std2) - snr) & (std2 > -75)
return label
def label_fusion(label, win=3):
"""Apply a morphological filtering on the label to remove isolated labels.
In case the input is a two channel label (2D ndarray of boolean of same
    length) the labels of two channels are fused to remove
    overlapping segments of speech.
    :param label: input labels given in a 1D or 2D ndarray
    :param win: parameter of the morphological filters
"""
channel_nb = len(label)
if channel_nb == 2:
overlap_label = numpy.logical_and(label[0], label[1])
label[0] = numpy.logical_and(label[0], ~overlap_label)
label[1] = numpy.logical_and(label[1], ~overlap_label)
for idx, lbl in enumerate(label):
cl = ndimage.grey_closing(lbl, size=win)
label[idx] = ndimage.grey_opening(cl, size=win)
return label
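# Minimal sketch (illustrative only) of the morphological smoothing above: grey closing followed
# by grey opening with win=3 fills single-frame gaps and removes single-frame spikes in a VAD
# label sequence.
def _example_label_fusion():
    import numpy
    lbl = numpy.array([0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0])
    smoothed = label_fusion([lbl.copy()], win=3)[0]
    # the isolated frame at index 4 is removed and the one-frame gap at index 11 is filled:
    # [0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0]
    return smoothed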
def hz2mel(f, htk=True):
"""Convert an array of frequency in Hz into mel.
:param f: frequency to convert
:return: the equivalence on the mel scale.
"""
if htk:
return 2595 * numpy.log10(1 + f / 700.)
else:
f = numpy.array(f)
        # Mel fn to match Slaney's Auditory Toolbox mfcc.m
f_0 = 0.
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = f < brkfrq
z = numpy.zeros_like(f)
# fill in parts separately
z[linpts] = (f[linpts] - f_0) / f_sp
z[~linpts] = brkpt + (numpy.log(f[~linpts] / brkfrq)) / numpy.log(logstep)
if z.shape == (1,):
return z[0]
else:
return z
def mel2hz(z, htk=True):
"""Convert an array of mel values in Hz.
:param m: ndarray of frequencies to convert in Hz.
:return: the equivalent values in Hertz.
"""
if htk:
return 700. * (10**(z / 2595.) - 1)
else:
z = numpy.array(z, dtype=float)
f_0 = 0
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = (z < brkpt)
f = numpy.zeros_like(z)
# fill in parts separately
f[linpts] = f_0 + f_sp * z[linpts]
f[~linpts] = brkfrq * numpy.exp(numpy.log(logstep) * (z[~linpts] - brkpt))
if f.shape == (1,):
return f[0]
else:
return f
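# Hedged round-trip check (illustrative only): on the HTK scale 1000 Hz maps to roughly 1000 mel,
# and mel2hz inverts hz2mel up to floating-point accuracy.
def _example_mel_roundtrip():
    import numpy
    f = numpy.array([300.0, 1000.0, 4000.0])
    m = hz2mel(f, htk=True)                        # 1000 Hz -> ~1000 mel
    return numpy.allclose(mel2hz(m, htk=True), f)  # True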
def hz2bark(f):
"""
Convert frequencies (Hertz) to Bark frequencies
:param f: the input frequency
:return:
"""
return 6. * numpy.arcsinh(f / 600.)
def bark2hz(z):
"""
Converts frequencies Bark to Hertz (Hz)
:param z:
:return:
"""
return 600. * numpy.sinh(z / 6.)
def compute_delta(features,
win=3,
method='filter',
filt=numpy.array([.25, .5, .25, 0, -.25, -.5, -.25])):
"""features is a 2D-ndarray each row of features is a a frame
:param features: the feature frames to compute the delta coefficients
:param win: parameter that set the length of the computation window.
The size of the window is (win x 2) + 1
:param method: method used to compute the delta coefficients
can be diff or filter
    :param filt: definition of the filter to use in "filter" mode, default one
        is filt=numpy.array([.25, .5, .25, 0, -.25, -.5, -.25])
:return: the delta coefficients computed on the original features.
"""
# First and last features are appended to the begining and the end of the
# stream to avoid border effect
x = numpy.zeros((features.shape[0] + 2 * win, features.shape[1]), dtype=numpy.float32)
x[:win, :] = features[0, :]
x[win:-win, :] = features
x[-win:, :] = features[-1, :]
delta = numpy.zeros(x.shape, dtype=numpy.float32)
if method == 'diff':
filt = numpy.zeros(2 * win + 1, dtype=numpy.float32)
filt[0] = -1
filt[-1] = 1
for i in range(features.shape[1]):
        delta[:, i] = numpy.convolve(features[:, i], filt)
    return delta[win:-win, :]
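# Hedged usage sketch (illustrative only): the delta stream keeps the shape of the input stream;
# each column is the corresponding input column convolved with the delta filter.
def _example_compute_delta(nframes=20, dim=13):
    import numpy
    feat = numpy.random.randn(nframes, dim).astype(numpy.float32)
    return compute_delta(feat, win=3).shape == feat.shape  # True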
from .dataset import Dataset
from .train_model import _train_model
from .train_model import _train_model_new
from .train_model import _get_lvec
from .infer_labels import _infer_labels
from .helpers.corner import corner
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from copy import deepcopy
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
class CannonModel(object):
    def __init__(self, order, useErrors, wl_filter=None):
self.coeffs = None
self.scatters = None
self.chisqs = None
self.pivots = None
self.scales = None
self.new_tr_labels = None
self.order = order
self.wl_filter = wl_filter
self.model_spectra = None
self.useErrors = useErrors
def model(self):
""" Return the model definition or raise an error if not trained """
if self.coeffs is None:
raise RuntimeError('Model not trained')
else:
return self.coeffs
def train(self, ds):
""" Run training step: solve for best-fit spectral model """
if self.useErrors:
self.coeffs, self.scatters, self.new_tr_labels, self.chisqs, self.pivots, self.scales = _train_model_new(ds)
else:
self.coeffs, self.scatters, self.chisqs, self.pivots, self.scales = _train_model(ds)
def diagnostics(self):
""" Produce a set of diagnostics plots about the model. """
_model_diagnostics(self.dataset, self.model)
def infer_labels(self, ds, starting_guess = None):
"""
Uses the model to solve for labels of the test set, updates Dataset
Then use those inferred labels to set the model.test_spectra attribute
Parameters
----------
ds: Dataset
Dataset that needs label inference
Returns
-------
errs_all: ndarray
Covariance matrix of the fit
"""
return _infer_labels(self, ds, starting_guess)
def infer_spectra(self, ds):
"""
After inferring labels for the test spectra,
infer the model spectra and update the dataset
model_spectra attribute.
Parameters
----------
ds: Dataset object
"""
lvec_all = _get_lvec(ds.test_label_vals, self.pivots, self.scales, derivs=False)
self.model_spectra = np.dot(lvec_all, self.coeffs.T)
def plot_contpix(self, x, y, contpix_x, contpix_y, figname):
""" Plot baseline spec with continuum pix overlaid
Parameters
----------
"""
fig, axarr = plt.subplots(2, sharex=True)
plt.xlabel(r"Wavelength $\lambda (\AA)$")
plt.xlim(min(x), max(x))
ax = axarr[0]
ax.step(x, y, where='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.legend(loc='lower right',
prop={'family':'serif', 'size':'small'})
ax.set_title("Baseline Spectrum with Continuum Pixels")
ax.set_ylabel(r'$\theta_0$')
ax = axarr[1]
ax.step(x, y, where='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.set_title("Baseline Spectrum with Continuum Pixels, Zoomed")
ax.legend(loc='upper right', prop={'family':'serif',
'size':'small'})
ax.set_ylabel(r'$\theta_0$')
ax.set_ylim(0.95, 1.05)
print("Diagnostic plot: fitted 0th order spec w/ cont pix")
print("Saved as %s.png" % (figname))
plt.savefig(figname)
plt.close()
def diagnostics_contpix(self, data, nchunks=10, fig = "baseline_spec_with_cont_pix"):
""" Call plot_contpix once for each nth of the spectrum """
if data.contmask is None:
print("No contmask set")
else:
coeffs_all = self.coeffs
wl = data.wl
baseline_spec = coeffs_all[:,0]
contmask = data.contmask
contpix_x = wl[contmask]
contpix_y = baseline_spec[contmask]
rem = len(wl)%nchunks
wl_split = np.array(np.split(wl[0:len(wl)-rem],nchunks))
baseline_spec_split = np.array(
np.split(baseline_spec[0:len(wl)-rem],nchunks))
nchunks = wl_split.shape[0]
for i in range(nchunks):
fig_chunk = fig + "_%s" %str(i)
wl_chunk = wl_split[i,:]
baseline_spec_chunk = baseline_spec_split[i,:]
take = np.logical_and(
contpix_x>wl_chunk[0], contpix_x<wl_chunk[-1])
self.plot_contpix(
wl_chunk, baseline_spec_chunk,
contpix_x[take], contpix_y[take], fig_chunk)
def diagnostics_leading_coeffs(self, ds):
label_names = ds.get_plotting_labels()
lams = ds.wl
npixels = len(lams)
pivots = self.pivots
nlabels = len(pivots)
chisqs = self.chisqs
coeffs = self.coeffs
scatters = self.scatters
# Leading coefficients for each label & scatter
fig, axarr = plt.subplots(nlabels+1, figsize=(8,8), sharex=True)
ax1 = axarr[0]
plt.subplots_adjust(hspace=0.001)
nbins = len(ax1.get_xticklabels())
for i in range(1,nlabels+1):
axarr[i].yaxis.set_major_locator(
MaxNLocator(nbins=nbins, prune='upper'))
plt.xlabel(r"Wavelength $\lambda (\AA)$", fontsize=14)
        plt.xlim(np.min(lams), np.max(lams))
"""Compute gaussian features."""
import warnings
from functools import partial
from itertools import repeat
from multiprocessing import Pool, cpu_count
from bycycle.group.utils import progress_bar
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import stats as st
from bycycle.cyclepoints import find_extrema, find_zerox
from neurodsp.sim.cycles import sim_skewed_gaussian_cycle
###################################################################################################
###################################################################################################
def compute_gaussian_features(df_samples, sig, fs, maxfev=2000, tol=1.49e-6, n_jobs=-1, chunksize=1,
progress=None, z_thresh_k=0.5, z_thresh_cond=0.5, rsq_thresh=0.5):
"""Compute gaussian features.
Parameters
----------
df_samples : pandas.DataFrame
Contains cyclepoint locations for each spike.
sig : 1d array
Voltage time series.
fs : float
Sampling rate, in Hz.
maxfev : int, optional, default: 2000
The maximum number of calls in curve_fit.
    tol : float, optional, default: 1.49e-6
Relative error desired.
n_jobs : int, optional, default: -1
The number of jobs to compute features in parallel.
chunksize : int, optional, default: 1
Number of chunks to split spikes into. Each chunk is submitted as a separate job.
With a large number of spikes, using a larger chunksize will drastically speed up
runtime. An optimal chunksize is typically np.ceil(n_spikes/n_jobs).
progress : {None, 'tqdm', 'tqdm.notebook'}
Specify whether to display a progress bar. Uses 'tqdm', if installed.
z_thresh_k : float, optional, default: 0.5
Potassium (k) current z-score threshold.
z_thresh_cond : float, optional, default: 0.5
Conductive current z-score threshold.
rsq_thresh : float, optional, default: 0.5
Na current r-squared threshold. Used to stop conductive/K fits in cycles
with bad Na current fits.
Returns
-------
params : dict
Fit parameter values.
"""
n_jobs = cpu_count() if n_jobs == -1 else n_jobs
indices = [*range(len(df_samples))]
# Compute features in parallel
with Pool(processes=n_jobs) as pool:
mapping = pool.imap(partial(_compute_gaussian_features_cycle, df_samples=df_samples,
sig=sig, fs=fs, maxfev=maxfev, tol=tol,
                                    z_thresh_k=z_thresh_k, z_thresh_cond=z_thresh_cond,
                                    rsq_thresh=rsq_thresh),
indices, chunksize=chunksize)
params = list(progress_bar(mapping, progress, len(df_samples)))
return np.array(params)
def _compute_gaussian_features_cycle(index, df_samples=None, sig=None, fs=None,
f_ranges=(300, 2000), maxfev=2000, tol=1.49e-6,
z_thresh_k=0.5, z_thresh_cond=0.5, rsq_thresh=0.5):
"""Compute gaussian features for one cycle."""
start = df_samples.iloc[index]['sample_start'].astype(int)
end = df_samples.iloc[index]['sample_end'].astype(int)
sample_trough = df_samples.iloc[index]['sample_trough'].astype(int)
# Adjust samples to start at zero
sample_trough -= start
# Get signal and time
sig_cyc = sig[start:end+1]
cyc_len = len(sig_cyc)
times_cyc = np.arange(0, cyc_len/fs, 1/fs)
# Fit single skewed gaussian to Na current
na_params, na_gaus = _single_gaus_fit(index, sample_trough, sig_cyc, cyc_len, times_cyc, fs,
extrema_type="trough", maxfev=maxfev, tol=tol)
if not np.isnan(na_gaus).any():
# Get Na center and std
na_center = int(na_params[0]*cyc_len)
na_std = int(na_params[1]*cyc_len)
# Determine Na current region
upper_std = na_center + (2* na_std)
lower_std = na_center - (2* na_std)
# Calculate Na current r-squared
na_rsq = calculate_r_squared(sig_cyc[lower_std:upper_std], na_gaus[lower_std:upper_std])
# Check if Na r-squared is above threshold
if na_rsq < rsq_thresh:
na_rsq = np.nan
na_params = np.append(na_params, na_rsq)
k_params = np.array([np.nan] * len(na_params))
cond_params = np.array([np.nan] * len(na_params))
warnings.warn("Failed fits for index = " + str(index))
else:
na_params = np.append(na_params, na_rsq)
# Substract Na current gaussian fit
rem_sig = sig_cyc - na_gaus
# Split remaining signal into left of Na current (K current)
# and right (conductive current)
rem_sigs, times, z_scores = calculate_side_regions(na_center, rem_sig, times_cyc, fs,
z_thresh_k, z_thresh_cond)
side_current_region = zip(rem_sigs, [z_thresh_k, z_thresh_cond], z_scores, times)
side_current_params = []
side_current_gaus = []
for rem_sig, z_thresh, z_score, times in side_current_region:
if any(z >= z_thresh for z in z_score):
# Get peak of remaining signal
peak = get_current_peak(rem_sig, fs, f_ranges, z_thresh, z_score)
                    if peak is None:
params = np.array([np.nan] * len(na_params))
gaus = np.array([np.nan] * len(times))
else:
# Fit single skewed gaussian to K current
params, gaus = _single_gaus_fit(index, peak, rem_sig, len(rem_sig),
times, fs, extrema_type="peak",
maxfev=maxfev, tol=tol)
# Calculate r-squared
rsq = calculate_r_squared(rem_sig, gaus)
params = np.append(params, rsq)
else:
params = np.array([np.nan] * len(na_params))
gaus = np.array([np.nan] * len(times))
side_current_params.append(params)
side_current_gaus.append(gaus)
# Unpack results
k_params, cond_params = side_current_params
k_gaus, cond_gaus = side_current_gaus
else:
na_rsq = np.nan
na_params = np.append(na_params, na_rsq)
k_params = np.array([np.nan] * len(na_params))
cond_params = np.array([np.nan] * len(na_params))
warnings.warn("Failed fits for index = " + str(index))
all_params = [*cond_params, *na_params, *k_params]
return all_params
def estimate_params(extrema, sig_cyc, fs, extrema_type="trough", n_decimals=2):
"""Initial gaussian parameter estimates.
Parameters
----------
extrema : int
extrema position (peak or trough) of sig_cyc
sig_cyc : 1d array
Voltage time series.
fs : float
Sampling rate, in Hz.
extrema_type : string, optional, default: "trough"
Type of extrema, trough or peak.
n_decimals : int, optional, default: 2
Number of decimals to round parameters to.
Returns
-------
params : 1d array
Estimated centers, stds, alphas, heights.
"""
cyc_len = len(sig_cyc)
centers = []
stds = []
heights = []
# Define parameters
if extrema_type == "trough":
height0 = sig_cyc[extrema] - np.mean(sig_cyc)
else:
height0 = sig_cyc[extrema]
center0 = extrema / cyc_len
std0 = _estimate_std(sig_cyc, extrema_type=extrema_type, plot=False)
centers.append(center0.round(n_decimals))
stds.append(std0.round(n_decimals))
heights.append(height0.round(n_decimals))
# Assume no skew
alphas = [0] * len(centers)
params = [*centers, *stds, *alphas, *heights]
return np.array(params)
def _estimate_bounds(sig_cyc, centers, stds, heights):
"""Estimate parameter's lower and upper bounds."""
# Define bounds
lower_heights = [height * .5 if height > 0 else height * 1.5 for height in heights]
upper_heights = [height * 1.5 if height > 0 else height * .5 for height in heights]
lower_stds = [std * .5 for std in stds]
upper_stds = [std * 1.5 for std in stds]
lower_alphas = [-3 for std in stds]
upper_alphas = [3 for std in stds]
lower_centers = [center * .5 for center in centers]
upper_centers = [center * 1.5 for center in centers]
upper_max = np.max(sig_cyc) - np.min((sig_cyc[0], sig_cyc[-1]))
bounds = [
[*lower_centers, *lower_stds, *lower_alphas, *lower_heights, 0, -1, 0],
[*upper_centers, *upper_stds, *upper_alphas, *upper_heights, upper_max, 1, 1]
]
return bounds
def _fit_gaussians(xs, ys, guess, tol, maxfev, index, bounds=None):
"""Fit gaussians with scipy's curve_fit."""
try:
# Fit gaussians
warnings.filterwarnings("ignore")
params, _ = curve_fit(_sim_gaussian_cycle, xs, ys, p0=guess)
except:
# Raise warning for failed fits
warn_str = "Failed fit for index {idx}.".format(idx=index)
warnings.warn(warn_str, RuntimeWarning)
params = np.array([np.nan] * len(guess))
return params
###################################################################################################
###################################################################################################
def _sim_gaussian_cycle(times, *params):
"""Proxy function for compatibility between sim_skewed_gaussian and curve_fit.
Parameters
----------
times : 1d array
Time definition of the cycle.
params : floats
Variable number of centers, stds, alphas, and heights arguments, respectively. The number
of these variable parameters determines the number of gaussians simulated. An additional
three trailing arguments to define a sigmoid baseline as maximum, growth, midpoint.
Returns
-------
sig_cycle : 1d array
Simulated action potential.
"""
sing_gaus = sim_skewed_gaussian_cycle(1, len(times), *params)
return sing_gaus
def _single_gaus_fit(index, extrema, sig_cyc, cyc_len, times_cyc,
fs, extrema_type="trough", maxfev=2000, tol=None):
"""Calculate guassian fits for single current """
# Initial parameter estimation
_params = estimate_params(extrema, sig_cyc, fs, extrema_type=extrema_type, n_decimals=2)
# Initial bound estimation for Na current
_bounds = _estimate_bounds(sig_cyc, *_params.reshape(4, -1)[[0, 1, 3]])
# Fit single skewed gaussian
_params_fit = _fit_gaussians(times_cyc, sig_cyc, _params, tol, maxfev, index, bounds=_bounds)
if np.isnan(_params_fit).any():
_gaus = np.array([np.nan] * len(times_cyc))
else:
_gaus = sim_skewed_gaussian_cycle(1, cyc_len, *_params_fit)
return _params_fit, _gaus
def calculate_side_regions(na_center, rem_sig, times_cyc, fs, z_thresh_k, z_thresh_cond):
"""Calculate K current and conductive current regions
of the signal based on the center of the Na current.
"""
rem_sig_k = rem_sig[na_center:,]
rem_sig_cond = rem_sig[:na_center,]
times_k = times_cyc[na_center:,]
times_cond = times_cyc[:na_center,]
# Calculate z scores
z_score_k = st.zscore(rem_sig_k)
z_score_cond = st.zscore(rem_sig_cond)
rem_sigs = [rem_sig_k, rem_sig_cond]
times = [times_k, times_cond]
z_scores = [z_score_k,z_score_cond]
return [rem_sigs, times, z_scores]
###################################################################################################
###################################################################################################
def _estimate_std(spike, extrema_type='trough', plot=False):
"""Estimate std of spike"""
spike = -spike if extrema_type == 'peak' else spike
height, height_idx = np.min(spike), np.argmin(spike)
half_height = height / 2
right = spike[height_idx:]
left = np.flip(spike[:height_idx+1])
if plot:
plt.plot(-spike if extrema_type=='peak' else spike)
plt.axvline(height_idx, color='r')
right_idx = _get_closest(right, spike, half_height)
left_idx = _get_closest(left, spike, half_height)
    if right_idx is None:
        right_idx = left_idx
    if left_idx is None:
        left_idx = right_idx
fwhm = (right_idx + left_idx + 1)
std = fwhm / (2 * len(spike) * np.sqrt(2 * np.log(2)))
return std
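# Hedged numeric check (illustrative only): for a clean Gaussian trough the estimate recovers
# std ~= FWHM / (2 * sqrt(2 * ln 2)), expressed as a fraction of the cycle length.
def _example_estimate_std():
    times = np.linspace(-1, 1, 201)
    spike = -np.exp(-times**2 / (2 * 0.1**2))  # Gaussian trough with std = 0.1 time units
    return _estimate_std(spike, extrema_type='trough')  # ~0.05, i.e. 0.1 over the 2-unit window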
def _get_closest(flank, spike, half_height):
for idx, volt in enumerate(flank):
if volt > half_height:
# Get closest sample left or right of half max location
closest = np.argmin([volt - half_height,
half_height - flank[idx-1]])
idx = [idx, idx-1][closest]
return idx
def get_current_peak(sig, fs, f_ranges, z_thresh, z_score):
peaks, troughs = find_extrema(sig, fs, f_ranges, first_extrema=None, pass_type='bandpass')
if len(peaks) == 0:
return None
elif len(peaks) > 1:
#select highest peak
max_volt = max( (v, i) for i, v in enumerate(sig[peaks]) )[1]
peak = peaks[max_volt]
else:
peak = peaks[0]
# check if peak is over z score threshold
if z_score[peak] > z_thresh:
return peak
else:
return None
def calculate_r_squared(sig_cyc, sig_cyc_est):
residuals = sig_cyc - sig_cyc_est
ss_res = | np.sum(residuals**2) | numpy.sum |
import random
import numpy as np
import math
from time import perf_counter
import os
import sys
from collections import deque
import gym
import cntk
from cntk.layers import Convolution, MaxPooling, Dense
from cntk.models import Sequential, LayerStack
from cntk.initializer import glorot_normal
env = gym.make("Breakout-v0")
NUM_ACTIONS = env.action_space.n
SCREEN_H_ORIG, SCREEN_W_ORIG, NUM_COLOUR_CHANNELS = env.observation_space.shape
def preprocess_image(screen_image):
# crop the top and bottom
screen_image = screen_image[35:195]
# down sample by a factor of 2
screen_image = screen_image[::2, ::2]
# convert to grey scale
grey_image = np.zeros(screen_image.shape[0:2])
for i in range(len(screen_image)):
for j in range(len(screen_image[i])):
grey_image[i][j] = np.mean(screen_image[i][j])
return np.array([grey_image.astype(np.float)])
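# Hedged alternative (not part of the original script; the helper name is ours): the per-pixel
# Python loop above is slow, and an equivalent vectorised conversion for an RGB frame of shape
# (H, W, 3) is sketched below.
def preprocess_image_fast(screen_image):
    screen_image = screen_image[35:195:2, ::2]        # crop the playfield and down-sample by 2
    grey_image = screen_image.mean(axis=2)            # average the colour channels
    return grey_image.astype(float)[np.newaxis, ...]  # add the leading channel dimension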
CHANNELS, IMAGE_H, IMAGE_W = preprocess_image(np.zeros((SCREEN_H_ORIG, SCREEN_W_ORIG))).shape
STATE_DIMS = (1, IMAGE_H, IMAGE_W)
class Brain:
BATCH_SIZE = 5
def __init__(self):
#### Construct the model ####
observation = cntk.ops.input_variable(STATE_DIMS, np.float32, name="s")
q_target = cntk.ops.input_variable(NUM_ACTIONS, np.float32, name="q")
# Define the structure of the neural network
self.model = self.create_convolutional_neural_network(observation, NUM_ACTIONS)
#### Define the trainer ####
self.learning_rate = cntk.learner.training_parameter_schedule(0.0001, cntk.UnitType.sample)
self.momentum = cntk.learner.momentum_as_time_constant_schedule(0.99)
self.loss = cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)
mean_error = cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)
learner = cntk.adam_sgd(self.model.parameters, self.learning_rate, momentum=self.momentum)
self.trainer = cntk.Trainer(self.model, self.loss, mean_error, learner)
def train(self, x, y):
data = dict(zip(self.loss.arguments, [y, x]))
self.trainer.train_minibatch(data, outputs=[self.loss.output])
def predict(self, s):
return self.model.eval([s])
@staticmethod
def create_multi_layer_neural_network(input_vars, out_dims, num_hidden_layers):
num_hidden_neurons = 128
hidden_layer = lambda: Dense(num_hidden_neurons, activation=cntk.ops.relu)
output_layer = Dense(out_dims, activation=None)
model = Sequential([LayerStack(num_hidden_layers, hidden_layer),
output_layer])(input_vars)
return model
@staticmethod
def create_convolutional_neural_network(input_vars, out_dims):
convolutional_layer_1 = Convolution((5, 5), 32, strides=1, activation=cntk.ops.relu, pad=True,
init=glorot_normal(), init_bias=0.1)
pooling_layer_1 = MaxPooling((2, 2), strides=(2, 2), pad=True)
convolutional_layer_2 = Convolution((5, 5), 64, strides=1, activation=cntk.ops.relu, pad=True,
init=glorot_normal(), init_bias=0.1)
pooling_layer_2 = MaxPooling((2, 2), strides=(2, 2), pad=True)
convolutional_layer_3 = Convolution((5, 5), 128, strides=1, activation=cntk.ops.relu, pad=True,
init=glorot_normal(), init_bias=0.1)
pooling_layer_3 = MaxPooling((2, 2), strides=(2, 2), pad=True)
fully_connected_layer = Dense(1024, activation=cntk.ops.relu, init=glorot_normal(), init_bias=0.1)
output_layer = Dense(out_dims, activation=None, init=glorot_normal(), init_bias=0.1)
model = Sequential([convolutional_layer_1, pooling_layer_1,
convolutional_layer_2, pooling_layer_2,
#convolutional_layer_3, pooling_layer_3,
fully_connected_layer,
output_layer])(input_vars)
return model
class Memory:
def __init__(self, capacity):
self.examplers = deque(maxlen=capacity)
self.capacity = capacity
def add(self, sample):
self.examplers.append(sample)
def get_random_samples(self, num_samples):
num_samples = min(num_samples, len(self.examplers))
return random.sample(tuple(self.examplers), num_samples)
def get_stack(self, start_index, stack_size):
end_index = len(self.examplers) - stack_size
if end_index < 0:
stack = list(self.examplers) + [self.examplers[-1] for _ in range(-end_index)]
else:
start_index = min(start_index, end_index)
stack = [self.examplers[i + start_index] for i in range(stack_size)]
return np.stack(stack, axis=-1)
def get_random_stacks(self, num_samples, stack_size):
start_indices = random.sample(range(len(self.examplers)), num_samples)
return [self.get_stack(start_index, stack_size) for start_index in start_indices]
def get_latest_stack(self, stack_size):
return self.get_stack(len(self.examplers), stack_size)
class Agent:
MEMORY_CAPACITY = 100000
DISCOUNT_FACTOR = 0.99
MAX_EXPLORATION_RATE = 1.0
MIN_EXPLORATION_RATE = 0.01
DECAY_RATE = 0.0001
def __init__(self):
self.explore_rate = self.MAX_EXPLORATION_RATE
self.brain = Brain()
self.memory = Memory(self.MEMORY_CAPACITY)
self.steps = 0
def act(self, s):
if random.random() < self.explore_rate:
return random.randint(0, NUM_ACTIONS - 1)
else:
return np.argmax(self.brain.predict(s))
def observe(self, sample):
self.steps += 1
self.memory.add(sample)
        # Decay the exploration rate exponentially towards its minimum value
self.explore_rate = self.MIN_EXPLORATION_RATE + (self.MAX_EXPLORATION_RATE - self.MIN_EXPLORATION_RATE) * math.exp(-self.DECAY_RATE * self.steps)
def replay(self):
batch = self.memory.get_random_samples(self.brain.BATCH_SIZE)
batch_len = len(batch)
states = np.array([sample[0] for sample in batch], dtype=np.float32)
no_state = np.zeros(STATE_DIMS)
        resultant_states = np.array([(no_state if sample[3] is None else sample[3]) for sample in batch],
                                    dtype=np.float32)
# A simple Psi 4 input script to compute a SCF reference using Psi4's libJK
# Requires numpy 1.7.2+
#
# Created by: <NAME>
# Date: 4/1/15
# License: GPL v3.0
#
import time
import numpy as np
import helper_HF as scf_helper
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Memory for Psi4 in GB
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
# Memory for numpy in GB
numpy_memory = 2
# Triplet O2
mol = psi4.geometry("""
0 3
O
O 1 1.2
symmetry c1
""")
psi4.set_options({'guess': 'core',
'basis': 'aug-cc-pvdz',
'scf_type': 'pk',
'e_convergence': 1e-8,
'reference': 'rohf'})
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))
# Set occupations
nocc = wfn.nalpha()
ndocc = wfn.nbeta()
nsocc = nocc - ndocc
# Set defaults
maxiter = 10
max_micro = 4
micro_print = True
micro_conv = 1.e-3
E_conv = 1.0E-8
D_conv = 1.0E-4
# Integral generation from Psi4's MintsHelper
t = time.time()
mints = psi4.core.MintsHelper(wfn.basisset())
S = np.asarray(mints.ao_overlap())
nbf = S.shape[0]
jk = psi4.core.JK.build(wfn.basisset())
jk.initialize()
if nbf > 100:
raise Exception("This has a N^4 memory overhead, killing if nbf > 100.")
print('\nNumber of doubly occupied orbitals: %d' % ndocc)
print('Number of singly occupied orbitals: %d' % nsocc)
print('Number of basis functions: %d' % nbf)
V = np.asarray(mints.ao_potential())
T = np.asarray(mints.ao_kinetic())
# Build H_core
H = T + V
# ERI's
I = np.asarray(mints.ao_eri())
# Orthogonalizer A = S^(-1/2)
A = mints.ao_overlap()
A.power(-0.5, 1.e-16)
A = np.asarray(A)
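# Hedged sanity check (assumption: not part of the original tutorial script): the Lowdin
# orthogonalizer should satisfy A S A ~= I up to numerical precision.
orthonormality_error = np.max(np.abs(A.dot(S).dot(A) - np.eye(nbf)))
print('Max deviation of A S A from identity: %1.2e' % orthonormality_error)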
"""Define output of Meta Models and visualize the results."""
import math
from itertools import product
from scipy.spatial import cKDTree
import numpy as np
import logging
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.plotting import figure
from bokeh.models import Slider, ColumnDataSource, HoverTool
from bokeh.models import ColorBar, BasicTicker, LinearColorMapper, Range1d
from bokeh.models.widgets import TextInput, Select
from bokeh.server.server import Server
from openmdao.components.meta_model_unstructured_comp import MetaModelUnStructuredComp
from openmdao.components.meta_model_structured_comp import MetaModelStructuredComp
from openmdao.core.problem import Problem
def stack_outputs(outputs_dict):
"""
Stack the values of a dictionary.
Parameters
----------
outputs_dict : dict
Dictionary of outputs
Returns
-------
array
np.stack of values
"""
return np.stack([np.asarray(v) for v in outputs_dict.values()], axis=-1)
class MetaModelVisualization(object):
"""
Top-level container for the Meta Model Visualization.
Attributes
----------
prob : Problem
Name of variable corresponding to Problem Component
meta_model : MetaModel
Name of empty Meta Model Component object reference
resolution : int
Number used to calculate width and height of contour plot
is_structured_meta_model : Bool
Boolean used to signal whether the meta model is structured or unstructured
slider_source : ColumnDataSource
Data source containing dictionary of sliders
contour_training_data_source : ColumnDataSource
Data source containing dictionary of training data points
bottom_plot_source : ColumnDataSource
Data source containing data for the bottom subplot
bottom_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the bottom subplot
right_plot_source : ColumnDataSource
Data source containing data for the right subplot
right_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the right subplot
contour_plot_source : ColumnDataSource
Data source containing data for the contour plot
input_names : list
List of input data titles as strings
output_names : list
List of output data titles as strings
training_inputs : dict
Dictionary of input training data
x_input_select : Select
Bokeh Select object containing a list of inputs for the x axis
y_input_select : Select
Bokeh Select object containing a list of inputs for the y axis
output_select : Select
Bokeh Select object containing a list of inputs for the outputs
x_input_slider : Slider
Bokeh Slider object containing a list of input values for the x axis
y_input_slider : Slider
Bokeh Slider object containing a list of input values for the y axis
slider_dict : dict
Dictionary of slider names and their respective slider objects
predict_inputs : dict
Dictionary containing training data points to predict at.
num_inputs : int
Number of inputs
num_outputs : int
Number of outputs
limit_range : array
Array containing the range of each input
scatter_distance : TextInput
Text input for user to enter custom value to calculate distance of training points around
slice line
right_alphas : array
Array of points containing alpha values for right plot
bottom_alphas : array
Array of points containing alpha values for bottom plot
dist_range : float
Value taken from scatter_distance used for calculating distance of training points around
slice line
x_index : int
Value of x axis column
y_index : int
Value of y axis column
output_variable : int
Value of output axis column
sliders_and_selects : layout
Layout containing the sliders and select elements
doc_layout : layout
Contains first row of plots
doc_layout2 : layout
Contains second row of plots
Z : array
A 2D array containing contour plot data
"""
def __init__(self, model, resolution=50, doc=None):
"""
Initialize parameters.
Parameters
----------
model : MetaModelComponent
Reference to meta model component
resolution : int
Value used to calculate the size of contour plot meshgrid
doc : Document
The bokeh document to build.
"""
self.prob = Problem()
self.resolution = resolution
logging.getLogger("bokeh").setLevel(logging.ERROR)
# If the surrogate model coming in is structured
if isinstance(model, MetaModelUnStructuredComp):
self.is_structured_meta_model = False
# Create list of input names, check if it has more than one input, then create list
# of outputs
self.input_names = [name[0] for name in model._surrogate_input_names]
if len(self.input_names) < 2:
raise ValueError('Must have more than one input value')
self.output_names = [name[0] for name in model._surrogate_output_names]
# Create reference for untructured component
self.meta_model = MetaModelUnStructuredComp(
default_surrogate=model.options['default_surrogate'])
# If the surrogate model coming in is unstructured
elif isinstance(model, MetaModelStructuredComp):
self.is_structured_meta_model = True
self.input_names = [name for name in model._var_rel_names['input']]
if len(self.input_names) < 2:
raise ValueError('Must have more than one input value')
self.output_names = [name for name in model._var_rel_names['output']]
self.meta_model = MetaModelStructuredComp(
distributed=model.options['distributed'],
extrapolate=model.options['extrapolate'],
method=model.options['method'],
training_data_gradients=model.options['training_data_gradients'],
vec_size=1)
# Pair input list names with their respective data
self.training_inputs = {}
self._setup_empty_prob_comp(model)
# Setup dropdown menus for x/y inputs and the output value
self.x_input_select = Select(title="X Input:", value=[x for x in self.input_names][0],
options=[x for x in self.input_names])
self.x_input_select.on_change('value', self._x_input_update)
self.y_input_select = Select(title="Y Input:", value=[x for x in self.input_names][1],
options=[x for x in self.input_names])
self.y_input_select.on_change('value', self._y_input_update)
self.output_select = Select(title="Output:", value=[x for x in self.output_names][0],
options=[x for x in self.output_names])
self.output_select.on_change('value', self._output_value_update)
# Create sliders for each input
self.slider_dict = {}
self.predict_inputs = {}
for title, values in self.training_inputs.items():
slider_data = np.linspace(min(values), max(values), self.resolution)
self.predict_inputs[title] = slider_data
# Calculates the distance between slider ticks
slider_step = slider_data[1] - slider_data[0]
slider_object = Slider(start=min(values), end=max(values), value=min(values),
step=slider_step, title=str(title))
self.slider_dict[title] = slider_object
self._slider_attrs()
# Length of inputs and outputs
self.num_inputs = len(self.input_names)
self.num_outputs = len(self.output_names)
# Precalculate the problem bounds.
limits = np.array([[min(value), max(value)] for value in self.training_inputs.values()])
self.limit_range = limits[:, 1] - limits[:, 0]
# Positional indicies
self.x_index = 0
self.y_index = 1
self.output_variable = self.output_names.index(self.output_select.value)
# Data sources are filled with initial values
# Slider Column Data Source
self.slider_source = ColumnDataSource(data=self.predict_inputs)
# Contour plot Column Data Source
self.contour_plot_source = ColumnDataSource(data=dict(
z=np.random.rand(self.resolution, self.resolution)))
self.contour_training_data_source = ColumnDataSource(
data=dict(x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
# Bottom plot Column Data Source
self.bottom_plot_source = ColumnDataSource(data=dict(
x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
self.bottom_plot_scatter_source = ColumnDataSource(data=dict(
bot_slice_x=np.repeat(0, self.resolution), bot_slice_y=np.repeat(0, self.resolution)))
# Right plot Column Data Source
self.right_plot_source = ColumnDataSource(data=dict(
x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
self.right_plot_scatter_source = ColumnDataSource(data=dict(
right_slice_x=np.repeat(0, self.resolution),
right_slice_y=np.repeat(0, self.resolution)))
# Text input to change the distance of reach when searching for nearest data points
self.scatter_distance = TextInput(value="0.1", title="Scatter Distance")
self.scatter_distance.on_change('value', self._scatter_input)
self.dist_range = float(self.scatter_distance.value)
# Grouping all of the sliders and dropdowns into one column
sliders = [value for value in self.slider_dict.values()]
sliders.extend(
[self.x_input_select, self.y_input_select, self.output_select, self.scatter_distance])
self.sliders_and_selects = row(
column(*sliders))
# Layout creation
self.doc_layout = row(self._contour_data(), self._right_plot(), self.sliders_and_selects)
self.doc_layout2 = row(self._bottom_plot())
if doc is None:
doc = curdoc()
doc.add_root(self.doc_layout)
doc.add_root(self.doc_layout2)
doc.title = 'Meta Model Visualization'
def _setup_empty_prob_comp(self, metamodel):
"""
Take data from surrogate ref and pass it into new surrogate model with empty Problem model.
Parameters
----------
metamodel : MetaModelComponent
Reference to meta model component
"""
# Check for structured or unstructured
if self.is_structured_meta_model:
# Loop through the input names
for idx, name in enumerate(self.input_names):
# Check for no training data
try:
# Append the input data/titles to a dictionary
self.training_inputs[name] = metamodel.params[idx]
# Also, append the data as an 'add_input' to the model reference
self.meta_model.add_input(name, 0.,
training_data=metamodel.params[idx])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
# Add the outputs to the model reference
for idx, name in enumerate(self.output_names):
self.meta_model.add_output(
name, 0.,
training_data=metamodel.training_outputs[name])
else:
for name in self.input_names:
try:
self.training_inputs[name] = {
title for title in metamodel.options['train:' + str(name)]}
self.meta_model.add_input(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
for name in self.output_names:
self.meta_model.add_output(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
# Add the subsystem and setup
self.prob.model.add_subsystem('interp', self.meta_model)
self.prob.setup()
def _slider_attrs(self):
"""
Assign data to slider objects and callback functions.
Parameters
----------
None
"""
for name, slider_object in self.slider_dict.items():
# Checks if there is a callback previously assigned and then clears it
if len(slider_object._callbacks) == 1:
slider_object._callbacks.clear()
# Check if the name matches the 'x input' title
if name == self.x_input_select.value:
# Set the object and add an event handler
self.x_input_slider = slider_object
self.x_input_slider.on_change('value', self._scatter_plots_update)
# Check if the name matches the 'y input' title
elif name == self.y_input_select.value:
# Set the object and add an event handler
self.y_input_slider = slider_object
self.y_input_slider.on_change('value', self._scatter_plots_update)
else:
# If it is not an x or y input then just assign it the event handler
slider_object.on_change('value', self._update)
def _make_predictions(self, data):
"""
Run the data parameter through the surrogate model which is given in prob.
Parameters
----------
data : dict
Dictionary containing training points.
Returns
-------
array
np.stack of predicted points.
"""
# Create dictionary with an empty list
outputs = {name: [] for name in self.output_names}
# Parse dict into shape [n**2, number of inputs] list
inputs = np.empty([self.resolution**2, self.num_inputs])
for idx, values in enumerate(data.values()):
inputs[:, idx] = values.flatten()
# Check for structured or unstructured
if self.is_structured_meta_model:
# Assign each row of the data coming in to a tuple. Loop through the tuple, and append
# the name of the input and value.
for idx, tup in enumerate(inputs):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
# Append the predicted value(s)
for title in self.output_names:
outputs[title].append(
np.array(self.prob[self.meta_model.name + '.' + title]))
else:
for idx, tup in enumerate(inputs):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
for title in self.output_names:
outputs[title].append(
float(self.prob[self.meta_model.name + '.' + title]))
return stack_outputs(outputs)
def _contour_data_calcs(self):
"""
Parse input data into a dictionary to be predicted at.
Parameters
----------
None
Returns
-------
dict
Dictionary of training data to be predicted at.
"""
# Create initial data array of training points
resolution = self.resolution
x_data = np.zeros((resolution, resolution, self.num_inputs))
self._slider_attrs()
# Broadcast the inputs to every row of x_data array
x_data[:, :, :] = np.array(self.input_point_list)
# Find the x/y input titles and match their index positions
for idx, (title, values) in enumerate(self.slider_source.data.items()):
if title == self.x_input_select.value:
self.xlins_mesh = values
x_index_position = idx
if title == self.y_input_select.value:
self.ylins_mesh = values
y_index_position = idx
# Make meshgrid from the x/y inputs to be plotted
X, Y = np.meshgrid(self.xlins_mesh, self.ylins_mesh)
# Move the x/y inputs to their respective positions in x_data
x_data[:, :, x_index_position] = X
x_data[:, :, y_index_position] = Y
pred_dict = {}
for idx, title in enumerate(self.slider_source.data):
pred_dict.update({title: x_data[:, :, idx]})
return pred_dict
def _contour_data(self):
"""
Create a contour plot.
Parameters
----------
None
Returns
-------
Bokeh Image Plot
"""
resolution = self.resolution
# Output data array initialization
y_data = np.zeros((resolution, resolution, self.num_outputs))
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Pass the dict to make predictions and then reshape the output to
# (resolution, resolution, number of outputs)
y_data[:, :, :] = self._make_predictions(self._contour_data_calcs()).reshape(
(resolution, resolution, self.num_outputs))
# Use the output variable to pull the correct column of data from the predicted
# data (y_data)
self.Z = y_data[:, :, self.output_variable]
# Reshape it to be 2D
self.Z = self.Z.reshape(resolution, resolution)
# Update the data source with new data
self.contour_plot_source.data = dict(z=[self.Z])
# Min to max of training data
self.contour_x_range = xlins = self.xlins_mesh
self.contour_y_range = ylins = self.ylins_mesh
# Color bar formatting
color_mapper = LinearColorMapper(
palette="Viridis11", low=np.amin(self.Z), high=np.amax(self.Z))
color_bar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(), label_standoff=12,
location=(0, 0))
# Contour Plot
self.contour_plot = contour_plot = figure(
match_aspect=False,
tooltips=[(self.x_input_select.value, "$x"), (self.y_input_select.value, "$y"),
(self.output_select.value, "@z")], tools='')
contour_plot.x_range.range_padding = 0
contour_plot.y_range.range_padding = 0
contour_plot.plot_width = 600
contour_plot.plot_height = 500
contour_plot.xaxis.axis_label = self.x_input_select.value
contour_plot.yaxis.axis_label = self.y_input_select.value
contour_plot.min_border_left = 0
contour_plot.add_layout(color_bar, 'right')
contour_plot.x_range = Range1d(min(xlins), max(xlins))
contour_plot.y_range = Range1d(min(ylins), max(ylins))
contour_plot.image(image='z', source=self.contour_plot_source, x=min(xlins), y=min(ylins),
dh=(max(ylins) - min(ylins)), dw=(max(xlins) - min(xlins)),
palette="Viridis11")
# Adding training data points overlay to contour plot
if self.is_structured_meta_model:
data = self._structured_training_points()
else:
data = self._unstructured_training_points()
if len(data):
# Add training data points overlay to contour plot
data = np.array(data)
if self.is_structured_meta_model:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model.training_outputs[
self.output_select.value].flatten())
else:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model._training_output[
self.output_select.value])
training_data_renderer = self.contour_plot.circle(
x='x', y='y', source=self.contour_training_data_source,
size=5, color='white', alpha=0.50)
self.contour_plot.add_tools(HoverTool(renderers=[training_data_renderer], tooltips=[
(self.x_input_select.value + " (train)", '@x'),
(self.y_input_select.value + " (train)", '@y'),
(self.output_select.value + " (train)", '@z'), ]))
return self.contour_plot
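# Hedged standalone sketch of the Bokeh pattern used above (values illustrative):
# a 2-D array is rendered as an image glyph whose extent matches the axis ranges,
# with a LinearColorMapper driving the attached color bar.
#
#   import numpy as np
#   from bokeh.models import (ColumnDataSource, LinearColorMapper, ColorBar,
#                             BasicTicker, Range1d)
#   from bokeh.plotting import figure
#   z = np.random.rand(50, 50)
#   src = ColumnDataSource(data=dict(z=[z]))
#   p = figure(x_range=Range1d(0, 1), y_range=Range1d(0, 1), tools='')
#   p.image(image='z', source=src, x=0, y=0, dw=1, dh=1, palette="Viridis11")
#   mapper = LinearColorMapper(palette="Viridis11", low=z.min(), high=z.max())
#   p.add_layout(ColorBar(color_mapper=mapper, ticker=BasicTicker()), 'right')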
def _right_plot(self):
"""
Create the right side subplot to view the projected slice.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the y input and match it with the data
y_idx = self.y_input_select.value
y_data = self.predict_inputs[y_idx]
# Find the position of the x_input slider
x_value = self.x_input_slider.value
# Rounds the x_data to match the predict_inputs value
subplot_value_index = np.where(
np.around(self.predict_inputs[self.x_input_select.value], 5) ==
np.around(x_value, 5))[0]
# Make slice in Z data at the point calculated before and add it to the data source
z_data = self.Z[:, subplot_value_index].flatten()
x = z_data
y = self.slider_source.data[y_idx]
# Update the data source with new data
self.right_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.right_plot_fig = right_plot_fig = figure(
plot_width=250, plot_height=500,
title="{} vs {}".format(y_idx, self.output_select.value), tools="pan")
right_plot_fig.xaxis.axis_label = self.output_select.value
right_plot_fig.yaxis.axis_label = y_idx
right_plot_fig.xaxis.major_label_orientation = math.pi / 9
right_plot_fig.line(x='x', y='y', source=self.right_plot_source)
right_plot_fig.x_range.range_padding = 0.1
right_plot_fig.y_range.range_padding = 0.02
# Determine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True, source='right')
else:
data = self._unstructured_training_points(compute_distance=True, source='right')
self.right_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None,
fill_color='#000000',
fill_alpha=self.right_alphas.tolist())
right_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(self.output_select.value + " (train)", '@x'),
(y_idx + " (train)", '@y'),
]))
right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None, fill_color='#000000',
fill_alpha=self.right_alphas.tolist())
# Set the right_plot data source to new values
self.right_plot_scatter_source.data = dict(
right_slice_x=np.repeat(x_value, self.resolution), right_slice_y=y_data)
self.contour_plot.line(
'right_slice_x', 'right_slice_y', source=self.right_plot_scatter_source,
color='black', line_width=2)
return self.right_plot_fig
def _bottom_plot(self):
"""
Create the bottom subplot to view the projected slice.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the x input and match it with the data
x_idx = self.x_input_select.value
x_data = self.predict_inputs[x_idx]
# Find the position of the y_input slider
y_value = self.y_input_slider.value
# Rounds the y_data to match the predict_inputs value
subplot_value_index = np.where(
np.around(self.predict_inputs[self.y_input_select.value], 5) ==
np.around(y_value, 5))[0]
# Make slice in Z data at the point calculated before and add it to the data source
z_data = self.Z[subplot_value_index, :].flatten()
x = self.slider_source.data[x_idx]
y = z_data
# Update the data source with new data
self.bottom_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.bottom_plot_fig = bottom_plot_fig = figure(
plot_width=550, plot_height=250,
title="{} vs {}".format(x_idx, self.output_select.value), tools="")
bottom_plot_fig.xaxis.axis_label = x_idx
bottom_plot_fig.yaxis.axis_label = self.output_select.value
bottom_plot_fig.line(x='x', y='y', source=self.bottom_plot_source)
bottom_plot_fig.x_range.range_padding = 0.02
bottom_plot_fig.y_range.range_padding = 0.1
# Determine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True)
else:
data = self._unstructured_training_points(compute_distance=True)
self.bottom_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = bottom_plot_fig.scatter(x=data[:, 0], y=data[:, 3], line_color=None,
fill_color='#000000',
fill_alpha=self.bottom_alphas.tolist())
bottom_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(x_idx + " (train)", '@x'),
(self.output_select.value + " (train)", '@y'),
]))
# Set the bottom_plot data source to new values
self.bottom_plot_scatter_source.data = dict(
bot_slice_x=x_data,
bot_slice_y=np.repeat(y_value, self.resolution))
self.contour_plot.line(
'bot_slice_x', 'bot_slice_y', source=self.bottom_plot_scatter_source, color='black',
line_width=2)
return self.bottom_plot_fig
def _unstructured_training_points(self, compute_distance=False, source='bottom'):
"""
Calculate the training points and return an array containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being called from.
Returns
-------
array
The array of training points and their alpha opacity with respect to the surrogate line
"""
# Input training data and output training data
x_training = self.meta_model._training_input
training_output = np.squeeze(stack_outputs(self.meta_model._training_output), axis=1)
# Index of input/output variables
x_index = self.x_input_select.options.index(self.x_input_select.value)
y_index = self.y_input_select.options.index(self.y_input_select.value)
output_variable = self.output_names.index(self.output_select.value)
# Vertically stack the x/y inputs and then transpose them
infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = np.divide(points, self.limit_range)
dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
scaled_x0 = np.divide(self.input_point_list, self.limit_range)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
# Nearest points to x slice
if x_training.shape[1] < 3:
tree = cKDTree(points)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
dists, idxs = tree.query(
scaled_x0, k=len(x_training), distance_upper_bound=self.dist_range)
# kdtree query always returns requested k even if there are not enough valid points
idx_finite = np.where(np.isfinite(dists))
dists = dists[idx_finite]
idxs = idxs[idx_finite]
else:
dists, idxs = self._multidimension_input(scaled_x0, points, source=source)
# data contains:
# [x_value, y_value, ND-distance, func_value]
data = np.zeros((len(idxs), 4))
for dist_index, j in enumerate(idxs):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = dists[dist_index]
data[dist_index, 3] = training_output[j, output_variable]
return data
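# Standalone sketch of the k-d tree query used above (scipy API; sizes illustrative):
#
#   import numpy as np
#   from scipy.spatial import cKDTree
#   pts = np.random.rand(100, 2)        # normalized 2-D training inputs
#   x0 = np.array([0.5, 0.5])           # normalized slider position
#   tree = cKDTree(pts)
#   dists, idxs = tree.query(x0, k=len(pts), distance_upper_bound=0.1)
#   keep = np.isfinite(dists)           # query pads with inf beyond the bound
#   dists, idxs = dists[keep], idxs[keep]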
def _structured_training_points(self, compute_distance=False, source='bottom'):
"""
Calculate the training points and return an array containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being called from.
Returns
-------
array
The array of training points and their alpha opacity with respect to the surrogate line
"""
# Create tuple of the input parameters
input_dimensions = tuple(self.meta_model.params)
# Input training data and output training data
x_training = np.array([z for z in product(*input_dimensions)])
training_output = self.meta_model.training_outputs[self.output_select.value].flatten()
# Index of input/output variables
x_index = self.x_input_select.options.index(self.x_input_select.value)
y_index = self.y_input_select.options.index(self.y_input_select.value)
# Vertically stack the x/y inputs and then transpose them
infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = np.divide(points, self.limit_range)
self.dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
scaled_x0 = np.divide(self.input_point_list, self.limit_range)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
# Nearest points to x slice
if x_training.shape[1] < 3:
x_tree, x_idx = self._two_dimension_input(scaled_x0, points, source=source)
else:
x_tree, x_idx = self._multidimension_input(scaled_x0, points, source=source)
# format for 'data'
# [x_value, y_value, ND-distance_(x or y), func_value]
n = len(x_tree)
data = np.zeros((n, 4))
for dist_index, j in enumerate(x_idx):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = x_tree[dist_index]
data[dist_index, 3] = training_output[j]
return data
def _two_dimension_input(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : array
Array of normalized slider positions.
training_points : array
Array of input training data.
source : str
Which subplot the method is being called from.
Returns
-------
idxs : array
Index of closest points that are within the dist range.
x_tree : array
One-dimensional array of points that are within the dist range.
"""
# Column of the input
if source == 'right':
col_idx = self.y_input_select.options.index(self.y_input_select.value)
else:
col_idx = self.x_input_select.options.index(self.x_input_select.value)
# Delete the axis of input from source to predicted 1D distance
x = np.delete(scaled_points, col_idx, axis=0)
x_training_points = np.delete(training_points, col_idx, axis=1).flatten()
# Tree of point distances
x_tree = np.abs(x - x_training_points)
# Only return points that are within our distance-viewing parameter.
idx = np.where(x_tree <= self.dist_range)
x_tree = x_tree[idx]
return x_tree, idx[0]
def _multidimension_input(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : array
Array of normalized slider positions.
training_points : array
Array of input training data.
source : str
Which subplot the method is being called from.
Returns
-------
idxs : array
Index of closest points that are within the dist range.
x_tree : array
Array of points that are within the dist range.
"""
# Column of the input
if source == 'right':
col_idx = self.y_input_select.options.index(self.y_input_select.value)
else:
col_idx = self.x_input_select.options.index(self.x_input_select.value)
# Delete the axis of input from source to predicted distance
x = np.delete(scaled_points, col_idx, axis=0)
x_training_points = | np.delete(training_points, col_idx, axis=1) | numpy.delete |
"""Matrix, Jco and Cov classes for easy linear algebra
"""
from __future__ import print_function, division
import copy
import struct
from datetime import datetime
import numpy as np
import pandas
import scipy.linalg as la
from scipy.io import FortranFile
from pyemu.pst.pst_handler import Pst
def concat(mats):
"""Concatenate Matrix objects. Tries either axis.
Parameters
----------
mats: list
list of Matrix objects
Returns
-------
Matrix : Matrix
"""
for mat in mats:
if mat.isdiagonal:
raise NotImplementedError("concat not supported for diagonal mats")
row_match = True
col_match = True
for mat in mats[1:]:
if sorted(mats[0].row_names) != sorted(mat.row_names):
row_match = False
if sorted(mats[0].col_names) != sorted(mat.col_names):
col_match = False
if not row_match and not col_match:
raise Exception("mat_handler.concat(): all Matrix objects"+\
"must share either rows or cols")
if row_match and col_match:
raise Exception("mat_handler.concat(): all Matrix objects"+\
"share both rows and cols")
if row_match:
row_names = copy.deepcopy(mats[0].row_names)
col_names = []
for mat in mats:
col_names.extend(copy.deepcopy(mat.col_names))
x = mats[0].newx
for mat in mats[1:]:
mat.align(mats[0].row_names, axis=0)
other_x = mat.newx
x = np.append(x, other_x, axis=1)
else:
col_names = copy.deepcopy(mats[0].col_names)
row_names = []
for mat in mats:
row_names.extend(copy.deepcopy(mat.row_names))
x = mats[0].newx
for mat in mats[1:]:
mat.align(mats[0].col_names, axis=1)
other_x = mat.newx
x = np.append(x, other_x, axis=0)
return Matrix(x=x, row_names=row_names, col_names=col_names)
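# Usage sketch (doctest-style, illustrative names): two matrices sharing row
# names are concatenated along the column axis.
#
#   >>> import numpy as np
#   >>> a = Matrix(x=np.ones((2, 2)), row_names=['r1', 'r2'], col_names=['a1', 'a2'])
#   >>> b = Matrix(x=np.zeros((2, 3)), row_names=['r1', 'r2'], col_names=['b1', 'b2', 'b3'])
#   >>> concat([a, b]).shape
#   (2, 5)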
def get_common_elements(list1, list2):
"""find the common elements in two lists. used to support auto align
might be faster with sets
Parameters
----------
list1 : list
a list of objects
list2 : list
a list of objects
Returns
-------
list : list
list of common objects shared by list1 and list2
"""
#result = []
#for item in list1:
# if item in list2:
# result.append(item)
#Return list(set(list1).intersection(set(list2)))
set2 = set(list2)
result = [item for item in list1 if item in set2]
return result
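# Example (illustrative): the intersection keeps the ordering of list1.
#
#   >>> get_common_elements(['c', 'a', 'b'], ['b', 'c', 'x'])
#   ['c', 'b']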
class Matrix(object):
"""a class for easy linear algebra
Parameters
----------
x : numpy.ndarray
Matrix entries
row_names : list
list of row names
col_names : list
list of column names
isdiagonal : bool
to determine if the Matrix is diagonal
autoalign: bool
used to control the autoalignment of Matrix objects
during linear algebra operations
Returns
-------
Matrix : Matrix
Attributes
----------
binary_header_dt : numpy.dtype
the header info in the PEST binary file type
binary_rec_dt : numpy.dtype
the record info in the PEST binary file type
Methods
-------
to_ascii : write a PEST-style ASCII matrix format file
to_binary : write a PEST-style compressed binary format file
Note
----
this class makes heavy use of property decorators to encapsulate
private attributes
"""
integer = np.int32
double = np.float64
char = np.uint8
binary_header_dt = np.dtype([('itemp1', integer),
('itemp2', integer),
('icount', integer)])
binary_rec_dt = np.dtype([('j', integer),
('dtemp', double)])
par_length = 12
obs_length = 20
def __init__(self, x=None, row_names=[], col_names=[], isdiagonal=False,
autoalign=True):
self.col_names, self.row_names = [], []
[self.col_names.append(str(c).lower()) for c in col_names]
[self.row_names.append(str(r).lower()) for r in row_names]
self.__x = None
self.__u = None
self.__s = None
self.__v = None
if x is not None:
assert x.ndim == 2
#x = np.atleast_2d(x)
if isdiagonal and len(row_names) > 0:
#assert 1 in x.shape,"Matrix error: diagonal matrix must have " +\
# "one dimension == 1,shape is {0}".format(x.shape)
mx_dim = max(x.shape)
assert len(row_names) == mx_dim,\
'Matrix.__init__(): diagonal shape[1] != len(row_names) ' +\
str(x.shape) + ' ' + str(len(row_names))
#x = x.transpose()
else:
if len(row_names) > 0:
assert len(row_names) == x.shape[0],\
'Matrix.__init__(): shape[0] != len(row_names) ' +\
str(x.shape) + ' ' + str(len(row_names))
if len(col_names) > 0:
# if this a row vector
if len(row_names) == 0 and x.shape[1] == 1:
x.transpose()
assert len(col_names) == x.shape[1],\
'Matrix.__init__(): shape[1] != len(col_names) ' + \
str(x.shape) + ' ' + str(len(col_names))
self.__x = x
self.isdiagonal = bool(isdiagonal)
self.autoalign = bool(autoalign)
def reset_x(self,x):
"""reset self.__x private attribute
Parameters
----------
x : numpy.ndarray
Note
----
makes a copy of 'x' argument
"""
assert x.shape == self.shape
self.__x = x.copy()
def __str__(self):
"""overload of object.__str__()
Returns
-------
str : str
"""
s = "shape:{0}:{1}".format(*self.shape)+" row names: " + str(self.row_names) + \
'\n' + "col names: " + str(self.col_names) + '\n' + str(self.__x)
return s
def __getitem__(self, item):
"""a very crude overload of object.__getitem__().
Parameters
----------
item : iterable
something that can be used as an index
Returns
-------
Matrix : Matrix
an object that is a sub-Matrix of self
"""
if self.isdiagonal and isinstance(item, tuple):
submat = np.atleast_2d((self.__x[item[0]]))
else:
submat = np.atleast_2d(self.__x[item])
# transpose a row vector to a column vector
if submat.shape[0] == 1:
submat = submat.transpose()
row_names = self.row_names[:submat.shape[0]]
if self.isdiagonal:
col_names = row_names
else:
col_names = self.col_names[:submat.shape[1]]
return type(self)(x=submat, isdiagonal=self.isdiagonal,
row_names=row_names, col_names=col_names,
autoalign=self.autoalign)
def __pow__(self, power):
"""overload of numpy.ndarray.__pow__() operator
Parameters
----------
power: (int or float)
interpreted as follows: -1 = inverse of self,
-0.5 = sqrt of inverse of self,
0.5 = sqrt of self. All other positive
ints = elementwise self raised to power
Returns
-------
Matrix : Matrix
a new Matrix object
"""
if power < 0:
if power == -1:
return self.inv
elif power == -0.5:
return (self.inv).sqrt
else:
raise NotImplementedError("Matrix.__pow__() not implemented " +
"for negative powers except for -1")
elif int(power) != float(power):
if power == 0.5:
return self.sqrt
else:
raise NotImplementedError("Matrix.__pow__() not implemented " +
"for fractional powers except 0.5")
else:
return type(self)(self.__x**power, row_names=self.row_names,
col_names=self.col_names,
isdiagonal=self.isdiagonal)
def __sub__(self, other):
"""numpy.ndarray.__sub__() overload. Tries to speedup by
checking for scalars of diagonal matrices on either side of operator
Parameters
----------
other : scalar,numpy.ndarray,Matrix object
the thing to difference
Returns
-------
Matrix : Matrix
"""
if np.isscalar(other):
return Matrix(x=self.x - other, row_names=self.row_names,
col_names=self.col_names,
isdiagonal=self.isdiagonal)
else:
if isinstance(other, np.ndarray):
assert self.shape == other.shape, "Matrix.__sub__() shape" +\
"mismatch: " +\
str(self.shape) + ' ' + \
str(other.shape)
if self.isdiagonal:
elem_sub = -1.0 * other
for j in range(self.shape[0]):
elem_sub[j, j] += self.x[j]
return type(self)(x=elem_sub, row_names=self.row_names,
col_names=self.col_names)
else:
return type(self)(x=self.x - other,
row_names=self.row_names,
col_names=self.col_names)
elif isinstance(other, Matrix):
if self.autoalign and other.autoalign \
and not self.element_isaligned(other):
common_rows = get_common_elements(self.row_names,
other.row_names)
common_cols = get_common_elements(self.col_names,
other.col_names)
if len(common_rows) == 0:
raise Exception("Matrix.__sub__ error: no common rows")
if len(common_cols) == 0:
raise Exception("Matrix.__sub__ error: no common cols")
first = self.get(row_names=common_rows,
col_names=common_cols)
second = other.get(row_names=common_rows,
col_names=common_cols)
else:
assert self.shape == other.shape, \
"Matrix.__sub__():shape mismatch: " +\
str(self.shape) + ' ' + str(other.shape)
first = self
second = other
if first.isdiagonal and second.isdiagonal:
return type(self)(x=first.x - second.x, isdiagonal=True,
row_names=first.row_names,
col_names=first.col_names)
elif first.isdiagonal:
elem_sub = -1.0 * second.newx
for j in range(first.shape[0]):
elem_sub[j, j] += first.x[j, 0]
return type(self)(x=elem_sub, row_names=first.row_names,
col_names=first.col_names)
elif second.isdiagonal:
elem_sub = first.newx
for j in range(second.shape[0]):
elem_sub[j, j] -= second.x[j, 0]
return type(self)(x=elem_sub, row_names=first.row_names,
col_names=first.col_names)
else:
return type(self)(x=first.x - second.x,
row_names=first.row_names,
col_names=first.col_names)
def __add__(self, other):
"""Overload of numpy.ndarray.__add__(). Tries to speedup by checking for
scalars of diagonal matrices on either side of operator
Parameters
----------
other : scalar,numpy.ndarray,Matrix object
the thing to add
Returns
-------
Matrix : Matrix
"""
if np.isscalar(other):
return type(self)(x=self.x + other)
if isinstance(other, np.ndarray):
assert self.shape == other.shape, \
"Matrix.__add__(): shape mismatch: " +\
str(self.shape) + ' ' + str(other.shape)
if self.isdiagonal:
raise NotImplementedError("Matrix.__add__ not supported for" +
"diagonal self")
else:
return type(self)(x=self.x + other, row_names=self.row_names,
col_names=self.col_names)
elif isinstance(other, Matrix):
if self.autoalign and other.autoalign \
and not self.element_isaligned(other):
common_rows = get_common_elements(self.row_names,
other.row_names)
common_cols = get_common_elements(self.col_names,
other.col_names)
if len(common_rows) == 0:
raise Exception("Matrix.__add__ error: no common rows")
if len(common_cols) == 0:
raise Exception("Matrix.__add__ error: no common cols")
first = self.get(row_names=common_rows, col_names=common_cols)
second = other.get(row_names=common_rows, col_names=common_cols)
else:
assert self.shape == other.shape, \
"Matrix.__add__(): shape mismatch: " +\
str(self.shape) + ' ' + str(other.shape)
first = self
second = other
if first.isdiagonal and second.isdiagonal:
return type(self)(x=first.x + second.x, isdiagonal=True,
row_names=first.row_names,
col_names=first.col_names)
elif first.isdiagonal:
ox = second.newx
for j in range(first.shape[0]):
ox[j, j] += first.__x[j]
return type(self)(x=ox, row_names=first.row_names,
col_names=first.col_names)
elif second.isdiagonal:
x = first.x
for j in range(second.shape[0]):
x[j, j] += second.x[j]
return type(self)(x=x, row_names=first.row_names,
col_names=first.col_names)
else:
return type(self)(x=first.x + second.x,
row_names=first.row_names,
col_names=first.col_names)
else:
raise Exception("Matrix.__add__(): unrecognized type for " +
"other in __add__: " + str(type(other)))
def hadamard_product(self, other):
"""Overload of numpy.ndarray.__mult__(): element-wise multiplication.
Tries to speedup by checking for scalars of diagonal matrices on
either side of operator
Parameters
----------
other : scalar,numpy.ndarray,Matrix object
the thing for element-wise multiplication
Returns
-------
Matrix : Matrix
"""
if np.isscalar(other):
return type(self)(x=self.x * other)
if isinstance(other, np.ndarray):
assert self.shape == other.shape, \
"Matrix.hadamard_product(): shape mismatch: " + \
str(self.shape) + ' ' + str(other.shape)
if self.isdiagonal:
raise NotImplementedError("Matrix.hadamard_product() not supported for" +
"diagonal self")
else:
return type(self)(x=self.x * other, row_names=self.row_names,
col_names=self.col_names)
elif isinstance(other, Matrix):
if self.autoalign and other.autoalign \
and not self.element_isaligned(other):
common_rows = get_common_elements(self.row_names,
other.row_names)
common_cols = get_common_elements(self.col_names,
other.col_names)
if len(common_rows) == 0:
raise Exception("Matrix.hadamard_product error: no common rows")
if len(common_cols) == 0:
raise Exception("Matrix.hadamard_product error: no common cols")
first = self.get(row_names=common_rows, col_names=common_cols)
second = other.get(row_names=common_rows, col_names=common_cols)
else:
assert self.shape == other.shape, \
"Matrix.hadamard_product(): shape mismatch: " + \
str(self.shape) + ' ' + str(other.shape)
first = self
second = other
if first.isdiagonal and second.isdiagonal:
return type(self)(x=first.x * second.x, isdiagonal=True,
row_names=first.row_names,
col_names=first.col_names)
# elif first.isdiagonal:
# #ox = second.as_2d
# #for j in range(first.shape[0]):
# # ox[j, j] *= first.__x[j]
# return type(self)(x=first.as_2d * second.as_2d, row_names=first.row_names,
# col_names=first.col_names)
# elif second.isdiagonal:
# #x = first.as_2d
# #for j in range(second.shape[0]):
# # x[j, j] *= second.x[j]
# return type(self)(x=first.x * second.as_2d, row_names=first.row_names,
# col_names=first.col_names)
else:
return type(self)(x=first.as_2d * second.as_2d,
row_names=first.row_names,
col_names=first.col_names)
else:
raise Exception("Matrix.hadamard_product(): unrecognized type for " +
"other: " + str(type(other)))
def __mul__(self, other):
"""Dot product multiplication overload. Tries to speedup by
checking for scalars or diagonal matrices on either side of operator
Parameters
----------
other : scalar,numpy.ndarray,Matrix object
the thing the dot product against
Returns:
Matrix : Matrix
"""
if np.isscalar(other):
return type(self)(x=self.__x.copy() * other,
row_names=self.row_names,
col_names=self.col_names,
isdiagonal=self.isdiagonal)
elif isinstance(other, np.ndarray):
assert self.shape[1] == other.shape[0], \
"Matrix.__mul__(): matrices are not aligned: " +\
str(self.shape) + ' ' + str(other.shape)
if self.isdiagonal:
return type(self)(x=np.dot(np.diag(self.__x.flatten()).transpose(),
other))
else:
return type(self)(x=np.dot(self.__x, other))
elif isinstance(other, Matrix):
if self.autoalign and other.autoalign\
and not self.mult_isaligned(other):
common = get_common_elements(self.col_names, other.row_names)
assert len(common) > 0,"Matrix.__mult__():self.col_names " +\
"and other.row_names" +\
"don't share any common elements. first 10: " +\
','.join(self.col_names[:9]) + '...and..' +\
','.join(other.row_names[:9])
# these should be aligned
if isinstance(self, Cov):
first = self.get(row_names=common, col_names=common)
else:
first = self.get(row_names=self.row_names, col_names=common)
if isinstance(other, Cov):
second = other.get(row_names=common, col_names=common)
else:
second = other.get(row_names=common,
col_names=other.col_names)
else:
assert self.shape[1] == other.shape[0], \
"Matrix.__mul__(): matrices are not aligned: " +\
str(self.shape) + ' ' + str(other.shape)
first = self
second = other
if first.isdiagonal and second.isdiagonal:
elem_prod = type(self)(x=first.x.transpose() * second.x,
row_names=first.row_names,
col_names=second.col_names)
elem_prod.isdiagonal = True
return elem_prod
elif first.isdiagonal:
ox = second.newx
for j in range(first.shape[0]):
ox[j, :] *= first.x[j]
return type(self)(x=ox, row_names=first.row_names,
col_names=second.col_names)
elif second.isdiagonal:
x = first.newx
ox = second.x
for j in range(first.shape[1]):
x[:, j] *= ox[j]
return type(self)(x=x, row_names=first.row_names,
col_names=second.col_names)
else:
return type(self)(np.dot(first.x, second.x),
row_names=first.row_names,
col_names=second.col_names)
else:
raise Exception("Matrix.__mul__(): unrecognized " +
"other arg type in __mul__: " + str(type(other)))
def __rmul__(self, other):
"""Reverse order Dot product multiplication overload.
Parameters
----------
other : scalar,numpy.ndarray,Matrix object
the thing the dot product against
Returns
-------
Matrix : Matrix
"""
if np.isscalar(other):
return type(self)(x=self.__x.copy() * other,row_names=self.row_names,\
col_names=self.col_names,isdiagonal=self.isdiagonal)
elif isinstance(other, np.ndarray):
assert self.shape[0] == other.shape[1], \
"Matrix.__rmul__(): matrices are not aligned: " +\
str(other.shape) + ' ' + str(self.shape)
if self.isdiagonal:
return type(self)(x=np.dot(other,np.diag(self.__x.flatten()).\
transpose()))
else:
return type(self)(x=np.dot(other,self.__x))
elif isinstance(other, Matrix):
if self.autoalign and other.autoalign \
and not self.mult_isaligned(other):
common = get_common_elements(self.row_names, other.col_names)
assert len(common) > 0,"Matrix.__rmul__():self.col_names " +\
"and other.row_names" +\
"don't share any common elements"
# these should be aligned
if isinstance(self, Cov):
first = self.get(row_names=common, col_names=common)
else:
first = self.get(col_names=self.row_names, row_names=common)
if isinstance(other, Cov):
second = other.get(row_names=common, col_names=common)
else:
second = other.get(col_names=common,
row_names=other.col_names)
else:
assert self.shape[0] == other.shape[1], \
"Matrix.__rmul__(): matrices are not aligned: " +\
str(other.shape) + ' ' + str(self.shape)
first = other
second = self
if first.isdiagonal and second.isdiagonal:
elem_prod = type(self)(x=first.x.transpose() * second.x,
row_names=first.row_names,
col_names=second.col_names)
elem_prod.isdiagonal = True
return elem_prod
elif first.isdiagonal:
ox = second.newx
for j in range(first.shape[0]):
ox[j, :] *= first.x[j]
return type(self)(x=ox, row_names=first.row_names,
col_names=second.col_names)
elif second.isdiagonal:
x = first.newx
ox = second.x
for j in range(first.shape[1]):
x[:, j] *= ox[j]
return type(self)(x=x, row_names=first.row_names,
col_names=second.col_names)
else:
return type(self)(np.dot(first.x, second.x),
row_names=first.row_names,
col_names=second.col_names)
else:
raise Exception("Matrix.__rmul__(): unrecognized " +
"other arg type in __mul__: " + str(type(other)))
def __set_svd(self):
"""private method to set SVD components.
Note: this should not be called directly
"""
if self.isdiagonal:
x = np.diag(self.x.flatten())
else:
# just a pointer to x
x = self.x
try:
u, s, v = la.svd(x, full_matrices=True)
v = v.transpose()
except Exception as e:
print("standard SVD failed: {0}".format(str(e)))
try:
v, s, u = la.svd(x.transpose(), full_matrices=True)
u = u.transpose()
except Exception as e:
np.savetxt("failed_svd.dat",x,fmt="%15.6E")
raise Exception("Matrix.__set_svd(): " +
"unable to compute SVD of self.x, " +
"saved matrix to 'failed_svd.dat' -- {0}".\
format(str(e)))
col_names = ["left_sing_vec_" + str(i + 1) for i in range(u.shape[1])]
self.__u = Matrix(x=u, row_names=self.row_names,
col_names=col_names, autoalign=False)
sing_names = ["sing_val_" + str(i + 1) for i in range(s.shape[0])]
self.__s = Matrix(x=np.atleast_2d(s).transpose(), row_names=sing_names,
col_names=sing_names, isdiagonal=True,
autoalign=False)
col_names = ["right_sing_vec_" + str(i + 1) for i in range(v.shape[0])]
self.__v = Matrix(v, row_names=self.col_names, col_names=col_names,
autoalign=False)
def mult_isaligned(self, other):
"""check if matrices are aligned for dot product multiplication
Parameters
----------
other : (Matrix)
Returns
-------
bool : bool
True if aligned, False if not aligned
"""
assert isinstance(other, Matrix), \
"Matrix.isaligned(): other argumnent must be type Matrix, not: " +\
str(type(other))
if self.col_names == other.row_names:
return True
else:
return False
def element_isaligned(self, other):
"""check if matrices are aligned for element-wise operations
Parameters
----------
other : Matrix
Returns
-------
bool : bool
True if aligned, False if not aligned
"""
assert isinstance(other, Matrix), \
"Matrix.isaligned(): other argument must be type Matrix, not: " +\
str(type(other))
if self.row_names == other.row_names \
and self.col_names == other.col_names:
return True
else:
return False
@property
def newx(self):
"""return a copy of x
Returns
-------
numpy.ndarray : numpy.ndarray
"""
return self.__x.copy()
@property
def x(self):
"""return a reference to x
Returns
-------
numpy.ndarray : numpy.ndarray
"""
return self.__x
@property
def as_2d(self):
""" get a 2D representation of x. If not self.isdiagonal, simply
return reference to self.x, otherwise, constructs and returns
a 2D, diagonal ndarray
Returns
-------
numpy.ndarray : numpy.ndarray
"""
if not self.isdiagonal:
return self.x
return np.diag(self.x.flatten())
@property
def shape(self):
"""get the implied, 2D shape of self
Returns
-------
tuple : tuple
length 2 tuple of ints
"""
if self.__x is not None:
if self.isdiagonal:
return (max(self.__x.shape), max(self.__x.shape))
if len(self.__x.shape) == 1:
raise Exception("Matrix.shape: Matrix objects must be 2D")
return self.__x.shape
return None
@property
def ncol(self):
""" length of second dimension
Returns
-------
int : int
number of columns
"""
return self.shape[1]
@property
def nrow(self):
""" length of first dimensions
Returns
-------
int : int
number of rows
"""
return self.shape[0]
@property
def T(self):
"""wrapper function for Matrix.transpose() method
"""
return self.transpose
@property
def transpose(self):
"""transpose operation of self
Returns
-------
Matrix : Matrix
transpose of self
"""
if not self.isdiagonal:
return type(self)(x=self.__x.copy().transpose(),
row_names=self.col_names,
col_names=self.row_names,
autoalign=self.autoalign)
else:
return type(self)(x=self.__x.copy(), row_names=self.row_names,
col_names=self.col_names,
isdiagonal=True, autoalign=self.autoalign)
@property
def inv(self):
"""inversion operation of self
Returns
-------
Matrix : Matrix
inverse of self
"""
if self.isdiagonal:
inv = 1.0 / self.__x
if (np.any(~np.isfinite(inv))):
idx = np.isfinite(inv)
np.savetxt("testboo.dat",idx)
invalid = [self.row_names[i] for i in range(idx.shape[0]) if idx[i] == 0.0]
raise Exception("Matrix.inv has produced invalid floating points " +
" for the following elements:" + ','.join(invalid))
return type(self)(x=inv, isdiagonal=True,
row_names=self.row_names,
col_names=self.col_names,
autoalign=self.autoalign)
else:
return type(self)(x=la.inv(self.__x), row_names=self.row_names,
col_names=self.col_names,
autoalign=self.autoalign)
def get_maxsing(self,eigthresh=1.0e-5):
""" Get the number of singular components with a singular
value ratio greater than or equal to eigthresh
Parameters
----------
eigthresh : float
the ratio of the largest to smallest singular value
Returns
-------
int : int
number of singular components
"""
sthresh =np.abs((self.s.x / self.s.x[0]) - eigthresh)
return max(1,np.argmin(sthresh))
def pseudo_inv_components(self,maxsing=None,eigthresh=1.0e-5):
""" Get the truncated SVD components
Parameters
----------
maxsing : int
the number of singular components to use. If None,
maxsing is calculated using Matrix.get_maxsing() and eigthresh
eigthresh : float
the ratio of largest to smallest singular components to use
for truncation. Ignored if maxsing is not None
Returns
-------
u : Matrix
truncated left singular vectors
s : Matrix
truncated singular value matrix
v : Matrix
truncated right singular vectors
"""
if maxsing is None:
maxsing = self.get_maxsing(eigthresh=eigthresh)
s = self.s[:maxsing,:maxsing]
v = self.v[:,:maxsing]
u = self.u[:,:maxsing]
return u,s,v
def pseudo_inv(self,maxsing=None,eigthresh=1.0e-5):
""" The pseudo inverse of self. Formed using truncated singular
value decomposition and Matrix.pseudo_inv_components
Parameters
----------
maxsing : int
the number of singular components to use. If None,
maxsing is calculated using Matrix.get_maxsing() and eigthresh
eigthresh : float
the ratio of largest to smallest singular components to use
for truncation. Ignored if maxsing is not None
Returns
-------
Matrix : Matrix
"""
if maxsing is None:
maxsing = self.get_maxsing(eigthresh=eigthresh)
full_s = self.full_s.T
for i in range(self.s.shape[0]):
if i <= maxsing:
full_s.x[i,i] = 1.0 / full_s.x[i,i]
else:
full_s.x[i,i] = 0.0
return self.v * full_s * self.u.T
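# Usage sketch (illustrative): a truncated pseudo-inverse assembled from the SVD
# components; with all components retained it agrees with numpy's pinv.
#
#   >>> import numpy as np
#   >>> m = Matrix(x=np.array([[2., 0.], [1., 3.]]),
#   ...            row_names=['r1', 'r2'], col_names=['c1', 'c2'])
#   >>> np.allclose(m.pseudo_inv(maxsing=2).x, np.linalg.pinv(m.x))
#   True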
@property
def sqrt(self):
"""square root operation
Returns
-------
Matrix : Matrix
square root of self
"""
if self.isdiagonal:
return type(self)(x=np.sqrt(self.__x), isdiagonal=True,
row_names=self.row_names,
col_names=self.col_names,
autoalign=self.autoalign)
elif self.shape[1] == 1: #a vector
return type(self)(x=np.sqrt(self.__x), isdiagonal=False,
row_names=self.row_names,
col_names=self.col_names,
autoalign=self.autoalign)
else:
return type(self)(x=la.sqrtm(self.__x), row_names=self.row_names,
col_names=self.col_names,
autoalign=self.autoalign)
@property
def full_s(self):
""" Get the full singular value matrix of self
Returns
-------
Matrix : Matrix
"""
x = np.zeros((self.shape),dtype=np.float32)
x[:self.s.shape[0],:self.s.shape[0]] = self.s.as_2d
s = Matrix(x=x, row_names=self.row_names,
col_names=self.col_names, isdiagonal=False,
autoalign=False)
return s
@property
def s(self):
"""the singular value (diagonal) Matrix
Returns
-------
Matrix : Matrix
"""
if self.__s is None:
self.__set_svd()
return self.__s
@property
def u(self):
"""the left singular vector Matrix
Returns
-------
Matrix : Matrix
"""
if self.__u is None:
self.__set_svd()
return self.__u
@property
def v(self):
"""the right singular vector Matrix
Returns
-------
Matrix : Matrix
"""
if self.__v is None:
self.__set_svd()
return self.__v
@property
def zero2d(self):
""" get an 2D instance of self with all zeros
Returns
-------
Matrix : Matrix
"""
return type(self)(x=np.atleast_2d(np.zeros((self.shape[0],self.shape[1]))),
row_names=self.row_names,
col_names=self.col_names,
isdiagonal=False)
def indices(self, names, axis=None):
"""get the row and col indices of names. If axis is None, two ndarrays
are returned, corresponding to the indices of names for each axis
Parameters
----------
names : iterable
column and/or row names
axis : (int) (optional)
the axis to search.
Returns
-------
numpy.ndarray : numpy.ndarray
indices of names.
"""
row_idxs, col_idxs = [], []
for name in names:
if name.lower() not in self.col_names \
and name.lower() not in self.row_names:
raise Exception('Matrix.indices(): name not found: ' + name)
if name.lower() in self.col_names:
col_idxs.append(self.col_names.index(name))
if name.lower() in self.row_names:
row_idxs.append(self.row_names.index(name))
if axis is None:
return np.array(row_idxs, dtype=np.int32),\
np.array(col_idxs, dtype=np.int32)
elif axis == 0:
if len(row_idxs) != len(names):
raise Exception("Matrix.indices(): " +
"not all names found in row_names")
return np.array(row_idxs, dtype=np.int32)
elif axis == 1:
if len(col_idxs) != len(names):
raise Exception("Matrix.indices(): " +
"not all names found in col_names")
return np.array(col_idxs, dtype=np.int32)
else:
raise Exception("Matrix.indices(): " +
"axis argument must 0 or 1, not:" + str(axis))
def align(self, names, axis=None):
"""reorder self by names. If axis is None, reorder both indices
Parameters
----------
names : iterable
names in rows and/or columns
axis : (int)
the axis to reorder. if None, reorder both axes
"""
if not isinstance(names, list):
names = [names]
row_idxs, col_idxs = self.indices(names)
if self.isdiagonal or isinstance(self, Cov):
assert row_idxs.shape == col_idxs.shape
assert row_idxs.shape[0] == self.shape[0]
if self.isdiagonal:
self.__x = self.__x[row_idxs]
else:
self.__x = self.__x[row_idxs, :]
self.__x = self.__x[:, col_idxs]
row_names = []
[row_names.append(self.row_names[i]) for i in row_idxs]
self.row_names, self.col_names = row_names, row_names
else:
if axis is None:
raise Exception("Matrix.align(): must specify axis in " +
"align call for non-diagonal instances")
if axis == 0:
assert row_idxs.shape[0] == self.shape[0], \
"Matrix.align(): not all names found in self.row_names"
self.__x = self.__x[row_idxs, :]
row_names = []
[row_names.append(self.row_names[i]) for i in row_idxs]
self.row_names = row_names
elif axis == 1:
assert col_idxs.shape[0] == self.shape[1], \
"Matrix.align(): not all names found in self.col_names"
self.__x = self.__x[:, col_idxs]
col_names = []
[col_names.append(self.col_names[i]) for i in col_idxs]
self.col_names = col_names
else:
raise Exception("Matrix.align(): axis argument to align()" +
" must be either 0 or 1")
def get(self, row_names=None, col_names=None, drop=False):
"""get a new Matrix instance ordered on row_names or col_names
Parameters
----------
row_names : iterable
row_names for new Matrix
col_names : iterable
col_names for new Matrix
drop : bool
flag to remove row_names and/or col_names
Returns
-------
Matrix : Matrix
"""
if row_names is None and col_names is None:
raise Exception("Matrix.get(): must pass at least" +
" row_names or col_names")
if row_names is not None and not isinstance(row_names, list):
row_names = [row_names]
if col_names is not None and not isinstance(col_names, list):
col_names = [col_names]
if isinstance(self,Cov) and (row_names is None or col_names is None ):
if row_names is not None:
idxs = self.indices(row_names, axis=0)
names = row_names
else:
idxs = self.indices(col_names, axis=1)
names = col_names
if self.isdiagonal:
extract = self.__x[idxs].copy()
else:
extract = self.__x[idxs, :].copy()
extract = extract[:, idxs.copy()]
if drop:
self.drop(names, 0)
return Cov(x=extract, names=names, isdiagonal=self.isdiagonal)
if self.isdiagonal:
extract = np.diag(self.__x[:, 0])
else:
extract = self.__x.copy()
if row_names is not None:
row_idxs = self.indices(row_names, axis=0)
extract = np.atleast_2d(extract[row_idxs, :].copy())
if drop:
self.drop(row_names, axis=0)
else:
row_names = self.row_names
if col_names is not None:
col_idxs = self.indices(col_names, axis=1)
extract = np.atleast_2d(extract[:, col_idxs].copy())
if drop:
self.drop(col_names, axis=1)
else:
col_names = copy.deepcopy(self.col_names)
return type(self)(x=extract, row_names=row_names, col_names=col_names)
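# Usage sketch (illustrative): get() returns a named sub-matrix and leaves self
# unchanged; extract() below additionally drops the selected rows/cols in place.
#
#   >>> import numpy as np
#   >>> m = Matrix(x=np.arange(6.).reshape(2, 3),
#   ...            row_names=['r1', 'r2'], col_names=['c1', 'c2', 'c3'])
#   >>> m.get(row_names='r1', col_names=['c1', 'c3']).x
#   array([[0., 2.]])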
def drop(self, names, axis):
""" drop elements from self in place
Parameters
----------
names : iterable
names to drop
axis : (int)
the axis to drop from. must be in [0,1]
"""
if axis is None:
raise Exception("Matrix.drop(): axis arg is required")
if not isinstance(names, list):
names = [names]
if axis == 1:
assert len(names) < self.shape[1], "can't drop all names along axis 1"
else:
assert len(names) < self.shape[0], "can't drop all names along axis 0"
idxs = self.indices(names, axis=axis)
if self.isdiagonal:
self.__x = np.delete(self.__x, idxs, 0)
keep_names = [name for name in self.row_names if name not in names]
assert len(keep_names) == self.__x.shape[0],"shape-name mismatch:"+\
"{0}:{0}".format(len(keep_names),self.__x.shape)
self.row_names = keep_names
self.col_names = copy.deepcopy(keep_names)
# idxs = np.sort(idxs)
# for idx in idxs[::-1]:
# del self.row_names[idx]
# del self.col_names[idx]
elif isinstance(self,Cov):
self.__x = np.delete(self.__x, idxs, 0)
self.__x = np.delete(self.__x, idxs, 1)
keep_names = [name for name in self.row_names if name not in names]
assert len(keep_names) == self.__x.shape[0],"shape-name mismatch:"+\
"{0}:{0}".format(len(keep_names),self.__x.shape)
self.row_names = keep_names
self.col_names = copy.deepcopy(keep_names)
# idxs = np.sort(idxs)
# for idx in idxs[::-1]:
# del self.row_names[idx]
# del self.col_names[idx]
elif axis == 0:
if idxs.shape[0] == self.shape[0]:
raise Exception("Matrix.drop(): can't drop all rows")
elif idxs.shape[0] == 0:
raise Exception("Matrix.drop(): nothing to drop on axis 0")
self.__x = np.delete(self.__x, idxs, 0)
keep_names = [name for name in self.row_names if name not in names]
assert len(keep_names) == self.__x.shape[0],"shape-name mismatch:"+\
"{0}:{0}".format(len(keep_names),self.__x.shape)
self.row_names = keep_names
# idxs = np.sort(idxs)
# for idx in idxs[::-1]:
# del self.row_names[idx]
elif axis == 1:
if idxs.shape[0] == self.shape[1]:
raise Exception("Matrix.drop(): can't drop all cols")
if idxs.shape[0] == 0:
raise Exception("Matrix.drop(): nothing to drop on axis 1")
self.__x = np.delete(self.__x, idxs, 1)
keep_names = [name for name in self.col_names if name not in names]
assert len(keep_names) == self.__x.shape[1],"shape-name mismatch:"+\
"{0}:{0}".format(len(keep_names),self.__x.shape)
self.col_names = keep_names
# idxs = np.sort(idxs)
# for idx in idxs[::-1]:
# del self.col_names[idx]
else:
raise Exception("Matrix.drop(): axis argument must be 0 or 1")
def extract(self, row_names=None, col_names=None):
"""wrapper method that Matrix.gets() then Matrix.drops() elements.
one of row_names or col_names must be not None.
Parameters
----------
row_names : iterable
row names to extract
col_names : iterable
col_names to extract
Returns
-------
Matrix : Matrix
"""
if row_names is None and col_names is None:
raise Exception("Matrix.extract() " +
"row_names and col_names both None")
extract = self.get(row_names, col_names, drop=True)
return extract
def get_diagonal_vector(self, col_name="diag"):
"""Get a new Matrix instance that is the diagonal of self. The
shape of the new matrix is (self.shape[0],1). Self must be square
Parameters
----------
col_name : str
the name of the column in the new Matrix
Returns
-------
Matrix : Matrix
"""
assert self.shape[0] == self.shape[1]
assert not self.isdiagonal
assert isinstance(col_name,str)
return type(self)(x=np.atleast_2d(np.diag(self.x)).transpose(), row_names=self.row_names, col_names=[col_name])
import numpy as np
from skimage.exposure import equalize_adapthist
import torch
from scipy.ndimage import gaussian_filter
import scipy
import random
import torch as th
from PIL import Image
from scipy.interpolate import RectBivariateSpline
class MyRandomImageContrastTransform(object):
def __init__(self, random_state=None, is_labelmap=[False, True], clip_limit_range=[0.01, 1], nbins=256,
enable=False):
"""
Perform Contrast Limited Adaptive Histogram Equalization (CLAHE)
. An algorithm for local contrast enhancement, that uses histograms computed over different tile regions of the
image. Local details can therefore be enhanced even in regions that are darker or lighter than most of the image.
Based on https://scikit-image.org/docs/dev/api/skimage.exposure.html?highlight=equalize_adapthist#skimage.exposure.equalize_adapthist
Arguments
---------
"""
self.random_state = random_state
self.clip_limit_range = clip_limit_range # [0,1] The larger the value, the higher the contrast
self.nbins = nbins
self.is_label_map = is_labelmap
self.enable = enable
def __call__(self, *inputs):
if self.enable:
outputs = []
assert len(self.is_label_map) == len(
inputs), 'for each input, must clarify whether this is a label map or not.'
clip_limit = np.random.uniform(low=self.clip_limit_range[0], high=self.clip_limit_range[1])
for idx, _input in enumerate(inputs):
_input = _input.numpy()
flag = self.is_label_map[idx]
if flag:
result = _input
else:
print(_input.shape)
result = np.zeros(_input.shape, dtype=_input.dtype)
for i in range(_input.shape[0]):
temp = _input[i]
print('temp shape', temp.shape)
_input_min = temp.min()
_input_max = temp.max()
## clahe works on integer intensities; rescale to [0, 255] and cast to int16
temp = intensity_normalise(temp, perc_threshold=(0., 100.0), min_val=0, max_val=255)
temp = np.int16(temp)
clahe_output = equalize_adapthist(temp, clip_limit=clip_limit, nbins=self.nbins)
## recover intensity range
result[i] = intensity_normalise(clahe_output, perc_threshold=(0., 100.0), min_val=_input_min,
max_val=_input_max)
tensorresult = torch.from_numpy(result).float()
outputs.append(tensorresult)
return outputs if idx >= 1 else outputs[0]
else:
outputs = inputs
return outputs
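# Usage sketch (illustrative; tensor shapes are assumptions): apply CLAHE to an
# intensity image while passing its label map through untouched.
#
#   import torch
#   aug = MyRandomImageContrastTransform(is_labelmap=[False, True], enable=True)
#   image = torch.rand(1, 128, 128)     # C*H*W intensity image in [0, 1]
#   label = torch.zeros(1, 128, 128)    # C*H*W label map
#   image_aug, label_aug = aug(image, label)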
class RandomGamma(object):
'''
Perform Random Gamma Contrast Adjusting
support 2D and 3D
'''
def __init__(self, p_thresh=0.5, gamma_range=[0.8, 1.4], gamma_flag=True, preserve_range=True):
"""
Randomly do gamma to a torch tensor
Arguments
--------
:param gamma_flag: [bool] list of flags for gamma aug
"""
self.gamma_range = gamma_range
self.p_thresh = p_thresh
self.gamma_flag = gamma_flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
if np.random.rand() < self.p_thresh:
gamma = random.random() * (self.gamma_range[1] - self.gamma_range[0]) + self.gamma_range[0] #
# print ('gamma: %f',gamma)
for idx, _input in enumerate(inputs):
assert inputs[0].size() == _input.size()
if (self.gamma_flag[idx]):
assert gamma > 0
if self.preserve_range:
self.c_min = _input.min()
self.c_max = _input.max()
_input = _input ** (1.0 / gamma)
if self.preserve_range:
_input[_input < self.c_min] = self.c_min
_input[_input > self.c_max] = self.c_max
outputs.append(_input)
else:
idx = len(inputs)
outputs = inputs
return outputs if idx >= 1 else outputs[0]
class RandomBrightnessFluctuation(object):
'''
Perform image contrast and brightness augmentation.
support 2D and 3D
'''
def __init__(self, p=0.5, contrast_range=[0.8, 1.2], brightness_range=[-0.1, 0.1], flag=True, preserve_range=True):
"""
Arguments
--------
:param flag: [bool] list of flags for aug
"""
self.contrast_range = contrast_range
self.brightness_range = brightness_range
self.p_thresh = p
self.flag = flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
if np.random.rand() < self.p_thresh:
scale = random.random() * (self.contrast_range[1] - self.contrast_range[0]) + self.contrast_range[0] #
brightness = random.random() * (self.brightness_range[1] - self.brightness_range[0]) + self.brightness_range[0]
# print ('gamma: %f',gamma)
for idx, _input in enumerate(inputs):
assert inputs[0].size() == _input.size()
if (self.flag[idx]):
assert scale > 0
if self.preserve_range:
self.c_min = _input.min()
self.c_max = _input.max()
_input = _input * scale + brightness
if self.preserve_range:
_input[_input < self.c_min] = self.c_min
_input[_input > self.c_max] = self.c_max
outputs.append(_input)
else:
idx = len(inputs)
outputs = inputs
return outputs if idx >= 1 else outputs[0]
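# Usage sketch (illustrative): random linear contrast/brightness jitter applied
# to an image tensor while a paired label tensor is left unchanged.
#
#   import torch
#   jitter = RandomBrightnessFluctuation(p=1.0, flag=[True, False])
#   image = torch.rand(1, 64, 64)
#   label = torch.zeros(1, 64, 64)
#   image_aug, label_aug = jitter(image, label)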
def intensity_normalise(img_data, perc_threshold=(0., 99.0), min_val=0., max_val=1):
'''
intensity_normalise
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
img_data=3D matrix [N*H*W]
'''
if len(img_data.shape) == 3:
output = np.zeros_like(img_data)
assert img_data.shape[0] < img_data.shape[1], 'check data is formatted as N*H*W'
for idx in range(img_data.shape[0]): #
slice_data = img_data[idx]
a_min_val, a_max_val = np.percentile(slice_data, perc_threshold)
## restrict the intensity range
slice_data[slice_data <= a_min_val] = a_min_val
slice_data[slice_data >= a_max_val] = a_max_val
## perform normalisation
scale = (max_val - min_val) / (a_max_val - a_min_val)
bias = max_val - scale * a_max_val
output[idx] = slice_data * scale + bias
return output
elif len(img_data.shape) == 2:
a_min_val, a_max_val = np.percentile(img_data, perc_threshold)
## restrict the intensity range
img_data[img_data <= a_min_val] = a_min_val
img_data[img_data >= a_max_val] = a_max_val
## perform normalisation
scale = (max_val - min_val) / (a_max_val - a_min_val)
bias = max_val - scale * a_max_val
output = img_data * scale + bias
return output
else:
raise NotImplementedError
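# Worked example (illustrative): the linear map sends the chosen percentiles of
# the input onto [min_val, max_val].
#
#   import numpy as np
#   img = np.linspace(0., 200., num=64).reshape(8, 8)
#   out = intensity_normalise(img, perc_threshold=(0., 100.), min_val=0., max_val=1.)
#   assert np.isclose(out.min(), 0.) and np.isclose(out.max(), 1.)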
def contrast_enhancement(img_data, clip_limit=0.01, nbins=256):
if len(img_data.shape) == 3:
output = np.zeros_like(img_data)
assert img_data.shape[0] < img_data.shape[1], 'check data is formatted as N*H*W'
for idx in range(img_data.shape[0]): #
slice_data = img_data[idx]
slice_data = equalize_adapthist(slice_data, clip_limit=clip_limit, nbins=nbins)
output[idx] = slice_data
return output
else:
raise NotImplementedError
class MyNormalizeMedicPercentile(object):
"""
Given min_val: float and max_val: float,
will normalize each channel of the th.*Tensor to
the provided min and max values.
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
where min' & max' are given values,
and min & max are observed min/max for each channel
"""
def __init__(self,
min_val=0.0,
max_val=1.0,
perc_threshold=(1.0, 95.0),
norm_flag=True):
"""
Normalize a tensor between a min and max value
:param min_val: (float) lower bound of normalized tensor
:param max_val: (float) upper bound of normalized tensor
:param perc_threshold: (float, float) percentile of image intensities used for scaling
:param norm_flag: [bool] list of flags for normalisation
"""
self.min_val = min_val
self.max_val = max_val
self.perc_threshold = perc_threshold
self.norm_flag = norm_flag
def __call__(self, *inputs):
# prepare the normalisation flag
if isinstance(self.norm_flag, bool):
norm_flag = [self.norm_flag] * len(inputs)
else:
norm_flag = self.norm_flag
outputs = []
eps = 1e-8
for idx, _input in enumerate(inputs):
if norm_flag[idx]:
# determine the percentiles and threshold the outliers
_min_val, _max_val = np.percentile(_input.numpy(), self.perc_threshold)
_input[th.le(_input, _min_val)] = _min_val
_input[th.ge(_input, _max_val)] = _max_val
# scale the intensity values
a = (self.max_val - self.min_val) / ((_max_val - _min_val) + eps)
b = self.max_val - a * _max_val
_input = _input.mul(a).add(b)
outputs.append(_input)
return outputs if idx >= 1 else outputs[0]
class MyRandomPurtarbation(object):
"""
"""
def __init__(self,
multi_control_points=[2,4,8],
max_sigma=16,
flag=True,
add_noise=True,
epsilon=0.01,
p=0.5,
magnitude=0.3
):
"""
Running random perturbation on images
:param multi_control_points: list of number of control points at each scale, by default, only use 4 control
points.
:param max_sigma: float, a parameter to control the scale of gaussian filter for smoothness
:param flag: whether to apply the perturbation to each input in the list
:param add_noise: boolean: adding random gaussian noise: default: True
:param epsilon: float, a scalar to control the level of noise, Default: 0.01
:param p: the probability of performing perturbation. Default: 0.5
"""
self.multi_control_points = multi_control_points
self.max_sigma = max_sigma
self.flag = flag
self.add_noise = add_noise
self.epsilon = epsilon
assert magnitude>=0 and magnitude<1,'magnitude must be in [0,1)'
self.magnitude=magnitude
self.p = p
def __call__(self, *inputs):
# prepare the perturbation flag
if isinstance(self.flag, bool):
flag = [self.flag] * len(inputs)
else:
flag = self.flag
if np.random.rand() >= self.p:
# do nothing
return inputs
else:
outputs = []
if isinstance(self.multi_control_points, list):
self.multi_control_points.sort()
else:
raise ValueError
for idx, input in enumerate(inputs):
if flag[idx]:
_input = input.numpy()
if np.abs(np.sum(_input) - 0) > 1e-6:
##random generate bias field
ch, h, w = _input.shape[0], _input.shape[1], _input.shape[2]
total_bias_field = np.zeros((h, w))
## from coarse grid to fine grid
for control_points in self.multi_control_points:
assert control_points <= np.min((h,
w)), 'num of control points at each scale must be ' \
'smaller or equal to the original image size'
control_points_field = np.float32(np.random.uniform(0, 1, (control_points, control_points)))
sigma = control_points * 2.0
if sigma > self.max_sigma: sigma = self.max_sigma
control_points_field = gaussian_filter(control_points_field, sigma)
interp = np.array(
Image.fromarray(control_points_field, mode='L').resize((h, w), resample=Image.BICUBIC),
dtype=np.float32)
interp = interp / (1.0 * interp.sum() * control_points + 1e-12)
total_bias_field += interp
total_bias_field = gaussian_filter(total_bias_field, self.max_sigma)
total_bias_field = (total_bias_field / (
1.0 * total_bias_field.sum() + 1e-12)) * h * w ## should be close to an identity field
# restrict values to [1-magnitude, 1+magnitude]
total_bias_field=np.clip(total_bias_field,1-self.magnitude,1+self.magnitude)
## bias image
_input = np.repeat(total_bias_field[np.newaxis, :, :], repeats=ch, axis=0) * _input
_min_val = np.min(np.array(_input))
_max_val = np.max(np.array(_input))
_input = (_input - _min_val) / (_max_val - _min_val + 1e-8)
## add gaussian noise
if self.add_noise:
noise = np.random.randn(ch, h, w)
noise = noise * self.epsilon
_input = _input + noise
_input = np.clip(_input, 0, 1)
else:
print('ignore black images')
#
input = torch.from_numpy(_input).float()
# print (input.size())
outputs.append(input)
return outputs if idx >= 1 else outputs[0]
class MyRandomPurtarbationV2(object):
"""
"""
def __init__(self,
ms_control_point_spacing=[32],
magnitude=0.2,
flag=True,
add_noise=True,
epsilon=0.01,
p=0.5,
debug=False,
spline_dgree=3,
spline_smoothness=3,
):
"""
Running random perturbation on images, perturbation is smoothed using bspline interpolation
:param ms_control_point_spacing: list of control point spacing at each scale. Prefer to use 5x5
control points in the coarse grid (images are divided into 4x4).
:param magnitude: float, control the value range of knots vectors at the initialization stage
:param flag: whether to apply the perturbation to each input in the list
:param add_noise: boolean: adding random gaussian noise: default: True
:param epsilon: float, a scalar to control the level of noise, Default: 0.01
:param spline_dgree: int,degree of bivariate spline, default =3
:param p: the probability of performing perturbation. Default: 0.5
"""
assert len(ms_control_point_spacing) >= 1, 'must specify at least one spacing, but got {}'.format(
str(ms_control_point_spacing))
assert np.abs(magnitude)<1, 'must set magnitude x in a reasonable range, bias field value 1+/-magnitude can not be zero or negative'
self.ms_control_point_spacing = [64]
self.magnitude = magnitude
self.flag = flag
self.add_noise = add_noise
self.epsilon = epsilon
self.spline_dgree = spline_dgree
self.spline_smoothness = spline_smoothness
self.p = p
self.debug = debug
def __call__(self, *inputs):
# prepare the perturbation flag
if isinstance(self.flag, bool):
flag = [self.flag] * len(inputs)
else:
flag = self.flag
if np.random.rand() >= self.p:
# do nothing
return inputs
else:
outputs = []
if isinstance(self.ms_control_point_spacing, list):
## from coarse to fine:
self.ms_control_point_spacing.sort(reverse=True)
if not self.ms_control_point_spacing[-1] == 1:
self.ms_control_point_spacing.append(1)
self.ms_control_point_spacing.sort(reverse=True)
else:
raise ValueError
for idx, input in enumerate(inputs):
if flag[idx]:
_input = input.numpy()
if np.abs(np.sum(_input) - 0) > 1e-6:
##random generate bias field
ch, orig_h, orig_w = _input.shape[0], _input.shape[1], _input.shape[2]
assert orig_h == orig_w, 'currently only support square images for simplicity, but found size ({},' \
'{})'.format(
orig_h, orig_w)
raw_image = _input.copy()
## extend the coordinates to be larger than the original
                    h = np.round(orig_h + self.ms_control_point_spacing[0] * 1.5)
                    w = np.round(orig_w + self.ms_control_point_spacing[0] * 1.5)
                    h = int(h)  # np.int is deprecated in recent numpy; use the builtin
                    w = int(w)
assert np.round(h /self.ms_control_point_spacing[0]) >= self.spline_dgree + 1 and np.round(w / self.ms_control_point_spacing[
0]) >= self.spline_dgree + 1, 'please decrease the spacing, the number of control ' \
'points in each dimension ' \
'should be at least kx+1, current bspline order k={}, ' \
'but found only :{} and {} along each axis'.format(
self.spline_dgree, h / self.ms_control_point_spacing[0], w / self.ms_control_point_spacing[0])
## initialize the coarsest grid:
xmax, ymax = w // 2, h // 2
if self.debug:
print (xmax,ymax)
print ('self.ms_control_point_spacing[0]',self.ms_control_point_spacing[0])
                    x = np.arange(-xmax, xmax + 1, self.ms_control_point_spacing[0])
# pyPI: Potential Intensity Calculations in Python
# -----------------------------------------------------------------------------------
# import required packages
import numpy as np
import numba as nb
from . import constants
from . import utilities
# define the function to calculate CAPE
@nb.njit()
def cape(TP, RP, PP, T, R, P, ascent_flag=0, ptop=50, miss_handle=1):
# function [CAPED,TOB,LNB,IFLAG]= cape(TP,RP,PP,T,R,P,ascent_flag=0,ptop=50,miss_handle=1)
#
# This function calculates the CAPE of a parcel given parcel pressure PP (hPa),
# temperature TP (K) and mixing ratio RP (gram/gram) and given a sounding
# of temperature (T in K) and mixing ratio (R in gram/gram) as a function
# of pressure (P in hPa). CAPED is the calculated value of CAPE following
# Emanuel 1994 (E94) Equation 6.3.6 and TOB is the temperature at the
# level of neutral buoyancy ("LNB") for the displaced parcel. IFLAG is a flag
# integer. If IFLAG = 1, routine is successful; if it is 0, routine did
# not run owing to improper sounding (e.g. no water vapor at parcel level).
# IFLAG=2 indicates that the routine did not converge, IFLAG=3 indicates that
# the input profile had missing values.
#
#
# INPUT: TP,RP,PP: floating point numbers of Parcel pressure (hPa),
# temperature (K), and mixing ratio (gram/gram)
#
# T,R,P: One-dimensional arrays
# containing environmental pressure (hPa), temperature (K),
# and mixing ratio (gram/gram) profiles. The arrays MUST be
# arranged so that the lowest index corresponds
# to the lowest model level, with increasing index
# corresponding to decreasing pressure.
#
# ascent_flag: Adjustable constant fraction for buoyancy of displaced
# parcels, where 0=Reversible ascent; 1=Pseudo-adiabatic ascent
#
# ptop: Pressure below which sounding is ignored (hPa)
#
# miss_handle: Flag that determines how missing (NaN) values are handled.
    #               If = 0 (BE02 default), NaN values in profile are ignored and PI is still calculated
# If = 1 (pyPI default), given NaN values PI will be set to missing (with IFLAG=3)
# NOTE: If any missing values are between the lowest valid level and ptop
# then PI will automatically be set to missing (with IFLAG=3)
#
#
# OUTPUT: CAPED (J/kg) is Convective Available Potential Energy of an air parcel
# consistent with its parcel and environmental properties.
#
    #           TOB is the Temperature (K) at the level of neutral buoyancy
    #            for the displaced air parcel
    #
    #           LNB is the pressure level of neutral buoyancy (hPa) for the
# displaced air parcel
#
# IFLAG is a flag where the value of 1 means OK; a value of 0
# indicates an improper sounding or parcel; a value of 2
# means that the routine failed to converge
#
#
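    # In discrete form, CAPE (E94, EQN. 6.3.6) is accumulated below as a trapezoidal
    # integration of parcel buoyancy in log-pressure, i.e. using increments of the form
    #   PFAC = RD * (TVRDIF[j] + TVRDIF[j-1]) * (P[j-1] - P[j]) / (P[j] + P[j-1])
    # where TVRDIF is the parcel-minus-environment density temperature difference.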
# *** Handle missing values ***
#
# find if any values are missing in the temperature or mixing ratio array
valid_i = ~np.isnan(T)
first_valid = np.where(valid_i)[0][0]
# Are there missing values? If so, assess according to flag
if (np.sum(valid_i) != len(P)):
# if not allowed, set IFLAG=3 and return missing CAPE
if (miss_handle != 0):
CAPED = np.nan
TOB = np.nan
LNB = np.nan
IFLAG = 3
# Return the unsuitable values
return (CAPED, TOB, LNB, IFLAG)
else:
# if allowed, but there are missing values between the lowest existing level
# and ptop, then set IFLAG=3 and return missing CAPE
if np.sum(np.isnan(T[first_valid:len(P)]) > 0):
CAPED = np.nan
TOB = np.nan
LNB = np.nan
IFLAG = 3
# Return the unsuitable values
return (CAPED, TOB, LNB, IFLAG)
else:
first_lvl = first_valid
else:
first_lvl = 0
# Populate new environmental profiles removing values above ptop and
# find new number, N, of profile levels with which to calculate CAPE
N = np.argmin(np.abs(P - ptop))
P = P[first_lvl:N]
T = T[first_lvl:N]
R = R[first_lvl:N]
nlvl = len(P)
TVRDIF = np.zeros((nlvl,))
#
# *** Run checks ***
#
# CHECK: Is the input parcel suitable? If not, return missing CAPE
if ((RP < 1e-6) or (TP < 200)):
CAPED = 0
TOB = np.nan
LNB = np.nan
IFLAG = 0
# Return the unsuitable values
return (CAPED, TOB, LNB, IFLAG)
#
# *** Define various parcel quantities, including reversible ***
# *** entropy, S ***
#
TPC = utilities.T_ktoC(TP) # Parcel temperature in Celsius
ESP = utilities.es_cc(TPC) # Parcel's saturated vapor pressure
EVP = utilities.ev(RP, PP) # Parcel's partial vapor pressure
RH = EVP / ESP # Parcel's relative humidity
    RH = min([RH, 1.0])  # ensure that the relative humidity does not exceed 1.0
# calculate reversible total specific entropy per unit mass of dry air (E94, EQN. 4.5.9)
S = utilities.entropy_S(TP, RP, PP)
#
# *** Estimate lifted condensation level pressure, PLCL ***
# Based on E94 "calcsound.f" code at http://texmex.mit.edu/pub/emanuel/BOOK/
# see also https://psl.noaa.gov/data/composites/day/calculation.html
#
# NOTE: Modern PLCL calculations are made following the exact expressions of Romps (2017),
# see https://journals.ametsoc.org/doi/pdf/10.1175/JAS-D-17-0102.1
# and Python PLCL code at http://romps.berkeley.edu/papers/pubdata/2016/lcl/lcl.py
#
PLCL = utilities.e_pLCL(TP, RH, PP)
# Initial default values before loop
CAPED = 0
TOB = T[0]
IFLAG = 1
# Values to help loop
NCMAX = 0
jmin = int(1e6)
#
# *** Begin updraft loop ***
#
# loop over each level in the profile
for j in range(nlvl):
# jmin is the index of the lowest pressure level evaluated in the loop
jmin = int(min([jmin, j]))
#
# *** Calculate Parcel quantities BELOW lifted condensation level ***
#
if (P[j] >= PLCL):
# Parcel temperature at this pressure
TG = TP * (P[j] / PP) ** (constants.RD / constants.CPD)
# Parcel Mixing ratio
RG = RP
# Parcel and Environmental Density Temperatures at this pressure (E94, EQN. 4.3.1 and 6.3.7)
TLVR = utilities.Trho(TG, RG, RG)
TVENV = utilities.Trho(T[j], R[j], R[j])
            # Buoyancy of the parcel in the environment (Proxy of E94, EQN. 6.1.5)
TVRDIF[j,] = TLVR - TVENV
#
# *** Calculate Parcel quantities ABOVE lifted condensation level ***
#
else:
# Initial default values before loop
TGNEW = T[j]
TJC = utilities.T_ktoC(T[j])
ES = utilities.es_cc(TJC)
RG = utilities.rv(ES, P[j])
#
# *** Iteratively calculate lifted parcel temperature and mixing ***
# *** ratio for reversible ascent ***
#
# set loop counter and initial condition
NC = 0
TG = 0
# loop until loop converges or bails out
while ((np.abs(TGNEW - TG)) > 0.001):
# Parcel temperature and mixing ratio during this iteration
TG = TGNEW
TC = utilities.T_ktoC(TG)
ENEW = utilities.es_cc(TC)
RG = utilities.rv(ENEW, P[j])
# increase iteration count in the loop
NC += 1
#
# *** Calculate estimates of the rates of change of the entropy ***
# *** with temperature at constant pressure ***
#
ALV = utilities.Lv(TC)
# calculate the rate of change of entropy with temperature, s_ell
SL = (constants.CPD + RP * constants.CL + ALV * ALV * RG / (constants.RV * TG * TG)) / TG
EM = utilities.ev(RG, P[j])
# calculate the saturated entropy, s_k, noting r_T=RP and
# the last term vanishes with saturation, i.e. RH=1
SG = (constants.CPD + RP * constants.CL) * np.log(TG) - constants.RD * np.log(P[j] - EM) + ALV * RG / TG
# convergence speed (AP, step in entropy fraction) varies as a function of
# number of iterations
if (NC < 3):
# converge slowly with a smaller step
AP = 0.3
else:
# speed the process with a larger step when nearing convergence
AP = 1.0
# find the new temperature in the iteration
TGNEW = TG + AP * (S - SG) / SL
#
# *** If the routine does not converge, set IFLAG=2 and bail out ***
#
if (NC > 500) or (ENEW > (P[j] - 1)):
CAPED = 0
TOB = T[0]
LNB = P[0]
IFLAG = 2
                    # Return the unconverged values
return (CAPED, TOB, LNB, IFLAG)
# store the number of iterations
NCMAX = NC
#
# *** Calculate buoyancy ***
#
# Parcel total mixing ratio: either reversible (ascent_flag=0) or pseudo-adiabatic (ascent_flag=1)
RMEAN = ascent_flag * RG + (1 - ascent_flag) * RP
# Parcel and Environmental Density Temperatures at this pressure (E94, EQN. 4.3.1 and 6.3.7)
TLVR = utilities.Trho(TG, RMEAN, RG)
TENV = utilities.Trho(T[j], R[j], R[j])
            # Buoyancy of the parcel in the environment (Proxy of E94, EQN. 6.1.5)
TVRDIF[j,] = TLVR - TENV
#
# *** Begin loop to find Positive areas (PA) and Negative areas (NA) ***
# *** and CAPE from reversible ascent ***
NA = 0.0
PA = 0.0
#
# *** Find maximum level of positive buoyancy, INB ***
#
INB = 0
for j in range(nlvl - 1, jmin, -1):
if (TVRDIF[j] > 0):
INB = max([INB, j])
# CHECK: Is the LNB higher than the surface? If not, return zero CAPE
if (INB == 0):
CAPED = 0
TOB = T[0]
LNB = P[INB]
# TOB=np.nan
LNB = 0
# Return the unconverged values
return (CAPED, TOB, LNB, IFLAG)
# if check is passed, continue with the CAPE calculation
else:
#
# *** Find positive and negative areas and CAPE ***
# via E94, EQN. 6.3.6)
#
for j in range(jmin + 1, INB + 1, 1):
PFAC = constants.RD * (TVRDIF[j] + TVRDIF[j - 1]) * (P[j - 1] - P[j]) / (P[j] + P[j - 1])
PA = PA + max([PFAC, 0.0])
NA = NA - min([PFAC, 0.0])
#
# *** Find area between parcel pressure and first level above it ***
#
PMA = (PP + P[jmin])
PFAC = constants.RD * (PP - P[jmin]) / PMA
PA = PA + PFAC * max([TVRDIF[jmin], 0.0])
NA = NA - PFAC * min([TVRDIF[jmin], 0.0])
#
# *** Find residual positive area above INB and TO ***
# and finalize estimate of LNB and its temperature
#
PAT = 0.0
TOB = T[INB]
LNB = P[INB]
if (INB < nlvl - 1):
PINB = (P[INB + 1] * TVRDIF[INB] - P[INB] * TVRDIF[INB + 1]) / (TVRDIF[INB] - TVRDIF[INB + 1])
LNB = PINB
PAT = constants.RD * TVRDIF[INB] * (P[INB] - PINB) / (P[INB] + PINB)
TOB = (T[INB] * (PINB - P[INB + 1]) + T[INB + 1] * (P[INB] - PINB)) / (P[INB] - P[INB + 1])
#
# *** Find CAPE ***
#
CAPED = PA + PAT - NA
CAPED = max([CAPED, 0.0])
# set the flag to OK if procedure reached this point
IFLAG = 1
# Return the calculated outputs to the above program level
return (CAPED, TOB, LNB, IFLAG)
# define the function to calculate the potential intensity (PI)
@nb.njit()
def pi(SSTC, MSL, P, TC, R, CKCD=0.9, ascent_flag=0, diss_flag=1, V_reduc=0.8, ptop=50, miss_handle=1):
# function [VMAX,PMIN,IFL,TO,OTL] = pi(SSTC,MSL,P,TC,R,CKCD=0.9,ascent_flag=0,diss_flag=1,V_reduc=0.8,ptop=50,miss_handle=0)
#
    #  ***  This function calculates the maximum wind speed       ***
    #  ***           and minimum central pressure                 ***
    #  ***  achievable in tropical cyclones, given a sounding     ***
    #  ***           and a sea surface temperature.               ***
    #
    # Thermodynamic and dynamic technical backgrounds (and calculations) are found in Bister
    # and Emanuel (2002; BE02) and Emanuel's "Atmospheric Convection" (E94; 1994; ISBN: 978-0195066302)
    #
    # INPUT:   SSTC: Sea surface temperature (C)
    #
    #          MSL: Mean sea level pressure (hPa)
    #
    #          P, TC, R: One-dimensional arrays containing pressure (hPa),
    #          temperature (C), and mixing ratio (g/kg). The arrays MUST be
    #          arranged so that the lowest index corresponds to the lowest model
    #          level, with increasing index corresponding to decreasing pressure.
    #          The temperature sounding should extend at least to the tropopause,
    #          and preferably into the lower stratosphere, but the mixing ratio
    #          above the boundary layer is not important. Missing mixing ratios
    #          can be replaced by zeros.
    #
    #          CKCD: Ratio of C_k to C_D (unitless number), i.e. the ratio of the
    #          exchange coefficients of enthalpy and momentum flux (e.g. see
    #          Bister and Emanuel 1998, EQN. 17-18). More discussion of CK/CD is
    #          found in Emanuel (2003). Default is 0.9, as in Wing et al. (2015).
    #
    #          ascent_flag: Adjustable constant fraction (unitless fraction),
    #          where 0 = reversible ascent (default) and 1 = pseudo-adiabatic ascent.
    #
    #          diss_flag: Adjustable switch integer (flag integer; 0 or 1),
    #          indicating whether dissipative heating is allowed (default) or
    #          not allowed (0). See Bister and Emanuel (1998) for the inclusion
    #          of dissipative heating.
    #
    #          V_reduc: Adjustable constant fraction (unitless fraction) for
    #          the reduction of gradient wind to 10-m wind, see Emanuel (2000)
    #          and Powell (1980). Default is 0.8.
    #
    #          ptop: Pressure below which the sounding is ignored (hPa)
    #
    #          miss_handle: Flag that determines how missing (NaN) values are
    #          handled in the CAPE calculation.
    #          If = 0 (BE02 default), NaN values in the profile are ignored and
    #          PI is still calculated.
    #          If = 1, given NaN values PI will be set to missing (with IFL=3).
    #          NOTE: If any missing values lie between the lowest valid level and
    #          ptop, then PI will automatically be set to missing (with IFL=3).
    #
    # OUTPUT:
    #          VMAX is the maximum surface wind speed (m/s),
    #          reduced to reflect surface drag via V_reduc
    #
    #          PMIN is the minimum central pressure (hPa)
    #
    #          IFL is a flag:
    #          a value of 1 means OK;
    #          a value of 0 indicates no convergence;
    #          a value of 2 indicates that the CAPE routine failed to converge;
    #          a value of 3 indicates that the CAPE routine failed due to
    #          missing data in the input profile;
    #
    #          TO is the outflow temperature (K)
    #
    #          OTL is the outflow temperature level (hPa), defined as the level
    #          of neutral buoyancy where the outflow temperature is found, i.e.
    #          where buoyancy is actually equal to that of an air parcel saturated
    #          at sea level pressure
#
# convert units
SSTK = utilities.T_Ctok(SSTC) # SST in kelvin
T = utilities.T_Ctok(TC) # Temperature profile in kelvin
R = R * 0.001 # Mixing ratio profile in g/g
    # CHECK 1: do the SSTs exceed 5C? If not, set IFL=0 and return missing PI
if (SSTC <= 5.0):
VMAX = np.nan
PMIN = np.nan
IFL = 0
TO = np.nan
OTL = np.nan
return (VMAX, PMIN, IFL, TO, OTL)
    # CHECK 2: do the temperatures exceed 100K everywhere in the profile? If not, set IFL=0 and return missing PI
    # if (np.min(T) <= 100):  # np.min returns the smallest value in the array
# VMAX = np.nan
# PMIN = np.nan
# IFL = 0
# TO = np.nan
# OTL = np.nan
# return (VMAX, PMIN, IFL, TO, OTL)
    # set missing mixing ratios to zero g/g, following Kerry's BE02 algorithm
    R[np.isnan(R)] = 0.
    # saturation vapor pressure
    # from the Clausius-Clapeyron relation / August-Roche-Magnus formula
    ES0 = utilities.es_cc(SSTC)
    # define the parcel lifting level (the first pressure level)
NK = 0
TP = T[NK]
RP = R[NK]
PP = P[NK]
result = cape(TP, RP, PP, T, R, P, ascent_flag, ptop, miss_handle)
CAPEA = result[0]
IFLAG = result[3]
    # if the CAPE function tripped a flag, set the output IFL to it
if (IFLAG != 1):
IFL = int(IFLAG)
#
    # ***  Begin iteration to find minimum pressure  ***
    #
    # set loop counter and initial conditions
NP = 0 # loop counter
PM = 970.0
PMOLD = PM # initial condition from minimum pressure
PNEW = 0.0 # initial condition from minimum pressure
IFL = int(1) # Default flag for CAPE calculation
    # loop until convergence or bail out
while (np.abs(PNEW - PMOLD) > 0.5):
#
        # ***  Find CAPE at the radius of maximum winds  ***
#
TP = T[NK]
PP = min([PM, 1000.0])
# find the mixing ratio with the average of the lowest level pressure and MSL
RP = constants.EPS * R[NK] * MSL / (PP * (constants.EPS + R[NK]) - R[NK] * MSL)
result = cape(TP, RP, PP, T, R, P, ascent_flag, ptop, miss_handle)
CAPEM = result[0]
IFLAG = result[3]
# if the CAPE function tripped a different flag, set the output IFL to it
if (IFLAG != 1):
IFL = int(IFLAG)
#
# *** Find saturation CAPE at radius of maximum winds ***
# *** Note that TO and OTL are found with this assumption ***
#
TP = SSTK
PP = min([PM, 1000.0])
RP = utilities.rv(ES0, PP)
result = cape(TP, RP, PP, T, R, P, ascent_flag, ptop, miss_handle)
CAPEMS, TOMS, LNBS, IFLAG = result
# if the CAPE function tripped a flag, set the output IFL to it
if (IFLAG != 1):
IFL = int(IFLAG)
        # Store the outflow temperature and level of neutral buoyancy at the outflow level (OTL)
TO = TOMS
OTL = LNBS
# Calculate the proxy for TC efficiency (BE02, EQN. 1-3)
RAT = SSTK / TO
# If dissipative heating is "off", TC efficiency proxy is set to 1.0 (BE02, pg. 3)
if (diss_flag == 0):
RAT = 1.0
#
# *** Initial estimate of pressure at the radius of maximum winds ***
#
RS0 = RP
# Lowest level and Sea-surface Density Temperature (E94, EQN. 4.3.1 and 6.3.7)
TV0 = utilities.Trho(T[NK], R[NK], R[NK])
TVSST = utilities.Trho(SSTK, RS0, RS0)
# Average Surface Density Temperature, e.g. 1/2*[Tv(Tsfc)+Tv(sst)]
TVAV = 0.5 * (TV0 + TVSST)
# Converge toward CAPE*-CAPEM (BE02, EQN 3-4)
CAT = (CAPEM - CAPEA) + 0.5 * CKCD * RAT * (CAPEMS - CAPEM)
CAT = max([CAT, 0.0])
# Iterate on pressure
PNEW = MSL * np.exp(-CAT / (constants.RD * TVAV))
#
# *** Test for convergence (setup for possible next while iteration) ***
#
# store the previous step's pressure
PMOLD = PM
# store the current step's pressure
PM = PNEW
# increase iteration count in the loop
NP += 1
#
# *** If the routine does not converge, set IFL=0 and return missing PI ***
#
if (NP > 200) or (PM < 400):
VMAX = np.nan
PMIN = np.nan
IFL = 0
TO = np.nan
OTL = np.nan
return (VMAX, PMIN, IFL, TO, OTL)
# Once converged, set potential intensity at the radius of maximum winds
CATFAC = 0.5 * (1. + 1 / constants.b)
CAT = (CAPEM - CAPEA) + CKCD * RAT * CATFAC * (CAPEMS - CAPEM)
CAT = max([CAT, 0.0])
# Calculate the minimum pressure at the eye of the storm
# BE02 EQN. 4
PMIN = MSL * np.exp(-CAT / (constants.RD * TVAV))
    # Calculate the potential intensity at the radius of maximum winds
    # BE02 EQN. 3, reduced by some fraction (default 20%) to account for the reduction
    # of gradient wind to 10-m wind speed (Emanuel 2000, Powell 1980)
FAC = max([0.0, (CAPEMS - CAPEM)])
    VMAX = V_reduc * np.sqrt(CKCD * RAT * FAC)
    # Return the calculated outputs to the above program level
    return (VMAX, PMIN, IFL, TO, OTL)
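# Minimal usage sketch (hypothetical sounding values, illustration only; because of the
# relative imports above, run this as a module, e.g. `python -m <package>.<module>`):
if __name__ == "__main__":
    P_hPa = np.array([1000., 900., 800., 700., 600., 500., 400., 300., 200., 100.])
    T_C = np.array([27., 22., 16., 10., 2., -8., -20., -35., -55., -75.])
    R_gkg = np.array([18., 14., 10., 7., 4., 2., 1., 0.5, 0.1, 0.05])
    VMAX, PMIN, IFL, TO, OTL = pi(28.0, 1010.0, P_hPa, T_C, R_gkg)
    print('VMAX (m/s):', VMAX, 'PMIN (hPa):', PMIN, 'IFL:', IFL)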
import SVMClassification as SVMC
from os import system
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
SVMC.clearScreen()
dataTraining= SVMC.loadData("dataTraining.txt")
X=dataTraining[:,0:2]
y=dataTraining[:,2:3]
degree=1
theta =SVMC.initTheta(X,degree)
#theta = SVMC.gradientDescent(X, y, theta,5,500, degree)
y=y.flatten()
plt.scatter(X[np.where(y==1),0],X[np.where(y==1),1],marker="+")
plt.scatter(X[np.where(y!=1),0],X[np.where(y!=1),1],marker=".")
#SVMC.plotDecisionBoundry(theta,X,y)
C=70
tol =0.001
max_passes = 20
m,n = X.shape
y[y==0] = -1
alphas = np.zeros((m, 1))
b = 0
E = np.zeros((m, 1))
passes = 0
eta = 0
L = 0
H = 0
JRANDOM=[ 37, 36, 7, 18, 36, 23, 18, 13, 11, 27, 9, 1, 23, 22, 40, 31, 35, 20, 8, 40, 41, 50, 20, 21, 37, 49, 26, 40, 18, 10, 48, 3, 40, 34, 17, 4, 28, 43, 8, 18, 32, 11, 9, 5, 17, 27, 17, 13, 17, 38, 24 ]
y=y.flatten()
E=E.flatten()
alphas=alphas.flatten()
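# Gram matrix of the linear kernel: K[i, j] = x_i . x_j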
K = np.matmul(X,X.T)
while (passes < max_passes):
num_changed_alphas = 0
for i in range(m):
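        # prediction error for sample i: E[i] = f(x_i) - y[i], with f(x) = sum_j alphas[j]*y[j]*K(x_j, x) + b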
E[i] = b + np.sum(np.multiply(alphas,np.multiply(y,K[:,i]))) - y[i]
if ((y[i]*E[i] < -tol and alphas[i] < C) or (y[i]*E[i] > tol and alphas[i] > 0)):
            j = np.random.randint(0, m)
from __future__ import absolute_import, print_function
import numpy as npy
from PyDSTool import Events, Variable, Pointset, Trajectory
from PyDSTool.common import args, metric, metric_L2, metric_weighted_L2, \
metric_float, remain, fit_quadratic, fit_exponential, fit_diff_of_exp, \
smooth_pts, nearest_2n_indices, make_poly_interpolated_curve, simple_bisection
from PyDSTool.Trajectory import numeric_to_traj
from PyDSTool.ModelContext import *
from PyDSTool.Toolbox.data_analysis import butter, filtfilt, rectify
from PyDSTool.errors import PyDSTool_KeyError
import copy
# Test this on a single spike with global max at spike and minima at endpoints
# Test this on a mexican hat type spike with global min and max at spike peak and trough
# Test this on monotonic data for worst case scenario!! Should return None for max and min
# Also test on noisy monotonic data
# Return value of Nones to a feature evaluator should suggest to it to change window size for defining pts
def find_internal_extrema(pts, noise_tol=0):
"""
Find an interior (local) maximum and minimum values of a 1D pointset, away from the endpoints.
Returns a dictionary mapping 'local_max' -> (index_max, xmax), 'local_min' -> (index_min, xmin),
whose values are None if the pointset is monotonic or is close enough so that the global extrema
are at the endpoints.
Use noise_tol > 0 to avoid getting a local extremum right next to an endpoint because of noise.
Also returned in the dictionary for reference:
'first' -> (0, <start_endpoint_value>), 'last' -> (last_index, <last_endpoint_value>),
'global_max' -> (index, value), 'global_min' -> (index, value)
Assumes there is only one interior (max, min) pair in pts, otherwise will return an arbitrary choice
from multiple maxima and minima."""
assert pts.dimension == 1
# convert all singleton points to floats with [0] selection
x0 = pts[0][0]
x1 = pts[-1][0]
# need last_ix explicitly for index test below
last_ix = len(pts)-1
end_ixs = (0, last_ix)
max_val_ix = npy.argmax(pts)
min_val_ix = npy.argmin(pts)
glob_xmax = pts[max_val_ix][0]
glob_xmin = pts[min_val_ix][0]
no_local_extrema = {'local_max': (None, None), 'local_min': (None, None),
'first': (0, x0), 'last': (last_ix, x1),
'global_max': (max_val_ix, glob_xmax),
'global_min': (min_val_ix, glob_xmin)
}
max_at_end = max_val_ix in end_ixs
min_at_end = min_val_ix in end_ixs
if max_at_end:
if min_at_end:
# No detectable turning points present (this is criterion for ignoring noisy data)
return no_local_extrema
else:
# interior minimum found
index_min = min_val_ix
xmin = pts[index_min]
# find associated interior local maximum
max_val_ix1 = npy.argmax(pts[:min_val_ix])
max_val_ix2 = npy.argmax(pts[min_val_ix:])+min_val_ix
if max_val_ix1 in end_ixs:
if max_val_ix2 in end_ixs:
index_max = None
xmax = None
else:
index_max = max_val_ix2
xmax = pts[index_max][0]
else:
# assumes only one local max / min pair in interior!
index_max = max_val_ix1
xmax = pts[index_max][0]
else:
# interior maximum found
index_max = max_val_ix
xmax = pts[index_max][0]
# find associated interior local minimum
min_val_ix1 = npy.argmin(pts[:max_val_ix])
xmin1 = pts[min_val_ix1][0]
min_val_ix2 = npy.argmin(pts[max_val_ix:])+max_val_ix
xmin2 = pts[min_val_ix2][0]
        if min_val_ix1 in end_ixs or abs(xmin1-x0)<noise_tol or abs(xmin1-x1)<noise_tol:
            if min_val_ix2 in end_ixs or abs(xmin2-x0)<noise_tol or abs(xmin2-x1)<noise_tol:
index_min = None
xmin = None
else:
index_min = min_val_ix2
xmin = xmin2
else:
# assumes only one local max / min pair in interior!
index_min = min_val_ix1
xmin = xmin1
return {'local_max': (index_max, xmax), 'local_min': (index_min, xmin),
'first': (0, x0), 'last': (last_ix, x1),
'global_max': (max_val_ix, glob_xmax),
'global_min': (min_val_ix, glob_xmin)}
class get_spike_model(ql_feature_leaf):
"""Qualitative test for presence of spike in model trajectory data
using events to identify spike times. Also records salient spike
information for quantitative comparisons later."""
def evaluate(self, traj):
# function of traj, not target
pts = traj.sample(coords=[self.super_pars.burst_coord],
tlo=self.pars.tlo,
thi=self.pars.tlo+self.pars.width_tol)
loc_extrema = find_internal_extrema(pts)
if self.pars.verbose_level > 0:
print(loc_extrema)
max_val_ix, xmax = loc_extrema['local_max']
global_max_val_ix, global_xmax = loc_extrema['global_max']
min_val_ix, xmin = loc_extrema['local_min']
global_min_val_ix, global_xmin = loc_extrema['global_min']
# could split these tests into 3 further sub-features but we'll skip that here for efficiency
if xmax is None:
self.results.ixmax = None
self.results.tmax = None
test1 = test2 = test3 = False
else:
test1 = max_val_ix not in (loc_extrema['first'][0], loc_extrema['last'][0])
test2 = npy.linalg.norm(global_xmin-xmax) > self.pars.height_tol
try:
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
except:
# fails if xmin is None, i.e. no interior minimum
# allow no local minimum present, in which case use the other endpoint for test
                # ... we don't know which is the one already tested in test2, so test both ends again,
# knowing that they are both lower than the interior maximum found in this case
xmin = max([global_xmin, loc_extrema['last'][1], loc_extrema['first'][1]])
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
self.results.ixmax = max_val_ix
self.results.tmax = pts.indepvararray[max_val_ix]
self.results.spike_pts = pts
return test1 and test2 and test3
def finish(self, traj):
self.results.spike_time = self.results.tmax
self.results.spike_val = self.results.spike_pts[self.results.ixmax][self.super_pars.burst_coord]
class get_spike_data(ql_feature_leaf):
"""Qualitative test for presence of spike in noisy data. Also records salient spike information
for quantitative comparisons later.
Criteria: ensure a maximum occurs, and that this is away from endpoints of traj
"Uniqueness" of this maximum can only be determined for noisy data using a height
tolerance.
    Assumes spikes will never bunch up too much so that more than one spike occurs in the
spacing_tol window.
Finds maximum position using a quadratic fit.
"""
def _local_init(self):
# avoids recreating this object for every test
self.quadratic = fit_quadratic(verbose=self.pars.verbose_level>0)
def evaluate(self, traj):
# function of traj, not target
event_args = {'name': 'spike_thresh',
'eventtol': self.pars.eventtol,
'eventdelay': self.pars.eventtol*.1,
'starttime': 0,
'active': True}
if 'coord' not in self.pars:
self.pars.coord = self.super_pars.burst_coord
# update thi each time b/c tlo will be different
self.pars.thi = self.pars.tlo+self.pars.width_tol
self.pars.ev = Events.makePythonStateZeroCrossEvent(self.pars.coord,
"thresh", 0,
event_args, traj.variables[self.pars.coord])
pts = traj.sample(coords=[self.pars.coord], tlo=self.pars.tlo,
thi=self.pars.thi)
if pts.indepvararray[-1] < self.pars.thi:
self.pars.thi = pts.indepvararray[-1]
loc_extrema = find_internal_extrema(pts, self.pars.noise_tol)
if self.pars.verbose_level > 0:
print(loc_extrema)
# from PyDSTool import plot, show
## plot spike and quadratic fit
#plot(pts.indepvararray, pts[self.super_pars.burst_coord], 'go-')
#show()
max_val_ix, xmax = loc_extrema['local_max']
global_max_val_ix, global_xmax = loc_extrema['global_max']
min_val_ix, xmin = loc_extrema['local_min']
global_min_val_ix, global_xmin = loc_extrema['global_min']
# could split these tests into 3 further sub-features but we'll skip that here for efficiency
test1 = max_val_ix not in (loc_extrema['first'][0], loc_extrema['last'][0])
test2 = npy.linalg.norm(global_xmin-xmax) > self.pars.height_tol
try:
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
except:
# fails if xmin is None, i.e. no interior minimum
# allow no local minimum present, in which case use the other endpoint for test
# ... we don't know which is the one already tested in test2, so test both ends again,
# knowing that they are both lower than the interior maximum found in this case
xmin = max([global_xmin, loc_extrema['last'][1], loc_extrema['first'][1]])
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
# generate a suitable threshold from local maximum
try:
thresh_pc = self.pars.thresh_pc
except:
# default value of 15%
thresh_pc = 0.15
thresh = (xmin + thresh_pc*(xmax-xmin))
if self.pars.verbose_level > 0:
print("xmin used =", xmin)
print("thresh = ", thresh)
# Define extent of spike for purposes of quadratic fit ...
evs_found = self.pars.ev.searchForEvents(trange=[self.pars.tlo,
self.pars.thi],
parDict={'thresh': thresh})
tlo = evs_found[0][0]
thi = evs_found[1][0]
tmax = pts.indepvararray[max_val_ix]
symm_dist = npy.min([abs(tmax-tlo), abs(thi-tmax)])
# HACK! Ensure dt value will not cause us to hit an index directly, otherwise
# have to catch case from Pointset.find method when return value is a single
# integer index rather than a pair of indices
if symm_dist > self.pars.fit_width_max/2.000000007:
dt = self.pars.fit_width_max/2.000000007
else:
dt = symm_dist*1.0000000007
tlo = tmax-dt
thi = tmax+dt
ixlo = pts.find(tmax-dt, end=0)
ixhi = pts.find(tmax+dt, end=1)
if self.pars.verbose_level > 0:
print("ixlo =", ixlo, "ixhi =", ixhi)
print("tlo =",tmax-dt, "thi =",tmax+dt)
print(pts[ixlo], pts[ixhi])
print("\nget_spike tests:", test1, test2, test3)
self.results.ixlo = ixlo
self.results.ixhi = ixhi
self.results.ixmax = max_val_ix
self.results.tlo = tlo
self.results.thi = thi
self.results.tmax = tmax
self.results.spike_pts = pts[ixlo:ixhi]
return test1 and test2 and test3
def finish(self, traj):
# function of traj, not target
if self.pars.verbose_level > 0:
print("Finishing spike processing...")
pts = self.results.spike_pts
coord = self.pars.coord
xlo = pts[0][0]
# xmax is just an estimate of the max value
xmax = pts[self.results.ixmax-self.results.ixlo][0]
estimate_quad_coeff = -(xmax-xlo)/((self.results.tmax - \
self.results.tlo)**2)
estimate_intercept = xlo - \
((xmax-xlo)/(self.results.tmax-self.results.tlo))*self.results.tlo
res = self.quadratic.fit(pts.indepvararray, pts[coord],
pars_ic=(estimate_quad_coeff,0,estimate_intercept),
opts=args(peak_constraint=(self.results.ixmax - \
self.results.ixlo,xmax,
self.pars.weight*len(pts)/(self.results.tmax - \
self.results.tlo),
self.pars.weight*len(pts)/(xmax-xlo))))
tval, xval = res.results.peak
self.results.spike_time = tval
self.results.spike_val = xval
self.results.pars_fit = res.pars_fit
if self.pars.verbose_level > 0:
from PyDSTool import plot, show
# plot spike and quadratic fit
dec = 10
plot(pts.indepvararray, pts[coord], 'go-')
plot(tval, xval, 'rx')
ts = [pts.indepvararray[0]]
for i, t in enumerate(pts.indepvararray[:-1]):
ts.extend([t+j*(pts.indepvararray[i+1]-t)/dec for j in range(1,dec)])
ts.append(pts.indepvararray[-1])
plot(ts, [res.results.f(t) for t in ts], 'k:')
# temp
if self.pars.verbose_level > 1:
show()
class get_burst_duration(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
on_t = self.super_pars.ref_spike_times[0] - self.pars.t_lookback
self.pars.ref_burst_on_time = on_t
# find associated V for ref_on_thresh
pts = self.super_pars.ref_burst_coord_pts
x = pts[self.super_pars.burst_coord]
on_ix = pts.find(on_t, end=1)
ix_lo, ix_hi = nearest_2n_indices(x, on_ix, 2)
t = pts.indepvararray
on_res = smooth_pts(t[ix_lo:ix_hi+1],
x[ix_lo:ix_hi+1], self.super_pars.quadratic)
self.pars.ref_on_thresh = on_res.results.f(on_t)
#
off_t = self.super_pars.ref_spike_times[-1] + self.pars.t_lookforward
self.pars.ref_burst_off_time = off_t
off_ix = pts.find(off_t, end=0)
ix_lo, ix_hi = nearest_2n_indices(x, off_ix, 2)
off_res = smooth_pts(t[ix_lo:ix_hi+1],
x[ix_lo:ix_hi+1], self.super_pars.quadratic)
self.pars.ref_off_thresh = off_res.results.f(off_t)
self.pars.ref_burst_duration = off_t - on_t
self.pars.ref_burst_prop = (off_t - on_t)/self.super_pars.ref_period
def evaluate(self, target):
traj = target.test_traj
varname = self.super_pars.burst_coord
pts = self.super_pars.burst_coord_pts
on_t = self.super_results.spike_times[0] - self.pars.t_lookback
self.results.burst_on_time = on_t
x = pts[varname]
on_ix = pts.find(on_t, end=1)
ix_lo, ix_hi = nearest_2n_indices(x, on_ix, 2)
pp = make_poly_interpolated_curve(pts[ix_lo:ix_hi+1], varname,
target.model)
thresh = pp(on_t)
self.results.on_thresh = thresh
#
# don't find "off" based on last spike time because
# when new spikes suddenly appear this value will jump
# instead, use a threshold event search, assuming that
# only one period is "in view"
t = pts.indepvararray
x_rev = x[:ix_hi:-1]
t_rev = t[:ix_hi:-1]
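        # scan backwards from the end of the trace for the first sample still at or above
        # the "on" threshold; converting back to forward indexing gives the "off" crossing index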
off_ix = len(x) - npy.argmin(npy.asarray(x_rev < thresh, int))
ix_lo, ix_hi = nearest_2n_indices(x, off_ix, 2)
pp = make_poly_interpolated_curve(pts[ix_lo:ix_hi+1], varname,
target.model)
# bisect to find accurate crossing point
tlo = t[ix_lo]
thi = t[ix_hi]
off_t = simple_bisection(tlo, thi, pp, self.pars.t_tol)
self.results.burst_duration = off_t - on_t
self.results.burst_prop = (off_t - on_t) / self.super_results.period
return self.metric(self.results.burst_prop,
self.super_pars.ref_burst_prop) < self.pars.tol
class get_burst_active_phase(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
self.pars.ref_active_phase = self.super_pars.ref_spike_times[0] / \
self.super_pars.ref_period
def evaluate(self, target):
self.results.active_phase = self.super_results.spike_times[0] / \
self.super_results.period
return self.metric(self.results.active_phase,
self.pars.ref_active_phase) \
< self.pars.tol
class get_burst_dc_offset(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
# 20% of burst_on_V (i.e., on_thresh) - min_V above min_V
self.pars.ref_baseline_V = self.super_pars.ref_min_V + \
0.2*(self.super_pars.ref_on_thresh - \
self.super_pars.ref_min_V)
def evaluate(self, target):
baseline = self.super_results.min_V + 0.2*(self.super_results.on_thresh - \
self.super_results.min_V)
self.results.baseline_V = baseline - self.super_pars.ref_baseline_V
return self.metric(baseline, self.super_pars.ref_baseline_V) < \
self.pars.tol
class get_burst_passive_extent(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
self.pars.ref_passive_extent_V = self.super_pars.ref_max_V - \
self.super_pars.ref_min_V
def evaluate(self, target):
self.results.passive_extent_V = self.super_results.max_V - \
self.super_results.min_V
return self.metric(self.results.passive_extent_V,
self.super_pars.ref_passive_extent_V) < \
self.pars.tol
class burst_feature(ql_feature_node):
"""Embed the following sub-features, if desired:
get_burst_X, where X is a number of feature types defined in this module.
"""
def _local_init(self):
self.pars.quadratic = fit_quadratic(verbose=self.pars.verbose_level>0)
self.pars.filt_coeffs = butter(3, self.pars.cutoff, btype='highpass')
self.pars.filt_coeffs_LP = butter(3, self.pars.cutoff/10)
def postprocess_ref_traj(self):
# single coord used as indicator
pts = self.ref_traj.sample()
burst_pts = self.ref_traj.sample(coords=[self.pars.burst_coord],
dt=self.pars.dt)
xrs = burst_pts[self.pars.burst_coord]
trs = burst_pts.indepvararray
x = pts[self.pars.burst_coord]
b, a = self.pars.filt_coeffs_LP
xf = filtfilt(b, a, xrs)
t = pts.indepvararray
min_val_ix = npy.argmin(xf) # use LPF version to avoid noise artifacts
max_val_ix = npy.argmax(xf) # use LPF version to avoid spikes
min_ix_lo, min_ix_hi = nearest_2n_indices(xrs, min_val_ix, 30)
max_ix_lo, max_ix_hi = nearest_2n_indices(xrs, max_val_ix, 30)
min_res = smooth_pts(trs[min_ix_lo:min_ix_hi+1],
xf[min_ix_lo:min_ix_hi+1], self.pars.quadratic)
# use LPF data for max
max_res = smooth_pts(trs[max_ix_lo:max_ix_hi+1],
xf[max_ix_lo:max_ix_hi+1], self.pars.quadratic)
min_t, min_val = min_res.results.peak
max_t, max_val = max_res.results.peak
# thresh1 = float(max_val-self.pars.active_frac_height*(max_val-min_val))
# thresh2 = x[0]+3.
# # don't make threshold smaller than initial value, assuming
# # burst will be rising at initial condition
# thresh = max((thresh1,thresh2))
self.pars.ref_burst_coord_pts = pts
# self.pars.ref_on_thresh = thresh
# self.pars.ref_off_thresh = thresh
self.pars.ref_min_V = min_val
self.pars.ref_max_V = max_val
assert self.pars.on_cross_dir in (-1,1)
if self.pars.on_cross_dir == 1:
self.pars.off_cross_dir = -1
else:
self.pars.off_cross_dir = 1
self.pars.ref_burst_est = estimate_spiking(burst_pts[self.pars.burst_coord],
burst_pts.indepvararray,
self.pars.filt_coeffs)
self.pars.ref_burst_pts_resampled = burst_pts
# spike times will be overwritten by get_spikes_data instance, if present
#self.pars.ref_spike_times = self.pars.ref_burst_est.spike_ts
# to establish period, find min on other side of active phase
if min_t < self.pars.ref_burst_est.spike_ts[0]:
# look to the right
start_t = self.pars.ref_burst_est.spike_ts[-1]
start_ix = pts.find(start_t, end=1)
other_min_ix = npy.argmin(x[start_ix:])
other_min_t = t[start_ix+other_min_ix]
else:
# look to the left
start_t = self.pars.ref_burst_est.spike_ts[0]
start_ix = pts.find(start_t, end=0)
other_min_ix = npy.argmin(x[:start_ix])
other_min_t = t[other_min_ix]
self.pars.ref_period = abs(other_min_t - min_t)
def prepare(self, target):
# single coord used as indicator
pts = target.test_traj.sample()
x = pts[self.pars.burst_coord]
burst_pts = target.test_traj.sample(coords=[self.pars.burst_coord],
dt=self.pars.dt)
xrs = burst_pts[self.pars.burst_coord]
trs = burst_pts.indepvararray
if max(x)-min(x) < 5:
print("\n\n Not a bursting trajectory!!")
raise ValueError("Not a bursting trajectory")
b, a = self.pars.filt_coeffs_LP
xf = filtfilt(b, a, xrs)
t = pts.indepvararray
min_val_ix = npy.argmin(x) # precise because of Model's events
max_val_ix = npy.argmax(xf)
max_ix_lo, max_ix_hi = nearest_2n_indices(xrs, max_val_ix, 4)
max_res = smooth_pts(trs[max_ix_lo:max_ix_hi+1],
xf[max_ix_lo:max_ix_hi+1], self.pars.quadratic)
min_t = t[min_val_ix]
min_val = x[min_val_ix]
max_t, max_val = max_res.results.peak
self.results.min_V = min_val
self.results.max_V = max_val
assert self.pars.on_cross_dir in (-1,1)
if self.pars.on_cross_dir == 1:
self.pars.off_cross_dir = -1
else:
self.pars.off_cross_dir = 1
self.results.burst_est = estimate_spiking(burst_pts[self.pars.burst_coord],
burst_pts.indepvararray,
self.pars.filt_coeffs)
# record approximate spike times - may be overwritten by
# get_burst_spikes if done accurately
#self.results.spike_times = self.results.burst_est.spike_ts
if self.pars.verbose_level > 0:
print("Spikes found at (approx) t=", self.results.burst_est.spike_ts)
if self.results.burst_est.spike_ts[0] < self.pars.shrink_end_time_thresh:
# kludgy way to ensure that another burst doesn't encroach
if not hasattr(self.pars, 'shrunk'):
# do this *one time*
end_time = t[-1] - self.pars.shrink_end_time_amount
target.model.set(tdata=[0,end_time])
end_pts = pts.find(end_time, end=0)
end_burst_pts = burst_pts.find(end_time, end=0)
pts = pts[:end_pts]
burst_pts = burst_pts[:end_burst_pts]
self.pars.shrunk = True
elif hasattr(self.pars, 'shrunk'):
# in case period grows back reset end time *one time*
target.model.set(tdata=[0,t[-1]+self.pars.shrink_end_time_amount])
del self.pars.shrunk
self.pars.burst_coord_pts = pts
self.pars.burst_pts_resampled = burst_pts
# to establish period, find min on other side of active phase
if min_t < self.results.burst_est.spike_ts[0]:
# look to the right
start_t = self.results.burst_est.spike_ts[-1]
start_ix = pts.find(start_t, end=1)
other_min_ix = npy.argmin(x[start_ix:])
other_min_t = t[start_ix+other_min_ix]
other_min_val = x[start_ix+other_min_ix]
else:
# look to the left
start_t = self.results.burst_est.spike_ts[0]
start_ix = pts.find(start_t, end=0)
other_min_ix = npy.argmin(x[:start_ix])
other_min_t = t[other_min_ix]
other_min_val = x[other_min_ix]
self.results.period = abs(other_min_t - min_t)
self.results.period_val_error = other_min_val - min_val
class get_burst_spikes(ql_feature_node):
"""Requires a get_spike_data and get_spike_model instance to be
the only sub-features (supplied as a dict with keys 'is_spike_data'
and 'is_spike_model').
"""
def _local_init(self):
assert len(self.subfeatures) == 2
assert remain(self.subfeatures.keys(),
['is_spike_data', 'is_spike_model']) == []
def postprocess_ref_traj(self):
# get precise spike times and record in self.results.ref_spike_times
self.pars.ref_spike_times, self.pars.ref_spike_vals = \
self._eval(self.ref_traj, self.super_pars.ref_burst_est,
self.subfeatures['is_spike_data'])
def evaluate(self, target):
self.results.spike_times, self.results.spike_vals = \
self._eval(target.test_traj, self.super_results.burst_est,
self.subfeatures['is_spike_model'])
# satisfied if all spikes determined correctly
return len(self.results.spike_times) == \
len(self.super_results.burst_est.spike_ixs)
def _eval(self, traj, burst_est, is_spike):
# isn't the next line redundant?
is_spike.super_pars = copy.copy(self.pars)
spike_times = []
spike_vals = []
satisfied = True
for spike_num, spike_ix in enumerate(burst_est.spike_ixs):
if self.pars.verbose_level > 0:
print("\n Starting spike", spike_num+1)
is_spike.super_pars.burst_coord = self.super_pars.burst_coord
# step back 20% of estimated period
try:
is_spike.pars.width_tol = burst_est.ISIs[spike_num]*.8
except IndexError:
# one fewer ISI than spike, so just assume last one is about
# the same
is_spike.pars.width_tol = burst_est.ISIs[spike_num-1]*.8
is_spike.pars.tlo = burst_est.t[spike_ix] - \
is_spike.pars.width_tol #/ 2.
if self.pars.verbose_level > 0:
print("new tlo =", is_spike.pars.tlo)
# would prefer to work this out self-consistently...
#is_spike.pars.fit_width_max = ?
new_sat = is_spike(traj)
satisfied = satisfied and new_sat
# make recorded spike time in global time coordinates
if new_sat:
spike_times.append(is_spike.results.spike_time)
spike_vals.append(is_spike.results.spike_val)
if self.pars.verbose_level > 0:
print("Spike times:", spike_times)
return spike_times, spike_vals
class get_burst_peak_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
# should really use quadratic fit to get un-biased peaks
peak_vals = self.super_pars.ref_spike_vals
peak_t = self.super_pars.ref_spike_times
self.ref_traj = numeric_to_traj([peak_vals], 'peak_envelope',
self.super_pars.burst_coord, peak_t,
self.super_pars.ref_burst_pts_resampled.indepvarname,
discrete=False)
# discrete option false yields error if only one spike found, but error is cryptic!
if len(peak_t) > 1:
ref_env_ts = npy.linspace(peak_t[0], peak_t[-1],
self.pars.num_samples)
else:
ref_env_ts = npy.array(peak_t)
self.pars.ref_peak_vals = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)[0]
def evaluate(self, target):
# ignore target
dc_offset = self.super_results.baseline_V
# min and max events in model mean that these are recorded
# accurately in the pointsets already
peak_vals = self.super_results.spike_vals - dc_offset
peak_t = self.super_results.spike_times
self.results.burst_peak_env = numeric_to_traj([peak_vals],
'peak_envelope',
self.super_pars.burst_coord, peak_t,
self.super_pars.burst_pts_resampled.indepvarname,
discrete=False)
# burst_est = self.super_results.burst_est
# call_args = {}
# try:
# call_args['noise_floor'] = is_spike.pars.noise_tol
# except AttributeError:
# pass
# try:
# call_args['depvar'] = self.super_pars.burst_coord
# except AttributeError:
# pass
# try:
# call_args['tol'] = 1.1*burst_est.std_ISI/burst_est.mean_ISI
# except AttributeError:
# pass
# call_args['make_traj'] = False
# call_args['spest'] = burst_est
# env = spike_envelope(burst_est.pts, burst_est.mean_ISI,
# **call_args)
test_env_ts = npy.linspace(peak_t[0], peak_t[-1], self.pars.num_samples)
return self.metric(self.results.burst_peak_env(test_env_ts,
self.super_pars.burst_coord),
self.super_pars.ref_peak_vals) < self.pars.tol
class get_burst_trough_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
burst_pts = self.super_pars.ref_burst_pts_resampled
burst_est = self.super_pars.ref_burst_est
vals = burst_pts[self.super_pars.burst_coord]
inter_spike_ixs = [(burst_est.spike_ixs[i-1],
burst_est.spike_ixs[i]) \
for i in range(1, len(burst_est.spike_ixs))]
# should really use quadratic fit to get an un-biased minimum
trough_ixs = [npy.argmin(vals[ix_lo:ix_hi])+ix_lo for ix_lo, ix_hi in \
inter_spike_ixs]
trough_vals = [vals[i] for i in trough_ixs]
trough_t = [burst_pts.indepvararray[i] for i in trough_ixs]
self.ref_traj = numeric_to_traj([trough_vals], 'trough_envelope',
self.super_pars.burst_coord, trough_t,
burst_pts.indepvarname, discrete=False)
ref_env_ts = npy.linspace(trough_t[0], trough_t[-1],
self.pars.num_samples)
self.pars.ref_trough_vals = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)
def evaluate(self, target):
# ignore target
dc_offset = self.super_results.baseline_V
burst_pts = self.super_pars.burst_coord_pts
burst_est = self.super_results.burst_est
vals = burst_pts[self.super_pars.burst_coord]
ts = self.super_results.spike_times
spike_ixs = []
for t in ts:
tix = burst_pts.find(t, end=0)
spike_ixs.append(tix)
inter_spike_ixs = [(spike_ixs[i-1],
spike_ixs[i]) \
for i in range(1, len(ts))]
# min and max events in model mean that these are recorded
# accurately in the pointsets already
trough_ixs = [npy.argmin(vals[ix_lo:ix_hi])+ix_lo for ix_lo, ix_hi in \
inter_spike_ixs]
trough_vals = [vals[i] - dc_offset for i in trough_ixs]
# use self.pars.trough_t for isi mid-point times
trough_t = [burst_pts.indepvararray[i] for i in trough_ixs]
self.results.burst_trough_env = numeric_to_traj([trough_vals],
'trough_envelope',
self.super_pars.burst_coord,
trough_t,
burst_pts.indepvarname, discrete=False)
test_env_ts = npy.linspace(trough_t[0], trough_t[-1],
self.pars.num_samples)
self.results.trough_t = trough_t
return self.metric(self.results.burst_trough_env(test_env_ts,
self.super_pars.burst_coord),
self.super_pars.ref_trough_vals) < self.pars.tol
class get_burst_isi_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
burst_pts = self.super_pars.ref_burst_pts_resampled
ts = burst_pts.indepvararray
burst_est = self.super_pars.ref_burst_est
# find approximate (integer) mid-point index between spikes
mid_isi_ixs = [int(0.5*(burst_est.spike_ixs[i-1]+burst_est.spike_ixs[i])) \
for i in range(1, len(burst_est.spike_ixs))]
isi_t = [ts[i] for i in mid_isi_ixs]
isi_vals = [ts[burst_est.spike_ixs[i]]-ts[burst_est.spike_ixs[i-1]] for \
i in range(1, len(burst_est.spike_ixs))]
self.ref_traj = numeric_to_traj([isi_vals], 'isi_envelope',
self.super_pars.burst_coord, isi_t,
burst_pts.indepvarname, discrete=False)
ref_env_ts = npy.linspace(isi_t[0], isi_t[-1],
self.pars.num_samples)
self.pars.ref_isis = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)
def evaluate(self, target):
# ignore target
ts = self.super_results.spike_times
tname = self.super_pars.burst_coord_pts.indepvarname
isi_vals = [ts[i]-ts[i-1] for i in range(1, len(ts))]
self.results.burst_isi_env = numeric_to_traj([isi_vals],
'isi_envelope',
self.super_pars.burst_coord,
self.super_results.trough_t,
tname, discrete=False)
test_env_ts = npy.linspace(self.super_results.trough_t[0],
self.super_results.trough_t[-1],
self.pars.num_samples)
return self.metric(self.results.burst_isi_env(test_env_ts,
self.super_pars.burst_coord),
self.pars.ref_isis) < self.pars.tol
class get_burst_upsweep(qt_feature_leaf):
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.t_offs)
def postprocess_ref_traj(self):
vname = self.super_pars.burst_coord
ts = [self.super_pars.ref_spike_times[0] - toff for \
toff in self.pars.t_offs]
self.pars.ref_upsweep_V = npy.array([self.ref_traj(t, vname) for \
t in ts])
def evaluate(self, target):
dc_offset = self.super_results.baseline_V
vname = self.super_pars.burst_coord
all_pts = self.super_pars.burst_coord_pts
vals = []
for toff in self.pars.t_offs:
target_t = self.super_results.spike_times[0] - toff
if target_t < all_pts.indepvararray[0]:
# out of range - return penalty
self.metric.results = 5000*npy.ones((self.metric_len,),float)
return False
tix = all_pts.find(target_t, end=0)
new_var = make_poly_interpolated_curve(all_pts[tix-5:tix+5],
vname, target.model)
vals.append(new_var(target_t))
self.results.upsweep_V = npy.array(vals) - dc_offset
return self.metric(self.results.upsweep_V, \
self.pars.ref_upsweep_V) < self.pars.tol
class get_burst_downsweep(qt_feature_leaf):
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.t_offs)
def postprocess_ref_traj(self):
vname = self.super_pars.burst_coord
ts = [self.super_pars.ref_spike_times[-1] + toff for \
toff in self.pars.t_offs]
self.pars.ref_downsweep_V = npy.array([self.ref_traj(t, vname) for \
t in ts])
def evaluate(self, target):
dc_offset = self.super_results.baseline_V
vname = self.super_pars.burst_coord
all_pts = self.super_pars.burst_coord_pts
vals = []
for toff in self.pars.t_offs:
target_t = self.super_results.spike_times[-1] + toff
if target_t > all_pts.indepvararray[-1]:
# out of range - return penalty
self.metric.results = 5000*npy.ones((self.metric_len,),float)
return False
tix = all_pts.find(target_t, end=0)
new_var = make_poly_interpolated_curve(all_pts[tix-5:tix+5],
vname, target.model)
vals.append(new_var(target_t))
self.results.downsweep_V = npy.array(vals) - dc_offset
return self.metric(self.results.downsweep_V,
self.pars.ref_downsweep_V) < self.pars.tol
class get_burst_num_spikes(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def evaluate(self, target):
return self.metric(npy.array(len(self.super_results.spike_times)),
npy.array(len(self.super_pars.ref_spike_times))) == 0
class get_burst_period_info(qt_feature_leaf):
def _local_init(self):
self.metric = metric_weighted_L2()
self.metric_len = 2
# strongly penalize lack of periodicity
self.metric.weights = npy.array([1., 1000.])
def evaluate(self, target):
return self.metric(npy.array([self.super_results.period,
self.super_results.period_val_error]),
npy.array([self.super_pars.ref_period,
0.])) \
< self.pars.tol
# --------------------------------------------
class spike_metric(metric):
"""Measures the distance between spike time and height,
using an inherent weighting of height suited to neural voltage
signals (0.05 of time distance)."""
def __call__(self, sp1, sp2):
# weight 'v' component down b/c 't' values are on a different scale
self.results = npy.array(sp1-sp2).flatten()*npy.array([1,0.05])
return npy.linalg.norm(self.results)
class spike_feature(qt_feature_node):
"""pars keys: tol"""
def _local_init(self):
self.metric_len = 2
self.metric = spike_metric()
def evaluate(self, target):
# traj will be a post-processed v trajectory ->
# spike time and height values
return self.metric(target.test_traj.sample(), self.ref_traj.sample()) \
< self.pars.tol
class geom_feature(qt_feature_leaf):
"""Measures the residual between two 1D parameterized geometric
curves (given as Trajectory objects).
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.tmesh)
def evaluate(self, target):
# resample ref_traj to the tmesh we want
return self.metric(target.test_traj(self.pars.tmesh,
coords=[self.pars.depvar]),
self.ref_traj(self.pars.tmesh,
coords=[self.pars.depvar])) < self.pars.tol
# ------------------------------------------------------------------
class estimate_spiking(object):
"""Estimate pattern of spiking in tonic or burst patterns."""
def __init__(self, x, t, filt_coeffs, sense='up'):
"""Pass only 1D pointset.
If spikes are in the "positive" direction of the variable,
use sense='up', else use 'down'."""
self.sense = sense
self.b, self.a = filt_coeffs
x_filt = filtfilt(self.b, self.a, x)
self.x_just_filt = x_filt
self.t = t
max_x = max(x_filt)
# retain only values larger than 10% of max to estimate burst
# envelope
x_filt_mask = npy.asarray(x_filt>(0.1*max_x),int)
burst_off_ix = len(t) - npy.argmax(x_filt_mask[::-1])
burst_on_ix = npy.argmax(x_filt_mask)
self.burst_on = (burst_on_ix, t[burst_on_ix])
self.burst_off = (burst_off_ix, t[burst_off_ix])
self.burst_duration = t[burst_off_ix] - t[burst_on_ix]
# retain only values larger than 25% of max for actual spikes
# FAILING: temp switch off
x_filt_th = x_filt_mask #npy.asarray(x_filt>(0.25*max_x),int)*x_filt
# find each spike by group of positive values
# eliminating each afterwards (separated by zeros)
spike_ixs = []
done = False
n = 0 # for safety
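        # repeatedly zero out the largest remaining thresholded group and record the spike
        # index at its zero crossing (see eliminate_group) until no groups are left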
while not done:
# find next group centre and eliminate it
x_filt_th = self.eliminate_group(x_filt_th, spike_ixs)
n += 1
# no groups left to eliminate?
done = max(x_filt_th) == 0 or n > 100
spike_ixs.sort()
self.spike_ixs = spike_ixs
self.spike_ts = t[spike_ixs]
self.ISIs = [self.spike_ts[i]-self.spike_ts[i-1] for \
i in range(1, len(spike_ixs))]
self.mean_ISI = npy.mean(self.ISIs)
self.std_ISI = npy.std(self.ISIs)
self.num_spikes = len(spike_ixs)
def eliminate_group(self, xf, spike_ixs):
centre_ix = npy.argmax(xf)
# print "Current spike_ixs", spike_ixs
# print "eliminating group at t = ", self.t[centre_ix]
# forward half-group
end_ix = npy.argmin(xf[centre_ix:])+centre_ix
# backward half-group
start_ix = centre_ix-npy.argmin(xf[:centre_ix+1][::-1])
# nullify values in range!
xf[start_ix:end_ix]=0
# print start_ix, end_ix, xf[start_ix:end_ix]
if self.sense == 'up':
# x will be rising to peak, so track forwards until
# xfilt makes zero crossing and becomes negative
new = centre_ix+npy.argmax(self.x_just_filt[centre_ix:]<0)
if new not in spike_ixs:
spike_ixs.append(new)
else:
# track backwards
new = centre_ix-npy.argmin(self.x_just_filt[:centre_ix+1]>0)
if new not in spike_ixs:
spike_ixs.append(new)
return xf
class spike_envelope(object):
"""Find an amplitude envelope over a smooth 1D signal that features
roughly periodic spikes. Input is a 1D parameterized pointset
and the approximate period. An optional input is the tolerance (fraction)
for finding spikes around the period (measuring uncertainty in the
period) -- default 0.2 (20% of the period).
Optional start_t sets where to orient the search in the independent
variable -- default None (start at the highest point of the signal).
It *must* be a value that is present in the independent variable
array of the given points argument.
Optional noise_floor sets minimum signal amplitude considered to
be a peak (default 0 means non-noisy data assumed).
Outside of spike times +/- tol, envelope curve will be defined as
amplitude zero.
adjust_rate is a fraction < 1 specifying the %age change of spike
search interval (a.k.a. 'period'). default 0.1.
make_traj option can be used to avoid costly creation of a Trajectory
object representing the envelope curve, if unneeded (default True).
When less is known in advance about the regularity or other properties
of the spikes, pre-process using estimate_spiking() and pass the
result as the optional argument spest.
"""
def __init__(self, pts, per, tol=0.2, start_t=None,
noise_floor=0, depvar=None, adjust_rate=0.1,
make_traj=True, spest=None):
try:
self.tvals = pts.indepvararray
except:
raise TypeError("Parameterized pointset required")
self.pts = pts # store this to take advantage of index search
if depvar is None:
assert pts.dimension == 1
depvar = pts.coordnames[0]
self.vals = pts[depvar]
else:
try:
self.vals = pts[depvar]
except PyDSTool_KeyError:
raise ValueError("Invalid dependent variable name")
self.numpoints = len(self.vals)
assert self.numpoints > 1
self.per = per
self.noise_floor = noise_floor
assert tol < 1 and tol > 0
self.tol = tol
# assume that the maximum is a spike, so is a reliable
# phase reference
if start_t is None:
            self.start_ix = npy.argmax(self.vals)
# Copyright (c) 2018, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN, DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Testing grid functionality.
"""
import unittest
import numpy.testing as nptest
import numpy as np
try:
from osgeo import ogr
ogr_installed = True
except ImportError:
ogr_installed = False
import pytest
from pygeogrids.grids import lonlat2cell, BasicGrid
import pygeogrids as grids
class Test_lonlat2cell(unittest.TestCase):
def setUp(self):
lat = np.arange(-90, 90, 2.5)
lon = np.arange(-180, 180, 2.5)
self.lons, self.lats = np.meshgrid(lon, lat)
def testlonlat2cell_hist(self):
"""
        Setup a grid with unequal cell size along lat and lon and test if the
        correct number of points lie in each cell.
"""
cells = lonlat2cell(
self.lons, self.lats, cellsize_lon=15, cellsize_lat=30)
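        # with 2.5 degree spacing, each 15 x 30 degree cell holds (15 / 2.5) * (30 / 2.5) = 72 points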
hist, bin_edges = np.histogram(
cells.flatten(), bins=len(np.unique(cells)))
nptest.assert_allclose(hist, np.zeros_like(hist) + 72)
def testlonlat2cell_edge(self):
"""
Use points on the 180 degree longitude and see if they fall into
the correct cell
"""
lats = [69.8242, 69.122, 68.42]
lons = [180, 180, 180]
cells = lonlat2cell(lons, lats)
assert list(cells) == [31, 31, 31]
class TestFindNearestNeighbor(unittest.TestCase):
def setUp(self):
self.grid = grids.genreg_grid(1, 1)
def test_nearest_neighbor(self):
gpi, dist = self.grid.find_nearest_gpi(14.3, 18.5)
assert gpi == 25754
assert len([dist]) == 1
lon, lat = self.grid.gpi2lonlat(gpi)
assert lon == 14.5
assert lat == 18.5
def test_nearest_neighbor_list(self):
gpi, dist = self.grid.find_nearest_gpi([145.1, 90.2], [45.8, -16.3])
assert len(gpi) == 2
assert len(dist) == 2
assert gpi[0] == 16165
assert gpi[1] == 38430
lon, lat = self.grid.gpi2lonlat(gpi)
assert lon[0] == 145.5
assert lon[1] == 90.5
assert lat[0] == 45.5
assert lat[1] == -16.5
def test_nearest_neighbor_ndarray(self):
gpi, dist = self.grid.find_nearest_gpi(
np.array([145.1, 90.2]), np.array([45.8, -16.3]))
assert len(gpi) == 2
assert len(dist) == 2
assert gpi[0] == 16165
assert gpi[1] == 38430
lon, lat = self.grid.gpi2lonlat(gpi)
assert lon[0] == 145.5
assert lon[1] == 90.5
assert lat[0] == 45.5
assert lat[1] == -16.5
def test_nearest_neighbor_numpy_single(self):
gpi, dist = self.grid.find_nearest_gpi(
np.array([145.1, 90.2])[0], np.array([45.8, -16.3])[0])
assert gpi == 16165
lon, lat = self.grid.gpi2lonlat(gpi)
assert lon == 145.5
assert lat == 45.5
def test_k_nearest_neighbor(self):
gpi, dist = self.grid.find_k_nearest_gpi(14.3, 18.5, k=2)
assert gpi[0, 0] == 25754
assert gpi[0, 1] == 25753
assert dist.shape == (1, 2)
lon, lat = self.grid.gpi2lonlat(gpi[0, 0])
assert lon == 14.5
assert lat == 18.5
lon, lat = self.grid.gpi2lonlat(gpi[0, 1])
assert lon == 13.5
assert lat == 18.5
def test_k_nearest_neighbor_list(self):
gpi, dist = self.grid.find_k_nearest_gpi(
[145.1, 90.2], [45.8, -16.3], k=2)
assert gpi.shape == (2, 2)
assert dist.shape == (2, 2)
assert gpi[0, 0] == 16165
assert gpi[0, 1] == 16164
assert gpi[1, 0] == 38430
assert gpi[1, 1] == 38429
def test_nearest_neighbor_max_dist(self):
# test with maxdist higher than nearest point
gpi, dist = self.grid.find_nearest_gpi(14.3, 18.5, max_dist=100e3)
assert gpi == 25754
assert len([dist]) == 1
lon, lat = self.grid.gpi2lonlat(gpi)
assert lon == 14.5
assert lat == 18.5
# test with maxdist lower than nearest point
gpi, dist = self.grid.find_nearest_gpi(14.3, 18.5, max_dist=10000)
assert len(gpi) == 0
assert len(dist) == 0
# test with custom gpi, see issue #68
grid = grids.BasicGrid(lon=[16,17], lat=[45,46], gpis=[100,200])
gpi, dist = grid.find_nearest_gpi(0,0, max_dist=1000)
assert len(gpi) == 0
assert len(dist) == 0
class TestCellGridNotGpiDirect(unittest.TestCase):
"""
    Setup a simple 2.5 degree global 2D grid (144x72) which starts at the
    North Western corner of 90 -180. Test for cell specific features.
"""
def setUp(self):
self.latdim = np.arange(90, -90, -2.5)
self.londim = np.arange(-180, 180, 2.5)
self.lon, self.lat = np.meshgrid(self.londim, self.latdim)
self.grid = grids.BasicGrid(self.lon.flatten(), self.lat.flatten(),
gpis=np.arange(self.lon.flatten().size),
shape=(len(self.latdim),
len(self.londim)))
self.reverse_gpi_grid = grids.BasicGrid(
self.lon.flatten(), self.lat.flatten(),
gpis=np.arange(self.lon.flatten().size)[::-1],
shape=(len(self.latdim),
len(self.londim)))
self.cellgrid = self.grid.to_cell_grid()
def test_gpi2cell(self):
"""
Test if gpi to row column lookup works correctly.
"""
gpi = 200
cell = self.cellgrid.gpi2cell(gpi)
assert cell == 1043
def test_gpi2cell_iterable(self):
"""
Test if gpi to row column lookup works correctly.
"""
gpi = [200, 255]
cell = self.cellgrid.gpi2cell(gpi)
assert np.all(cell == [1043, 2015])
def test_gpi2cell_numpy(self):
"""
test if gpi to cell lookup works correctly
"""
gpi = np.array([200, 255])
cell = self.cellgrid.gpi2cell(gpi)
assert np.all(cell == [1043, 2015])
def test_gpi2cell_numpy_single(self):
"""
test if gpi to row column lookup works correctly
"""
gpi = np.array([200, 255])[0]
cell = self.cellgrid.gpi2cell(gpi)
assert cell == 1043
def test_calc_lut(self):
"""
        Test calculation of the lookup table into the reverse gpi grid.
        This must result in a lookup table that reverses the gpis.
"""
lut = self.grid.calc_lut(self.reverse_gpi_grid)
nptest.assert_allclose(lut[::-1], self.grid.gpis)
def test_gpi2cell_custom_gpis(self):
"""
Test if gpi to row column lookup works correctly.
"""
self.custom_gpi_grid = \
grids.BasicGrid(self.lon.flatten(), self.lat.flatten(),
shape=(len(self.latdim),
len(self.londim)),
gpis=np.arange(len(self.lat.flatten()))[::-1])
self.custom_gpi_cell_grid = self.custom_gpi_grid.to_cell_grid()
gpi = [200, 255]
cell = self.custom_gpi_cell_grid.gpi2cell(gpi)
assert np.all(cell == [1549, 577])
gpi = 200
cell = self.custom_gpi_cell_grid.gpi2cell(gpi)
assert cell == 1549
def test_subgrid_from_cells(self):
"""
Test subgrid selection.
"""
cells = [1549, 577]
subgrid = self.cellgrid.subgrid_from_cells(cells)
assert type(subgrid) == type(self.cellgrid)
for cell in cells:
gpis, lons, lats = subgrid.grid_points_for_cell(cell)
cell_index = np.where(cell == self.cellgrid.activearrcell)
orig_gpis = self.cellgrid.activegpis[cell_index]
orig_lons = self.cellgrid.activearrlon[cell_index]
orig_lats = self.cellgrid.activearrlat[cell_index]
nptest.assert_array_equal(gpis, orig_gpis)
nptest.assert_array_equal(lons, orig_lons)
nptest.assert_array_equal(lats, orig_lats)
def test_subgrid_from_gpis(self):
"""
Test subgrid selection.
"""
gpis = [200, 255]
subgrid = self.cellgrid.subgrid_from_gpis(gpis)
assert type(subgrid) == type(self.cellgrid)
lons_should, lats_should = self.cellgrid.gpi2lonlat(gpis)
cells_should = self.cellgrid.gpi2cell(gpis)
subgrid_should = grids.CellGrid(
lons_should, lats_should, cells_should, gpis=gpis)
assert subgrid == subgrid_should
class TestLutCalculation(unittest.TestCase):
def setUp(self):
"""
Setup two grids with similar gpis but with different subset/gpi ordering.
The lookup tables should still give the correct results.
The gpi's of the two grids are identical.
"""
self.lats = np.array([1, 2, 3, 4])
self.lons = np.array([1, 2, 3, 4])
self.gpis = [0, 1, 2, 3]
self.subset = [3, 2]
self.lats2 = np.array([3, 4, 2, 1])
self.lons2 = np.array([3, 4, 2, 1])
self.gpis2 = [2, 3, 1, 0]
self.subset2 = [0, 1]
self.grid1 = grids.BasicGrid(self.lons, self.lats, gpis=self.gpis,
subset=self.subset)
self.grid2 = grids.BasicGrid(self.lons2, self.lats2, gpis=self.gpis2,
subset=self.subset2)
def test_calc_lut(self):
lut = self.grid1.calc_lut(self.grid2)
nptest.assert_array_equal(lut, [-1, -1, 2, 3])
nptest.assert_array_equal(
lut[self.grid2.activegpis], self.grid2.activegpis)
lut2 = self.grid2.calc_lut(self.grid1)
nptest.assert_array_equal(lut2, [-1, -1, 2, 3])
nptest.assert_array_equal(
lut2[self.grid1.activegpis], self.grid1.activegpis)
class TestCellGridNotGpiDirectSubset(unittest.TestCase):
"""Setup simple 2D grid 2.5 degree global grid (144x72) which starts at the
North Western corner of 90 -180 Test for cell specific features. This grid
also has a subset with only the first half of points active.
"""
def setUp(self):
self.latdim = np.arange(90, -90, -2.5)
self.londim = np.arange(-180, 180, 2.5)
self.lon, self.lat = np.meshgrid(self.londim, self.latdim)
self.grid = grids.BasicGrid(self.lon.flatten(), self.lat.flatten(),
gpis=np.arange(self.lon.flatten().size),
shape=(len(self.londim),
len(self.latdim)),
                                    subset=np.arange(self.lon.flatten().size // 2,
                                                     dtype=int))
self.cellgrid = self.grid.to_cell_grid()
def test_gpi2cell(self):
"""
Test if gpi to cell lookup works correctly.
"""
gpi = 5185
cell = self.cellgrid.gpi2cell(gpi)
assert cell == 18
def test_gpi2cell_iterable(self):
"""
Test if gpi to cell lookup works correctly.
"""
gpi = [200, 5185]
cell = self.cellgrid.gpi2cell(gpi)
assert np.all(cell == [1043, 18])
def test_gpi2cell_numpy_single(self):
"""
test if gpi to cell lookup works correctly
"""
gpi = np.array([5185, 255])[0]
cell = self.cellgrid.gpi2cell(gpi)
assert cell == 18
def test_gpi2lonlat(self):
"""
Test if gpi to lon lat lookup works correctly.
"""
gpi = 5185
lon, lat = self.cellgrid.gpi2lonlat(gpi)
assert lon == -177.5
assert lat == 0.0
def test_gpi2lonlat_iterable(self):
"""
Test if gpi to lon lat lookup works correctly.
"""
gpi = [200, 5185]
lon, lat = self.cellgrid.gpi2lonlat(gpi)
assert np.all(lon == [-40.0, -177.5])
assert np.all(lat == [87.5, 0.0])
def test_gpi2lonlat_numpy_single(self):
"""
test if gpi to lon lat lookup works correctly
"""
gpi = np.array([5185, 255])[0]
lon, lat = self.cellgrid.gpi2lonlat(gpi)
assert lon == -177.5
assert lat == 0.0
class TestCellGrid(unittest.TestCase):
"""
    Setup a simple 2.5 degree global 2D grid (144x72) which starts at the
    North Western corner of 90 -180. Test for cell specific features.
"""
def setUp(self):
self.latdim = np.arange(90, -90, -2.5)
self.londim = np.arange(-180, 180, 2.5)
self.lon, self.lat = np.meshgrid(self.londim, self.latdim)
self.grid = grids.BasicGrid(self.lon.flatten(), self.lat.flatten(),
shape=(len(self.latdim),
len(self.londim)))
self.cellgrid = self.grid.to_cell_grid()
def test_gpi2cell(self):
"""
Test if gpi to row column lookup works correctly.
"""
gpi = 200
cell = self.cellgrid.gpi2cell(gpi)
assert cell == 1043
def test_gpi2cell_iterable(self):
"""
Test if gpi to row column lookup works correctly.
"""
gpi = [200, 255]
cell = self.cellgrid.gpi2cell(gpi)
assert np.all(cell == [1043, 2015])
def test_gpi2cell_numpy_single(self):
"""
test if gpi to row column lookup works correctly
"""
gpi = np.array([200, 255])[0]
cell = self.cellgrid.gpi2cell(gpi)
assert cell == 1043
def test_gpi2cell_custom_gpis(self):
"""
Test if gpi to row column lookup works correctly.
"""
self.custom_gpi_grid = \
grids.BasicGrid(self.lon.flatten(), self.lat.flatten(),
shape=(len(self.londim),
len(self.latdim)),
gpis=np.arange(len(self.lat.flatten()))[::-1])
self.custom_gpi_cell_grid = self.custom_gpi_grid.to_cell_grid()
gpi = [200, 255]
cell = self.custom_gpi_cell_grid.gpi2cell(gpi)
assert np.all(cell == [1549, 577])
gpi = 200
cell = self.custom_gpi_cell_grid.gpi2cell(gpi)
assert cell == 1549
def test_subgrid_from_cells(self):
"""
Test subgrid selection.
"""
cells = [1549, 577]
subgrid = self.cellgrid.subgrid_from_cells(cells)
assert type(subgrid) == type(self.cellgrid)
for cell in cells:
gpis, lons, lats = subgrid.grid_points_for_cell(cell)
cell_index = np.where(cell == self.cellgrid.activearrcell)
orig_gpis = self.cellgrid.activegpis[cell_index]
orig_lons = self.cellgrid.activearrlon[cell_index]
orig_lats = self.cellgrid.activearrlat[cell_index]
nptest.assert_array_equal(gpis, orig_gpis)
nptest.assert_array_equal(lons, orig_lons)
nptest.assert_array_equal(lats, orig_lats)
def test_subgrid_from_gpis(self):
"""
Test subgrid selection.
"""
gpis = [200, 255]
subgrid = self.cellgrid.subgrid_from_gpis(gpis)
assert type(subgrid) == type(self.cellgrid)
lons_should, lats_should = self.cellgrid.gpi2lonlat(gpis)
cells_should = self.cellgrid.gpi2cell(gpis)
subgrid_should = grids.CellGrid(
lons_should, lats_should, cells_should, gpis=gpis)
assert subgrid == subgrid_should
def test_get_bbox_grid_points(self):
gpis = self.cellgrid.get_bbox_grid_points(latmin=-10,
latmax=-5,
lonmin=-10,
lonmax=-5)
nptest.assert_allclose(gpis,
np.array([5684, 5685, 5828, 5829,
5540, 5541, 5686, 5830, 5542]))
# gpis should come back sorted by cells
nptest.assert_allclose(self.cellgrid.gpi2cell(gpis),
np.array([1240, 1240, 1240, 1240,
1241, 1241, 1276, 1276, 1277]))
lats, lons = self.cellgrid.get_bbox_grid_points(latmin=-10,
latmax=-5,
lonmin=-10,
lonmax=-5,
coords=True)
lats_should = np.array([-7.5, -7.5, -10., -10.,
-5., -5., -7.5, -10., -5.])
lons_should = np.array([-10., -7.5, -10., -7.5,
-10., -7.5, -5., -5., -5.])
nptest.assert_allclose(lats,
lats_should)
nptest.assert_allclose(lons,
lons_should)
gpis, lats, lons = self.cellgrid.get_bbox_grid_points(latmin=-10,
latmax=-5,
lonmin=-10,
lonmax=-5,
both=True)
lats_should = np.array([-7.5, -7.5, -10., -10.,
-5., -5., -7.5, -10., -5.])
lons_should = np.array([-10., -7.5, -10., -7.5,
-10., -7.5, -5., -5., -5.])
nptest.assert_allclose(lats,
lats_should)
nptest.assert_allclose(lons,
lons_should)
nptest.assert_allclose(gpis,
np.array([5684, 5685, 5828, 5829,
5540, 5541, 5686, 5830, 5542]))
# gpis should come back sorted by cells
nptest.assert_allclose(self.cellgrid.gpi2cell(gpis),
np.array([1240, 1240, 1240, 1240,
1241, 1241, 1276, 1276, 1277]))
def test_setup_grid_with_lists():
grid = grids.BasicGrid([1, 2, 3, 4, 5], [1, 2, 3, 4, 5])
nptest.assert_allclose(grid.arrlon, np.array([1, 2, 3, 4, 5]))
nptest.assert_allclose(grid.arrlat, np.array([1, 2, 3, 4, 5]))
def test_setup_cellgrid_with_lists():
grid = grids.CellGrid([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 1, 1, 1, 1])
nptest.assert_allclose(grid.arrlon, np.array([1, 2, 3, 4, 5]))
nptest.assert_allclose(grid.arrlat, np.array([1, 2, 3, 4, 5]))
nptest.assert_allclose(grid.arrcell, np.array([1, 1, 1, 1, 1]))
class Test_2Dgrid(unittest.TestCase):
"""
    Setup a simple 2.5 degree global 2D grid (144x72) which starts at the
    North Western corner of 90 -180 and test the 2D lookup.
"""
def setUp(self):
self.latdim = np.arange(90, -90, -2.5)
self.londim = np.arange(-180, 180, 2.5)
self.lon, self.lat = np.meshgrid(self.londim, self.latdim)
self.grid = grids.BasicGrid(self.lon.flatten(), self.lat.flatten(),
shape=(len(self.latdim),
len(self.londim)))
def test_gpi2rowcol(self):
"""
Test if gpi to row column lookup works correctly.
"""
gpi = 200
row_should = 1
column_should = 200 - 144
row, column = self.grid.gpi2rowcol(gpi)
assert row == row_should
assert column == column_should
def test_gpi2rowcol_np_int(self):
"""
test if gpi to row column lookup works correctly
"""
gpi = np.array([200])[0]
row_should = 1
column_should = 200 - 144
row, column = self.grid.gpi2rowcol(gpi)
assert row == row_should
assert column == column_should
def test_gpi2rowcol_iterable(self):
"""
Test if gpi to row column lookup works correctly.
"""
gpi = [143, 200, 255]
row_should = [0, 1, 1]
column_should = [143, 200 - 144, 255 - 144]
row, column = self.grid.gpi2rowcol(gpi)
assert np.all(row == row_should)
assert np.all(column == column_should)
def test_gpi2rowcol_custom_gpis(self):
"""
Test if gpi to row column lookup works correctly.
"""
self.custom_gpi_grid = grids.BasicGrid(self.lon.flatten(),
self.lat.flatten(),
shape=(len(self.latdim),
len(self.londim)),
gpis=np.arange(len(self.lat.flatten()))[::-1])
gpi = [200, 255]
row_should = [70, 70]
column_should = [87, 32]
row, column = self.custom_gpi_grid.gpi2rowcol(gpi)
assert np.all(row == row_should)
assert np.all(column == column_should)
def test_gpi2lonlat(self):
"""
Test if gpi to longitude latitude lookup works correctly.
"""
gpi = 200
lat_should = 87.5
lon_should = -180 + (200 - 144) * 2.5
lon, lat = self.grid.gpi2lonlat(gpi)
assert lon == lon_should
assert lat == lat_should
def test_lonlat2d(self):
"""
Test if lonlat 2d grids are the same as the grids used for making the grid.
"""
assert np.all(self.lon == self.grid.lon2d)
assert np.all(self.lat == self.grid.lat2d)
def test_tocellgrid(self):
"""
test if to_cell_grid method works correctly
"""
cell_grid = self.grid.to_cell_grid()
result = grids.BasicGrid.__eq__(self.grid, cell_grid)
assert result
def test_genreggrid():
"""
Test generation of regular grids.
"""
grid = grids.genreg_grid()
assert grid.shape == (180, 360)
lon, lat = grid.gpi2lonlat(3)
assert lon == -176.5
assert lat == 89.5
lon, lat = grid.gpi2lonlat(360)
assert lon == -179.5
assert lat == 88.5
def test_reorder_to_cellsize():
"""
Test reordering to different cellsize
"""
lons = np.array([-177, -177, -176, -176])
lats = np.array([51, 57, 51, 57])
gpis = np.array([1, 2, 3, 4])
cells = np.array([14, 14, 14, 14])
orig_grid = grids.CellGrid(lons, lats, cells, gpis=gpis)
reordered_grid = grids.reorder_to_cellsize(orig_grid, 5.0, 5.0)
nptest.assert_almost_equal(reordered_grid.gpis,
np.array([1, 3, 2, 4]))
nptest.assert_almost_equal(reordered_grid.arrlon,
                               np.array([-177, -176, -177, -176]))
import numpy as np
import matplotlib.pyplot as plt
import stanpy as stp
np.set_printoptions(precision=5, linewidth=500)
def assembly(*s_list, deg_freedom=3):
number_elements = len(s_list)
nodes = np.zeros((2 * number_elements, 3))
nodes[0::2, :] = np.array([s["Xi"] for s in s_list]).astype(int)
nodes[1::2, :] = np.array([s["Xk"] for s in s_list]).astype(int)
global_nodes = np.unique(nodes, axis=0)
num_global_nodes = global_nodes.shape[0]
indices = (np.arange(num_global_nodes) * deg_freedom).astype(int)
a = np.zeros((number_elements, 2, num_global_nodes))
a_full = np.zeros((number_elements, 2 * deg_freedom, num_global_nodes * deg_freedom))
for i, node in enumerate(nodes.reshape(number_elements, -1, 3)):
a[i, 0] = (global_nodes == node[0]).all(axis=1).astype(int)
a[i, 1] = (global_nodes == node[1]).all(axis=1).astype(int)
mask = a[i, 0] == 1
a_full[i, 0:deg_freedom, indices[mask].item() : indices[mask].item() + deg_freedom] = np.eye(
deg_freedom, deg_freedom
)
mask = a[i, 1] == 1
a_full[
i,
deg_freedom : 2 * deg_freedom,
indices[mask].item() : indices[mask].item() + deg_freedom,
] = np.eye(deg_freedom, deg_freedom)
return a_full
# def assembly_univ(*elements, deg_freedom=3):
# number_elements = len(elements)
# nodes = np.zeros((2 * number_elements, 3)) # 3Dimensional
# nodes[0::2, :] = np.array([s["Xi"] for s in elements])
# nodes[1::2, :] = np.array([s["Xk"] for s in elements])
# global_nodes = np.unique(nodes, axis=0)
# num_global_nodes = global_nodes.shape[0]
# indices = (np.arange(num_global_nodes) * deg_freedom).astype(int)
# a = np.zeros((number_elements, 2, num_global_nodes))
# a_full = np.zeros((number_elements, 2 * deg_freedom, num_global_nodes * deg_freedom))
# for i, node in enumerate(nodes.reshape(number_elements, -1, 3)):
# a[i, 0] = (global_nodes == node[0]).all(axis=1).astype(int)
# a[i, 1] = (global_nodes == node[1]).all(axis=1).astype(int)
# mask = a[i, 0] == 1
# a_full[i, 0:deg_freedom, indices[mask].item() : indices[mask].item() + deg_freedom] = np.eye(
# deg_freedom, deg_freedom
# )
# mask = a[i, 1] == 1
# a_full[
# i,
# deg_freedom : 2 * deg_freedom,
# indices[mask].item() : indices[mask].item() + deg_freedom,
# ] = np.eye(deg_freedom, deg_freedom)
# return a_full
def element_stiffness_matrix(**s):
R = rotation_matrix(**s)
vec_i = np.array(s["Xi"])
vec_k = np.array(s["Xk"])
vec_R = vec_k - vec_i
QeT = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]).dot(R)
l = np.linalg.norm(vec_R).item()
K = s["EA"] / l * QeT.T.dot(np.array([[1, -1], [-1, 1]])).dot(QeT)
return K
def system_stiffness_matrix(*s_list):
a = assembly(*s_list)
K = a[0].T.dot(element_stiffness_matrix(**s_list[0])).dot(a[0])
for i, s in enumerate(s_list):
if i == 0:
pass
else:
K += a[i].T.dot(element_stiffness_matrix(**s_list[i])).dot(a[i])
K[0, :] = 0
diag = np.copy(np.diag(K))
diag[diag == 0] = 1
np.fill_diagonal(K, diag)
return K
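# Minimal usage sketch (hypothetical element dicts, not part of the original code):
# "Xi"/"Xk" are the 3D node coordinates and "EA" the axial stiffness of a bar element.
# s1 = {"Xi": [0, 0, 0], "Xk": [1, 0, 0], "EA": 1.0}
# s2 = {"Xi": [1, 0, 0], "Xk": [1, 1, 0], "EA": 1.0}
# a = assembly(s1, s2)                 # shape (2, 6, 9): 2 elements, 3 nodes x 3 DOF
# K = system_stiffness_matrix(s1, s2)  # 9x9 global stiffness matrix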
def rotation_matrix(**s):
vec_i = np.array(s["Xi"])
vec_k = np.array(s["Xk"])
vec_R = vec_k - vec_i
norm_R = np.linalg.norm(vec_R)
theta = np.radians(90)
    # use distinct names so the sine value does not shadow the kwargs dict ``s``
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rot_mat = np.array(((cos_t, -sin_t, 0), (sin_t, cos_t, 0), (0, 0, 1)))
e1 = vec_R / norm_R
e2 = rot_mat.dot(e1)
    e3 = np.cross(e1, e2)
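    # Assumed completion -- the original snippet stops above. Stacking the local axes
    # into a 3x3 direction-cosine matrix and expanding it block-diagonally to the six
    # global DOFs of the two element nodes matches how element_stiffness_matrix uses R.
    T = np.vstack((e1, e2, e3))
    return np.kron(np.eye(2), T)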
import os
import json
import logging
import argparse
import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
from tqdm import tqdm
from numpy.random import default_rng
from collections import OrderedDict, Counter
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score, roc_auc_score, average_precision_score, confusion_matrix
from pyteap.signals.bvp import acquire_bvp, get_bvp_features
from pyteap.signals.gsr import acquire_gsr, get_gsr_features
from pyteap.signals.hst import acquire_hst, get_hst_features
from pyteap.signals.ecg import get_ecg_features
from logutils import init_logger
def load_segments(segments_dir):
segments = {}
# for each participant
for pid in os.listdir(segments_dir):
segments.setdefault(int(pid), [])
froot = os.path.join(segments_dir, pid)
# for segments for a participant
for fname in os.listdir(froot):
# get labels, segment index, and path to json file
labels = fname.split('-')[-1].split('.')[0]
idx = int(fname.split('-')[1])
fpath = os.path.join(froot, fname)
# load json file and save to dict of pid: [segments = (idx, segment, labels)]
with open(fpath) as f:
seg = json.load(f)
segments[int(pid)].append((idx, seg, labels))
# return dict sorted by pid
return OrderedDict(sorted(segments.items(), key=lambda x: x[0]))
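# Expected on-disk layout, inferred from the parsing above (an assumption, not stated
# in the original script):
#   segments_dir/<pid>/<prefix>-<segment index>-<label string>.json
# where each JSON file holds the raw signals of one segment.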
def get_features(sig, sr, sigtype):
if sigtype == 'bvp':
features = get_bvp_features(acquire_bvp(sig, sr), sr)
elif sigtype == 'eda':
features = get_gsr_features(acquire_gsr(sig, sr, conversion=1e6), sr)
elif sigtype == 'temp':
features = get_hst_features(acquire_hst(sig, sr), sr)
elif sigtype == 'ecg':
features = get_ecg_features(sig)
return features
def get_data_rolling(segments, n, labeltype, majority):
X, y = {}, {}
# for each participant
for pid, segs in segments.items():
segs = sorted(segs, key=lambda x: x[0])
pbar = tqdm(range(len(segs) - n), desc=f'Subject {pid:02d}', ascii=True, dynamic_ncols=True)
curr_X, curr_y = [], []
for i in pbar:
# get n consecutive segments from i-th segment
curr_segs = segs[i:i + n]
features = []
# get features
for sigtype, sr in [('bvp', 64), ('eda', 4), ('temp', 4), ('ecg', 1)]:
sig = np.concatenate([sigs[sigtype] for _, sigs, _ in curr_segs])
features.extend(get_features(sig, sr, sigtype))
# skip if one or more feature is NaN
if np.isnan(features).any():
logging.getLogger('default').warning('One or more feature is NaN, skipped.')
continue
if labeltype == 's':
curr_a = [int(labels[0]) for _, _, labels in curr_segs]
curr_v = [int(labels[1]) for _, _, labels in curr_segs]
elif labeltype == 'p':
curr_a = [int(labels[2]) for _, _, labels in curr_segs]
curr_v = [int(labels[3]) for _, _, labels in curr_segs]
elif labeltype == 'e':
curr_a = [int(labels[4]) for _, _, labels in curr_segs]
curr_v = [int(labels[5]) for _, _, labels in curr_segs]
elif labeltype == 'sp':
curr_a = [np.sum([int(labels[0]), int(labels[2])]) for _, _, labels in curr_segs]
curr_v = [np.sum([int(labels[1]), int(labels[3])]) for _, _, labels in curr_segs]
# take majority label
if majority:
a_values, a_counts = np.unique(curr_a, return_counts=True)
v_values, v_counts = np.unique(curr_v, return_counts=True)
a_val = a_values[np.argmax(a_counts)]
v_val = v_values[np.argmax(v_counts)]
# or take label of the last segment
else:
a_val, v_val = curr_a[-1], curr_v[-1]
curr_X.append(features)
if labeltype != 'sp':
curr_y.append([int(a_val > 2), int(v_val > 2)])
else:
curr_y.append([int(a_val > 5), int(v_val > 5)])
# stack features for current participant and apply standardization
X[pid] = StandardScaler().fit_transform(np.stack(curr_X))
y[pid] = np.stack(curr_y)
return X, y
def get_data_discrete(segments, n, labeltype, majority):
X, y = {}, {}
# for each participant
for pid, segs in segments.items():
segs = sorted(segs, key=lambda x: x[0])
pbar = tqdm(segs, desc=f'For subject {pid:02d}', ascii=True, dynamic_ncols=True)
curr_X, curr_y, curr_segs = [], [], {}
# for each segment
for idx, signals, labels in pbar:
# get labels and add to buffer
s_a, s_v = int(labels[0]), int(labels[1])
p_a, p_v = int(labels[2]), int(labels[3])
e_a, e_v = int(labels[4]), int(labels[5])
if labeltype == 's':
curr_segs.setdefault('a', []).append(s_a)
curr_segs.setdefault('v', []).append(s_v)
elif labeltype == 'p':
curr_segs.setdefault('a', []).append(p_a)
curr_segs.setdefault('v', []).append(p_v)
elif labeltype == 'e':
curr_segs.setdefault('a', []).append(e_a)
curr_segs.setdefault('v', []).append(e_v)
elif labeltype == 'sp':
curr_segs.setdefault('a', []).append(np.sum([s_a, p_a]))
curr_segs.setdefault('v', []).append(np.sum([s_v, p_v]))
# get signals and add to buffer
for sigtype, sr in [('bvp', 64), ('eda', 4), ('temp', 4), ('ecg', 1)]:
curr_segs.setdefault(sigtype, []).append(signals[sigtype])
# if n segments are in buffer
if len(curr_segs[sigtype]) == n:
# concat signals and get features
sig = np.concatenate(curr_segs.pop(sigtype))
features = get_features(sig, sr, sigtype)
curr_segs.setdefault('features', []).append(features)
# if features are in the buffer, pop features and labels
if 'features' in curr_segs:
features = np.concatenate(curr_segs.pop('features'))
# skip if one or more feature is NaN
if np.isnan(features).any():
logging.getLogger('default').warning('One or more feature is NaN, skipped.')
continue
# take majority label
if majority:
a_values, a_counts = np.unique(curr_segs.pop('a'), return_counts=True)
v_values, v_counts = np.unique(curr_segs.pop('v'), return_counts=True)
a_val = a_values[np.argmax(a_counts)]
v_val = v_values[np.argmax(v_counts)]
# or take label of the last segment
else:
a_val = curr_segs.pop('a')[-1]
v_val = curr_segs.pop('v')[-1]
curr_X.append(features)
if labeltype != 'sp':
curr_y.append([int(a_val > 2), int(v_val > 2)])
else:
curr_y.append([int(a_val > 5), int(v_val > 5)])
pbar.set_postfix({'processed': idx // n})
# stack features for current participant and apply standardization
X[pid] = StandardScaler().fit_transform(np.stack(curr_X))
y[pid] = np.stack(curr_y)
return X, y
def prepare_kemocon(segments_dir, n, labeltype, majority, rolling):
# load segments
pid_to_segments = load_segments(segments_dir)
# extract features and labels
if rolling:
X, y = get_data_rolling(pid_to_segments, n, labeltype, majority)
else:
X, y = get_data_discrete(pid_to_segments, n, labeltype, majority)
return X, y
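# Example call (hypothetical paths/arguments, shown only as a sketch):
# X, y = prepare_kemocon('./segments', n=5, labeltype='s', majority=True, rolling=False)
# X and y are dicts keyed by participant id, holding standardized feature matrices
# and binary arousal/valence labels respectively.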
# deprecated auroc and ap for compatibility with multiclass classification
def get_results(y_test, preds, probs=None):
acc = accuracy_score(y_test, preds)
# bacc = balanced_accuracy_score(y_test, preds, adjusted=False)
f1 = f1_score(y_test, preds, average='weighted')
# auroc = roc_auc_score(y_test, probs, average='weighted')
# ap = average_precision_score(y_test, probs, average='weighted')
# return {'acc.': acc, 'bacc.': bacc, 'f1': f1, 'auroc': auroc, 'ap': ap}
return {'acc.': acc, 'f1': f1}
def pred_majority(majority, y_test):
preds = np.repeat(majority, y_test.size)
# probs = np.repeat(majority, y_test.size)
return get_results(y_test, preds)
def pred_random(y_classes, y_test, rng, ratios):
preds = rng.choice(y_classes, y_test.size, replace=True, p=ratios)
# if ratios is not None:
# probs = np.where(preds == 1, ratios[1], ratios[0])
# else:
# probs = np.repeat(0.5, y_test.size)
return get_results(y_test, preds)
def pred_gnb(X_train, y_train, X_test, y_test):
clf = GaussianNB().fit(X_train, y_train)
preds = clf.predict(X_test)
# probs = clf.predict_proba(X_test)[:, 1]
return get_results(y_test, preds)
def pred_xgb(X_train, y_train, X_test, y_test, seed, gpu, target):
# load data into DMatrix
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
# set parameters
params = {
'booster': 'gbtree',
'verbosity': 1,
'max_depth': 6,
'eta': 0.3,
'objective': 'multi:softmax',
'eval_metric': 'mlogloss' if target == 'multiclass' else 'logloss',
'num_class': 4 if target == 'multiclass' else 2,
'seed': seed,
}
# if gpu=True
if gpu:
params['gpu_id'] = 0
params['tree_method'] = 'gpu_hist'
# train model and predict
num_round = 100
bst = xgb.train(params, dtrain, num_round)
preds = bst.predict(dtest)
# return results
return get_results(y_test, preds)
def get_baseline_kfold(X, y, seed, target, n_splits, shuffle, gpu):
# initialize random number generator and fold generator
rng = default_rng(seed)
skf = StratifiedKFold(n_splits=n_splits, shuffle=shuffle, random_state=seed)
# aggregated features and labels
X = np.concatenate(list(X.values()))
y = np.concatenate(list(y.values()))
logging.getLogger('default').info(f'Dataset size: {X.shape}')
# get labels corresponding to target class
if target == 'arousal':
y = y[:, 0]
elif target == 'valence':
y = y[:, 1]
elif target == 'multiclass':
classes = np.unique(y, axis=0).tolist()
        y = np.fromiter(map(lambda x: classes.index(x.tolist()), y), dtype=int)
results = {}
# for each fold, split train & test and get classification results
for i, (train_idx, test_idx) in enumerate(skf.split(X, y)):
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]
y_classes, y_counts = np.unique(y_train, return_counts=True)
majority = y_classes[np.argmax(y_counts)]
class_ratios = y_counts / y_train.size
n_classes = len(y_classes)
results[i+1] = {
'Random': pred_random(y_classes, y_test, rng, ratios=np.repeat(1/n_classes, n_classes)),
'Majority': pred_majority(majority, y_test),
'Class ratio': pred_random(y_classes, y_test, rng, ratios=class_ratios),
'Gaussian NB': pred_gnb(X_train, y_train, X_test, y_test),
'XGBoost': pred_xgb(X_train, y_train, X_test, y_test, seed, gpu, target),
}
# return results as table
results = {(fold, classifier): values for (fold, _results) in results.items() for (classifier, values) in _results.items()}
results_table = pd.DataFrame.from_dict(results, orient='index').stack().unstack(level=1).rename_axis(['Fold', 'Metric'])
return results_table[['Random', 'Majority', 'Class ratio', 'Gaussian NB', 'XGBoost']]
def get_baseline_loso(X, y, seed, target, n_splits, shuffle, gpu):
# initialize random number generator
rng = default_rng(seed)
results = {}
# for each participant split train & test
for pid in X.keys():
X_train, X_test = np.concatenate([v for k, v in X.items() if k != pid]), X[pid]
y_train, y_test = np.concatenate([v for k, v in y.items() if k != pid]), y[pid]
# get labels corresponding to target class
if target == 'arousal':
y_train, y_test = y_train[:, 0], y_test[:, 0]
elif target == 'valence':
y_train, y_test = y_train[:, 1], y_test[:, 1]
# skip current user if there aren't both labels (0, 1) in the test set
if len(Counter(y_test)) != 2:
continue
# get majority label and class ratios
        y_classes, y_counts = np.unique(y_train, return_counts=True)
##############################################################################
### ICS5110: Applied Machine Learning
###
### Custom Classifiers Implementation
### By <NAME>, <NAME>, <NAME>
###
### January 2019
##############################################################################
import math
import copy
import numpy as np
import pandas as pd
from scipy import stats
# Base class to easily plug into the sklearn ecosystem e.g. when using Pipelines
from sklearn.base import BaseEstimator
##############################################################################
### Logistic Regression
class CustomLogitRegression(BaseEstimator):
"""Logistic regression classifier.
Parameters
----------
max_epochs : int
Iterations upper bound.
alpha : float
Learning rate.
min_gain : float
Minimum loss difference.
p_threshold : float
Class boundary.
fit_bias : bool
Add a bias/intercept constant.
class_balance : bool
Adjust class balance.
"""
def __init__(self, max_epochs=1000, alpha=0.1, min_gain=0.0001, p_threshold=0.5,
fit_bias=True, class_balance=True):
self.max_epochs = max_epochs
self.alpha = alpha
self.min_gain = min_gain
self.n_nogain = 5
self.p_threshold = p_threshold
self.fit_bias = fit_bias
self.class_balance = class_balance
self.coef_ = None # Weights to be learned
####################
# Internal functions
def _add_bias(self, X):
"""Add intercepts to matrix X."""
return np.insert(X, 0, 1, axis=1)
def _cost(self, y, y_hat):
"""Finds the prediction cost."""
return ((-y).T @ np.log(y_hat)) - ((1 - y).T @ np.log(1 - y_hat))
def _sigmoid(self, Z):
"""Maps Z to a value between 0 and 1."""
return 1 / (1 + np.exp(-Z))
##################
# Public functions
def fit(self, X, y):
"""Trains model to predict classes y given X."""
if self.fit_bias:
X = self._add_bias(X)
# Initialise weights
self.coef_ = np.zeros(X.shape[1])
# Weighted cross entropy
        n_samples = float(y.size)
y_weights = np.ones(y.size)
if self.class_balance:
# Find weights inversely proportional to class frequencies
class_weights = n_samples / (2 * np.bincount(y))
y_weights[y == 0] = class_weights[0]
y_weights[y == 1] = class_weights[1]
n_nogain = 0
        top_loss = np.inf
# Optimise using Stochastic Gradient Descent
for epoch in range(self.max_epochs):
# Predict class probabilities
Z = X @ self.coef_.T
y_hat = self._sigmoid(Z)
# Check if the new coefficients reduce the loss
loss = (self._cost(y, y_hat) * y_weights).mean()
if loss > (top_loss - self.min_gain):
# Loss is increasing, we overshot the minimum?
n_nogain += 1
else:
# Loss is decreasing, keep descending...
n_nogain = 0
#if epoch > 0 and epoch % 1000 == 0:
# print('{} Loss: {} Top: {}'.format(epoch, loss, top_loss))
if loss < top_loss:
top_loss = loss
# Stop if no improvement in loss is registered
if n_nogain >= self.n_nogain:
print('Converged early after {} epochs.'.format(epoch))
return
# Find the gradient
delta = np.matmul(X.T, (y_hat - y) * y_weights) / n_samples
# Adjust the weights
self.coef_ -= self.alpha * delta
print('Reached maximum number of epochs without converging early.')
def predict_proba(self, X):
"""Find probability of belonging to the true/false class."""
# Sanity check
if self.coef_ is None:
raise RuntimeError('Call fit first!')
# Add a bias constant
if self.fit_bias:
X = self._add_bias(X)
# Find probability of belonging to true class
Z = X @ self.coef_.T
p1 = self._sigmoid(Z)
# Find probability of belonging to false class
p0 = 1 - p1
return np.array([p0, p1]).T
def predict(self, X):
"""Predicts the classes of X."""
return self.predict_proba(X)[:,1] >= self.p_threshold
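# Minimal usage sketch (synthetic data, not part of the original module):
# clf = CustomLogitRegression(max_epochs=500, alpha=0.1)
# clf.fit(np.array([[0.1, 1.2], [2.3, 0.4], [1.5, 1.5], [3.0, 0.1]]),
#         np.array([0, 0, 1, 1]))
# clf.predict(np.array([[1.0, 1.0]]))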
### Logistic Regression
##############################################################################
##############################################################################
### Decision Tree
class _LeafNode():
"""Class that represents a leaf in the decision tree"""
def __init__(self, y):
self.outcome = y
def predict(self, X, proba):
if proba:
# Calculate class probality
bc = np.bincount(self.outcome)
zeros = bc[0]
ones = bc[1] if len(bc) == 2 else 0
            return np.array([zeros, ones], dtype=float) / len(self.outcome)
else:
# Calculate the outcome base on the majority vote
values, counts = np.unique(self.outcome, return_counts=True)
return values[counts.argmax()]
class _DecisionNode():
"""Class that represents a decision node in the decision tree"""
def __init__(self, i_feature, threshold, left_branch, right_branch):
self.i_feature = i_feature
self.threshold = threshold
self.left_branch = left_branch
self.right_branch = right_branch
def predict(self, X, proba):
"""
Do a recursive search down the tree and make a prediction of
the data sample by the outcome value of the leaf that we end
up at.
"""
# Choose the feature that we will test
feature_value = X[self.i_feature]
# Determine if we will follow left or right branch
branch = self.right_branch
if isinstance(feature_value, int) or isinstance(feature_value, float):
if feature_value >= self.threshold:
branch = self.left_branch
elif feature_value == self.threshold:
branch = self.left_branch
# Test subtree
return branch.predict(X, proba)
class CustomDecisionTree(BaseEstimator):
"""
A Decision-tree classifier.
Parameters:
-----------
min_samples_split: int
The minimum number of samples needed to make a split when building a tree.
min_impurity: float
The minimum impurity required to split the tree further.
max_depth: int
The maximum depth of a tree.
"""
def __init__(self, min_samples_split=2, min_impurity=0, max_depth=float("inf")):
self.root = None # Root node
self.min_samples_split = min_samples_split
self.min_impurity = min_impurity
self.max_depth = max_depth
####################
# Internal functions
def _predict(self, X, proba):
if isinstance(X, pd.DataFrame):
X = X.values
if self.root is None:
raise RuntimeError('call fit first!')
return np.array([self.root.predict(X[i, :], proba) for i in range(X.shape[0])])
def _build_tree(self, X, y, current_depth=0):
"""
Recursive method which builds out the decision tree and splits X and
respective y on the feature of X which (based on impurity) best separates
the data.
"""
        n_samples, _ = np.shape(X)
import numpy as np
from baselines.ecbp.agents.buffer.ps_learning_process import PSLearningProcess
# from baselines.ecbp.agents.graph.build_graph_mer_attention import *
from baselines.ecbp.agents.graph.build_graph_mer_bvae_attention import *
import logging
from multiprocessing import Pipe
import os
from baselines.ecbp.agents.psmp_learning_target_agent import PSMPLearnTargetAgent
import cv2
class BVAEAttentionAgent(PSMPLearnTargetAgent):
def __init__(self, encoder_func,decoder_func, exploration_schedule, obs_shape, vector_input=True, lr=1e-4, buffer_size=1000000,
num_actions=6, latent_dim=32,
gamma=0.99, knn=4, eval_epsilon=0.1, queue_threshold=5e-5, batch_size=32, density=True, trainable=True,
num_neg=10, tf_writer=None):
self.conn, child_conn = Pipe()
self.replay_buffer = np.empty((buffer_size + 10,) + obs_shape, np.float32 if vector_input else np.uint8)
self.ec_buffer = PSLearningProcess(num_actions, buffer_size, latent_dim*2, obs_shape, child_conn, gamma,
density=density)
self.obs = None
self.z = None
self.cur_capacity = 0
self.ind = -1
self.writer = tf_writer
self.sequence = []
self.gamma = gamma
self.queue_threshold = queue_threshold
self.num_actions = num_actions
self.exploration_schedule = exploration_schedule
self.latent_dim = latent_dim
self.knn = knn
self.steps = 0
self.batch_size = batch_size
self.rmax = 100000
self.logger = logging.getLogger("ecbp")
self.log("psmp learning agent here")
self.eval_epsilon = eval_epsilon
self.train_step = 4
self.alpha = 1
self.burnin = 2000
self.burnout = 10000000000
self.update_target_freq = 10000
self.buffer_capacity = 0
self.trainable = trainable
self.num_neg = num_neg
self.loss_type = ["attention"]
input_type = U.Float32Input if vector_input else U.Uint8Input
# input_type = U.Uint8Input
self.hash_func, self.unmask_z_func,self.train_func, self.eval_func, self.norm_func, self.attention_func, self.value_func, self.reconstruct_func,self.update_target_func = build_train_mer_bvae_attention(
input_type=input_type,
obs_shape=obs_shape,
encoder_func=encoder_func,
decoder_func=decoder_func,
num_actions=num_actions,
optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-4),
gamma=gamma,
grad_norm_clipping=10,
latent_dim=latent_dim,
loss_type=self.loss_type,
batch_size=batch_size,
num_neg=num_neg,
c_loss_type="sqmargin",
)
self.finds = [0, 0]
self.ec_buffer.start()
def train(self):
# sample
# self.log("begin training")
# print("training",self.writer)
noise = np.random.randn(9,self.batch_size,self.latent_dim)
samples = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_u = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_v = self.send_and_receive(4, (self.batch_size, self.num_neg))
index_u, _, _, _, value_u, _, _, _ = samples_u
index_v, _, _, _, value_v, _, _, _ = samples_v
index_tar, index_pos, index_neg, reward_tar, value_tar, action_tar, neighbours_index, neighbours_value = samples
if len(index_tar) < self.batch_size:
return
obs_tar = [self.replay_buffer[ind] for ind in index_tar]
obs_pos = [self.replay_buffer[ind] for ind in index_pos]
obs_neg = [self.replay_buffer[ind] for ind in index_neg]
obs_neighbour = [self.replay_buffer[ind] for ind in neighbours_index]
obs_u = [self.replay_buffer[ind] for ind in index_u]
obs_v = [self.replay_buffer[ind] for ind in index_v]
# print(obs_tar[0].shape)
if "regression" in self.loss_type:
value_original = self.norm_func(np.array(obs_tar))
value_tar = np.array(value_tar)
self.log(value_original, "value original")
self.log(value_tar, "value tar")
value_original = np.array(value_original).squeeze() / self.alpha
assert value_original.shape == np.array(value_tar).shape, "{}{}".format(value_original.shape,
np.array(value_tar).shape)
value_tar[np.isnan(value_tar)] = value_original[np.isnan(value_tar)]
assert not np.isnan(value_tar).any(), "{}{}".format(value_original, obs_tar)
input = [noise,obs_tar]
if "contrast" in self.loss_type:
input += [obs_pos, obs_neg]
if "regression" in self.loss_type:
input += [np.nan_to_num(value_tar)]
if "linear_model" in self.loss_type:
input += [action_tar]
if "contrast" not in self.loss_type:
input += [obs_pos]
if "fit" in self.loss_type:
input += [obs_neighbour, np.nan_to_num(neighbours_value)]
if "regression" not in self.loss_type:
input += [np.nan_to_num(value_tar)]
if "causality" in self.loss_type:
input += [reward_tar, action_tar]
if "weight_product" in self.loss_type:
value_u = np.nan_to_num(np.array(value_u))
value_v = np.nan_to_num(np.array(value_v))
input += [obs_u, obs_v, obs_u, obs_v, value_u, value_v]
if "attention" in self.loss_type:
value_original = self.value_func(noise,np.array(obs_tar))
value_tar = np.array(value_tar)
value_original = np.array(value_original).squeeze()
value_tar[np.isnan(value_tar)] = value_original[np.isnan(value_tar)]
input += [value_tar]
func = self.train_func if self.steps < self.burnout else self.eval_func
loss, summary = func(*input)
# self.log("finish training")
self.writer.add_summary(summary, global_step=self.steps)
def save_attention(self, filedir, step):
subdir = os.path.join(filedir, "./attention")
noise = np.random.randn(9, 1, self.latent_dim)
origin_z = np.array(self.unmask_z_func(noise,np.array(self.obs)))[0]
        z = np.array(self.hash_func(noise, np.array(self.obs)))
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# Modified by yl
# --------------------------------------------------------
import os
# import cPickle
import pickle
import numpy as np
import cv2
import math
from six.moves import xrange
from shapely.geometry import *
import xml.etree.cElementTree as ET
def parse_rec_txt(filename):
with open(filename.strip(),'r') as f:
gts = f.readlines()
objects = []
for obj in gts:
cors = obj.strip().split(',')
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['difficult'] = 0
obj_struct['bbox'] = [int(cors[0]),
int(cors[1]),
int(cors[2]),
int(cors[3])]
objects.append(obj_struct)
return objects
def rotate_box(point1, point2, point3, point4, mid_x, mid_y, theta):
theta = -theta * math.pi / 180
sin = math.sin(theta)
cos = math.cos(theta)
point1 = point1 - [mid_x, mid_y]
point2 = point2 - [mid_x, mid_y]
point3 = point3 - [mid_x, mid_y]
point4 = point4 - [mid_x, mid_y]
x1 = point1[0] * cos - point1[1] * sin + mid_x
y1 = point1[0] * sin + point1[1] * cos + mid_y
x2 = point2[0] * cos - point2[1] * sin + mid_x
y2 = point2[0] * sin + point2[1] * cos + mid_y
x3 = point3[0] * cos - point3[1] * sin + mid_x
y3 = point3[0] * sin + point3[1] * cos + mid_y
x4 = point4[0] * cos - point4[1] * sin + mid_x
y4 = point4[0] * sin + point4[1] * cos + mid_y
return np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
def quadrangle2minAreaRect(quad_coord_boxes):
quad_coord = np.array(quad_coord_boxes).reshape((4,2))
min_area_rect = cv2.minAreaRect(quad_coord)
mid_x, mid_y = min_area_rect[0]
theta = min_area_rect[2]
box = cv2.boxPoints(min_area_rect)
# determine the minAreaRect direction
# reference: http://blog.csdn.net/sunflower_boy/article/details/51170232
x0 = box[0][0]
count = np.sum(box[:,0].reshape(-1)>x0)
if count >= 2:
theta = theta
hori_box = rotate_box(box[1], box[2], box[3], box[0], mid_x, mid_y, theta)
else:
theta = 90 + theta
hori_box = rotate_box(box[2], box[3], box[0], box[1], mid_x, mid_y, theta)
min_x = np.min(hori_box[:,0])
min_y = np.min(hori_box[:,1])
max_x = np.max(hori_box[:,0])
max_y = np.max(hori_box[:,1])
mid_x = (min_x+max_x)/2.0
mid_y = (min_y+max_y)/2.0
# normalize the rotate angle in -45 to 45
items = [min_x, min_y, max_x, max_y]
if theta > 90:
theta = theta - 180
if theta < -90:
theta = theta + 180
if theta > 45:
theta = theta - 90
width = items[3] - items[1]
height = items[2] - items[0]
elif theta < -45:
theta = theta + 90
width = items[3] - items[1]
height = items[2] - items[0]
else:
width = items[2] - items[0]
height = items[3] - items[1]
return [mid_x,mid_y,width,height,-theta]# positive degree for the gt box rotated counter-clockwisely to the horizontal rectangle
def curve_parse_rec_txt(filename):
with open(filename.strip(),'r') as f:
gts = f.readlines()
objects = []
if len(gts) == 0:
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['difficult'] = 1
obj_struct['bbox'] = []
# obj_struct['minAreaRect'] = []
objects.append(obj_struct)
else:
for obj in gts:
cors = obj.strip().split(',')
obj_struct = {}
obj_struct['name'] = 'text'
# if cors[-1] == "-1":
# obj_struct['difficult'] = 1
# print('difficult')
# else:
# obj_struct['difficult'] = 0
obj_struct['difficult'] = 0
# obj_struct['bbox'] = [int(cors[0]), int(cors[1]),int(cors[2]),int(cors[3]),
# int(cors[4]), int(cors[5]),int(cors[6]),int(cors[7])]
obj_struct['bbox'] = [int(coor) for coor in cors]
# obj_struct['minAreaRect'] = quadrangle2minAreaRect(obj_struct['bbox'])
objects.append(obj_struct)
return objects
def is_valid_tag(tags):
all_tags = tags.split('|')
valid = True
count_tag = 0
for cls in ['Text', 'Formula', 'FormulaSN', 'Figure', 'Table', 'Table_Form', 'ItemList', 'Table_keyvalue_vertical', 'Table_keyvalue_horizontal']:
if cls in all_tags:
count_tag += 1
if count_tag == 0:
tags += "|Text"
elif count_tag != 1:
valid = False
# print(valid)
return valid
def curve_parse_rec_xml(filename):
tree = ET.parse(filename.strip())
root = tree.getroot()
objects = []
for elem in root.iter('Line'):
poly = elem.find('Polygon')
tags = elem.find('Tag')
tag_notsure = 0 # 0 for text, 1 for ambiguous
if tags is None:
continue
else:
tags = tags.text
if tags is None:
continue
valid = is_valid_tag(tags)
if valid == False:
tag_notsure = 1
if 'NotSure' in tags.split('|'):
tag_notsure = 1
# if not ('Table' in tags.split('|')):
if not ('Table' in tags.split('|') or 'Table_Form' in tags.split('|') or 'ItemList' in tags.split('|') or 'Table_keyvalue_vertical' in tags.split('|') or 'Table_keyvalue_horizontal' in tags.split('|')):
if tag_notsure == 0:
continue
# if not (('Table' in tags.split('|')) and ('Text' not in tags.split('|'))):
# continue
if poly is None:
continue
else:
poly = poly.text
if poly is None:
continue
items = poly.split(' ')
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['difficult'] = tag_notsure
obj_struct['bbox'] = [int(coor) for coor in items]
objects.append(obj_struct)
if len(objects) == 0:
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['difficult'] = 1
obj_struct['bbox'] = []
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
                p = np.max(prec[rec >= t])
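            # assumed continuation (the source stops above); canonical VOC AP code
            ap = ap + p / 11.
    else:
        # correct AP calculation: first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))
        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        # sum (delta recall) * precision at points where recall changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i])
    return ap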
from functools import reduce
from copy import copy
from time import time
import numpy as np
import numpy.random as npr
import numpy.linalg as la
import scipy.linalg as sla
from scipy.linalg import solve_discrete_lyapunov, solve_discrete_are
from utility.matrixmath import vec, mat, mdot, matmul_lr, specrad, dlyap, dare, dare_gain
from quadtools import quadblock, quadstack, unquadblock, unquadstack
class LinearSystem:
def __init__(self, A, B, C, a, Aa, b, Bb, c, Cc, Q, W):
self.A = A
self.B = B
self.C = C
self.a = a
self.b = b
self.c = c
self.Aa = Aa
self.Bb = Bb
self.Cc = Cc
self.Q = Q
self.W = W
self.n = A.shape[0]
self.m = B.shape[1]
self.p = C.shape[0]
@property
def data(self):
return self.A, self.B, self.C, self.a, self.Aa, self.b, self.Bb, self.c, self.Cc, self.Q, self.W
@property
def dims(self):
return self.n, self.m, self.p
@property
def AB(self):
return np.block([self.A, self.B])
@property
def AC(self):
return np.block([[self.A], [self.C]])
class LinearSystemControlled(LinearSystem):
def __init__(self, system, K, L):
super().__init__(*system.data)
self.K = K
self.L = L
# Zeros matrices
self.Zn = np.zeros([self.n, self.n])
@property
def BK(self):
return self.B @ self.K
@property
def LC(self):
return self.L @ self.C
@property
def F(self):
return self.A + self.BK - self.LC
@property
def Phi_aug(self):
return np.block([[self.A, self.BK],
[self.LC, self.F]])
@property
def AK(self):
return self.A + self.BK
@property
def AL(self):
return self.A - self.LC
@property
def IK(self):
return np.block([[np.eye(self.n)], [self.K]])
@property
def IL(self):
return np.block([np.eye(self.n), self.L])
@property
def QK(self):
return matmul_lr(self.IK.T, self.Q)
@property
def WL(self):
return matmul_lr(self.IL, self.W)
@property
def IK_aug(self):
return sla.block_diag(np.eye(self.n), self.K)
@property
def IL_aug(self):
return sla.block_diag(np.eye(self.n), self.L)
@property
def QK_aug(self):
return matmul_lr(self.IK_aug.T, self.Q)
@property
def WL_aug(self):
return matmul_lr(self.IL_aug, self.W)
@property
def linop1(self):
# Closed-loop quadratic cost transition operator
linop = np.kron(self.Phi_aug.T, self.Phi_aug.T)
for i in range(self.a.size):
PhiAa = np.block([[self.Aa[i], self.Zn],
[self.Zn, self.Zn]])
            linop += self.a[i] * np.kron(PhiAa.T, PhiAa.T)
import numpy as np
from Network import Network
from Layer import Layer
from Activations import Activations
def main():
np.random.seed(10)
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
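    # Hypothetical continuation (not in the original snippet): XOR targets and a
    # small Network built from the imported Layer/Activations classes, e.g.
    # y = np.array([[0], [1], [1], [0]])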
import numpy as np
def default_limit_theta(theta):
return theta
def gradient_descent(x, y, iterations, predict, derivative, theta=None, limit_theta=None):
assert x.shape[0] == y.shape[0]
if theta is None:
        theta = np.zeros(x.shape[1])
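    if limit_theta is None:
        limit_theta = default_limit_theta
    # Assumed continuation (the original ends above); a plausible update loop with
    # hypothetical predict(x, theta) / derivative(x, y, predictions) signatures:
    # for _ in range(iterations):
    #     predictions = predict(x, theta)
    #     theta = limit_theta(theta - derivative(x, y, predictions))
    # return theta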
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
<NAME> and <NAME>. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
<NAME> and <NAME>. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
<NAME> and <NAME> (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
<NAME>, <NAME> (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from __future__ import division
from statsmodels.compat.python import range, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
            The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
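# Minimal usage sketch (hypothetical arrays, not taken from the statsmodels sources):
# lhs = np.array([[1., -1., 0.]])       # constrain params[0] == params[1]
# rhs = np.array([0.])
# pc = ParameterConstraint(lhs, rhs, exog)
# exog_reduced = pc.reduced_exog()      # exog mapped into the constrained space
# offset_extra = pc.offset_increment()  # add this to the model offset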
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array-like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array-like
2d array of exogeneous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array-like
A 1d array of length `nobs` containing the group labels.
time : array-like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array-like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array-like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array-like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
Poission | x x x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
    DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : integer
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : string
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : array
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : array
See GEE docstring
params : array
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : array
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = np.asarray(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += np.log(exposure)
else:
self._offset_exposure = np.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array-like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array-like
The data for the model.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : array-like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array-like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array-like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : string or array-like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an array, it
must be an array of strings corresponding to column names in
`data`. Otherwise it must be an array-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = np.asarray(dep_data)
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
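    # Illustrative sketch (hypothetical inputs): `cluster_list` returns one
    # sub-array per group, ordered like `self.group_labels`.
    #
    #     import statsmodels.api as sm
    #     model = sm.GEE(endog, exog, groups)
    #     parts = model.cluster_list(model.endog)
    #     assert len(parts) == len(model.group_labels) == model.num_group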
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without calling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and calling `score_test`
on the results.
References
----------
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have different numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have different GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have different GEE covariance "
"structures.")
if not np.equal(self.weights, submod.weights).all():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = np.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_means_save = copy.deepcopy(self.cached_means)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_means(params_ex)
_, score = self._update_mean_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = np.dot(qc.T, score) / scale
amat = np.linalg.inv(ncov1)
bmat_11 = np.dot(qm.T, np.dot(cmat, qm))
bmat_22 = np.dot(qc.T, np.dot(cmat, qc))
bmat_12 = np.dot(qm.T, np.dot(cmat, qc))
amat_11 = np.dot(qm.T, np.dot(amat, qm))
amat_12 = np.dot(qm.T, np.dot(amat, qc))
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_means = cached_means_save
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
The scale parameter for binomial, Poisson, and multinomial
families is fixed at 1, otherwise it is estimated from
the data.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid ** 2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
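    # In formula form (sketch of the computation above): for families with a
    # free dispersion parameter,
    #
    #     scale = sum_i w_i * sum_j r_ij**2
    #             / (sum_i w_i * n_i * (N - ddof_scale) / N)
    #
    # where r_ij = (y_ij - mu_ij) / sqrt(V(mu_ij)) is a Pearson residual, n_i
    # is the size of group i, and w_i is the group weight (1 if no weights).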
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
            The exogenous data at which the derivative is computed.
lin_pred : array-like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array-like
Values of the independent variables at which the derivative
is calculated.
params : array-like
Parameter values at which the derivative is calculated.
offset_exposure : array-like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array-like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array-like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
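    # In matrix form (sketch): the update solves  update = B^{-1} S  with
    #
    #     B = sum_i w_i D_i' V_i^{-1} D_i,
    #     S = sum_i w_i D_i' V_i^{-1} (y_i - mu_i),
    #
    # where D_i is the Jacobian of the group mean with respect to the
    # parameters and V_i is the working covariance for group i.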
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array-like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array-like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array-like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
                return None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
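    # Sketch of the sandwich form assembled above (before the scaling factor):
    #
    #     cov_naive  = scale * B^{-1}
    #     cov_robust = B^{-1} C B^{-1},
    #     C = sum_i (w_i D_i' V_i^{-1} r_i)(w_i D_i' V_i^{-1} r_i)'
    #
    # with B as in `_update_mean_params` and r_i the raw residuals of group i.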
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : array-like
Parameters / coefficients of a marginal regression model.
exog : array-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : array-like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only allowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is added to the linear predictor.
"""
# TODO: many paths through this, not well covered in tests
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = np.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + np.log(self.exposure)
else:
_offset = offset + np.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += np.log(exposure)
lin_pred = _offset + np.dot(exog, params)
if not linear:
return self.family.link.inverse(lin_pred)
return lin_pred
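    # Illustrative sketch (hypothetical arrays; a log link is assumed): passing
    # `exposure` is equivalent to adding its log to the offset, so the two
    # calls below should agree.
    #
    #     import numpy as np
    #     mu1 = result.model.predict(result.params, exog=new_x, exposure=expo)
    #     mu2 = result.model.predict(result.params, exog=new_x,
    #                                offset=np.log(expo))
    #     assert np.allclose(mu1, mu2)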
def _starting_params(self):
model = GLM(self.endog, self.exog, family=self.family,
offset=self._offset_exposure,
freq_weights=self.weights)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Docstring attached below
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
                if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += np.dot(ex.T, sn0)
hm0 = rslt[1]
hm += np.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = np.abs(params)
clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
hm.flat[::hm.shape[0] + 1] += self.num_group * en
hm *= self.estimate_scale()
sn -= self.num_group * en * params
return np.linalg.solve(hm, sn), hm
def _regularized_covmat(self, mean_params):
self.update_cached_means(mean_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = np.dot(ex.T, rslt[0])
ma += np.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar determining the shape of the Scad
penalty.
maxiter : integer
The maximum number of iterations.
ddof_scale : integer
Value to subtract from `nobs` when calculating the
denominator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : integer
The dependence parameters are updated every `update_assoc`
iterations of the mean structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smaller than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smaller than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
Returns
-------
GEEResults instance. Note that not all methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation assumes that the link is canonical.
References
----------
<NAME>, <NAME>, <NAME>. (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
mean_params = np.zeros(self.exog.shape[1])
self.update_cached_means(mean_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
for itr in range(maxiter):
update, hm = self._update_regularized(
mean_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if np.sqrt(np.sum(update**2)) < ctol:
converged = True
break
mean_params += update
fit_history['params'].append(mean_params.copy())
self.update_cached_means(mean_params)
if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(mean_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
mean_params[np.abs(mean_params) < ztol] = 0
self._update_assoc(mean_params)
ma = self._regularized_covmat(mean_params)
cov = np.linalg.solve(hm, ma)
cov = np.linalg.solve(hm, cov.T)
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type="robust", cov_robust=cov)
scale = self.estimate_scale()
rslt = GEEResults(self, mean_params, cov, scale,
regularized=True, attr_kwds=res_kwds)
rslt.fit_history = fit_history
return GEEResultsWrapper(rslt)
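    # Illustrative sketch (hypothetical data; `pen_wt` would normally be chosen
    # by a grid search): SCAD-penalized GEE with a canonical link such as
    # Poisson/log.
    #
    #     import numpy as np
    #     import statsmodels.api as sm
    #     model = sm.GEE(endog, exog, groups, family=sm.families.Poisson())
    #     reg_result = model.fit_regularized(pen_wt=0.05)
    #     kept = np.flatnonzero(reg_result.params)   # indices of retained terms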
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array-like
A parameter vector estimate for the reduced model.
bcov : array-like
The covariance matrix of mean_params.
Returns
-------
mean_params : array-like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array-like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def qic(self, params, scale, cov_params):
"""
Returns quasi-information criteria and quasi-likelihood values.
Parameters
----------
params : array-like
The GEE estimates of the regression parameters.
scale : scalar
Estimated scale parameter
cov_params : array-like
An estimate of the covariance matrix for the
model parameters. Conventionally this is the robust
covariance matrix.
Returns
-------
ql : scalar
The quasi-likelihood value
qic : scalar
A QIC that can be used to compare the mean and covariance
structures of the model.
qicu : scalar
A simplified QIC that can be used to compare mean structures
but not covariance structures
Notes
-----
The quasi-likelihood used here is obtained by numerically evaluating
Wedderburn's integral representation of the quasi-likelihood function.
This approach is valid for all families and links. Many other
packages use analytical expressions for quasi-likelihoods that are
valid in special cases where the link function is canonical. These
analytical expressions may omit additive constants that only depend
on the data. Therefore, the numerical values of our QL and QIC values
will differ from the values reported by other packages. However only
the differences between two QIC values calculated for different models
using the same data are meaningful. Our QIC should produce the same
QIC differences as other software.
When using the QIC for models with unknown scale parameter, use a
common estimate of the scale parameter for all models being compared.
References
----------
.. [*] <NAME> (2001). Akaike's information criterion in generalized
estimating equations. Biometrics (57) 1.
"""
varfunc = self.family.variance
means = []
omega = 0.0
# omega^-1 is the model-based covariance assuming independence
for i in range(self.num_group):
expval, lpr = self.cached_means[i]
means.append(expval)
dmat = self.mean_deriv(self.exog_li[i], lpr)
omega += np.dot(dmat.T, dmat) / scale
means = np.concatenate(means)
# The quasi-likelihood, use change of variables so the integration is
# from -1 to 1.
du = means - self.endog
nstep = 10000
qv = np.empty(nstep)
xv = np.linspace(-0.99999, 1, nstep)
for i, g in enumerate(xv):
u = self.endog + (g + 1) * du / 2.0
vu = varfunc(u)
qv[i] = -np.sum(du**2 * (g + 1) / vu)
qv /= (4 * scale)
from scipy.integrate import trapz
ql = trapz(qv, dx=xv[1] - xv[0])
qicu = -2 * ql + 2 * self.exog.shape[1]
qic = -2 * ql + 2 * np.trace(np.dot(omega, cov_params))
return ql, qic, qicu
class GEEResults(base.LikelihoodModelResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, regularized=False,
**kwds):
super(GEEResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we don't do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : string
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return np.sqrt(np.diag(self.cov_robust_bc))
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
@cache_readonly
def resid(self):
"""
        Returns the residuals, the endogenous data minus the fitted
values from the model.
"""
return self.model.endog - self.fittedvalues
def score_test(self):
"""
Return the results of a score test for a linear constraint.
Returns
-------
        A dictionary containing the p-value, the test statistic,
and the degrees of freedom for the score test.
Notes
-----
See also GEE.compare_score_test for an alternative way to perform
a score test. GEEResults.score_test is more general, in that it
supports testing arbitrary linear equality constraints. However
GEE.compare_score_test might be easier to use when comparing
two explicit models.
References
----------
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
if not hasattr(self.model, "score_test_results"):
msg = "score_test on results instance only available when "
msg += " model was fit with constraints"
raise ValueError(msg)
return self.model.score_test_results
@cache_readonly
def resid_split(self):
"""
        Returns the residuals, the endogenous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
def qic(self, scale=None):
"""
Returns the QIC and QICu information criteria.
For families with a scale parameter (e.g. Gaussian), provide
as the scale argument the estimated scale from the largest
model under consideration.
If the scale parameter is not provided, the estimated scale
parameter is used. Doing this does not allow comparisons of
QIC values between models.
"""
# It is easy to forget to set the scale parameter. Sometimes
# this is intentional, so we warn.
if scale is None:
warnings.warn("QIC values obtained using scale=None are not "
"appropriate for comparing models")
if scale is None:
scale = self.scale
_, qic, qicu = self.model.qic(self.params, scale,
self.cov_params())
return qic, qicu
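    # Illustrative sketch (hypothetical Gaussian models on the same data): QIC
    # values are comparable only across models fit to the same data, ideally
    # with a shared scale estimate as noted above.
    #
    #     import statsmodels.api as sm
    #     r1 = sm.GEE(endog, exog1, groups).fit()
    #     r2 = sm.GEE(endog, exog2, groups).fit()
    #     qic1, _ = r1.qic(scale=r1.scale)
    #     qic2, _ = r2.qic(scale=r1.scale)   # reuse one scale for both
    #     # the model with the smaller QIC is preferred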
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@cache_readonly
def resid_response(self):
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
val = self.model.endog - self.fittedvalues
val = val / np.sqrt(self.family.variance(self.fittedvalues))
return val
@cache_readonly
def resid_working(self):
val = self.resid_response
val = val * self.family.link.deriv(self.fittedvalues)
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self.model.endog, self.fittedvalues)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self.model.endog, self.fittedvalues)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverse(np.dot(self.model.exog,
self.params))
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc': ''}
def plot_partial_residuals(self, focus_exog, ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc': ''}
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc': ''}
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
cov_type : string
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', and 'bias reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
"""
        # The superclass version does not allow specifying `cov_type`, and
        # the method is not implemented there.
        # FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
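    # Illustrative sketch (fitted result assumed): confidence intervals under
    # an alternative covariance type.
    #
    #     ci_naive = result.conf_int(alpha=0.05, cov_type="naive")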
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : string, optional
Default is `y`
xname : list of strings, optional
            Default is `var_##` for ## in 0, ..., p-1, where p is the
            number of regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : string
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xname, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary keyed by the
            zero-indexed column number, with the value at which to evaluate
            that variable as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
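    # Illustrative sketch (fitted result assumed; the returned GEEMargins
    # object is assumed to expose a `summary` method, like other margins
    # classes):
    #
    #     marg = result.get_margeff(at="overall", method="dydx")
    #     print(marg.summary())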
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array-like
If scalar, the number of points equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : integer
The minimum sample size in a bin for the mean residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.append(re)
dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg == k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array-like
The first dep_params in the sequence
dep_params_last : array-like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array-like
The GEEResults objects resulting from the fits.
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305
class OrdinalGEE(GEE):
__doc__ = (
" Estimation of ordinal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
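# Illustrative sketch (added for clarity, not part of the original module) of the
# ordinal-to-binary expansion that setup_ordinal performs: each ordinal outcome is
# recoded as cumulative binary indicators, one per threshold (every unique value
# except the greatest).  The data below are made up.
import numpy as np
endog = np.array([0, 1, 2, 2, 1, 0, 2])
endog_values = np.unique(endog)      # [0, 1, 2]
endog_cuts = endog_values[:-1]       # thresholds: all unique outcomes except the greatest
binary = np.column_stack([(endog > c).astype(int) for c in endog_cuts])
print(binary)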
import numpy as np
with open('input.txt', 'r') as f:
mask = [1 if char == '#' else 0 for char in f.readline().strip()]
f.readline()
image = [[1 if char == '#' else 0 for char in line.strip()] for line in f.readlines()]
image = np.array(image)
for _ in range(25):
image = np.pad(image, 2, mode='constant', constant_values=0)
enhanced_image = np.zeros(image.shape)
for i in range(1, len(image) - 1):
for j in range(1, len(image[i]) - 1):
surround_seq = np.append(image[i-1][j-1:j+2], np.append(image[i][j-1:j+2], image[i+1][j-1:j+2]))
bit = int(surround_seq.dot(2**np.arange(surround_seq.size)[::-1]))
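# Quick illustration (added, not part of the original solution) of how a flattened
# 3x3 neighbourhood of bits maps to the enhancement-table index: a dot product with
# descending powers of two, so the top-left pixel is the most significant bit.
surround_demo = np.array([0, 1, 0, 1, 1, 0, 0, 0, 1])
demo_index = int(surround_demo.dot(2 ** np.arange(surround_demo.size)[::-1]))
print(demo_index)  # 0b010110001 == 177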
from collections import Counter, defaultdict
import itertools
try:
import igraph as ig
except ModuleNotFoundError:
ig = None
import numpy as np
import operator
import logging
#############################
# Fuzzy Modularity Measures #
#############################
def nepusz_modularity(G, cover):
raise NotImplementedError("See the CONGA 2010 paper")
def zhang_modularity(G, cover):
raise NotImplementedError("""See 'Identification of overlapping community structure in
complex networks using fuzzy C-means clustering'""")
def nicosia_modularity(G, cover):
raise NotImplementedError("""See 'Extending the definition of
modularity to directed graphs with overlapping communities'""")
#############################
# Crisp modularity measures #
#############################
def count_communities(G, cover):
"""
Helper for lazar_modularity.
Returns a dict {v:count} where v is a vertex id and
count is the number of different communities it is
assigned to.
"""
counts = {i.index : 0 for i in G.vs}
for community in cover:
for v in community:
counts[v] += 1
return counts
def get_weights(G):
"""
Given a graph G, returns a list of weights. If the graph is unweighted,
returns a list of 1s the same length as the number of edges.
"""
try:
# assumes the presence of a 'weight' attribute means the graph is weighted.
weights = G.es['weight']
except KeyError:
#unweighted means all weights are 1.
weights = [1 for e in G.es]
return weights
def get_single_lazar_modularity(G, community, weights, counts):
"""
Returns the Lazar modularity of a single community.
"""
totalInternalWeight = sum(weights[G.es[e].index] for e in community) # m_c in paper
numVerticesInCommunity = len(community) # V_c in paper
numPossibleInternalEdges = numVerticesInCommunity * (numVerticesInCommunity - 1) / 2
if numPossibleInternalEdges == 0: return 0
edgeDensity = totalInternalWeight / numPossibleInternalEdges / numVerticesInCommunity
interVsIntra = 0
comm = set(community)
for v in community:
interVsIntraInternal = 0
neighbors = G.neighbors(v)
degree = len(neighbors) # k_i in paper
numCommunitiesWithin = counts[v] # s_i in paper
for n in neighbors:
weight = weights[G.get_eid(v, n)]
if n in comm:
interVsIntraInternal += weight
else:
interVsIntraInternal -= weight
interVsIntraInternal /= (degree * numCommunitiesWithin)
interVsIntra += interVsIntraInternal
return edgeDensity * interVsIntra
def lazar_modularity(G, cover):
"""
Returns the crisp modularity measure as defined by Lazar et al. 2009
Defined as the average edge density times normalized difference
between inter- and intra-community edges for each community.
See CONGA 2010 or Lazar's paper for a precise definition.
"""
numCommunities = len(cover) # |C| in the paper
totalModularity = 0 # M_c in the paper
weights = get_weights(G)
counts = count_communities(G, cover)
for c in cover:
totalModularity += get_single_lazar_modularity(G, c, weights, counts)
averageModularity = 1/numCommunities * totalModularity # M in the paper
return averageModularity
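def _example_lazar_usage():
    # Hypothetical usage sketch (added, not in the original file): two triangles
    # that overlap on vertex 2, scored with lazar_modularity.  Requires python-igraph.
    G = ig.Graph(edges=[(0, 1), (0, 2), (1, 2), (3, 4), (3, 5), (4, 5), (2, 3)])
    cover = ig.VertexCover(G, [[0, 1, 2], [2, 3, 4, 5]])
    print(lazar_modularity(G, cover))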
##################################
# Classes for overlapping covers #
##################################
class CrispOverlap(object):
"""
TODO
"""
def __init__(self, graph, covers, modularities=None, optimal_count=None, modularity_measure="lazar"):
"""
Initializes a CrispOverlap object with the given parameters.
Graph: The graph to which the object refers
covers: a dict of VertexCovers, also referring to this graph, of the form {k : v}
where k is the number of clusters and v is the corresponding VertexCover.
modularities (optional): a dict of modularities of the form {c:m} where c is
the number of clusters and m is the modularity.
optimal_count (optional): A hint for the number of clusters to use.
modularity_measure (optional): The name of the modularity function to use.
Right now, the only choice is "lazar."
"""
# Possibly figure out a better data structure like a merge
# list that contains all information needed?
# So far we only know of Lazar's measure for crisp overlapping.
self._measureDict = {"lazar" : lazar_modularity}
self._covers = covers
self._graph = graph
self._optimal_count = optimal_count
self._modularities = modularities
if modularity_measure in self._measureDict:
self._modularity_measure = modularity_measure
else: raise KeyError("Modularity measure not found.")
def __getitem__(self, numClusters):
"""
Returns the cover with the given number of clusters.
"""
if not numClusters:
raise KeyError("Number of clusters must be a positive integer.")
return self._covers[numClusters]
def __iter__(self):
"""
Iterates over the covers in the list.
"""
return (v for v in list(self._covers.values()))
def __len__(self):
"""
Returns the number of covers in the list.
"""
return len(self._covers)
def __bool__(self):
"""
Returns True when there is at least one cover in the list.
"""
return bool(self._covers)
def __str__(self):
"""
Returns a string representation of the list of covers.
"""
return '{0} vertices in {1} possible covers.'.format(len(self._graph.vs), len(self._covers))
def as_cover(self):
"""
Returns the optimal cover (by modularity) from the object.
"""
return self._covers[self.optimal_count]
def recalculate_modularities(self):
"""
Recalculates the modularities and optimal count using the modularity_measure.
"""
modDict = {}
for cover in self._covers.values():
modDict[len(cover)] = self._measureDict[self._modularity_measure](self._graph, cover)
self._modularities = modDict
self._optimal_count = max(iter(self._modularities.items()), key=operator.itemgetter(1))[0]
return self._modularities
@property
def modularities(self):
"""
Returns a dict {c : m} where c is the number of clusters
in the cover and m is the modularity. If modularity has not
been calculated, it recalculates it for all covers. Otherwise,
it returns the stored dict.
Note: Call recalculate_modularities to recalculate the modularity.
"""
if self._modularities:
return self._modularities
self._modularities = self.recalculate_modularities()
return self._modularities
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time and
recalculate_modularities has not been called, this property simply returns the
hint. If such a count was not given, this method calculates the optimal cover
by maximizing the modularity along all possible covers in the object.
Note: Call recalculate_modularities to recalculate the optimal count.
"""
if self._optimal_count is not None:
return self._optimal_count
else:
modularities = self.modularities
self._optimal_count = max(list(modularities.items()), key=operator.itemgetter(1))[0]
return self._optimal_count
def pretty_print_cover(self, numClusters, label='CONGA_index'):
"""
Takes a cover in vertex-id form and prints it nicely
using label as each vertex's name.
"""
cover = self._covers[numClusters]
#if label == 'CONGA_index':
pp = [self._graph.vs[num] for num in [cluster for cluster in cover]]
#else:
# pp = [G.vs[num][label] for num in [cluster for cluster in cover]]
for count, comm in enumerate(pp):
print("Community {0}:".format(count))
for v in comm:
print('\t {0}'.format(v.index if label == 'CONGA_index' else v[label]))
print()
def make_fuzzy(self):
"""
TODO. see CONGA 2010
"""
pass
#
###################################################################################################################################################
# TODO:
# * only call fix_betweennesses when needed
def congo(OG, h=2):
"""
Provides an Implementation of the CONGO algorithm defined by <NAME>
in his 2010 paper "A Fast Algorithm to Find Overlapping Communities in Networks."
The parameters are OG, the graph on which the analysis is to be performed, and h,
the length of the longest shortest path that Congo is to consider.
"""
logging.basicConfig(filename='congo.log',level=logging.DEBUG)
G = OG.copy()
# Just in case the original graph is disconnected
if not G.is_connected():
raise RuntimeError("Congo only makes sense for connected graphs.")
# initializing attributes of copied graph
G.vs['CONGA_orig'] = [i.index for i in OG.vs]
G.es['eb'] = 0
G.vs['pb'] = [{uw : 0 for uw in itertools.combinations(G.neighbors(vertex), 2)} for vertex in G.vs]
# initializing all pair and edge betweennesses
do_initial_betweenness(G, h)
nClusters = 1
# The first cover is simply the entire connected graph.
allCovers = {nClusters : ig.VertexCover(OG)}
while G.es:
logging.info("%d edges remaining", len(G.es))
# get the edge with the max edge betweenness, and its betweenness.
maxEdge, maxEb = max(enumerate(G.es['eb']), key=operator.itemgetter(1))
G.vs['vb'] = G.betweenness(cutoff=h)
# since split betweennes is upper bounded by vertex betweenness, we
# only need to look at the vertices for which the vertex betweenness
# is greater than the max edge betweenness. (We multiply by 2
# because our edge betweenness calculations yield values in both
# directions.)
# TODO check if I need to multiply by 2
vInteresting = [i for i, b in enumerate(G.vs['vb']) if 2 * b > maxEb]
logging.info("Vertices to examine: %s", vInteresting)
splitInstr = max_split_betweenness(G, vInteresting)
# split if max split betweenness > max edge betweenness
if splitInstr is None or splitInstr[0] <= maxEb:
split = delete_edge(G, maxEdge, h)
else:
split = split_vertex(G, splitInstr[1], splitInstr[2], h)
if split:
# there must be a new community
comm = G.components().membership
cover = get_cover(G, OG, comm)
nClusters += 1
# short circuit stuff would go here.
allCovers[nClusters] = cover
return CrispOverlap(OG, allCovers)
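def _example_congo_usage():
    # Hypothetical usage sketch (added, not in the original file): run CONGO on the
    # Zachary karate-club graph and print the best cover by modularity.  Assumes
    # python-igraph and the module's remaining helpers (e.g. mat_min) are available.
    G = ig.Graph.Famous("Zachary")
    result = congo(G, h=2)
    result.recalculate_modularities()
    print(result.optimal_count)
    result.pretty_print_cover(result.optimal_count)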
def delete_edge(G, edge, h):
"""
Given a graph G and one of its edges in tuple form, checks if the deletion
splits the graph.
"""
tup = G.es[edge].tuple
logging.info("Deleted: %s", tup)
neighborhood = get_neighborhood_edge(G, tup, h)
# subtracts local betweennesses in the region, as discussed
# in the paper
do_local_betweenness(G, neighborhood, h, operator.neg)
G.delete_edges(edge)
fix_betweennesses(G)
# adds back in local betweennesses after the deletion
do_local_betweenness(G, neighborhood, h, operator.pos)
return check_for_split(G, tup)
def fix_pair_betweennesses(G):
"""
Given a graph G, makes sure that all of the pair betweennesses
listed as attributes remain possible, and removes those that are not.
Also adds new attributes where new edges have been added.
"""
for v in G.vs:
toDel = []
neededPairs = {uw for uw in itertools.combinations(G.neighbors(v), 2)}
for pair in v['pb']:
if pair not in neededPairs:
toDel.append(pair)
for d in toDel:
del v['pb'][d]
for pair in neededPairs:
if pair not in v['pb']:
v['pb'][pair] = 0
def fix_edge_betweennesses(G):
"""
Given a graph G, makes sure that every edge has a betweenness
score assigned to it.
"""
for e in G.es:
if e['eb'] is None:
e['eb'] = 0
def fix_betweennesses(G):
"""
Fixes the pair and edge betweennesses such that every attribute is up to date.
"""
fix_pair_betweennesses(G)
fix_edge_betweennesses(G)
def split_vertex(G, vToSplit, instr, h):
"""
Splits the vertex v into two new vertices, each with
edges depending on the instructions in instr. Returns True if the split
divided the graph, else False.
"""
neighborhood = get_neighborhood_vertex(G, vToSplit, h)
do_local_betweenness(G, neighborhood, h, operator.neg)
new_index = G.vcount()
G.add_vertex()
G.vs[new_index]['CONGA_orig'] = G.vs[vToSplit]['CONGA_orig']
G.vs[new_index]['pb'] = {uw : 0 for uw in itertools.combinations(G.neighbors(vToSplit), 2)}
# adding all relevant edges to new vertex, deleting from old one.
toAdd = list(zip(itertools.repeat(new_index), instr[0]))
toDelete = list(zip(itertools.repeat(vToSplit), instr[0]))
G.add_edges(toAdd)
G.delete_edges(toDelete)
neighborhood.append(new_index)
fix_betweennesses(G)
logging.info("split: %d, %s", vToSplit, instr)
do_local_betweenness(G, neighborhood, h, operator.pos)
# check if the two new vertices are disconnected.
return check_for_split(G, (vToSplit, new_index))
def max_split_betweenness(G, vInteresting):
"""
Performs the greedy algorithm discussed in the 2007 CONGA paper
to approximate the maximum split betweenness. Returns a tuple
(a, b, c) where a is the maximum score, b the vertex to split
to achieve the score, and c a list of the instructions for which
neighbors to connect to each side of the split.
"""
maxSplitBetweenness = 0
vToSplit = None
# for every vertex of interest, we want to figure out the maximum score achievable
# by splitting the vertices in various ways, and return that optimal split
for v in vInteresting:
clique = create_clique(G, v, G.vs['pb'][v])
if clique.size < 4:
continue
# initialize a list on how we will map the neighbors to the collapsing matrix
vMap = [[ve] for ve in G.neighbors(v)]
# we want to keep collapsing the matrix until we have a 2x2 matrix and its
# score. Then we want to remove index j from our vMap list and concatenate
# it with the vMap[i]. This begins building a way of keeping track of how
# we are splitting the vertex and its neighbors
while clique.size > 4:
i,j,clique = reduce_matrix(clique)
vMap[i] += vMap.pop(j)
if clique[0,1] >= maxSplitBetweenness:
maxSplitBetweenness = clique[0,1]
vToSplit = v
splitInstructions = vMap
if vToSplit is None:
return None
return maxSplitBetweenness, vToSplit, splitInstructions
def do_initial_betweenness(G, h):
"""
Given a graph G and a depth h, calculates all edge and pair betweennesses
and updates G's attributes to reflect the new scores.
"""
# Not guaranteed to work on multigraphs.
all_pairs_shortest_paths = []
# Counter for normalizing scores
pathCounts = Counter()
for ver in G.vs:
logging.info("initializing betweennesses for %d", ver.index)
neighborhood = get_neighborhood_vertex(G, ver, h)
neighborhood.remove(ver.index)
#for i, v in enumerate(neighborhood):
s_s_shortest_paths = G.get_all_shortest_paths(ver, to=neighborhood)#[i+1:])
all_pairs_shortest_paths += s_s_shortest_paths
# to ignore duplicate edges, uncomment the next line.
#all_pairs_shortest_paths = set(tuple(p) for p in all_pairs_shortest_paths)
for path in all_pairs_shortest_paths:
pathCounts[(path[0], path[-1])] += 1
logging.info("updating all betweenness attributes...")
for path in all_pairs_shortest_paths:
if len(path) <= h + 1:
update_betweenness(G, path, pathCounts[(path[0], path[-1])], operator.pos)
def do_local_betweenness(G, neighborhood, h, op=operator.pos):
"""
Given a neighborhood and depth h, recalculates all betweennesses
confined to the neighborhood. If op is operator.neg, it subtracts these
betweennesses from the current ones. Otherwise, it adds them.
"""
all_pairs_shortest_paths = []
pathCounts = Counter()
for i, v in enumerate(neighborhood):
s_s_shortest_paths = G.get_all_shortest_paths(v, to=neighborhood)#[i+1:])
all_pairs_shortest_paths += s_s_shortest_paths
neighSet = set(neighborhood)
neighSize = len(neighborhood)
apsp = []
for path in all_pairs_shortest_paths:
# path does not go out of region
if len(neighSet | set(path)) == neighSize:
pathCounts[(path[0], path[-1])] += 1 # can improve
apsp.append(path)
for path in apsp:
if len(path) <= h + 1:
update_betweenness(G, path, pathCounts[(path[0], path[-1])], op)
def update_betweenness(G, path, count, op):
"""
Given a shortest path in G, along with a count of paths
of that length (used to determine the weight), updates the edge and
pair betweenness dicts with the path's new information.
"""
weight = op(1./count)
pos = 0
while pos < len(path) - 2:
G.vs[path[pos + 1]]['pb'][order_tuple((path[pos], path[pos + 2]))] += weight
G.es[G.get_eid(path[pos], path[pos + 1])]['eb'] += weight
pos += 1
if pos < len(path) - 1:
G.es[G.get_eid(path[pos], path[pos + 1])]['eb'] += weight
def get_cover(G, OG, comm):
"""
Given the graph, the original graph, and a community
membership list, returns a vertex cover of the communities
referring back to the original graph.
"""
coverDict = defaultdict(list)
for i, community in enumerate(comm):
coverDict[community].append(int(G.vs[i]['CONGA_orig']))
return ig.clustering.VertexCover(OG, clusters=list(coverDict.values()))
def vertex_betweeenness_from_eb(G, eb):
"""
Calculates the vertex betweenness scores in G. Returns a list
in which the indices are the vertex indices and the values are
their betweennesses. The same as G.betweenness(), but faster because
it uses the edge betweenness scores.
(CONGA, page 4, equation 1)
"""
components = G.components()
membership = components.membership
vbs = []
for vertex in G.vs:
numComponents = len(components[membership[vertex.index]])
incidentEdges = G.incident(vertex)
vb = .5 * (sum(G.es[e]['eb'] for e in incidentEdges) - (numComponents - 1))
vbs.append(vb)
return vbs
def get_neighborhood_vertex(G, v, h):
"""
Given a vertex and a height/depth to
traverse, find the neighborhood as defined in the CONGA
paper.
"""
return G.neighborhood(v, order=h)
def get_neighborhood_edge(G, e, h):
"""
Given an edge and a height/depth to
traverse, find the neighborhood as defined in the CONGA
paper.
"""
neigh = set(G.neighborhood(e[0], order=h-1))
neigh.update(G.neighborhood(e[1], order=h-1))
return list(neigh)
def order_tuple(toOrder):
if toOrder[0] <= toOrder[1]:
return toOrder
return (toOrder[1], toOrder[0])
def create_clique(G, v, pb):
"""
Given a vertex and its pair betweennesses, returns a k-clique
representing all of its neighbors, with edge weights determined by the pair
betweenness scores. Algorithm discussed on page 5 of the CONGA paper.
"""
neighbors = G.neighbors(v)
# map each neighbor to its index in the adjacency matrix
mapping = {neigh : i for i, neigh in enumerate(neighbors)}
n = len(neighbors)
# Can use ints instead: (dtype=int). Only works if we use matrix_min
# instead of mat_min.
clique = np.matrix(np.zeros((n, n)))
for uw, score in pb.items():
clique[mapping[uw[0]], mapping[uw[1]]] = score
clique[mapping[uw[1]], mapping[uw[0]]] = score
# Ignore any self loops if they're there. If not, this line
# does nothing and can be removed.
np.fill_diagonal(clique, 0)
return clique
def reduce_matrix(M):
"""
Given a matrix M, collapses the row and column of the minimum value. This is just
an adjacency matrix way of implementing the greedy "collapse" discussed in CONGA.
Returns the new matrix and the collapsed indices.
"""
i,j = mat_min(M)
#i, j = matrix_min(M)
# add the ith row to the jth row and overwrite the ith row with those values
M[i,:] = M[j,:] + M[i,:]
# delete the jth row
M = np.delete(M, (j), axis=0)
# similarly with the columns
M[:,i] = M[:,j] + M[:,i]
M = np.delete(M, (j), axis=1)
np.fill_diagonal(M, 0)
return i, j, M
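# Illustrative sketch (added for clarity) of the single collapse step performed by
# reduce_matrix, on a small hand-built matrix; mat_min is not shown in this excerpt,
# so the indices to merge are hard-coded.
demo_M = np.matrix([[0., 5., 1.],
                    [5., 0., 2.],
                    [1., 2., 0.]])
di, dj = 0, 2                     # pretend the minimum off-diagonal entry is at (0, 2)
demo_M[di, :] = demo_M[dj, :] + demo_M[di, :]
demo_M = np.delete(demo_M, dj, axis=0)
demo_M[:, di] = demo_M[:, dj] + demo_M[:, di]
demo_M = np.delete(demo_M, dj, axis=1)
np.fill_diagonal(demo_M, 0)
print(demo_M)                     # [[0. 7.] [7. 0.]]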
import os
import numpy as np
import pandas as pd
def load_menu(file, parent=None):
with open(file, 'r') as fp:
menu = []
for line in fp:
elements = line.strip().split(' ')
# First parser
data_file = elements[0]
st = int(elements[1])
ed = int(elements[2])
lp1 = int(elements[3])
lp2 = int(elements[4])
# Second parser
disp_name = data_file.split('/')[-1].split('.')[0]
file_name = data_file if parent is None else os.path.join(
parent, data_file.split('/')[-1]
)
menu.append({
'name': disp_name,
'dir': file_name,
'start': st,
'end': ed,
'point1': lp1,
'point2': lp2,
})
return menu
def load_segment(file):
with open(file, 'r') as fp:
data = [[int(float(elem)) for elem in line.strip().split(',')]
for line in fp]
return np.array(data)
def load_raw_file(file, delimiter='\t'):
with open(file, 'r') as fp:
data = [[float(elem) for elem in line.strip().split(delimiter)]
for line in fp]
return np.array(data)
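# Hypothetical usage sketch (added, not part of the original module): write a tiny
# tab-separated file and read it back with load_raw_file.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("1.0\t2.0\t3.0\n4.0\t5.0\t6.0\n")
        tmp_name = tmp.name
    data = load_raw_file(tmp_name)
    print(data.shape)  # (2, 3)
    os.remove(tmp_name)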
import os
from pprint import pprint
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from tqdm import tqdm
import skimage
import network as network_lib
from loss.CEL import CEL
from utils.dataloader import create_loader
from utils.metric import cal_maxf, cal_pr_mae_meanf
from measure.saliency_toolbox import (
read_and_normalize,
mean_square_error,
e_measure,
s_measure,
adaptive_fmeasure,
weighted_fmeasure,
prec_recall,
)
from utils.misc import (
AvgMeter,
construct_print,
write_data_to_file,
)
from utils.pipeline_ops import (
get_total_loss,
make_optimizer,
make_scheduler,
resume_checkpoint,
save_checkpoint,
)
from utils.recorder import TBRecorder, Timer, XLSXRecoder
from datetime import datetime
class Solver:
def __init__(self, exp_name: str, arg_dict: dict, path_dict: dict):
super(Solver, self).__init__()
self.exp_name = exp_name
self.arg_dict = arg_dict
self.path_dict = path_dict
self.dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.to_pil = transforms.ToPILImage()
self.tr_data_path = self.arg_dict["rgb_data"]["tr_data_path"]
self.te_data_list = self.arg_dict["rgb_data"]["te_data_list"]
self.save_path = self.path_dict["save"]
self.save_pre = self.arg_dict["save_pre"]
if self.arg_dict["tb_update"] > 0:
self.tb_recorder = TBRecorder(tb_path=self.path_dict["tb"])
if self.arg_dict["xlsx_name"]:
self.xlsx_recorder = XLSXRecoder(xlsx_path=self.path_dict["xlsx"],module_name=self.arg_dict["model"],model_name=self.exp_name)
# Attributes that depend on the attributes defined above
self.tr_loader = create_loader(
data_path=self.tr_data_path,
training=True,
size_list=self.arg_dict["size_list"],
prefix=self.arg_dict["prefix"],
get_length=False,
)
self.end_epoch = self.arg_dict["epoch_num"]
self.iter_num = self.end_epoch * len(self.tr_loader)
if hasattr(network_lib, self.arg_dict["model"]):
self.net = getattr(network_lib, self.arg_dict["model"])().to(self.dev)
else:
raise AttributeError
pprint(self.arg_dict)
if self.arg_dict["resume_mode"] == "test" or self.arg_dict["resume_mode"] == "measure":
# resume model only to test model.
# self.start_epoch is useless
resume_checkpoint(
model=self.net, load_path=self.path_dict["final_state_net"], mode="onlynet",
)
return
self.loss_funcs = [
torch.nn.BCEWithLogitsLoss(reduction=self.arg_dict["reduction"]).to(self.dev)
]
if self.arg_dict["use_aux_loss"]:
self.loss_funcs.append(CEL().to(self.dev))
self.opti = make_optimizer(
model=self.net,
optimizer_type=self.arg_dict["optim"],
optimizer_info=dict(
lr=self.arg_dict["lr"],
momentum=self.arg_dict["momentum"],
weight_decay=self.arg_dict["weight_decay"],
nesterov=self.arg_dict["nesterov"],
),
)
self.sche = make_scheduler(
optimizer=self.opti,
total_num=self.iter_num if self.arg_dict["sche_usebatch"] else self.end_epoch,
scheduler_type=self.arg_dict["lr_type"],
scheduler_info=dict(
lr_decay=self.arg_dict["lr_decay"], warmup_epoch=self.arg_dict["warmup_epoch"]
),
)
# AMP
if self.arg_dict["use_amp"]:
construct_print("Now, we will use the amp to accelerate training!")
from apex import amp
self.amp = amp
self.net, self.opti = self.amp.initialize(self.net, self.opti, opt_level="O1")
else:
self.amp = None
if self.arg_dict["resume_mode"] == "train":
# resume model to train the model
self.start_epoch = resume_checkpoint(
model=self.net,
optimizer=self.opti,
scheduler=self.sche,
amp=self.amp,
exp_name=self.exp_name,
load_path=self.path_dict["final_full_net"],
mode="all",
)
else:
# only train a new model.
self.start_epoch = 0
def train(self):
for curr_epoch in range(self.start_epoch, self.end_epoch):
train_loss_record = AvgMeter()
self._train_per_epoch(curr_epoch, train_loss_record)
# Adjust the learning rate according to the epoch
if not self.arg_dict["sche_usebatch"]:
self.sche.step()
# Save and test at every epoch; the saved parameters correspond to epoch curr_epoch + 1
save_checkpoint(
model=self.net,
optimizer=self.opti,
scheduler=self.sche,
amp=self.amp,
exp_name=self.exp_name,
current_epoch=curr_epoch + 1,
full_net_path=self.path_dict["final_full_net"],
state_net_path=self.path_dict["final_state_net"],
) # save the parameters
if self.arg_dict["use_amp"]:
# https://github.com/NVIDIA/apex/issues/567
with self.amp.disable_casts():
construct_print("When evaluating, we wish to evaluate in pure fp32.")
self.test()
else:
self.test()
@Timer
def _train_per_epoch(self, curr_epoch, train_loss_record):
for curr_iter_in_epoch, train_data in enumerate(self.tr_loader):
num_iter_per_epoch = len(self.tr_loader)
curr_iter = curr_epoch * num_iter_per_epoch + curr_iter_in_epoch
self.opti.zero_grad()
train_inputs, train_masks, _ = train_data
train_inputs = train_inputs.to(self.dev, non_blocking=True)
train_masks = train_masks.to(self.dev, non_blocking=True)
train_preds = self.net(train_inputs)
train_loss, loss_item_list = get_total_loss(train_preds, train_masks, self.loss_funcs)
if self.amp:
with self.amp.scale_loss(train_loss, self.opti) as scaled_loss:
scaled_loss.backward()
else:
train_loss.backward()
self.opti.step()
if self.arg_dict["sche_usebatch"]:
self.sche.step()
# Only call item() to fetch the value when accumulating
train_iter_loss = train_loss.item()
train_batch_size = train_inputs.size(0)
train_loss_record.update(train_iter_loss, train_batch_size)
# TensorBoard visualization
if (
self.arg_dict["tb_update"] > 0
and (curr_iter + 1) % self.arg_dict["tb_update"] == 0
):
self.tb_recorder.record_curve("trloss_avg", train_loss_record.avg, curr_iter)
self.tb_recorder.record_curve("trloss_iter", train_iter_loss, curr_iter)
self.tb_recorder.record_curve("lr", self.opti.param_groups, curr_iter)
self.tb_recorder.record_image("trmasks", train_masks, curr_iter)
self.tb_recorder.record_image("trsodout", train_preds.sigmoid(), curr_iter)
self.tb_recorder.record_image("trsodin", train_inputs, curr_iter)
# Log the data of every iteration
if (
self.arg_dict["print_freq"] > 0
and (curr_iter + 1) % self.arg_dict["print_freq"] == 0
):
lr_str = ",".join(
[f"{param_groups['lr']:.7f}" for param_groups in self.opti.param_groups]
)
log = (
f"{curr_iter_in_epoch}:{num_iter_per_epoch}/"
f"{curr_iter}:{self.iter_num}/"
f"{curr_epoch}:{self.end_epoch} "
f"{self.exp_name}\n"
f"Lr:{lr_str} "
f"M:{train_loss_record.avg:.5f} C:{train_iter_loss:.5f} "
f"{loss_item_list}"
)
print(log)
write_data_to_file(log, self.path_dict["tr_log"])
def test(self):
self.net.eval()
msg = f"Testing start time: {datetime.now()}"
construct_print(msg)
write_data_to_file(msg, self.path_dict["te_log"])
total_results = {}
for data_name, data_path in self.te_data_list.items():
construct_print(f"Testing with testset: {data_name}")
self.te_loader = create_loader(
data_path=data_path,
training=False,
prefix=self.arg_dict["prefix"],
get_length=False,
)
self.save_path = os.path.join(self.path_dict["save"], data_name)
if not os.path.exists(self.save_path):
construct_print(f"{self.save_path} do not exist. Let's create it.")
os.makedirs(self.save_path)
results = self._test_process(save_pre=self.save_pre)
msg = f"Results on the testset({data_name}:'{data_path}'): {results}"
construct_print(msg)
write_data_to_file(msg, self.path_dict["te_log"])
# Print out time taken
msg = f"Time Finish on testset {data_name}: {datetime.now()}"
construct_print(msg)
write_data_to_file(msg, self.path_dict["te_log"])
total_results[data_name] = results
self.net.train()
if self.arg_dict["xlsx_name"]:
# save result into xlsx file.
self.xlsx_recorder.write_xlsx(self.exp_name, total_results)
def _test_process(self, save_pre):
loader = self.te_loader
# pres = [AvgMeter() for _ in range(256)]
# recs = [AvgMeter() for _ in range(256)]
pres = list()
recs = list()
meanfs = AvgMeter()
maes = AvgMeter()
# Measures from Saliency toolbox
measures = ['Wgt-F', 'E-measure', 'S-measure', 'Mod-Max-F', 'Mod-Adp-F', 'Mod-Wgt-F']
beta=np.sqrt(0.3) # default beta parameter used in the adaptive F-measure
gt_threshold=0.5 # The threshold that is used to binarize ground truth maps.
values = dict() # initialize measure value dictionary
pr = dict() # initialize precision recall dictionary
prm = dict() # initialize precision recall dictionary for Mod-Max-F
for idx in measures:
values[idx] = list()
if idx == 'Max-F':
pr['Precision'] = list()
pr['Recall'] = list()
if idx == 'Mod-Max-F':
prm['Precision'] = list()
prm['Recall'] = list()
tqdm_iter = tqdm(enumerate(loader), total=len(loader), leave=False)
for test_batch_id, test_data in tqdm_iter:
tqdm_iter.set_description(f"{self.exp_name}: te=>{test_batch_id + 1}")
in_imgs, in_mask_paths, in_names = test_data
generate_out_imgs = False
if self.arg_dict["resume_mode"] == "measure":
# Check if prediction masks have already been created
for item_id, in_fname in enumerate(in_names):
oimg_path = os.path.join(self.save_path, in_fname + ".png")
if not os.path.exists(oimg_path):
# Out image doesn't exist yet
generate_out_imgs = True
break
else:
generate_out_imgs = True
if generate_out_imgs:
with torch.no_grad():
in_imgs = in_imgs.to(self.dev, non_blocking=True)
outputs = self.net(in_imgs)
outputs_np = outputs.sigmoid().cpu().detach()
for item_id, in_fname in enumerate(in_names):
oimg_path = os.path.join(self.save_path, in_fname + ".png")
gimg_path = os.path.join(in_mask_paths[item_id])
gt_img = Image.open(gimg_path).convert("L")
if self.arg_dict["resume_mode"] == "measure" and generate_out_imgs == False:
out_img = Image.open(oimg_path).convert("L")
else:
out_item = outputs_np[item_id]
out_img = self.to_pil(out_item).resize(gt_img.size, resample=Image.NEAREST)
if save_pre and generate_out_imgs:
out_img.save(oimg_path)
gt_img = np.array(gt_img)
out_img = np.array(out_img)
# Gather images again using Saliency toolboxes import methods
# These images will be grayscale floats between 0 and 1
sm = out_img.astype(np.float32)
if sm.max() == sm.min():
sm = sm / 255
else:
sm = (sm - sm.min()) / (sm.max() - sm.min())
gt = np.zeros_like(gt_img, dtype=np.float32)
gt[gt_img > 256*gt_threshold] = 1
ps, rs, mae, meanf = cal_pr_mae_meanf(out_img, gt_img)
pres.append(ps)
recs.append(rs)
# for pidx, pdata in enumerate(zip(ps, rs)):
# p, r = pdata
# pres[pidx].update(p)
# recs[pidx].update(r)
maes.update(mae)
meanfs.update(meanf)
# Compute other measures using the Saliency Toolbox
if 'MAE2' in measures:
values['MAE2'].append(mean_square_error(gt, sm))
if 'E-measure' in measures:
values['E-measure'].append(e_measure(gt, sm))
if 'S-measure' in measures:
values['S-measure'].append(s_measure(gt, sm))
if 'Adp-F' in measures:
values['Adp-F'].append(adaptive_fmeasure(gt, sm, beta, allowBlackMask=False))
if 'Mod-Adp-F' in measures:
values['Mod-Adp-F'].append(adaptive_fmeasure(gt, sm, beta, allowBlackMask=True))
if 'Wgt-F' in measures:
values['Wgt-F'].append(weighted_fmeasure(gt, sm, allowBlackMask=False))
if 'Mod-Wgt-F' in measures:
values['Mod-Wgt-F'].append(weighted_fmeasure(gt, sm, allowBlackMask=True))
if 'Max-F' in measures:
prec, recall = prec_recall(gt, sm, 256, allowBlackMask=False) # 256 thresholds between 0 and 1
# Check if precision recall curve exists
if len(prec) != 0 and len(recall) != 0:
pr['Precision'].append(prec)
pr['Recall'].append(recall)
if 'Mod-Max-F' in measures:
prec, recall = prec_recall(gt, sm, 256, allowBlackMask=True) # 256 thresholds between 0 and 1
# Check if precision recall curve exists
if len(prec) != 0 and len(recall) != 0:
prm['Precision'].append(prec)
prm['Recall'].append(recall)
# Compute total measures over all images
if 'MAE2' in measures:
values['MAE2'] = np.mean(values['MAE2'])
if 'E-measure' in measures:
values['E-measure'] = np.mean(values['E-measure'])
if 'S-measure' in measures:
values['S-measure'] = np.mean(values['S-measure'])
if 'Adp-F' in measures:
values['Adp-F'] = np.mean(values['Adp-F'])
if 'Mod-Adp-F' in measures:
values['Mod-Adp-F'] = np.mean(values['Mod-Adp-F'])
if 'Wgt-F' in measures:
values['Wgt-F'] = np.mean(values['Wgt-F'])
if 'Mod-Wgt-F' in measures:
values['Mod-Wgt-F'] = np.mean(values['Mod-Wgt-F'])
if 'Max-F' in measures:
if len(pr['Precision']) > 0:
pr['Precision'] = np.mean(np.hstack(pr['Precision'][:]), 1)
pr['Recall'] = np.mean(np.hstack(pr['Recall'][:]), 1)
f_measures = (1 + beta ** 2) * pr['Precision'] * pr['Recall'] / (
beta ** 2 * pr['Precision'] + pr['Recall'])
# Remove any NaN values to allow calculation
f_measures[np.isnan(f_measures)] = 0
values['Max-F'] = np.max(f_measures)
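# Illustrative sketch (added, not in the original file) of the max-F computation
# above, on fabricated precision/recall columns: each column is one image's
# 256-threshold curve, averaged across images before the F-measure is taken.
demo_beta = np.sqrt(0.3)
demo_prec = np.linspace(1.0, 0.2, 256).reshape(-1, 1)
demo_rec = np.linspace(0.0, 1.0, 256).reshape(-1, 1)
demo_precision = np.mean(np.hstack([demo_prec, 0.9 * demo_prec]), 1)
demo_recall = np.mean(np.hstack([demo_rec, demo_rec]), 1)
demo_f = (1 + demo_beta ** 2) * demo_precision * demo_recall / (
    demo_beta ** 2 * demo_precision + demo_recall)
demo_f[np.isnan(demo_f)] = 0
print(np.max(demo_f))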
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import PIL.BmpImagePlugin
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
__all__ = [
"opencv2pil", "opencv2tensor", "pil2opencv", "process_image"
]
def opencv2pil(image: np.ndarray) -> PIL.BmpImagePlugin.BmpImageFile:
""" OpenCV Convert to PIL.Image format.
Returns:
PIL.Image.
"""
image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
return image
def opencv2tensor(image: np.ndarray, gpu: int) -> torch.Tensor:
""" OpenCV Convert to torch.Tensor format.
Returns:
torch.Tensor.
"""
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
nhwc_image = torch.from_numpy(rgb_image).div(255.0).unsqueeze(0)
input_tensor = nhwc_image.permute(0, 3, 1, 2)
if gpu is not None:
input_tensor = input_tensor.cuda(gpu, non_blocking=True)
return input_tensor
def pil2opencv(image: PIL.BmpImagePlugin.BmpImageFile) -> np.ndarray:
""" PIL.Image Convert to OpenCV format.
Returns:
np.ndarray.
"""
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
return image
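# Hypothetical usage sketch (added, not part of the original module): round-trip a
# random BGR image through opencv2pil and pil2opencv and check the pixels survive.
if __name__ == "__main__":
    bgr = np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8)
    pil_image = opencv2pil(bgr)
    restored = pil2opencv(pil_image)
    print(np.array_equal(bgr, restored))  # expected: True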
#!/usr/bin/env python
# After you've done an inversion, what are the results?
# How much moment is created, and how big is the misfit?
# Writes into a summary text file, and prints to screen.
# Useful for L-curve analysis.
import numpy as np
import sys
import json
import slippy.io
# -------- INPUT FUNCTIONS ----------- #
def welcome_and_parse(argv):
print("Metrics for this inversion:")
if len(argv) < 2:
print("Error! Please provide the name of a config json. Exiting. "); sys.exit(0);
else:
configname = argv[1];
return configname;
def parse_json(configname):
config_file = open(configname, 'r');
config = json.load(config_file);
for key in config: # adding the output directory onto the output files, for ease of use.
if "output" in key and key != "output_dir":
config[key] = config["output_dir"]+config[key];
config["summary_file"] = config["output_dir"]+"summary_stats.txt"; # Creates an output file
return config;
# -------- COMPUTE FUNCTIONS ----------- #
def get_slip_moment(slip_filename):
# From the inversion results, what is the moment of the slip distribution?
moment_total = 0;
mu = 30e9; # Pa, assumed.
length, width, leftlat, thrust, tensile = np.loadtxt(slip_filename, skiprows=1, unpack=True,
usecols=(5, 6, 7, 8, 9));
for i in range(len(length)):
slip = np.sqrt(leftlat[i]*leftlat[i] + thrust[i]*thrust[i]);
area = length[i]*width[i]; # m^2
momenti = moment_from_muad(mu, area, slip);
moment_total = moment_total+momenti;
mw = mw_from_moment(moment_total);
print("Calculating moment from %s" % slip_filename);
return [moment_total, mw];
def get_total_misfit(config):
print("Calculating metrics for inversion results.");
lev_misfit, lev_norm_misfit, insar_misfit, insar_norm_misfit = None, None, None, None;
gps_misfit, gps_norm_misfit = None, None;
lev_npts, insar_npts, gps_npts = None, None, None;
if "observed_leveling_file" in config.keys():
[lev_misfit, lev_norm_misfit, lev_npts] = get_misfit_leveling(config["observed_leveling_file"],
config["predicted_leveling_file"]);
if "observed_insar_file" in config.keys():
[insar_misfit, insar_norm_misfit, insar_npts] = get_misfit_insar(config["observed_insar_file"],
config["predicted_insar_file"]);
if "observed_gps_file" in config.keys():
[gps_misfit, gps_norm_misfit, gps_npts] = get_misfit_gps(config["observed_gps_file"],
config["predicted_gps_file"]);
return [gps_misfit, gps_norm_misfit, gps_npts, insar_misfit, insar_norm_misfit, insar_npts,
lev_misfit, lev_norm_misfit, lev_npts];
def get_misfit_gps(obs_file, pred_file):
# Misfit from each data pair (GPS, UAVSAR, Leveling, S1, TSX)
# Want in both absolute numbers and relative to the respective uncertainties.
if obs_file is None or pred_file is None:
return [None, None, None];
gps_input = slippy.io.read_gps_data(obs_file);
gps_pred = slippy.io.read_gps_data(pred_file);
abs_misfit = np.abs(gps_input[1]-gps_pred[1]);
norm_misfit = np.divide(abs_misfit, gps_input[2]); # divide by sigma
mean_average_misfit = np.nanmean(abs_misfit);
mean_norm_average_misfit = np.nanmean(norm_misfit);
npts = len(gps_input[1]);
return [mean_average_misfit, mean_norm_average_misfit, npts];
def get_misfit_insar(obs_file, pred_file):
# Misfit from each data pair (GPS, UAVSAR, Leveling, S1, TSX)
# Want in both absolute numbers and relative to the respective uncertainties.
if obs_file is None or pred_file is None:
return [None, None, None];
insar_input = slippy.io.read_insar_data(obs_file)
insar_pred = slippy.io.read_insar_data(pred_file)
abs_misfit = np.abs(insar_input[1]-insar_pred[1]);
norm_misfit = np.divide(abs_misfit, insar_input[2]); # divide by sigma
mean_average_misfit = np.nanmean(abs_misfit);
mean_norm_average_misfit = np.nanmean(norm_misfit);
npts = len(insar_input[1]);
return [mean_average_misfit, mean_norm_average_misfit, npts];
def get_misfit_leveling(obs_file, pred_file):
# Misfit from each data pair (GPS, UAVSAR, Leveling, S1, TSX)
# Want in both absolute numbers and relative to the respective uncertainties.
if obs_file is None or pred_file is None:
return [None, None, None];
leveling_input = slippy.io.read_insar_data(obs_file)
leveling_pred = slippy.io.read_insar_data(pred_file)
abs_misfit = np.abs(leveling_input[1]-leveling_pred[1]);
norm_misfit = np.divide(abs_misfit, leveling_input[2]); # divide by sigma
mean_average_misfit = np.nanmean(abs_misfit);
mean_norm_average_misfit = np.nanmean(norm_misfit);
npts = len(leveling_input[1]);
return [mean_average_misfit, mean_norm_average_misfit, npts];
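# Hypothetical sketch (added, not in the original module) of the two helpers that
# get_slip_moment references, using the standard seismic-moment definition
# M0 = mu * A * slip and the Hanks & Kanamori moment magnitude; the real slippy
# helpers may differ in detail.
def example_moment_from_muad(mu, area, slip):
    return mu * area * slip;   # N*m, with mu in Pa, area in m^2, slip in m
def example_mw_from_moment(moment):
    return (2.0 / 3.0) * (np.log10(moment) - 9.1);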
import os
import sys
import random
import pickle
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
from config import get_args
from data_utils import CRMatchingDataset
from metrics import recall_2at1, recall_at_k, precision_at_k, MRR, MAP
from model import WDMN as model
random.seed(1234)
np.random.seed(1234)
FLAGS = get_args()
print("\nParameters:")
for attr, value in sorted(FLAGS.flag_values_dict().items()):
print("{}={}".format(attr.upper(), value))
if __name__ == "__main__":
''' Output directory for checkpoints and predictions '''
out_dir = os.path.abspath(os.path.join(os.path.curdir, FLAGS.log_root))
print("Writing to {}\n".format(out_dir))
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if FLAGS.init_dict:
''' Load pretrained word embeddings from disk '''
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"Loading pretrained word embeddings ... | {time_str}")
init_embeddings_path = '%s/vocab_and_embeddings.pkl'%(FLAGS.data_path)
with open(init_embeddings_path, 'rb') as f:
vocab, embeddings = pickle.load(f)
pretrained_word_embeddings = np.array(embeddings)
FLAGS.vocab_size = pretrained_word_embeddings.shape[0]
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f'loaded vocab size {pretrained_word_embeddings.shape[0]} | {time_str}')
else:
pretrained_word_embeddings = None
''' Loading dataset '''
train_file = '%s/train.pkl'%(FLAGS.data_path)
dev_file = '%s/dev.pkl'%(FLAGS.data_path)
test_file = '%s/test.pkl'%(FLAGS.data_path)
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("Creating dataset... | %s " % time_str)
with open(train_file, 'rb') as f:
train_contexts, train_responses, train_labels = pickle.load(f)
with open(dev_file, 'rb') as f:
dev_contexts, dev_responses, dev_labels = pickle.load(f)
with open(test_file, 'rb') as f:
test_contexts, test_responses, test_labels = pickle.load(f)
trainset = CRMatchingDataset(train_contexts, train_responses, train_labels, shuffle=True)
devset = CRMatchingDataset(dev_contexts, dev_responses, dev_labels, shuffle=False)
testset = CRMatchingDataset(test_contexts, test_responses, test_labels, shuffle=False)
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("Created dataset. | %s " % time_str)
''' Init tensorflow session'''
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
with sess.as_default():
''' Init WDMN model '''
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("Creating WDMN model... | %s " % time_str)
model = model(FLAGS, pretrained_word_embeddings)
print('++++++++++++++\nprint model parameters\n++++++++++++++')
total_cnt = 0
for v in tf.global_variables():
print(v)
try:
total_cnt += np.prod([int(e) for e in v.get_shape()])
except:
pass
print(f'++++++++++++++\nTotal number of parameters = {total_cnt}\n++++++++++++++')
''' Init training'''
global_step = tf.Variable(0, name="global_step", trainable=False)
learning_rate = tf.placeholder(tf.float32, shape=[])
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(model.loss, global_step=global_step)
''' Init saver '''
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"Initing Saver | {time_str} ")
saver = tf.train.Saver(max_to_keep=1)
if FLAGS.reload_model:
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"Reloading model from {checkpoint_dir} | {time_str}")
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
else:
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"Init parameters | {time_str}")
sess.run(tf.global_variables_initializer())
if FLAGS.init_dict:
sess.run(model.embedding_init)
def train_step(dataset):
"""
A single training step
"""
train_step = tf.train.global_step(sess, global_step)
''' Learning_rate decaying '''
if FLAGS.lr_decay:
current_lr = max(FLAGS.lr * np.power(FLAGS.decay_rate, (train_step/FLAGS.decay_steps)), FLAGS.lr_minimal)
else:
current_lr = FLAGS.lr
''' Training step '''
contexts, responses, labels = dataset.next()
feed_dict = {
learning_rate: current_lr,
model.context: contexts,
model.response: responses,
model.target: labels,
model.dropout_keep_prob: FLAGS.dropout_keep_prob
}
_, step, loss, accuracy = sess.run(
[train_op, global_step, model.loss, model.accuracy], feed_dict)
''' visualization '''
if step == 0 or step % FLAGS.print_every == 0:
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("Step: %d \t| loss: %.3f \t| acc: %.3f \t| lr: %.5f \t| %s" %
(step, loss, accuracy, current_lr, time_str))
def eval(dataset, split):
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"Evaluating {split} set")
acc = []
losses = []
pred_scores = []
true_scores = []
count = 0
''' Inferencing '''
for _ in range(dataset.batches()):
contexts, responses, labels = dataset.next()
feed_dict = {
model.context: contexts,
model.response: responses,
model.target: labels,
model.dropout_keep_prob: 1.0
}
step, loss, accuracy, y_pred, target = sess.run(
[global_step, model.loss, model.accuracy, model.y_pred, model.target], feed_dict)
acc.append(accuracy)
losses.append(loss)
pred_scores += list(y_pred[:, 1])
true_scores += list(target)
count += 1
if count % 2500 == 0:
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"Evaluated {count} batches | {time_str}")
assert dataset.index == 0
''' Writing predictions '''
MeanAcc = sum(acc) / len(acc)
MeanLoss = sum(losses) / len(losses)
if len(pred_scores) % 10 != 0:
print(f'Warning: eval {len(pred_scores)} cases cannot be divided by 10, will cut remainder')
pred_scores = pred_scores[:int(len(pred_scores) / 10) * 10]
true_scores = true_scores[:int(len(true_scores) / 10) * 10]
with open(os.path.join(out_dir, 'predScores-iter-%s.txt'%(step)), 'w') as f:
for score1, score2 in zip(pred_scores, true_scores):
f.writelines(str(score1) + '\t' + str(score2) + '\n')
''' Calculating metrics'''
num_sample = int(len(pred_scores) / 10)
score_list = np.split(np.array(pred_scores), num_sample, axis=0)
recall_2_1 = recall_2at1(score_list, k=1)
recall_at_1 = recall_at_k(np.array(true_scores), np.array(pred_scores), 1)
recall_at_2 = recall_at_k(np.array(true_scores), np.array(pred_scores), 2)
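# Hypothetical sketch (added; the real metrics module is not shown here) of a
# recall@k for the score layout used above, where every consecutive group of 10
# (true, pred) pairs is one context with a single positive response.
def example_recall_at_k(true_scores, pred_scores, k, group_size=10):
    labels = np.asarray(true_scores).reshape(-1, group_size)
    scores = np.asarray(pred_scores).reshape(-1, group_size)
    hits = 0
    for lab, sco in zip(labels, scores):
        top_k = np.argsort(-sco)[:k]
        hits += int(lab[top_k].sum() > 0)
    return hits / len(labels)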