prompt (string, lengths 15 to 655k) | completion (string, lengths 3 to 32.4k) | api (string, lengths 8 to 52)
---|---|---|
import numpy as np
import glob, os, pickle, json, copy, sys, h5py
from statistics import mode
import plyfile
from pyntcloud import PyntCloud
from plyfile import PlyData, PlyElement
from collections import Counter
MTML_VOXEL_SIZE = 0.1 # voxel size
def make_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def read_label_ply(filename):
plydata = PlyData.read(filename)
x =
|
np.asarray(plydata.elements[0].data['x'])
|
numpy.asarray
|
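A minimal standalone sketch of how the vertex data read above is typically stacked and snapped to the MTML_VOXEL_SIZE grid; the file name 'scene.ply' and the 'vertex' element name are assumptions, not part of the original snippet.
import numpy as np
from plyfile import PlyData

VOXEL_SIZE = 0.1  # plays the same role as MTML_VOXEL_SIZE above

def load_vertices(filename):
    # Read the PLY file and stack x/y/z into an (N, 3) float array.
    plydata = PlyData.read(filename)
    vertex = plydata['vertex'].data
    return np.stack([np.asarray(vertex['x']),
                     np.asarray(vertex['y']),
                     np.asarray(vertex['z'])], axis=1)

def voxelize(points, voxel_size=VOXEL_SIZE):
    # Map each point to the integer index of the voxel containing it.
    return np.floor(points / voxel_size).astype(np.int64)

# Hypothetical usage:
# vox = voxelize(load_vertices('scene.ply'))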
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 14 09:27:05 2021
@author: vargh
"""
import numpy as np
import pandas as pd
from sympy import symbols, pi, Eq, integrate, diff, init_printing, solve
from scipy.optimize import curve_fit
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d, interp2d
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from shapely.geometry import Polygon
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
#init_printing()
## Functions
def calc_maneuver_sum(spec_range, spec_dtr, fuel_used_interper, maneuver_time_interper, printer):
calc_fuel_used = fuel_used_interper(spec_range, spec_dtr)
calc_maneuver_time = maneuver_time_interper(spec_range, spec_dtr)
if printer:
print('Total distance range: %.2f m'%(spec_range))
print('Total fuel mass burned range: %.2f kg'%(calc_fuel_used))
print('Maneuver time: %.2f s'%(calc_maneuver_time))
return calc_fuel_used, calc_maneuver_time
def calc_geom(threeD_balloon, theta):
norm_vec = np.array([np.cos(theta), 0, np.sin(theta)])
proj_of_u_on_n = (np.dot(threeD_balloon, norm_vec))*norm_vec.reshape(len(norm_vec), 1)
proj_of_u_on_n = threeD_balloon - proj_of_u_on_n.transpose()
points = np.zeros((threeD_balloon.shape[0], 2))
points[:, 0] = proj_of_u_on_n[:, 1]
points[:, 1] = proj_of_u_on_n[:, 2]
hull = ConvexHull(points)
bound = points[hull.vertices]
perp_A_x = Polygon(bound).area
cent_y = Polygon(bound).centroid.coords[0][0]
norm_vec2 = np.array([np.sin(theta), 0, np.cos(theta)])
proj_of_u_on_n2 = (np.dot(threeD_balloon, norm_vec2))*norm_vec2.reshape(len(norm_vec2), 1)
proj_of_u_on_n2 = threeD_balloon - proj_of_u_on_n2.transpose()
points2 = np.zeros((threeD_balloon.shape[0], 2))
points2[:, 0] = proj_of_u_on_n2[:, 0]
points2[:, 1] = proj_of_u_on_n2[:, 1]
hull2 = ConvexHull(points2)
bound2 = points2[hull2.vertices]
perp_A_y = Polygon(bound2).area
cent_x = Polygon(bound2).centroid.coords[0][0]
return perp_A_x, perp_A_y, cent_x, cent_y
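def _demo_calc_geom():
    """Illustrative only (not in the original script): for points scattered on a
    unit sphere the projected silhouette is a unit disc, so with theta = 0 both
    projected areas returned by calc_geom should come out close to pi."""
    pts = np.random.normal(size=(2000, 3))
    pts /= np.linalg.norm(pts, axis=1, keepdims=True)  # put the points on the unit sphere
    perp_A_x, perp_A_y, cent_x, cent_y = calc_geom(pts, 0.0)
    print(perp_A_x, perp_A_y)  # both roughly 3.1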
def init_calc(threeD_balloon, payload_height, payload_width, payload_depth, connector_height, balloon_height, balloon_mass, COG_payload_h, COG_payload_w, rho_atmo, dim_scale, dyn_visc, F_b, thrust_f, thrust_r, m_dot_f, m_dot_r, acc_g, consider_bouyancy_drift, time_step, target_range, d_tol, dragthrustratio, min_burn_index, moment_arm_thruster):
## Initializations
t = np.array([0]) # time
r_m = np.array([total_rover_mass]) # full rover mass
# kinematics in x, displacement, velocity and acceleration
d_x = np.array([0]) # (m)
v_x = np.array([0]) # (m/s)
a_x = np.array([0]) # (m/s^2)
# kinematics in y, displacement, velocity and acceleration
d_y = np.array([0]) # (m)
v_y = np.array([0]) # (m/s)
a_y = np.array([0]) # (m/s^2)
# moment about z
m_z = np.array([0]) # (Nm)
F = np.array([thrust_f]) # Thrust (N)
D_x = np.array([0]) # Drag in x (N)
D_y = np.array([0]) # Drag in y (N)
# rotational kinematics in z, displacement, velocity, acceleration
alpha = np.array([0]) # (rad/s^2)
omega = np.array([0]) # (rad/s)
theta = np.array([0]) # (rad)
rem_fuel = np.array([fuel_mass])
ballast_mass = np.array([0])
i = 0
fail = 0
burn_index = 0
while abs(d_x[i] - target_range) > d_tol and not(fail == 1):
## initial conditions
prev_t = t[i]
prev_r_m = r_m[i]
prev_d_x = d_x[i]
prev_v_x = v_x[i]
prev_a_x = a_x[i]
prev_d_y = d_y[i]
prev_v_y = v_y[i]
prev_a_y = a_y[i]
prev_m_z = m_z[i]
prev_F = F[i]
prev_D_x = D_x[i]
prev_D_y = D_y[i]
prev_alpha = alpha[i]
prev_omega = omega[i]
prev_theta = theta[i]
prev_fuel = rem_fuel[i]
prev_ballast_mass = ballast_mass[i]
## time
t = np.append(t, prev_t + time_step)
cur_t = prev_t + time_step
## Modified perpendicular area
perp_A_x, perp_A_y, cent_x, cent_y = calc_geom(threeD_balloon, prev_theta) # calculates perpendicular area in x and y and the centroid for a given theta
## Center of Gravity, Center of Drag, Moment of Inertia (not rotated)
COG_balloon_h = (payload_height + connector_height + balloon_height/2)
COG_balloon_w = cent_x
COG_cur_h = ((r_m[i] - balloon_mass)*COG_payload_h + balloon_mass*COG_balloon_h)/(r_m[i]) # calculates changing height COG
COG_cur_w = ((r_m[i] - balloon_mass)*COG_payload_w + balloon_mass*COG_balloon_w)/(r_m[i]) # calculates changing COG
J_payload_u = r_m[i]*(payload_height**2 + payload_width**2) # untransformed moment of inertia of payload
trans_payload_J_d = np.sqrt(COG_cur_h**2 + COG_cur_w**2) - COG_payload # distance axis of rotation must be moved
J_payload_t = J_payload_u + r_m[i]*trans_payload_J_d**2 # moving axis of rotation with parallel axis theorem
trans_balloon_J_d = np.sqrt((COG_balloon_h - COG_cur_h)**2 + (COG_balloon_w - COG_cur_w)**2) # distance axis of rotation must be moved
J_balloon_t = J_balloon_u + balloon_mass*trans_balloon_J_d**2 # moving axis of rotation with parallel axis theorem
J_tot = J_payload_t + J_balloon_t
COD_balloon_h = COG_balloon_h # needs to be updated based on CFD
COD_balloon_w = COG_balloon_w # needs to be updated based on CFD
# Skin Friction coefficient
if prev_v_x != 0:
re_num = rho_atmo*prev_v_x*dim_scale/dyn_visc # Reynolds number
C_f = .027/np.power(re_num, 1/7) ## Prandtl's 1/7 Power Law
else:
C_f = 0 # If velocity = 0, C_f = 0
D_mag = np.sqrt(prev_D_x**2 + prev_D_y**2) # magnitude of drag
res_freq = int(np.ceil(2*np.pi*np.sqrt(J_tot/(F_b*balloon_height)))) # calculated resonant frequency (np.pi keeps this numeric for np.ceil)
thrust = thrust_f # thrust
m_dot = m_dot_f # mass flow rate
if abs(D_mag/thrust) < dragthrustratio: # if thrust to drag ratio is less than max ratio, burn
burn_condition = 1
else:
if burn_index > min_burn_index: # if engine has burned for minimal time, and drag condition exceeded, stop burning
burn_condition = 0
burn_index = 0
if burn_condition:
burn_index = burn_index + 1
## Force
cur_F = thrust
cur_fuel = prev_fuel - m_dot*time_step
# Ballast
cur_ballast_mass = prev_ballast_mass + m_dot*time_step
cur_r_m = prev_r_m
else:
cur_F = 0
cur_r_m = prev_r_m
cur_fuel = prev_fuel
mass_deficit = 0
cur_ballast_mass = prev_ballast_mass
perp_A_pay_x = payload_width/np.cos(prev_theta)*payload_depth # calculates perpendicular surface area of payload
pay_drag_x = -.5*(C_D_payload+C_f)*perp_A_pay_x*rho_atmo*prev_v_x**2 # calculates drag from payload
ball_drag_x = -.5*(C_D_balloon+C_f)*perp_A_x*rho_atmo*prev_v_x**2 # calculates drag from balloon in x
ball_drag_y = -.5*(C_D_balloon+C_f)*perp_A_y*rho_atmo*prev_v_y**2 # calculates drag from balloon in y
cur_D_x = pay_drag_x + ball_drag_x # calculates total drag in x
cur_D_y = ball_drag_y # calculates total drag in y
cur_D_mag = np.sqrt(cur_D_x**2 + cur_D_y**2) # Magnitude of drag
## Linear Kinematics
tot_force_x = cur_F*np.cos(prev_theta) + cur_D_x # effective thrust in x
tot_force_y = cur_F*np.sin(prev_theta) + cur_D_y # effective force in y
cur_a_x = tot_force_x/cur_r_m
cur_a_y = tot_force_y/cur_r_m
cur_v_x = prev_v_x+cur_a_x*time_step
cur_v_y = prev_v_y+cur_a_y*time_step
cur_d_x = prev_d_x+cur_v_x*time_step
cur_d_y = prev_d_y+cur_v_y*time_step
## Rotational Kinematics
# Payload Gravity Torque
g_m_a_y_pay = COG_cur_h - COG_payload_h # moment arm for gravity on the payload y
g_m_a_x_pay = COG_cur_w - COG_payload_w # moment arm for gravity on the payload x
g_m_a_pay = np.sqrt(g_m_a_y_pay**2 + g_m_a_x_pay**2)
g_m_pay = abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_pay)
# Balloon Gravity Torque
g_m_a_y_ball = COG_cur_h - COG_balloon_h # moment arm for gravity on the balloon y
g_m_a_x_ball = COG_cur_w - COG_balloon_w # moment arm for gravity on the balloon x
g_m_a_ball = np.sqrt(g_m_a_y_ball**2 + g_m_a_x_ball**2) # uses the balloon moment arms
g_m_ball = -abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_ball)
g_m = g_m_pay + g_m_ball
# Balloon Drag Torque
d_m_a_y = COD_balloon_h - COG_cur_h # moment arm for drag on the balloon y
d_m_a_x = COD_balloon_w - COG_cur_w # moment arm for drag on the balloon x
d_m_a = np.sqrt(d_m_a_y**2 + d_m_a_x**2) # euclidean distance
ball_D_mag = np.sqrt(ball_drag_x**2 + ball_drag_y**2) # magnitude of drag on balloon
d_m = d_m_a*ball_D_mag*np.cos(prev_theta) - pay_drag_x*g_m_a_pay # sum all drag moments
# Buoyancy force torque, balloon
b_m_a_y = COG_balloon_h - COG_cur_h # moment arm for buoyancy force y
b_m_a_x = COG_balloon_w - COG_cur_w # moment arm for buoyancy force x
b_m_a = np.sqrt(b_m_a_y**2 + b_m_a_x**2) # euclidean
b_m = b_m_a * F_b * np.sin(prev_theta) # total buoyancy moment
t_m_a = moment_arm_thruster # thruster moment arm
t_m = cur_F * (moment_arm_thruster) # thruster moment
m_z_tot = d_m - b_m + t_m - g_m # total moment
cur_alpha = m_z_tot / J_tot
cur_omega = prev_omega + cur_alpha*time_step
cur_theta = prev_theta + cur_omega*time_step
## all updates
F = np.append(F, cur_F)
r_m = np.append(r_m, cur_r_m)
D_x = np.append(D_x, cur_D_x)
D_y = np.append(D_y, cur_D_y)
a_x = np.append(a_x, cur_a_x)
a_y = np.append(a_y, cur_a_y)
v_x = np.append(v_x, cur_v_x)
v_y = np.append(v_y, cur_v_y)
d_x = np.append(d_x, cur_d_x)
d_y = np.append(d_y, cur_d_y)
m_z = np.append(m_z, m_z_tot)
alpha = np.append(alpha, cur_alpha)
omega = np.append(omega, cur_omega)
theta = np.append(theta, cur_theta)
rem_fuel = np.append(rem_fuel, cur_fuel)
ballast_mass = np.append(ballast_mass, cur_ballast_mass)
i = i + 1
if cur_fuel < 0:
fail = 1
print('Not Enough Fuel Mass')
if i % 100 == 0:
print('.', end= '')
if i % 5000 == 0:
print('\n')
all_data = np.zeros((len(t), 17))
all_data[:, 0] = t
all_data[:, 1] = F
all_data[:, 2] = r_m
all_data[:, 3] = D_x
all_data[:, 4] = D_y
all_data[:, 5] = a_x
all_data[:, 6] = a_y
all_data[:, 7] = v_x
all_data[:, 8] = v_y
all_data[:, 9] = d_x
all_data[:, 10] = d_y
all_data[:, 11] = m_z
all_data[:, 12] = alpha
all_data[:, 13] = omega
all_data[:, 14] = theta
all_data[:, 15] = rem_fuel
all_data[:, 16] = ballast_mass
headers = ['time', 'force', 'mass', 'drag_x', 'drag_y', 'acceleration_x', 'acceleration_y', 'velocity_x', 'velocity_y', 'displacement_x', 'displacement_y', 'moment_z', 'alpha', 'omega', 'theta', 'fuel_mass', 'ballast_mass']
return pd.DataFrame(all_data, columns=headers)
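def _demo_plot_trajectory(sim_df):
    """Illustrative only (not in the original script): plot the x-displacement and
    remaining fuel returned by init_calc against time, using the column names
    defined in `headers` above."""
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    ax1.plot(sim_df['time'], sim_df['displacement_x'])
    ax1.set_ylabel('displacement_x (m)')
    ax2.plot(sim_df['time'], sim_df['fuel_mass'])
    ax2.set_ylabel('fuel_mass (kg)')
    ax2.set_xlabel('time (s)')
    plt.show()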
def drag_stop_calc(test, ind_ignore, maneuver_time, max_vel, forward_burn_frac, ind_at_end, threeD_balloon, payload_height, payload_width, payload_depth, connector_height, balloon_height, balloon_mass, COG_payload_h, COG_payload_w, rho_atmo, dim_scale, dyn_visc, F_b, thrust_f, thrust_r, m_dot_f, m_dot_r, acc_g, consider_bouyancy_drift, time_step, target_range, d_tol, dragthrustratio, min_burn_index, moment_arm_thruster):
## Drag Stop
reverse_burn_frac = 1 - forward_burn_frac # deprecated if no reverse burn
cutoff_time = maneuver_time * forward_burn_frac
## Initializations
t = np.array([0]) # time
r_m = np.array([total_rover_mass]) # full rover mass
# kinematics in x, displacement, velocity and acceleration
d_x = np.array([0]) # (m)
v_x = np.array([0]) # (m/s)
a_x = np.array([0]) # (m/s^2)
# kinematics in y, displacement, velocity and acceleration
d_y = np.array([0]) # (m)
v_y = np.array([0]) # (m/s)
a_y = np.array([0]) # (m/s^2)
# moment about z
m_z = np.array([0]) # (Nm)
F = np.array([thrust_f]) # Thrust (N)
D_x = np.array([0]) # Drag in x (N)
D_y = np.array([0]) # Drag in y (N)
# rotational kinematics in z, displacement, velocity, acceleration
alpha = np.array([0]) # (rad/s^2)
omega = np.array([0]) # (rad/s)
theta = np.array([0]) # (rad)
rem_fuel = np.array([fuel_mass])
ballast_mass = np.array([0])
i = 0
fail = 0
burn_index = 0
vel_checker = 10 # dummy starting value so the loop runs and the craft can accelerate before the velocity check takes effect
while vel_checker >= vel_elbow and not(fail == 1):
## initial conditions
prev_t = t[i]
prev_r_m = r_m[i]
prev_d_x = d_x[i]
prev_v_x = v_x[i]
prev_a_x = a_x[i]
prev_d_y = d_y[i]
prev_v_y = v_y[i]
prev_a_y = a_y[i]
prev_m_z = m_z[i]
prev_F = F[i]
prev_D_x = D_x[i]
prev_D_y = D_y[i]
prev_alpha = alpha[i]
prev_omega = omega[i]
prev_theta = theta[i]
prev_fuel = rem_fuel[i]
prev_ballast_mass = ballast_mass[i]
## time
t = np.append(t, prev_t + time_step)
cur_t = prev_t + time_step
## Modified perpendicular area
perp_A_x, perp_A_y, cent_x, cent_y = calc_geom(threeD_balloon, prev_theta)
## COG, COD, J (not rotated)
COG_balloon_h = (payload_height + connector_height + balloon_height/2)
COG_balloon_w = cent_x
COG_cur_h = ((r_m[i] - balloon_mass)*COG_payload_h + balloon_mass*COG_balloon_h)/(r_m[i]) # calculates changing height COG
COG_cur_w = ((r_m[i] - balloon_mass)*COG_payload_w + balloon_mass*COG_balloon_w)/(r_m[i]) # calculates changing COG
J_payload_u = r_m[i]*(payload_height**2 + payload_width**2) # untransformed moment of inertia of payload
trans_payload_J_d = np.sqrt(COG_cur_h**2 + COG_cur_w**2) - COG_payload # distance axis of rotation must be moved
J_payload_t = J_payload_u + r_m[i]*trans_payload_J_d**2 # moving axis of rotation with parallel axis theorem
trans_balloon_J_d = np.sqrt((COG_balloon_h - COG_cur_h)**2 + (COG_balloon_w - COG_cur_w)**2) # distance axis of rotation must be moved
J_balloon_t = J_balloon_u + balloon_mass*trans_balloon_J_d**2 # moving axis of rotation with parallel axis theorem
J_tot = J_payload_t + J_balloon_t
COD_balloon_h = COG_balloon_h # needs to be updated based on CFD
COD_balloon_w = COG_balloon_w # needs to be updated based on CFD
if prev_v_x != 0:
re_num = rho_atmo*prev_v_x*dim_scale/dyn_visc
C_f = .027/np.power(re_num, 1/7) ## Prandtl's 1/7 Power Law
else:
C_f = 0
D_mag = np.sqrt(prev_D_x**2 + prev_D_y**2)
res_freq = int(np.ceil(2*np.pi*np.sqrt(J_tot/(F_b*balloon_height)))) # np.pi keeps this numeric for np.ceil
max_alpha = max_theta/4*res_freq**2
if cur_t < cutoff_time:
reverse = 0
else:
reverse = 1
if reverse:
thrust = 0
m_dot = 0
curdtr = 0
else:
thrust = thrust_f
m_dot = m_dot_f
curdtr = abs(D_mag/thrust)
if curdtr < dragthrustratio:
if reverse:
burn_condition = 0
else:
burn_condition = 1
else:
if burn_index > min_burn_index:
burn_condition = 0
burn_index = 0
if burn_condition:
burn_index = burn_index + 1
## Force
cur_F = thrust
cur_fuel = prev_fuel - m_dot*time_step
# Ballast
cur_ballast_mass = prev_ballast_mass + m_dot*time_step
cur_r_m = prev_r_m
else:
cur_F = 0
cur_r_m = prev_r_m
cur_fuel = prev_fuel
mass_deficit = 0
cur_ballast_mass = prev_ballast_mass
perp_A_pay_x = payload_width/
|
np.cos(prev_theta)
|
numpy.cos
|
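Both loops above model skin friction with Prandtl's one-seventh power law, C_f = 0.027 / Re^(1/7) with Re = rho*v*L/mu. A self-contained sketch with made-up atmosphere values (illustrative only, not taken from the original script):
import numpy as np

def skin_friction(rho, v, length_scale, dyn_visc):
    # Prandtl's 1/7 power law; returns 0 at rest, matching the branch above.
    if v == 0:
        return 0.0
    re_num = rho * v * length_scale / dyn_visc  # Reynolds number
    return 0.027 / np.power(re_num, 1 / 7)

print(skin_friction(rho=1.2, v=5.0, length_scale=2.0, dyn_visc=1.8e-5))  # ~0.004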
from sklearn import metrics
import pickle
import random
import sys
import numpy as np
import torch
import os
from scipy.special import softmax
np.set_printoptions(threshold=100000)
def read_train(split_dir):
csv_path = os.path.join(split_dir, "train" + '.csv')
print(csv_path)
lines = [x.strip() for x in open(csv_path, 'r',encoding='utf-8').readlines()]
trainlabel = []
trainlb_all = 0
for l in lines:
name, wnid = l.split(',')
if wnid not in trainlabel:
trainlb_all = trainlb_all + 1
trainlabel.append(wnid)
# trainlabel_set = list(set(trainlabel))
# trainlb_all = len(trainlabel_set)
print(trainlabel,trainlb_all)
return trainlabel
#def softmax(x):
# return np.exp(x)/np.sum(np.exp(x), axis=-1)
imagenames = []
newresult = []
valcsv = sys.argv[1] + "/val.csv"
with open(valcsv,'r') as tf:
lines = tf.readlines()
for line in lines:
imagename,_ = line.split(',')
imagenames.append(imagename)
pre_types = sys.argv[1] + "/trainlabelset.bin"
with open(pre_types,'rb') as f:
pred_type_list = pickle.load(f)
print(pred_type_list)
def one_hot(index,num):
label = []
for i in range(num):
if i == index:
label.append(1)
else:
label.append(0)
return np.array(label)
def checkmax(c):
count = 0
max_num = np.max(c)
for num in c:
if num == max_num:
count = count +1
return count
print(len(lines))
def readtheresult(filename):
print(filename)
with open(filename,'rb') as f:
results = pickle.load(f)
print(results[1])
print(len(results))
newlogits = []
i,pred,label,logits = map(list,zip(*results))
for logit in logits:
newlogits.append(logit.numpy())
print(newlogits[0])
# newpred = np.argmax(np.array(newlogits), axis=2)
prob = softmax(np.array(newlogits),axis=-1)
print(prob[0])
newpred = np.argmax(softmax(np.array(newlogits),axis=-1), axis=-1)
print("pred10",pred[0:10])
print("newpred10",newpred[0:10])
return np.array(prob),np.array(pred)
alldir = sys.argv[2]
modeldirs = os.listdir(sys.argv[2])
resultfiles = []
for filename in modeldirs:
checkpointdirs = os.listdir(alldir + "/" + filename +"/MiniImageNet-AmdimNet-ProtoNet" )
for checkpointdir in checkpointdirs:
checkdir = alldir + "/" + filename +"/MiniImageNet-AmdimNet-ProtoNet/" + checkpointdir
if os.path.exists(checkdir + "/" + "result.bin"):
resultfiles.append(checkdir+ "/" + "result.bin")
probs = []
preds = []
for resultfile in resultfiles:
#print(resultfile)
prob,pred = readtheresult(resultfile)
prob = prob.reshape((len(pred),len(pred_type_list)))
probs.append(prob)
preds.append(pred)
results = []
print(preds[0].shape)
print(probs[0].shape)
for i in range(len(preds[0])):
pred_num = None
for j in range(len(preds)):
if pred_num is None:
pred_num = one_hot(preds[j][i],len(pred_type_list))
else:
pred_num = pred_num + one_hot(preds[j][i],len(pred_type_list))
if checkmax(pred_num) == 1:
results.append(np.argmax(pred_num))
continue
prob_total = None
for j in range(len(probs)):
if prob_total is None:
prob_total = np.array(probs[j][i])
else:
prob_total =
|
np.array(probs[j][i])
|
numpy.array
|
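A compact, self-contained illustration of the ensemble rule used above: each model casts a one-hot vote, and ties are broken by summing the models' softmax probabilities (toy shapes and numbers, not the original data):
import numpy as np
from scipy.special import softmax

preds = np.array([[0, 2], [1, 2], [0, 1]])          # 3 models x 2 samples (predicted class ids)
probs = softmax(np.random.randn(3, 2, 4), axis=-1)  # 3 models x 2 samples x 4 classes

final = []
for i in range(preds.shape[1]):
    votes = np.bincount(preds[:, i], minlength=4)   # summed one-hot votes
    if (votes == votes.max()).sum() == 1:           # unique winner: keep it
        final.append(int(np.argmax(votes)))
    else:                                           # tie: fall back to summed probabilities
        final.append(int(np.argmax(probs[:, i, :].sum(axis=0))))
print(final)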
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflowjs import quantization
class TestQuantizationUtil(unittest.TestCase):
def assertDictContainsSubsetAlmostEqual(self, d1, d2):
self.assertIsInstance(d1, dict)
self.assertIsInstance(d2, dict)
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
self.assertTrue(d2_keys.issubset(d1_keys))
for key in d2_keys:
self.assertAlmostEqual(d1[key], d2[key])
def _runQuantizeTest(
self, range_min, range_max, data_dtype, quantization_dtype,
expected_metadata):
d = np.arange(range_min, range_max + 1, dtype=data_dtype)
q, metadata = quantization.quantize_weights(d, quantization_dtype)
self.assertDictContainsSubsetAlmostEqual(metadata, expected_metadata)
self.assertEqual(q.dtype, quantization_dtype)
de_q = quantization.dequantize_weights(
q, metadata, data_dtype)
if data_dtype != np.float32:
np.testing.assert_allclose(de_q, d)
else:
np.testing.assert_array_almost_equal(de_q, d, decimal=2)
if quantization_dtype in [np.uint8, np.uint16]:
s = metadata['scale']
m = metadata['min']
if range_min <= 0 <= range_max:
d_0 = np.zeros(1, data_dtype)
q_0 = np.round((d_0 - m) / s).astype(quantization_dtype)
self.assertEqual(
quantization.dequantize_weights(q_0, metadata, data_dtype), d_0)
def testAffineQuantizeAllEqual(self):
d = np.ones(5, dtype=np.float32)
q, metadata = quantization.quantize_weights(d, np.uint8)
assert 'scale' in metadata and 'min' in metadata
self.assertEqual(metadata['scale'], 1.0)
self.assertEqual(q.dtype, np.uint8)
de_q = quantization.dequantize_weights(q, metadata, np.float32)
np.testing.assert_array_equal(de_q, d)
def testFloatQuantizeAllEqual(self):
d = np.ones(5, dtype=np.float32)
q, metadata = quantization.quantize_weights(d, np.float16)
self.assertDictEqual(metadata, {})
self.assertEqual(q.dtype, np.float16)
de_q = quantization.dequantize_weights(q, metadata, np.float32)
np.testing.assert_array_equal(de_q, d)
def testAffineQuantizeNegativeFloats(self):
self._runQuantizeTest(
-3, -1, np.float32, np.uint8,
expected_metadata={'scale': 2/255})
self._runQuantizeTest(
-3, -1, np.float32, np.uint16,
expected_metadata={'scale': 2/65536})
def testAffineQuantizeNegativeAndZeroFloats(self):
self._runQuantizeTest(
-3, 0, np.float32, np.uint8,
expected_metadata={'scale': 3/255})
self._runQuantizeTest(
-3, 0, np.float32, np.uint16,
expected_metadata={'scale': 3/65536})
def testAffineQuantizeNegativeAndPositiveFloats(self):
self._runQuantizeTest(
-3, 3, np.float32, np.uint8,
expected_metadata={'scale': 6/255})
self._runQuantizeTest(
-3, 3, np.float32, np.uint16,
expected_metadata={'scale': 6/65536})
def testAffineQuantizeZeroAndPositiveFloats(self):
self._runQuantizeTest(
0, 3, np.float32, np.uint8,
expected_metadata={'scale': 3/255})
self._runQuantizeTest(
0, 3, np.float32, np.uint16,
expected_metadata={'scale': 3/65536})
def testAffineQuantizePositiveFloats(self):
self._runQuantizeTest(
1, 3, np.float32, np.uint8,
expected_metadata={'scale': 2/255})
self._runQuantizeTest(
1, 3, np.float32, np.uint16,
expected_metadata={'scale': 2/65536})
def testAffineQuantizeNormalizedFloats(self):
data = np.array(
[-0.29098126, -0.24776903, -0.27248842, 0.23848203], dtype=np.float32)
q, metadata = quantization.quantize_weights(data, np.uint16)
de_q = quantization.dequantize_weights(q, metadata, data.dtype)
np.testing.assert_array_almost_equal(de_q, data, decimal=5)
def testAffineQuantizeNegativeInts(self):
self._runQuantizeTest(
-3, -1, np.int32, np.uint8,
expected_metadata={'scale': 2/255})
self._runQuantizeTest(
-3, -1, np.int32, np.uint16,
expected_metadata={'scale': 2/65536})
def testAffineQuantizeNegativeAndZeroInts(self):
self._runQuantizeTest(
-3, 0, np.int32, np.uint8,
expected_metadata={'scale': 3/255})
self._runQuantizeTest(
-3, 0, np.int32, np.uint16,
expected_metadata={'scale': 3/65536})
def testAffineQuantizeNegativeAndPositiveInts(self):
self._runQuantizeTest(
-3, 3, np.int32, np.uint8,
expected_metadata={'scale': 6/255})
self._runQuantizeTest(
-3, 3, np.int32, np.uint16,
expected_metadata={'scale': 6/65536})
def testAffineQuantizeZeroAndPositiveInts(self):
self._runQuantizeTest(
0, 3, np.int32, np.uint8,
expected_metadata={'scale': 3/255})
self._runQuantizeTest(
0, 3, np.int32, np.uint16,
expected_metadata={'scale': 3/65536})
def testAffineQuantizePositiveInts(self):
self._runQuantizeTest(
1, 3, np.int32, np.uint8,
expected_metadata={'scale': 2/255})
self._runQuantizeTest(
1, 3, np.int32, np.uint16,
expected_metadata={'scale': 2/65536})
def testInvalidQuantizationTypes(self):
# Invalid quantization type
with self.assertRaises(ValueError):
quantization.quantize_weights(np.array([]), np.bool)
# Invalid data dtype for float16 quantization
with self.assertRaises(ValueError):
d = np.ones(1, dtype=np.int32)
quantization.quantize_weights(d, np.float16)
def testInvalidDequantizationTypes(self):
# Invalid metadata for affine quantization
with self.assertRaises(ValueError):
d =
|
np.ones(1, dtype=np.uint8)
|
numpy.ones
|
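The affine tests above check a min/max mapping whose uint8 scale is (max - min) / 255. A simplified standalone sketch of that round trip (not the tensorflowjs implementation, which also special-cases constant arrays, as in testAffineQuantizeAllEqual):
import numpy as np

def affine_quantize(d, levels=255):
    # Map [min, max] linearly onto the integers 0..levels.
    m = float(d.min())
    scale = (float(d.max()) - m) / levels
    q = np.round((d - m) / scale).astype(np.uint8)
    return q, {'min': m, 'scale': scale}

def affine_dequantize(q, meta):
    return q * meta['scale'] + meta['min']

d = np.linspace(-3.0, 3.0, 7, dtype=np.float32)
q, meta = affine_quantize(d)
print(meta['scale'])               # 6/255, matching the uint8 expectations above
print(affine_dequantize(q, meta))  # approximately recovers d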
from functools import wraps
import numpy as np
import xarray as xr
from .utils import histogram
__all__ = ["Contingency"]
OBSERVATIONS_NAME = "observations"
FORECASTS_NAME = "forecasts"
def _get_category_bounds(category_edges):
"""Return formatted string of category bounds given list of category edges"""
bounds = [
f"[{str(category_edges[i])}, {str(category_edges[i + 1])})"
for i in range(len(category_edges) - 2)
]
# Last category is right edge inclusive
bounds.append(f"[{str(category_edges[-2])}, {str(category_edges[-1])}]")
return bounds
def dichotomous_only(method):
"""Decorator for methods that are defined for dichotomous forecasts only"""
@wraps(method)
def wrapper(self, *args, **kwargs):
if not self.dichotomous:
raise AttributeError(
f"{method.__name__} can only be computed for \
dichotomous (2-category) data"
)
return method(self, *args, **kwargs)
return wrapper
def _display_metadata(self):
"""Called when Contingency objects are printed"""
header = f"<xskillscore.{type(self).__name__}>\n"
summary = header + "\n".join(str(self.table).split("\n")[1:]) + "\n"
return summary
class Contingency:
"""Class for contingency based skill scores
Parameters
----------
observations : xarray.Dataset or xarray.DataArray
Labeled array(s) over which to apply the function.
forecasts : xarray.Dataset or xarray.DataArray
Labeled array(s) over which to apply the function.
observation_category_edges : array_like
Bin edges for categorising observations. Similar to np.histogram, \
all but the last (righthand-most) bin include the left edge and \
exclude the right edge. The last bin includes both edges.
forecast_category_edges : array_like
Bin edges for categorising forecasts. Similar to np.histogram, \
all but the last (righthand-most) bin include the left edge and \
exclude the right edge. The last bin includes both edges.
dim : str, list
The dimension(s) over which to compute the contingency table
Returns
-------
xskillscore.Contingency
Examples
--------
>>> da = xr.DataArray(np.random.normal(size=(3, 3)),
... coords=[("x", np.arange(3)), ("y", np.arange(3))])
>>> o = xr.Dataset({"var1": da, "var2": da})
>>> f = o * 1.1
>>> o_category_edges = np.linspace(-2, 2, 5)
>>> f_category_edges = np.linspace(-3, 3, 5)
>>> xs.Contingency(o, f,
... o_category_edges, f_category_edges,
... dim=['x', 'y']) # doctest: +SKIP
<xskillscore.Contingency>
Dimensions: (forecasts_category: 4, observations_category: 4)
Coordinates:
observations_category_bounds (observations_category) <U12 '[-2.0, -1.0)'...
forecasts_category_bounds (forecasts_category) <U12 '[-3.0, -1.5)' .....
* observations_category (observations_category) int64 1 2 3 4
* forecasts_category (forecasts_category) int64 1 2 3 4
Data variables:
var1 (observations_category, forecasts_category) int64
var2 (observations_category, forecasts_category) int64
References
----------
http://www.cawcr.gov.au/projects/verification/
"""
def __init__(
self,
observations,
forecasts,
observation_category_edges,
forecast_category_edges,
dim,
):
self._observations = observations.copy()
self._forecasts = forecasts.copy()
self._observation_category_edges = observation_category_edges.copy()
self._forecast_category_edges = forecast_category_edges.copy()
self._dichotomous = (
True
if (len(observation_category_edges) - 1 == 2)
& (len(forecast_category_edges) - 1 == 2)
else False
)
self._table = self._get_contingency_table(dim)
@property
def observations(self):
return self._observations
@property
def forecasts(self):
return self._forecasts
@property
def observation_category_edges(self):
return self._observation_category_edges
@property
def forecast_category_edges(self):
return self._forecast_category_edges
@property
def dichotomous(self):
return self._dichotomous
@property
def table(self):
return self._table
def _get_contingency_table(self, dim):
"""Build the contingency table
Parameters
----------
dim : str, list
The dimension(s) over which to compute the contingency table
Returns
-------
xarray.Dataset or xarray.DataArray
"""
table = histogram(
self.observations,
self.forecasts,
bins=[self.observation_category_edges, self.forecast_category_edges],
bin_names=[OBSERVATIONS_NAME, FORECASTS_NAME],
dim=dim,
bin_dim_suffix="_bin",
)
# Add some coordinates to simplify interpretation/post-processing
table = table.assign_coords(
{
OBSERVATIONS_NAME
+ "_bin": _get_category_bounds(self.observation_category_edges)
}
).rename({OBSERVATIONS_NAME + "_bin": OBSERVATIONS_NAME + "_category_bounds"})
table = table.assign_coords(
{
FORECASTS_NAME
+ "_bin": _get_category_bounds(self.forecast_category_edges)
}
).rename({FORECASTS_NAME + "_bin": FORECASTS_NAME + "_category_bounds"})
table = table.assign_coords(
{
OBSERVATIONS_NAME
+ "_category": (
OBSERVATIONS_NAME + "_category_bounds",
range(1, len(self.observation_category_edges)),
),
FORECASTS_NAME
+ "_category": (
FORECASTS_NAME + "_category_bounds",
range(1, len(self.forecast_category_edges)),
),
}
)
table = table.swap_dims(
{
OBSERVATIONS_NAME + "_category_bounds": OBSERVATIONS_NAME + "_category",
FORECASTS_NAME + "_category_bounds": FORECASTS_NAME + "_category",
}
)
return table
def _sum_categories(self, categories):
"""Returns sums of specified categories in contingency table
Parameters
----------
category : str, optional
Contingency table categories to sum.
Options are 'total', 'observations' and 'forecasts'
Returns
-------
Sum of all counts in specified categories
"""
if categories == "total":
N = self.table.sum(
dim=(OBSERVATIONS_NAME + "_category", FORECASTS_NAME + "_category"),
skipna=True,
)
elif categories == "observations":
N = self.table.sum(dim=FORECASTS_NAME + "_category", skipna=True).rename(
{OBSERVATIONS_NAME + "_category": "category"}
)
elif categories == "forecasts":
N = self.table.sum(dim=OBSERVATIONS_NAME + "_category", skipna=True).rename(
{FORECASTS_NAME + "_category": "category"}
)
else:
raise ValueError(
f"'{categories}' is not a recognised category. \
Pick one of ['total', 'observations', 'forecasts']"
)
return N
def __repr__(self):
return _display_metadata(self)
@dichotomous_only
def hits(self, yes_category=2):
"""Returns the number of hits (true positives) for dichotomous
contingency data.
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the number of hits
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return self.table.sel(
{
OBSERVATIONS_NAME + "_category": yes_category,
FORECASTS_NAME + "_category": yes_category,
},
drop=True,
)
@dichotomous_only
def misses(self, yes_category=2):
"""Returns the number of misses (false negatives) for dichotomous
contingency data.
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the number of misses
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
no_category = abs(yes_category - 2) + 1
return self.table.sel(
{
OBSERVATIONS_NAME + "_category": yes_category,
FORECASTS_NAME + "_category": no_category,
},
drop=True,
)
@dichotomous_only
def false_alarms(self, yes_category=2):
"""Returns the number of false alarms (false positives) for dichotomous
contingency data.
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the number of false alarms
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
no_category = abs(yes_category - 2) + 1
return self.table.sel(
{
OBSERVATIONS_NAME + "_category": no_category,
FORECASTS_NAME + "_category": yes_category,
},
drop=True,
)
@dichotomous_only
def correct_negatives(self, yes_category=2):
"""Returns the number of correct negatives (true negatives) for dichotomous
contingency data.
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the number of correct negatives
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
no_category = abs(yes_category - 2) + 1
return self.table.sel(
{
OBSERVATIONS_NAME + "_category": no_category,
FORECASTS_NAME + "_category": no_category,
},
drop=True,
)
@dichotomous_only
def bias_score(self, yes_category=2):
"""Returns the bias score(s) for dichotomous contingency data
.. math::
BS = \\frac{\\mathrm{hits} + \\mathrm{false~alarms}}
{\\mathrm{hits} + \\mathrm{misses}}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the bias score(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return (self.hits(yes_category) + self.false_alarms(yes_category)) / (
self.hits(yes_category) + self.misses(yes_category)
)
@dichotomous_only
def hit_rate(self, yes_category=2):
"""Returns the hit rate(s) (probability of detection) for
dichotomous contingency data.
.. math::
HR = \\frac{hits}{hits + misses}
Parameters
----------
yes_category : value, optional
The category coordinate value of the
category corresponding to 'yes' (1 or 2)
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the hit rate(s)
See Also
--------
sklearn.metrics.recall_score
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return self.hits(yes_category) / (
self.hits(yes_category) + self.misses(yes_category)
)
@dichotomous_only
def false_alarm_ratio(self, yes_category=2):
"""Returns the false alarm ratio(s) for dichotomous contingency data.
.. math::
FAR = \\frac{\\mathrm{false~alarms}}{hits + \\mathrm{false~alarms}}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the false alarm ratio(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return self.false_alarms(yes_category) / (
self.hits(yes_category) + self.false_alarms(yes_category)
)
@dichotomous_only
def false_alarm_rate(self, yes_category=2):
"""Returns the false alarm rate(s) (probability of false detection)
for dichotomous contingency data.
.. math::
FA = \\frac{\\mathrm{false~alarms}}
{\\mathrm{correct~negatives} + \\mathrm{false~alarms}}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the false alarm rate(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return self.false_alarms(yes_category) / (
self.correct_negatives(yes_category) + self.false_alarms(yes_category)
)
@dichotomous_only
def success_ratio(self, yes_category=2):
"""Returns the success ratio(s) for dichotomous contingency data.
.. math::
SR = \\frac{hits}{hits + \\mathrm{false~alarms}}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the success ratio(s)
See Also
--------
sklearn.metrics.precision_score
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return self.hits(yes_category) / (
self.hits(yes_category) + self.false_alarms(yes_category)
)
@dichotomous_only
def threat_score(self, yes_category=2):
"""Returns the threat score(s) for dichotomous contingency data.
.. math::
TS = \\frac{hits}{hits + misses + \\mathrm{false~alarms}}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the threat score(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return self.hits(yes_category) / (
self.hits(yes_category)
+ self.misses(yes_category)
+ self.false_alarms(yes_category)
)
@dichotomous_only
def equit_threat_score(self, yes_category=2):
"""Returns the equitable threat score(s) for dichotomous contingency data.
.. math::
ETS = \\frac{hits - hits_{random}}
{hits + misses + \\mathrm{false~alarms} - hits_{random}}
.. math::
hits_{random} = \\frac{(hits + misses)
(hits + \\mathrm{false~alarms})}{total}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the equitable threat score(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
hits_random = (
(self.hits(yes_category) + self.misses(yes_category))
* (self.hits(yes_category) + self.false_alarms(yes_category))
) / self._sum_categories("total")
return (self.hits(yes_category) - hits_random) / (
self.hits(yes_category)
+ self.misses(yes_category)
+ self.false_alarms(yes_category)
- hits_random
)
@dichotomous_only
def odds_ratio(self, yes_category=2):
"""Returns the odds ratio(s) for dichotomous contingency data
.. math::
OR = \\frac{hits * \\mathrm{correct~negatives}}
{misses * \\mathrm{false~alarms}}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the odds ratio(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return (self.hits(yes_category) * self.correct_negatives(yes_category)) / (
self.misses(yes_category) * self.false_alarms(yes_category)
)
@dichotomous_only
def odds_ratio_skill_score(self, yes_category=2):
"""Returns the odds ratio skill score(s) for dichotomous contingency data
.. math::
ORSS = \\frac{hits * \\mathrm{correct~negatives}
- misses * \\mathrm{false~alarms}}
{hits * \\mathrm{correct~negatives} + misses * \\mathrm{false~alarms}}
Parameters
----------
yes_category : value, optional
The category coordinate value of the category corresponding to 'yes'
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the odds ratio skill score(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
return (
self.hits(yes_category) * self.correct_negatives(yes_category)
- self.misses(yes_category) * self.false_alarms(yes_category)
) / (
self.hits(yes_category) * self.correct_negatives(yes_category)
+ self.misses(yes_category) * self.false_alarms(yes_category)
)
def accuracy(self):
"""Returns the accuracy score(s) for a contingency table with K categories
.. math::
A = \\frac{1}{N}\\sum_{i=1}^{K} n(F_i, O_i)
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the accuracy score(s)
See Also
--------
sklearn.metrics.accuracy_score
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
corr = self.table.where(
self.table[OBSERVATIONS_NAME + "_category"]
== self.table[FORECASTS_NAME + "_category"]
).sum(
dim=(OBSERVATIONS_NAME + "_category", FORECASTS_NAME + "_category"),
skipna=True,
)
N = self._sum_categories("total")
return corr / N
def heidke_score(self):
"""Returns the Heidke skill score(s) for a contingency table with K categories
.. math::
HSS = \\frac{\\frac{1}{N}\\sum_{i=1}^{K}n(F_i, O_i) -
\\frac{1}{N^2}\\sum_{i=1}^{K}N(F_i)N(O_i)}
{1 - \\frac{1}{N^2}\\sum_{i=1}^{K}N(F_i)N(O_i)}
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the Heidke score(s)
See Also
--------
sklearn.metrics.cohen_kappa_score
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
numer_1 = (
self.table.where(
self.table[OBSERVATIONS_NAME + "_category"]
== self.table[FORECASTS_NAME + "_category"]
).sum(
dim=(OBSERVATIONS_NAME + "_category", FORECASTS_NAME + "_category"),
skipna=True,
)
/ self._sum_categories("total")
)
numer_2 = (
self._sum_categories("observations") * self._sum_categories("forecasts")
).sum(dim="category", skipna=True) / self._sum_categories("total") ** 2
denom = 1 - numer_2
return (numer_1 - numer_2) / denom
def peirce_score(self):
"""Returns the Peirce skill score(s) (Hanssen and Kuipers discriminantor true
skill statistic) for a contingency table with K categories.
.. math::
PS = \\frac{\\frac{1}{N}\\sum_{i=1}^{K}n(F_i, O_i) -
\\frac{1}{N^2}\\sum_{i=1}^{K}N(F_i)N(O_i)}{1 -
\\frac{1}{N^2}\\sum_{i=1}^{K}N(O_i)^2}
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the Peirce score(s)
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
numer_1 = (
self.table.where(
self.table[OBSERVATIONS_NAME + "_category"]
== self.table[FORECASTS_NAME + "_category"]
).sum(
dim=(OBSERVATIONS_NAME + "_category", FORECASTS_NAME + "_category"),
skipna=True,
)
/ self._sum_categories("total")
)
numer_2 = (
self._sum_categories("observations") * self._sum_categories("forecasts")
).sum(dim="category", skipna=True) / self._sum_categories("total") ** 2
denom = 1 - (self._sum_categories("observations") ** 2).sum(
dim="category", skipna=True
) / (self._sum_categories("total") ** 2)
return (numer_1 - numer_2) / denom
def gerrity_score(self):
"""Returns Gerrity equitable score for a contingency table with K categories.
.. math::
GS = \\frac{1}{N}\\sum_{i=1}^{K}\\sum_{j=1}^{K}n(F_i, O_j)s_{ij}
.. math::
s_{ii} = \\frac{1}{K-1}(\\sum_{r=1}^{i-1}a_r^{-1} +
\\sum_{r=i}^{K-1}a_r)
.. math::
s_{ij} = \\frac{1}{K-1}(\\sum_{r=1}^{i-1}a_r^{-1} - (j - i) +
\\sum_{r=j}^{K-1}a_r); 1 \\leq i < j \\leq K
.. math::
s_{ji} = s_{ij}
.. math::
a_i = \\frac{(1 - \\sum_{r=1}^{i}p_r)}{\\sum_{r=1}^{i}p_r}
.. math::
p_i = \\frac{N(O_i)}{N}
Returns
-------
xarray.Dataset or xarray.DataArray
An array containing the Gerrity scores
References
----------
https://www.cawcr.gov.au/projects/verification/#Contingency_table
"""
# TODO: Currently computes the Gerrity scoring matrix using nested for-loops.
# Is it possible to remove these?
def _gerrity_s(table):
"""Returns Gerrity scoring matrix, s"""
p_o = (table.sum(axis=-1).T / table.sum(axis=(-2, -1)).T).T
p_sum = np.cumsum(p_o, axis=-1)
a = (1.0 - p_sum) / p_sum
k = a.shape[-1]
s = np.zeros(table.shape, dtype=float)
for (i, j) in np.ndindex(*s.shape[-2:]):
if i == j:
s[..., i, j] = (
1.0
/ (k - 1.0)
* (
|
np.sum(1.0 / a[..., 0:j], axis=-1)
|
numpy.sum
|
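A toy check of the dichotomous scores defined in the class above, written with plain counts (hits h, misses m, false alarms f, correct negatives c) instead of the xarray-based table; illustrative only:
h, m, f, c = 25, 5, 10, 60                        # toy 2x2 contingency counts
hit_rate = h / (h + m)                            # probability of detection
false_alarm_ratio = f / (h + f)
false_alarm_rate = f / (c + f)                    # probability of false detection
bias_score = (h + f) / (h + m)
threat_score = h / (h + m + f)
hits_random = (h + m) * (h + f) / (h + m + f + c)
equitable_threat_score = (h - hits_random) / (h + m + f - hits_random)
print(hit_rate, false_alarm_ratio, bias_score, threat_score, equitable_threat_score)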
"""
Trial-resampling: correcting for unbalanced designs
===================================================
This example illustrates how to correct information estimation in case of
unbalanced designs (i.e. when the number of epochs or trials is very different
between conditions).
The technique of trial-resampling consists of randomly taking an equal number of
trials per condition, estimating the effect size and then repeating this
procedure for a more reliable estimation.
"""
import numpy as np
import pandas as pd
from frites.estimator import GCMIEstimator, ResamplingEstimator, DcorrEstimator
from frites import set_mpl_style
import seaborn as sns
import matplotlib.pyplot as plt
set_mpl_style()
###############################################################################
# Data creation
# -------------
#
# This first section creates the data using random points drawn from gaussian
# distributions
n_variables = 1000 # number of random variables
n_epochs = 500 # total number of epochs
prop = 5 # proportion (in percent) of epochs in the first condition
# proportion of trials
n_prop = int(np.round(prop * n_epochs / 100))
# create continuous variables
x_1 =
|
np.random.normal(loc=1., size=(n_variables, 1, n_prop))
|
numpy.random.normal
|
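A plain-NumPy sketch of the trial-resampling idea described in the example above: subsample an equal number of trials per condition, estimate the effect, and average over repetitions. The mean-difference effect measure is only a stand-in for the frites estimators:
import numpy as np

def resampled_effect(x_a, x_b, n_boot=100, seed=0):
    # x_a, x_b: 1D samples from the two (unbalanced) conditions.
    rng = np.random.default_rng(seed)
    n = min(len(x_a), len(x_b))                   # equalize the number of trials
    effects = []
    for _ in range(n_boot):
        a = rng.choice(x_a, size=n, replace=False)
        b = rng.choice(x_b, size=n, replace=False)
        effects.append(a.mean() - b.mean())       # stand-in effect size
    return float(np.mean(effects))

x_a = np.random.normal(loc=1.0, size=25)          # minority condition
x_b = np.random.normal(loc=0.0, size=475)         # majority condition
print(resampled_effect(x_a, x_b))                 # close to the true difference of 1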
import abc
import math
from typing import Any, Dict, Optional, Sequence
import numpy as np
try:
import numpy.typing as npt
except ImportError:
pass
from optur.proto.sampler_pb2 import RandomSamplerConfig, SamplerConfig
from optur.proto.search_space_pb2 import Distribution, ParameterValue, SearchSpace
from optur.proto.study_pb2 import AttributeValue, Target
from optur.proto.study_pb2 import Trial
from optur.proto.study_pb2 import Trial as TrialProto
from optur.samplers.random import RandomSampler
from optur.samplers.sampler import JointSampleResult, Sampler
from optur.utils.search_space_tracker import SearchSpaceTracker
from optur.utils.sorted_trials import (
SortedTrials,
TrialKeyGenerator,
TrialQualityFilter,
)
_N_RERFERENCED_TRIALS_KEY = "smpl.tpe.n"
class TPESampler(Sampler):
def __init__(self, sampler_config: SamplerConfig) -> None:
super().__init__(sampler_config=sampler_config)
assert sampler_config.HasField("tpe")
assert sampler_config.tpe.n_ei_candidates > 0
self._tpe_config = sampler_config.tpe
self._fallback_sampler = RandomSampler(SamplerConfig(random=RandomSamplerConfig()))
self._search_space_tracker: Optional[SearchSpaceTracker] = None
self._sorted_trials: Optional[SortedTrials] = None
def init(self, search_space: Optional[SearchSpace], targets: Sequence[Target]) -> None:
# We need to clear all caches because a set of "valid" past trials changes
# by this operation.
self._fallback_sampler = RandomSampler(SamplerConfig(random=RandomSamplerConfig()))
self._search_space_tracker = SearchSpaceTracker(search_space=search_space)
self._sorted_trials = SortedTrials(
trial_filter=TrialQualityFilter(filter_unknown=True),
trial_key_generator=TrialKeyGenerator(targets),
trial_comparator=None,
)
# We need all past trials in the next sync because we cleared the cache.
self.update_timestamp(timestamp=None)
def sync(self, trials: Sequence[TrialProto]) -> None:
assert self._sorted_trials is not None
assert self._search_space_tracker is not None
self._fallback_sampler.sync(trials=trials)
self._sorted_trials.sync(trials=trials)
self._search_space_tracker.sync(trials=trials)
def joint_sample(
self,
fixed_parameters: Optional[Dict[str, ParameterValue]] = None,
) -> JointSampleResult:
assert self._sorted_trials is not None
assert self._search_space_tracker is not None
sorted_trials = self._sorted_trials.to_list()
if len(sorted_trials) < self._tpe_config.n_startup_trials:
return JointSampleResult(parameters={}, system_attrs={})
search_space = self._search_space_tracker.current_search_space
# TODO(tsuzuku): Extend to MOTPE.
half_idx = len(sorted_trials) // 2
_less_half_trials = sorted_trials[:half_idx]
_greater_half_trials = sorted_trials[half_idx:]
if not _less_half_trials or not _greater_half_trials:
return JointSampleResult(parameters={}, system_attrs={})
kde_l = _UnivariateKDE( # D_l
search_space=search_space,
trials=_less_half_trials,
weights=self._calculate_sample_weights(_less_half_trials),
)
kde_g = _UnivariateKDE( # D_g
search_space=search_space,
trials=_greater_half_trials,
weights=self._calculate_sample_weights(_greater_half_trials),
)
samples = kde_l.sample(
fixed_parameters=fixed_parameters or {}, k=self._tpe_config.n_ei_candidates
)
log_pdf_l = kde_l.log_pdf(samples)
log_pdf_g = kde_g.log_pdf(samples)
best_sample_idx = np.argmax(log_pdf_l - log_pdf_g)
best_sample = {name: sample[best_sample_idx] for name, sample in samples.items()}
return JointSampleResult(
parameters=kde_l.sample_to_value(best_sample),
system_attrs={
_N_RERFERENCED_TRIALS_KEY: AttributeValue(
int_value=self._sorted_trials.n_trials()
),
},
)
def sample(self, distribution: Distribution) -> ParameterValue:
return self._fallback_sampler.sample(distribution=distribution)
def _calculate_sample_weights(self, trials: Sequence[Trial]) -> "npt.NDArray[np.float64]":
weights: "npt.NDArray[np.float64]" = np.asarray(
[
trial.system_attrs[_N_RERFERENCED_TRIALS_KEY].int_value + 1
if _N_RERFERENCED_TRIALS_KEY in trial.system_attrs
else 1
for trial in trials
],
dtype=np.float64,
)
weights /= weights.sum()
return weights
# The Gaussian kernel is used for continuous parameters.
# The Aitchison-Aitken kernel is used for categorical parameters.
class _UnivariateKDE:
def __init__(
self,
search_space: SearchSpace,
trials: Sequence[TrialProto],
weights: "npt.NDArray[np.float64]",
) -> None:
assert trials
assert weights.shape == (len(trials),), str(weights.shape) + ":" + str(len(trials))
self._search_space = search_space
n_distribution = len(search_space.distributions)
self.weights = weights
self._distributions: Dict[str, _MixturedDistribution] = {
name: _MixturedDistribution(
name=name,
distribution=distribution,
trials=trials,
n_distribution=n_distribution,
)
for name, distribution in search_space.distributions.items()
if not distribution.HasField("unknown_distribution")
}
def sample_to_value(self, sample: Dict[str, Any]) -> Dict[str, ParameterValue]:
return {
name: self._distributions[name].sample_to_value(value)
for name, value in sample.items()
}
def sample(
self, fixed_parameters: Dict[str, ParameterValue], k: int
) -> Dict[str, "npt.NDArray[Any]"]:
ret: Dict[str, "npt.NDArray[Any]"] = {}
for name in self._distributions:
if name in fixed_parameters:
raise NotImplementedError()
active = np.argmax(np.random.multinomial(1, self.weights, size=(k,)), axis=-1)
ret[name] = self._distributions[name].sample(active_indices=active)
return ret
def log_pdf(self, observations: Dict[str, "npt.NDArray[Any]"]) -> "npt.NDArray[np.float64]":
ret = np.zeros(shape=(1,))
weights = np.log(self.weights)[None]
for name, samples in observations.items():
log_pdf = self._distributions[name].log_pdf(samples)
# TODO(tsuzuku): Improve numerical stability.
ret = ret + np.log(np.exp(log_pdf + weights).sum(axis=1))
return ret
class _MixturedDistributionBase(abc.ABC):
@abc.abstractmethod
def sample(self, active_indices: "npt.NDArray[np.int_]") -> "npt.NDArray[Any]":
pass
@abc.abstractmethod
def sample_to_value(self, sample: Any) -> ParameterValue:
pass
# (n_sample,) -> (n_sample, n_distribution)
@abc.abstractmethod
def log_pdf(self, x: "npt.NDArray[Any]") -> "npt.NDArray[np.float64]":
pass
# TODO(tsuzuku): Implement this.
class _AitchisonAitken(_MixturedDistributionBase):
def __init__(
self,
n_choice: int,
selections: "npt.NDArray[np.int_]",
eps: float = 1e-6,
) -> None:
pass
def sample_to_value(self, sample: Any) -> ParameterValue:
raise NotImplementedError()
def sample(self, active_indices: "npt.NDArray[np.int_]") -> "npt.NDArray[np.int_]":
raise NotImplementedError()
def log_pdf(self, x: "npt.NDArray[np.int_]") -> "npt.NDArray[np.float64]":
raise NotImplementedError()
class _TruncatedLogisticMixturedDistribution(_MixturedDistributionBase):
"""Truncated Logistic Mixtured Distribution.
Mimics GMM with finite support.
We use logistic distribution instead of gaussian because logistic distribution
is easier to implement.
Even though scipy provides truncnorm, sometimes scipy is not easy to install and
it's great if we can avoid introducing the dependency.
Args:
low:
Lower bound of the support.
high:
Upper bound of the support.
loc:
Means of Logistic distributions.
scale:
Standardized variances of Logistic distributions.
eps:
A small constant for numerical stability.
"""
def __init__(
self,
low: float,
high: float,
loc: "npt.NDArray[np.float64]",
scale: "npt.NDArray[np.float64]",
eps: float = 1e-6,
) -> None:
assert loc.shape == scale.shape
self.low = low
self.high = high
self.loc = loc
self.scale = scale
self.eps = eps
self.normalization_constant: "npt.NDArray[np.float64]" = ( # (1, n_observation)
self.unnormalized_cdf(
|
np.asarray([high])
|
numpy.asarray
|
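A small sketch of the truncated-logistic ingredient behind _TruncatedLogisticMixturedDistribution above: the logistic CDF is a sigmoid, and dividing the density by cdf(high) - cdf(low) renormalizes it to the support [low, high]. Illustrative only, not the optur implementation:
import numpy as np

def logistic_cdf(x, loc, scale):
    return 1.0 / (1.0 + np.exp(-(x - loc) / scale))

def truncated_logistic_pdf(x, low, high, loc, scale):
    z = np.exp(-(x - loc) / scale)
    pdf = z / (scale * (1.0 + z) ** 2)                        # plain logistic density
    norm = logistic_cdf(high, loc, scale) - logistic_cdf(low, loc, scale)
    return np.where((x < low) | (x > high), 0.0, pdf / norm)  # renormalize to [low, high]

x = np.linspace(-1.0, 1.0, 5)
print(truncated_logistic_pdf(x, low=-1.0, high=1.0, loc=0.0, scale=0.3))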
# -*- coding: utf-8 -*-
"""MegaSim asynchronous spiking simulator.
@author: <NAME>
"""
from __future__ import division, absolute_import
# For compatibility with python2
from __future__ import print_function, unicode_literals
import os
import subprocess
import sys
import abc
from abc import abstractmethod
from builtins import int, range
import numpy as np
from future import standard_library
from snntoolbox.simulation.utils import AbstractSNN
standard_library.install_aliases()
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
INT32_MAX = 2147483646
class Megasim_base(ABC):
"""
Class that holds the common attributes and methods for the MegaSim modules.
Parameters
----------
Attributes
----------
Attributes set to -1 must be set by each subclass, the rest can be used as default values
Attributes common to all MegaSim modules
n_in_ports: int
Number of input ports
n_out_ports: int
Number of output ports
delay_to_process: int
Delay to process an input event
delay_to_ack: int
Delay to acknowledge an event
fifo_depth: int
Depth of input fifo
n_repeat: int
delay_to_repeat: int
# Parameters for the convolutional module and average pooling module
Nx_array: int
X dimensions of the feature map
Ny_array: int
Y dimensions of the feature map
Xmin: int
start counting from Xmin (=0)
Ymin: int
start counting from Ymin (=0)
THplus:
Positive threshold
THplusInfo:
Flag to enable spikes when reaching the positive threshold
THminus:
Negative threshold
THminusInfo:
Flag to enable spikes with negative polarity when reaching the negative threshold
Reset_to_reminder:
If set, the membrane potential is reset to the remaining difference after crossing the threshold, instead of being reset to zero
MembReset: int
Resting potential (=0)
TLplus: int
Linear leakage slope from the positive threshold
TLminus: int
Linear leakage slope from the negative threshold
Tmin: int
minimum time between 2 spikes
T_Refract: int
Refractory period
# Parameters for the output
crop_xmin: int
Xmin crop of the feature map
crop_xmax: int
crop_ymin: int
crop_ymax: int
xshift_pre: int
X shift before subsampling
yshift_pre: int
Y shift before subsampling
x_subsmp: int
Subsampling (=1 if none)
y_subsmp: int
xshift_pos: int
X shift after subsampling
yshift_pos: int
rectify: int
Flag that if set will force all spikes to have positive polarity
# The fully connected module has population_size instead of Nx_array
population_size: int
Number of neurons in the fully connected module
Nx_array_pre: int
Number of neurons in the previous layer
# Needed by the state file
time_busy_initial: int
Initial state of the module (=0)
# Scaling factor
scaling_factor: int
Scaling factor for the parameters since MegaSim works with integers
Methods
-------
build_state_file:
Input parameters: a string with the full path to the megasim SNN directory
This method is common to all MegaSim modules. It generates an initial state file
per module based on the time_busy_initial.
build_parameter_file:
Input parameters: a string with the full path to the megasim SNN directory
This method generates the module's parameter file based on its attributes set by
the sub-class.
This method depends on the MegaSim module and will raise an error if not implemented.
"""
# Attributes common to all MegaSim modules
n_in_ports = -1
n_out_ports = 1
delay_to_process = 0
delay_to_ack = 0
fifo_depth = 0
n_repeat = 1
delay_to_repeat = 15
# Parameters for the conv module and avg pooling
Nx_array = -1
Ny_array = 1
Xmin = 0
Ymin = 0
THplus = 0
THplusInfo = 1
THminus = -2147483646
THminusInfo = 0
Reset_to_reminder = 0
MembReset = 0
TLplus = 0
TLminus = 0
Tmin = 0
T_Refract = 0
# Parameters for the output
crop_xmin = -1
crop_xmax = -1
crop_ymin = -1
crop_ymax = -1
xshift_pre = 0
yshift_pre = 0
x_subsmp = 1
y_subsmp = 1
xshift_pos = 0
yshift_pos = 0
rectify = 0
# The fully connected module has population_size instead of Nx_array
population_size = -1
Nx_array_pre = -1
# Needed by the state file
time_busy_initial = 0
# Scaling factor
scaling_factor = 1
def __init__(self):
pass
def build_state_file(self, dirname):
"""
dirname = the full path of the
"""
f = open(dirname + self.label + ".stt", "w")
f.write(".integers\n")
f.write("time_busy_initial %d\n" % self.time_busy_initial)
f.write(".floats\n")
f.close()
@abstractmethod
def build_parameter_file(self, dirname):
pass
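# Illustrative only (not part of the original file): a minimal concrete subclass
# showing what build_state_file writes. The label and directory are hypothetical.
class _DemoModule(Megasim_base):
    def __init__(self):
        self.label = "demo_layer"

    def build_parameter_file(self, dirname):
        pass  # nothing to write for this demo

# _DemoModule().build_state_file("/tmp/megasim_demo/")
# The resulting demo_layer.stt would contain:
#   .integers
#   time_busy_initial 0
#   .floats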
class module_input_stimulus:
"""
A dummy class for the input stimulus.
Parameters
----------
label: string
String to hold the module's name.
pop_size: int
Integer to store the population size.
Attributes
----------
label: string
pop_size: int
input_stimulus_file: string
String to hold the filename of the input stimulus
module_string: string
String that holds the module name for megasim
evs_files: list
List of strings of the event filenames that will be generated when a
megasim simulation is over.
"""
def __init__(self, label, pop_size):
self.label = label
self.pop_size = pop_size
self.input_stimulus_file = "input_events.stim"
self.module_string = "source"
self.evs_files = []
class module_flatten(Megasim_base):
"""
A class for the flatten megasim module. The flatten module is used to
connect a 3D population to a
1D population, e.g. a convolutional layer to a fully connected one.
Parameters
----------
layer_params: Keras layer
Layer from parsed input model.
input_ports: int
Number of input ports (eg feature maps from the previous layer)
fm_size: tuple
Tuple of integers that holds the size of the feature maps from the
previous layer
Attributes
----------
module_string: string
String that holds the module name for megasim
output_shapes: tuple
Tuple that holds the shape of the output of the module. Used for the
plotting.
evs_files: list
List of strings of the event filenames that will be generated when a
megasim simulation is over.
"""
def __init__(self, layer_params, input_ports, fm_size):
self.module_string = "module_flatten"
self.label = layer_params.name
self.output_shapes = layer_params.output_shape
self.evs_files = []
self.n_in_ports = input_ports
self.Nx_array = fm_size[0]
self.Ny_array = fm_size[1]
def build_parameter_file(self, dirname):
"""
"""
param1 = (
""".integers
n_in_ports %d
n_out_ports %d
delay_to_process %d
delay_to_ack %d
fifo_depth %d
n_repeat %d
delay_to_repeat %d
""" % (self.n_in_ports, self.n_out_ports, self.delay_to_process,
self.delay_to_ack, self.fifo_depth, self.n_repeat,
self.delay_to_repeat))
param_k = (
"""Nx_array %d
Ny_array %d
""" % (self.Nx_array,
self.Ny_array))
q = open(dirname + self.label + '.prm', "w")
q.write(param1)
for k in range(self.n_in_ports):
q.write(param_k)
q.write(".floats\n")
q.close()
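# Sketch of the .prm file written above (added for clarity; the port count and
# feature-map size are hypothetical): for a flatten module with n_in_ports = 2
# and 24x24 feature maps the file would contain
#
#   .integers
#   n_in_ports 2
#   n_out_ports 1
#   delay_to_process 0
#   delay_to_ack 0
#   fifo_depth 0
#   n_repeat 1
#   delay_to_repeat 15
#   Nx_array 24
#   Ny_array 24
#   Nx_array 24
#   Ny_array 24
#   .floats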
class Module_average_pooling(Megasim_base):
"""
duplicate code with the module_conv class - TODO: merge them
layer_params
Attributes: ['label', 'layer_num', 'padding', 'layer_type', 'strides',
'input_shape', 'output_shape', 'get_activ', 'pool_size']
"""
def __init__(self, layer_params, neuron_params, reset_input_event=False,
scaling_factor=10000000):
self.uses_biases = False
if reset_input_event:
self.module_string = 'module_conv_NPP'
else:
self.module_string = 'module_conv'
self.layer_type = layer_params.__class__.__name__
self.output_shapes = layer_params.output_shape # (none, 32, 26, 26)
# last two
self.label = layer_params.name
self.evs_files = []
self.reset_input_event = reset_input_event
self.n_in_ports = 1 # one average pooling layer per conv layer
# self.in_ports = 1 # one average pooling layer per conv layer
self.num_of_FMs = layer_params.input_shape[1]
self.fm_size = layer_params.output_shape[2:]
self.Nx_array, self.Ny_array = self.fm_size[1] * 2, self.fm_size[0] * 2
self.Dx, self.Dy = 0, 0
self.crop_xmin, self.crop_xmax = 0, self.fm_size[0] * 2 - 1
self.crop_ymin, self.crop_ymax = 0, self.fm_size[1] * 2 - 1
self.xshift_pre, self.yshift_pre = 0, 0
self.strides = layer_params.strides
self.num_pre_modules = layer_params.input_shape[1]
self.scaling_factor = int(scaling_factor)
self.THplus = neuron_params["v_thresh"] * self.scaling_factor
self.THminus = -2147483646
self.refractory = neuron_params["tau_refrac"]
self.MembReset = neuron_params["v_reset"] * self.scaling_factor
self.TLplus = 0
self.TLminus = 0
self.kernel_size = (1, 1) # layer_params.pool_size
self.Reset_to_reminder = 0
if neuron_params["reset"] == 'Reset to zero':
self.Reset_to_reminder = 0
else:
self.Reset_to_reminder = 1
self.pre_shapes = layer_params.input_shape # (none, 1, 28 28) # last 2
self.padding = layer_params.padding
if self.padding != 'valid':
print("Not implemented yet!")
sys.exit(88)
if self.reset_input_event:
self.n_in_ports += 1
def build_parameter_file(self, dirname):
sc = self.scaling_factor
fm_size = self.fm_size
num_FMs = self.num_pre_modules
print(
"building %s with %d FM receiving input from %d pre pops. FM size is %d,%d" % (
self.label, self.output_shapes[1], self.pre_shapes[1],
self.output_shapes[2], self.output_shapes[3]))
kernel = np.ones(self.kernel_size, dtype="float") * sc
kernel *= 1.0 / np.sum(self.kernel_size)
kernel = kernel.astype("int")
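# Worked example (added comment): with the default scaling_factor of
# 10,000,000 and kernel_size (1, 1), np.sum(self.kernel_size) == 2, so the
# single weight written out is int(10_000_000 * 0.5) = 5_000_000; the 2x2
# spatial pooling geometry itself comes from the x_subsmp/y_subsmp values of 2
# written further below.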
for f in range(num_FMs):
fm_filename = self.label + "_" + str(f)
self.__build_single_fm(self.n_in_ports, self.n_out_ports, fm_size,
kernel, dirname, fm_filename)
pass
def __build_single_fm(self, num_in_ports, num_out_ports, fm_size, kernel,
dirname, fprmname):
"""
Parameters
----------
num_in_ports: int
number of input ports
num_out_ports: int
number of output ports
fm_size: tuple
A tuple with the X, Y dimensions of the feature map
kernel: numpy array
The (already scaled, integer) averaging kernel for the feature map
dirname: string
String with the full path of the megasim simulation folder
fprmname: string
Filename of the parameter file
Returns
-------
"""
param1 = (
""".integers
n_in_ports %d
n_out_ports %d
delay_to_process %d
delay_to_ack %d
fifo_depth %d
n_repeat %d
delay_to_repeat %d
Nx_array %d
Ny_array %d
Xmin %d
Ymin %d
THplus %d
THplusInfo %d
THminus %d
THminusInfo %d
Reset_to_reminder %d
MembReset %d
TLplus %d
TLminus %d
Tmin %d
T_Refract %d
""" % (num_in_ports, num_out_ports, self.delay_to_process, self.delay_to_ack,
self.fifo_depth,
self.n_repeat, self.delay_to_repeat,
self.Nx_array, self.Ny_array, self.Xmin, self.Ymin,
self.THplus, self.THplusInfo, self.THminus, self.THminusInfo,
self.Reset_to_reminder, self.MembReset, self.TLplus, self.TLminus,
self.Tmin, self.T_Refract))
param_k = (
"""Nx_kernel %d
Ny_kernel %d
Dx %d
Dy %d
""" % (self.kernel_size[0],
self.kernel_size[1],
self.Dx, self.Dy))
kernels_list = []
for k in range(1):
# scale the weights
w = kernel
np.savetxt(dirname + "w.txt", w, delimiter=" ", fmt="%d")
q = open(dirname + "w.txt")
param2 = q.readlines()
q.close()
os.remove(dirname + "w.txt")
kernels_list.append(param2)
if self.reset_input_event:
param_reset1 = (
"""Nx_kernel %d
Ny_kernel %d
Dx %d
Dy %d
""" % (1,
1,
0, 0
))
param_reset2 = " ".join([str(x) for x in [0] * 1])
param5 = (
"""crop_xmin %d
crop_xmax %d
crop_ymin %d
crop_ymax %d
xshift_pre %d
yshift_pre %d
x_subsmp %d
y_subsmp %d
xshift_pos %d
yshift_pos %d
rectify %d
.floats
""" % (self.crop_xmin, self.crop_xmax,
self.crop_ymin, self.crop_ymax,
self.xshift_pre, self.yshift_pre,
2, 2, # self.kernel_size[0], self.kernel_size[1],
0, 0,
0)
)
'''
% (0, (fm_size[0] *self.kernel_size[0])-1,
0, (fm_size[1] *self.kernel_size [1]) -1,
0, 0,
self.kernel_size[0], self.kernel_size[1],
0, 0,
0)
)
'''
q = open(dirname + fprmname + '.prm', "w")
q.write(param1)
for k in range(len(kernels_list)):
q.write(param_k)
for i in param2:
q.write(i)
if self.reset_input_event:
q.write(param_reset1)
q.write(param_reset2)
q.write("\n")
q.write(param5)
q.close()
class Module_conv(Megasim_base):
"""
A class for the convolutional megasim module.
Parameters
----------
layer_params: Keras layer
Layer from parsed input model.
neuron_params: dictionary
This is the settings dictionary that is set in the config.py module
flip_kernels: boolean
If set will flip the kernels upside down.
scaling_factor: int
An integer that will be used to scale all parameters.
Attributes
----------
module_string: string
String that holds the module name for megasim
output_shapes: tuple
Tuple that holds the shape of the output of the module. Used for the
plotting.
evs_files: list
List of strings of the event filenames that will be generated when a
megasim simulation is over.
num_of_FMs: int
Number of feature maps in this layer
w: list
list of weights
padding: string
String with the border mode used for the convolutional layer. So far
only the valid mode is implemented
layer_params
Attributes: ['kernel_size', 'activation', 'layer_type', 'layer_num',
'filters', 'output_shape', 'input_shape', 'label', 'parameters', 'padding']
"""
def __init__(self, layer_params, neuron_params, flip_kernels=True,
reset_input_event=False, scaling_factor=10000000):
if reset_input_event:
self.module_string = 'module_conv_NPP'
else:
self.module_string = 'module_conv'
self.layer_type = layer_params.__class__.__name__
self.output_shapes = layer_params.output_shape # (none, 32, 26, 26) last two
self.label = layer_params.name
self.evs_files = []
self.reset_input_event = reset_input_event
# self.size_of_FM = 0
self.w = layer_params.get_weights()[0]
self.num_of_FMs = self.w.shape[3]
self.kernel_size = self.w.shape[:2] # (kx, ky)
try:
self.b = layer_params.get_weights()[1]
if np.nonzero(self.b)[0].size != 0:
self.uses_biases = True
print("%s uses biases" % self.module_string)
else:
self.uses_biases = False
print("%s does not use biases" % self.module_string)
except IndexError:
self.uses_biases = False
print("%s does not use biases" % self.module_string)
self.n_in_ports = self.w.shape[2]
self.pre_shapes = layer_params.input_shape # (none, 1, 28 28) # last 2
self.fm_size = self.output_shapes[2:]
self.Nx_array = self.output_shapes[2:][1]
self.Ny_array = self.output_shapes[2:][0]
self.padding = layer_params.padding # 'same', 'valid',
self.Reset_to_reminder = 0
if neuron_params["reset"] == 'Reset to zero':
self.Reset_to_reminder = 0
else:
self.Reset_to_reminder = 1
if self.padding == 'valid':
# if its valid mode
self.Nx_array = self.output_shapes[2:][1] + self.kernel_size[1] - 1
self.Ny_array = self.output_shapes[2:][0] + self.kernel_size[0] - 1
self.xshift_pre, self.yshift_pre = -int(
self.kernel_size[1] / 2), -int(self.kernel_size[0] / 2)
self.crop_xmin, self.crop_xmax = int(self.kernel_size[1] / 2), (
self.Nx_array - self.kernel_size[1] + 1)
self.crop_ymin, self.crop_ymax = int(self.kernel_size[0] / 2), (
self.Ny_array - self.kernel_size[0] + 1)
else:
print("Not implemented yet!")
self.Nx_array = self.output_shapes[2:][1]
self.Ny_array = self.output_shapes[2:][0]
self.xshift_pre, self.yshift_pre = (0, 0)
self.crop_xmin, self.crop_xmax = (0, self.Nx_array)
self.crop_ymin, self.crop_ymax = (0, self.Ny_array)
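# Worked example for the 'valid' branch above (added comment): a 28x28 input
# convolved with a 3x3 kernel yields a 26x26 output, so
#   Nx_array = 26 + 3 - 1 = 28, xshift_pre = -1,
#   crop_xmin = int(3 / 2) = 1, crop_xmax = 28 - 3 + 1 = 26
# (and likewise for the y direction).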
self.scaling_factor = int(scaling_factor)
self.flip_kernels = flip_kernels
self.THplus = neuron_params["v_thresh"] * self.scaling_factor
self.T_Refract = neuron_params["tau_refrac"]
self.MembReset = neuron_params["v_reset"]
self.TLplus = 0
self.TLminus = 0
if self.reset_input_event:
self.n_in_ports += 1
if self.uses_biases:
self.n_in_ports += 1
def build_parameter_file(self, dirname):
fm_size = self.output_shapes[2:]
pre_num_ports = self.pre_shapes[1]
num_FMs = self.output_shapes[1]
print("building %s with %d FM receiving input from %d pre pops. FM "
"size is %d,%d" % (self.label, self.output_shapes[1],
self.pre_shapes[1], self.output_shapes[2],
self.output_shapes[3]))
for f in range(num_FMs):
fm_filename = self.label + "_" + str(f)
kernel = self.w[:, :, :, f]
if self.uses_biases:
bias = self.b[f]
else:
bias = 0.0
self.__build_single_fm(pre_num_ports, 1, fm_size, kernel, bias,
dirname, fm_filename)
pass
def __build_single_fm(self, num_in_ports, num_out_ports, fm_size, kernel,
bias, dirname, fprmname):
"""
Helper method to create a single feature map
Parameters
----------
num_in_ports: int
number of input ports
num_out_ports: int
number of output ports
fm_size: tuple
A tuple with the X, Y dimensions of the feature map
kernel: numpy array
A numpy array of X,Y dimensions with the kernel of the feature map
bias: float
Bias value for this feature map (0.0 when the layer has no biases)
dirname: string
String with the full path of the megasim simulation folder
fprmname: string
Filename of the parameter file
Returns
-------
"""
sc = self.scaling_factor
param1 = (
""".integers
n_in_ports %d
n_out_ports %d
delay_to_process %d
delay_to_ack %d
fifo_depth %d
n_repeat %d
delay_to_repeat %d
Nx_array %d
Ny_array %d
Xmin %d
Ymin %d
THplus %d
THplusInfo %d
THminus %d
THminusInfo %d
Reset_to_reminder %d
MembReset %d
TLplus %d
TLminus %d
Tmin %d
T_Refract %d
""" % (self.n_in_ports, self.n_out_ports,
self.delay_to_process, self.delay_to_ack, self.fifo_depth, self.n_repeat,
self.delay_to_repeat,
self.Nx_array, self.Ny_array,
self.Xmin, self.Ymin,
self.THplus, self.THplusInfo,
self.THminus, self.THminusInfo,
self.Reset_to_reminder, self.MembReset,
self.TLplus, self.TLminus, self.Tmin, self.T_Refract))
param_k = (
"""Nx_kernel %d
Ny_kernel %d
Dx %d
Dy %d
""" % (self.kernel_size[0],
self.kernel_size[1],
-int(self.kernel_size[0] / 2), -int(self.kernel_size[1] / 2)
))
kernels_list = []
for k in range(kernel.shape[0]):
w = kernel[k] * sc
if self.flip_kernels:
'''
After tests I did in Zurich, we only need to flip the kernels upside down.
'''
w = np.flipud(w)
np.savetxt(dirname + "w.txt", w, delimiter=" ", fmt="%d")
q = open(dirname + "w.txt")
param2 = q.readlines()
q.close()
os.remove(dirname + "w.txt")
kernels_list.append(param2)
if self.uses_biases:
param_biases1 = (
"""Nx_kernel %d
Ny_kernel %d
Dx %d
Dy %d
""" % (self.Nx_array,
self.Ny_array,
0, 0
))
b = np.ones((self.Nx_array, self.Ny_array))
import unittest
import mapf_gym as MAPF_Env
import numpy as np
# Agent 1
num_agents1 = 1
world1 = [[ 1, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals1 = [[ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1
num_agents2 = 1
world2 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, -1, 1, -1, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals2 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1 and 2
num_agents3 = 2
world3 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, -1, 0, 0, 0, 0, 0],
[ 0, 0, -1, 1, 2, -1, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals3 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1 and 2
num_agents4 = 2
world4 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, -1, -1, 0, 0, 0, 0],
[ 0, 0, -1, 1, 2, -1, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals4 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# action: {0:NOP, 1:MOVE_NORTH, 2:MOVE_EAST, 3:MOVE_SOUTH, 4:MOVE_WEST}
# MAPF_Env.ACTION_COST, MAPF_Env.IDLE_COST, MAPF_Env.GOAL_REWARD, MAPF_Env.COLLISION_REWARD
FULL_HELP = False
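# Quick orientation (added comment; constructor and step signatures as
# exercised by the tests below): world cells hold 0 (free), -1 (obstacle) or
# an agent id, and the goals grid holds that id at the agent's goal cell.
#
#   env = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
#   state, reward, done, nextActions, on_goal, blocking, valid_action = env.step((agent_id, action))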
class MAPFTests(unittest.TestCase):
# Bruteforce tests
def test_validActions1(self):
# MAPF_Env.MAPFEnv(self, num_agents=1, world0=None, goals0=None, DIAGONAL_MOVEMENT=False, SIZE=10, PROB=.2, FULL_HELP=False)
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1), DIAGONAL_MOVEMENT=False)
validActions1 = gameEnv1._listNextValidActions(1)
self.assertEqual(validActions1, [0,1,2])
# With diagonal actions
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1), DIAGONAL_MOVEMENT=True)
validActions1 = gameEnv1._listNextValidActions(1)
self.assertEqual(validActions1, [0,1,2,5])
def test_validActions2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2), DIAGONAL_MOVEMENT=False)
validActions2 = gameEnv2._listNextValidActions(1)
self.assertEqual(validActions2, [0])
# With diagonal actions
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2), DIAGONAL_MOVEMENT=True)
validActions2 = gameEnv2._listNextValidActions(1)
self.assertEqual(validActions2, [0,5,6,7,8])
def test_validActions3(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3), DIAGONAL_MOVEMENT=False)
validActions3a = gameEnv3._listNextValidActions(1)
validActions3b = gameEnv3._listNextValidActions(2)
self.assertEqual(validActions3a, [0])
self.assertEqual(validActions3b, [0,2])
# With diagonal actions
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3), DIAGONAL_MOVEMENT=True)
validActions3a = gameEnv3._listNextValidActions(1)
validActions3b = gameEnv3._listNextValidActions(2)
self.assertEqual(validActions3a, [0,5,6,7])
self.assertEqual(validActions3b, [0,2,5,8])
def test_validActions4(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=False)
validActions4a = gameEnv4._listNextValidActions(1)
validActions4b = gameEnv4._listNextValidActions(2)
self.assertEqual(validActions4a, [0,2])
self.assertEqual(validActions4b, [0,2])
# With diagonal actions
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=True)
validActions4a = gameEnv4._listNextValidActions(1)
validActions4b = gameEnv4._listNextValidActions(2)
self.assertEqual(validActions4a, [0,2,5,6,7])
self.assertEqual(validActions4b, [0,2,5,6])
def testIdle1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal, blocking, valid_action
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,0))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,0))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle3(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,0))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,0))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle4(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=False)
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,0))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,0))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,1))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,1))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east4a(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east4b(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,2))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.ACTION_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,2))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
import tensorflow as tf
import numpy as np
import os
import math
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
@tf.function
def one_hot(labels, class_size):
"""
Create one hot label matrix of size (N, C)
Inputs:
- labels: Labels Tensor of shape (N,) representing a ground-truth label
for each MNIST image
- class_size: Scalar representing the number of target classes in our dataset
Returns:
- targets: One-hot label matrix of (N, C), where targets[i, j] = 1 when
the ground truth label for image i is j, and targets[i, :j] &
targets[i, j + 1:] are equal to 0
"""
return tf.one_hot(labels, class_size)
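# Example (added for clarity): one_hot(tf.constant([0, 2]), 3) returns
#   [[1., 0., 0.],
#    [0., 0., 1.]]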
def save_model_weights(model, args):
"""
Save trained VAE model weights to model_ckpts/
Inputs:
- model: Trained VAE model.
- cfg: All arguments.
"""
model_flag = "cvae" if args.is_cvae else "vae"
output_dir = os.path.join("model_ckpts", model_flag)
output_path = os.path.join(output_dir, model_flag)
os.makedirs("model_ckpts", exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
model.save_weights(output_path)
def show_vae_images(model, latent_size):
"""
Call this only if the model is VAE!
Generate 10 images from random vectors.
Show the generated images from your trained VAE.
Image will be saved to outputs/show_vae_images.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
# Generated images from vectors of random values.
z = tf.random.normal(shape=[10, latent_size])
samples = model.decoder(z).numpy()
# Visualize
fig = plt.figure(figsize=(10, 1))
gspec = gridspec.GridSpec(1, 10)
gspec.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gspec[i])
plt.axis("off")
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect("equal")
plt.imshow(sample.reshape(28, 28), cmap="Greys_r")
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_vae_images.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
def show_vae_interpolation(model, latent_size):
"""
Call this only if the model is VAE!
Generate interpolations between pairs of random latent vectors.
Show the generated images from your trained VAE.
Image will be saved to outputs/show_vae_interpolation.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
def show_interpolation(images):
"""
A helper to visualize the interpolation.
"""
images = tf.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(math.ceil(math.sqrt(images.shape[0])))
sqrtimg = int(math.ceil(math.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(tf.reshape(img, [sqrtimg, sqrtimg]))
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_vae_interpolation.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
S = 12
z0 = tf.random.normal(shape=[S, latent_size], dtype=tf.dtypes.float32) # [S, latent_size]
z1 = tf.random.normal(shape=[S, latent_size], dtype=tf.dtypes.float32)
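# Shape walk-through for the interpolation below (added comment):
#   w is reshaped to [S, 1, 1]; z0 and z1 are [S, latent_size], so
#   w * z0 + (1 - w) * z1 broadcasts to [S, S, latent_size] -- axis 0 indexes
#   the interpolation weight, axis 1 the random (z0, z1) pair. The transpose
#   and reshape then flatten this grid to [S*S, latent_size] before decoding.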
w = tf.linspace(0, 1, S)
w = tf.cast(tf.reshape(w, (S, 1, 1)), dtype=tf.float32) # [S, 1, 1]
z = tf.transpose(w * z0 + (1 - w) * z1, perm=[1, 0, 2])
z = tf.reshape(z, (S * S, latent_size))  # [S*S, latent_size]
x = model.decoder(z) # [S*S, 1, 28, 28]
show_interpolation(x)
def show_cvae_images(model, latent_size):
"""
Call this only if the model is CVAE!
Conditionally generate 10 images for each digit.
Show the generated images from your trained CVAE.
Image will be saved to outputs/show_cvae_images.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
# Conditionally generated images from vectors of random values.
num_generation = 100
num_classes = 10
num_per_class = num_generation // num_classes
c = tf.eye(num_classes) # [one hot labels for 0-9]
z = []
labels = []
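# The loop below builds, for each digit class, num_per_class random latent
# vectors with the class one-hot appended, i.e. curr_z has shape
# [num_per_class, latent_size + num_classes]; after concatenation z is
# [num_generation, latent_size + num_classes]. (Added comment.)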
for label in range(num_classes):
curr_c = c[label]
curr_c = tf.broadcast_to(curr_c, [num_per_class, len(curr_c)])
curr_z = tf.random.normal(shape=[num_per_class, latent_size])
curr_z = tf.concat([curr_z, curr_c], axis=-1)
z.append(curr_z)
labels.append([label] * num_per_class)
z = np.concatenate(z)
from spimagine import volshow, volfig
import numpy as np
from utils import xyz_to_zyx, load_nifti, preprocess_histograms, drop_regions_with_few_comparisons
import pandas as pd
import matplotlib
#matplotlib.use('Qt5Agg')
#%matplotlib qt5
#%matplotlib notebook
#%matplotlib widget
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
from spimagine.models.imageprocessor import BlurProcessor
from spimagine.utils.quaternion import Quaternion
import sys
# activate latex text rendering
#from matplotlib import rc
#rc('text', usetex=True)
#matplotlib.rcParams['savefig.facecolor'] = (173/255, 216/255, 230/255)
def spimagine_show_volume_numpy(numpy_array, stackUnits=(1, 1, 1), interpolation="nearest", cmap="grays"):
# Spimagine OpenCL volume renderer.
volfig()
spim_widget = \
volshow(numpy_array[::-1, ::-1, ::-1], stackUnits=stackUnits, interpolation=interpolation)
spim_widget.set_colormap(cmap)
#spim_widget.transform.setRotation(np.pi/8,-0.6,0.5,1)
spim_widget.transform.setQuaternion(Quaternion(-0.005634209439510011,0.00790509382124309,-0.0013812284289010514,-0.9999519273706857))
def spimagine_show_mni_volume_numpy(numpy_array, stackUnits=(1, 1, 1), interpolation="nearest", cmap="grays"):
# Spimagine OpenCL volume renderer.
volfig()
spim_widget = \
volshow(numpy_array, stackUnits=stackUnits, interpolation=interpolation)
spim_widget.set_colormap(cmap)
#spim_widget.transform.setRotation(np.pi/8,-0.6,0.5,1)
# Up
spim_widget.transform.setQuaternion(Quaternion(0.0019763238787262496,4.9112439271825864e-05,0.9999852343690417,0.0050617341749588825))
def visualize_regions(regions_df, labels_data, labels_dims, interpolation="nearest", cmap="hot"):
"""
regions_df : pandas.core.frame.DataFrame
contains region labels as index, together with a float value per region
labels_data : np.array
atlas volume whose voxel values are the region labels
labels_dims
voxel dimensions of the atlas volume (passed on as stackUnits)
"""
heat_map = labels_data.copy()
for region_value in np.array(np.unique(heat_map)):
if region_value not in regions_df.index:
heat_map[heat_map == region_value] = 0
else:
heat_map[heat_map == region_value] = regions_df.loc[region_value]
spimagine_show_mni_volume_numpy(heat_map, stackUnits=labels_dims, interpolation=interpolation, cmap=cmap)
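# Hedged usage sketch (added; the atlas filename and 'medians' variable are
# hypothetical): 'medians' would typically be the Series returned by
# sorted_boxplot_histogram_distances() further below, whose index is cast to
# uint64 so it matches the atlas label values.
#
#   labels_data, labels_dims, _ = load_nifti("mni_atlas_labels.nii.gz")
#   visualize_regions(medians, xyz_to_zyx(labels_data), labels_dims, cmap="hot")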
def load_and_visualize_cbv(cbv_path, interpolation="linear", cmap="hot"):
cbv_data_temp, cbv_dims_temp, cbv_hdr_temp = load_nifti(str(cbv_path))
cbv_data_temp[np.isnan(cbv_data_temp)] = 0
cbv_data_temp = xyz_to_zyx(cbv_data_temp)
spimagine_show_mni_volume_numpy(cbv_data_temp, stackUnits=cbv_dims_temp, interpolation=interpolation, cmap=cmap)
def load_and_visualize_labels(labels_path, interpolation="nearest", cmap="grays"):
labels_data_temp, labels_dims_temp, labels_hdr_temp = load_nifti(str(labels_path))
labels_data_temp = xyz_to_zyx(labels_data_temp)
spimagine_show_mni_volume_numpy(labels_data_temp, stackUnits=labels_dims_temp, interpolation=interpolation, cmap=cmap)
def preprocess_and_plot_all_histograms(region_values,\
hist_edges,\
raw_e1_CBV_region_histograms,\
raw_e2_CBV_region_histograms,\
topup_e1_CBV_region_histograms,\
topup_e2_CBV_region_histograms,\
epic_e1_CBV_region_histograms,\
epic_e2_CBV_region_histograms,\
two_tail_fraction=0.03,\
subject_number=0,\
ID="1099269047"):
raw_e1 = pd.DataFrame(\
data=raw_e1_CBV_region_histograms[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
raw_e1_prep = preprocess_histograms(raw_e1, two_tail_fraction=two_tail_fraction)
raw_e2 = pd.DataFrame(\
data=raw_e2_CBV_region_histograms[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
raw_e2_prep = preprocess_histograms(raw_e2, two_tail_fraction=two_tail_fraction)
topup_e1 = pd.DataFrame(\
data=topup_e1_CBV_region_histograms[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
topup_e1_prep = preprocess_histograms(topup_e1, two_tail_fraction=two_tail_fraction)
topup_e2 = pd.DataFrame(\
data=topup_e2_CBV_region_histograms[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
topup_e2_prep = preprocess_histograms(topup_e2, two_tail_fraction=two_tail_fraction)
epic_e1 = pd.DataFrame(\
data=epic_e1_CBV_region_histograms[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
epic_e1_prep = preprocess_histograms(epic_e1, two_tail_fraction=two_tail_fraction)
epic_e2 = pd.DataFrame(\
data=epic_e2_CBV_region_histograms[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
epic_e2_prep = preprocess_histograms(epic_e2, two_tail_fraction=two_tail_fraction)
fig = plt.figure(figsize=np.array([6.4*0.8*3, 4.8*0.8]))
ax1 = fig.add_subplot(2, 3, 1)
ax1.yaxis.set_label_position("right")
ax1.set_ylabel("raw e1")
ax1.plot(raw_e1_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
ax2 = fig.add_subplot(2, 3, 4, sharex=ax1, sharey=ax1)
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("raw e2")
ax2.plot(raw_e2_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
ax3 = fig.add_subplot(2, 3, 2, sharex=ax1, sharey=ax1)
ax3.yaxis.set_label_position("right")
ax3.set_ylabel("topup e1")
ax3.plot(topup_e1_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
ax4 = fig.add_subplot(2, 3, 5, sharex=ax1, sharey=ax1)
ax4.yaxis.set_label_position("right")
ax4.set_ylabel("topup e2")
ax4.plot(topup_e2_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
ax5 = fig.add_subplot(2, 3, 3, sharex=ax1, sharey=ax1)
ax5.yaxis.set_label_position("right")
ax5.set_ylabel("epic e1")
ax5.plot(epic_e1_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
ax6 = fig.add_subplot(2, 3, 6, sharex=ax1, sharey=ax1)
ax6.yaxis.set_label_position("right")
ax6.set_ylabel("epic e2")
ax6.plot(epic_e2_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
#fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
#fig.tight_layout()
title = "MNI rCBV %s" % ID
fig.suptitle(title)
return raw_e1_prep,\
raw_e2_prep,\
topup_e1_prep,\
topup_e2_prep,\
epic_e1_prep,\
epic_e2_prep
def preprocess_and_plot_selected_histograms(region_values,\
hist_edges,\
raw_e1_CBV_region_histograms,\
raw_e2_CBV_region_histograms,\
topup_e1_CBV_region_histograms,\
topup_e2_CBV_region_histograms,\
epic_e1_CBV_region_histograms,\
epic_e2_CBV_region_histograms,\
two_tail_fraction=0.03,\
subject_number=0,\
correction_method="raw",\
ID="1099269047"):
fig = plt.figure(figsize=np.array([6.4*0.8, 4.8*0.8]))
cbv_hists_e1 = pd.DataFrame(\
data=eval(correction_method + "_e1_CBV_region_histograms")[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
cbv_hists_e1_prep = preprocess_histograms(cbv_hists_e1, two_tail_fraction=two_tail_fraction)
cbv_hists_e2 = pd.DataFrame(\
data=eval(correction_method + "_e2_CBV_region_histograms")[subject_number], \
index=region_values, \
columns=hist_edges[0][:-1])
cbv_hists_e2_prep = preprocess_histograms(cbv_hists_e2, two_tail_fraction=two_tail_fraction)
ax1 = fig.add_subplot(2, 1, 1)
ax1.yaxis.set_label_position("right")
ax1.set_ylabel("e1")
ax1.plot(cbv_hists_e1_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
ax2 = fig.add_subplot(2, 1, 2)
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("e2")
ax2.plot(cbv_hists_e2_prep.transpose());
plt.subplots_adjust(hspace = 0.001)
title = "MNI rCBV %s %s" % (ID, correction_method)
fig.suptitle(title)
fig2 = plt.figure()
cbv_hists_e1_prep.mean().plot()
cbv_hists_e1_prep.median().plot()
cbv_hists_e1_prep.std().plot()
cbv_hists_e2_prep.mean().plot()
cbv_hists_e2_prep.median().plot()
cbv_hists_e2_prep.std().plot()
plt.legend(("cbv_hists_e1 mean", "cbv_hists_e1 median", "cbv_hists_e1 std", "cbv_hists_e2 mean", "cbv_hists_e2 median", "cbv_hists_e2 std"))
plt.suptitle(title)
def sorted_boxplot_histogram_distances(all_distances_df, all_relative_rcbv_df, ax, region_values, region_names, region_names_to_exclude, ylabel2="Sorted Box Plot", ylabel="Hellinger distance", title="", xlabel="", top=20):
# Drop excluded regions
all_distances_df = \
all_distances_df.drop([str(region_values[np.where(region_names == region_name)[0][0]]) for region_name in region_names_to_exclude], axis=1)
all_relative_rcbv_df = \
all_relative_rcbv_df.drop([str(region_values[np.where(region_names == region_name)[0][0]]) for region_name in region_names_to_exclude], axis=1)
# --for distances
# Calculate medians
all_distances_medians_df = all_distances_df.median()
# Sort the medians
all_distances_medians_df.sort_values(ascending=False, inplace=True)
# Show the data according to the sorted medians
all_distances_sorted_df = all_distances_df[all_distances_medians_df.index]
# Remove regions if number of observations is less than 10
all_distances_sorted_df = drop_regions_with_few_comparisons(all_distances_sorted_df, num_comparisons_below=10)
# --for relative rcbv
# Calculate medians
all_relative_rcbv_medians_df = all_relative_rcbv_df.median()
if top=="all":
selected_data = all_distances_sorted_df
else:
# Pick top top highest columns after descending median
selected_data = all_distances_sorted_df[all_distances_sorted_df.keys()[0:top]]
# ymax used later for correct placement of title text
ax.set_ylim(auto=True)
"""
_, ymax = ax.get_ylim()
this_ymax = selected_data.median().max()
if ymax == 1 :
# Replace the default value
ax.set_ylim(top=this_ymax)
"""
_, ymax = ax.get_ylim()
# Create boxplot
# Reverse selected data
selected_data = selected_data[reversed(selected_data.keys())]
# Create boxplot
bp = selected_data.boxplot(rot=-90, ax=ax, grid=False)
#bp = selected_data.boxplot(rot=-55, ax=ax, grid=False)
# A list that is used to give the x placement of the number-of-observations text
x = np.arange(selected_data.shape[1])
# Count the number of observations in each column
noofobs = selected_data.notna().sum()
# Write the number of observations above each box in the plot
#for tick, label in zip(x, bp.get_xticklabels()):
# bp.text(tick+1, ymax+0.05*ymax, noofobs[tick], horizontalalignment='center')
# Add number of observations to xticklabels
xticklabels = []
for tick, label in zip(x, bp.get_xticklabels()):
#xticklabels += [label.get_text() + " (n=" + str(noofobs[tick]) + ")"]
region_name = region_names[np.where(region_values == np.int64(label.get_text()))[0][0]]
region_median_relative_rcbv_change = all_relative_rcbv_medians_df.loc[label.get_text()]
if region_name[0:len("Left & right")] == "Left & right":
region_name = region_name[len("Left & right"):len(region_name)]
region_text_space = [" " for s in range(30-len("Left & right"))]
#region_text_space = [" " for s in range(30)] # 36
if len(region_name) > len(region_text_space):
space_indexes = np.where(np.array(list(region_name)) == " ")[0]
if len(space_indexes) > 1 and space_indexes[-2] != 0:
space_index = space_indexes[-2]
elif len(space_indexes) == 1 and space_indexes[-1] != 0:
space_index = len(region_name)-1
else:
space_index = space_indexes[-1]
line1 = region_name[0:space_index] + "\n"
line2 = region_name[space_index+1:]
#print("---")
#print(space_index)
#print(line1)
#print("--")
#print(line2)
#print("---")
#region_text_space[0:len(line1 + line2)] = line1 + line2
#region_name = region_name[0:len(region_text_space)]
#region_text_space[0:len(region_name)] = region_name
#region_text_space[-3:] = "..."
else:
line1 = region_name
line2 = ""
#region_text_space[0:len(region_name)] = region_name
# Some cleaning:
# Remove first char if space " "
# Set first char uppercase if lowercase
if line1[0] == " ":
line1 = "".join(list(line1[1:]))
if line1[0].islower():
line1 = "".join([line1[0].upper()]+list(line1[1:]))
#region_text = "".join(region_text_space)
#number_and_noofobs = str(tick+1) + ". (n=" + format(noofobs[tick], '02d') + ") "
#print(number_and_noofobs)
#print(len(number_and_noofobs))
#description_text = number_and_noofobs + line1 + "".join([" " for s in range(len(number_and_noofobs)+7)]) + line2 #+ " {0:.3f}".format(region_median_relative_rcbv_change)
tick_text = str(selected_data.shape[1]-tick) + ". "
if line2 == "":
description_text = tick_text + line1 + " " + "(n=" + \
format(noofobs[tick], '02d') + ")"
else:
description_text = tick_text + line1 + "".join([" " for s in range(len(tick_text)+1)]) + line2 + " (n=" + \
format(noofobs[tick], '02d') + ")"
#print(description_text + "|")
xticklabels += [description_text]
# Update the histogram plot with the new xticklabels
bp.set_xticklabels(xticklabels, {'fontsize': 12}, multialignment="left")
plt.xlabel(xlabel)
#bp.set_ylabel(ylabel, rotation=-90)
plt.ylabel(ylabel, rotation=-90, labelpad=11)
# Used as a placement for title
bp.text((tick//2)+1, ymax+0.2*ymax, title, horizontalalignment='center')
# Second twin y axis
ax_sec = ax.twinx()
ax_sec.set_yticklabels([])
ax_sec.yaxis.set_ticks_position('none')
ax_sec.set_ylabel(ylabel2, color='b')
# Add mean(medians) + 0.96 * std(medians) line
bp.plot(x+1, \
[all_distances_sorted_df.median().mean() + 0.96 * all_distances_sorted_df.median().std()]*len(x), \
linestyle="--")
# Return top medians df for further analysis
to_return = selected_data.median()
# Set the index elements
# (here the region values originally being string)
# to uint64 for compatibility with visualize_regions()
to_return.index = to_return.index.astype(np.uint64)
return to_return
def sorted_boxplot_significant_relative_rcbv_change(all_total_rcbv_df, pvalues_ser, p_alpha, ax, region_values, region_names, region_names_to_exclude, ascending=False, ylabel2="Sorted Box Plot", ylabel="Hellinger distance", title="", xlabel="", top=20):
# Number of observations (or) patients in each region before taking Wilcoxon signed rank test on the region
#num_observations = drop_regions_with_few_comparisons(all_total_rcbv_df, num_comparisons_below=10).notna().sum()
# Exclude regions
all_total_rcbv_df = \
all_total_rcbv_df.drop([str(region_values[np.where(region_names == region_name)[0][0]]) for region_name in region_names_to_exclude], axis=1)
# --for relative rcbv
# Calculate medians
#all_total_rcbv_medians_df = all_total_rcbv_df.median() # nb! This skips the np.nan values before median calculation
#all_total_rcbv_medians_df.sort_values(ascending=ascending, inplace=True)
# Show the data according to the sorted medians
#all_total_rcbv_sorted_df = all_total_rcbv_df[all_total_rcbv_medians_df.index]
# Remove regions if number of observations is less than 10
all_total_rcbv_rdropped_df = drop_regions_with_few_comparisons(all_total_rcbv_df, num_comparisons_below=10)
all_total_rcbv_medians_rdropped_ser = all_total_rcbv_rdropped_df.median() # Just for joining with pvalues_significant_sorted_ser
# Extract significant regions by pvalues_ser and p_alpha
#pvalues_significant_ser = pvalues_ser[pvalues_ser < p_alpha]
#pvalues_significant_sorted_ser = pvalues_significant_ser.sort_values(ascending=True)
#p_values_significant_sorted_joined_df = pvalues_significant_sorted_ser.to_frame().join(all_total_rcbv_medians_rdropped_ser.to_frame(), how="inner", rsuffix='_right')["0"]
p_values_joined_ser = pvalues_ser.to_frame().join(all_total_rcbv_medians_rdropped_ser.to_frame(), how="inner", rsuffix='_right')["0"]
# Extract significant regions
p_values_joined_significant_ser = p_values_joined_ser[p_values_joined_ser < p_alpha/len(p_values_joined_ser)] # Bonferroni correction for multiple tests. https://en.wikipedia.org/wiki/Bonferroni_correction
#p_values_joined_significant_ser = \
#p_values_joined_ser[[region_value_object for region_value_object in p_values_joined_ser.keys() if p_values_joined_ser[region_value_object] < p_alpha/num_observations[region_value_object]]]
p_values_joined_significant_sorted_ser = p_values_joined_significant_ser.sort_values(ascending=True)
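# Worked example of the Bonferroni threshold above (added comment): with
# p_alpha = 0.05 and, say, 60 regions surviving the join, a region counts as
# significant only if its p-value is below 0.05 / 60 ~= 8.3e-4.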
if top=="all":
selected_data = all_total_rcbv_rdropped_df
elif top=="significant_all":
selected_data = all_total_rcbv_rdropped_df[p_values_joined_significant_sorted_ser.index]
elif top=="significant_top_10":
selected_data = all_total_rcbv_rdropped_df[p_values_joined_significant_sorted_ser.index[0:10]]
# ymax used later for correct placement of title text
ax.set_ylim(auto=True)
"""
_, ymax = ax.get_ylim()
this_ymax = selected_data.median().max()
if ymax == 1 :
# Replace the default value
ax.set_ylim(top=this_ymax)
"""
_, ymax = ax.get_ylim()
# Create boxplot
# Reverse selected data
selected_data = selected_data[reversed(selected_data.keys())]
bp = selected_data.boxplot(rot=-90, ax=ax, grid=False)
#bp = selected_data.boxplot(rot=-55, ax=ax, grid=False)
# A list that is used to give the x placement of the number-of-observations text
x = np.arange(selected_data.shape[1])
# Count the number of observations in each column
noofobs = selected_data.notna().sum()
#print(selected_data.shape)
# Write the number of observations above each box in the plot
#for tick, label in zip(x, bp.get_xticklabels()):
# bp.text(tick+1, ymax+0.05*ymax, noofobs[tick], horizontalalignment='center')
# Add number of observations to xticklabels
xticklabels = []
for tick, label in zip(x, bp.get_xticklabels()):
#xticklabels += [label.get_text() + " (n=" + str(noofobs[tick]) + ")"]
region_name = region_names[np.where(region_values == np.int64(label.get_text()))[0][0]]
if region_name[0:len("Left & right")] == "Left & right":
region_name = region_name[len("Left & right"):len(region_name)]
region_text_space = [" " for s in range(30-len("Left & right"))]
#region_text_space = [" " for s in range(36)]
if len(region_name) > len(region_text_space):
space_indexes = np.where(np.array(list(region_name)) == " ")[0]
if len(space_indexes) > 1 and space_indexes[-2] != 0:
space_index = space_indexes[-2]
elif len(space_indexes) == 1 and space_indexes[-1] != 0:
space_index = len(region_name)-1
else:
space_index = space_indexes[-1]
line1 = region_name[0:space_index] + "\n"
line2 = region_name[space_index+1:]
#region_name = region_name[0:len(region_text_space)]
#region_text_space[0:len(region_name)] = region_name
#region_text_space[-3:] = "..."
else:
line1 = region_name
line2 = ""
# Some cleaning:
# Remove first char if space " "
# Set first char uppercase if lowercase
if line1[0] == " ":
line1 = "".join(list(line1[1:]))
if line1[0].islower():
line1 = "".join([line1[0].upper()]+list(line1[1:]))
#region_text_space[0:len(region_name)] = region_name
#region_text = "".join(region_text_space)
#description_text = str(tick+1) + ". " + r"\textbf{" + region_text + "}" + "\n (n=" + \
#description_text = str(tick+1) + ". " + region_text + "\n (n=" + \
#format(noofobs[tick], '02d') + ", p=" + "{0:.6f}".format(p_values_joined_significant_sorted_ser[label.get_text()]) + ")"
tick_text = str(selected_data.shape[1]-tick) + ". "
if line2 == "":
description_text = tick_text + line1 + " " + "(n=" + \
format(noofobs[tick], '02d') + ")"#+ ", " + "{0:.6f}".format(p_values_joined_ser[label.get_text()]) + "<" + "{0:.6f}".format(p_alpha/len(p_values_joined_ser)) + ")"
#format(noofobs[tick], '02d') + ")"
else:
description_text = tick_text + line1 + "".join([" " for s in range(len(tick_text)+1)]) + line2 + " (n=" + \
format(noofobs[tick], '02d') + ")"#+ ", " + "{0:.6f}".format(p_values_joined_ser[label.get_text()]) + "<" + "{0:.6f}".format(p_alpha/len(p_values_joined_ser)) + ")"
#format(noofobs[tick], '02d') + ")"
#print(description_text + "|")
xticklabels += [description_text]
# Update the histogram plot with the new xticklabels
bp.set_xticklabels(xticklabels, {'fontsize': 12}, multialignment="left")
"""
bp.set_xticklabels(xticklabels, y=ymax)
# Create offset transform
dx = -9/72.; dy = 5/72.
offset = matplotlib.transforms.ScaledTranslation(dx, dy, plt.gcf().dpi_scale_trans)
# apply offset transform to all x ticklabels.
for label in ax.xaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset)
"""
plt.xlabel(xlabel)
"""
if ylabel[-len("(A)"):] == "(A)" or ylabel[-len("(A)"):] == "(C)" or ylabel[-len("(A)"):] == " SE":
plt.ylabel(ylabel, rotation=-90, labelpad=11, color="red")
elif ylabel[-len("(A)"):] == "(B)" or ylabel[-len("(A)"):] == "(D)" or ylabel[-len("(A)"):] == " GE":
plt.ylabel(ylabel, rotation=-90, labelpad=11, color="blue")
else:
plt.ylabel(ylabel, rotation=-90, labelpad=11)
"""
plt.ylabel(ylabel, rotation=-90, labelpad=11)
# Used as a placement for title
bp.text((tick//2)+1, ymax+0.2*ymax, title, horizontalalignment='center')
# Second twin y axis
ax_sec = ax.twinx()
ax_sec.set_yticklabels([])
ax_sec.yaxis.set_ticks_position('none')
ax_sec.set_ylabel(ylabel2, rotation=-90, fontweight="bold")
"""
if ascending:
# Add mean(medians) - 0.96 * std(medians) line
bp.plot(x+1, \
[all_total_rcbv_rdropped_df.median().mean() - 0.96 * all_total_rcbv_rdropped_df.median().std()]*len(x), \
linestyle="--")
else:
# Add mean(medians) + 0.96 * std(medians) line
bp.plot(x+1, \
[all_total_rcbv_rdropped_df.median().mean() + 0.96 * all_total_rcbv_rdropped_df.median().std()]*len(x), \
linestyle="--")
"""
bp.plot(x+1, \
[1]*len(x), \
linestyle="--")
# Return top medians df for further analysis
to_return = selected_data.median()
# Set the index elements
# (here the region values originally being string)
# to uint64 for compatibility with visualize_regions()
to_return.index = to_return.index.astype(np.uint64)
return to_return
def sorted_medians(df, \
region_values, \
region_names, \
region_names_to_exclude, \
ascending=False, \
top=20):
# Drop excluded regions
df = \
df.drop([str(region_values[np.where(region_names == region_name)[0][0]]) for region_name in region_names_to_exclude], axis=1)
# Calculate medians
medians_df = df.median()
# Sort the medians
medians_df.sort_values(ascending=ascending, inplace=True)
# Show the data according to the sorted medians
sorted_df = df[medians_df.index]
# Remove regions if number of observations is less than 10
sorted_df = drop_regions_with_few_comparisons(sorted_df, num_comparisons_below=10)
if top=="all":
selected_data = sorted_df
else:
# Pick top top highest columns after descending median
selected_data = sorted_df[sorted_df.keys()[0:top]]
# Return top medians df for further analysis
to_return = selected_data.median()
# Set the index elements
# (here the region values originally being string)
# to uint64 for compatibility with visualize_regions()
to_return.index = to_return.index.astype(np.uint64)
return to_return
def sorted_medians_significant(df, \
pvalues_ser, \
p_alpha, \
region_values, \
region_names, \
region_names_to_exclude, \
ascending=False, \
top=20):
# Drop excluded regions
df_excluded_dropped = \
df.drop([str(region_values[np.where(region_names == region_name)[0][0]]) for region_name in region_names_to_exclude], axis=1)
# Calculate medians
#medians_df = df.median()
# Sort the medians
#medians_df.sort_values(ascending=ascending, inplace=True)
# Show the data according to the sorted medians
#sorted_df = df[medians_df.index]
# Remove regions if number of observations is less than 10
df_excluded_and_few_regions_dropped = drop_regions_with_few_comparisons(df_excluded_dropped, num_comparisons_below=10)
ser_medians_excluded_and_few_regions_dropped = df_excluded_and_few_regions_dropped.median()
# Extract significant regions by pvalues_ser and p_alpha
#pvalues_significant_ser = pvalues_ser[pvalues_ser < p_alpha]
#pvalues_significant_sorted_ser = pvalues_significant_ser.sort_values(ascending=True)
#p_values_significant_sorted_joined_df = pvalues_significant_sorted_ser.to_frame().join(ser_medians_excluded_and_few_regions_dropped.to_frame(), how="inner", rsuffix='right')["0"]
p_values_joined_ser = pvalues_ser.to_frame().join(ser_medians_excluded_and_few_regions_dropped.to_frame(), how="inner", rsuffix='_right')["0"]
p_values_joined_significant_ser = p_values_joined_ser[p_values_joined_ser < p_alpha/len(p_values_joined_ser)] # Bonferroni correction for multiple tests. https://en.wikipedia.org/wiki/Bonferroni_correction
p_values_joined_significant_sorted_ser = p_values_joined_significant_ser.sort_values(ascending=True)
if top=="all":
selected_data = df_excluded_and_few_regions_dropped
elif top=="significant_all":
selected_data = df_excluded_and_few_regions_dropped[p_values_joined_significant_sorted_ser.index]
elif top=="significant_top_10":
selected_data = df_excluded_and_few_regions_dropped[p_values_joined_significant_sorted_ser.index[0:10]]
# Return top medians df for further analysis
to_return = selected_data.median()
# Set the index elements
# (here the region values originally being string)
# to uint64 for compatibility with visualize_regions()
to_return.index = to_return.index.astype(np.uint64)
return to_return
def sorted_means(df, \
region_values, \
region_names, \
region_names_to_exclude, \
ascending=False, \
top=20):
# Drop excluded regions
df = \
df.drop([str(region_values[np.where(region_names == region_name)[0][0]]) for region_name in region_names_to_exclude], axis=1)
# Calculate means
means_df = df.mean()
# Sort the means
means_df.sort_values(ascending=ascending, inplace=True)
# Show the data according to the sorted means
sorted_df = df[means_df.index]
# Remove regions if number of observations is less than 10
sorted_df = drop_regions_with_few_comparisons(sorted_df, num_comparisons_below=10)
if top=="all":
selected_data = sorted_df
else:
# Pick top top highest columns after descending mean
selected_data = sorted_df[sorted_df.keys()[0:top]]
# Return top means df for further analysis
to_return = selected_data.mean()
# Set the index elements
# (here the region values originally being string)
# to uint64 for compatibility with visualize_regions()
to_return.index = to_return.index.astype(np.uint64)
return to_return
def render_regions_set_to_pngs(regions_df, \
labels_data, \
labels_dims, \
output_rel_dir, \
png_prefix="", \
interpolation="nearest", \
cmap="hot", \
windowMin=0, \
windowMax=1, \
blur_3d=False, \
blur_3d_sigma=1):
# The regions data is visualized as a 3D heat map
heat_map = labels_data.copy()
# Fill regions in the the heat volume by
# corresponding values in regions_df
# Set a region to 0 if it is not in regions_df
for region_value in np.array(np.unique(heat_map)):
if region_value not in regions_df.index:
heat_map[heat_map == region_value] = 0
else:
heat_map[heat_map == region_value] = regions_df.loc[region_value]
# Three views are rendered to file with names:
png_file_1 = output_rel_dir + "/" + png_prefix + "-axial-inferior-superior.png"
png_file_2 = output_rel_dir + "/" + png_prefix + "-sagittal-r-l.png"
png_file_3 = output_rel_dir + "/" + png_prefix + "-mixed-r-l-anterior-posterior.png"
# Create a spimagine instance, then save three views to separate pngs
volfig()
spim_widget = \
volshow(heat_map, autoscale=False, stackUnits=labels_dims, interpolation=interpolation)
#volshow(heat_map[::-1, ::-1, ::-1], autoscale=False, stackUnits=labels_dims, interpolation=interpolation)
# A hack to enable blur. Currently not working, so should be disabled.
if blur_3d:
# Add a blur module with preferred sigma
spim_widget.impListView.add_image_processor(BlurProcessor(blur_3d_sigma))
# Animate click for enabling the blur
spim_widget.impListView.impViews[-1].children()[0].animateClick()
# Set colormap
spim_widget.set_colormap(cmap)
# Windowing
#spim_widget.transform.setValueScale(regions_df.min(), regions_df.max())
#spim_widget.transform.setValueScale(0, regions_df.max())
spim_widget.transform.setValueScale(windowMin, windowMax)
# Set interpolation directly
#spim_widget.transform.setInterpolate(1)
# Set bounding box not visible
spim_widget.transform.setBox(False)
# Zoom
spim_widget.transform.setZoom(1.4)
# NB: The rotations are not adding up.
# each spim_widget.transform.setRotation call
# rotates from original orientation given by volshow()
# First view
#spim_widget.transform.setRotation(np.pi/2,0,1,0)
#spim_widget.transform.setQuaternion(Quaternion(-0.005634209439510011,0.00790509382124309,-0.0013812284289010514,-0.9999519273706857))
# Up rightwards
spim_widget.transform.setQuaternion(Quaternion(-0.018120594789136298,0.708165522710642,0.7057824278204939,0.006663271093062176))
# Take snapshot
spim_widget.saveFrame(png_file_1)
# Second view
#spim_widget.transform.setRotation(np.pi/4,0,1,0)
#spim_widget.transform.setQuaternion(Quaternion(0.007638906066214874,-0.7092538697232732,-0.004760086014776442,0.7048956918250604))
# Sagittal
spim_widget.transform.setQuaternion(Quaternion(-0.0020183487063897406,0.7073151860516024,-0.0032067927203972405,0.7068881584667737))
# Take snapshot
spim_widget.saveFrame(png_file_2)
# Third view
#spim_widget.transform.setRotation(np.pi/8,-0.6,0.5,1)
#spim_widget.transform.setQuaternion(Quaternion(-0.3228904671232426,-0.8924886253708287,-0.28916944161613134,0.12484724075684332))
# Fancy
spim_widget.transform.setQuaternion(Quaternion(0.15439557175611332,0.7306565059064623,0.5147772256894417,0.4210789500980262))
# Take snapshot
spim_widget.saveFrame(png_file_3)
# Close spimagine
spim_widget.closeMe()
#spim_widget.minSlider.value()
#spim_widget.maxSlider.value()
#spim_widget.maxSlider.onChanged(500 + (500/16) * regions_df.max())
#spim_widget.maxSlider.onChanged(1000)
#spim_widget.minSlider.onChanged(500 + (500/16) * regions_df.min())
#spim_widget.minSlider.onChanged(0)
return png_file_1, png_file_2, png_file_3
def sorted_boxplot_heatmap_figure(df_1, \
df_1_rcbv, \
df_2, \
df_2_rcbv, \
df_3, \
df_3_rcbv, \
df_4, \
df_4_rcbv, \
ylabel_1, \
ylabel_2, \
ylabel_3, \
ylabel_4, \
distance_name, \
labels_data, \
labels_dims, \
CBV_out_dir, \
rendered_image_files_list, \
region_values, \
region_names, \
region_names_to_exclude, \
top = "all", \
render_pngs = True, \
windowMin = 0, \
windowMax = 1*0.8, \
interpolation = "nearest", \
cmap = "hot", \
blur_3d = False, \
blur_3d_sigma = 1, \
method_comparison = False):
fig = plt.figure(figsize=np.array([9, 10]))
fig.patch.set_facecolor((173/255, 216/255, 230/255))
gs1 = gridspec.GridSpec(4, 3)
gs1.update(left=0.08, right=0.48, bottom=0.09, top=0.91, hspace=0.5, wspace=0)
ax1 = plt.subplot(gs1[0, :])
medians_df_1 = sorted_boxplot_histogram_distances(df_1, \
df_1_rcbv, \
ax1, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2=ylabel_1, \
ylabel="", \
title="", \
xlabel="",
top=top)
ax2 = plt.subplot(gs1[1, :])
medians_df_2 = sorted_boxplot_histogram_distances(df_2, \
df_2_rcbv, \
ax2, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2=ylabel_2, \
ylabel="", \
title="", \
xlabel="", \
top=top)
ax3 = plt.subplot(gs1[2, :])
medians_df_3 = sorted_boxplot_histogram_distances(df_3, \
df_3_rcbv, \
ax3, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2=ylabel_3, \
ylabel="", \
title="", \
xlabel="", \
top=top)
ax4 = plt.subplot(gs1[3, :])
medians_df_4 = sorted_boxplot_histogram_distances(df_4, \
df_4_rcbv, \
ax4, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2=ylabel_4, \
ylabel="", \
title="", \
xlabel="", \
top=top)
gs2 = gridspec.GridSpec(4, 2)
gs2.update(left=0.52, right=1, bottom=0.09, top=0.91, hspace=0.5, wspace=0)
if render_pngs:
rendered_image_files_list = []
# Render pngs for raw_vs_topup_e1_hellinger_medians_df
if render_pngs:
r1_png_file_1, \
r1_png_file_2, \
r1_png_file_3 = \
render_regions_set_to_pngs(medians_df_1, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r1", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r1_png_file_1]
rendered_image_files_list += [r1_png_file_2]
rendered_image_files_list += [r1_png_file_3]
else:
r1_png_file_1, \
r1_png_file_2, \
r1_png_file_3 = \
rendered_image_files_list[0], \
rendered_image_files_list[1], \
rendered_image_files_list[2]
ax5 = plt.subplot(gs2[0, 0])
png_1=mpimg.imread(r1_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
ax6 = plt.subplot(gs2[0, 1])
png_2=mpimg.imread(r1_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax7 = plt.subplot(gs2[0, 2])
png_3=mpimg.imread(r1_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for raw_vs_epic_e1_hellinger_medians_df
if render_pngs:
r2_png_file_1, \
r2_png_file_2, \
r2_png_file_3 = \
render_regions_set_to_pngs(medians_df_2, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r2", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r2_png_file_1]
rendered_image_files_list += [r2_png_file_2]
rendered_image_files_list += [r2_png_file_3]
else:
r2_png_file_1, \
r2_png_file_2, \
r2_png_file_3 = \
rendered_image_files_list[3], \
rendered_image_files_list[4], \
rendered_image_files_list[5]
ax8 = plt.subplot(gs2[1, 0])
png_1=mpimg.imread(r2_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
ax9 = plt.subplot(gs2[1, 1])
png_2=mpimg.imread(r2_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax10 = plt.subplot(gs2[1, 2])
png_3=mpimg.imread(r2_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for raw_vs_topup_e2_hellinger_medians_df
if render_pngs:
r3_png_file_1, \
r3_png_file_2, \
r3_png_file_3 = \
render_regions_set_to_pngs(medians_df_3, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r3", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r3_png_file_1]
rendered_image_files_list += [r3_png_file_2]
rendered_image_files_list += [r3_png_file_3]
else:
r3_png_file_1, \
r3_png_file_2, \
r3_png_file_3 = \
rendered_image_files_list[6], \
rendered_image_files_list[7], \
rendered_image_files_list[8]
ax11 = plt.subplot(gs2[2, 0])
png_1=mpimg.imread(r3_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
ax12 = plt.subplot(gs2[2, 1])
png_2=mpimg.imread(r3_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax13 = plt.subplot(gs2[2, 2])
png_3=mpimg.imread(r3_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for raw_vs_epic_e2_hellinger_medians_df
if render_pngs:
r4_png_file_1, \
r4_png_file_2, \
r4_png_file_3 = \
render_regions_set_to_pngs(medians_df_4, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r4", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r4_png_file_1]
rendered_image_files_list += [r4_png_file_2]
rendered_image_files_list += [r4_png_file_3]
else:
r4_png_file_1, \
r4_png_file_2, \
r4_png_file_3 = \
rendered_image_files_list[9], \
rendered_image_files_list[10], \
rendered_image_files_list[11]
ax14 = plt.subplot(gs2[3, 0])
png_1=mpimg.imread(r4_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
#plt.title("axial \ninferior-superior", y=-0.4)
ax15 = plt.subplot(gs2[3, 1])
png_2=mpimg.imread(r4_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
#plt.title("sagittal \nright-left", y=-0.4)
"""
ax16 = plt.subplot(gs2[3, 2])
png_3=mpimg.imread(r4_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
plt.title("mixed \nright-left \nanterior-posterior", y=-0.4)
"""
# Common x axis
#fig.text(0.5, 0.04, 'common X', ha='center')
# Common y axis
#fig.text(0.01, 0.5, distance_name + " distance", va="center", rotation="vertical")
# Box plots supertitle
#fig.text(0.135, 0.975, "Top " + str(top) + " changing regions")
# Images supertitle
#if method_comparison:
# fig.text(0.6, 0.95, "Regions most different between correction \nmethods. Based on all median values")
#else:
# fig.text(0.6, 0.95, "Regions most affected \nby correction. Based on all median values")
# Supertitle
#fig.suptitle("rCBV change between TOPUP and EPIC corrections")
#fig.suptitle("rCBV change between corrections according to " + distance_name + " distance")
#plt.subplots_adjust(wspace=0)
#fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
#fig.subplots_adjust(left=0.3, bottom=0.3, right=0.3, top=0.3, wspace=0.3, hspace=0.3)
#fig.tight_layout()
return rendered_image_files_list
def sorted_boxplot_heatmap_figure_distances(df_1, \
df_1_rcbv, \
df_2, \
df_2_rcbv, \
df_3, \
df_3_rcbv, \
df_4, \
df_4_rcbv, \
ylabel_1, \
ylabel_2, \
ylabel_3, \
ylabel_4, \
distance_name, \
labels_data, \
labels_dims, \
CBV_out_dir, \
rendered_image_files_list, \
region_values, \
region_names, \
region_names_to_exclude, \
top = "all", \
render_pngs = True, \
windowMin = 0, \
windowMax = 1*0.8, \
interpolation = "nearest", \
cmap = "hot", \
blur_3d = False, \
blur_3d_sigma = 1, \
method_comparison = False):
fig = plt.figure(figsize=np.array([9, 10]))
fig.patch.set_facecolor((173/255, 216/255, 230/255))
gs1 = gridspec.GridSpec(4, 3)
gs1.update(left=0.08, right=0.48, bottom=0.09, top=0.91, hspace=0.5, wspace=0)
ax1 = plt.subplot(gs1[0, :])
medians_df_1 = sorted_boxplot_histogram_distances(df_1, \
df_1_rcbv, \
ax1, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2="", \
ylabel="", \
title="", \
xlabel="",
top=top)
if method_comparison:
ax1.set_ylabel("Hellinger distance (A)", rotation=-90, labelpad=11, color="red")
else:
ax1.set_ylabel(distance_name + " distance (A)", rotation=-90, labelpad=11, color="red")
#ax2 = plt.subplot(gs1[1, :])
medians_df_2 = sorted_medians(df_2, \
region_values, \
region_names, \
region_names_to_exclude, \
top=top)
ax3 = plt.subplot(gs1[2, :])
medians_df_3 = sorted_boxplot_histogram_distances(df_3, \
df_3_rcbv, \
ax3, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2="", \
ylabel="", \
title="", \
xlabel="", \
top=top)
if method_comparison:
ax3.set_ylabel("Wasserstein distance (C)", rotation=-90, labelpad=11, color="red")
else:
ax3.set_ylabel(distance_name + " distance (C)", rotation=-90, labelpad=11, color="red")
#ax4 = plt.subplot(gs1[3, :])
medians_df_4 = sorted_medians(df_4, \
region_values, \
region_names, \
region_names_to_exclude, \
top=top)
gs2 = gridspec.GridSpec(4, 2)
gs2.update(left=0.52, right=1, bottom=0.09, top=0.91, hspace=0.5, wspace=0)
if render_pngs:
rendered_image_files_list = []
# Render pngs for medians_df_1
if render_pngs:
r1_png_file_1, \
r1_png_file_2, \
r1_png_file_3 = \
render_regions_set_to_pngs(medians_df_1, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r1", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r1_png_file_1]
rendered_image_files_list += [r1_png_file_2]
rendered_image_files_list += [r1_png_file_3]
else:
r1_png_file_1, \
r1_png_file_2, \
r1_png_file_3 = \
rendered_image_files_list[0], \
rendered_image_files_list[1], \
rendered_image_files_list[2]
ax5 = plt.subplot(gs2[0, 0])
png_1=mpimg.imread(r1_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
plt.text(x=-150, y=772, s=ylabel_1, rotation=-90, color="red", fontsize="medium")
ax6 = plt.subplot(gs2[0, 1])
png_2=mpimg.imread(r1_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax7 = plt.subplot(gs2[0, 2])
png_3=mpimg.imread(r1_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for medians_df_2
if render_pngs:
r2_png_file_1, \
r2_png_file_2, \
r2_png_file_3 = \
render_regions_set_to_pngs(medians_df_2, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r2", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r2_png_file_1]
rendered_image_files_list += [r2_png_file_2]
rendered_image_files_list += [r2_png_file_3]
else:
r2_png_file_1, \
r2_png_file_2, \
r2_png_file_3 = \
rendered_image_files_list[3], \
rendered_image_files_list[4], \
rendered_image_files_list[5]
ax8 = plt.subplot(gs2[1, 0])
png_1=mpimg.imread(r2_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
plt.text(x=-150, y=772, s=ylabel_2, rotation=-90, color="blue", fontsize="medium")
ax9 = plt.subplot(gs2[1, 1])
png_2=mpimg.imread(r2_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax10 = plt.subplot(gs2[1, 2])
png_3=mpimg.imread(r2_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for medians_df_3
if render_pngs:
r3_png_file_1, \
r3_png_file_2, \
r3_png_file_3 = \
render_regions_set_to_pngs(medians_df_3, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r3", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r3_png_file_1]
rendered_image_files_list += [r3_png_file_2]
rendered_image_files_list += [r3_png_file_3]
else:
r3_png_file_1, \
r3_png_file_2, \
r3_png_file_3 = \
rendered_image_files_list[6], \
rendered_image_files_list[7], \
rendered_image_files_list[8]
ax11 = plt.subplot(gs2[2, 0])
png_1=mpimg.imread(r3_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
plt.text(x=-150, y=772, s=ylabel_3, rotation=-90, color="red", fontsize="medium")
ax12 = plt.subplot(gs2[2, 1])
png_2=mpimg.imread(r3_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax13 = plt.subplot(gs2[2, 2])
png_3=mpimg.imread(r3_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for medians_df_4
if render_pngs:
r4_png_file_1, \
r4_png_file_2, \
r4_png_file_3 = \
render_regions_set_to_pngs(medians_df_4, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r4", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r4_png_file_1]
rendered_image_files_list += [r4_png_file_2]
rendered_image_files_list += [r4_png_file_3]
else:
r4_png_file_1, \
r4_png_file_2, \
r4_png_file_3 = \
rendered_image_files_list[9], \
rendered_image_files_list[10], \
rendered_image_files_list[11]
ax14 = plt.subplot(gs2[3, 0])
png_1=mpimg.imread(r4_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
#plt.title("axial \ninferior-superior", y=-0.4)
plt.text(x=-150, y=772, s=ylabel_4, rotation=-90, color="blue", fontsize="medium")
ax15 = plt.subplot(gs2[3, 1])
png_2=mpimg.imread(r4_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
#plt.title("sagittal \nright-left", y=-0.4)
"""
ax16 = plt.subplot(gs2[3, 2])
png_3=mpimg.imread(r4_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
plt.title("mixed \nright-left \nanterior-posterior", y=-0.4)
"""
# Common x axis
#fig.text(0.5, 0.04, 'common X', ha='center')
# Common y axis
#fig.text(0.01, 0.5, distance_name + " distance", va="center", rotation="vertical")
# Box plots supertitle
#fig.text(0.135, 0.975, "Top " + str(top) + " changing regions")
# Images supertitle
#if method_comparison:
# fig.text(0.6, 0.95, "Regions most different between correction \nmethods. Based on all median values")
#else:
# fig.text(0.6, 0.95, "Regions most affected \nby correction. Based on all median values")
# Supertitle
#fig.suptitle("rCBV change between TOPUP and EPIC corrections")
#fig.suptitle("rCBV histogram distance between corrections according to " + distance_name + " distance")
#plt.subplots_adjust(wspace=0)
#fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
#fig.subplots_adjust(left=0.3, bottom=0.3, right=0.3, top=0.3, wspace=0.3, hspace=0.3)
#fig.tight_layout()
return rendered_image_files_list
def sorted_boxplot_heatmap_figure_distances_all(df_1, \
df_1_rcbv, \
df_2, \
df_2_rcbv, \
df_3, \
df_3_rcbv, \
df_4, \
df_4_rcbv, \
ylabel_1, \
ylabel_2, \
ylabel_3, \
ylabel_4, \
distance_name, \
labels_data, \
labels_dims, \
CBV_out_dir, \
rendered_image_files_list, \
region_values, \
region_names, \
region_names_to_exclude, \
top = "all", \
render_pngs = True, \
windowMin = 0, \
windowMax = 1*0.8, \
interpolation = "nearest", \
cmap = "hot", \
blur_3d = False, \
blur_3d_sigma = 1, \
method_comparison = False):
fig = plt.figure(figsize=np.array([9, 10*4]))
fig.patch.set_facecolor((173/255, 216/255, 230/255))
#matplotlib.rcParams['savefig.facecolor'] = (173/255, 216/255, 230/255)
gs1 = gridspec.GridSpec(4, 3)
gs1.update(left=0.08, right=0.48, bottom=0.09, top=0.91, hspace=0.5, wspace=0)
ax1 = plt.subplot(gs1[0, :])
medians_df_1 = sorted_boxplot_histogram_distances(df_1, \
df_1_rcbv, \
ax1, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2="", \
ylabel="", \
title="", \
xlabel="",
top=top)
if method_comparison:
ax1.set_ylabel("Hellinger distance (A)", rotation=-90, labelpad=11, color="red")
else:
ax1.set_ylabel(distance_name + " distance (A)", rotation=-90, labelpad=11, color="red")
ax2 = plt.subplot(gs1[1, :])
"""
medians_df_2 = sorted_medians(df_2, \
region_values, \
region_names, \
region_names_to_exclude, \
top=top)
"""
medians_df_2 = sorted_boxplot_histogram_distances(df_2, \
df_2_rcbv, \
ax2, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2="", \
ylabel="", \
title="", \
xlabel="",
top=top)
if method_comparison:
ax2.set_ylabel("Hellinger distance (B)", rotation=-90, labelpad=11, color="blue")
else:
ax2.set_ylabel(distance_name + " distance (B)", rotation=-90, labelpad=11, color="blue")
ax3 = plt.subplot(gs1[2, :])
medians_df_3 = sorted_boxplot_histogram_distances(df_3, \
df_3_rcbv, \
ax3, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2="", \
ylabel="", \
title="", \
xlabel="", \
top=top)
if method_comparison:
ax3.set_ylabel("Wasserstein distance (C)", rotation=-90, labelpad=11, color="red")
else:
ax3.set_ylabel(distance_name + " distance (C)", rotation=-90, labelpad=11, color="red")
ax4 = plt.subplot(gs1[3, :])
"""
medians_df_4 = sorted_medians(df_4, \
region_values, \
region_names, \
region_names_to_exclude, \
top=top)
"""
medians_df_4 = sorted_boxplot_histogram_distances(df_4, \
df_4_rcbv, \
ax4, \
region_values, \
region_names, \
region_names_to_exclude, \
ylabel2="", \
ylabel="", \
title="", \
xlabel="", \
top=top)
if method_comparison:
ax4.set_ylabel("Wasserstein distance (D)", rotation=-90, labelpad=11, color="blue")
else:
ax4.set_ylabel(distance_name + " distance (D)", rotation=-90, labelpad=11, color="blue")
gs2 = gridspec.GridSpec(4, 2)
gs2.update(left=0.52, right=1, bottom=0.09, top=0.91, hspace=0.5, wspace=0)
if render_pngs:
rendered_image_files_list = []
# Render pngs for medians_df_1
if render_pngs:
r1_png_file_1, \
r1_png_file_2, \
r1_png_file_3 = \
render_regions_set_to_pngs(medians_df_1, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r1", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r1_png_file_1]
rendered_image_files_list += [r1_png_file_2]
rendered_image_files_list += [r1_png_file_3]
else:
r1_png_file_1, \
r1_png_file_2, \
r1_png_file_3 = \
rendered_image_files_list[0], \
rendered_image_files_list[1], \
rendered_image_files_list[2]
ax5 = plt.subplot(gs2[0, 0])
png_1=mpimg.imread(r1_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
plt.text(x=-150, y=772, s=ylabel_1, rotation=-90, color="red", fontsize="medium")
ax6 = plt.subplot(gs2[0, 1])
png_2=mpimg.imread(r1_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax7 = plt.subplot(gs2[0, 2])
png_3=mpimg.imread(r1_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for medians_df_2
if render_pngs:
r2_png_file_1, \
r2_png_file_2, \
r2_png_file_3 = \
render_regions_set_to_pngs(medians_df_2, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r2", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r2_png_file_1]
rendered_image_files_list += [r2_png_file_2]
rendered_image_files_list += [r2_png_file_3]
else:
r2_png_file_1, \
r2_png_file_2, \
r2_png_file_3 = \
rendered_image_files_list[3], \
rendered_image_files_list[4], \
rendered_image_files_list[5]
ax8 = plt.subplot(gs2[1, 0])
png_1=mpimg.imread(r2_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
plt.text(x=-150, y=772, s=ylabel_2, rotation=-90, color="blue", fontsize="medium")
ax9 = plt.subplot(gs2[1, 1])
png_2=mpimg.imread(r2_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax10 = plt.subplot(gs2[1, 2])
png_3=mpimg.imread(r2_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for medians_df_3
if render_pngs:
r3_png_file_1, \
r3_png_file_2, \
r3_png_file_3 = \
render_regions_set_to_pngs(medians_df_3, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r3", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r3_png_file_1]
rendered_image_files_list += [r3_png_file_2]
rendered_image_files_list += [r3_png_file_3]
else:
r3_png_file_1, \
r3_png_file_2, \
r3_png_file_3 = \
rendered_image_files_list[6], \
rendered_image_files_list[7], \
rendered_image_files_list[8]
ax11 = plt.subplot(gs2[2, 0])
png_1=mpimg.imread(r3_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
plt.text(x=-150, y=772, s=ylabel_3, rotation=-90, color="red", fontsize="medium")
ax12 = plt.subplot(gs2[2, 1])
png_2=mpimg.imread(r3_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
"""
ax13 = plt.subplot(gs2[2, 2])
png_3=mpimg.imread(r3_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
"""
# Render pngs for medians_df_4
if render_pngs:
r4_png_file_1, \
r4_png_file_2, \
r4_png_file_3 = \
render_regions_set_to_pngs(medians_df_4, \
labels_data, \
labels_dims, \
CBV_out_dir, \
png_prefix=distance_name + "-r4", \
interpolation=interpolation, \
cmap=cmap, \
windowMin=windowMin, \
windowMax=windowMax, \
blur_3d=blur_3d, \
blur_3d_sigma=blur_3d_sigma)
rendered_image_files_list += [r4_png_file_1]
rendered_image_files_list += [r4_png_file_2]
rendered_image_files_list += [r4_png_file_3]
else:
r4_png_file_1, \
r4_png_file_2, \
r4_png_file_3 = \
rendered_image_files_list[9], \
rendered_image_files_list[10], \
rendered_image_files_list[11]
ax14 = plt.subplot(gs2[3, 0])
png_1=mpimg.imread(r4_png_file_1)
plt.imshow(png_1, aspect="equal")
plt.axis("off")
#plt.title("axial \ninferior-superior", y=-0.4)
plt.text(x=-150, y=772, s=ylabel_4, rotation=-90, color="blue", fontsize="medium")
ax15 = plt.subplot(gs2[3, 1])
png_2=mpimg.imread(r4_png_file_2)
plt.imshow(png_2, aspect="equal")
plt.axis("off")
#plt.title("sagittal \nright-left", y=-0.4)
"""
ax16 = plt.subplot(gs2[3, 2])
png_3=mpimg.imread(r4_png_file_3)
plt.imshow(png_3, aspect="equal")
plt.axis("off")
plt.title("mixed \nright-left \nanterior-posterior", y=-0.4)
"""
# Common x axis
#fig.text(0.5, 0.04, 'common X', ha='center')
# Common y axis
#fig.text(0.01, 0.5, distance_name + " distance", va="center", rotation="vertical")
# Box plots supertitle
#fig.text(0.135, 0.975, "Top " + str(top) + " changing regions")
# Images supertitle
#if method_comparison:
# fig.text(0.6, 0.95, "Regions most different between correction \nmethods. Based on all median values")
#else:
# fig.text(0.6, 0.95, "Regions most affected \nby correction. Based on all median values")
# Supertitle
#fig.suptitle("rCBV change between TOPUP and EPIC corrections")
#fig.suptitle("rCBV histogram distance between corrections according to " + distance_name + " distance")
#plt.subplots_adjust(wspace=0)
#fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
#fig.subplots_adjust(left=0.3, bottom=0.3, right=0.3, top=0.3, wspace=0.3, hspace=0.3)
#fig.tight_layout()
return rendered_image_files_list
def sorted_boxplot_heatmap_figure_rcbv(df_1_rcbv, \
df_2_rcbv, \
df_3_rcbv, \
df_4_rcbv, \
ser_1_pvalues, \
ser_2_pvalues, \
ser_3_pvalues, \
ser_4_pvalues, \
p_alpha, \
ylabel_1, \
ylabel_2, \
ylabel_3, \
ylabel_4, \
distance_name, \
labels_data, \
labels_dims, \
CBV_out_dir, \
rendered_image_files_list, \
region_values, \
region_names, \
region_names_to_exclude, \
ascending1=False, \
ascending2=False, \
ascending3=False, \
ascending4=False, \
top = "all", \
render_pngs = True, \
windowMin = 0, \
windowMax = 1*0.8, \
interpolation = "nearest", \
cmap = "hot", \
blur_3d = False, \
blur_3d_sigma = 1, \
method_comparison = False):
fig = plt.figure(figsize=
|
np.array([9, 10])
|
numpy.array
|
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
from bokeh.sampledata.stocks import AAPL, FB, GOOG, IBM, MSFT
from bokeh.plotting import *
output_server('stocks')
hold()
figure(x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
line(
|
np.array(AAPL['date'], 'M64')
|
numpy.array
|
#!/usr/bin/python3
# rosrun kuka_iiwa_utilities iiwa_camera_service.py
# rosrun kuka_iiwa_utilities iiwa_move_to.py
import numpy as np
import random
import rospy
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from gazebo_msgs.srv import GetModelState
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
from kuka_iiwa_utilities.srv import *
import time
import cv2
from cv_bridge import CvBridge, CvBridgeError
pub = rospy.Publisher('/iiwa/pos_effort_controller/command', Float64MultiArray, queue_size=10)
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(0.6) # 0.6 Hz
from scipy.interpolate import griddata
def get_centre(img):
#Convert ROS image to OpenCV image
bridge = CvBridge()
img = bridge.imgmsg_to_cv2(img, "rgb8")
#Blur the image (BGR), and convert it to the HSV color space
blurred = cv2.GaussianBlur(img, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
#Construct a mask for the color "green"
greenLower = np.array([111,189,93])
greenUpper = np.array([179,255,255])
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
#Convert the mask to binary image
ret,thresh = cv2.threshold(mask,127,255,0)
#Calculate moments of binary image
M = cv2.moments(thresh)
#Calculate x,y coordinate of center
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
return
|
np.array([cX, cY])
|
numpy.array
|
import numpy as np
import time
import pandas as pd
from matplotlib import pyplot as plt
from benchmark import benchmark
from precomputing import compute_method_depth, precompute_smoothing, precompute_outofsample, minimal_method_depth, repair_increasing
from visualization import plot_last_forecast
from smoothing import simple_mirroring, piecewise_STL
from precompute_smoothing import update_presmoothing_one_country
from test_missing_or_zero import test_poisson
from misc_methods import mean_const, linear
from precomputing import precompute_forecasts
from ci import confidence_intervals, convert_quantiles_to_ci, save_ci
def forecast_one_country(return_dict, country, cumulative_cases, date_,
methods_, kwargs_list_, names_,
smoothing_fun,
datasource,
H=7,
type_data = "cases",
missing_var=True,
return_val = False,
saveplot=False, newly=False):
"""
main function with the methodology for single country/region numbers forecasting:
preprocessing, trend estimation and forecasting
Arguments:
-- return_dict: dictionary for parallelized runs
-- country: country or region name
-- cumulative_cases: np array with the cumulative historic observations
-- date_: list with dates corresponding to cumulative_cases
-- methods_, kwargs_list_, names_: parameters of the extrapolation methods
-- smoothing_fun: smoothing function
-- datasource: name of the datasource, e.g. "JHU"
-- H: forecasting horizon
-- type_data: type of the data, e.g. "cases"
-- missing_var: whether to check for missing values
-- return_val: whether to return values, for non-parallel use
-- saveplot: boolean, whether to save figures with forecast
-- newly: whether to save trend in smoothing_res
if return_val, function returns the trend, confidence intervals and retrospective trends (used in evaluation)
"""
methods_min_depths = [minimal_method_depth(methods_[i], kwargs_list_[i]) for i in range(len(methods_))]
comment = ""
inc_ = 1 #number of intermediate points for quantile estimation
history_for_conf_interval = ((inc_+1)*(19)+inc_)+H+3 #length of history used to compute CI
#------------------handling negative values--------------------------------------
cumulative_cases_ = repair_increasing(cumulative_cases.copy())
#------------------handling missingness------------------------------------------
if missing_var:
cumulative_cases_, date_, flag_nonzero_recent, zeros_missing, zeros_missing_nan = test_poisson(cumulative_cases_, date_)
else:
_, _, flag_nonzero_recent, _, _ = test_poisson(cumulative_cases_, date_)
zeros_missing, zeros_missing_nan = [], []
total_days_to_neglect = len(zeros_missing)
#------------------treating special cases of delay in reporting--------------------
if (country in ["Switzerland", "Belgium"]) or (datasource in ["OZH",'BAG_KT']):
step_back = 1
if country in ["Belgium"]:
step_back = 3
if datasource in ["OZH",'BAG_KT']:
step_back = 1
if (datasource == "OZH") and (len(zeros_missing)>step_back):
step_back=0
total_days_to_neglect = step_back + len(zeros_missing)
if (step_back>0):
date_ = date_[:-step_back]
zeros_missing = list(np.diff(cumulative_cases_)[-step_back:]) + zeros_missing
zeros_missing_nan = list(np.diff(cumulative_cases_)[-step_back:]) + zeros_missing_nan
cumulative_cases_ = cumulative_cases_[:-step_back]
#---------------------------------------------------------------------------------
H_ = H + total_days_to_neglect # forecast on longer horizon in case of missing data
data_hist = precompute_outofsample(cumulative_cases_)
start_smoothing = np.max([0,len(cumulative_cases_)-history_for_conf_interval-1])
#--------compute the trend--------------------------------------------------------
smoothed_hist = update_presmoothing_one_country(
cumulative_cases_, date_, country,
smoothing_fun,
datasource = datasource, newly=newly)
nnz_recent = np.count_nonzero(
|
np.diff(smoothed_hist[-1][-H:])
|
numpy.diff
|
from collections import namedtuple
from cognibench.utils import partialclass
import cognibench.scores as scores
from cognibench.testing.tests import BatchTest
from sciunit import TestSuite
from shutil import copytree, rmtree
from os.path import exists, join as pathjoin
import numpy as np
import pandas as pd
DATA_PATH = "data"
OUT_PATH = "output"
LIB_PATHS = ["/home/eozd/bachlab/pspm/src", "libcommon"]
class Dataset:
"""
Simple class to represent a dataset (e.g. doxmem2)
"""
def __init__(self, *args, name, subject_ids):
self.name = name
self.subject_ids = subject_ids
self.path = pathjoin(DATA_PATH, name)
DATASET_LIST = [
Dataset(name="doxmem2", subject_ids=np.arange(1, 80)),
Dataset(name="fer02", subject_ids=np.arange(1, 75)),
Dataset(name="fss6b", subject_ids=
|
np.arange(1, 19)
|
numpy.arange
|
import time
import pytest
import numpy as np
import scipy.stats as ss
import scipy.optimize as optimize
import statsmodels.formula.api as smf
import pandas as pd
from .context import bootstrap_stat as bp
from .context import datasets
class TestMisc:
def test_percentile(self):
z = np.array(range(1, 1001))
alpha = 0.05
expected_low = 50
expected_high = 950
actual = bp._percentile(z, [alpha, 1 - alpha])
assert actual[0] == expected_low
assert actual[1] == expected_high
def test_percentile_uneven(self):
z = np.array(range(1, 1000))
alpha = 0.05
expected_low = 50
expected_high = 950
actual = bp._percentile(z, [alpha, 1 - alpha])
assert actual[0] == expected_low
assert actual[1] == expected_high
def test_percentile_partial_sort(self):
z = np.array(range(1, 1000))
alpha = 0.05
expected_low = 50
expected_high = 950
actual = bp._percentile(z, [alpha, 1 - alpha], full_sort=False)
assert actual[0] == expected_low
assert actual[1] == expected_high
def test_adjust_percentiles(self):
alpha = 0.05
a_hat = 0.061
z0_hat = 0.146
expected_alpha1 = 0.110
expected_alpha2 = 0.985
actual_alpha1, actual_alpha2 = bp._adjust_percentiles(
alpha, a_hat, z0_hat
)
assert actual_alpha1 == pytest.approx(expected_alpha1, abs=1e-3)
assert actual_alpha2 == pytest.approx(expected_alpha2, abs=1e-3)
def test_jackknife_values_series(self):
df = datasets.spatial_test_data("A")
expected = 0.061
def statistic(x):
return np.var(x, ddof=0)
jv = bp.jackknife_values(df, statistic)
actual = bp._bca_acceleration(jv)
assert actual == pytest.approx(expected, abs=1e-3)
def test_jackknife_values_array(self):
df = datasets.spatial_test_data("A")
x = np.array(df)
expected = 0.061
def statistic(x):
return np.var(x, ddof=0)
jv = bp.jackknife_values(x, statistic)
actual = bp._bca_acceleration(jv)
assert actual == pytest.approx(expected, abs=1e-3)
def test_jackknife_values_dataframe(self):
df = datasets.spatial_test_data("both")
expected = 0.061
def statistic(df):
return np.var(df["A"], ddof=0)
jv = bp.jackknife_values(df, statistic)
actual = bp._bca_acceleration(jv)
assert actual == pytest.approx(expected, abs=1e-3)
def test_loess(self):
z = np.linspace(0, 1, num=50)
y = np.sin(12 * (z + 0.2)) / (z + 0.2)
np.random.seed(0)
ye = y + np.random.normal(0, 1, (50,))
alpha = 0.20
expected = 0.476
actual = [bp.loess(z0, z, ye, alpha) for z0 in z]
actual = np.array(actual)
actual = np.sqrt(np.mean((actual - y) ** 2))
assert actual == pytest.approx(expected, abs=1e-3)
def test_resampling_vector(self):
n = 8
expected = [1 / 8, 0, 0, 3 / 8, 1 / 8, 1 / 8, 0, 2 / 8]
np.random.seed(0)
actual = bp._resampling_vector(n)
np.testing.assert_array_equal(actual, expected)
def test_parametric_bootstrap(self):
df = datasets.law_data(full=False)
expected = 0.124
class EmpiricalGaussian(bp.EmpiricalDistribution):
def __init__(self, df):
self.mean = df.mean()
self.cov = np.cov(df["LSAT"], df["GPA"], ddof=1)
self.n = len(df)
def sample(self, size=None):
if size is None:
size = self.n
samples = np.random.multivariate_normal(
self.mean, self.cov, size=size
)
df = pd.DataFrame(samples)
df.columns = ["LSAT", "GPA"]
return df
def statistic(df):
return np.corrcoef(df["LSAT"], df["GPA"])[0, 1]
dist = EmpiricalGaussian(df)
np.random.seed(5)
actual = bp.standard_error(dist, statistic, B=3200)
assert actual == pytest.approx(expected, abs=0.002)
class TestStandardError:
def test_standard_error(self):
df = datasets.law_data(full=False)
expected = 0.132
def statistic(df):
return np.corrcoef(df["LSAT"], df["GPA"])[0, 1]
dist = bp.EmpiricalDistribution(df)
np.random.seed(0)
actual = bp.standard_error(dist, statistic, B=2000)
assert actual == pytest.approx(expected, abs=0.01)
def test_standard_error_robust(self):
df = datasets.law_data(full=False)
robustness = 0.95
expected = 0.132
def statistic(df):
return np.corrcoef(df["LSAT"], df["GPA"])[0, 1]
dist = bp.EmpiricalDistribution(df)
np.random.seed(0)
actual = bp.standard_error(
dist, statistic, robustness=robustness, B=2000
)
assert actual == pytest.approx(expected, abs=0.01)
def test_jackknife_after_bootstrap(self):
x = datasets.mouse_data("treatment")
expected_se = 24.27
expected_se_jack = 6.83
dist = bp.EmpiricalDistribution(x)
def stat(x):
return np.mean(x)
np.random.seed(0)
actual_se, actual_se_jack = bp.standard_error(
dist, stat, B=200, jackknife_after_bootstrap=True
)
assert actual_se == pytest.approx(expected_se, abs=0.01)
assert actual_se_jack == pytest.approx(expected_se_jack, abs=0.01)
def test_infinitesimal_jackknife(self):
df = datasets.law_data(full=False)
expected = 0.1243
def statistic(df, p):
mean_gpa = np.dot(df["GPA"], p)
mean_lsat = np.dot(df["LSAT"], p)
sigma_gpa = np.dot(p, (df["GPA"] - mean_gpa) ** 2)
sigma_lsat = np.dot(p, (df["LSAT"] - mean_lsat) ** 2)
corr = (df["GPA"] - mean_gpa) * (df["LSAT"] - mean_lsat)
corr = np.dot(corr, p) / np.sqrt(sigma_gpa * sigma_lsat)
return corr
actual = bp.infinitesimal_jackknife(df, statistic)
assert actual == pytest.approx(expected, abs=1e-4)
class TestConfidenceIntervals:
def test_t_interval(self):
df = datasets.mouse_data("control")
alpha = 0.05
expected_low = 35.8251
expected_high = 116.6049
def statistic(x):
return np.mean(x)
dist = bp.EmpiricalDistribution(df)
theta_hat = statistic(df)
np.random.seed(0)
se_hat = bp.standard_error(dist, statistic, B=2000)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
se_hat=se_hat,
alpha=alpha,
Bouter=1000,
Binner=25,
)
assert actual_low == pytest.approx(expected_low, abs=1)
assert actual_high == pytest.approx(expected_high, abs=1)
def test_t_interval_fast(self):
df = datasets.mouse_data("control")
alpha = 0.05
expected_low = 35.8251
expected_high = 116.6049
def statistic(x):
return np.mean(x)
def fast_std_err(x):
return np.sqrt(np.var(x, ddof=1) / len(x))
dist = bp.EmpiricalDistribution(df)
theta_hat = statistic(df)
np.random.seed(0)
se_hat = fast_std_err(df)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
se_hat=se_hat,
fast_std_err=fast_std_err,
alpha=alpha,
Bouter=1000,
)
assert actual_low == pytest.approx(expected_low, abs=3)
assert actual_high == pytest.approx(expected_high, abs=5)
@pytest.mark.slow
def test_t_interval_robust(self):
df = datasets.mouse_data("control")
alpha = 0.05
expected_low = 35.8251
expected_high = 116.6049
def statistic(x):
return np.mean(x)
def robust_std_err(x):
dist = bp.EmpiricalDistribution(x)
return bp.standard_error(dist, statistic, robustness=0.95, B=1000)
dist = bp.EmpiricalDistribution(df)
theta_hat = statistic(df)
np.random.seed(0)
se_hat = bp.standard_error(
dist, statistic, robustness=0.95, B=2000, num_threads=12
)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
se_hat=se_hat,
fast_std_err=robust_std_err,
alpha=alpha,
Bouter=1000,
num_threads=12,
)
assert actual_low == pytest.approx(expected_low, abs=1)
assert actual_high == pytest.approx(expected_high, abs=3)
def test_t_interval_law_data(self):
df = datasets.law_data(full=False)
alpha = 0.05
expected_low = 0.45
expected_high = 0.93
def statistic(df):
theta = np.corrcoef(df["LSAT"], df["GPA"])[0, 1]
return 0.5 * np.log((1 + theta) / (1 - theta))
def inverse(phi):
return (np.exp(2 * phi) - 1) / (np.exp(2 * phi) + 1)
def fast_std_err(df):
n = len(df.index)
return 1 / np.sqrt(n - 3)
dist = bp.EmpiricalDistribution(df)
theta_hat = statistic(df)
se_hat = fast_std_err(df)
np.random.seed(4)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
se_hat=se_hat,
fast_std_err=fast_std_err,
alpha=alpha,
Bouter=1000,
)
actual_low = inverse(actual_low)
actual_high = inverse(actual_high)
assert actual_low == pytest.approx(expected_low, abs=0.3)
assert actual_high == pytest.approx(expected_high, abs=0.03)
@pytest.mark.slow
def test_t_interval_law_data_variance_adjusted(self):
df = datasets.law_data(full=False)
alpha = 0.05
expected_low = 0.45
expected_high = 0.93
def statistic(df):
theta = np.corrcoef(df["LSAT"], df["GPA"])[0, 1]
return theta
dist = bp.EmpiricalDistribution(df)
theta_hat = statistic(df)
np.random.seed(0)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
stabilize_variance=True,
alpha=alpha,
Bouter=1000,
Binner=25,
Bvar=100,
num_threads=12,
)
assert actual_low == pytest.approx(expected_low, abs=0.05)
assert actual_high == pytest.approx(expected_high, abs=0.03)
def test_percentile_interval(self):
df = datasets.mouse_data("treatment")
alpha = 0.05
expected_low = 49.7
expected_high = 126.7
def statistic(x):
return np.mean(x)
dist = bp.EmpiricalDistribution(df)
np.random.seed(0)
actual_low, actual_high = bp.percentile_interval(
dist, statistic, alpha=alpha, B=1000
)
assert actual_low == pytest.approx(expected_low, abs=1)
assert actual_high == pytest.approx(expected_high, abs=4)
def test_percentile_interval_return_samples(self):
df = datasets.mouse_data("treatment")
alpha = 0.05
expected_low = 49.7
expected_high = 126.7
def statistic(x):
return np.mean(x)
dist = bp.EmpiricalDistribution(df)
np.random.seed(0)
ci_low, ci_high, theta_star = bp.percentile_interval(
dist, statistic, alpha=alpha, B=1000, return_samples=True
)
actual_low, actual_high = bp.percentile_interval(
dist, statistic, alpha=alpha, theta_star=theta_star
)
assert actual_low == pytest.approx(expected_low, abs=1)
assert actual_high == pytest.approx(expected_high, abs=4)
def test_bca(self):
"""Compare confidence intervals.
This test is intended to reproduce Table 14.2 in [ET93].
The results are shown below. What we see is that the ABC method
is just as fast as the standard approach (theta_hat +/- 1.645
standard errors), but much more accurate. It gives similar
answers to the BCa method, but 10x as fast.
Method CI Low CI High Time
standard 98.7 244.4 0.030
percentile 99.4 234.2 0.225
BCa 116.2 258.7 0.241
ABC 116.7 260.9 0.028
bootstrap-t 110.0 303.6 0.316
"""
df = datasets.spatial_test_data("A")
alpha = 0.05
expected_standard_low = 98.8
expected_standard_high = 244.2 # I think [ET93] has a typo.
expected_percentile_low = 100.8
expected_percentile_high = 233.9
expected_bca_low = 115.8
expected_bca_high = 259.6
expected_abc_low = 116.7
expected_abc_high = 260.9
expected_t_low = 112.3
expected_t_high = 314.8
print(" Method \tCI Low\tCI High\tTime")
def statistic(x):
return np.var(x, ddof=0)
def resampling_statistic(x, p):
mu = np.dot(x, p)
return np.dot(p, (x - mu) ** 2)
def fast_std_err(x):
"""Fast calculation for the standard error of variance estimator"""
xhat = np.mean(x)
u2 = np.mean([(xi - xhat) ** 2 for xi in x])
u4 = np.mean([(xi - xhat) ** 4 for xi in x])
return np.sqrt((u4 - u2 * u2) / len(x))
theta_hat = statistic(df)
dist = bp.EmpiricalDistribution(df)
np.random.seed(6)
st = time.time()
se = bp.standard_error(dist, statistic)
actual_low = theta_hat - 1.645 * se
actual_high = theta_hat + 1.645 * se
duration = time.time() - st
print(
f"{'standard'.ljust(12)}\t{actual_low:.01f}\t{actual_high:.01f}"
f"\t{duration:.03f}"
)
assert actual_low == pytest.approx(expected_standard_low, abs=0.2)
assert actual_high == pytest.approx(expected_standard_high, abs=0.2)
st = time.time()
actual_low, actual_high, theta_star = bp.percentile_interval(
dist, statistic, alpha=alpha, B=2000, return_samples=True
)
duration = time.time() - st
print(
f"{'percentile'.ljust(12)}\t{actual_low:.01f}"
f"\t{actual_high:.01f}\t{duration:.03f}"
)
assert actual_low == pytest.approx(expected_percentile_low, abs=1.5)
assert actual_high == pytest.approx(expected_percentile_high, abs=0.4)
actual_low, actual_high = bp.bcanon_interval(
dist, statistic, df, alpha=alpha, theta_star=theta_star
)
duration = time.time() - st
print(
f"{'BCa'.ljust(12)}\t{actual_low:.01f}\t{actual_high:.01f}"
f"\t{duration:.03f}"
)
assert actual_low == pytest.approx(expected_bca_low, abs=0.5)
assert actual_high == pytest.approx(expected_bca_high, abs=1.0)
st = time.time()
actual_low, actual_high = bp.abcnon_interval(
df, resampling_statistic, alpha=alpha
)
duration = time.time() - st
print(
f"{'ABC'.ljust(12)}\t{actual_low:.01f}"
f"\t{actual_high:.01f}\t{duration:.03f}"
)
assert actual_low == pytest.approx(expected_abc_low, abs=0.1)
assert actual_high == pytest.approx(expected_abc_high, abs=0.1)
st = time.time()
se_hat = fast_std_err(df)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
se_hat=se_hat,
fast_std_err=fast_std_err,
alpha=alpha,
Bouter=1000,
)
duration = time.time() - st
print(
f"{'bootstrap-t'.ljust(12)}\t{actual_low:.01f}"
f"\t{actual_high:.01f}\t{duration:.03f}"
)
assert actual_low == pytest.approx(expected_t_low, abs=2.5)
assert actual_high == pytest.approx(expected_t_high, abs=12.0)
@pytest.mark.slow
def test_compare_intervals(self):
"""Compare confidence intervals.
This test is similar to the above test; here we explicitly
want to verify we can compute the bootstrap stats once and
recycle them in a variety of interval calculations. The
results are show below. In addition, unlike the above test we
calculate the variance-stabilized bootstrap-t interval. Note
how it gives very similar answers to BCa.
Method CI Low CI High
standard 103.0 240.1
percentile 100.6 236.2
BCa 115.1 261.6
bootstrap-t 111.6 295.8
var-stab-t 117.5 263.7
"""
df = datasets.spatial_test_data("A")
alpha = 0.05
expected_standard_low = 103.0
expected_standard_high = 240.1
expected_percentile_low = 100.6
expected_percentile_high = 236.2
expected_bca_low = 115.1
expected_bca_high = 261.6
expected_t_low = 111.6
expected_t_high = 295.8
expected_stab_low = 117.5
expected_stab_high = 263.7
print(" Method \tCI Low\tCI High")
def statistic(x):
return np.var(x, ddof=0)
def resampling_statistic(x, p):
mu = np.dot(x, p)
return np.dot(p, (x - mu) ** 2)
def fast_std_err(x):
"""Fast calculation for the standard error of variance estimator"""
xhat = np.mean(x)
u2 = np.mean([(xi - xhat) ** 2 for xi in x])
u4 = np.mean([(xi - xhat) ** 4 for xi in x])
return np.sqrt((u4 - u2 * u2) / len(x))
theta_hat = statistic(df)
dist = bp.EmpiricalDistribution(df)
np.random.seed(6)
B = 2000
statistics = {"theta_star": statistic, "se_star": fast_std_err}
boot_stats = bp.bootstrap_samples(dist, statistics, B, num_threads=12)
theta_star = boot_stats["theta_star"]
se_star = boot_stats["se_star"]
se = bp.standard_error(dist, statistic, theta_star=theta_star)
actual_low = theta_hat - 1.645 * se
actual_high = theta_hat + 1.645 * se
print(f"{'standard'.ljust(12)}\t{actual_low:.01f}\t{actual_high:.01f}")
assert actual_low == pytest.approx(expected_standard_low, abs=0.1)
assert actual_high == pytest.approx(expected_standard_high, abs=0.1)
actual_low, actual_high = bp.percentile_interval(
dist, statistic, alpha=alpha, theta_star=theta_star
)
print(
f"{'percentile'.ljust(12)}\t{actual_low:.01f}\t{actual_high:.01f}"
)
assert actual_low == pytest.approx(expected_percentile_low, abs=0.1)
assert actual_high == pytest.approx(expected_percentile_high, abs=0.1)
actual_low, actual_high = bp.bcanon_interval(
dist, statistic, df, alpha=alpha, theta_star=theta_star
)
print(f"{'BCa'.ljust(12)}\t{actual_low:.01f}\t{actual_high:.01f}")
assert actual_low == pytest.approx(expected_bca_low, abs=0.1)
assert actual_high == pytest.approx(expected_bca_high, abs=0.1)
se_hat = fast_std_err(df)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
stabilize_variance=False,
se_hat=se_hat,
fast_std_err=fast_std_err,
alpha=alpha,
theta_star=theta_star,
se_star=se_star,
)
print(
f"{'bootstrap-t'.ljust(12)}\t{actual_low:.01f}\t{actual_high:.01f}"
)
assert actual_low == pytest.approx(expected_t_low, abs=0.1)
assert actual_high == pytest.approx(expected_t_high, abs=0.1)
actual_low, actual_high = bp.t_interval(
dist,
statistic,
theta_hat,
stabilize_variance=True,
se_hat=se_hat,
fast_std_err=fast_std_err,
alpha=alpha,
theta_star=theta_star,
se_star=se_star,
Bvar=400,
num_threads=12,
)
print(
f"{'var-stab-t'.ljust(12)}\t{actual_low:.01f}\t{actual_high:.01f}"
)
assert actual_low == pytest.approx(expected_stab_low, abs=0.1)
assert actual_high == pytest.approx(expected_stab_high, abs=0.1)
@pytest.mark.slow
def test_calibrate_interval(self):
df = datasets.law_data(full=False)
alpha = 0.05
expected_ci_low = 0.1596
expected_ci_high = 0.9337
expected_a_low = 0.0090
expected_a_high = 0.9662
def statistic(df):
return np.corrcoef(df["LSAT"], df["GPA"])[0, 1]
def resampling_statistic(df, p):
mean_gpa = np.dot(df["GPA"], p)
mean_lsat = np.dot(df["LSAT"], p)
sigma_gpa = np.dot(p, (df["GPA"] - mean_gpa) ** 2)
sigma_lsat = np.dot(p, (df["LSAT"] - mean_lsat) ** 2)
corr = (df["GPA"] - mean_gpa) * (df["LSAT"] - mean_lsat)
sigma_gpa = max([sigma_gpa, 1e-9])
sigma_lsat = max([sigma_lsat, 1e-9])
corr = np.dot(corr, p) / np.sqrt(sigma_gpa * sigma_lsat)
return corr
theta_hat = statistic(df)
dist = bp.EmpiricalDistribution(df)
np.random.seed(3)
ci_low, ci_high, a_low, a_high = bp.calibrate_interval(
dist,
resampling_statistic,
df,
theta_hat,
alpha=alpha,
B=200,
return_confidence_points=True,
num_threads=12,
)
assert ci_low == pytest.approx(expected_ci_low, abs=1e-4)
assert ci_high == pytest.approx(expected_ci_high, abs=1e-4)
assert a_low == pytest.approx(expected_a_low, abs=1e-4)
assert a_high == pytest.approx(expected_a_high, abs=1e-4)
@pytest.mark.skip
def test_importance_sampling(self):
np.random.seed(0)
n = 100
alpha = 0.025
# The mean of n standard gaussians is gaussian with mean 0
# and variance 1/n. Thus theta ~ N(0, 1/n).
c = ss.norm.isf(alpha, 0, 1 / np.sqrt(n))
x = np.random.normal(loc=0.0, scale=1.0, size=n)
def statistic(x, p):
return np.dot(x, p)
def g(lmbda):
p = np.exp(lmbda * (x - np.mean(x)))
return p / sum(p)
lmbda = optimize.root_scalar(
lambda lmbda: c - sum(g(lmbda) * x),
method="bisect",
bracket=(-10, 10),
).root
p_i = g(lmbda)
assert sum(x * p_i) == pytest.approx(c)
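# The exponentially tilted weights p_i shift the empirical mean onto the critical value c, so
# resampling with p_i concentrates draws in the tail of interest (importance sampling).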
B = 1000
ind = range(n)
prob = 0.0
for i in range(B):
x_star = np.random.choice(ind, n, True, p_i)
p_star = np.array(
[
|
np.count_nonzero(x_star == j)
|
numpy.count_nonzero
|
# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
import numpy as np
from typing import Union
from netcal import AbstractCalibration, dimensions, accepts
class NearIsotonicRegression(AbstractCalibration):
"""
Near Isotonic Regression Calibration method [1]_ (commonly used by :class:`ENIR` [2]_).
Parameters
----------
quick_init : bool, optional, default: True
Allow quick initialization of NIR (equal consecutive values are grouped directly).
independent_probabilities : bool, optional, default: False
Boolean for multi class probabilities.
If set to True, the probability estimates for each
class are treated as independent of each other (sigmoid).
References
----------
.. [1] <NAME>, <NAME>, and <NAME>:
"Nearly-isotonic regression."
Technometrics, 53(1):54–61, 2011.
`Get source online <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.365.7054&rep=rep1&type=pdf>`
.. [2] Naeini, <NAME>, and <NAME>:
"Binary classifier calibration using an ensemble of near isotonic regression models."
2016 IEEE 16th International Conference on Data Mining (ICDM). IEEE, 2016.
`Get source online <https://ieeexplore.ieee.org/iel7/7837023/7837813/07837860.pdf>`
"""
@accepts(bool, bool)
def __init__(self, quick_init: bool = True, independent_probabilities: bool = False):
"""
Create an instance of `NearIsotonicRegression`.
Parameters
----------
quick_init : bool, optional, default: True
Allow quick initialization of NIR (equal consecutive values are grouped directly).
independent_probabilities : bool, optional, default: False
Boolean for multi class probabilities.
If set to True, the probability estimates for each
class are treated as independent of each other (sigmoid).
"""
super().__init__(detection=False, independent_probabilities=independent_probabilities)
self._lambda = 0.0
# group values: betas/new confidence in each group
# group items: ground truth labels of each sample in a group
# group bounds: lower and upper boundaries of each group
self._num_groups = None
self._group_items = None
self._group_bounds = None
self._group_values = None
self.quick_init = quick_init
def clear(self):
"""
Clear model parameters.
"""
super().clear()
self._num_groups = None
self._group_items = None
self._group_bounds = None
self._group_values = None
self._lambda = 0.0
def fit(self, X: np.ndarray = None, y: np.ndarray = None,
last_model: 'NearIsotonicRegression' = None) -> Union['NearIsotonicRegression', None]:
"""
Build the NIR model either as an initial model from the arrays 'X' and 'y', or as an
intermediate model based on 'last_model', which is an instance of 'NearIsotonicRegression'.
Parameters
----------
X : np.ndarray, optional, default: None, shape=(n_samples, [n_classes])
NumPy array with confidence values for each prediction.
1-D for binary classification, 2-D for multi class (softmax).
y : np.ndarray, optional, default: None, shape=(n_samples, [n_classes])
NumPy array with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).
last_model : NearIsotonicRegression, optional, default: None
Instance of NearIsotonicRegression (required, if 'X' and 'y' is empty).
Returns
-------
NearIsotonicRegression or None
Instance of class :class:`NearIsotonicRegression` or None
if next lambda is less than current lambda (end of mPAVA).
"""
if last_model is None:
if X is None or y is None:
raise AttributeError("Could not initialize mPAVA algorithm without "
"an array of ground truth and confidence values")
X, y = super().fit(X, y)
if self.quick_init:
self.__initial_model_quick(X, y)
else:
self.__initial_model_standard(X, y)
return self
# get attributes of previous NIR model
self._lambda = last_model._lambda
self._num_groups = last_model._num_groups
self._group_items = last_model._group_items
self._group_values = last_model._group_values
self._group_bounds = last_model._group_bounds
self.quick_init = last_model.quick_init
self.num_classes = last_model.num_classes
self.independent_probabilities = last_model.independent_probabilities
# get slopes and collision times (where consecutive bins might be merged)
slopes = self.__get_slopes()
t_values = self.__get_collision_times(slopes)
# calculate next lambda (monotony violation weight)
# this is denoted as lambda ast
next_lambda = np.min(t_values)
# if next lambda is less than current lambda, terminate mPAVA algorithm
if next_lambda < self._lambda or next_lambda == np.inf:
return None
# now update group values and merge groups with equal values
self.__update_group_values(slopes, next_lambda)
self.__merge_groups(t_values, next_lambda)
# get new lambda (ast) and set as current lambda
self._lambda = next_lambda
return self
@dimensions((1, 2))
def transform(self, X: np.ndarray) -> np.ndarray:
"""
After model calibration, this function is used to get calibrated outputs of uncalibrated
confidence estimates.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes])
NumPy array with uncalibrated confidence estimates.
1-D for binary classification, 2-D for multi class (softmax).
Returns
-------
np.ndarray, shape=(n_samples, [n_classes])
NumPy array with calibrated confidence estimates.
1-D for binary classification, 2-D for multi class (softmax).
"""
X = super().transform(X)
# prepare return value vector
calibrated = np.zeros_like(X)
for i in range(self._num_groups):
bounds = self._group_bounds[i]
if bounds[0] == 0.0:
calibrated[(X >= bounds[0]) & (X <= bounds[1])] = self._group_values[i]
else:
calibrated[(X > bounds[0]) & (X <= bounds[1])] = self._group_values[i]
if not self.independent_probabilities:
# apply normalization on multi class calibration
if len(X.shape) == 2:
# normalize to keep probability sum of 1
normalizer = np.sum(calibrated, axis=1, keepdims=True)
calibrated = np.divide(calibrated, normalizer)
return calibrated
def get_next_model(self) -> Union['NearIsotonicRegression', None]:
"""
Get next Near Isotonic Regression model based on mPAVA algorithm
Returns
-------
NearIsotonicRegression
Next instance of :class:`NearIsotonicRegression`.
"""
next_model = NearIsotonicRegression(self.quick_init)
if next_model.fit(last_model=self) is None:
del next_model
next_model = None
return next_model
def get_degrees_of_freedom(self) -> int:
"""
Needed for BIC. Returns the degree of freedom. This simply returns the
number of groups
Returns
-------
int
Integer with degree of freedom.
"""
return int(self._num_groups)
# -------------------------------------------------------------
@dimensions(1, (1, 2))
def __initial_model_standard(self, X: np.ndarray, y: np.ndarray):
"""
Initial NIR model, standard initialization (as described in the original NIR paper).
Each group holds a single ground truth value and is initialized with that value.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes])
NumPy array with confidence values for each prediction.
1-D for binary classification, 2-D for multi class (softmax).
y : np.ndarray, shape=(n_samples,)
NumPy 1-D array with ground truth labels.
"""
# one hot encoded label vector on multi class calibration
if len(X.shape) == 2:
y = np.eye(self.num_classes)[y]
# sort arrays by confidence - always flatten (this has no effect to 1-D arrays)
X, y = self._sort_arrays(X.flatten(), y.flatten())
self._num_groups = y.size
self._group_items = np.split(y, y.size)
# calculate bounds as median
bounds = np.divide(X[:-1] + X[1:], 2.)
lower_bounds = np.insert(bounds, 0, 0.0)
upper_bounds = np.append(bounds, 1.0)
self._group_bounds = np.stack((lower_bounds, upper_bounds), axis=1)
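# Worked example: for sorted confidences X = [0.1, 0.3, 0.8] the midpoints are [0.2, 0.55],
# giving group bounds [0.0, 0.2], (0.2, 0.55] and (0.55, 1.0].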
self._group_values = np.array(y, dtype=np.float)
@dimensions(1, (1, 2))
def __initial_model_quick(self, X: np.ndarray, y: np.ndarray):
"""
Initial NIR model quick initialization (own implementation).
Each group is formed from consecutive equal ground truth values, so the
algorithm starts with a perfect fit to the data.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes])
NumPy array with confidence values for each prediction.
1-D for binary classification, 2-D for multi class (softmax).
y : np.ndarray, shape=(n_samples,)
NumPy 1-D array with ground truth labels.
"""
# one hot encoded label vector on multi class calibration
if len(X.shape) == 2:
y = np.eye(self.num_classes)[y]
# sort arrays by confidence - always flatten (this has no effect to 1-D arrays)
X, y = self._sort_arrays(X.flatten(), y.flatten())
# get monotony violations directly and create the corresponding groups
# compute differences of consecutive ground truth labels
differences = y[1:] - y[:-1]
# monotony violations are found where differences are either 1 (from 0 -> 1) or -1 (from 1 -> 0)
# store these violations as differences
violations = np.where(differences != 0.0)[0]
differences = differences[violations]
# amount of available groups is amount of differences (+ initial values)
self._num_groups = differences.size + 1
# group values are differences (map -1 to 0 and insert first ground truth value as first group value)
self._group_values = differences
self._group_values[differences == -1.] = 0.0
self._group_values = np.insert(differences, 0, y[0]).astype(np.float)
# get group items as NumPy arrays
# split arrays where monotony violations are found (index +1 needed)
self._group_items = np.split(y, violations + 1)
# group bounds can also be found where monotony violations are present
bounds = np.divide(X[violations] + X[violations + 1], 2.)
# include 0 and 1 as bounds, too
lower_bounds = np.insert(bounds, 0, 0.0)
upper_bounds = np.append(bounds, 1.0)
self._group_bounds = np.stack((lower_bounds, upper_bounds), axis=1)
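# Worked example of the quick initialization (illustrative values only):
# for sorted labels y = [0, 0, 1, 1, 0] the consecutive differences are
# [0, 1, 0, -1], so violations occur at indices [1, 3]. This gives
# 3 groups with items [0, 0], [1, 1], [0] and initial group values
# [0, 1, 0]; the group bounds are the confidence midpoints at those splits.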
def __get_slopes(self) -> np.ndarray:
"""
Get the derivative or slope of each bin value with respect to given lambda.
Returns
-------
np.ndarray, shape=(n_bins,)
NumPy 1-D array slopes of each bin.
"""
# determine amount of samples in each group and create Numpy vector
num_samples_per_group = np.array([self._group_items[i].size for i in range(self._num_groups)], dtype=np.float)
# calculate monotony violation of consecutive group values (consecutive betas)
pre_group_values = np.array(self._group_values[:-1])
post_group_values = np.array(self._group_values[1:])
# perform compare operations with NumPy methods
indicator = np.greater(pre_group_values, post_group_values)
indicator = np.insert(indicator, 0, False)
indicator = np.append(indicator, False)
indicator = indicator.astype(np.float)
# slopes are calculated by previously calculated indicator
slopes = indicator[:-1] - indicator[1:]
slopes = np.divide(slopes, num_samples_per_group)
return slopes
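# In other words, slope_i = (I_{i-1,i} - I_{i,i+1}) / n_i, where I_{i,i+1}
# indicates a monotony violation between groups i and i+1, n_i is the number
# of samples in group i, and the indicators at both boundaries are zero.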
@dimensions(1)
def __get_collision_times(self, slopes: np.ndarray) -> np.ndarray:
"""
Calculate t values. These values give the indices of groups which can be merged.
Parameters
----------
slopes : np.ndarray, shape=(n_bins,)
NumPy 1-D array with slopes of each bin.
Returns
-------
np.ndarray, shape=(n_bins-1,)
NumPy 1-D array with t values.
"""
# calculate differences of consecutive group values and slopes
group_difference = self._group_values[1:] - self._group_values[:-1]
slope_difference = slopes[:-1] - slopes[1:]
# divide group differences by slope differences
# if slope differences are 0, set resulting value to inf
t_values = np.divide(group_difference, slope_difference,
out=np.full_like(group_difference, np.inf, dtype=np.float),
where=slope_difference != 0)
# add current lambda to t values
t_values = t_values + self._lambda
return t_values
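# i.e. t_{i,i+1} = (beta_{i+1} - beta_i) / (s_i - s_{i+1}) + lambda, the
# lambda value at which the values of groups i and i+1 would collide;
# pairs with equal slopes never collide and therefore get t = inf.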
@accepts(np.ndarray, float)
def __update_group_values(self, slopes: np.ndarray, next_lambda: float):
"""
Perform update of group values by given slopes and value of next lambda.
Parameters
----------
slopes : np.ndarray, shape=(n_bins,)
NumPy 1-D array with slopes of each bin.
next_lambda : float
Lambda value of next model.
"""
for i in range(self._num_groups):
self._group_values[i] += slopes[i] * (next_lambda - self._lambda)
@accepts(np.ndarray, float)
def __merge_groups(self, t_values: np.ndarray, next_lambda: float):
"""
Merge all groups where t_values is equal to next_lambda
Parameters
----------
t_values : np.ndarray, shape=(n_bins-1,)
Current t-values.
next_lambda : float
Lambda value of next model.
"""
# groups are denoted as t_i,i+1
joined_groups = np.where(t_values == next_lambda)
# =================================================================
#
# Authors: <NAME> <<EMAIL>>
#
# Copyright (c) 2018 <NAME>
#
# Modified by <NAME> 6/2/21 for the purposes of prototyping
# a provider for Himawari L2 Satellite Data
#
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import cartopy.crs as ccrs
import copy
from datacube.utils.cog import write_cog
import logging
from flask import request as rq
import flask
import json
import metpy
import numpy as np
import os
import pathlib
from pyproj import CRS
import xarray as xr
import uuid as ud
import zipfile
from scipy.interpolate import griddata
LOGGER = logging.getLogger(__name__)
class HimawariProvider(object):
def __init__(self, dataset, config):
"""
Initialize object
:param provider_def: provider definition
:returns: pygeoapi.providers.base.BaseProvider
"""
self.config = config
self.DATASET_FOLDER = config['datasets'][dataset]['provider']['data_source']
self.dir_root=self.DATASET_FOLDER
self.ps_cov= {
"domain": {
"axes": {
"t": {
"values": [
]
},
"x": {
"values": [
]
},
"y": {
"values": [
]
},
},
"domainType": "PointSeries",
"referencing": [
{
"coordinates": [
"y",
"x"
],
"system": {
"id": "http://www.opengis.net/def/crs/OGC/1.3/CRS84",
"type": "GeographicCRS"
}
},
{
"coordinates": [
"t"
],
"system": {
"calendar": "Gregorian",
"type": "TemporalRS"
}
}
],
"type": "Domain"
},
"parameters": {
"p1": {
"attrs": {
},
"description": {
"en": ""
},
"observedProperty": {
"label": {
"en": ""
}
},
"unit": {
"label": {
"en": ""
},
"symbol": {
"type": "",
"value": ""
}
}
}
},
"ranges": {
"p1": {
"axisNames": [
],
"dataType": "float",
"shape": [
],
"type": "NdArray",
"values": [
]
}
},
"type": "Coverage"
}
self.area_cov= {
"domain": {
"axes": {
"t": {
"values": [
]
},
"x": {
"values": [
]
},
"y": {
"values": [
]
},
},
"domainType": "Grid",
"referencing": [
{
"coordinates": [
"y",
"x"
],
"system": {
"id": "http://www.opengis.net/def/crs/OGC/1.3/CRS84",
"type": "GeographicCRS"
}
},
{
"coordinates": [
"t"
],
"system": {
"calendar": "Gregorian",
"type": "TemporalRS"
}
}
],
"type": "Domain"
},
"parameters": {
"p1": {
"attrs": {
},
"description": {
"en": ""
},
"observedProperty": {
"label": {
"en": ""
}
},
"unit": {
"label": {
"en": ""
},
"symbol": {
"type": "",
"value": ""
}
}
}
},
"ranges": {
"p1": {
"axisNames": [
],
"dataType": "float",
"shape": [
],
"type": "NdArray",
"values": [
]
}
},
"type": "Coverage"
}
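# Note: ps_cov and area_cov are empty CoverageJSON skeletons (PointSeries and
# Grid domains respectively); to_covjson() deep-copies one of them, fills the
# 't'/'x'/'y' axis values, and clones the 'p1' placeholder into one
# parameter/range entry per requested variable.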
def query(self, dataset, qtype, coords, time_range, z_value, params, instance, outputFormat):
self.uuid=str(ud.uuid4().hex)
zarr_ds = self.config['datasets'][dataset]['provider']['data_source']+'/zarr'
ds = xr.open_zarr(zarr_ds)
if qtype=='point':
output, output_boolean = self.get_position_data(ds,coords,qtype,params,time_range,outputFormat)
return output, output_boolean
if qtype=='polygon':
output, output_boolean = self.get_polygon_data(ds,coords,qtype,params,time_range,outputFormat)
return output, output_boolean
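# Illustrative usage sketch (dataset key, parameter name and coordinates are
# assumptions, not taken from the original service configuration):
#   provider = HimawariProvider('himawari_l2', config)
#   body, cleanup = provider.query('himawari_l2', 'point', [140.7, -35.3],
#                                  ['2021-06-01T00:00Z', '2021-06-01T06:00Z'],
#                                  None, ['sea_surface_temperature'], None,
#                                  'CoverageJSON')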
def get_position_data(self,ds,coords,qtype,params,time_range,outputFormat):
lon = coords[0] # longitude of interest
lat = coords[1] # latitude of interest
sat_height=ds.goes_imager_projection.attrs['perspective_point_height'][0]
sweep_angle_axis=ds.goes_imager_projection.attrs['sweep_angle_axis']
cen_lon=ds.geospatial_lat_lon_extent.geospatial_lon_center[0]
data_crs = ccrs.Geostationary(central_longitude=cen_lon,satellite_height=sat_height, false_easting=0, false_northing=0, globe=None, sweep_axis='x')
x, y = data_crs.transform_point(lon, lat, src_crs=ccrs.PlateCarree())
new_proj_attrs={}
for key, value in ds.goes_imager_projection.attrs.items():
if type(value)==list:
new_proj_attrs.update({key: value[0]})
else:
new_proj_attrs.update({key: value})
cc=CRS.from_cf(new_proj_attrs)
ds.coords["x"] = ds.x
ds.coords["y"] = ds.y
ds.coords["goes_imager_projection"] = ds.goes_imager_projection
ds.coords["time"] = ds.time
ds.rio.write_crs(cc, inplace=True)
ds=ds.assign_coords({'x':ds.x.values * sat_height})
ds=ds.assign_coords({'y':ds.y.values * sat_height})
output = ds.sel(x=x, y=y, method='nearest')  # use the projected coordinates computed above
output=output[params]
j_output = output.to_dict()
if outputFormat=='CoverageJSON':
j_cov = self.to_covjson(j_output,qtype,lat,lon)
return json.dumps(j_cov, indent=4, sort_keys=True, default=str).replace('NaN', 'null'), 'no_delete'
def get_polygon_data(self,ds,coords,qtype,params,time_range,outputFormat):
output=ds[params]
output=output.sel({'time':slice(str(time_range[0]),str(time_range[1]))})
geometries=[];coord_list=list()
if len(coords) == 5:
coords_clip=[[coords[0][0],coords[0][1]],[coords[1][0],coords[1][1]],[coords[2][0]-1,coords[2][1]],[coords[3][0]-1,coords[3][1]],[coords[4][0],coords[4][1]]]
else:
coords_clip=coords
geometries.append({'type':'Polygon', 'coordinates':[coords_clip]})
output=output.rio.write_crs(4326)
output=output.rio.clip(geometries,output.rio.crs)
j_output = output.to_dict()
if outputFormat=='CoverageJSON':
j_cov = self.to_covjson(j_output,qtype)
return json.dumps(j_cov, indent=4, sort_keys=True, default=str).replace('NaN', 'null'), 'no_delete'
if outputFormat=="COGeotiff":
f_location,zip_bool=export_geotiff(self,output)
if zip_bool==False:
return flask.send_from_directory(self.dir_root,self.uuid+'.tif',as_attachment=True), self.dir_root+'/'+self.uuid+'.tif'
if zip_bool==True:
root=self.dir_root+'/temp_dir/'
zip_file=f_location.split('/')[-1]+'.zip'
return flask.send_from_directory(root,zip_file,as_attachment=True), 'no_delete'
if outputFormat=="NetCDF":
for data_vars in output.data_vars:
del output[data_vars].attrs['grid_mapping']
conversion=output.to_netcdf(self.dir_root+'/output-'+self.uuid+'.nc')
return flask.send_from_directory(self.dir_root,'output-'+self.uuid+'.nc',as_attachment=True), self.dir_root+'/output-'+self.uuid+'.nc'
def to_covjson(self,j_output,qtype,lat=None,lon=None):
if qtype == 'point':
cov = self.ps_cov
if qtype=='polygon':
cov = self.area_cov
new_output=copy.deepcopy(cov)
new_output['domain']['axes']['t']['values']=copy.deepcopy(j_output['coords']['time']['data'])
time_list=list()
for t in j_output['coords']['time']['data']:
time_list.append(t.isoformat())
new_output['domain']['axes']['t']['values']=time_list
try:
new_output['domain']['axes']['x']['values']=copy.deepcopy(j_output['coords']['x']['data'])
new_output['domain']['axes']['y']['values']=copy.deepcopy(j_output['coords']['y']['data'])
except:
new_output['domain']['axes']['x']['values']=copy.deepcopy(j_output['coords']['lon']['data'])
new_output['domain']['axes']['y']['values']=copy.deepcopy(j_output['coords']['lat']['data'])
for p in j_output['data_vars']:
new_output['parameters'][p]={}
new_output['parameters'][p]=copy.deepcopy(new_output['parameters']['p1'])
new_output['parameters'][p]['description']={'en': p}
try:
new_output['parameters'][p]['observedProperty']['label']['en']=p
new_output['parameters'][p]['unit']['label']['en']=p
new_output['parameters'][p]['unit']['symbol']={'value': copy.deepcopy(j_output['data_vars'][p]['attrs']['units'])}
except:
pass
new_output['ranges'][p]=copy.deepcopy(new_output['ranges']['p1'])
if qtype=='point':
new_output['ranges'][p]['values']=copy.deepcopy(j_output['data_vars'][p]['data'])
new_output['ranges'][p]['shape']=[np.array(j_output['data_vars'][p]['data']).shape[0],1,1]
if qtype=='polygon':
new_output['ranges'][p]['values']=copy.deepcopy(np.array(j_output['data_vars'][p]['data']))
import pandas as pd
import numpy as np
#to read the data in the csv file
data = pd.read_csv(r'C:\Users\<NAME>\PycharmProjects\AIML lab\data.csv')
print(data, "\n")
#making an array of all the attributes
d = np.array(data)
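# d now holds the whole table as a 2-D NumPy array; in a typical Find-S /
# candidate-elimination lab script (an assumption about the surrounding
# exercise), d[:, :-1] would be the attribute columns and d[:, -1] the target.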
###############################################################################
# #
# #
# Importing Libraries #
# #
# REQUIRED: CSV, SciPy, Sklearn, Numpy (Easiest to just #
# download Anaconda distribution) #
###############################################################################
import csv #Necessary for reading from data table.
import numpy, scipy; #Required for proper use of sk-learn
import sklearn; #Machine Learning Library
import datetime; #Good for data processing / getting dates
import time; #Good for data processing
from sklearn import preprocessing #Allows quick conversion of strings to
#integers.
from sklearn.ensemble import RandomForestClassifier
#Required to run Random Forest.
###############################################################################
# #
# #
# Data Processing #
# #
# Note: for RandomForest, all data must be a float #
# #
###############################################################################
# Getting data from CSV:
csvfile = open('C:/Users/jriggs/Documents/Forest_FINAL_t2.csv', 'rb');
r = csv.reader(csvfile, delimiter=',')
lst = [rw for rw in r][0:]
#Transposing list to improve ease of SKLearn preprocessing
#(Casting to Numpy Array and indexing by columns is an
#alternative strategy)
lst2 = [list(t) for t in zip(*lst)]
#Identifying Column Names (Good for Indexing into CSV):
col = {"UserId":0, "OId":1, "ThirtyDaysLate":2, "SixtyDaysLate":3, "NinetyDaysLate":4,
"PaymentPattern":5, "PaymentPatternStartDate":6, "AccountOpenedDate":7,
"AccountClosedDate":8, "AccountReportedDate":9, "LastActivityDate":10,
"AccountStatusDate":11, "AccountOwnershipType":12, "AccountStatus": 13,
"AccountType":14, "BusinessType":15, "LoanType":16, "MonthsReviewed":17,
"CreditLimit":18, "HighestBalanceAmount":19, "MonthlyPayment":20,
"UnpaidBalance":21, "TermsDescription":22, "TermsMonthsCount":23,
"CurrentRatingCode":24, "CurrentRatingType":25, "HighestAdverseCode":26,
"HighestAdverseDate":27, "HighestAdverseType":28, "RecentADverseCode":29,
"RecentAdverseDate":30, "RecentAdverseType":31, "PriorAdverseCode":32, "PriorAdverseDate":33, "PriorAdverseType":34,
"CreditScoreOne":35, "Reason1-1":36, "Reason1-2":37, "Reason1-3":38, "Reason1-4":39,
"CreditScoreTwo":40, "Reason2-1":41, "Reason2-2":42, "Reason2-3":43, "Reason2-4":44,
"InquiryDates_AVG":45, "InquiryDates_MED":46, "InquiryDatesLow":47, "InquiryDatesHigh":48,
"InquiryDatesNum":49, "NumberOfCurrentEmployers":50, "NumberofSelfEmployment":51, "CurrentlyAtResidence":52,
"CriminalRecords1":53, "DispositionType1":54, "CR2":55, "DT2":56, "CR3":57, "DT3":58, "CR4":59, "DT4":60, "CR5":61,
"DT5":62, "CR6":63, "DT6":64, "CR7":65, "DT7":66, "CR8":67, "DT8":68, "MostRecentCriminalDate":69, "TotalFees":70,
"CSC1":71, "CSC2":72, "CSC3":73, "CSC4":74, "CSC5":75, "CSC6":76, "CSC7":77, "CSC8":78, "Listof_TypeofOffense":79,
"Num_TypeofOffense":80}
#Identifying Different Operation Status IDs
_o_i_d = {"Prequalification succeed":213, "Qualification":215, "Documents":216,
"Payment":217, "Confirmation":218, "Paid Applicant":221,
"Background Check in Progress":222, "AutoCheck Completed":223,
"Conditional Approval":224, "Contingent Approval":225,
"Missing Documents":226, "Approved Resident":231, "Denied":232,
"NLI (No Longer Interested":233, "Disqualified":234,
"Insufficient Income":235, "FaultyApplication":236, "Dormant":237,
"Unknown":261}
###############################################################################
# #
# Functions for Data Processing #
# #
###############################################################################
# date_conv: Takes a list of columns and for each element of a column, converts
# the string of this element into a value in seconds since 1910
def date_conv(column_list):
for i in column_list:
date_acc = 0;
for j in lst2[col[i]]:
tmp_b = 0;
if ((j != 'NULL') & (j != '')):
l = [int(a) for a in j.split('-')]
tmp_b = time.mktime( (datetime.datetime(l[0], l[1], l[2]) +datetime.timedelta(seconds=((365.25)*(24)*(60)*(60)*(60)))).timetuple())
lst2[col[i]][date_acc] = float(tmp_b);
else:
lst2[col[i]][date_acc] = 'NULL'
date_acc = date_acc + 1;
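# Illustrative behaviour (values assumed): for a cell '2015-06-30' in one of
# the listed columns, date_conv replaces it in place with the epoch-based
# float produced by time.mktime, while 'NULL' and empty cells stay 'NULL'.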
#Takes a string input and returns a float or a 'NULL' based on whether or not
# useful data is contained within the string:
#
# unknown: the string being evaluated
#
#
# / if 0 =============> Returns actual value
# _count: ----
# \ if NOT 0 =============> Returns sum of digits
#######################################################
def int_conv(unknown, _count):
if (unknown == ''):
return 'NULL';
if (unknown[0] == '\xef'):
unknown = unknown[3:]
if (unknown == ''):
return 'NULL';
if (unknown == 'NULL'):
return 'NULL';
else:
tmp = float(unknown)
#tmp = int(unknown,base)
#if (is_int):
# tmp = int(unknown, base)
if (_count):
#The below line is only useful for calculating # of 1s in a binary string
# generalizing the implementation may require its removal.
tmp = sum([int(a) for a in unknown])
# else:
# tmp = unknown
return float(tmp)
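# Illustrative behaviour (values assumed):
#   int_conv('42.5', 0)    -> 42.5      (plain float conversion)
#   int_conv('110100', 1)  -> 3.0       (_count != 0: sum of the digits)
#   int_conv('', 0)        -> 'NULL'    (missing data keeps the marker)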
#takes a single cell containing multiple elements of information:
# ie) [element1][char][element2][char][element3] in one cell
# and moves the first few elements up until the len_ element
# elements to subsequent cells seperated from the original
# cell by a factor of "mult"
#
# For example: [element1]_[element2]_[element3] | EmptyCell --> [element_1] | [element_2]
#
# via a call of folding_func([i st col[i] is the column containing [element1]_[element2]], 1, '_', 2)
#
# NO
#########################################################################################
def folding_func(i, mult, char, len_):
temp = [(['0','0','0','0'],sub_lst.split(char))[sub_lst != 'NULL'] for sub_lst in lst2[col[i]]]
row_acc = 0;
for k in temp:
len_tot = min(len(k),len_)
col_acc = 0;
while(col_acc < len_tot):
lst2[col[i]+mult*col_acc][row_acc] = k[col_acc]
col_acc = col_acc + 1;
row_acc = row_acc+1;
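# Illustrative behaviour (values assumed): with lst2[col["Reason1-1"]] holding
# cells like '12 05 31 07', the call folding_func("Reason1-1", 1, ' ', 4)
# spreads the four codes across the Reason1-1..Reason1-4 columns; 'NULL'
# cells fall back to the ['0','0','0','0'] placeholder instead.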
# Checks if unknown is a number (float or int)
#
# if so: return 1
# if not: return 0
###############################################
def t_f(unknown):
if (isinstance(unknown, int)):
return 1;
elif (isinstance(unknown, float)):
return 1;
else:
return 0;
#
# float_s:
# takes a "thing" and an "avg" you desire to replace it with
# when "thing" is not a float value
#
# This is used to eliminate missing or 'NULL' values in the
# script.
#################################################
def float_s(thing, avg):
if (t_f(thing)):
return float(thing)
else:
return avg
###############################################################################
# #
# Basic Data Processing #
# #
###############################################################################
#Step 1: Split columns containing multiple values
folding_func("Reason1-1", 1, ' ', 4)
folding_func("Reason2-1", 1, ' ', 4)
folding_func("CriminalRecords1", 2, '_', 8)
folding_func("DispositionType1", 2, '_', 8)
folding_func("CSC1", 1, '_', 8)
#Step 2: Deal with computations involving columns containing multiple values;
# In particular, those that you do not wish to be represented in the larger
# data set in their raw form.
#Example: InquiryDates_AVG is a column containing all inquiry dates of a
# customer. The code below takes this long list, and applies basic
# operations on it to calculate both the average and median of this
# list. The average goes back into InquiryDates_AVG while the median
# is moved into the subsequent column.
folding3 = ["InquiryDates_AVG"]
date_acc = 0;
for k in lst2[col[folding3[0]]]:
tmp_a = 0;
tmp_b = 0;
tmp_c = []
if (k != 'NULL'):
for j in k.split(' '):
l = [int(a) for a in j.split('-')]
tmp_c = tmp_c + [time.mktime(datetime.datetime(l[0], l[1], l[2]).timetuple())]
l_l = len(tmp_c)
l_2 = (l_l)/2
tmp_c_sorted = sorted(tmp_c)
tmp_a = ((tmp_c_sorted[l_2] + tmp_c_sorted[l_2-1])/2, tmp_c[(l_l-1)/2] )[l_l % 2]
tmp_b = (sum(tmp_c))/(l_l)
lst2[col[folding3[0]]][date_acc] = tmp_b;
lst2[col[folding3[0]] + 1][date_acc] = tmp_a;
date_acc = date_acc + 1;
# Step 3: Convert Dates to Float Values:
date_change = ["AccountOpenedDate", "AccountClosedDate", "AccountReportedDate", "LastActivityDate",
"AccountStatusDate", "PaymentPatternStartDate",
"HighestAdverseDate", "RecentAdverseDate", "PriorAdverseDate", "InquiryDatesLow",
"InquiryDatesHigh", "MostRecentCriminalDate"]
date_conv(date_change)
# Step 4: Convert labels to numbers. If using SK-Learn Preprocessing it is
# VERY important that you are inputting complete columns when engaging
# in these operations.
#
# STEP 4A: List what you want to be processed.
codify_list = ["PaymentPattern", "AccountOwnershipType", "AccountStatus", "AccountType", "BusinessType", "PriorAdverseType",
"HighestAdverseCode", "HighestAdverseType", "RecentADverseCode", "RecentAdverseType", "PriorAdverseCode",
"LoanType", "TermsDescription", "CurrentRatingCode", "CurrentRatingType", "CriminalRecords1", "DispositionType1",
"CR2", "DT2", "CR3", "DT3", "CR4", "DT4", "CR5", "DT5", "CR6", "DT6", "CR7", "DT7", "CR8", "LoanType",
"DT8", "CSC1", "CSC2", "CSC3", "CSC4", "CSC5", "CSC6", "CSC7", "CSC8", "Listof_TypeofOffense"]
# STEP 4B: Process the data that was listed!
number = preprocessing.LabelEncoder()
for i in codify_list:
tmp = number.fit_transform(lst2[col[i]]);
tmp2 = [int(a) for a in tmp]
lst2[col[i]] = tmp2;
# Step 5: Process columns containing integer data. Conversion here is simpler,
# but 'NULL' values need to be accounted for. For now 'NULL' values are not
# converted but remain 'NULL'. This is managed by the int_conv function.
#
#
#Currently at residence has unique representation in my data table as a binary
#string, so is handled differently.
tmp = [int_conv(i,1) for i in lst2[col["CurrentlyAtResidence"]]]
lst2[col["CurrentlyAtResidence"]] = tmp
#The other ints are handled pretty normally.
int_lst = ["UserId", "OId", "ThirtyDaysLate", "SixtyDaysLate", "NinetyDaysLate", "CreditLimit", "HighestBalanceAmount",
"MonthlyPayment", "UnpaidBalance", "CreditScoreOne", "Reason1-1", "Reason1-2", "Reason1-3",
"Reason1-4", "CreditScoreTwo", "Reason2-1", "Reason2-2", "Reason2-3", "Reason2-4", "NumberOfCurrentEmployers",
"NumberofSelfEmployment", "InquiryDatesNum", "Num_TypeofOffense", "TotalFees", "TermsMonthsCount", "MonthsReviewed"]
for i in int_lst:
#originally (j,10,0) below
tmp = [int_conv(j,0) for j in lst2[col[i]]]
lst2[col[i]] = tmp
# Step 6: Replace NULL values with the average values for that column.
for i in col:
l_temp = lst2[col[i]]
fltrd = filter(t_f, l_temp)
temp_avg = sum(fltrd)/(len(fltrd))
temp_new = [float_s(a, temp_avg) for a in l_temp]
lst2[col[i]] = temp_new
# Step 7: Transpose the table once again to return it to its original format.
lst_fin = [list(t) for t in zip(*lst2)]
#########################################################
# #
# #
# #
############ More Data Processing: ####################
########## Collapsing Multiple Rows ##################
# #
# #
# #
#########################################################
# Convert (almost entirely) processed data into a Numpy Array
a = numpy.array(lst_fin)
#Construct Unique List of User IDs:
ls = numpy.unique(a[:,0])
# bad_list: the list of values for operation status ID that signify a negative response.
#oids for this will be 232, 234, 235, 237
bad_list = [_o_i_d[name] for name in ["Denied", "Disqualified", "Insufficient Income", "Dormant"]]
# neut_list: the list of values for operation status ID that signify a neutral
# response.
# NOTE: It is unlikely there will be enough data points with the status IDs in
#neut_list for points to be grouped into that category after randomforest runs.
# NOTE: IF set to [] no neutral_categories will be attempted.
#
# Potential Choices for Neutral:
#neut_list = _o_i_d[name] for name in ["Conditional Approval" "Contingent Approval"]
#
neut_list = []
# Anything IN datatable but NOT in neut_list or bad_list: in good_list
# user_expand(relevant_list)
#
# Input: A list of rows in the data table that share the same user ID.
# These rows represent different credit history records on the user's part.
#
# Output: A single row that combines the data from up to 7 credit
# history records: arranged by Last Activity Date: the first two, middle
# three, and the last two. In addition, it includes maximum and average
# values for several differnet types of criteria accross those records.
#
# IF: There are less than 7 credit history records, empty spots will be
# filled by the medium record.
##############################################################################
def user_expand(relevant_list):
#BELOW: Combines raw data from the 7 Credit History Records
new_sorted = relevant_list[relevant_list[:,col["LastActivityDate"]].argsort()]
dealing_with = len(new_sorted)
indices_list = range(col["ThirtyDaysLate"], col["CreditScoreOne"])
n_list = indices_list[:col["PaymentPattern"]] + indices_list[col["PaymentPattern"]+1:col["TermsDescription"]] + indices_list[col["TermsDescription"]+1:]
left_most_half = new_sorted[:,n_list]
mid = round(numpy.median(range(0,dealing_with)))
index_list = [0,1,mid-1,mid,mid+1,dealing_with-2,dealing_with-1]
safe_index_list = [(mid,int(check))[check in range(0,dealing_with)] for check in index_list]
correct_portion = relevant_list[[0],[0,1] + range(col["CreditScoreOne"], 81)]
cur = correct_portion[1]
correct_portion[1] = (2.0,0.0,1.0)[(cur in bad_list) + (cur in neut_list)]
for i in safe_index_list:
correct_portion = numpy.append(correct_portion, left_most_half[i])
#BELOW: Calculates combined data from the 7 Credit History Records
leng_left = len(left_most_half)
thirty_late = sum(left_most_half[:,0])
thirty_late_max = max(left_most_half[:,0])
thirty_late_avg = thirty_late/float(leng_left)
thirty_late_std = numpy.std(left_most_half[:,0])
sixty_late = sum (left_most_half[:,1])
sixty_late_max = max(left_most_half[:,1])
sixty_late_avg = sixty_late/float(leng_left)
sixty_late_std = numpy.std(left_most_half[:,1])
ninety_late = sum(left_most_half[:,2])
ninety_late_max = max(left_most_half[:,2])
ninety_late_avg = ninety_late/float(leng_left)
ninety_late_std = numpy.std(left_most_half[:,2])
high_credit_lim_sum = sum(left_most_half[:,col["CreditLimit"]-3])
high_credit_lim_avg = high_credit_lim_sum/float(leng_left)
high_credit_limmax = max(left_most_half[:,col["CreditLimit"]-3])
high_credit_lim_min = min(left_most_half[:,col["CreditLimit"]-3])
high_credit_lim_std = numpy.std(left_most_half[:,col["CreditLimit"]-3])
high_bal_sum = sum(left_most_half[:,col["HighestBalanceAmount"]-3])
high_bal_avg = high_bal_sum/float(leng_left)
high_bal_max = max(left_most_half[:,col["HighestBalanceAmount"]-3])
high_bal_min = min(left_most_half[:,col["HighestBalanceAmount"]-3])
high_bal_std = numpy.std(left_most_half[:,col["HighestBalanceAmount"]-3])
monthly_pay_sum = sum(left_most_half[:,col["MonthlyPayment"]-3])
monthly_pay_avg = monthly_pay_sum/float(leng_left)
monthly_pay_max = max(left_most_half[:,col["MonthlyPayment"]-3])
monthly_pay_min = min(left_most_half[:,col["MonthlyPayment"]-3])
monthly_pay_std = numpy.std(left_most_half[:,col["MonthlyPayment"]-3])
terms_months_count_sum = sum(left_most_half[:,col["TermsMonthsCount"]-4])
terms_months_count_avg = terms_months_count_sum/float(leng_left)
terms_months_count_max = max(left_most_half[:,col["TermsMonthsCount"]-4])
terms_months_count_min = min(left_most_half[:,col["TermsMonthsCount"]-4])
terms_months_count_std = numpy.std(left_most_half[:,col["TermsMonthsCount"]-4])
prior_adv = max(left_most_half[:,col["PriorAdverseDate"]-4])
prior_adv_min = min(left_most_half[:,col["PriorAdverseDate"]-4])
prior_adv_avg = numpy.average(left_most_half[:,col["PriorAdverseDate"]-4])
""" image processing:
class AIT for full sky
ZEA for square region
TSplot special adapter for ZEA
author: <NAME> <EMAIL>
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/utilities/image.py,v 1.47 2017/02/09 19:04:37 burnett Exp $
"""
version = '$Revision: 1.47 $'.split()[1]
import sys, pylab, types, os
import math
import numpy as np
import pylab as pl
import pylab as plt
from matplotlib import pyplot, ticker
import matplotlib as mpl
from skymaps import SkyImage, SkyDir, double2, SkyProj,PySkyFunction,Hep3Vector
from math import exp
from numpy.fft import fft2,ifft2,fftshift
from scipy import optimize
import keyword_options
SkyImage.setNaN(np.nan)
class Ellipse(object):
def __init__(self, q):
""" q: ellipical parameters
a, b, phi
"""
self.q = q
def contour(self, r=1, count=50):
""" return set of points in around closed figure"""
s,c = math.sin(-self.q[2]), math.cos(self.q[2])
a,b = self.q[0],self.q[1]
x = []
y = []
for t in np.linspace(0, 2*math.pi, count):
ct,st = math.cos(t), math.sin(t)
x.append( r*(a*ct*s - b*st*c))
y.append( r*(a*ct*c + b*st*s))
return x,y
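# Illustrative usage (values assumed): an ellipse with semi-axes 1.0 and 0.5
# rotated by 30 degrees can be drawn with
#   x, y = Ellipse([1.0, 0.5, math.radians(30)]).contour()
#   plt.plot(x, y)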
class Rescale(object):
def __init__(self, image, nticks=5, galactic=False):
""" image: a SkyImage object
nticks: suggested number of ticks for the ticker
warning: fails if the north pole is in the image (TODO: figure out a sensible approach)
"""
# get ra range from top, dec range along center of SkyImage
nx,ny = image.nx, image.ny
self.nx=nx
self.ny=ny
# convenient lat, lon functions for pixel coords
lat = lambda x,y: image.skydir(x,y).l() if galactic else image.skydir(x,y).ra()
lon = lambda x,y: image.skydir(x,y).b() if galactic else image.skydir(x,y).dec()
xl,xr = lat(0,0), lat(nx,0)
if xl<xr: # did it span the boundary?
xr = xr-360
self.vmin, self.vmax = lon(0,0), lon(nx/2.,ny)
ticklocator = ticker.MaxNLocator(nticks, steps=[1,2,5])
self.uticks = [ix if ix>-1e-6 else ix+360\
#for ix in ticklocator.bin_boundaries(xr,xl)[::-1]] #reverse
for ix in ticklocator.tick_values(xr,xl)[::-1]] #reverse
self.ul = xl
self.ur = xr
#self.vticks = ticklocator.bin_boundaries(self.vmin,self.vmax)
self.vticks = ticklocator.tick_values(self.vmin,self.vmax)
if len(self.vticks)==0: # protect against rare situation
self.vticks = [self.vmin,self.vmax]
# extract positions in image coords, text labels
self.xticks = [image.pixel(SkyDir(x,self.vmin,SkyDir.GALACTIC if galactic else SkyDir.EQUATORIAL))[0]\
for x in self.uticks]
#self.yticks = [image.pixel(SkyDir(xl,v))[1] for v in self.vticks]
# proportional is usually good?
try:
yscale = ny/(lon(0,ny)-self.vmin)
self.yticks = [ (v-self.vmin)*yscale for v in self.vticks]
self.xticklabels = self.formatter(self.uticks)
self.yticklabels = self.formatter(self.vticks)
except Exception as msg:
print ('formatting failure in image.py: {}'.format(msg))
self.xticks=self.yticks=None
def formatter(self, t):
n=0
s = np.abs(np.array(t))+1e-9
for i in range(4):
#print (s, s-np.floor(s), (s-np.floor(s)).max())
if (s-np.floor(s)).max()<1e-3: break
s = s*10
n+=1
fmt = '%%5.%df'%n
return [(fmt% x).strip() for x in t]
def apply(self, axes):
if self.xticks is None:
return
#note remove outer ones
if len(self.xticks)>=3:
axes.set_xticks(self.xticks[1:-1])
axes.set_xticklabels(self.xticklabels[1:-1])
axes.xaxis.set_ticks_position('bottom')
#axes.set_xlim((0.5,self.nx+0.5)) # have to do again?
if len(self.yticks)>=3:
axes.set_yticks(self.yticks[1:-1])
axes.set_yticklabels(self.yticklabels[1:-1])
axes.yaxis.set_ticks_position('left')
#axes.set_ylim((0.5,self.ny+0.5)) # have to do again?
class AITproj(object):
def __init__(self, proj):
self.proj = proj
self.center = proj(0,0)
self.scale = ((proj(180,0)[0]-self.center[0])/180., (proj(0,90)[1]-self.center[1])/90)
def __call__(self,l,b):
r = self.proj(l,b)
return [(r[i]-self.center[i])/self.scale[i] for i in range(2)]
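# After this rescaling, a call like AITproj(proj)(l, b) returns coordinates
# in a normalized frame where l spans roughly [-180, 180] on the x axis and
# b spans [-90, 90] on the y axis, independent of the pixel size of proj.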
def draw_grid(self, labels=True, color='gray', pixelsize=0.5, textsize=8):
label_offset = 5/pixelsize
#my_axes = pylab.axes() #creates figure and axes if not set
# # pylab.matplotlib.interactive(False)
#my_axes.set_autoscale_on(False)
#my_axes.set_xlim(0, 360/pixelsize)
#my_axes.set_ylim(0, 180/pixelsize)
#my_axes.set_axis_off()
#my_axes.set_aspect('equal')
ait = AITproj(self.projector.sph2pix) # the projector to use
axes = self.axes
bs = np.arange(-90, 91, 5)
for l in np.hstack((np.arange(0, 360, 45),[180.01])):
lstyle = '-' if int(l)==180 or int(l)==0 else '--'
# axes.plot([ait(l,b)[0] for b in bs], [ait(l,b)[1] for b in bs], lstyle, color=color)
axes.plot([ait(l,b)[0] for b in bs], [ait(l,b)[1] for b in bs], lstyle, color=color)
if labels:
x,y = ait(l, 45)
axes.text(x,y, '%3.0f'%l ,size=textsize, ha='center')
ls = np.hstack((np.arange(180, 0, -5), np.arange(355, 180,-5), [180.01]))
for b in np.arange(-60, 61, 30):
lstyle = '-' if int(b)==0 else '--'
axes.plot([ait(l,b)[0] for l in ls], [ait(l,b)[1] for l in ls], lstyle, color=color)
if labels:
x,y = ait(180.1, b)
axes.text(x+label_offset,y+b/60*label_offset, '%+3.0f'%b, size=textsize, ha='center',va='center')
if labels:
for b in [90,-90]:
x,y = ait(0,b)
axes.text(x,y+b/90*label_offset,'%+3.0f'%b, size=textsize, ha='center',va='center')
class AIT_grid():
def __init__(self, fignum=20, axes=None, labels=True, color='gray', pixelsize=0.5, textsize=8, linestyle='-'):
"""Draws gridlines and labels for map.
"""
if axes is None:
fig=plt.figure(fignum, figsize=(12,6))
fig.clf()
self.axes = fig.gca()
else: self.axes = axes
self.pixelsize = pixelsize
xsize,ysize = 325,162
crpix = double2(xsize/pixelsize/2., ysize/pixelsize/2.)
crval = double2(0,0)
cdelt = double2(-pixelsize, pixelsize)
self.proj = SkyProj('AIT', crpix, crval, cdelt, 0, True)
self.axes.set_autoscale_on(False)
self.axes.set_xlim(0, 360/self.pixelsize)
self.axes.set_ylim(0, 180/self.pixelsize)
self.axes.set_axis_off()
self.axes.set_aspect('equal')
self.extent= (self.ait(180,0)[0],self.ait(180.001,0)[0], self.ait(0,-90)[1], self.ait(0,90)[1])
label_offset = 5/self.pixelsize
bs = np.arange(-90, 91, 5)
for l in np.hstack((np.arange(0, 360, 45),[180.01])):
self.axes.plot([self.ait(l,b)[0] for b in bs], [self.ait(l,b)[1] for b in bs], linestyle, color=color)
if labels:
x,y = self.ait(l, 45)
self.axes.text(x,y, '%3.0f'%l ,size=textsize, ha='center')
ls = np.hstack((np.arange(180, 0, -5), np.arange(355, 180,-5), [180.01]))
for b in np.arange(-60, 61, 30):
lstyle = '-' if int(b)==0 else linestyle
self.axes.plot([self.ait(l,b)[0] for l in ls], [self.ait(l,b)[1] for l in ls], lstyle, color=color)
if labels:
x,y = self.ait(180.1, b)
self.axes.text(x+label_offset,y+b/60*label_offset, '%+3.0f'%b, size=textsize, ha='center',va='center')#, weight = 'bold')
if labels:
for b in [90,-90]:
x,y = self.ait(0,b)
self.axes.text(x,y+b/90*label_offset,'%+3.0f'%b, size=textsize, ha='center',va='center')
def skydir(self, x, y):
""" from pixel coordinates to sky """
return SkyDir(x+0.5, y+0.5, self.proj)
def pixel(self, sdir):
""" return pixel coordinates for the skydir
"""
x,y=self.proj.sph2pix(sdir.l(),sdir.b())
return (x-0.5,y-0.5)
def ait(self, l, b):
" convert lon, lat to car "
# floats seem to be necessary: gcc SWIG oddity?
z = self.proj.sph2pix(float(l), float(b))
return (float(z[0]),float(z[1]))
def plot(self, sources, marker='o', text=None, fontsize=8, colorbar=False, **kwargs):
""" plot symbols at points
sources: list of SkyDir
keywords:
text: optional text strings (same length as sources if specified)
fontsize: for text
marker: symbol to use, see scatter doc.
colorbar: set True to add a colorbar. (See method to attach label)
kwargs: applied to scatter, use c as an array of floats, with optional
cmap=None, norm=None, vmin=None, vmax=None
to make color key for another value
in that case, you can use Axes.colorbar
set s to change from default size =10
"""
X=[]
Y=[]
for i,s in enumerate(sources):
x,y = self.ait(s.l(),s.b())
X.append(x)
Y.append(y)
if text is not None:
self.axes.text(x,y,text[i],fontsize=fontsize)
#self.axes.plot(X,Y, symbol, **kwargs)
self.col=self.axes.scatter(X,Y, marker=marker, **kwargs)
if colorbar: self.colorbar()
plt.draw_if_interactive()
def colorbar(self, label=None, **kwargs):
""" attach a colorbar to a plot, expect it was generated with the 'c' argument
"""
if 'shrink' not in kwargs: kwargs['shrink'] = 0.7
fig = self.axes.figure
if 'col' not in self.__dict__:
raise Exception('colorbar called with no mappable defined, say by plot(..., c=...)')
cb =fig.colorbar(self.col, cax=None, ax=self.axes, **kwargs)
fig.sca(self.axes) # restore selected axes object
if label: cb.set_label(label)
plt.draw_if_interactive()
class AIT(object):
""" Manage a full-sky image of a SkyProjection or SkyFunction, wrapping SkyImage
"""
defaults= (
('pixelsize', 0.5, 'size, in degrees, of pixels'),
('galactic', True, 'galactic or equatorial coordinates'),
('fitsfile', '', 'if set, write the projection to a FITS file'),
('proj', 'AIT', 'could be ''CAR'' for carree or ''ZEA'': used by wcslib'),
('center', None, 'if default center at (0,0) in coord system'),
('size', 180, 'make less for restricted size'),
('galactic', True, 'use galactic coordinates'),
('earth', False, 'if looking down at Earth'),
('axes', None, 'set to use, otherwise create figure if necessary'),
('nocolorbar',False, 'set to turn off colorbar' ),
('cbtext', None, 'text for colorbar'),
('background',None, 'if set, a value to apply to NaN: default is to not set pixels\n'
'nb: do not set 0 if log scale'),
('grid_color',None, 'if set, draw a grid with given color. To annotate it, use the grid method'),
)
@keyword_options.decorate(defaults)
def __init__(self, skyfun, **kwargs):
"""
skyfun SkyProjection or SkyFunction object
"""
keyword_options.process(self, kwargs)
self.skyfun = skyfun
# set up, then create a SkyImage object to perform the projection to a grid
if self.center is None:
self.center = SkyDir(0,0, SkyDir.GALACTIC if self.galactic else SkyDir.EQUATORIAL)
self.skyimage = SkyImage(self.center, self.fitsfile, self.pixelsize,
self.size, 1, self.proj, self.galactic, self.earth)
# we want access to the projection object, to allow interactive display via pix2sph function
self.projector = self.skyimage.projector()
self.x = self.y = 100 # initial def
self.nx, self.ny = self.skyimage.naxis1(), self.skyimage.naxis2()
self.center = self.projector.sph2pix(0,0)
self.scale = ((self.projector.sph2pix(180,0)[0]-self.center[0])/180.,
(self.projector.sph2pix( 0,90)[1]-self.center[1])/90)
if skyfun is not None:
self.fill(skyfun)
self.setup_image(self.earth)
else:
# special case: want to set pixels by hand
pass
def fill(self, skyfun):
""" fill the image with the skyfunction"""
if skyfun.__class__.__name__ !='PySkyFunction':
def pyskyfun(v):
return skyfun(SkyDir(Hep3Vector(v[0],v[1],v[2])))
self.skyimage.fill(PySkyFunction(pyskyfun))
else:
self.skyimage.fill(skyfun)
def setup_image(self, earth=False):
# now extract stuff for the pylab image, creating a masked array to deal with the NaN values
self.image = np.array(self.skyimage.image()).reshape((self.ny, self.nx))
self.mask = np.isnan(self.image)
if self.background is None:
self.masked_image = np.ma.array( self.image, mask=self.mask)
else:
self.masked_image = np.ma.array( self.image)
self.masked_image[self.mask]=self.background
size = self.size
if not earth:
self.extent = (180,-180, -90, 90) if size==180 else (size, -size, -size, size)
else:
self.extent = (-180,180, -90, 90) if size==180 else (-size, size, -size, size)
self.vmin ,self.vmax = self.skyimage.minimum(), self.skyimage.maximum()
def __call__(self,l,b):
""" return x, y plot coordinates given l,b """
r = self.projector.sph2pix(l,b)
return [(r[i]-self.center[i])/self.scale[i] for i in range(2)]
def plot_coord(self, sdir):
""" return x,y plot coordinates given a SkyDir, or (ra,dec) tuple"""
if not isinstance(sdir, SkyDir): sdir=SkyDir(*sdir)
if self.galactic:
return self(sdir.l(),sdir.b())
return self(sdir.ra(), sdir.dec())
def grid(self, labels=False, color='gray', textsize=8, label_offset=10):
"""Draws gridlines and optional labels for map. """
bs = np.arange(-90, 91, 5)
for l in np.hstack((np.arange(0, 360, 45),[180.01])):
lstyle = '-' if int(l)==180 or int(l)==0 else '--'
#self.axes.plot([self(l,b)[0] for b in bs], [self(l,b)[1] for b in bs], lstyle, color=color)
self.axes.plot(map(lambda b: self(l,b)[0], bs),
map(lambda b: self(l,b)[1], bs), lstyle, color=color)
if labels:
x,y = self(l, 45)
self.axes.text(x,y, '%3.0f'%l ,size=textsize, ha='center')
ls = np.hstack((np.arange(180, 0, -5), np.arange(355, 180,-5), [180.01]))
for b in np.arange(-60, 61, 30):
lstyle = '-' if int(b)==0 else '--'
self.axes.plot(map(lambda l: self(l,b)[0], ls),
map(lambda l: self(l,b)[1], ls), lstyle, color=color)
if labels:
x,y = self(180.1, b)
self.axes.text(x+label_offset,y+b/60*label_offset, '%+3.0f'%b,
size=textsize, ha='center',va='center')
if labels:
for b in [90,-90]:
x,y = self(0,b)
self.axes.text(x,y+b/90*label_offset,'%+3.0f'%b,
size=textsize, ha='center',va='center')
def plot(self, sources, marker='o', text=None,
colorbar=False, text_kw={}, **kwargs):
""" plot symbols at points
sources: list of SkyDir
keywords:
text: optional text strings (same length as sources if specified)
text_kw: dict
keywords for the text function
marker: symbol to use, see scatter doc.
colorbar: set True to add a colorbar. (See method to attach label)
kwargs: applied to scatter, use c as an array of floats, with optional
cmap=None, norm=None, vmin=None, vmax=None
to make color key for another value
in that case, you can use Axes.colorbar
set s to change from default size =10
"""
if 'fontsize' not in text_kw: text_kw['fontsize']=8
X=[]
Y=[]
for i,s in enumerate(sources):
x,y = self.plot_coord(s)
X.append(x)
Y.append(y)
if text is not None:
self.axes.text(x,y,text[i], **text_kw )
self.source_cb=self.axes.scatter(X,Y, marker=marker, **kwargs)
if colorbar: assert False, "not implemented" #self.colorbar()
def on_move(self, event):
"""Reports mouse's position in galactic coordinates."""
from numpy import fabs
if event.xdata == None or event.ydata == None:
pass
else:
try:
coords = self.proj.pix2sph(event.xdata, event.ydata)
self.poslabel.set_text("long=%1.2f\n lat=%1.2f" %(coords[0],coords[1]))
except:
self.poslabel.set_text("")
self.figure.canvas.draw()
def imshow(self, title=None, scale='linear', title_kw={}, **kwargs):
"""run imshow
scale : string defining the translation or a ufunc
the string can specify linear [default], log for log10, sqrt, or asinh
"""
nocolorbar =kwargs.pop('nocolorbar', self.nocolorbar)
grid_color = kwargs.pop('grid_color', self.grid_color)
cb_kw = kwargs.pop('colorbar_kw',
dict(orientation='vertical', shrink=0.6 if self.size==180 else 1.0))
from numpy import ma
scale_fun = kwargs.pop('fun', lambda x : x)
# set defaults
imshow_kw =dict(origin='lower', interpolation='nearest', extent=self.extent)
imshow_kw.update( kwargs)
if self.axes is None:
self.figure, self.axes = pylab.subplots(1,1, figsize=(10,5))
if self.size==180: self.axes.set_axis_off()
# set initially to all white for eps
self.axes.imshow(np.ones([self.nx,self.ny,3]), **imshow_kw)
# use masked array to set the oval
fun_dict = dict(linear=scale_fun, log=ma.log10, sqrt=ma.sqrt, asinh=ma.arcsinh)
fun = fun_dict.get(scale, None) if type(scale)==types.StringType else scale
if fun is None:
raise Exception('bad scale function: %s, must be one of %s'%(scale,fun_dict.keys()))
m = self.axes.imshow(fun(self.masked_image), **imshow_kw)
if not nocolorbar:
self.colorbar =self.axes.figure.colorbar(m, ax=self.axes, **cb_kw)
self.colorbar.set_label(self.cbtext)
self.mappable = self.colorbar.mappable
else:
self.mappable=m
self.title(title, **title_kw)
if grid_color:
self.grid(color=grid_color)
# for interactive formatting of the coordinates when hovering
##pylab.gca().format_coord = self.format_coord # replace the function on the fly!
def pcolor(self, title=None, scale='linear', **kwargs):
'run pcolor'
from numpy import ma, array
import pylab
#self.axes = pylab.axes()
if self.galactic:
xvalues=array([self.skydir(i,0).l() for i in range(self.nx+1)])
yvalues=array([self.skydir(0,i).b() for i in range(self.ny+1)])
pylab.xlabel('glon'); pylab.ylabel('glat')
else:
xvalues=array([self.skydir(i,0).ra() for i in range(self.nx+1)])
yvalues=array([self.skydir(0,i).dec() for i in range(self.ny+1)])
pylab.xlabel('ra'); pylab.ylabel('dec')
if scale=='linear': pylab.pcolor(self.masked_image, **kwargs)
elif scale=='log': pylab.pcolor(ma.log10(self.masked_image), **kwargs)
else: raise Exception('bad scale: %s'%scale)
self.colorbar=pylab.colorbar(orientation='horizontal', shrink=1.0 if self.size==180 else 1.0)
self.title(title)
if self.axes is None: self.axes = pylab.gca()
def axislines(self, color='black', **kwargs):
' overplot axis lines'
import pylab
self.axes.axvline(0, color=color, **kwargs)
self.axes.axhline(0, color=color, **kwargs)
pylab.axis(self.extent)
def title(self, text=None, **kwargs):
' plot a title, default the name of the SkySpectrum'
try:
self.axes.set_title( text if text is not None else self.skyfun.name(), **kwargs)
except AttributeError: #no name?
pass
def skydir(self, x, y):
" from pixel coordinates to sky "
xpixel = (180-x)*float(self.nx)/360.
ypixel = (y+90)*float(self.ny)/180.
if self.proj.testpix2sph(xpixel,ypixel) !=0: return None #outside valid region
sdir = SkyDir(x, y, self.proj)
return sdir
def pixel(self, sdir):
""" return pixel coordinates for the skydir"""
if self.galactic: return self.proj.sph2pix(sdir.l(),sdir.b())
return self.proj.sph2pix(sdir.ra(),sdir.dec())
def format_coord(self, x, y):
" replacement for Axes.format_coord"
sdir = self.skydir(x,y)
val = self.skyfun(sdir)
return 'ra,dec: (%7.2f,%6.2f); l,b: (%7.2f,%6.2f), value:%6.3g' %\
( sdir.ra(), sdir.dec(), sdir.l(), sdir.b(), val)
def scale_bar(self, delta=1,text='$1^o$', color='k'):
""" draw a scale bar in lower left """
xmin, xmax= self.axes.get_xlim()
ymin, ymax = self.axes.get_ylim()
x1,y1 = 0.95*xmin + 0.05*xmax, 0.95*ymin+0.05*ymax
sd = self.skydir(x1,y1)
x2,y2 = self.pixel(SkyDir(sd.ra()-delta/math.cos(math.radians(sd.dec())), sd.dec()))
self.axes.plot([x1,x2],[y1,y1], linestyle='-', color=color, lw=2)
self.axes.text( (x1+x2)/2, (y1+y2)/2+self.ny/200., text, ha='center', color=color, fontsize=10)
def box(self, image, **kwargs):
""" draw a box at the center, the outlines of the image """
if 'lw' not in kwargs: kwargs['lw']=2
nx,ny = image.nx, image.ny
corners = [(0,0), (0,ny), (nx,ny), (nx,0), (0,0) ]
dirs = [image.skydir(x,y) for x,y in corners]
rp = [ self.pixel(sdir) for sdir in dirs]
self.axes.plot( [r[0] for r in rp], [r[1] for r in rp], 'k', **kwargs)
def galactic_map(skydir, axes=None, pos=(0.77,0.88), width=0.2,
color='w', marker='s', markercolor='r', markersize=40):
"""
insert a little map showing the galactic position
skydir: sky coordinate for point
axes: Axes object, use gca() if None
pos: location within the map
width: width, fraction of map size
color: line color
marker, markercolor, markersize ['s', 'r', 40] plot symbol,color,size (see scatter)
returns the AIT_grid to allow plotting other points
"""
# create new a Axes object positioned according to axes that we are using
if axes is None: axes=pl.gca()
b = axes.get_position()
xsize, ysize = b.x1-b.x0, b.y1-b.y0
axi = axes.figure.add_axes((b.x0+pos[0]*xsize, b.y0+pos[1]*ysize, width*xsize, 0.5*width*ysize))
ait_insert=AIT_grid(axes=axi, labels=False, color=color)
ait_insert.plot([skydir], s=markersize, marker=marker, c=markercolor, zorder=100)
axes.figure.sca(axes) # restore previous axes
return ait_insert
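# Illustrative usage (coordinates assumed): add an inset to the current axes
# and then mark a second position on it:
#   grid = galactic_map(SkyDir(83.6, 22.0), axes=plt.gca(), color='k')
#   grid.plot([SkyDir(266.4, -29.0)], c='b')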
class ZEA(object):
""" Manage a square image SkyImage
"""
defaults = (
('size', 2, 'size of image in degrees'),
('pixelsize',0.1, 'size, in degrees, of pixels'),
('galactic', False, 'galactic or equatorial coordinates'),
('fitsfile', '', 'set non-empty to write out as a FITS file'),
('axes', None, 'Axes object to use: \nif None, ...'),
('nticks', 5, 'number of tick marks to attempt'),
('nocolorbar', False, 'set to suppress the colorbar'),
50*'-',
('proj', 'ZEA', 'projection name: can change if desired'),
('size2', -1, 'vertical size, if specified' ),
)
@keyword_options.decorate(defaults)
def __init__(self, center, **kwargs):
"""
center SkyDir specifying center of image or tuple
tuple interpreted as (l,b) or (ra,dec) depending on self.galactic
"""
keyword_options.process(self, kwargs)
if not isinstance(center, SkyDir):
self.center=SkyDir(center[0],center[1], SkyDir.GALACTIC if self.galactic else SkyDir.EQUATORIAL)
else:
self.center = center
# set up, then create a SkyImage object to perform the projection to a grid and manage an image
self.skyimage = SkyImage(self.center, self.fitsfile, self.pixelsize, self.size, 1, self.proj, self.galactic, False, self.size2)
# now extract stuff for the pylab image
self.nx, self.ny = self.skyimage.naxis1(), self.skyimage.naxis2()
# we want access to the projection object, to allow interactive display via pix2sph function
self.projector = self.skyimage.projector()
self.set_axes()
self.cid = None #callback id
# note the 1/2 pixel offset: WCS convention is that pixel centers are integers starting from 1.
# here we use 0 to nx, 0 to ny standard for image.
def skydir(self, x, y):
""" from pixel coordinates to sky """
return SkyDir(x+0.5, y+0.5, self.projector)
def pixel(self, sdir):
""" return coordinates for the skydir
"""
x,y = self.projector.sph2pix(sdir.ra(),sdir.dec()) \
if not self.galactic else self.projector.sph2pix(sdir.l(),sdir.b())
return (x-0.5,y-0.5)
def inside(self, sdir):
""" is the direction sdir inside the boundary """
x,y =self.pixel(sdir)
return x> 0 and y>0 and x<self.nx and y<self.ny
def fill(self, skyfun):
""" fill the image from a SkyFunction
sets self.image with numpy array appropriate for imshow
"""
if skyfun.__class__.__name__ !='PySkyFunction':
def pyskyfun(v):
return skyfun(SkyDir(Hep3Vector(v[0],v[1],v[2])))
self.skyimage.fill(PySkyFunction(pyskyfun))
else:
self.skyimage.fill(skyfun)
self.image = np.array(self.skyimage.image()).reshape((self.ny, self.nx))
self.vmin ,self.vmax = self.skyimage.minimum(), self.skyimage.maximum()
return self.image
def set_axes(self):
""" configure the axes object
if self.axes is None, simply use gca()
(set coordinate scale offset by 0.5 from WCS standard)
"""
if self.axes is None:
figure = pyplot.gcf()
if len(figure.get_axes())==0:
# no axes in the current figure: add one that has equal aspect ratio
h,w = figure.get_figheight(), figure.get_figwidth()
if w>h:
figure.add_axes((0.18, 0.15, h/w*0.75, 0.75))
else:
figure.add_axes((0.18, 0.15, 0.75, w/h*0.75))
self.axes=pyplot.gca()
self.axes.set_aspect(1)
self.axes.set_xlim((0.0,self.nx))
self.axes.set_ylim((0.0,self.ny))
self.axes.set_autoscale_on(False)
r =Rescale(self,self.nticks, galactic = self.galactic)
r.apply(self.axes)
labels = ['$l$','$b$'] if self.galactic else ['RA','Dec']
self.axes.set_xlabel(labels[0]);self.axes.set_ylabel(labels[1])
def grid(self, nticks=None, **kwargs):
""" draw a grid
"""
label_offset = 5 # units degrees?
if nticks is None: nticks=self.nticks
r = Rescale(self, nticks, galactic = self.galactic)
r.apply(self.axes)
self.axes.xaxis.set_ticks_position('none')
try:
# need this for 1.0.0 due to bug that turns off labels
self.axes.yaxis.set_tick_params(which='both', right=False, left=False)
except:
self.axes.yaxis.set_ticks_position('none')
uticks, vticks = r.uticks, r.vticks
cs = SkyDir.GALACTIC if self.galactic else SkyDir.EQUATORIAL
for u in uticks:
w = [self.pixel(SkyDir(u,v,cs)) for v in np.linspace(r.vmin,r.vmax, 2*nticks)]
self.axes.plot([q[0] for q in w], [q[1] for q in w], '-k', **kwargs)
for v in vticks:
w = [self.pixel(SkyDir(u,v,cs)) for u in np.linspace(r.ul, r.ur,2*nticks)]
self.axes.plot([q[0] for q in w], [q[1] for q in w], '-k', **kwargs)
return r
def scale_bar(self, delta=1,text='$1^o$', color='k'):
""" draw a scale bar in lower left """
xmin, xmax= self.axes.get_xlim()
ymin, ymax = self.axes.get_ylim()
x1,y1 = 0.95*xmin + 0.05*xmax, 0.95*ymin+0.05*ymax
sd = self.skydir(x1,y1)
if self.galactic:
x2,y2 = self.pixel(SkyDir(sd.l()-delta/math.cos(math.radians(sd.b())), sd.b(),SkyDir.GALACTIC))
else:
x2,y2 = self.pixel(SkyDir(sd.ra()-delta/math.cos(math.radians(sd.dec())), sd.dec()))
self.axes.plot([x1,x2],[y1,y1], linestyle='-', color=color, lw=3)
self.axes.text( (x1+x2)/2, (y1+y2)/2+self.ny/80., text, ha='center', color=color)
def imshow(self, **kwargs):
""" run imshow on the image, presumably set by a fill: set up for colorbar.
"""
if 'image' not in self.__dict__: raise Exception('no data to show: must run fill first')
# set up kw
self.imshow_kw = dict(cmap=None, norm=None, interpolation='nearest')
self.imshow_kw.update(kwargs)
fun = self.imshow_kw.pop('fun', lambda x: x)
self.cax = self.axes.imshow(fun(self.image), **self.imshow_kw)
def colorbar(self, label=None, **kwargs):
"""
draw a color bar using the pylab colorbar facility
note that the 'shrink' parameter needs to be adjusted if not a full figure
Must have called imshow, which will will be used for default cmap, norm
returns the colorbar object
"""
if self.nocolorbar: return
if 'cax' not in self.__dict__: raise Exception('You must call imshow first')
cb_kw=dict(orientation= 'vertical',
pad = 0.01,
ticks = ticker.MaxNLocator(4),
fraction=0.10,
shrink = 1.0,
cmap = self.imshow_kw['cmap'],
norm = self.imshow_kw['norm'],
)
cb_kw.update(kwargs)
self.cb=self.axes.figure.colorbar(self.cax, **cb_kw)
if label is not None: self.cb.set_label(label)
return self.cb
def box(self, image, **kwargs):
""" draw a box at the center, the outlines of the image
+image An object of this class, or implementing the skydir function
"""
if 'lw' not in kwargs: kwargs['lw']=2
nx,ny = image.nx, image.ny
corners = [(0,0), (0,ny), (nx,ny), (nx,0), (0,0) ]
dirs = [image.skydir(x,y) for x,y in corners]
rp = [ self.pixel(sdir) for sdir in dirs]
self.axes.plot( [r[0] for r in rp], [r[1] for r in rp], 'k', **kwargs)
def plot_source(self, name, source, symbol='+', fontsize=10, **kwargs):
""" plot symbols at points
name: text string
source: a SkyDir
"""
x,y = self.pixel(source)
if x<0 or x> self.nx or y<0 or y>self.ny: return False
self.axes.plot([x],[y], symbol,
markersize=kwargs.pop('markersize',12), **kwargs)
#self.axes.text(x,y, name, fontsize=fontsize, **kwargs)
if name is not None:
self.axes.text( x+self.nx/100., y+self.nx/100., name,
fontsize=fontsize, **kwargs)
return True
def plot(self, sources, marker='o', text=None,
colorbar=False, text_offset=(0,0),text_kw={},
c=None, s=20, **kwargs):
""" plot symbols at points
see AIT.plot
"""
if 'fontsize' not in text_kw: text_kw['fontsize']=8
X=[]
Y=[]
C=[] if c is not None else None
dx,dy=text_offset
for i,t in enumerate(sources):
if not self.inside(t): continue
x,y = self.pixel(t)
X.append(x)
Y.append(y)
if c is not None: C.append(c[i])
if text is not None:
self.axes.text(x+dx,y+dy,text[i], **text_kw )
self.source_cb=self.axes.scatter(X,Y,c=C, marker=marker, s=s, **kwargs)
#if colorbar: assert False, "not implemented" #self.colorbar()
if colorbar:
plt.colorbar(self.source_cb) #self.colorbar()
def cross(self, sdir, size, text=None, **kwargs):
""" draw a cross at sdir,
size: half-length of each arm, in deg.
"""
x,y = self.pixel(sdir)
if x<0 or x> self.nx or y<0 or y>self.ny: return False
pixelsize = self.pixelsize
delta = size/pixelsize
axes = self.axes
axes.plot([x-delta, x+delta], [y,y], '-k', **kwargs)
axes.plot([x,x], [y-delta, y+delta], '-k', **kwargs)
if text is not None:
if 'lw' in kwargs: kwargs.pop('lw') # allow lw for the lines.
axes.text(x,y, text, **kwargs)
return True
def ellipse(self, sdir, par, symbol='-', **kwargs ):
""" sdir: SkyDir or (ra, dec) or (l,b)
ellipse parameters: a, b, ang (all deg)
"""
if not isinstance(sdir, SkyDir):
sdir = SkyDir(sdir[0],sdir[1], SkyDir.GALACTIC if self.galactic else SkyDir.EQUATORIAL)
x0,y0 = self.pixel(sdir)
a,b,ang = par
if self.galactic:
# adjust angle for local coordinate
up = SkyDir(sdir.ra()+a, sdir.dec())
xup,yup =self.pixel(up)
ang += np.degrees(np.arctan2( yup-y0,xup-x0))
ellipse = Ellipse([a,b,np.radians(ang)])
x,y = ellipse.contour(1.0)
pixelsize=self.pixelsize #scale for plot
self.axes.plot(x0+np.asarray(x)/pixelsize, y0+np.asarray(y)/pixelsize, symbol, **kwargs)
def clicker(self, onclick=None):
""" enable click processing: default callback prints the location
callback example:
def default_onclick(event):
print ('button %d, %s' % (event.button, zea.skydir(event.xdata,event.ydata)))
"""
def default_onclick(event):
print ('button %d, %s' % (event.button, self.skydir(event.xdata,event.ydata)))
if onclick==None: onclick=default_onclick
if self.cid is not None: self.noclicker()
self.cid=self.axes.figure.canvas.mpl_connect('button_press_event', onclick)
def noclicker(self):
if self.cid is not None: self.axes.figure.canvas.mpl_disconnect(self.cid)
self.cid=None
def smooth(self,scale=0.1,smoother=None):
""" smooth the image using a Gaussian kernel. Reverse process by calling ZEA.unsmooth.
NB -- if more than one image with the same dimension is to be smoothed, it is
more computationally efficient to create a single GaussSmoothZEA object and use it
to make smoothed images. This can be done manually, or using the smoother returned
by this message and passing it as the argument for future calls to smooth for
other ZEA objects.
scale: the smoothing scale (std. dev.) in deg
returns: the GaussSmoothZEA object for use in smoothing additional images
"""
if 'image' not in self.__dict__.keys(): return
gsz = smoother if smoother is not None else GaussSmoothZEA(self,scale)
self.original = self.image.copy()
self.image = gsz(self.image)
return gsz
def unsmooth(self):
""" replace smoothed image with original unsmoothed image."""
if 'original' not in self.__dict__.keys(): return
self.image = self.original
self.__dict__.pop('original')
def circle(self, sdir, size, fill=False, **kwargs):
"""draw a cirle
"""
self.axes.add_artist(plt.Circle(self.pixel(sdir), size/self.pixelsize, fill=fill, **kwargs))
def ZEA_test(ra=90, dec=80, size=5, nticks=8, galactic=False, **kwargs):
""" exercise (most) everything """
pyplot.clf()
q = ZEA(SkyDir(ra,dec), size=size, nticks=nticks, galactic=galactic, **kwargs)
q.grid(color='gray')
q.scale_bar(1, '$1^0$')
q.axes.set_title('test of ZEA region plot')
t=q.cross( SkyDir(ra,dec), 1, 'a red cross, arms +/- 1 deg', color='r', lw=2)
if not t: print ('failed to plot the cross')
q.plot_source('(80,76)', SkyDir(80,76), 'd')
q.plot_source('(110,74)', SkyDir(110,74), 'x')
q.ellipse( SkyDir(ra,dec), (1, 0.25, 0))
for dec in np.arange(-90, 91, 2):
q.plot_source( '(%d,%d)'%(ra,dec), SkyDir(ra,dec), 'x')
#def myfun(v): return v[0] # x-component of skydir to make a pattern
#q.fill(PySkyFunction(myfun))
q.fill(lambda sd: sd.ra())
q.imshow()
q.colorbar()
q.axes.figure.show()
return q
class ZEA_from_fits(ZEA):
""" subclass of ZEA with input FITS file, produced by ZEA"""
defaults = ZEA.defaults
@keyword_options.decorate(defaults)
def __init__(self, filename, **kwargs):
"""
filename: string | skymaps.SkyImage object
"""
keyword_options.process(self,kwargs)
if not isinstance(filename, SkyImage):
try:
self.skyimage =SkyImage(filename)
except Exception as msg:
raise Exception('failed to load file %s: %s)' % (filename, msg))
else:
self.skyimage=filename
s = self.skyimage
image_array = np.array(s.image())
self.nx,self.ny = s.naxis1(), s.naxis2()
self.projector = s.projector()
image_array.resize(self.nx,self.ny)
self.image=image_array
self.center = SkyDir(*self.projector.pix2sph(self.nx/2,self.ny/2.))
left = SkyDir(*self.projector.pix2sph(0,self.ny/2.))
edge = SkyDir(*self.projector.pix2sph(self.nx,self.ny/2.))
self.size = 2.*np.degrees(self.center.difference(edge))
self.set_axes()
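# Usage sketch for ZEA_from_fits (illustrative only; 'saved_zea.fits' is a
# placeholder name for a FITS file previously written by a ZEA object, not a
# file shipped with this module):
def _example_load_zea_fits(filename='saved_zea.fits'):
    z = ZEA_from_fits(filename)   # rebuilds center, size and image from the FITS header
    z.imshow()                    # display the stored image array
    z.grid(color='gray')
    return z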
class TSplot(object):
"""
Create a "TS plot"
Uses the ZEA class for display
"""
defaults = dict(
pixelsize = None, # passed to ZEA
axes = None, # passed to ZEA
nticks = 4, # passed to ZEA
fitsfile = '',
galmap = True, # determine if overlay a galactic map
galpos = (0.77,0.88), # position if galmap set
scalebar = True,
)
def __init__(self, tsmap, center, size, **kwargs):
"""
parameters:
*tsmap* a SkyFunction, that takes a SkyDir argument and returns a value
*center* SkyDir direction to center the plot
*size* (degrees) width of plot
*pixelsize* [None] size (degrees) of a pixel: if not specified, will be size/10
*axes* [None] Axes object to use: if None, use current
*nticks* [4] Suggestion for labeling
*fitsfile*['']
*galmap* [True] overplot a little map in galactic coordinates showing the position
*scalebar" [True] overplot a scalebar in lower left
**kwargs additional args for ZEA, like galactic
"""
self.__dict__.update(TSplot.defaults)
for key in self.__dict__.keys():
if key in kwargs: self.__dict__[key] = kwargs.pop(key)
self.tsmap = tsmap
self.size=size
if self.pixelsize is None: self.pixelsize=size/10.
npix = round(size/self.pixelsize)
self.pixelsize=size/npix
self.zea= ZEA(center, size=size, pixelsize=self.pixelsize, axes=self.axes,
nticks=self.nticks,fitsfile=self.fitsfile, **kwargs)
print ('TSplot: filling %d pixels (size=%.2f, npix=%d)...'%( (size/self.pixelsize)**2, size, npix))
sys.stdout.flush()
self.zea.fill(tsmap)
# create new image that is the significance in sigma with respect to local max
self.tsmaxpos=tsmaxpos = find_local_maximum(tsmap, center) # get local maximum, then check that is in the image
x,y = self.zea.pixel(tsmaxpos)
if x>=0 and x < self.zea.nx and y>=0 and y<self.zea.ny:
tsmaxval = tsmap(tsmaxpos)
else: # not in image: use the maximum actually in the image
tsmaxval = self.zea.image.max()
tmap = tsmaxval-self.zea.image
tmap[tmap<0] = 0
self.image =np.sqrt(tmap)
self.cb=None
# np.sqrt(-2*np.log(1 - np.array([0.68, 0.95, 0.99])))
self.clevels = np.array([1.51, 2.45, 3.03])
def show(self, colorbar=True):
"""
Generate the basic plot, with contours, scale bar, color bar, and grid
"""
norm2 = mpl.colors.Normalize(vmin=0, vmax=5)
cmap2 = mpl.cm.hot_r
self.nx,self.ny = self.image.shape
axes = self.zea.axes
t = axes.imshow(self.image, origin='lower',
extent=(0,self.nx,0,self.ny),
cmap=cmap2, norm=norm2,
interpolation='bilinear')
if colorbar:
if self.cb is None:
self.cb = pyplot.colorbar(t, ax=axes,
cmap=cmap2, norm=norm2,
ticks=ticker.MultipleLocator(),
orientation='vertical',
#shrink=1.0,
)
self.cb.set_label(r'$\mathrm{sqrt(TS difference)}$')
ct=axes.contour( np.arange(0.5, self.nx,1), np.arange(0.5, self.ny, 1), self.image,
self.clevels,
colors='k', linestyles='-' )
if axes.get_xlim()[0] !=0:
print ('Warning: contour modified: limits', axes.get_xlim(), axes.get_ylim())
cfmt={}
for key,t in zip(self.clevels,['68%','95%', '99%']): cfmt[key]=t
plt.clabel(ct, fmt=cfmt, fontsize=8)
#axes.set_xlim((0,nx)); axes.set_ylim((0,ny))
#print ('after reset', axes.get_xlim(), axes.get_ylim())
if self.scalebar:
t = 3
if self.size< 0.03*t:
self.zea.scale_bar(1/60., "1'", color='w')
elif self.size<0.6*t:
self.zea.scale_bar(0.1, "$0.1^o$", color='w')
elif self.size<1.1*t:
self.zea.scale_bar(0.5, "$0.5^o$", color='w')
else:
self.zea.scale_bar(1.0, '$1^o$', color='w')
self.zea.grid(color='gray')
if self.galmap:
galactic_map(self.zea.center, axes=self.axes,
color='w', marker='s', markercolor='r', pos=self.galpos);
def overplot(self, quadfit, sigma=1.0,contours=None, **kwargs):
"""
Overplot contours from a fit to the TS surface
quadfit: either: a uw.like.quadform.Localize object used for the fit,
or: an array [ra,dec, a,b, phi, qual]
sigma: scale factor
contours: [None] default is the 68,95,99% assuming the radius is 1 sigma
if specified, must be a list, eg [1.0]
"""
axes = self.zea.axes
if contours is None: contours = self.clevels
if getattr(quadfit,'__iter__', False):
ra,dec,a,b,phi=quadfit[:5]
qual = None if len(quadfit)==5 else quadfit[5]
ellipse = Ellipse([a,b,np.radians(phi)])
x,y = ellipse.contour(1.0)
else:
x,y = quadfit.ellipse.contour(quadfit.fit_radius)
ra,dec = quadfit.ra, quadfit.dec
qual = quadfit.quality()
pixelsize=self.zea.pixelsize #scale for plot
x0,y0 = self.zea.pixel(SkyDir(ra,dec))
f=sigma/pixelsize #scale factor
xa = f*np.array(x)
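# Side note (illustrative helper, not part of the original module): the contour
# levels stored in TSplot.clevels, [1.51, 2.45, 3.03], are sqrt(-2*ln(1-p)) for
# p = 0.68, 0.95, 0.99, i.e. the sqrt(TS difference) radii of the usual
# 68/95/99% two-parameter confidence regions.
def _tsplot_contour_levels(containments=(0.68, 0.95, 0.99)):
    """Recompute the TSplot.clevels constants from their containment fractions."""
    return np.sqrt(-2.0 * np.log(1.0 - np.asarray(containments)))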
import warnings
import pickle
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import sys
import os
import os.path
import math
import bisect
import tensorflow as tf
sys.path.insert(0, '../../..')
from cde.density_estimator import GPDExtremeValueMixtureDensityNetwork,NoNaNGPDExtremeValueMixtureDensityNetwork, NewGPDExtremeValueMixtureDensityNetwork
from cde.density_estimator import MixtureDensityNetwork
from cde.density_estimator import ExtremeValueMixtureDensityNetwork
from cde.data_collector import MatlabDataset, MatlabDatasetH5, get_most_common_unique_states
from cde.density_estimator import plot_conditional_hist, measure_percentile, measure_percentile_allsame, measure_tail, measure_tail_allsame, init_tail_index_hill, estimate_tail_index_hill
from cde.evaluation.empirical_eval import evaluate_models_singlestate, empirical_measurer, evaluate_model_allstates, evaluate_models_allstates_plot, obtain_exp_value, evaluate_models_allstates_agg
""" Load or Create Project """
# this project uses the train dataset with arrival rate 0.95, and service rate 1
# name
PROJECT_NAME = 'onehop_p95'
# Path
projects_path = 'saves/projects/'
path = projects_path + PROJECT_NAME + '/'
# Get the directory name from the specified path
dirname = os.path.dirname(path)
# create the dir
os.makedirs(dirname, exist_ok=True)
""" load training data """
FILE_NAME = 'traindata_1hop_p95.npz'
try:
npzfile = np.load(path + FILE_NAME)
train_data = npzfile['arr_0']
meta_info = npzfile['arr_1']
batch_size = meta_info[0]
ndim_x = meta_info[1]
print('training data loaded from .npz file. Rows: %d ' % len(train_data), ' Columns: %d ' % len(train_data[0]), 'Batch_size: %d' % batch_size, 'ndim_x: %d' % ndim_x)
except:
print('train data .npz file does not exist, import and create the train dataset into Numpy array')
""" import and create the train dataset into Numpy array """
file_addr = '../../data/train_records_single_p95.mat'
content_key = 'train_records'
select_cols = [0,1]
batch_size = 10000
training_dataset = MatlabDataset(file_address=file_addr,content_key=content_key,select_cols=select_cols)
train_data = training_dataset.get_data(batch_size)
ndim_x = len(train_data[0])-1
meta_info = np.array([batch_size,ndim_x])
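# Plausible continuation of the except-branch above (a sketch, not part of the
# original excerpt): persist the freshly built arrays so the `try` branch finds
# them on the next run. np.savez stores positional arguments as arr_0/arr_1,
# which is exactly what the loading code above expects.
np.savez(path + FILE_NAME, train_data, meta_info)
print('training data saved to %s' % (path + FILE_NAME))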
import numpy as np
import numpy.linalg as la
from modpy._exceptions import InfeasibleProblemError
class Presolver:
def __init__(self, g, A, b, C, d, lb, ub, H=None):
"""
Parameters
----------
g : array_like, shape (n,)
System vector with coefficients of the linear terms.
A : array_like, shape (me, n)
System matrix with coefficients for the equality constraints.
b : array_like, shape (me,)
Results vector of the equality constraints.
C : array_like, shape (mi, n)
System matrix with coefficients for the inequality constraints.
d : array_like, shape (mi,)
Upper bound vector of the inequality constraints.
lb : array_like, shape (n,)
Lower bound.
ub : array_like, shape (n,)
Upper bound.
H : array_like, shape (n, n), optional
System matrix with coefficients of the quadratic terms.
"""
# problem vectors/matrices (will change during presolve procedure)
self.H = None # quadratic coefficient matrix (for QP)
self.g = g # linear coefficient vector
self.A = A # equality constraint coefficients
self.b = b # equality constraint solutions
self.C = C # inequality constraint coefficients
self.d = d # inequality constraint upper bounds
self.lb = lb # variable lower bound
self.ub = ub # variable upper bound
# most QP literature assumes Cx >= d, but the pre-solver
# assumes Cx <= d to share methods between LP and QP.
# the inequality constraints are reversed to comply.
if H is not None:
self.H = H
self.C = -self.C
self.d = -self.d
# original problem dimensions
self.n = g.size
self.me = b.size
self.mi = d.size
# presolved solution
self.x = np.zeros((self.n,)) # presolved solution to the primal problem
self.k = 0. # constant to be added to the optimal function value.
# Lagrangian multiplier bounds
self.yl = np.zeros((self.me,)) # lower bound on the equality Lagrangian multiplier
self.yu = np.zeros((self.me,)) # Upper bound on the equality Lagrangian multiplier
self.zl = np.zeros((self.mi,)) # lower bound on the inequality Lagrangian multiplier
self.zu = np.zeros((self.mi,)) # Upper bound on the inequality Lagrangian multiplier
# index trackers of remaining equations and variables
self.idx_x = np.arange(self.n) # index of variables remaining in the reduced LP
self.idx_e = np.arange(self.me) # index of eq. constraints remaining in the reduced LP
self.idx_i = np.arange(self.mi) # index of ineq. constraints remaining in the reduced LP
# internal parameters
self._tol = 1e-13 # tolerance below which values are assumed 0.
self._reduced = True # used in pre-solver loops
self._scale = 1. # scaling factor
def get_LP(self):
return self.g, self.A, self.b, self.C, self.d, self.lb, self.ub
def get_QP(self):
return self.H, self.g, self.A, self.b, -self.C, -self.d, self.lb, self.ub
def presolve_LP(self):
"""
Pre-solves a linear programming problem of the form::
min g'x
s.t. Ax = b
s.t. Cx <= d
s.t. lb <= x <= ub
References
----------
[1] <NAME>., <NAME>. (1993). Presolving in Linear Programming. Mathematical Programming 71.
Page: 235
[2] <NAME>., <NAME>., <NAME>., <NAME>. Preprocessing Techniques.
[3] Linear Programming Algorithms. MathWorks.
Link: https://se.mathworks.com/help/optim/ug/linear-programming-algorithms.html
"""
# remove fixed variables
self._remove_fixed_variables()
# iteratively reduce system
while self._reduced:
self._reduced = False
# remove singleton rows
self._remove_singleton_rows_eq()
self._remove_singleton_rows_iq()
# remove forcing constraints
self._remove_forcing_constraints() # equality
self._tighten_forcing_constraints() # inequality
# remove duplicate rows
self.A, self.b, self.idx_e = self._remove_duplicate_rows(self.A, self.b, self.idx_e, eq=True)
self.C, self.d, self.idx_i = self._remove_duplicate_rows(self.C, self.d, self.idx_i, eq=False)
# remove zero rows
self.A, self.b, self.idx_e = self._remove_zero_rows(self.A, self.b, self.idx_e, eq=True)
self.C, self.d, self.idx_i = self._remove_zero_rows(self.C, self.d, self.idx_i, eq=False)
# remove zero columns
self._remove_zero_columns_LP()
# shift bounds
self._shift_lower_bounds()
# scale system
self._scaling_equilibration()
def presolve_QP(self):
"""
Pre-solves a quadratic programming problem of the form::
min 1/2 x'Hx + g'x
s.t. Ax = b
s.t. Cx <= d
s.t. lb <= x <= ub
Notice the inequality constraints are reversed relative to
most QP algorithms in literature. This is to re-use pre-solver
methods from LP.
References
----------
[1] <NAME>., <NAME>. (1993). Presolving in Linear Programming. Mathematical Programming 71.
Page: 235
[2] <NAME>., <NAME>., <NAME>., <NAME>. Preprocessing Techniques.
[3] Quadratic Programming Algorithms. MathWorks.
Link: https://se.mathworks.com/help/optim/ug/quadratic-programming-algorithms.html
"""
# remove fixed variables
self._remove_fixed_variables()
# iteratively reduce system
while self._reduced:
self._reduced = False
# remove singleton rows
self._remove_singleton_rows_eq()
self._remove_singleton_rows_iq()
# remove forcing constraints
self._remove_forcing_constraints() # equality
self._tighten_forcing_constraints() # inequality
# remove duplicate rows
self.A, self.b, self.idx_e = self._remove_duplicate_rows(self.A, self.b, self.idx_e, eq=True)
self.C, self.d, self.idx_i = self._remove_duplicate_rows(self.C, self.d, self.idx_i, eq=False)
# remove zero rows
self.A, self.b, self.idx_e = self._remove_zero_rows(self.A, self.b, self.idx_e, eq=True)
self.C, self.d, self.idx_i = self._remove_zero_rows(self.C, self.d, self.idx_i, eq=False)
# remove zero columns
self._remove_zero_columns_QP()
# shift bounds
self._shift_lower_bounds()
# scale system
self._scaling_equilibration()
def postsolve(self, x):
"""
Post-solves a linear programming problem of the form::
min g'x
s.t. Ax = b
s.t. Cx <= d
s.t. lb <= x <= ub
References
----------
[1] <NAME>., <NAME>. (1993). Presolving in Linear Programming. Mathematical Programming 71.
Page: 235
[2] <NAME>., <NAME>., <NAME>., <NAME>. Preprocessing Techniques.
Parameters
----------
x : array_like, shape (n-r,)
Solution to the reduced LP.
Returns
-------
x : array_like, shape (n,)
Solution to the full LP.
f : float
Optimal function value of the full LP.
"""
# scale the solved solution back to the original problem domain
x *= self._scale
# shift the solved solution back to the original problem domain
x = self._shift_x_by_bound(x)
# merge pre-solved and solved solution
self.x[self.idx_x] = x
# calculate optimal function value
f = self.g.T @ x + self.k
if self.H is not None:
    # quadratic contribution of the reduced solution (note the 1/2 from 1/2 x'Hx)
    f += 0.5 * (x @ self.H @ x)
return x, f
def is_solved(self):
return not self.idx_x.size
def _shift_lower_bounds(self):
"""
Shifts the lower bounds of variables to 0 prior to solving LP.
"""
mask = self.lb != -np.inf
if np.any(mask):
self.b -= self.A[:, mask] @ self.lb[mask]
self.d -= self.C[:, mask] @ self.lb[mask]
self.ub[mask] -= self.lb[mask]
def _shift_x_by_bound(self, x):
"""
Shifts the resulting LP solution by the lower bounds, if lower bounds were shifted during presolve.
Parameters
----------
x : array_like, shape (n-r,)
Solution to the reduced LP.
Returns
-------
x : array_like, shape (n-r,)
Solution to the reduced LP shifted back to original the domain w.r.t. lower bounds.
"""
mask = self.lb != -np.inf
if np.any(mask):
x[mask] += self.lb[mask]
return x
def _scaling_equilibration(self):
"""
Performs an equilibration scaling of the programming problem to improve numerical stability.
The equilibration does not use the max(A), but rather the sqrt(max(A)) similar to [1].
References
----------
[1] <NAME>. (1996). Solving Large-Scale Linear Programs by Interior-Point Methods under the MATLAB Environment.
"""
# scaling is only done if the system is poorly scaled.
absnz = np.abs(np.block([self.A[np.nonzero(self.A)], self.C[np.nonzero(self.C)]]))
max_scale = np.amin(absnz) / np.amax(absnz) if absnz.size else np.inf
if max_scale >= 1e-4:
return
# calculate column scaling
A_max = np.amax(np.abs(self.A), axis=0) if self.A.size else 0.
C_max = np.amax(np.abs(self.C), axis=0) if self.C.size else 0.
col_scale = np.sqrt(np.maximum(A_max, C_max))
self._scale = 1. / col_scale
# calculate row scaling
A_row_scale = np.sqrt(np.amax(np.abs(self.A), axis=1))
C_row_scale = np.sqrt(np.amax(np.abs(self.C), axis=1))
# scale columns
self.ub *= col_scale
self.g /= col_scale
col_scale = np.diag(1. / col_scale)
self.A = self.A @ col_scale
self.C = self.C @ col_scale
del col_scale
# scale rows
self.b /= A_row_scale
self.d /= C_row_scale
self.A = np.diag(1. / A_row_scale) @ self.A
self.C = np.diag(1. / C_row_scale) @ self.C
# what is the logic of the following?
norm = la.norm(np.block([self.b, self.d]))
if norm > 0.:
q = np.median([1., la.norm(self.g) / norm, 1e8])
if q > 10.:
self.A *= q
self.C *= q
self.b *= q
self.d *= q
def _remove_zero_rows(self, A, b, idx, eq=True):
"""
Removes zero-rows from the constraints. If the corresponding results vector is non-empty,
the system is checked primal-infeasibility and an error is raised.
Parameters
----------
A : array_like, shape (m, n)
System matrix with coefficients for the equality/inequality constraints.
b : array_like, shape (m,)
Results vector of the equality/inequality constraints.
idx : array_like, shape (m,)
Index tracker of remaining equality/inequality constraints
eq : bool
True if equality constraints, False if inequality constraints.
Returns
-------
A : array_like, shape (m-r, n)
Reduced system matrix with coefficients for the equality/inequality constraints.
b : array_like, shape (m-r,)
Reduced results vector of the equality/inequality constraints.
idx : array_like, shape (m-r,)
Reduced index tracker of remaining equality/inequality constraints
"""
mask = _non_zero_rows(A, tol=self._tol)
A = A[mask, :]
if eq:
if not (np.allclose(b[~mask], 0, atol=self._tol)):
raise InfeasibleProblemError(-2, 'LP is primal-infeasible due to zero-row in `A`'
' with corresponding non-zero row in `b`.')
else:
if np.any(b[~mask] < -self._tol):
raise InfeasibleProblemError(-2, 'LP is primal-infeasible due to zero-row in `C`'
' with corresponding negative row in `d`.')
b = b[mask]
idx = idx[mask]
return A, b, idx
def _remove_zero_columns_LP(self):
"""
Removes redundant columns from an LP. If the corresponding variable is unbounded,
then an error is raised due to dual-infeasibility of the system.
"""
mask = self._non_zero_columns_mask()
x = self._assign_variables_empty(mask)
self._remove_variables(x, mask)
def _remove_zero_columns_QP(self):
"""
Removes redundant columns from a QP. If the corresponding variable is unbounded,
then an error is raised due to dual-infeasibility of the system.
"""
mask = self._non_zero_columns_mask()
me = np.full_like(mask, False)
ms = np.full_like(mask, False)
# the structure of H has to be investigated for QPs, prior to removing columns.
# H is per definition symmetric, so only have to check row or column, not both
for i in np.nonzero(~mask)[0]:  # iterate over individual zero-column indices
# check if all coefficients are zero
if np.allclose(self.H[i, :], 0, atol=self._tol):
me[i] = True
# check if all coefficients are zero except for H[i, i]
elif np.abs(self.H[i, i]) > self._tol:
nz = np.abs(self.H[i, :]) < self._tol
nz = np.delete(nz, i)
if np.all(nz):
ms[i] = True
# reduce the programming problem
if np.any(me):
xe = self._assign_variables_empty(me)
self._remove_variables(xe, me)
if np.any(ms):
xs = self._assign_variables_single(ms)
self._remove_variables(xs, ms)
def _remove_fixed_variables(self):
"""
Removes fixed variables from an LP.
"""
# mask of non-fixed variables
mask = np.abs(self.ub - self.lb) > self._tol
self._remove_variables(self.lb[~mask], mask)
def _remove_forcing_constraints(self):
"""
Remove forcing constraints.
References
----------
[1] <NAME>., <NAME>. (1993). Presolving in Linear Programming. Mathematical Programming 71.
Page: 226
"""
m, n = self.A.shape
# calculate the lower and upper constraint bounds
g, h, P, M = self._calculate_constraint_bounds(self.A)
# test for inequality: h < b or b > g
if np.any((h < self.b) | (g > self.b)):
raise InfeasibleProblemError(-2, 'LP is primal-infeasible due to '
'an infeasible forcing constraint '
'of the equality system.')
else:
g_mask = np.abs(g - self.b) < self._tol
h_mask = np.abs(h - self.b) < self._tol
idx = []
# lower forcing constraint
if np.any(g_mask):
ig = np.argwhere(g_mask).flatten()
for i in ig:
for j in P[i]:
self.x[j] = self.lb[j]
idx.append(j)
for j in M[i]:
self.x[j] = self.ub[j]
idx.append(j)
# upper forcing constraint
if np.any(h_mask):
ih = np.argwhere(h_mask).flatten()
for i in ih:
for j in P[i]:
self.x[j] = self.ub[j]
idx.append(j)
for j in M[i]:
self.x[j] = self.lb[j]
idx.append(j)
if np.any(idx):
# ensure index list is unique
idx = list(set(idx))
mask = _idx2mask(idx, n)
self._remove_variables(self.x[idx], mask)
self._reduced = True
def _tighten_forcing_constraints(self):
"""
Checks for infeasibility of the inequality constraints and tighten them if possible.
"""
# calculate the lower and upper constraint bounds
g, h, _, _ = self._calculate_constraint_bounds(self.C)
# if lower constraint bound is larger than 'd', then the problem is infeasible.
if np.any(g > self.d):
raise InfeasibleProblemError(-2, 'LP is primal-infeasible due to '
'an infeasible forcing constraint '
'of the inequality system.')
# if upper constraint bound is more restrictive than 'd', then tighten 'd'.
h_mask = h < self.d
self.d[h_mask] = h[h_mask]
if np.any(h_mask):
self._reduced = True
def _remove_singleton_rows_eq(self):
"""
Removes singleton rows from equality constraints.
"""
x, i, j = self._get_singleton_rows(self.A, self.b)
if np.any((x < self.lb[j]) | (x > self.ub[j])):
raise InfeasibleProblemError(-2, 'LP is primal-infeasible due to '
'an infeasible singleton row.')
# reduce the problem dimension
mask = _idx2mask(j, self.g.size)
if np.any(~mask):
self._remove_variables(x, ~mask)
# remove constraints
if np.any(i):
self.A = self.A[~i, :]
self.b = self.b[~i]
self.idx_e = self.idx_e[~i]
self._reduced = True
def _remove_singleton_rows_iq(self):
"""
Change singleton rows from inequality constraints to upper bounds
or remove constraint if the existing upper bound is more restrictive.
"""
x, ii, jj = self._get_singleton_rows(self.C, self.d)
for x_, i, j in zip(x, *np.nonzero(ii), jj):
if self.lb[j] <= x_:
if x_ <= self.ub[j]: # tighten the bound
if self.C[i, j] > 0.:
self.ub[j] = x_
else:
self.lb[j] = x_
else:
raise InfeasibleProblemError(-2, 'LP is primal-infeasible due to '
'an infeasible singleton row.')
# remove constraints
if np.any(ii):
self.C = self.C[~ii, :]
self.d = self.d[~ii]
self.idx_i = self.idx_i[~ii]
self._reduced = True
def _remove_duplicate_rows(self, A, b, idx, eq=True):
"""
Removes redundant rows from the constraints. If the corresponding results vector is non-empty,
then an error is raised due to primal-infeasibility of the system.
Parameters
----------
A : array_like, shape (m, n)
System matrix with coefficients for the equality/inequality constraints.
b : array_like, shape (m,)
Results vector of the equality/inequality constraints.
idx : array_like, shape (m,)
Index tracker of remaining equality/inequality constraints
eq : bool
True if equality constraints, False if inequality constraints.
Returns
-------
A : array_like, shape (m-r, n)
Reduced system matrix with coefficients for the equality/inequality constraints.
b : array_like, shape (m-r,)
Reduced results vector of the equality/inequality constraints.
idx : array_like, shape (m-r,)
Reduced index tracker of remaining equality/inequality constraints
"""
nz = _non_zero_count(A, axis=1, tol=self._tol)
id_ = np.arange(nz.size)[nz > 1]
# split row indices into lists with same number of non-zeroes
snz = {} # split_non_zero
for i in id_:
key = nz[i]
if key in snz:
snz[key].append(i)
else:
snz[key] = [i]
# find rows with similar sparsity pattern
sp = [] # sparsity_pattern
for rows in snz.values():
for i, ri in enumerate(rows):
spi = np.nonzero(A[ri])
for rk in rows[(i+1):]:
spk = np.nonzero(A[rk])
if np.array_equal(spi, spk):
sp.append((ri, rk))
# check if the rows with similar sparsity pattern are duplicates
dup = []
for ri, rk in sp:
nz_cols = np.nonzero(A[ri])[0]  # rows share a sparsity pattern, so compare non-zero entries only
nu = A[ri, nz_cols] / A[rk, nz_cols]
if np.all(nu == nu[0]):
ratio = b[ri] / b[rk]
if eq: # equality constraints
if ratio == nu[0]:
dup.append(rk)
else:
raise InfeasibleProblemError(-2, 'Problem is primal-infeasible due to '
'duplicate rows with varying `b`.')
else: # inequality constraints
if ratio == nu[0]:
dup.append(ri if (np.abs(ratio) < 1.) else rk)
if dup:
A = np.delete(A, dup, axis=0)
b = np.delete(b, dup)
idx = np.delete(idx, dup)
return A, b, idx
def _remove_variables(self, x, mask):
"""
Removes a variable from the programming problem.
Parameters
----------
x : array_like, shape (n-r,)
Vector of removed variables.
mask : array_like, shape (n,)
Mask of variables to keep in the programming problem.
"""
if np.any(~mask):
# update constant and constraint equations
self.k += self.g[~mask] @ x
self.b -= self.A[:, ~mask] @ x
self.d -= self.C[:, ~mask] @ x
# update pre-solved solution
self.x[~mask] = x
self.idx_x = self.idx_x[mask]
# update programming problem
self.g = self.g[mask]
self.A = self.A[:, mask]
self.C = self.C[:, mask]
self.lb = self.lb[mask]
self.ub = self.ub[mask]
if self.H is not None:
    # removed variables contribute 0.5 * x_r' H_rr x_r to the constant term
    # and H_kr @ x_r to the linear term of the remaining variables
    self.k += 0.5 * (x @ self.H[np.ix_(~mask, ~mask)] @ x)
    self.g += self.H[np.ix_(mask, ~mask)] @ x
    self.H = self.H[np.ix_(mask, mask)]
def _get_singleton_rows(self, A, b):
i = _non_zero_count(A, axis=1, tol=self._tol) == 1
j = [int(np.argwhere(np.abs(a) > self._tol)) for a in A[i, :]]
x = b[i] / A[i, j]
return x, i, j
def _calculate_constraint_bounds(self, A):
"""
Calculate the upper and lower bounds of each constraint.
Parameters
----------
A : array_like, shape (m, n)
Coefficient matrix of equality or inequality system.
"""
m, n = A.shape
# define the sets P and M
js = np.arange(n)
P = [js[A[i, :] > 0.] for i in range(m)]
M = [js[A[i, :] < 0.] for i in range(m)]
# calculate lower and upper term of constraint bounds
r = range(m)
gp = np.array([np.sum([A[i, j] * self.lb[j] for j in P[i]]) for i in r])
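# Standalone usage sketch (outside the class, illustrative only): the
# constructor stores -C and -d internally when H is given, and get_QP() flips
# them back, so callers see Cx >= d for QPs and Cx <= d for LPs as documented
# above. The tiny problem below is made up for demonstration.
def _example_qp_sign_convention():
    H = np.eye(2)
    g = np.zeros(2)
    A = np.zeros((0, 2)); b = np.zeros(0)            # no equality constraints
    C = np.array([[1.0, 0.0]]); d = np.array([0.5])  # meaning x0 >= 0.5
    lb = np.full(2, -np.inf); ub = np.full(2, np.inf)
    pre = Presolver(g, A, b, C, d, lb, ub, H=H)
    H_, g_, A_, b_, C_, d_, lb_, ub_ = pre.get_QP()
    assert np.allclose(C_, C) and np.allclose(d_, d)
    return pre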
import numpy as np
import matplotlib.pyplot as plt
import numba
import time as tm
import platform
import os
import sys
cythonc = True
try:
import psearch_pyc
except ImportError:
cythonc = False
# version information:
from collections import namedtuple
version_info = namedtuple('version_info','major minor micro')
version_info = version_info(major=0,minor=23,micro=6)
__version__ = '%d.%d.%d' % version_info
def reference():
msg ='pure Python (*** slow ***)'
if (cythonc):
msg = 'Python/Cython/C (*** fast ***)'
print(' ')
print('Saha, A., & Vivas, A. K. 2017, Astronomical Journal, 154, 231;')
print(' "A Hybrid Algorithm for Period Analysis from Multiband Data with')
print(' Sparse and Irregular Sampling for Arbitrary Light-curve Shapes"')
print('IDL CODE (Abhijit Saha):')
print(' https://github.com/AbhijitSaha/Psearch')
print('PYTHON/CYTHON/C CODE (Kenneth Mighell):')
print(' https://github.com/AbhijitSaha/Psearch/tree/master/psearch_py')
print('\nMODULE:')
print(' %s' % os.path.abspath(__file__))
print(' [psearch_py (%s) mode: %s ]' % (__version__,msg))
print(' ')
return
def psearch_py( hjd, mag, magerr, filts, filtnams, pmin, dphi, n_thresh=1, \
pmax=None, periods=None, verbose=False ):
"""
NAME:
psearch_py
INPUTS:
hjd: time (Heliocentric Julian Day) input data array
mag: magnitude input data array (co-aligned with hjd)
magerr: magnitude error input data array (co-aligned with hjd)
filts: filter input data array (co-aligned with hjd) with
integer identifier indicating passband
filtnams = string array containing character names corresponding to
coded filts values. E.g., if you have 5 bands labeled u,g,r,i,z
with filts values 0,1,2,3,4 respectively, filtnams would be set by:
filtnams = ['u', 'g', 'r', 'i', 'z']
pmin: Minimum value of period to be tested.
E.g., pmin = 0.2
dphi: Maximum change in relative phase between first and last epoch to
be permitted when stepping to next test period.
E.g., dphi = 0.02
n_thresh: Number of simulated error runs (default=1,min=0)
pmax: maximum period to explore (default=None)
periods: array of periods to explore (default=None)
verbose: want verbose information? (default=False)
OUTPUTS:
ptest: 1-D array with N dimensions of periods for which the periodograms
are computed. It is the same for ALL bands/channels.
psi_m: M x N array of the Psi periodogram, where M is the number of
bands/channels in the input array filtnams
N.B. if only one filter is used,
psi_m is a 1d array (vector) of N elements
thresh_m: M x N array containing threshold values of Psi at each period
and band for assessing significance for psi_m
N.B. if only one filter is used,
thresh_m is a 1d array (vector) of N elements
ORIGINAL IDL DEFINITION:
pro Psearch, hjd, mag, magerr, filts, filtnams, pmin, dphi, ptest, $
psi_m, thresh_m
"""
assert isinstance(hjd,np.ndarray)
assert (hjd.dtype == np.float64)
assert (hjd.ndim == 1)
hjd_shape = hjd.shape
assert isinstance(mag,np.ndarray)
assert (mag.dtype == np.float64)
assert (mag.shape == hjd_shape)
assert isinstance(magerr,np.ndarray)
assert (magerr.dtype == np.float64)
assert (magerr.shape == hjd_shape)
assert isinstance(filts,np.ndarray)
assert (filts.dtype == np.float64)
assert (filts.shape == hjd_shape)
assert (n_thresh >= 0)
print('psearch: BEGIN =====================================================')
print('\nREFERENCE:')
reference()
nfilts = len(filtnams)
assert (nfilts >= 1)
psiacc = 0.
confacc = 0.
for i in range(nfilts):
fwant = i
print('psearch: ',filtnams[fwant],' filter')
x, fy, theta, psi, conf = periodpsi2_py( hjd, mag, magerr, filts, \
pmin, dphi, fwant, n_thresh=n_thresh, \
maxper=pmax, periods=periods, verbose=verbose )
if (i == 0):
# define the output arrays
# -- needs size of period array from 1st band call to periodpsi2
psi_m = np.zeros(shape=(nfilts,len(x)))
thresh_m = np.zeros(shape=(nfilts,len(x)))
psi_m[i,:] = psi
thresh_m[i,:] = conf
table_psi_kjm_py( xx=x, yy=psi, ee=conf, n=10)
psiacc = psi + psiacc
confacc = conf + confacc
if (nfilts == 1):
psi_m = psi_m.flatten()
thresh_m = thresh_m.flatten()
else:
print(' ')
print('========== ALL FILTERS ========== ')
print(' ')
table_psi_kjm_py( xx=x, yy=psiacc, ee=confacc, n=10)
ptest = x
print('\nReference:')
reference()
print('psearch: END =======================================================')
return ptest, psi_m, thresh_m
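# Minimal usage sketch with synthetic single-band data (illustrative only; the
# numbers are made up and this helper is not part of the original module):
def _example_psearch(n=200, true_period=0.75, seed=42):
    rng = np.random.RandomState(seed)
    hjd = np.sort(rng.uniform(0.0, 60.0, n))                       # irregular sampling
    mag = 15.0 + 0.3*np.sin(2.0*np.pi*hjd/true_period) + rng.normal(0.0, 0.02, n)
    magerr = np.full(n, 0.02)
    filts = np.zeros(n)                                            # one band, coded 0
    ptest, psi, thresh = psearch_py(hjd, mag, magerr, filts, ['V'],
                                    pmin=0.2, dphi=0.02)
    return ptest[np.argmax(psi)]                                   # best-fitting period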
def periodpsi2_py( hjd, mag, magerr, filts, minper, dphi, fwant, n_thresh=1,
maxper=None, periods=None, verbose=False ):
"""
NAME:
periodpsi2_py
INPUTS:
hjd: time (Heliocentric Julian Day) input data array
mag: magnitude input data array (co-aligned with hjd)
magerr: magnitude error input data array (co-aligned with hjd)
filts: filter input data array (co-aligned with hjd) with
integer identifier indicating passband
minper: minimum period to explore
dphi: maximum phase change between any two data points resulting from
one step in frequency or period
fwant: integer value corresponding to desired passband from among values
in filts array.
n_thresh: Number of simulated error runs (default=1,min=0)
maxper: maximum period to explore (default=None)
periods: array of periods to explore (default=None)
verbose: want verbose information? (default=False)
OUTPUTS:
x: period array for which periodograms are computed
fy: Lomb-Scargle periodogram (co-aligned with x)
theta: Lafler-Kinman periodogram (co-aligned with x)
psi: psi periodogram (co-aligned with x)
conf: simulated Psi periodogram threshold: the sum of one realization
for a non-periodic variable with amplitude and noise mimicking the
real source and one for an unvarying object with noise mimicking
the measurement errors
ORIGINAL IDL DEFINITION:
pro periodpsi2, HJD, MAG, MAGERR, FILTS, minper, dphi, fwant, x, fy, $
theta, psi, conf
"""
print('periodpsi2: BEGIN')
debug1 = False
#debug1 = True
debug2 = False
#debug2 = True
assert isinstance(hjd,np.ndarray)
assert (hjd.dtype == np.float64)
assert (hjd.ndim == 1)
hjd_shape = hjd.shape
assert isinstance(mag,np.ndarray)
assert (mag.dtype == np.float64)
assert (mag.shape == hjd_shape)
assert isinstance(magerr,np.ndarray)
assert (magerr.dtype == np.float64)
assert (magerr.shape == hjd_shape)
assert isinstance(filts,np.ndarray)
assert (filts.dtype == np.float64)
assert (filts.shape == hjd_shape)
assert (minper > 0.0) # type: float
assert (dphi > 0.0) # type: float
assert (n_thresh >= 0) # type: int
# normal usage:
t0 = np.min(hjd)
tmax = np.max(hjd)
tspan = tmax - t0
maxfreq = 1./minper
minfreq = 2./tspan
deltafreq = dphi/tspan
nfreq = int( (maxfreq-minfreq)/deltafreq )
farray = minfreq + np.arange(nfreq)*deltafreq
x = 1./farray
# user supplies period array or sets an upper period limit:
if ((periods is not None) or (maxper is not None)):
if (periods is not None):
x = periods.copy()
elif (maxper is not None):
assert (maxper > minper)
idx = ((x >= minper)&(x <= maxper))
assert (np.count_nonzero(idx) > 0), 'Need at least one period 8=X'
x = x[idx].copy()
farray = 1./x
nfreq = len(x)
print('periodpsi2: minimum and maximum periods: %14.8f %14.8f days' % \
(min(x), max(x)))
print('periodpsi2: number of period (frequency) samples: ', nfreq, \
' <----------')
if (verbose):
print('periodpsi2: ', x)
print('periodpsi2: ^----- periods to be tested')
print('periodpsi2: minimum and maximum frequencies: %14.8f %14.8f' % \
(min(farray), max(farray)))
omega = farray * 2.0 * np.pi # scargle_fast uses *angular* frequencies
ok = (filts == fwant) & (magerr <= 0.2) & (magerr >= 0.0)
tr = hjd[ok]
nok = len(tr)
print('periodpsi2: ',nok,' observations <----------')
yr = mag[ok]
yr_err = magerr[ok]
sss = np.argsort(tr)
tr = tr[sss]
yr = yr[sss]
yr_err = yr_err[sss]
################################################################################
########## psi #################################################################
################################################################################
time20 = tm.time()
#om, fy = scargle_py( tr, yr, omega=omega, nfreq=nfreq, old=False )[:2]
#fy = scargle_fast_py( tr, yr, omega, nfreq )
#fy = psearch_pyc.scargle_fast( tr, yr, omega, nfreq )
if (cythonc):
fy = psearch_pyc.scargle_fast( tr, yr, omega, nfreq )
else:
fy = scargle_fast_py( tr, yr, omega, nfreq )
time21 = tm.time()
print('scargle: DONE %8.3f seconds' % (time21-time20))
if (debug1):
om_, fy_ = scargle_py( tr, yr, omega=omega, nfreq=nfreq, old=False )[:2]
print(np.allclose(fy,fy_),'=np.allclose(fy,fy_)')
ok1 = np.allclose(fy,fy_)
if (not ok1):
print('^--- FY NOT OK!\n')
plot_absdiff_py( fy_, fy, 'FY' )
time20 = tm.time()
#theta = ctheta_slave_py(x, yr, tr, version=1)
#theta = ctheta_slave_py(x, yr, tr)
#theta = ctheta_slave_v3_pyjit(x, yr, tr)
#theta = psearch_pyc.ctheta_slave(x, yr, tr)
if (cythonc):
#theta = psearch_pyc.ctheta_slave(x, yr, tr)
theta = psearch_pyc.ctheta_slave(x, yr, tr)
else:
theta = ctheta_slave_v3_pyjit(x, yr, tr)
time21 = tm.time()
print('ctheta_slave: DONE %8.3f seconds' % (time21-time20))
if (debug2):
theta_ = ctheta_slave_py(x, yr, tr, version=1)
print(np.allclose(theta,theta_),'=np.allclose(theta,theta_)')
ok4 = np.allclose(theta,theta_)
if (not ok4):
print('^--- THETA NOT OK!\n')
plot_absdiff_py( theta_, theta, 'THETA' )
psi = 2.*fy/theta
################################################################################
################################################################################
################################################################################
conf1 = np.zeros_like( psi )
conf2 = np.zeros_like( psi )
count = 0
while (count < n_thresh):
count += 1
print('periodpsi2_py: ',count,' of ',n_thresh,' (thresh loop)')
########################################################################
########## conf1 #######################################################
########################################################################
er = yr_err*np.random.normal(0.,1.,nok)
time20 = tm.time()
#om, fe = scargle_py( tr, er, omega=omega, nfreq=nfreq, old=False )[:2]
#fe = scargle_fast_py( tr, er, omega, nfreq )
#fe = psearch_pyc.scargle_fast( tr, er, omega, nfreq )
if (cythonc):
fe = psearch_pyc.scargle_fast( tr, er, omega, nfreq )
else:
fe = scargle_fast_py( tr, er, omega, nfreq )
time21 = tm.time()
print('scargle: DONE %8.3f seconds' % (time21-time20))
if (debug1):
om_, fe_ = scargle_py( tr, er, omega=omega, nfreq=nfreq, old=False )[:2]
print(np.allclose(fe,fe_),'=np.allclose(fe,fe_)')
ok2 = np.allclose(fe,fe_)
if (not ok2):
print('^--- FE NOT OK!\n')
plot_absdiff_py( fe_, fe, 'FE' )
time20 = tm.time()
#thetaerr = ctheta_slave_py(x, er, tr, version=1)
#thetaerr = ctheta_slave_py(x, er, tr)
#thetaerr = ctheta_slave_v3_pyjit(x, er, tr)
#thetaerr = psearch_pyc.ctheta_slave(x, er, tr)
if (cythonc):
thetaerr = psearch_pyc.ctheta_slave(x, er, tr)
else:
thetaerr = ctheta_slave_v3_pyjit(x, er, tr)
time21 = tm.time()
print('ctheta_slave: DONE %8.3f seconds' % (time21-time20))
if (debug2):
thetaerr_ = ctheta_slave_py(x, er, tr, version=1)
print(np.allclose(thetaerr,thetaerr_),\
'=np.allclose(thetaerr,thetaerr_)')
ok5 = np.allclose(thetaerr,thetaerr_)
if (not ok5):
print('^--- THETAERR NOT OK!\n')
plot_absdiff_py( thetaerr_, thetaerr, 'THETAERR' )
conf1a = 2.*fe/thetaerr
conf1b = conf1a*np.sum(psi)/np.sum(conf1a)
conf1 = np.maximum(conf1,conf1b)
########################################################################
########## conf2 #######################################################
########################################################################
zr, _ = scramble_py( yr )
time20 = tm.time()
#om, fz = scargle_py( tr, zr, omega=omega, nfreq=nfreq, old=False )[:2]
#fz = scargle_fast_py( tr, zr, omega, nfreq )
#fz = psearch_pyc.scargle_fast( tr, zr, omega, nfreq )
if (cythonc):
fz = psearch_pyc.scargle_fast( tr, zr, omega, nfreq )
else:
fz = scargle_fast_py( tr, zr, omega, nfreq )
time21 = tm.time()
print('scargle: DONE %8.3f seconds' % (time21-time20))
if (debug1):
om_, fz_ = scargle_py( tr, zr, omega=omega, nfreq=nfreq,\
old=False )[:2]
print(np.allclose(fz,fz_),'=np.allclose(fz,fz_)')
ok3 = np.allclose(fz,fz_)
if (not ok3):
print('^--- FZ NOT OK!\n')
plot_absdiff_py( fz_, fz, 'FZ' )
time20 = tm.time()
#thetaz = ctheta_slave_py(x, zr, tr, version=1)
#thetaz = ctheta_slave_py(x, zr, tr)
#thetaz = ctheta_slave_v3_pyjit(x, zr, tr)
#thetaz = psearch_pyc.ctheta_slave(x, zr, tr)
if (cythonc):
thetaz = psearch_pyc.ctheta_slave(x, zr, tr)
else:
thetaz = ctheta_slave_v3_pyjit(x, zr, tr)
time21 = tm.time()
print('ctheta_slave: DONE %8.3f seconds' % (time21-time20))
if (debug2):
thetaz_ = ctheta_slave_py(x, zr, tr, version=1)
print(np.allclose(thetaz,thetaz_),'=np.allclose(thetaz,thetaz_)')
ok6 = np.allclose(thetaz,thetaz_)
if (not ok6):
print('^--- THETAZ NOT OK!\n')
plot_absdiff_py( thetaz_, thetaz, 'THETAZ' )
conf2a = 2.*fz/thetaz
conf2b = conf2a*np.sum(psi)/np.sum(conf2a)
conf2 = np.maximum(conf2,conf2b)
########################################################################
########################################################################
########################################################################
conf = conf1 + conf2
print('periodpsi2: END')
return x, fy, theta, psi, conf
def scramble_py( inarr ):
"""
NAME:
scramble_py
INPUTS:
inarr
OUTPUTS:
scrambarr
pickarr
ORIGINAL IDL DEFINITION:
pro scramble, inarr, scrambarr, pickarr
"""
#DEBUG: print('scramble: BEGIN'
ns = len(inarr)
x = np.random.choice( ns, size=ns )
#assert x.dtype == np.int64
assert (np.min(x) >= 0) & (np.max(x) < ns)
s = np.argsort(x) #KJM: BEWARE of the IDL sort() gotcha!
#assert s.dtype == np.int64
assert (np.min(s) >= 0) & (np.max(s) < ns)
# outputs:
scrambarr = inarr[s]
pickarr = inarr[x]
#DEBUG: print('scramble: END'
return scrambarr, pickarr
def scargle_fast_py( t, c, omega, nfreq ):
"""
NAME:
scargle_fast_py
PURPOSE:
Compute the Lomb-Scargle periodogram of an unevenly sampled lightcurve
CATEGORY:
time series analysis
INPUTS:
t: The times at which the time series was measured (e.g. HJD)
c: counts (corresponding count rates)
omega: angular frequencies for which the PSD values are desired
[PSD: Fourier Power Spectral Density]
nfreq: number of independent frequencies
OUTPUTS:
px: the psd-values corresponding to omega
DESCRIPTION:
The Lomb Scargle PSD is computed according to the
definitions given by Scargle, 1982, ApJ, 263, 835, and Horne
and Baliunas, 1986, MNRAS, 302, 757. Beware of patterns and
clustered data points as the Horne results break down in
this case! Read and understand the papers and this
code before using it! For the fast algorithm read W.H. Press
and G.B. Rybicki 1989, ApJ 338, 277.
The code is still stupid in the sense that it wants normal
frequencies, but returns angular frequency...
MODIFICATION HISTORY OF IDL VERSION:
Version 1.0, 1997, <NAME> IAAT
Version 1.1, 1998.09.23, JW: Do not normalize if variance is 0
(for computation of LSP of window function...)
Version 1.2, 1999.01.07, JW: force nfreq to be int
Version 1.3, 1999.08.05, JW: added omega keyword
Version 1.4, 1999.08
KP: significance levels
JW: pmin,pmax keywords
Version 1.5, 1999.08.27, JW: compute the significance levels
from the horne number of independent frequencies, and not from
nfreq
Version 1.6, 2000.07.27, SS and SB: added fast algorithm and FAP
according to white noise lc simulations.
Version 1.7, 2000.07.28 JW: added debug keyword, sped up
simulations by factor of four (use /slow to get old
behavior of the simulations)
WEBSITE FOR THE IDL VERSION (Version 1.7, 2000.07.28):
http://astro.uni-tuebingen.de/software/idl/aitlib/timing/scargle.pro
ORIGINAL IDL DEFINITION:
PRO scargle,t,c,om,px,fmin=fmin,fmax=fmax,nfreq=nfreq, $
nu=nu,period=period,omega=omega, $
fap=fap,signi=signi,simsigni=simsigni, $
pmin=pmin,pmax=pmax,old=old, $
psdpeaksort=psdpeaksort,multiple=multiple,noise=noise, $
debug=debug,slow=slow
"""
assert isinstance(t,np.ndarray)
assert (t.dtype == np.float64)
assert (t.ndim == 1)
assert isinstance(c,np.ndarray)
assert (c.dtype == np.float64)
assert (c.ndim == 1)
assert (len(t)==len(c))
assert isinstance(omega,np.ndarray)
assert (omega.dtype == np.float64)
assert (omega.ndim == 1)
assert (omega.size == nfreq)
#
noise = np.sqrt(np.var(c))
#
# make times manageable (Scargle periodogram is time-shift invariant)
time = t-t[0]
#
n0 = len(time)
assert (n0 > 0)
#
om = omega # alias
#
#===== PERIODOGRAM: FAST VERSION ========================================
# Reference:
# Press, W.H., & Rybicki, G.B. 1989, ApJ 338, 277;
# FAST ALGORITHM FOR SPECTRAL ANALYSIS OF UNEVENLY SAMPLED DATA"
#
# Eq. (6); s2, c2
s2 = np.zeros(nfreq)
c2 = np.zeros(nfreq)
two_time = 2.0*time
for i in range(nfreq):
s2[i] = np.sum( np.sin(two_time*om[i]) )
c2[i] = np.sum( np.cos(two_time*om[i]) )
#s2[i] = np.sum( np.sin(2.*om[i]*time) )
#c2[i] = np.sum( np.cos(2.*om[i]*time) )
two_time = 0.0 # clean up
#
# Eq. (2): Definition -> tan(2omtau)
# --- tan(2omtau) = s2 / c2
omtau = np.arctan(s2/c2) / (2.)
# cos(tau), sin(tau)
cosomtau = np.cos(omtau)
sinomtau = np.sin(omtau)
#
# Eq. (7); total(cos(t-tau)^2) and total(sin(t-tau)^2)
tmp = c2*np.cos(2.*omtau) + s2*np.sin(2.*omtau)
tc2 = 0.5*(n0+tmp)
ts2 = 0.5*(n0-tmp)
# clean up
tmp = 0.
omtau = 0.
s2 = 0.
#
# computing the periodogram for the original light curve
#
# subtract mean from data
cn = c - np.mean(c)
#
# Eq. (5); sh and ch
sh = np.zeros(nfreq)
ch = np.zeros(nfreq)
omi_time = np.zeros(nfreq)
for i in range(nfreq):
omi_time = om[i] * time
sh[i] = np.sum( cn*np.sin(omi_time) )
ch[i] = np.sum( cn*np.cos(omi_time) )
#sh[i] = np.sum( cn*np.sin(om[i]*time) )
#ch[i] = np.sum( cn*np.cos(om[i]*time) )
omi_time = 0.0 # clean up
#
# Eq. (3)
px = ((ch*cosomtau + sh*sinomtau)**2 / tc2) + \
((sh*cosomtau - ch*sinomtau)**2 / ts2)
# correct normalization
px = 0.5*px/(noise**2)
#
return px
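# Quick sanity-check sketch (illustrative, not part of the original module):
# the fast periodogram should peak near an injected test frequency.
def _example_scargle_peak(nfreq=500):
    rng = np.random.RandomState(0)
    t = np.sort(rng.uniform(0.0, 40.0, 300))            # unevenly sampled times
    f0 = 0.8                                            # injected frequency [1/day]
    c = np.sin(2.0*np.pi*f0*t) + rng.normal(0.0, 0.1, t.size)
    freqs = np.linspace(0.05, 2.0, nfreq)
    px = scargle_fast_py(t, c, 2.0*np.pi*freqs, nfreq)  # takes *angular* frequencies
    return freqs[np.argmax(px)]                         # should land close to f0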
def scargle_py(
#INPUTS:
t,c,
#OPTIONAL INPUTS:
fmin=None,fmax=None,pmin=None,pmax=None,omega=None,fap=None,noise=None,
multiple=None,nfreq=None,
#OPTIONAL BOOLEAN INPUTS (IDL: KEYWORD PARAMETERS):
old=None,debug=False,slow=None
):
"""
NAME:
scargle_py
PURPOSE:
Compute the Lomb-Scargle periodogram of an unevenly sampled lightcurve
CATEGORY:
time series analysis
INPUTS:
t: The times at which the time series was measured (e.g. HJD)
c: counts (corresponding count rates)
OPTIONAL INPUTS:
fmin: minimum frequency (NOT ANGULAR FREQ!) to be used
(has precedence over pmin)
fmax: maximum frequency (NOT ANGULAR FREQ!) to be used
(has precedence over pmax)
pmin: minimum PERIOD to be used
pmax: maximum PERIOD to be used
omega: angular frequencies for which the PSD values are desired
[PSD: Fourier Power Spectral Density]
fap: false alarm probability desired
(see Scargle et al., p. 840,a and signi keyword).
Default equal to 0.01 (99% significance)
noise: for the normalization of the periodogram and the
computation of the white noise simulations.
If not set, equal to the variance of the original lc.
multiple: number of white noise simulations for the FAP
power level. Default equal to 0 (i.e., no simulations).
nfreq: number of independent frequencies
OPTIONAL BOOLEAN INPUTS (IDL: KEYWORD PARAMETERS):
old: if set computing the periodogram according to
Scargle, J.D. 1982, ApJ 263, 835.
If not set, compute the periodogram with the fast algorithm of
Press, W.H., & Rybicki, G.B. 1989, ApJ 338, 277.
debug: print out debugging information if set
slow: if set, a much slower but less memory intensive way to
perform the white noise simulations is used.
OUTPUTS:
om: angular frequency of PSD [PSD: Fourier Power Spectral Density]
px: the psd-values corresponding to omega
[KJM: original IDL documentation refers to psd
--- which did not exist]
nu: normal frequency [nu = om/(2.*np.pi)]
period: period corresponding to each omega [period = 1./nu]
signi: power threshold corresponding to the given false alarm
probabilities fap and according to the desired number of independent
frequencies
simsigni: power threshold corresponding to the given false alarm
probabilities fap according to white noise simulations
psdpeaksort: array with the maximum peak per simulation
DESCRIPTION:
The Lomb Scargle PSD is computed according to the
definitions given by Scargle, 1982, ApJ, 263, 835, and Horne
and Baliunas, 1986, MNRAS, 302, 757. Beware of patterns and
clustered data points as the Horne results break down in
this case! Read and understand the papers and this
code before using it! For the fast algorithm read W.H. Press
and G.B. Rybicki 1989, ApJ 338, 277.
The code is still stupid in the sense that it wants normal
frequencies, but returns angular frequency...
MODIFICATION HISTORY OF IDL VERSION:
Version 1.0, 1997, <NAME> IAAT
Version 1.1, 1998.09.23, JW: Do not normalize if variance is 0
(for computation of LSP of window function...)
Version 1.2, 1999.01.07, JW: force nfreq to be int
Version 1.3, 1999.08.05, JW: added omega keyword
Version 1.4, 1999.08
KP: significance levels
JW: pmin,pmax keywords
Version 1.5, 1999.08.27, JW: compute the significance levels
from the horne number of independent frequencies, and not from
nfreq
Version 1.6, 2000.07.27, SS and SB: added fast algorithm and FAP
according to white noise lc simulations.
Version 1.7, 2000.07.28 JW: added debug keyword, sped up
simulations by factor of four (use /slow to get old
behavior of the simulations)
WEBSITE FOR THE IDL VERSION (Version 1.7, 2000.07.28):
http://astro.uni-tuebingen.de/software/idl/aitlib/timing/scargle.pro
ORIGINAL IDL DEFINITION:
PRO scargle,t,c,om,px,fmin=fmin,fmax=fmax,nfreq=nfreq, $
nu=nu,period=period,omega=omega, $
fap=fap,signi=signi,simsigni=simsigni, $
pmin=pmin,pmax=pmax,old=old, $
psdpeaksort=psdpeaksort,multiple=multiple,noise=noise, $
debug=debug,slow=slow
"""
#DEBUG: print('scargle: BEGIN'
# initial optional output default values
nu = None
period = None
signi = None
simsigni = None
psdpeaksort = None
# defaults
if noise is None:
assert c.dtype == np.float64
noise = np.sqrt(np.var(c))
if multiple is None: multiple = 0
if fap is None: fap = 0.01
# make times manageable (Scargle periodogram is time-shift invariant)
time = t-t[0]
# number of independent frequencies
# (Horne and Baliunas, eq. 13)
n0 = len(time)
assert n0 > 0
horne = int(-6.362+1.193*n0+0.00098*(n0**2.))
if horne < 0: horne = 5
if nfreq is None:
nfreq = horne
else:
horne = nfreq
# mininum frequency is 1/T
if fmin is None:
#IF (n_elements(pmax) EQ 0) THEN BEGIN
if pmax is None:
fmin = 1. / max(time)
else:
fmin = 1. / pmax
pass
# maximum frequency approximately equal to Nyquist frequency
if fmax is None:
#IF (n_elements(pmin) EQ 0) THEN BEGIN
if pmin is None:
fmax = n0 / (2.*max(time))
else:
fmax = 1. / pmin
pass
# if omega is not given, compute it
#IF (n_elements(omega) EQ 0) THEN BEGIN
if omega is None:
om = 2. * np.pi * (fmin+(fmax-fmin)* \
np.arange(nfreq,dtype=np.float64)/(nfreq-1.))
else:
om = omega
signi = -np.log( 1. - ((1.-fap)**(1./horne)) )
#
# Periodogram
#
#
#===== PERIODOGRAM: SLOW VERSION ========================================
if (old is True):
# Subtract mean from data
assert c.dtype == np.float64
cn = c-np.mean(c)
# computing the periodogram
px = np.zeros(nfreq, dtype=np.float64)
for i in range(nfreq):
tau = np.arctan(np.sum(np.sin(2.*om[i]*time))/\
np.sum(np.cos(2.0*om[i]*time)))
tau = tau/(2.*om[i])
co = np.cos(om[i]*(time-tau))
si = np.sin(om[i]*(time-tau))
px[i] = 0.5*(np.sum(cn*co)**2/np.sum(co**2) + np.sum(cn*si)**2/np.sum(si**2))
# correct normalization
var = np.var(cn)
if var != 0:
px = px/var
else:
print('scargle: ***** WARNING ***** Variance is zero (var == 0)')
# some other nice helpers
# computed here due to memory usage reasons
nu = om/(2.*np.pi)
period = 1./nu
#DEBUG: print('scargle: DONE (slow version)'
#DEBUG: print('scargle: END (slow version)'
return om,px,nu,period,signi,simsigni,psdpeaksort
#
#
#===== PERIODOGRAM: FAST VERSION ========================================
# Reference:
# Press, W.H., & Rybicki, G.B. 1989, ApJ 338, 277;
# FAST ALGORITHM FOR SPECTRAL ANALYSIS OF UNEVENLY SAMPLED DATA"
# Eq. (6); s2, c2
s2 = np.zeros(nfreq)
c2 = np.zeros(nfreq)
for i in range(nfreq):
s2[i] = np.sum( np.sin(2.*om[i]*time) )
c2[i] = np.sum( np.cos(2.*om[i]*time) )
# Eq. (2): Definition -> tan(2omtau)
# --- tan(2omtau) = s2 / c2
omtau = np.arctan(s2/c2) / (2.)
# cos(tau), sin(tau)
cosomtau = np.cos(omtau)
sinomtau = np.sin(omtau)
# Eq. (7); total(cos(t-tau)^2) and total(sin(t-tau)^2)
tmp = c2*np.cos(2.*omtau) + s2*np.sin(2.*omtau)
tc2 = 0.5*(n0+tmp)
ts2 = 0.5*(n0-tmp)
# clean up
tmp = 0.
omtau = 0.
s2 = 0.
#t2 = 0. #UNUSED? [KJM]
# computing the periodogram for the original lc
# Subtract mean from data
cn = c - np.mean(c)
# Eq. (5); sh and ch
sh = np.zeros(nfreq)
ch = np.zeros(nfreq)
if (multiple > 0) and (slow is None):
sisi = np.zeros(shape=(n0,nfreq))
coco = np.zeros(shape=(n0,nfreq))
for i in range(nfreq):
sisi[:,i] = np.sin(om[i]*time)
coco[:,i] = np.cos(om[i]*time)
sh[i] = np.sum(cn*sisi[:,i])
ch[i] = np.sum(cn*coco[:,i])
else:
for i in range(nfreq):
sh[i] = np.sum( cn*np.sin(om[i]*time) )
ch[i] = np.sum( cn*np.cos(om[i]*time) )
pass
# Eq. (3)
px = ((ch*cosomtau + sh*sinomtau)**2 / tc2) + \
((sh*cosomtau - ch*sinomtau)**2 / ts2)
# correct normalization
px = 0.5*px/(noise**2)
# --- RUN SIMULATIONS for multiple > 0
if (multiple > 0):
if (multiple*np.min(fap) < 10):
print('scargle: message: WARNING [/informational]')
print('scargle: message: Number of iterations (multiple keyword)'+\
' [/informational]')
print('scargle: message: not large enough for false alarm '+\
'probability [/informational]')
print('scargle: message: requested (need multiple*FAP > 10 ) '+\
'[/informational]')
psdpeak = np.zeros(multiple)
for m in range(multiple):
if debug is True:
if ((m+1) % 100 == 0):
print('scargle: working on the', m,'th simulation')
# white noise simulation
cn = np.random.normal(0.,1.,n0)*noise
cn = cn-np.mean(cn) # .. force OBSERVED count rate to zero
# Eq. (5); sh and ch
if slow is None:  # reuse the precomputed sin/cos tables built above
for i in range(nfreq):
sh[i] = np.sum(cn*sisi[:,i])
ch[i] = np.sum(cn*coco[:,i])
else:
for i in range(nfreq):
sh[i] = np.sum( cn * np.sin(om[i]*time) )
ch[i] = np.sum( cn * np.cos(om[i]*time) )
# Eq. (3) ; computing the periodogram for each simulation
spud = ((ch*cosomtau + sh*sinomtau)**2 / tc2) + \
((sh*cosomtau - ch*sinomtau)**2 / ts2)
psdpeak[m] = max( spud )
# False Alarm Probability according to simulations
if len(psdpeak) != 0:
idx = np.argsort(psdpeak) #BEWARE of the IDL sort() gotcha!
# correct normalization
psdpeaksort = 0.5 * psdpeak[idx]/(noise**2)
simsigni = psdpeaksort[int((1.-fap)*(multiple-1))]
# some other nice helpers
# computed here due to memory usage reasons
nu = om/(2.*np.pi)
period = 1./nu
#DEBUG: print('scargle: DONE (fast version)'
#DEBUG: print('scargle: END (fast version)'
return om,px,nu,period,signi,simsigni,psdpeaksort
def ctheta_slave_py( parray, mag, tobs, version=2 ):
"""
NAME:
ctheta_slave_py
INPUTS:
parray
mag
tobs
(version)
OUTPUTS:
theta
DESCRIPTION:
Computes theta for a pre-specified array of test periods.
ORIGINAL IDL DEFINITION:
pro Ctheta_slave, parray, mag, tobs, theta
"""
assert isinstance(parray,np.ndarray)
assert (parray.dtype == np.float64)
assert (parray.ndim == 1)
assert (parray.size >= 1)
assert isinstance(mag,np.ndarray)
assert (mag.dtype == np.float64)
assert (mag.ndim == 1)
assert (mag.size >= 1)
assert isinstance(tobs,np.ndarray)
assert (tobs.dtype == np.float64)
assert (tobs.shape == mag.shape)
assert isinstance(version,int)
#DEBUG: print('ctheta_slave: BEGIN'
#DEBUG: time10 = tm.time()
t0 = np.min(tobs)
#tlast = np.max(tobs) #UNUSED? [KJM]
tt = tobs - t0
theta = 0.*parray
# Loop over all periods
if (version == 2):
# optimized version (about 35% faster than original)
avm_km = np.sum(mag)/len(mag)
denom_km = np.sum( (mag-avm_km)**2 )
for k in range(len(parray)):
period = parray[k]
phi = tt/period
nphi = phi.astype(np.int64)
phi = phi - nphi
ss = np.argsort(phi)
mm = mag[ss]
mmplus = np.append(mm[1:], mm[0])
numer = np.sum( (mmplus - mm)**2 )
theta[k] = numer/denom_km
elif (version == 1):
# original version (line-by-line translation of IDL code)
for k in range(len(parray)):
period = parray[k]
phi = tt/period
nphi = phi.astype(np.int64)
phi = phi - nphi
ss = np.argsort(phi) #KJM: BEWARE of the IDL sort() gotcha!
phi = phi[ss] #KJM: Not used -- so why compute?
mm = mag[ss]
avm = np.sum(mm)/len(mm) #KJM: Suboptimal: move before loop
denom = np.sum( (mm-avm)**2 ) #KJM: Suboptimal: move before loop
mmplus = np.append(mm[1:], mm[0])
numer = np.sum( (mmplus - mm)**2 )
theta[k] = numer/denom
else:
assert( (version == 2) or (version == 1)) #KJM: bad version value
#DEBUG: time11 = tm.time()
#DEBUG: print('ctheta_slave: DONE [%d] %8.3f seconds' % (version,(time11-time10))
#DEBUG: print('ctheta_slave: END'
return theta
def ctheta_slave_v3_py( parray, mag, tobs ):
"""
NAME:
ctheta_slave_v3_py
INPUTS:
parray
mag
tobs
OUTPUTS:
theta
DESCRIPTION:
Computes theta for a pre-specified array of test periods.
ORIGINAL IDL DEFINITION:
pro Ctheta_slave, parray, mag, tobs, theta
"""
t0 = np.min(tobs)
tt = tobs - t0
theta = np.zeros_like(parray)
mmplus_km = np.zeros_like(mag)
avm_km = np.sum(mag)/len(mag)
denom_km = np.sum( (mag-avm_km)**2 )
m = len(parray)
for k in range(m):
period = parray[k]
phi = tt / period
#nphi = np.fix(phi) #KJM: literal but slower
nphi = phi.astype(np.int64) #KJM: ~25% faster
phi = phi - nphi
ss = np.argsort(phi) #KJM: BEWARE the IDL sort gotcha!
mm = mag[ss]
#mmplus = np.append(mm[1:], mm[0]) #KJM: literal but slower
#numer = np.sum( (mmplus - mm)**2 ) #KJM: uses mmplus
mmplus_km[:-1] = mm[1:] #KJM: Don't use np.append within loops!
mmplus_km[-1] = mm[0] #KJM: Don't use np.append within loops!
#assert np.allclose(mmplus,mmplus_km) #KJM: NUM? ME VEXO?
numer = np.sum( (mmplus_km - mm)**2 ) #KJM: uses mmplus_km
#KJM: using mmplus_km is ~24% faster
theta[k] = numer/denom_km
return theta
#KJM: ========== NUMBA JUST-IN-TIME COMPILATION: BEGIN
ctheta_slave_v3_pyjit = numba.jit(nopython=True)(ctheta_slave_v3_py)
#KJM: ========== NUMBA JUST-IN-TIME COMPILATION: END
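# Tiny worked example (illustrative only): the Lafler-Kinman theta statistic
# computed above should dip at the true period of a clean periodic signal.
def _example_theta_dip():
    t = np.linspace(0.0, 10.0, 64)
    y = np.sin(2.0*np.pi*t/2.5)                  # noiseless signal, period 2.5
    trial_periods = np.array([1.0, 2.5, 4.0])
    theta = ctheta_slave_v3_py(trial_periods, y, t)
    return trial_periods[np.argmin(theta)]       # expected: 2.5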
def table_psi_kjm_py( xx=None, yy=None, ee=None, n=None):
"""
NAME:
table_psi_kjm_py
INPUTS:
xx: periods (e.g., x)
yy: power (e.g., psi)
ee: thresh (e.g., conf)
n: number of ranked periods to show (e.g., 10)
"""
assert isinstance(xx,np.ndarray)
assert (xx.ndim == 1)
xx_shape = xx.shape
assert isinstance(yy,np.ndarray)
assert (yy.shape == xx_shape)
assert isinstance(ee,np.ndarray)
assert (ee.shape == xx_shape)
assert isinstance(n, (int, np.integer))  # np.int was removed from NumPy; accept plain or NumPy ints
assert (n >= 1)
sz = len(xx)
lm_x = np.zeros(sz)
lm_y = np.zeros(sz)
lm_k = np.zeros(sz,dtype=np.int_)
j = 0
assert (sz >= 3)
for k in range(1,sz-1):
ym1 = yy[k-1]
y = yy[k]
yp1 = yy[k+1]
if ((y>ym1) and (y>yp1)):
lm_y[j] = yy[k] # local maximum psiacc value
lm_x[j] = xx[k] # local maximum period value
lm_k[j] = k
j += 1
lm_n = j
lm_x = lm_x[:lm_n]
lm_y = lm_y[:lm_n]
lm_k = lm_k[:lm_n]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This doesn't seem vital, but it keeps crashing when there are few peaks.
# I decided it's okay if the tables show fewer than n entries.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# assert (len(lm_y) >= n)
if len(lm_y) < n:
n = len(lm_y)
idx = (-lm_y).argsort()[:n] # indices (location) of the n largest values
print('TABLE: BEGIN')
print('rank -------Period [days]------ Psi index Frequency Thresh')
fmt = '%2d %14.8f +- %11.8f %9.2f %8d %10.6f %7.2f'
for j in range(n):
k=idx[j]
kk = lm_k[k]
p0 = xx[kk]
y0 = yy[kk]
y0err = ee[kk]
kkp1 = kk + 1
p1 = xx[kkp1]
sigma = abs((p0-p1)/2.) # estimate of error (one standard deviation)
print(fmt % ( j+1, p0, sigma, y0, kk, 1./p0, y0err))
print('TABLE: END')
return
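# A minimal usage sketch for table_psi_kjm_py on synthetic arrays; the names and the flat
# threshold below are illustrative only.
def _demo_table_psi():
    periods = np.linspace(0.5, 5.0, 1000)
    psi = np.exp(-0.5 * ((periods - 2.5) / 0.05) ** 2) * 100.0 + np.random.rand(periods.size)
    conf = np.full_like(psi, 10.0)                      # a flat significance threshold
    table_psi_kjm_py(xx=periods, yy=psi, ee=conf, n=5)  # prints the 5 strongest local maxima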
def fig_obs_kjm_py( hjd=None, mag=None, filts=None, filtnams=None, tag=None, \
plotfile=None, xlim=None ):
"""
NAME:
fig_obs_kjm_py
INPUTS:
hjd: time (Heliocentric Julian Day) input data array
        mag: magnitude input data array (co-aligned with hjd)
filts: filter input data array (co-aligned with hjd) with
integer identifier indicating passband
filtnams = string array containing character names corresponding to
coded filts values. E.g., if you have 5 bands labeled u,g,r,i,z
with filts values 0,1,2,3,4 respectively, filtnams would be set by:
filtnams = ['u', 'g', 'r', 'i', 'z']
tag: String written in the bottom-left corner (if any)
plotfile: filename of the output plot (if any)
xlim: user-defined limits of the x-axis (if any)
"""
assert isinstance(hjd,np.ndarray)
assert (hjd.ndim == 1)
hjd_shape = hjd.shape
assert isinstance(mag,np.ndarray)
assert (mag.shape == hjd_shape)
assert isinstance(filts,np.ndarray)
assert (filts.shape == hjd_shape)
#DEBUG: print('fig_obs_kjm: ','OK! :-)'
color = ['dodgerblue'] # matplotlib 1.5.0 color names
nfilts = len(filtnams)
assert (nfilts >= 1)
hjd0 = int(np.min(hjd)) # offset
x = hjd - hjd0 # days from offset
dx = max(0.08 * np.max(x),0.25)
if xlim is None:
xlim = [-dx, (np.max(x)+dx)]
xlabel = 'HJD - %d [days]' % hjd0
dy = 0.5 # delta_mag
if (nfilts > 1):
fig, axarr = plt.subplots( nfilts, sharex=True, figsize=(8.5,11) )
for i in range(nfilts):
fwant = float(i)
ok = (filts == fwant)
xx = x[ok]
yy = mag[ok]
axarr[i].scatter( xx, yy, color=color[0], alpha=0.5 )
axarr[i].set_xlim( xlim ) # all subplots have the same X-axis
# expand and flip Y-axis:
axarr[i].set_ylim( [(np.max(yy)+dy),(np.min(yy)-dy)] )
axarr[i].set_ylabel( 'mag', size='x-large' )
axarr[i].text( 0.97, 0.80, filtnams[i], ha='right', size='x-large',\
transform=axarr[i].transAxes )
# ^----- relative coordinates within subplot
if (i == (nfilts-1)): # last subplot needs a label for the X-axis
axarr[i].set_xlabel( xlabel, size='x-large' )
else:
fig, ax = plt.subplots( nfilts, figsize=(8.5,11) )
fwant = float(0)
ok = (filts == fwant)
xx = x[ok]
yy = mag[ok]
ax.scatter( xx, yy, color=color[0], alpha=0.5 )
ax.set_xlim( xlim )
# expand and flip Y-axis:
ax.set_ylim( [(np.max(yy)+dy),(np.min(yy)-dy)] )
ax.set_ylabel( 'mag', size='x-large' )
ax.set_xlabel( xlabel, size='x-large' )
ax.text( 0.97, 0.90, filtnams[0], ha='right', size='x-large',\
transform=ax.transAxes )
if (tag is not None):
plt.figtext( 0.95, 0.1, tag, ha='right', va='bottom', color='grey', \
size='large', rotation=90)
if (plotfile is not None):
plt.savefig( plotfile, dpi=300 )
#DEBUG: plt.show()
plt.close()
if (plotfile is not None):
print(plotfile, ' <--- plotfile written :-)')
return
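# A minimal usage sketch for fig_obs_kjm_py with two synthetic bands; all names, values,
# and the output filename are illustrative only.
def _demo_fig_obs():
    rng = np.random.default_rng(0)
    hjd = 2456000.0 + np.sort(rng.uniform(0.0, 60.0, 400))
    filts = rng.integers(0, 2, size=hjd.size).astype(float)   # 0 -> 'g', 1 -> 'r'
    mag = 15.0 + 0.1 * filts + 0.05 * rng.normal(size=hjd.size)
    fig_obs_kjm_py(hjd=hjd, mag=mag, filts=filts, filtnams=['g', 'r'],
                   tag='demo', plotfile='demo_obs.png')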
def fig_psi_kjm_py( freq=None, psi_m=None, thresh_m=None, filtnams=None, \
tag=None, plotfile=None, ylim=None, verbose=False ):
"""
NAME:
fig_psi_kjm_py
INPUTS:
freq: 1-D array (length of N) frequencies for which the periodograms are
computed. It is the same for ALL bands/channels.
psi_m: M x N array of the Psi periodogram, where M is the number of
bands/channels in the input array filtnams
thresh_m: M x N array containing threshold values of Psi at each period
and band for assessing significance for psi_m
filtnams = string array (length of M) containing character names
corresponding to coded filts values. E.g., 5 bands labeled
u,g,r,i,z with filts values:
filtnams = ['u', 'g', 'r', 'i', 'z']
tag: String written in the bottom-left corner (if any)
plotfile: filename of the output plot (if any)
ylim: user-defined limits of the y-axis (if any)
verbose: show frequency/period table (default=False)
"""
assert (filtnams is not None)
nfilts = len(filtnams)
assert (nfilts >= 1)
assert isinstance(freq,np.ndarray)
assert (freq.ndim == 1)
ndata = len(freq)
assert isinstance(psi_m,np.ndarray)
psi_m_shape = psi_m.shape
if (nfilts > 1):
assert (psi_m_shape[0] == nfilts)
assert (psi_m_shape[1] == ndata)
assert isinstance(thresh_m,np.ndarray)
assert (thresh_m.shape == psi_m_shape)
periods = 1.0/freq
color = ['dodgerblue','salmon'] # matplotlib 1.5.0 color names
if (verbose):
print(' filter : Psi Frequency Period[days]')
if (nfilts > 1):
fig, axarr = plt.subplots( nfilts+1, sharex=True, figsize=(8.5,11) )
for i in range(len(filtnams)):
axarr[i].plot( freq, psi_m[i], color=color[0], zorder=0 )
if (np.any(thresh_m[i])):
axarr[i].plot( freq, thresh_m[i], color=color[1], zorder=10 )
if ylim is not None:
axarr[i].set_ylim( ylim )
axarr[i].set_ylabel( r'${\Psi}$', size=19 )
axarr[i].text( 0.97, 0.80, filtnams[i], ha='right', size='x-large',\
transform=axarr[i].transAxes )
# ^---- relative coordinates within subplot
idx = np.argmax(psi_m[i])
freq_peak = freq[idx]
period_peak = periods[idx]
if (verbose):
print('%8s : %12.2f %11.6f %12.7f' %\
(filtnams[i],psi_m[i][idx],freq_peak,period_peak))
# combine results for all filters
j = nfilts
axarr[j].plot( freq, psi_m.sum(0), color=color[0], zorder=0 )
if (np.any(thresh_m.sum(0))):
axarr[j].plot( freq, thresh_m.sum(0), color=color[1], zorder=10 )
if ylim is not None:
axarr[j].set_ylim( ylim )
axarr[j].set_ylabel( r'${\Psi}$', size=19 )
axarr[j].set_xlabel( r'Frequency [days${^{-1}}$]', size='x-large' )
axarr[j].text( 0.985, 0.80, 'ALL', ha='right', size='x-large', \
transform=axarr[j].transAxes )
# ^----- relative coordinates within subplot
idx = np.argmax(psi_m.sum(0))
freq_peak = freq[idx]
period_peak = periods[idx]
if (verbose):
print('%8s : %12.2f %11.6f %12.7f' %\
('ALL',psi_m.sum(0)[idx],freq_peak,period_peak))
else:
fig, ax = plt.subplots( nfilts, figsize=(8.5,11) )
ax.plot( freq, psi_m, color=color[0], zorder=0 )
if (np.any(thresh_m)):
ax.plot( freq, thresh_m, color=color[1], zorder=10 )
if ylim is not None:
ax.set_ylim( ylim )
ax.set_ylabel( r'${\Psi}$', size=19 )
ax.set_xlabel( r'Frequency [days${^{-1}}$]', size='x-large' )
ax.text( 0.97, 0.90, filtnams[0], ha='right', size='x-large', \
transform=ax.transAxes ) # relative coordinates within subplot
idx = np.argmax(psi_m)
freq_peak = freq[idx]
period_peak = periods[idx]
if (verbose):
print('%8s : %12.2f %11.6f %12.7f' %\
(filtnams[0],psi_m[idx],freq_peak,period_peak))
if (tag is not None):
plt.figtext( 0.95, 0.1, tag, ha='right', va='bottom', color='grey', \
size='large', rotation=90)
if (plotfile is not None):
plt.savefig( plotfile, dpi=300 )
#DEBUG: plt.show()
plt.close()
if (plotfile is not None):
print(plotfile, ' <--- plotfile written :-)')
return
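# A minimal usage sketch for fig_psi_kjm_py with two synthetic bands; the Gaussian peaks,
# the flat thresholds, and the output filename are illustrative only.
def _demo_fig_psi():
    freq = np.linspace(0.1, 2.0, 2000)                 # trial frequencies [1/days]
    psi_m = np.vstack([np.exp(-0.5 * ((freq - 0.4) / 0.005) ** 2) * 80.0 + 1.0,
                       np.exp(-0.5 * ((freq - 0.4) / 0.005) ** 2) * 60.0 + 1.0])
    thresh_m = np.full_like(psi_m, 20.0)               # a flat significance threshold per band
    fig_psi_kjm_py(freq=freq, psi_m=psi_m, thresh_m=thresh_m, filtnams=['g', 'r'],
                   tag='demo', plotfile='demo_psi.png', verbose=True)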
def fig_phi_kjm_py( hjd=None, mag=None, magerr=None, filts=None, filtnams=None,
period=None, tag=None, plotfile=None ):
"""
NAME:
fig_phi_kjm_py
INPUTS:
hjd: time (Heliocentric Julian Day) input data array
        mag: magnitude input data array (co-aligned with hjd)
        magerr: magnitude error input data array (co-aligned with hjd)
filts: filter input data array (co-aligned with hjd) with
integer identifier indicating passband
filtnams = string array containing character names corresponding to
coded filts values. E.g., if you have 5 bands labeled u,g,r,i,z
with filts values 0,1,2,3,4 respectively, filtnams would be set by:
filtnams = ['u', 'g', 'r', 'i', 'z']
period: period to be used to phase up the data [days]
tag: String written in the bottom-left corner (if any)
plotfile: filename of the output plot (if any)
"""
assert isinstance(hjd,np.ndarray)
assert (hjd.ndim == 1)
hjd_shape = hjd.shape
assert isinstance(mag,np.ndarray)
assert (mag.shape == hjd_shape)
assert isinstance(magerr,np.ndarray)
assert (magerr.shape == hjd_shape)
assert isinstance(filts,np.ndarray)
assert (filts.shape == hjd_shape)
assert (period is not None)
#DEBUG: print('fig_phi_kjm: ','OK! :-)'
color = ['dodgerblue'] # matplotlib 1.5.0 color names
nfilts = len(filtnams)
assert (nfilts >= 1)
hjd0 = int(np.min(hjd)) # offset
x = hjd - hjd0 # days from offset
dx = 0.1
xlim = [0.0-dx, 2.0+dx]
xlabel = r'${\phi}$'
dy = 0.5 # delta_mag
if (nfilts > 1):
fig, axarr = plt.subplots( nfilts, sharex=True, figsize=(8.5,11) )
for i in range(nfilts):
fwant = float(i)
ok = (filts == fwant)
xx = x[ok]
yy = mag[ok]
ee = magerr[ok]
phi0 = xx / period
nphi0 = phi0.astype(np.int64)
phi = phi0 - nphi0
axarr[i].errorbar( phi, yy, yerr=ee, fmt='o', color=color[0], \
alpha=0.5 )
axarr[i].errorbar( phi+1, yy, yerr=ee, fmt='o', color=color[0], \
alpha=0.5 )
axarr[i].set_xlim( xlim ) # all subplots have the same X-axis
# expand and flip Y-axis:
axarr[i].set_ylim( [(np.max(yy+ee)+dy),(np.min(yy-ee)-dy)] )
axarr[i].set_ylabel( 'mag', size='x-large' )
axarr[i].text( 0.97, 0.80, filtnams[i], ha='right', size='x-large',\
transform=axarr[i].transAxes )
# ^---- relative coordinates within subplot
if (i == (nfilts-1)): # last subplot needs a label for the X-axis
axarr[i].set_xlabel( xlabel, size=20 )
else:
fig, ax = plt.subplots( nfilts, sharex=True, figsize=(8.5,11) )
fwant = float(0)
ok = (filts == fwant)
xx = x[ok]
yy = mag[ok]
ee = magerr[ok]
phi0 = xx / period
nphi0 = phi0.astype(np.int64)
phi = phi0 - nphi0
ax.errorbar( phi, yy, yerr=ee, fmt='o', color=color[0], alpha=0.5 )
ax.errorbar( phi+1, yy, yerr=ee, fmt='o', color=color[0], alpha=0.5 )
ax.set_xlim( xlim )
# expand and flip Y-axis:
        ax.set_ylim( [(np.max(yy+ee)+dy),(np.min(yy-ee)-dy)] )
from PIL import Image
import numpy as np
import math
def roll_horizontal(image, num):
width, height = image.size
arr2 = np.zeros_like(image)
for p in range(num + 1):
width_left = round(p / num * width)
arr1 = np.asarray(image)
part1 = arr1[:, :width_left, :]
part2 = arr2[:, width_left:, :]
# part1 = np.concatenate((part1, part2), axis=1)
part1 = np.hstack((part1, part2))
img = Image.fromarray(part1)
# img.show()
img.save('tmp/%s-%s.jpg' % ('roll', p), 'JPEG')
def wipe_vertical(image1, num):
for p in range(num + 1): # 1-10
percent1 = p / num
percent2 = (p + 1) / num
arr = np.array(image1.convert('RGBA'))
alpha = arr[:, :, 3]
n = len(alpha)
alpha[:] = np.interp(np.arange(n), [0, percent1 * n, percent2 * n, n], [255, 255, 0, 0])[:, np.newaxis]
img = Image.fromarray(arr, mode='RGBA')
# img.show()
img.save('tmp/%s-%s.png' % ('wipe', p), 'PNG')
def fade(image, num):
for p1 in range(num):
alpha = p1 / num * 255
arr1 = np.array(image.convert('RGBA'))
arr1[:, :, 3] = np.multiply(np.ones_like(arr1)[:, :, 0], alpha)
img = Image.fromarray(arr1)
# img.show()
        img.save('tmp/%s-%s.png' % ('fade', p1), 'PNG')
def panning(image, step=100):
arr = np.asarray(image)
res = np.zeros(arr.shape)
vector = [[1, 0, 0], [0, 1, 0], [step, 0, 1]]
for x in range(len(arr)):
for y in range(len(arr[0, :])):
point = np.dot(np.array([x, y, 1]), np.array(vector))
new_x, new_y = point[:2]
if new_x >= res.shape[0] or new_y >= res.shape[1]:
continue
res[point[0], point[1], :] = arr[x, y, :]
img = Image.fromarray(np.uint8(res))
img.show()
img.save('tmp/%s-%s.png' % ('panning', step), 'PNG')
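# A minimal vectorized sketch of the same translation done with array slicing instead of the
# per-pixel matrix multiply above; `panning_fast` is a name introduced here for illustration only.
def panning_fast(image, step=100):
    arr = np.asarray(image)
    res = np.zeros_like(arr)                  # same shape and dtype, filled with black
    if 0 <= step < arr.shape[0]:
        # shift the image down by `step` rows; rows pushed past the edge are dropped
        res[step:, :, :] = arr[:arr.shape[0] - step, :, :]
    return Image.fromarray(res)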
def hole(image, radius=100):
    width, height = image.size
    arr = np.array(image.convert('RGBA'))
    # consider using zip?
    for y in range(arr.shape[0]):        # rows (image height)
        for x in range(arr.shape[1]):    # columns (image width)
            real_x = x - width / 2
            real_y = y - height / 2
            if real_x * real_x + real_y * real_y < radius * radius:
                # keep this pixel's RGB values and clear its alpha value
                arr[y, x][3] = 0
    img = Image.fromarray(arr, mode='RGBA')
    img.save('tmp/%s-%s.png' % ('hole', radius), 'PNG')
def rotate(image, alpha=math.pi / 6):
arr = np.asarray(image)
res = np.zeros(arr.shape)
vector = [[math.cos(alpha), -math.sin(alpha), 0], [math.sin(alpha), math.cos(alpha), 0], [0, 0, 1]]
for x in range(len(arr)):
for y in range(len(arr[0, :])):
            point = np.dot(np.array([x, y, 1]), np.array(vector))
import os
import copy
import sys
import pickle
import time
import os.path as osp
import shlex
import shutil
import subprocess
import lmdb
import msgpack_numpy
import numpy as np
import torch
import torch.utils.data as data
import tqdm
from collections import defaultdict
BASE_DIR = os.path.dirname(__file__)
sys.path.append(os.path.join(BASE_DIR, '../../'))
from utils.splits import get_split_data, parse_line, get_ot_pairs_taskgrasp
from visualize import draw_scene, get_gripper_control_points
from geometry_utils import regularize_pc_point_count
def pc_normalize(pc, grasp, pc_scaling=True):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
grasp[:3, 3] -= centroid
if pc_scaling:
m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
pc = np.concatenate([pc, np.ones([pc.shape[0], 1])], axis=1)
scale_transform = np.diag([1 / m, 1 / m, 1 / m, 1])
pc = np.matmul(scale_transform, pc.T).T
pc = pc[:, :3]
grasp = np.matmul(scale_transform, grasp)
return pc, grasp
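# A minimal usage sketch for pc_normalize on a random point cloud and an identity grasp pose;
# the array sizes and values are illustrative only.
def _demo_pc_normalize():
    pc = np.random.rand(4096, 3)            # XYZ points
    grasp = np.eye(4)                       # 4x4 homogeneous grasp transform
    pc_n, grasp_n = pc_normalize(pc.copy(), grasp.copy(), pc_scaling=True)
    # the returned cloud is centered on the origin and scaled to unit maximum radius
    return pc_n, grasp_n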
def get_task1_hits(object_task_pairs, num_grasps=25):
candidates = object_task_pairs['False'] + object_task_pairs['Weak False']
lines = []
label = -1 # All grasps are negatives
for ot in candidates:
for grasp_idx in range(num_grasps):
obj, task = ot.split('-')
line = "{}-{}-{}:{}\n".format(obj, str(grasp_idx), task, label)
lines.append(line)
return lines
class SGNTaskGrasp(data.Dataset):
def __init__(
self,
num_points,
transforms=None,
train=0,
download=True,
base_dir=None,
folder_dir='',
normal=True,
tasks=None,
map_obj2class=None,
class_list=None,
split_mode=None,
split_idx=0,
split_version=0,
pc_scaling=True,
use_task1_grasps=True):
"""
Args:
num_points: Number of points in point cloud (used to downsample data to a fixed number)
transforms: Used for data augmentation during training
train: 1 for train, 0 for test, 2 for validation
base_dir: location of dataset
folder_dir: name of dataset
tasks: list of tasks
class_list: list of object classes
map_obj2class: dictionary mapping dataset object to corresponding WordNet class
split_mode: Choose between held-out instance ('i'), tasks ('t') and classes ('o')
split_version: Choose 1, 0 is deprecated
split_idx: For each split mode, there are 4 cross validation splits (choose between 0-3)
pc_scaling: True if you want to scale the point cloud by the standard deviation
            include_reverse_relations: True since we are modelling an undirected graph
            use_task1_grasps: True if you want to include the grasps from the object-task pairs
                rejected in Stage 1 (and add these grasps as negative samples)
Deprecated args (not used anymore): normal, download
"""
super().__init__()
self._pc_scaling = pc_scaling
self._split_mode = split_mode
self._split_idx = split_idx
self._split_version = split_version
self._num_points = num_points
self._transforms = transforms
self._tasks = tasks
self._num_tasks = len(self._tasks)
self._train = train
self._map_obj2class = map_obj2class
data_dir = os.path.join(base_dir, folder_dir, "scans")
data_txt_splits = {
0: 'test_split.txt',
1: 'train_split.txt',
2: 'val_split.txt'}
if train not in data_txt_splits:
raise ValueError("Unknown split arg {}".format(train))
self._parse_func = parse_line
lines = get_split_data(
base_dir,
folder_dir,
self._train,
self._split_mode,
self._split_idx,
self._split_version,
use_task1_grasps,
data_txt_splits,
self._map_obj2class,
self._parse_func,
get_ot_pairs_taskgrasp,
get_task1_hits)
self._data = []
self._pc = {}
self._grasps = {}
self._object_classes = class_list
self._num_object_classes = len(self._object_classes)
start = time.time()
correct_counter = 0
all_object_instances = []
self._object_task_pairs_dataset = []
self._data_labels = []
self._data_label_counter = {0: 0, 1: 0}
for i in tqdm.trange(len(lines)):
obj, obj_class, grasp_id, task, label = self._parse_func(lines[i])
obj_class = self._map_obj2class[obj]
all_object_instances.append(obj)
self._object_task_pairs_dataset.append("{}-{}".format(obj, task))
pc_file = os.path.join(data_dir, obj, "fused_pc_clean.npy")
if pc_file not in self._pc:
if not os.path.exists(pc_file):
raise ValueError(
'Unable to find processed point cloud file {}'.format(pc_file))
pc = np.load(pc_file)
pc_mean = pc[:, :3].mean(axis=0)
pc[:, :3] -= pc_mean
self._pc[pc_file] = pc
grasp_file = os.path.join(
data_dir, obj, "grasps", str(grasp_id), "grasp.npy")
if grasp_file not in self._grasps:
                grasp = np.load(grasp_file)
"""Test cases for the core module."""
import numpy as np
import numpy.typing as npt
import pint
import pytest
import xarray as xr
from ussa1976.core import AR_7
from ussa1976.core import compute_high_altitude
from ussa1976.core import compute_levels_temperature_and_pressure_low_altitude
from ussa1976.core import compute_low_altitude
from ussa1976.core import compute_mean_molar_mass_high_altitude
from ussa1976.core import compute_number_densities_high_altitude
from ussa1976.core import compute_temperature_gradient_high_altitude
from ussa1976.core import compute_temperature_high_altitude
from ussa1976.core import create
from ussa1976.core import H
from ussa1976.core import init_data_set
from ussa1976.core import M
from ussa1976.core import M0
from ussa1976.core import make
from ussa1976.core import O2_7
from ussa1976.core import O_7
from ussa1976.core import SPECIES
from ussa1976.core import to_altitude
from ussa1976.core import VARIABLES
from ussa1976.units import to_quantity
from ussa1976.units import ureg
def test_make() -> None:
"""Returned data set has expected data."""
# default constructor
profile = make()
assert profile["z_level"].values[0] == 0.0
assert profile["z_level"].values[-1] == 100.0
assert profile.dims["z_layer"] == 50
assert profile.dims["species"] == 12
# custom levels altitudes
profile = make(levels=ureg.Quantity(np.linspace(2.0, 15.0, 51), "km"))
assert profile.dims["z_layer"] == 50
assert profile["z_level"].values[0] == 2.0
assert profile["z_level"].values[-1] == 15.0
assert profile.dims["species"] == 12
# custom number of layers
profile = make(levels=ureg.Quantity(np.linspace(0.0, 150.0, 37), "km"))
assert profile.dims["z_layer"] == 36
assert profile["z_level"].values[0] == 0.0
assert profile["z_level"].values[-1] == 150.0
assert profile.dims["species"] == 12
profile = make(levels=ureg.Quantity(np.linspace(0.0, 80.0, 2), "km"))
assert profile.dims["z_layer"] == 1
assert profile["z_level"].values[0] == 0.0
assert profile["z_level"].values[-1] == 80.0
assert profile.dims["species"] == 12
def test_make_invalid_levels() -> None:
"""Raises a ValueError on invalid level altitudes."""
with pytest.raises(ValueError):
make(levels=np.linspace(-4000, 50000) * ureg.m)
with pytest.raises(ValueError):
make(levels=np.linspace(500.0, 5000000.0) * ureg.m)
@pytest.fixture
def test_altitudes() -> pint.Quantity:
"""Test altitudes fixture."""
return np.linspace(0.0, 100000.0, 101) * ureg.m
def test_create(test_altitudes: pint.Quantity) -> None:
"""Creates a data set with expected data."""
ds = create(z=test_altitudes)
assert all([v in ds.data_vars for v in VARIABLES])
variables = ["p", "t", "n", "n_tot"]
ds = create(z=test_altitudes, variables=variables)
assert len(ds.dims) == 2
assert "z" in ds.dims
assert "species" in ds.dims
assert len(ds.coords) == 2
assert np.all(to_quantity(ds.z) == test_altitudes)
assert [s for s in ds.species] == [s for s in SPECIES]
for var in variables:
assert var in ds
assert all(
[
x in ds.attrs
for x in ["convention", "title", "history", "source", "references"]
]
)
def test_create_invalid_variables(test_altitudes: npt.NDArray[np.float64]) -> None:
"""Raises when invalid variables are given."""
invalid_variables = ["p", "t", "invalid", "n"]
with pytest.raises(ValueError):
create(z=test_altitudes, variables=invalid_variables)
def test_create_invalid_z() -> None:
"""Raises when invalid altitudes values are given."""
with pytest.raises(ValueError):
create(z=np.array([-5.0]) * ureg.m)
with pytest.raises(ValueError):
create(z=np.array([1000001.0]) * ureg.m)
def test_create_below_86_km_layers_boundary_altitudes() -> None:
"""
Produces correct results.
We test the computation of the atmospheric variables (pressure,
temperature and mass density) at the level altitudes, i.e. at the model
layer boundaries. We assert correctness by comparing their values with the
values from the table 1 of the U.S. Standard Atmosphere 1976 document.
"""
z = to_altitude(H)
ds = create(z=z, variables=["p", "t", "rho"])
level_temperature = (
np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65, 186.87])
* ureg.K
)
level_pressure = (
np.array([101325.0, 22632.0, 5474.8, 868.01, 110.90, 66.938, 3.9564, 0.37338])
* ureg.Pa
)
level_mass_density = (
np.array(
[
1.225,
0.36392,
0.088035,
0.013225,
0.0014275,
0.00086160,
0.000064261,
0.000006958,
]
)
* ureg.kg
/ ureg.m ** 3
)
assert np.allclose(to_quantity(ds.t), level_temperature, rtol=1e-4)
assert np.allclose(to_quantity(ds.p), level_pressure, rtol=1e-4)
assert np.allclose(to_quantity(ds.rho), level_mass_density, rtol=1e-3)
def test_create_below_86_km_arbitrary_altitudes() -> None:
"""
Produces correct results.
We test the computation of the atmospheric variables (pressure,
temperature and mass density) at arbitrary altitudes. We assert correctness
by comparing their values to the values from table 1 of the U.S. Standard
Atmosphere 1976 document.
"""
# The values below were selected arbitrarily from Table 1 of the document
# such that there is at least one value in each of the 7 temperature
# regions.
h = (
np.array(
[
200.0,
1450.0,
5250.0,
6500.0,
9800.0,
17900.0,
24800.0,
27100.0,
37200.0,
40000.0,
49400.0,
61500.0,
79500.0,
84000.0,
]
)
* ureg.m
)
temperatures = (
np.array(
[
286.850,
278.725,
254.025,
245.900,
224.450,
216.650,
221.450,
223.750,
243.210,
251.050,
270.650,
241.250,
197.650,
188.650,
]
)
* ureg.K
)
pressures = (
np.array(
[
98945.0,
85076.0,
52239.0,
44034.0,
27255.0,
7624.1,
2589.6,
1819.4,
408.7,
277.52,
81.919,
16.456,
0.96649,
0.43598,
]
)
* ureg.Pa
)
mass_densities = (
np.array(
[
1.2017,
1.0633,
0.71641,
0.62384,
0.42304,
0.12259,
0.040739,
0.028328,
0.0058542,
0.0038510,
0.0010544,
0.00023764,
0.000017035,
0.0000080510,
]
)
* ureg.kg
/ ureg.m ** 3
)
z = to_altitude(h)
ds = create(z=z, variables=["t", "p", "rho"])
assert np.allclose(to_quantity(ds.t), temperatures, rtol=1e-4)
assert np.allclose(to_quantity(ds.p), pressures, rtol=1e-4)
assert np.allclose(to_quantity(ds.rho), mass_densities, rtol=1e-4)
def test_init_data_set() -> None:
"""Data set is initialised.
Expected data variables are created and fill with nan values.
Expected dimensions and coordinates are present.
"""
def check_data_set(ds: xr.Dataset) -> None:
"""Check a data set."""
for var in VARIABLES:
assert var in ds
assert np.isnan(ds[var].values).all()
assert ds.n.values.ndim == 2
assert all(
ds.species.values
== ["N2", "O2", "Ar", "CO2", "Ne", "He", "Kr", "Xe", "CH4", "H2", "O", "H"]
)
z1 = np.linspace(0.0, 50000.0) * ureg.m
ds1 = init_data_set(z1)
check_data_set(ds1)
z2 = np.linspace(120000.0, 650000.0) * ureg.m
ds2 = init_data_set(z2)
check_data_set(ds2)
z3 = np.linspace(70000.0, 100000.0) * ureg.m
ds3 = init_data_set(z3)
check_data_set(ds3)
def test_compute_levels_temperature_and_pressure_low_altitude() -> None:
"""Computes correct level temperature and pressure values.
The correct values are taken from :cite:`NASA1976USStandardAtmosphere`.
"""
tb, pb = compute_levels_temperature_and_pressure_low_altitude()
level_temperature = (
np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65, 186.87])
* ureg.K
)
level_pressure = (
np.array([101325.0, 22632.0, 5474.8, 868.01, 110.90, 66.938, 3.9564, 0.37338])
* ureg.Pa
)
assert np.allclose(tb, level_temperature, rtol=1e-3)
    assert np.allclose(pb, level_pressure, rtol=1e-3)
import numpy as np
# physical/external base state of all entites
class EntityState(object):
def __init__(self):
# physical position
self.p_pos = None
# physical velocity
self.p_vel = None
# state of agents (including communication and internal/mental state)
class AgentState(EntityState):
def __init__(self):
super(AgentState, self).__init__()
# current number of zombie to human physical contacts
self.biting = 0
# health
self.health = 1.0
# fraction of time until can fire again (0: reloaded, 1: just fired)
self.reloading = 0.0
# which way is the agent aiming?
self.aim_heading = None
# angular velocity of aim
self.aim_vel = None
# action of the agent
class Action(object):
def __init__(self):
# physical action
self.u = None
# arms action
self.a = None
# properties and state of physical world entity
class Entity(object):
def __init__(self):
# name
self.name = ''
# properties:
self.size = 0.050
# entity can move / be pushed
self.movable = False
# entity collides with others
self.collide = True
# color
self.color = None
# max speed and accel
self.max_speed = None
self.accel = None
# state
self.state = EntityState()
# mass
self.mass = 1.0
# properties of agent entities
class Agent(Entity):
def __init__(self):
super(Agent, self).__init__()
# agents are movable by default
self.movable = True
# cannot observe the world
self.blind = False
# can the agent shoot?
self.armed = False
# aim physics
self.max_aim_vel = 2*np.pi
self.arms_act_pres = 0.5
self.arms_act_sens = 20
self.arms_pallet_count = 6
self.arms_pallet_damage = 0.05
self.arms_pallet_range = 10
self.arms_pallet_spread = 10 /360.0*2*np.pi
# time taken to reload
self.arms_reload_time = 0.5
# physical motor noise amount
self.u_noise = None
# arms handling noise
self.a_noise = None
# control range
self.u_range = 1.0
# team membership
self.team = 0
# state
self.state = AgentState()
self.health_decay = 1.0
# action
self.action = Action()
# script behavior to execute
self.action_callback = None
# multi-agent world
class World(object):
def __init__(self):
# list of agents and entities (can change at execution-time!)
self.agents = []
# communication channel dimensionality
self.dim_c = 0
# position dimensionality
self.dim_p = 2
# color dimensionality
self.dim_color = 3
# simulation timestep
self.dt = 0.1
# physical damping
self.damping = 0.25
# contact response parameters
self.contact_force = 1e+2
self.contact_margin = 1e-3
# projectile paths
self.projectiles = None
# info
self.info = None
# return all entities in the world
@property
def entities(self):
return self.agents
# return all agents controllable by external policies
@property
def policy_agents(self):
return [agent for agent in self.agents if agent.action_callback is None]
# return all agents controlled by world scripts
@property
def scripted_agents(self):
return [agent for agent in self.agents if agent.action_callback is not None]
# update state of the world
def step(self):
n = len(self.agents)
for i, agent in enumerate(self.agents): agent.index = i
self.info = {
'dist': np.zeros((n, n)),
'speed': np.zeros((n,)),
'health': np.zeros((n,)),
'fire': np.zeros((n,)),
'bite': np.zeros((n, n)),
'hit': np.zeros((n, n))
}
# record distance
for agent in self.agents:
for other in self.agents:
_, self.info['dist'][agent.index, other.index] = self.distance(agent, other)
# set actions for scripted agents
for agent in self.scripted_agents:
agent.action = agent.action_callback(agent, self)
# gather forces applied to entities
p_force = [None] * len(self.entities)
# apply agent physical controls
p_force = self.apply_action_force(p_force)
# apply environment forces
p_force = self.apply_environment_force(p_force)
# integrate physical state
self.integrate_state(p_force)
# update agent state
self.projectiles = np.zeros((0, 4))
for agent in self.agents:
self.update_agent_state(agent)
# record health
for agent in self.agents:
self.info['health'][agent.index] = agent.state.health
# gather agent action forces
def apply_action_force(self, p_force):
# set applied forces
for i,agent in enumerate(self.agents):
if agent.movable:
noise = np.random.randn(*agent.action.u.shape) * agent.u_noise if agent.u_noise else 0.0
p_force[i] = agent.action.u + noise
return p_force
# gather physical forces acting on entities
def apply_environment_force(self, p_force):
# simple (but inefficient) collision response
for a,entity_a in enumerate(self.entities):
for b,entity_b in enumerate(self.entities):
if(b <= a): continue
[f_a, f_b] = self.get_collision_force(entity_a, entity_b)
if(f_a is not None):
if(p_force[a] is None): p_force[a] = 0.0
p_force[a] = f_a + p_force[a]
if(f_b is not None):
if(p_force[b] is None): p_force[b] = 0.0
p_force[b] = f_b + p_force[b]
# wall forces
for i, entity in enumerate(self.entities):
walls = np.array([
[[0, 1], [0, -1]], # top
[[-1, 0], [1, 0]], # left
[[0, -1], [0, 1]], # bottom
[[1, 0], [-1, 0]], # right
])
for j in range(walls.shape[0]):
f = self.get_wall_force(entity, walls[j])
if f is not None:
if p_force[i] is None: p_force[i] = 0.0
p_force[i] += f
return p_force
# integrate physical state
def integrate_state(self, p_force):
for i, entity in enumerate(self.entities):
if not entity.movable: continue
entity.state.p_vel = entity.state.p_vel * (1 - self.damping)
if (p_force[i] is not None):
entity.state.p_vel += (p_force[i] / entity.mass) * self.dt
if entity.max_speed is not None:
speed = np.sqrt(np.square(entity.state.p_vel[0]) + np.square(entity.state.p_vel[1]))
max_speed = entity.max_speed * entity.state.health
if speed > max_speed:
entity.state.p_vel = entity.state.p_vel / np.sqrt(np.square(entity.state.p_vel[0]) +
np.square(entity.state.p_vel[1])) * max_speed
entity.state.p_pos += entity.state.p_vel * self.dt
# record speed
self.info['speed'][entity.index] = np.sqrt(np.sum(np.square(entity.state.p_vel)))
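    # Worked example of the update above (illustrative numbers only): with damping=0.25 and
    # dt=0.1, a unit force on a unit-mass entity updates velocity as
    #   v_new = 0.75 * v_old + (1.0 / 1.0) * 0.1
    # and then advances position by p_new = p_old + v_new * 0.1, before any max-speed clamping.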
def update_agent_state(self, agent):
# compute ballistics
if agent.armed:
# aim direction
noise = np.random.randn(*agent.action.a.shape) * agent.a_noise if agent.a_noise else 0.0
agent.state.aim_vel = (agent.action.a[0] + noise) * agent.max_aim_vel
agent.state.aim_heading += agent.state.aim_vel * self.dt
agent.state.aim_heading %= 2 * np.pi
# firing
if agent.state.reloading > 0:
reload_amount = self.dt / agent.arms_reload_time
agent.state.reloading = np.clip(agent.state.reloading - reload_amount, 0.0, 1.0)
else:
a_force = agent.action.a[1] + noise
act_prob = 1/(1+np.exp(agent.arms_act_sens * (agent.arms_act_pres - a_force)))
activated = np.random.binomial(1, act_prob * agent.state.health)
if activated:
# record fire
self.info['fire'][agent.index] += 1
agent.state.reloading = 1.0
# create rays representing projectiles
                    ray_pos = agent.state.p_pos + np.array([np.cos(agent.state.aim_heading),
                                                            np.sin(agent.state.aim_heading)])
import os
import numpy as np
from six import BytesIO
from tempfile import NamedTemporaryFile
from ... import data_dir, img_as_float
from .. import imread, imsave, use_plugin, reset_plugins
from PIL import Image
from .._plugins.pil_plugin import (
pil_to_ndarray, ndarray_to_pil, _palette_is_grayscale)
from ...measure import compare_ssim as ssim
from ...color import rgb2lab
from skimage._shared import testing
from skimage._shared.testing import (mono_check, color_check,
assert_equal, assert_array_equal,
assert_array_almost_equal,
assert_allclose)
from skimage._shared._warnings import expected_warnings
from skimage._shared._tempfile import temporary_file
def setup():
use_plugin('pil')
def teardown():
reset_plugins()
def setup_module(self):
"""The effect of the `plugin.use` call may be overridden by later imports.
Call `use_plugin` directly before the tests to ensure that PIL is used.
"""
try:
use_plugin('pil')
except ImportError:
pass
def test_png_round_trip():
f = NamedTemporaryFile(suffix='.png')
fname = f.name
f.close()
I = np.eye(3)
with expected_warnings(['Possible precision loss']):
imsave(fname, I)
Ip = img_as_float(imread(fname))
os.remove(fname)
assert np.sum(np.abs(Ip-I)) < 1e-3
def test_img_as_gray_flatten():
img = imread(os.path.join(data_dir, 'color.png'), as_gray=True)
with expected_warnings(['deprecated']):
img_flat = imread(os.path.join(data_dir, 'color.png'), flatten=True)
assert_array_equal(img, img_flat)
def test_imread_flatten():
# a color image is flattened
img = imread(os.path.join(data_dir, 'color.png'), as_gray=True)
assert img.ndim == 2
assert img.dtype == np.float64
img = imread(os.path.join(data_dir, 'camera.png'), as_gray=True)
# check that flattening does not occur for an image that is grey already.
assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']
def test_imread_separate_channels():
# Test that imread returns RGBA values contiguously even when they are
# stored in separate planes.
x = np.random.rand(3, 16, 8)
f = NamedTemporaryFile(suffix='.tif')
fname = f.name
f.close()
imsave(fname, x)
img = imread(fname)
os.remove(fname)
assert img.shape == (16, 8, 3), img.shape
def test_imread_multipage_rgb_tif():
img = imread(os.path.join(data_dir, 'multipage_rgb.tif'))
assert img.shape == (2, 10, 10, 3), img.shape
def test_imread_palette():
img = imread(os.path.join(data_dir, 'palette_gray.png'))
assert img.ndim == 2
img = imread(os.path.join(data_dir, 'palette_color.png'))
assert img.ndim == 3
def test_imread_index_png_with_alpha():
# The file `foo3x5x4indexed.png` was created with this array
# (3x5 is (height)x(width)):
data = np.array([[[127, 0, 255, 255],
[127, 0, 255, 255],
[127, 0, 255, 255],
[127, 0, 255, 255],
[127, 0, 255, 255]],
[[192, 192, 255, 0],
[192, 192, 255, 0],
[0, 0, 255, 0],
[0, 0, 255, 0],
[0, 0, 255, 0]],
[[0, 31, 255, 255],
[0, 31, 255, 255],
[0, 31, 255, 255],
[0, 31, 255, 255],
[0, 31, 255, 255]]], dtype=np.uint8)
img = imread(os.path.join(data_dir, 'foo3x5x4indexed.png'))
assert_array_equal(img, data)
def test_palette_is_gray():
gray = Image.open(os.path.join(data_dir, 'palette_gray.png'))
assert _palette_is_grayscale(gray)
color = Image.open(os.path.join(data_dir, 'palette_color.png'))
assert not _palette_is_grayscale(color)
def test_bilevel():
expected = np.zeros((10, 10))
expected[::2] = 255
img = imread(os.path.join(data_dir, 'checker_bilevel.png'))
assert_array_equal(img, expected)
def test_imread_uint16():
expected = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy'))
img = imread(os.path.join(data_dir, 'chessboard_GRAY_U16.tif'))
assert np.issubdtype(img.dtype, np.uint16)
assert_array_almost_equal(img, expected)
def test_imread_truncated_jpg():
with testing.raises(IOError):
imread(os.path.join(data_dir, 'truncated.jpg'))
def test_jpg_quality_arg():
chessboard = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy'))
with temporary_file(suffix='.jpg') as jpg:
imsave(jpg, chessboard, quality=95)
im = imread(jpg)
sim = ssim(chessboard, im,
data_range=chessboard.max() - chessboard.min())
assert sim > 0.99
def test_imread_uint16_big_endian():
expected = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy'))
img = imread(os.path.join(data_dir, 'chessboard_GRAY_U16B.tif'))
assert img.dtype == np.uint16
assert_array_almost_equal(img, expected)
class TestSave:
def roundtrip_file(self, x):
with temporary_file(suffix='.png') as fname:
imsave(fname, x)
y = imread(fname)
return y
def roundtrip_pil_image(self, x):
pil_image = ndarray_to_pil(x)
y = pil_to_ndarray(pil_image)
return y
def verify_roundtrip(self, dtype, x, y, scaling=1):
assert_array_almost_equal((x * scaling).astype(np.int32), y)
def verify_imsave_roundtrip(self, roundtrip_function):
for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]:
for dtype in (np.uint8, np.uint16, np.float32, np.float64):
x = np.ones(shape, dtype=dtype) * np.random.rand(*shape)
if np.issubdtype(dtype, np.floating):
yield (self.verify_roundtrip, dtype, x,
roundtrip_function(x), 255)
else:
x = (x * 255).astype(dtype)
yield (self.verify_roundtrip, dtype, x,
roundtrip_function(x))
def test_imsave_roundtrip_file(self):
self.verify_imsave_roundtrip(self.roundtrip_file)
def test_imsave_roundtrip_pil_image(self):
self.verify_imsave_roundtrip(self.roundtrip_pil_image)
def test_imsave_incorrect_dimension():
with temporary_file(suffix='.png') as fname:
with testing.raises(ValueError):
with expected_warnings([fname + ' is a low contrast image']):
imsave(fname, np.zeros((2, 3, 3, 1)))
with testing.raises(ValueError):
with expected_warnings([fname + ' is a low contrast image']):
imsave(fname, np.zeros((2, 3, 2)))
def test_imsave_filelike():
shape = (2, 2)
image = np.zeros(shape)
s = BytesIO()
# save to file-like object
with expected_warnings(['precision loss',
'is a low contrast image']):
imsave(s, image)
# read from file-like object
s.seek(0)
out = imread(s)
assert_equal(out.shape, shape)
assert_allclose(out, image)
def test_imsave_boolean_input():
shape = (2, 2)
    image = np.eye(*shape, dtype=np.bool)
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 10:17:13 2018
@author: David
"""
# Built-in libraries
#import argparse
#import collections
#import multiprocessing
import os
#import pickle
#import time
# External libraries
#import rasterio
#import gdal
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy.stats import linregress
from scipy.stats import median_abs_deviation
import xarray as xr
# Local libraries
import debrisglobal.globaldebris_input as debris_prms
from meltcurves import melt_fromdebris_func
#%%% ===== SCRIPT OPTIONS =====
option_melt_comparison = False
option_hd_comparison = True
option_hd_centerline = False
option_hd_spatial_compare = False
hd_obs_fp = debris_prms.main_directory + '/../hd_obs/'
melt_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_melt_compare/'
hd_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_obs_compare/'
hd_centerline_fp = debris_prms.main_directory + '/../hd_obs/centerline_hd/'
if os.path.exists(melt_compare_fp) == False:
os.makedirs(melt_compare_fp)
if os.path.exists(hd_compare_fp) == False:
os.makedirs(hd_compare_fp)
#%% ===== FUNCTIONS =====
def plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn, ds_names=None,
hd_min=0, hd_max=2, hd_tick_major=0.25, hd_tick_minor=0.05,
melt_min=0, melt_max=70, melt_tick_major=10, melt_tick_minor=5,
plot_meltfactor=False, z_value = 1.645, fontsize=11):
#%%
""" Plot comparison of debris vs. melt for various sites """
# Dataset of melt data
ds_ostrem = xr.open_dataset(melt_fp + melt_fn)
ds_ostrem = ds_ostrem.sortby('hd_cm')
time_year = pd.to_datetime(ds_ostrem.time.values).year
time_daysperyear = np.array([366 if x%4 == 0 else 365 for x in time_year])
time_yearfrac = time_year + (pd.to_datetime(ds_ostrem.time.values).dayofyear-1) / time_daysperyear
color_dict = {0:'k', 1:'b', 2:'r'}
symbol_dict = {0:'D', 1:'o', 2:'^'}
# ===== PLOT DEBRIS VS. SURFACE LOWERING =====
fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False,
gridspec_kw = {'wspace':0.4, 'hspace':0.15})
melt_obs_all = []
hd_obs_all = []
melt_mod_all = []
melt_mod_bndlow_all = []
melt_mod_bndhigh_all = []
for n in np.arange(0,len(measured_hd_list)):
measured_hd = measured_hd_list[n]
measured_melt = measured_melt_list[n]
melt_obs_all.extend(measured_melt)
hd_obs_all.extend(measured_hd)
yearfracs = yearfracs_list[n]
start_yearfrac = yearfracs[0]
end_yearfrac = yearfracs[1]
if ds_names is not None:
ds_name = ds_names[n]
else:
ds_name = None
start_idx = np.where(abs(time_yearfrac - start_yearfrac) == abs(time_yearfrac - start_yearfrac).min())[0][0]
end_idx = np.where(abs(time_yearfrac - end_yearfrac) == abs(time_yearfrac - end_yearfrac).min())[0][0]
# Ostrem Curve
debris_thicknesses = ds_ostrem.hd_cm.values
debris_melt_df = pd.DataFrame(np.zeros((len(debris_thicknesses),3)),
columns=['debris_thickness', 'melt_mmwed', 'melt_std_mmwed'])
nelev = 0
for ndebris, debris_thickness in enumerate(debris_thicknesses):
# Units: mm w.e. per day
melt_mmwed = (ds_ostrem['melt'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
melt_std_mmwed = (ds_ostrem['melt_std'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
debris_melt_df.loc[ndebris] = debris_thickness / 100, melt_mmwed, melt_std_mmwed
debris_melt_df['melt_bndlow_mmwed'] = debris_melt_df['melt_mmwed'] - z_value * debris_melt_df['melt_std_mmwed']
debris_melt_df['melt_bndhigh_mmwed'] = debris_melt_df['melt_mmwed'] + z_value * debris_melt_df['melt_std_mmwed']
#%%
# MEAN CURVE
fit_idx = list(np.where(debris_thicknesses >= 5)[0])
func_coeff, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_mmwed.values[fit_idx])
melt_cleanice = debris_melt_df.loc[0,'melt_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve = melt_fromdebris_func(debris_4curve, func_coeff[0], func_coeff[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve = np.concatenate([[melt_cleanice], melt_4curve])
# Linearly interpolate between 0 cm and 2 cm for the melt rate
def melt_0to2cm_adjustment(melt, melt_clean, melt_2cm, hd):
""" Linearly interpolate melt factors between 0 and 2 cm
based on clean ice and 2 cm sub-debris melt """
melt[(hd >= 0) & (hd < 0.02)] = (
melt_clean + hd[(hd >= 0) & (hd < 0.02)] / 0.02 * (melt_2cm - melt_clean))
return melt
melt_mod = melt_fromdebris_func(measured_hd, func_coeff[0], func_coeff[1])
melt_2cm = melt_fromdebris_func(0.02, func_coeff[0], func_coeff[1])
melt_mod = melt_0to2cm_adjustment(melt_mod, melt_cleanice, melt_2cm, measured_hd)
melt_mod_all.extend(melt_mod)
# LOWER BOUND CURVE
func_coeff_bndlow, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndlow_mmwed.values[fit_idx])
melt_cleanice_bndlow = debris_melt_df.loc[0,'melt_bndlow_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndlow = melt_fromdebris_func(debris_4curve, func_coeff_bndlow[0], func_coeff_bndlow[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndlow = np.concatenate([[melt_cleanice_bndlow], melt_4curve_bndlow])
melt_mod_bndlow = melt_fromdebris_func(measured_hd, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_2cm_bndlow = melt_fromdebris_func(0.02, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_mod_bndlow = melt_0to2cm_adjustment(melt_mod_bndlow, melt_cleanice_bndlow, melt_2cm_bndlow, measured_hd)
melt_mod_bndlow_all.extend(melt_mod_bndlow)
# UPPER BOUND CURVE
func_coeff_bndhigh, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndhigh_mmwed.values[fit_idx])
melt_cleanice_bndhigh = debris_melt_df.loc[0,'melt_bndhigh_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndhigh = melt_fromdebris_func(debris_4curve, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndhigh = np.concatenate([[melt_cleanice_bndhigh], melt_4curve_bndhigh])
melt_mod_bndhigh = melt_fromdebris_func(measured_hd, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_2cm_bndhigh = melt_fromdebris_func(0.02, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_mod_bndhigh = melt_0to2cm_adjustment(melt_mod_bndhigh, melt_cleanice_bndhigh,melt_2cm_bndhigh, measured_hd)
melt_mod_bndhigh_all.extend(melt_mod_bndhigh)
if plot_meltfactor:
melt_4curve = melt_4curve / melt_cleanice
melt_4curve_bndlow = melt_4curve_bndlow / melt_cleanice
melt_4curve_bndhigh = melt_4curve_bndhigh / melt_cleanice
# Plot curve
ax[0,0].plot(measured_hd, measured_melt, symbol_dict[n], color=color_dict[n],
markersize=3, markerfacecolor="None", markeredgewidth=0.5, zorder=5, label=ds_name, clip_on=False)
ax[0,0].plot(debris_4curve, melt_4curve,
color=color_dict[n], linewidth=1, linestyle='--', zorder=5-n)
ax[0,0].fill_between(debris_4curve, melt_4curve_bndlow, melt_4curve_bndhigh,
color=color_dict[n], linewidth=0, zorder=5-n, alpha=0.2)
# text
# ax[0,0].text(0.5, 1.09, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes)
ax[0,0].text(0.5, 1.11, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
transform=ax[0,0].transAxes)
# eqn_text = r'$b = \frac{b_{0}}{1 + kb_{0}h}$'
# coeff1_text = r'$b_{0} = ' + str(np.round(func_coeff[0],2)) + '$'
# coeff2_text = r'$k = ' + str(np.round(func_coeff[1],2)) + '$'
# # coeff$\frac{b_{0}}{1 + 2kb_{0}h}$'
# ax[0,0].text(0.9, 0.95, eqn_text, size=12, horizontalalignment='right', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.615, 0.83, 'where', size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.77, coeff1_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.7, coeff2_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# X-label
# ax[0,0].set_xlabel('Debris thickness (m)', size=fontsize)
ax[0,0].set_xlim(hd_min, hd_max)
ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(hd_tick_major))
ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(hd_tick_minor))
# Y-label
# if plot_meltfactor:
# ylabel_str = 'Melt (-)'
# else:
# ylabel_str = 'Melt (mm w.e. d$^{-1}$)'
# ax[0,0].set_ylabel(ylabel_str, size=fontsize)
ax[0,0].set_ylim(melt_min, melt_max)
ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(melt_tick_major))
ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(melt_tick_minor))
# Tick parameters
ax[0,0].yaxis.set_ticks_position('both')
ax[0,0].tick_params(axis='both', which='major', labelsize=fontsize-2, direction='inout')
ax[0,0].tick_params(axis='both', which='minor', labelsize=fontsize-4, direction='in')
# Legend
ax[0,0].legend(ncol=1, fontsize=fontsize-3, frameon=True, handlelength=1,
handletextpad=0.15, columnspacing=0.5, borderpad=0.25, labelspacing=0.5, framealpha=0.5)
# Save plot
fig.set_size_inches(2, 1.5)
fig.savefig(melt_compare_fp + fig_fn, bbox_inches='tight', dpi=300, transparent=True)
plt.close()
return hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all
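# For reference, a sketch of the hyperbolic Ostrem-curve form that melt_fromdebris_func
# (imported from meltcurves) is assumed to implement, based on the commented equation
# b = b0 / (1 + k*b0*h) above; this is an assumption for illustration, not the imported code.
def _melt_fromdebris_sketch(h, b0, k):
    """Sub-debris melt rate b for debris thickness h, clean-ice-scale coefficient b0, and rate constant k."""
    return b0 / (1 + k * b0 * h)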
#%%
if option_melt_comparison:
# glaciers = ['1.15645', '2.14297', '6.00474', '7.01044', '10.01732', '11.00719', '11.02810', '11.02858', '11.03005',
# '12.01012', '12.01132', '13.05000', '13.43232', '14.06794', '14.16042', '15.03733', '15.03743',
# '15.04045', '15.07886', '15.11758', '18.02397']
glaciers = ['1.15645', '2.14297', '7.01044', '11.00719', '11.02472', '11.02810', '11.02858', '11.03005',
'12.01012', '12.01132', '13.05000', '13.43165', '13.43232', '14.06794', '14.16042', '15.03733',
'15.03743', '15.04045', '15.07122', '15.07886', '15.11758', '18.02375', '18.02397']
# glaciers = ['10.01732']
# glaciers = ['13.43165']
# glaciers = ['13.43232']
# glaciers = ['11.02858']
z_value = 1.645
hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all, reg_all = [], [], [], [], [], []
rgiid_all = []
# ===== KENNICOTT (1.15645) ====
if '1.15645' in glaciers:
print('\nmelt comparison with Anderson et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/1.15645_kennicott_anderson_2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Kennicott (1.15645)"
fig_fn = '1.15645_hd_melt_And2019.png'
# ds_names = ['Anderson 2019\n(6/18/11$\u2009$-$\u2009$8/16/11)']
ds_names = ['6/18/11$\u2009$-$\u2009$8/16/11']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6150N-21700E-debris_melt_curve.nc'
yearfracs_list = [[2011 + 169/365, 2011 + 228/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['01'])
rgiid_all.append(['1.15645'])
# ===== Emmons (2.14297) ====
if '2.14297' in glaciers:
print('\nmelt comparison with Moore et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/2.14297_moore2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Emmons (2.14297)"
fig_fn = '2.14297_hd_melt_Moo2019.png'
# ds_names = ['Moore 2019\n(7/31/14$\u2009$-$\u2009$8/10/14)']
ds_names = ['7/31/14$\u2009$-$\u2009$8/10/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-23825E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 212/365, 2014 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['02'])
rgiid_all.append(['2.14297'])
# ===== Svinafellsjokull (06.00474) ====
if '6.00474' in glaciers:
print('\nmelt comparison with Moller et al (2016)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/6.00474_moller2016-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df['melt_mf'].values]
glac_name = "Svinafellsjokull (6.00474)"
fig_fn = '6.00474_hd_melt_Moller2016.png'
# ds_names = ['Moller 2016\n(5/17/13$\u2009$-$\u2009$5/30/13)']
ds_names = ['5/17/13$\u2009$-$\u2009$5/30/13']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6400N-34325E-debris_melt_curve.nc'
yearfracs_list = [[2013 + 137/365, 2013 + 150/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 0.1) * 0.1,1) + 0.1
melt_tick_major, melt_tick_minor = 0.5, 0.1
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor,
plot_meltfactor=True)
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['06'])
rgiid_all.append(['6.00474'])
# ===== Larsbreen (7.01044) ====
if '7.01044' in glaciers:
print('\nmelt comparison with Nicholson and Benn 2006')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/7.01044_larsbreen_NB2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Larsbreen (7.01044)"
fig_fn = '7.01044_hd_melt_NichBenn2006.png'
# ds_names = ['Nicholson 2006\n(7/09/02$\u2009$-$\u2009$7/20/02)']
ds_names = ['7/09/02$\u2009$-$\u2009$7/20/02']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '7825N-1600E-debris_melt_curve.nc'
yearfracs_list = [[2002 + 191/366, 2002 + 202/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['07'])
rgiid_all.append(['7.01044'])
# ===== <NAME> (10.01732) ====
if '10.01732' in glaciers:
# print('\nmelt comparison with Mayer et al (2011)')
assert True == False, '10.01732 NEEDS TO DO THE MODELING FIRST!'
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/10.01732_mayer2011-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# glac_name = "<NAME> (10.01732)"
# fig_fn = '10.01732_hd_melt_Mayer2011.png'
## ds_names = ['Mayer 2011\n(7/11/07$\u2009$-$\u2009$7/30/07)']
# ds_names = ['7/11/07$\u2009$-$\u2009$7/30/07']
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '5000N-8775E-debris_melt_curve.nc'
# yearfracs_list = [[2007 + 192/365, 2007 + 211/365]]
#
# hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
# hd_tick_major, hd_tick_minor = 0.1, 0.02
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
# plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
# melt_fp, melt_fn,
# ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
# hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
# melt_min=melt_min, melt_max=melt_max,
# melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
# hd_obs_all.append(hd_obs)
# melt_obs_all.append(melt_obs)
# melt_mod_all.append(melt_mod)
# melt_mod_bndlow_all.append(melt_mod_bndlow)
# melt_mod_bndhigh_all.append(melt_mod_bndhigh)
# reg_all.append(['10'])
# ===== Vernagtferner (11.00719) ====
if '11.00719' in glaciers:
print('\nmelt comparison with Juen et al (2013)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.00719_vernagtferner_juen2013-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Vernagtferner (11.00719)"
fig_fn = '11.00719_hd_melt_Juen2013.png'
# ds_names = ['Juen 2013\n(6/25/10$\u2009$-$\u2009$7/10/10)']
ds_names = ['6/25/10$\u2009$-$\u2009$7/10/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-1075E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 176/365, 2010 + 191/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.00719'])
    # ===== Venerocolo (11.02472) =====
    if '11.02472' in glaciers:
        print('\nmelt comparison with Bocchiola et al. (2015)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02472_bocchiola2015-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Venerocolo (11.02472)"
fig_fn = '11.02472_hd_melt_Boc2015.png'
ds_names = ['8/10/07$\u2009$-$\u2009$9/13/07']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4625N-1050E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 222/365, 2007 + 256/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02472'])
# ===== Arolla (11.02810) ====
if '11.02810' in glaciers:
print('\nmelt comparison with Reid et al (2012)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02810_arolla_reid2012-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Arolla (11.02810)"
fig_fn = '11.02810_hd_melt_Reid2012.png'
# ds_names = ['Reid 2012\n(7/28/10$\u2009$-$\u2009$9/09/10)']
ds_names = ['7/28/10$\u2009$-$\u2009$9/09/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-750E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 209/365, 2010 + 252/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02810'])
# ===== Belvedere (11.02858) ====
if '11.02858' in glaciers:
print('\nmelt comparison with Nicholson and Benn (2006)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02858_belvedere_nb2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Belvedere (11.02858)"
fig_fn = '11.02858_hd_melt_NB2006.png'
# ds_names = ['Nicholson 2006\n(8/06/03$\u2009$-$\u2009$8/10/03)']
ds_names = ['8/06/03$\u2009$-$\u2009$8/10/03']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-800E-debris_melt_curve.nc'
yearfracs_list = [[2003 + 218/365, 2003 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 10
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02858'])
# ===== MIAGE (11.03005) ====
if '11.03005' in glaciers:
print('\nmelt comparison with Reid and Brock (2010)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.03005_reid2010-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Miage (11.03005)'
fig_fn = '11.03005_hd_melt_Reid2010.png'
# ds_names = ['Reid 2010\n(6/21/05$\u2009$-$\u2009$9/04/05)']
ds_names = ['6/21/05$\u2009$-$\u2009$9/04/05']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4575N-675E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 172/365, 2005 + 247/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.03005'])
# ===== Zopkhito (12.01012) ====
if '12.01012' in glaciers:
print('\nmelt comparison with Lambrecht et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2008.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2009.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df['melt_mmwed'].values, mb_df2['melt_mmwed'].values]
glac_name = "Zopkhito (12.01012)"
fig_fn = '12.01012_hd_melt_Lambrecht2011.png'
# ds_names = ['Lambrecht 2011\n(6/20/08$\u2009$-$\u2009$6/27/08)',
# 'Lambrecht 2011\n(7/01/09$\u2009$-$\u2009$7/08/09)']
ds_names = ['6/26/08$\u2009$-$\u2009$7/01/08',
'7/13/09$\u2009$-$\u2009$7/17/09']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4300N-4350E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 178/366, 2008 + 183/366], [2009 + 194/365, 2009 + 198/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 10
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['12'])
rgiid_all.append(['12.01012'])
# ===== Djankuat (12.01132) ====
if '12.01132' in glaciers:
print('\nmelt comparison with Lambrecht et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01132_lambrecht2011-melt2007.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01132_lambrecht2011-melt2008.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df['melt_mmwed'].values, mb_df2['melt_mmwed'].values]
glac_name = "Djankuat (12.01132)"
fig_fn = '12.01132_hd_melt_Lambrecht2011.png'
ds_names = ['6/26/07$\u2009$-$\u2009$6/30/07','6/30/08$\u2009$-$\u2009$9/14/08']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4325N-4275E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 177/366, 2007 + 181/366], [2008 + 182/366, 2008 + 258/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['12'])
rgiid_all.append(['12.01132'])
# ===== S Inylchek (13.05000) ====
if '13.05000' in glaciers:
print('\nmelt comparison with Hagg et al (2008)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.05000_hagg2008-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "S Inylchek (13.05000)"
fig_fn = '13.05000_hd_melt_Hagg2008.png'
# ds_names = ['Hagg 2008\n(7/30/05$\u2009$-$\u2009$8/10/05)']
ds_names = ['7/30/05$\u2009$-$\u2009$8/10/05']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4200N-8025E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 211/365, 2005 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.05000'])
    # ===== Qingbingtan / No. 72 (13.43165) =====
if '13.43165' in glaciers:
print('\nmelt comparison with Wang et al (2017)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43165_wang2017-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Qingbingtan (13.43165)"
fig_fn = '13.43165_hd_melt_wang2017.png'
ds_names = ['8/01/08$\u2009$-$\u2009$8/01/09']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4175N-8000E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 214/366, 2009 + 213/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.43165'])
# ===== Koxkar (13.43232) ====
if '13.43232' in glaciers:
        print('\nmelt comparison with Han et al 2006 (measured via conduction plate, not ablation stake)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_Han2006-melt_site1.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_Han2006-melt_site2.csv')
mb_df3 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_Han2006-melt_site3.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values, mb_df3.hd_m.values,]
measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values, mb_df3.melt_mmwed.values]
glac_name = "Koxkar (13.43232)"
fig_fn = '13.43232_hd_melt_han2006.png'
ds_names = ['09/15/04$\u2009$-$\u2009$09/19/04', '09/24/04$\u2009$-$\u2009$09/28/04',
'10/02/04$\u2009$-$\u2009$10/06/04']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4175N-8000E-debris_melt_curve.nc'
yearfracs_list = [[2004 + 259/366, 2004 + 263/366], [2004 + 268/366, 2004 + 272/366],
[2004 + 276/366, 2004 + 280/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.5, 0.1
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.43232'])
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_juen2014-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Koxkar (13.43232)"
fig_fn = '13.43232_hd_melt_juen2014.png'
# ds_names = ['Juen 2014\n(8/10/10$\u2009$-$\u2009$8/29/10)']
ds_names = ['8/10/10$\u2009$-$\u2009$8/29/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4175N-8000E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 222/365, 2010 + 241/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.43232'])
# ===== Baltoro (14.06794) ====
if '14.06794' in glaciers:
print('\nmelt comparison with Mihalcea et al 2006 and Groos et al 2017')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.06794_mihalcea2006-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.06794_groos2017-melt.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values]
glac_name = "Baltoro (14.06794)"
fig_fn = '14.06794_hd_melt_Mih2006_Gro2017.png'
# ds_names = ['Mihalcea 2006\n(7/04/04$\u2009$-$\u2009$7/14/04)', 'Groos 2017\n(7/22/11$\u2009$-$\u2009$8/10/11)']
ds_names = ['7/04/04$\u2009$-$\u2009$7/14/04', '7/22/11$\u2009$-$\u2009$8/10/11']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3575N-7650E-debris_melt_curve.nc'
yearfracs_list = [[2004 + 186/366, 2004 + 196/366],
[2011 + 203/365, 2011 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['14'])
rgiid_all.append(['14.06794'])
# ===== Batal (14.16042) ====
if '14.16042' in glaciers:
print('\nmelt comparison with Patel et al (2016)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.16042_patel2016-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Batal (14.16042)'
fig_fn = '14.16042_hd_melt_Patel2016.png'
# ds_names = ['Patel 2016\n(8/01/14$\u2009$-$\u2009$10/15/14)']
ds_names = ['8/01/14$\u2009$-$\u2009$10/15/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3225N-7750E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 213/365, 2014 + 288/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['14'])
rgiid_all.append(['14.16042'])
# ===== Khumbu (15.03733) ====
if '15.03733' in glaciers:
print('\nmelt comparison with Kayastha et al 2000')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.03733_kayastha2000-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Khumbu (15.03733)'
fig_fn = '15.03733_hd_melt_Kay2000.png'
# ds_names = ['Kayastha 2000\n(5/22/00$\u2009$-$\u2009$6/01/00)']
ds_names = ['5/22/00$\u2009$-$\u2009$6/01/00']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2800N-8700E-debris_melt_curve.nc'
yearfracs_list = [[2000 + 143/366, 2000 + 153/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
        rgiid_all.append(['15.03733'])
# ===== Imja-Lhotse Shar (15.03743) ====
if '15.03743' in glaciers:
print('\nmelt comparison with Rounce et al. (2015)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.03743_rounce2015-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
        glac_name = 'Imja-Lhotse Shar (15.03743)'
fig_fn = '15.03743_hd_melt_Rou2015.png'
ds_names = ['5/18/14$\u2009$-$\u2009$11/09/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2800N-8700E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 138/365, 2014 + 315/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.03743'])
# ===== Lirung (15.04045) ====
if '15.04045' in glaciers:
print('\nmelt comparison with Chand et al 2015')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df1 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_fall-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_winter-melt.csv')
mb_df3 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_spring-melt.csv')
measured_hd_list = [mb_df1.hd_m.values, mb_df2.hd_m.values, mb_df3.hd_m.values]
measured_melt_list = [mb_df1.melt_mmwed.values, mb_df2.melt_mmwed.values, mb_df3.melt_mmwed.values]
glac_name = 'Lirung (15.04045)'
fig_fn = '15.04045_hd_melt_Cha2015.png'
# ds_names = ['Chand 2000\n(9/22/13$\u2009$-$\u2009$10/03/13)', 'Chand 2000\n(11/29/13$\u2009$-$\u2009$12/12/13)',
# 'Chand 2000\n(4/07/14$\u2009$-$\u2009$4/19/14)']
ds_names = ['9/22/13$\u2009$-$\u2009$10/03/13', '11/29/13$\u2009$-$\u2009$12/12/13',
'4/07/14$\u2009$-$\u2009$4/19/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2825N-8550E-debris_melt_curve.nc'
yearfracs_list = [[2013 + 265/365, 2013 + 276/365], [2013 + 333/365, 2013 + 346/365],
[2014 + 97/365, 2014 + 109/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.04045'])
# ===== Satopanth (15.07122) ====
if '15.07122' in glaciers:
print('\nmelt comparison with Shah et al (2019) - MUST BE DONE FOR INDIVIDUAL STAKES, NO MELT CURVE')
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.07122_shah2019-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df.melt_mmwed.values]
# glac_name = 'Satopanth (15.07122)'
# fig_fn = '15.07122_hd_melt_shah2019.png'
# ds_names = ['5/21/16$\u2009$-$\u2009$10/24/16']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3075N-7925E-debris_melt_curve.nc'
# yearfracs_list = [[2016 + 142/366, 2016 + 298/366]]
#
# hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
# hd_tick_major, hd_tick_minor = 0.1, 0.02
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
# hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
# plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
# melt_fp, melt_fn,
# ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
# hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
# melt_min=melt_min, melt_max=melt_max,
# melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
# hd_obs_all.append(hd_obs)
# melt_obs_all.append(melt_obs)
# melt_mod_all.append(melt_mod)
# melt_mod_bndlow_all.append(melt_mod_bndlow)
# melt_mod_bndhigh_all.append(melt_mod_bndhigh)
# reg_all.append(['15'])
# rgiid_all.append(['15.07122'])
obs_df_fullfn = (debris_prms.main_directory +
'/../hd_obs/datasets/Shah2019_satopanth/satopanth2019-melt-processed.csv')
if os.path.exists(obs_df_fullfn):
obs_df = pd.read_csv(obs_df_fullfn)
else:
# Special processing since dates vary drastically
obs_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/Shah2019_satopanth/satopanth2019-melt.csv')
obs_df['hd_m'] = obs_df.hd_cm / 100
obs_df['doy_start'] = obs_df['doy'] - obs_df['obs_period_d']
obs_df['doy_end'] = obs_df['doy']
obs_df['mb_obs_mmwed'] = obs_df.melt_cm * 0.9 * 10 / obs_df.obs_period_d
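            # melt_cm is cm of ice over the observation period: x0.9 converts ice to w.e. (relative density),
            # x10 converts cm to mm, and dividing by obs_period_d gives mm w.e. d-1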
obs_df['mb_mod_mmwed'] = np.nan
obs_df['mb_mod_mmwed_low'] = np.nan
obs_df['mb_mod_mmwed_high'] = np.nan
ds_ostrem = xr.open_dataset(melt_fp + melt_fn)
ds_ostrem = ds_ostrem.sortby('hd_cm')
debris_thicknesses = ds_ostrem.hd_cm.values
time_year = pd.to_datetime(ds_ostrem.time.values).year
time_daysperyear = np.array([366 if x%4 == 0 else 365 for x in time_year])
time_yearfrac = time_year + (pd.to_datetime(ds_ostrem.time.values).dayofyear-1) / time_daysperyear
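            # express each model timestep as a fractional year so the observation start/end dates
            # (year + doy / daysperyear) can be matched to the nearest timestep below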
# Loop through each point individually because they all differ
for ndata in obs_df.index.values:
measured_hd = obs_df.loc[ndata,'hd_m']
# Start and end indices
if obs_df.loc[ndata,'year']%4 == 0:
daysperyear = 366
else:
daysperyear = 365
start_yearfrac = obs_df.loc[ndata,'year'] + obs_df.loc[ndata,'doy_start'] / daysperyear
end_yearfrac = obs_df.loc[ndata,'year'] + obs_df.loc[ndata,'doy_end'] / daysperyear
start_idx = np.where(abs(time_yearfrac - start_yearfrac) == abs(time_yearfrac - start_yearfrac).min())[0][0]
end_idx = np.where(abs(time_yearfrac - end_yearfrac) == abs(time_yearfrac - end_yearfrac).min())[0][0]
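                # indices of the model timesteps closest to the observation start/end dates
                # (equivalent to taking np.argmin of the absolute differences)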
# Ostrem Curve
debris_melt_df = pd.DataFrame(np.zeros((len(debris_thicknesses),3)),
columns=['debris_thickness', 'melt_mmwed', 'melt_std_mmwed'])
nelev = 0
for ndebris, debris_thickness in enumerate(debris_thicknesses):
# Units: mm w.e. per day
melt_mmwed = (ds_ostrem['melt'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
melt_std_mmwed = (ds_ostrem['melt_std'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
debris_melt_df.loc[ndebris] = debris_thickness / 100, melt_mmwed, melt_std_mmwed
debris_melt_df['melt_bndlow_mmwed'] = debris_melt_df['melt_mmwed'] - z_value * debris_melt_df['melt_std_mmwed']
debris_melt_df['melt_bndhigh_mmwed'] = debris_melt_df['melt_mmwed'] + z_value * debris_melt_df['melt_std_mmwed']
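                # bound curves span +/- z_value (set earlier in the script) standard deviations
                # about the mean modeled melt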
# MEAN CURVE
fit_idx = list(np.where(debris_thicknesses >= 5)[0])
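                # restrict the curve fit to bins with >= 5 cm of debris (debris_thicknesses is in cm);
                # clean ice and thin debris are handled separately below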
func_coeff, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_mmwed.values[fit_idx])
melt_cleanice = debris_melt_df.loc[0,'melt_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve = melt_fromdebris_func(debris_4curve, func_coeff[0], func_coeff[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve = np.concatenate([[melt_cleanice], melt_4curve])
# Linearly interpolate between 0 cm and 2 cm for the melt rate
def melt_0to2cm_adjustment_value(melt, melt_clean, melt_2cm, hd):
""" Linearly interpolate melt factors between 0 and 2 cm
based on clean ice and 2 cm sub-debris melt """
# ADJUST SINGLE VALUE TO LIST
melt = np.array([melt])
hd = np.array([hd])
melt[(hd >= 0) & (hd < 0.02)] = (
melt_clean + hd[(hd >= 0) & (hd < 0.02)] / 0.02 * (melt_2cm - melt_clean))
return melt
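                # Illustrative (hypothetical) example: a 1 cm debris layer falls halfway between
                # the clean-ice and 2 cm melt rates, i.e.
                #   melt_0to2cm_adjustment_value(melt_fromdebris_func(0.01, func_coeff[0], func_coeff[1]),
                #                                melt_cleanice, melt_2cm, 0.01)
                #   -> array([melt_cleanice + 0.5 * (melt_2cm - melt_cleanice)])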
melt_mod = melt_fromdebris_func(measured_hd, func_coeff[0], func_coeff[1])
melt_2cm = melt_fromdebris_func(0.02, func_coeff[0], func_coeff[1])
melt_mod = melt_0to2cm_adjustment_value(melt_mod, melt_cleanice, melt_2cm, measured_hd)
# LOWER BOUND CURVE
func_coeff_bndlow, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndlow_mmwed.values[fit_idx])
melt_cleanice_bndlow = debris_melt_df.loc[0,'melt_bndlow_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndlow = melt_fromdebris_func(debris_4curve, func_coeff_bndlow[0], func_coeff_bndlow[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndlow = np.concatenate([[melt_cleanice_bndlow], melt_4curve_bndlow])
melt_mod_bndlow = melt_fromdebris_func(measured_hd, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_2cm_bndlow = melt_fromdebris_func(0.02, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_mod_bndlow = melt_0to2cm_adjustment_value(melt_mod_bndlow, melt_cleanice_bndlow, melt_2cm_bndlow,
measured_hd)
# UPPER BOUND CURVE
func_coeff_bndhigh, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndhigh_mmwed.values[fit_idx])
melt_cleanice_bndhigh = debris_melt_df.loc[0,'melt_bndhigh_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndhigh = melt_fromdebris_func(debris_4curve, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndhigh = np.concatenate([[melt_cleanice_bndhigh], melt_4curve_bndhigh])
melt_mod_bndhigh = melt_fromdebris_func(measured_hd, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_2cm_bndhigh = melt_fromdebris_func(0.02, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_mod_bndhigh = melt_0to2cm_adjustment_value(melt_mod_bndhigh, melt_cleanice_bndhigh,melt_2cm_bndhigh,
measured_hd)
# RECORD THE DATA
obs_df.loc[ndata,'mb_mod_mmwed'] = melt_mod[0]
obs_df.loc[ndata,'mb_mod_mmwed_low'] = melt_mod_bndlow[0]
obs_df.loc[ndata,'mb_mod_mmwed_high'] = melt_mod_bndhigh[0]
# Compare all time steps (all intervals over the entire melt season included)
fig, ax = plt.subplots(1, 1, squeeze=False, gridspec_kw = {'wspace':0, 'hspace':0})
ax[0,0].scatter(obs_df.mb_obs_mmwed.values, obs_df.mb_mod_mmwed.values,
color='k', marker='o', linewidth=0.5, facecolor='none', s=30, zorder=1, clip_on=True)
ax[0,0].plot([0,200],[0,200], color='k', linewidth=0.5)
ax[0,0].set_xlim(0,75)
ax[0,0].set_ylim(0,75)
fig.set_size_inches(3.45,3.45)
fig_fullfn = melt_compare_fp + 'melt_compare-Satopanth_all_timesteps.png'
fig.savefig(fig_fullfn, bbox_inches='tight', dpi=150)
obs_df.to_csv(obs_df_fullfn)
# obs_df_stakes = pd.DataFrame(np.zeros((0, len(obs_df.columns))), columns=list(obs_df.columns))
# unique_years = list(obs_df.year.unique())
# for nyear, year in enumerate(unique_years[0:1]):
# obs_df_year = obs_df[obs_df['year'] == year]
# obs_df_year.reset_index(inplace=True, drop=True)
#
# stake_ids = list(obs_df_year.stake_id.unique())
# for nstake, stake_id in enumerate(stake_ids[0:10]):
# obs_df_stakes_subset = obs_df_year[obs_df_year['stake_id'] == stake_id]
# obs_df_stakes_subset.reset_index(inplace=True, drop=True)
#
# # Melt average over the duration of period
# obs_melt_cm_total = obs_df_stakes_subset.melt_cm.sum()
# obs_period_d_total = obs_df_stakes_subset.obs_period_d.sum()
# obs_mmwed = obs_melt_cm_total * 0.9 * 10 / obs_period_d_total
# # Start and end date of the entire duration
# start_doy = obs_df_stakes_subset.doy_start.min()
# end_doy = obs_df_stakes_subset.doy_end.max()
hd_obs_all.append(list(obs_df.hd_m.values))
melt_obs_all.append(list(obs_df.mb_obs_mmwed.values))
melt_mod_all.append(list(obs_df.mb_mod_mmwed.values))
melt_mod_bndlow_all.append(list(obs_df.mb_mod_mmwed_low))
melt_mod_bndhigh_all.append(list(obs_df.mb_mod_mmwed_high))
reg_all.append(['15'])
rgiid_all.append(['15.07122'])
# ===== Hailuogou (15.07886) ====
if '15.07886' in glaciers:
print('\nmelt comparison with Zhang et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
measured_hd_list = [np.array([2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 6, 7, 7, 10, 10, 11, 13]) / 100]
measured_melt_list = [np.array([65.2, 55.4, 52.8, 51.6, 47.0, 53.4, 44.4, 50.3, 58, 48.9, 58.4, 54.4, 44.8,
52.6, 43.7, 52.5, 38.5, 36.5, 34.2, 28.4])]
glac_name = 'Hailuogou (15.07886)'
fig_fn = '15.07886_hd_melt_Zhang2011.png'
# ds_names = ['Zhang 2011\n(7/02/08$\u2009$-$\u2009$9/30/08)']
ds_names = ['7/02/08$\u2009$-$\u2009$9/30/08']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2950N-10200E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 184/366, 2008 + 274/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.07886'])
# ===== 24K (15.11758) ====
if '15.11758' in glaciers:
print('\nmelt comparison with Wei et al (2010) and Yang et al (2017)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.11758_yang2017-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.11758_wei2010-melt.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values]
glac_name = "24K (15.11758)"
fig_fn = '15.11758_hd_melt_Wei2010_Yang2017.png'
# ds_names = ['Yang 2017\n(6/01/16$\u2009$-$\u2009$9/30/16)', 'Wei et al 2010\n(7/19/08$\u2009$-$\u2009$9/04/08)']
ds_names = ['6/01/16$\u2009$-$\u2009$9/30/16', '7/19/08$\u2009$-$\u2009$9/04/08']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2975N-9575E-debris_melt_curve.nc'
yearfracs_list = [[2016 + 153/366, 2016 + 274/366], [2008 + 201/366, 2008 + 248/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.11758'])
# ===== Fox (18.02375) ====
if '18.02375' in glaciers:
print('\nmelt comparison with Brook and Paine (2012)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/18.02375_brook2012-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Fox (18.02375)'
fig_fn = '18.02375_hd_melt_Brook2012.png'
ds_names = ['11/23/07$\u2009$-$\u2009$12/03/07']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4350S-17025E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 327/365, 2007 + 337/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['18'])
rgiid_all.append(['18.02375'])
# ===== <NAME> (18.02397) ====
if '18.02397' in glaciers:
print('\nmelt comparison with Brook et al (2013)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/18.02397_brook2013-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = '<NAME> (18.02397)'
fig_fn = '18.02397_hd_melt_Brook2013.png'
# ds_names = ['Brook 2013\n(2/07/12$\u2009$-$\u2009$2/16/12)']
ds_names = ['2/07/12$\u2009$-$\u2009$2/16/12']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4350S-17025E-debris_melt_curve.nc'
yearfracs_list = [[2012 + 38/366, 2012 + 47/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['18'])
rgiid_all.append(['18.02397'])
#%% ----- ROOT MEAN SQUARE ERROR PLOT!!! -----
melt_obs_all_values, melt_mod_all_values = [], []
melt_df_all = None
melt_df_cns = ['reg', 'hd', 'melt_obs', 'melt_mod', 'melt_mod_bndlow', 'melt_mod_bndhigh']
for nlist in np.arange(len(melt_obs_all)):
melt_df = pd.DataFrame(np.zeros((len(melt_obs_all[nlist]), len(melt_df_cns))), columns=melt_df_cns)
melt_df['reg'] = reg_all[nlist][0]
melt_df['hd'] = hd_obs_all[nlist]
melt_df['melt_obs'] = melt_obs_all[nlist]
melt_df['melt_mod'] = melt_mod_all[nlist]
melt_df['melt_mod_bndlow'] = melt_mod_bndlow_all[nlist]
melt_df['melt_mod_bndhigh'] = melt_mod_bndhigh_all[nlist]
if melt_df_all is None:
melt_df_all = melt_df
else:
melt_df_all = pd.concat([melt_df_all, melt_df], axis=0)
# Correlation
slope, intercept, r_value, p_value, std_err = linregress(melt_df_all.melt_obs.values, melt_df_all.melt_mod.values)
print('melt compare: r = ' + str(np.round(r_value,2)), '(p = ' + str(np.round(p_value,3)) +
', slope = ' + str(np.round(slope,2)) + ', intercept = ' + str(np.round(intercept,2)) + ')')
# Root mean square error
# All
rmse = (np.sum((melt_df_all.melt_obs.values - melt_df_all.melt_mod.values)**2) / melt_df_all.shape[0])**0.5
print('RMSE analysis:', rmse)
#%%
# subset
rmse_hd_list = [(0,0.05), (0.05,0.1), (0.1,0.2), (0.2, 1)]
for rmse_hd in rmse_hd_list:
melt_df_all_subset = melt_df_all[(melt_df_all['hd'] >= rmse_hd[0]) & (melt_df_all['hd'] < rmse_hd[1])]
rmse = (np.sum((melt_df_all_subset['melt_obs'].values - melt_df_all_subset['melt_mod'])**2) /
melt_df_all_subset.shape[0])**0.5
print(' hd:', rmse_hd, '(n=' + str(melt_df_all_subset.shape[0]) + ')', 'RMSE:', np.round(rmse,2))
# Correlation
slope, intercept, r_value, p_value, std_err = linregress(melt_df_all_subset['melt_obs'].values,
melt_df_all_subset['melt_mod'].values)
print(' r = ' + str(np.round(r_value,2)), '(p = ' + str(np.round(p_value,3)) +
', slope = ' + str(np.round(slope,2)) + ', intercept = ' + str(np.round(intercept,2)) + ')')
#%%
fig, ax = plt.subplots(1, 1, squeeze=False, gridspec_kw = {'wspace':0, 'hspace':0})
ax[0,0].scatter(melt_df_all.melt_obs.values, melt_df_all.melt_mod.values,
color='k', marker='o', linewidth=0.5, facecolor='none', s=30, zorder=1, clip_on=True)
ax[0,0].plot([0,200],[0,200], color='k', linewidth=0.5)
ax[0,0].set_xlim(0,125)
ax[0,0].set_ylim(0,125)
fig.set_size_inches(3.45,3.45)
fig_fullfn = melt_compare_fp + 'melt_compare-wellmeasured_lowres.png'
fig.savefig(fig_fullfn, bbox_inches='tight', dpi=150)
    print('\n\nThe outliers where we underestimate are likely due to aspect and slope\n\n')
print('To-do list:')
print(' - color by debris thickness')
print(' - consider removing error bars')
print(' - add individual glacier names to facilitate main plot of curve comparisons')
#%%
marker_list = ['P', 'X', 'o', '<', 'v', '>', '^', 'd', 'h', 'p', 'D', '*', 'H', '8']
marker_dict = {'01':'P', '02':'X', '07':'h', '11':'o', '12':'^', '13':'<', '14':'v', '15':'>', '18':'*'}
markers_per_roi = False
fig, ax = plt.subplots(1, 1, squeeze=False, gridspec_kw = {'wspace':0, 'hspace':0})
reg_str_list = []
count_reg = -1
for nroi, reg_str in enumerate(melt_df_all.reg.unique()):
# for nroi, reg_str in enumerate(['01']):
if reg_str not in reg_str_list:
label_str = reg_str
reg_str_list.append(reg_str)
count_reg += 1
else:
label_str = None
if not markers_per_roi:
label_str = None
melt_df_subset = melt_df_all[melt_df_all['reg'] == reg_str].copy()
        melt_df_subset['err'] = ((np.abs(melt_df_subset.melt_mod_bndhigh.values - melt_df_subset.melt_mod.values) +
                                  np.abs(melt_df_subset.melt_mod_bndlow - melt_df_subset.melt_mod)) / 2)
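        # err: mean half-width of the modeled melt bounds (only used if the error bars below are re-enabled)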
if markers_per_roi:
marker = marker_dict[reg_str]
else:
marker = 'o'
# Size thresholds
s_plot = 20
lw = 0.3
lw_err = 0.1
colors = 'k'
zorders = [3,4,5]
# Color symbols by debris thickness
hd_values = melt_df_subset.hd.values
hd_colors = list(melt_df_subset.hd.values)
for nd, hd in enumerate(hd_colors):
if hd <= 0.05:
hd_colors[nd] = '#2c7bb6'
elif hd <= 0.1:
hd_colors[nd] = '#abd9e9'
elif hd <= 0.2:
hd_colors[nd] = '#ffffbf'
elif hd <= 0.4:
hd_colors[nd] = '#fdae61'
else:
hd_colors[nd] = '#d7191c'
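        # edge colors encode debris thickness using the same bins as the legend below
        # (<0.05, 0.05-0.10, 0.10-0.20, 0.20-0.40, >0.40 m)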
ax[0,0].scatter(melt_df_subset['melt_obs'], melt_df_subset['melt_mod'], marker=marker, edgecolor=hd_colors,
linewidth=lw, facecolor='none', s=30, zorder=3, label=label_str, clip_on=True)
# ax[0,0].errorbar(melt_df_subset['melt_obs'].values, melt_df_subset['melt_mod'].values,
# xerr=melt_df_subset['err'].values, yerr=None,
# capsize=1, capthick=0.15, elinewidth=lw_err, linewidth=0, color='grey', alpha=1, zorder=2,
# label=None)
# Labels
ax[0,0].set_xlabel('Observed Melt (mm w.e. d$^{-1}$)', size=12)
ax[0,0].set_ylabel('Modeled Melt (mm w.e. d$^{-1}$)', size=12)
ax[0,0].set_xlim(0,125)
ax[0,0].set_ylim(0,125)
ax[0,0].plot([0,200],[0,200], color='k', linewidth=0.5, zorder=1)
ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(25))
ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(5))
ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(25))
ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(5))
# Tick parameters
ax[0,0].tick_params(axis='both', which='major', labelsize=12, direction='inout')
ax[0,0].tick_params(axis='both', which='minor', labelsize=10, direction='in')
# Legend
# leg = ax[0,0].legend(loc='upper left', ncol=1, fontsize=10, frameon=False, handlelength=1,
# handletextpad=0.15, columnspacing=0.25, borderpad=0.25, labelspacing=0.5,
# bbox_to_anchor=(1.035, 1.0), title=' ')
nlabel = 0
none_count = 5
# Hack to get proper columns
if markers_per_roi:
obs_labels = [None, '< 0.05', '0.05-0.10', '0.10-0.20', '0.20-0.40', '>0.40']
else:
obs_labels = ['< 0.05', '0.05-0.10', '0.10-0.20', '0.20-0.40', '>0.40']
colors = ['#2c7bb6', '#abd9e9', '#ffffbf', '#fdae61', '#d7191c']
for obs_label in obs_labels:
if obs_label is None:
none_count += 1
ax[0,0].scatter([-5],[-5], color='k', marker='s', linewidth=1,
edgecolor='white', facecolor='white', s=1, zorder=3, label=' '*none_count)
else:
ax[0,0].scatter([-5],[-5], color=colors[nlabel], marker='s', linewidth=lw,
facecolor=colors[nlabel], s=30, zorder=3, label=obs_label)
nlabel += 1
if markers_per_roi:
leg = ax[0,0].legend(loc='upper left', ncol=3, fontsize=10, frameon=True, handlelength=1,
handletextpad=0.15, columnspacing=0.25, borderpad=0.25, labelspacing=0.5,
bbox_to_anchor=(0.0,1.01), title='Region $h_{d}$ ', framealpha=1)
else:
leg = ax[0,0].legend(loc='upper left', ncol=1, fontsize=10, frameon=True, handlelength=1,
handletextpad=0.25, columnspacing=0, borderpad=0.2, labelspacing=0.2,
bbox_to_anchor=(0.0,1.01), title='$h_{d}$ (m)', framealpha=1)
# for nmarker in np.arange(0,count_reg+1):
# leg.legendHandles[nmarker]._sizes = [30]
# leg.legendHandles[nmarker]._linewidths = [0.5]
# leg.legendHandles[nmarker].set_edgecolor('k')
# ax[0,0].text(0.17, 0.98, 'Region', size=10, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes, zorder=4)
# ax[0,0].text(1.5, 0.95, '$n_{obs}$', size=10, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes)
# # Create a Rectangle patchss
# rect = FancyBboxPatch((4.35,2.35),2.1,1.45,linewidth=1, edgecolor='lightgrey', facecolor='none', clip_on=False,
# boxstyle='round, pad=0.1')
# ax[0,0].add_patch(rect)
# ax[0,0].axvline(x=5.45, ymin=0.565, ymax=0.97, clip_on=False, color='lightgrey', linewidth=1)
fig.set_size_inches(3.45,3.45)
fig_fullfn = melt_compare_fp + 'melt_compare.png'
fig.savefig(fig_fullfn, bbox_inches='tight', dpi=300)
#%%
if option_hd_comparison:
# All glaciers
# glaciers = ['1.15645', '2.14297', '7.01044', '7.01107', '11.00106', '11.01604', '11.02472', '11.02810', '11.03005',
# '12.01132', '13.43165', '13.43174', '13.43232', '13.43207', '14.06794', '14.16042', '15.03473',
# '15.03733', '15.03743', '15.04045', '15.07122', '15.07886', '15.11758', '17.13720', '18.02397']
# Good glaciers for publication quality figure
glaciers = ['1.15645', '2.14297', '11.00106', '11.01604', '11.02472', '11.02810', '11.03005',
'11.01450', '11.01509', '11.01827', '11.02749', '11.02771', '11.02796',
'13.43165', '13.43174', '13.43232', '13.43207', '14.06794', '14.15536', '14.16042',
'15.03733', '15.03473','15.04045', '15.03743', '15.07122', '15.07886', '15.11758', '17.13720',
'18.02397']
# roughly estimated from maps
# glaciers = ['13.43165', '13.43174', '13.43207']
# glaciers = ['14.15536']
# glaciers = ['11.03005']
process_files = True
regional_hd_comparison = True
bin_width = 50
n_obs_min = 5
total_obs_count_all = 0
total_obs_count = 0
if process_files:
# #%%
# glaciers_subset = [
# '18.02397']
# hd_compare_all_subset = hd_compare_all[hd_compare_all['glacno'] == 2397]
# print('\nn_obs:', int(np.round(hd_compare_all_subset.obs_count.sum())),
# '\ndensity:', np.round(hd_compare_all_subset.obs_count.sum() / hd_compare_all_subset.dc_bin_area_km2.sum(),1))
# #%%
hd_datasets_fp = hd_obs_fp + 'datasets/'
hd_datasets_pt_fp = hd_obs_fp + 'datasets/hd_pt_data/'
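        # map each RGI glacier id to a list of (observation csv, source name) tuples;
        # glaciers with multiple published datasets have multiple entries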
hd_ds_dict = {'1.15645': [(hd_datasets_fp + '1.15645_kennicott_anderson_2019-hd.csv', 'Anderson 2019')],
'2.14297': [(hd_datasets_fp + '2.14297_moore2019-melt.csv', 'Moore 2019')],
'7.01044': [(hd_datasets_fp + '7.01044_lukas2005-hd.csv', 'Lukas 2005')],
'7.01107': [(hd_datasets_fp + '7.01107_lukas2005-hd.csv', 'Lukas 2005')],
'11.00106': [(hd_datasets_fp + '11.00106_kellerer2008-hd.csv', 'Kellerer 2008')],
'11.01450': [(hd_datasets_fp + '11.01450_grosseraletsch_anderson2019-hd.csv', 'Anderson 2020')],
'11.01509': [(hd_datasets_fp + '11.01509_oberaar_anderson2019-hd.csv', 'Anderson 2020')],
'11.01604': [(hd_datasets_fp + '11.01604_delGobbo2017-hd.csv', 'Del Gobbo 2017')],
'11.01827': [(hd_datasets_fp + '11.01827_oberaletsch_anderson2019-hd.csv', 'Anderson 2020')],
'11.02472': [(hd_datasets_fp + '11.02472_bocchiola2015-melt.csv', 'Bocchiola 2015')],
'11.02749': [(hd_datasets_fp + '11.02749_cheilon_anderson2019-hd.csv', 'Anderson 2020')],
'11.02771': [(hd_datasets_fp + '11.02771_piece_anderson2019-hd.csv', 'Anderson 2020')],
'11.02796': [(hd_datasets_fp + '11.02796_brenay_anderson2019-hd.csv', 'Anderson 2020')],
'11.02810': [(hd_datasets_fp + '11.02810_reid2012-hd.csv', 'Reid 2012')],
'11.03005': [(hd_datasets_fp + '11.03005_foster2012-hd.csv', 'Foster 2012'),
(hd_datasets_fp + '11.03005_mihalcea2008-hd.csv', 'Mihalcea 2008'),
(hd_datasets_fp + '11.03005_miage_anderson2019-hd.csv', 'Anderson 2020')],
'12.01132': [(hd_datasets_fp + '12.01132_popovnin2002_layers-hd.csv', 'Popovnin 2002')],
'13.43165': [(hd_datasets_fp + '13.43165_wang2011-hd.csv', 'Wang 2011')],
'13.43174': [(hd_datasets_fp + '13.43174_wang2011-hd.csv', 'Wang 2011')],
'13.43207': [(hd_datasets_fp + '13.43207_wang2011-hd.csv', 'Wang 2011')],
'13.43232': [(hd_datasets_fp + '13.43232_zhen2019-hd.csv', 'Zhen 2019'),
(hd_datasets_fp + '13.43232_haidong2006-hd.csv', 'Haidong 2006')],
'14.06794': [(hd_datasets_fp + '14.06794_mihalcea2006-melt.csv', 'Mihalcea 2006'),
(hd_datasets_fp + '14.06794_minora2015-hd.csv', 'Minora 2015')],
'14.15536': [(hd_datasets_fp + '14.15536_banerjee2018-hd.csv', 'Banerjee 2018')],
'14.16042': [(hd_datasets_fp + '14.16042_patel2016-hd.csv', 'Patel 2016')],
'15.03473': [(hd_datasets_fp + '15.03473_nicholson2012-hd.csv', 'Nicholson 2012'),
# (hd_datasets_fp + '15.03473_nicholson2017-hd.csv', 'Nicholson 2017'),
(hd_datasets_fp + '15.03473_nicholson2018_gokyo-hd.csv', 'Nicholson 2018'),
(hd_datasets_fp + '15.03473_nicholson2018_margin-hd.csv', 'Nicholson 2018')],
'15.03733': [(hd_datasets_fp + '15.03733_gibson-hd.csv', 'Gibson 2014')],
'15.03743': [(hd_datasets_fp + '15.03743_rounce2014-hd.csv', 'Rounce 2014')],
'15.04045': [(hd_datasets_fp + '15.04045_mccarthy2017-hd.csv', 'McCarthy 2017')],
'15.07122': [(hd_datasets_fp + '15.07122_shah2019-hd.csv', 'Shah 2019')],
'15.07886': [(hd_datasets_fp + '15.07886_zhang2011-hd.csv', 'Zhang 2011')],
'15.11758': [(hd_datasets_fp + '15.11758_yang2010-hd.csv', 'Yang 2010')],
'17.13720': [(hd_datasets_fp + '17.13720_ayala2016-hd.csv', 'Ayala 2016')],
'18.02397': [(hd_datasets_fp + '18.02397_brook2013-hd.csv', 'Brook 2013'),
(hd_datasets_fp + '18.02397_brook2013-hd_map_binned.csv', 'Brook 2013')],
}
for glacier in glaciers:
print('\n')
for hd_ds_info in hd_ds_dict[glacier]:
# Load observations
hd_ds_fn, ds_name = hd_ds_info[0], hd_ds_info[1]
hd_obs = pd.read_csv(hd_ds_fn)
# remove clean ice values because only comparing to debris-covered areas
hd_obs = hd_obs[hd_obs['hd_m'] > 0]
hd_obs.reset_index(inplace=True, drop=True)
# track to record how many are discarded
if 'n_obs' in hd_obs.columns:
total_obs_count_all = total_obs_count_all + hd_obs['n_obs'].sum()
else:
total_obs_count_all = total_obs_count_all + hd_obs.shape[0]
# if lat/lon provided, remove "off-glacier" points, i.e., points not covered by debris cover maps
if 'hd_ts_cal' in hd_obs.columns:
hd_obs = hd_obs.dropna(subset=['hd_ts_cal'])
hd_obs.reset_index(inplace=True, drop=True)
glac_str = hd_ds_fn.split('/')[-1].split('_')[0]
reg = int(glac_str.split('.')[0])
glacno = int(glac_str.split('.')[1])
# Load modeled debris thickness
try:
hdts_fp = debris_prms.mb_binned_fp_wdebris_hdts
hdts_fn = glac_str + '_mb_bins_hdts.csv'
hdts_df = pd.read_csv(hdts_fp + hdts_fn)
except:
hdts_fp = debris_prms.mb_binned_fp_wdebris_hdts + '../_wdebris_hdts_extrap/'
if reg < 10:
hdts_fn = (glac_str.split('.')[0].zfill(2) + '.' + glac_str.split('.')[1] +
'_mb_bins_hdts_extrap.csv')
else:
hdts_fn = glac_str + '_mb_bins_hdts_extrap.csv'
hdts_df = pd.read_csv(hdts_fp + hdts_fn)
hdts_df.loc[:,:] = hdts_df.values.astype(np.float64)
# ===== PROCESS BINNED DATA =====
if not hd_obs['elev'].isnull().any() and 'hd_m_std' not in hd_obs.columns:
# Bins
zmin = hd_obs.elev.min()
zmax = hd_obs.elev.max()
zbincenter_min = hdts_df.loc[0,'bin_center_elev_m']
zbincenter_max = hdts_df.loc[hdts_df.shape[0]-1,'bin_center_elev_m']
# Find minimum bin
while zbincenter_min - bin_width / 2 + bin_width < zmin:
zbincenter_min += bin_width
            # Find maximum bin
            while zbincenter_max - bin_width / 2 > zmax:
zbincenter_max -= bin_width
# Compute relevant statistics for each bin
hd_compare_all_array = None
for nbin, zbincenter in enumerate(np.arange(zbincenter_min, zbincenter_max+bin_width/2, bin_width)):
zbincenter_min = zbincenter - bin_width/2
zbincenter_max = zbincenter + bin_width/2
elev_idx_obs = np.where((hd_obs['elev'].values >= zbincenter_min) &
(hd_obs['elev'].values < zbincenter_max))[0]
obs_count = 0
if len(elev_idx_obs) > 0:
# Observations
hd_obs_subset = hd_obs.loc[elev_idx_obs,'hd_m']
hd_bin_med = np.median(hd_obs_subset)
                    hd_bin_mad = np.median(abs(hd_obs_subset - np.median(hd_obs_subset)))  # api: numpy.median
import sys
import os
# -- Limit number of OPENBLAS library threads --
# On Linux-based operating systems, we observed an occupation of all cores by the underlying OpenBLAS library. Often,
# this slowed down other processes, as well as the planner itself. Therefore, it is recommended to set the number of
# threads to one. Note: this environment variable must be set before importing any OpenBLAS-based package (e.g. numpy).
os.environ['OPENBLAS_NUM_THREADS'] = str(1)
import numpy as np
import datetime
import json
import time
import configparser
import yaml
import gym
from argparse import Namespace
import graph_ltpl
from numba import njit
import math
@njit(fastmath=False, cache=True)
def nearest_point_on_trajectory(point, trajectory):
'''
Return the nearest point along the given piecewise linear trajectory.
    Same as nearest_point_on_line_segment, but vectorized. This method is quite fast; time constraints should
not be an issue so long as trajectories are not insanely long.
Order of magnitude: trajectory length: 1000 --> 0.0002 second computation (5000fps)
point: size 2 numpy array
trajectory: Nx2 matrix of (x,y) trajectory waypoints
- these must be unique. If they are not unique, a divide by 0 error will destroy the world
'''
diffs = trajectory[1:, :] - trajectory[:-1, :]
l2s = diffs[:, 0] ** 2 + diffs[:, 1] ** 2
# this is equivalent to the elementwise dot product
# dots = np.sum((point - trajectory[:-1,:]) * diffs[:,:], axis=1)
dots = np.empty((trajectory.shape[0] - 1,))
for i in range(dots.shape[0]):
dots[i] = np.dot((point - trajectory[i, :]), diffs[i, :])
t = dots / l2s
t[t < 0.0] = 0.0
t[t > 1.0] = 1.0
# t = np.clip(dots / l2s, 0.0, 1.0)
projections = trajectory[:-1, :] + (t * diffs.T).T
# dists = np.linalg.norm(point - projections, axis=1)
dists = np.empty((projections.shape[0],))
for i in range(dists.shape[0]):
temp = point - projections[i]
dists[i] = np.sqrt(np.sum(temp * temp))
min_dist_segment = np.argmin(dists)
return projections[min_dist_segment], dists[min_dist_segment], t[min_dist_segment], min_dist_segment
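# Hedged usage sketch (not part of the original planner): illustrates the return
# values of nearest_point_on_trajectory on a tiny, made-up trajectory. All values
# below are assumptions chosen only for demonstration.
def _demo_nearest_point_on_trajectory():
    traj = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 1.0]])  # three unique waypoints
    point = np.array([1.5, -0.2])
    projection, dist, t, segment = nearest_point_on_trajectory(point, traj)
    # projection lies on waypoint segment `segment` at parameter t in [0, 1];
    # dist is the Euclidean distance from `point` to that projection.
    return projection, dist, t, segment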
@njit(fastmath=False, cache=True)
def first_point_on_trajectory_intersecting_circle(point, radius, trajectory, t=0.0, wrap=False):
    ''' Starts at the beginning of the trajectory and finds the first point one radius away from the given point along the trajectory.
Assumes that the first segment passes within a single radius of the point
http://codereview.stackexchange.com/questions/86421/line-segment-to-circle-collision-algorithm
'''
start_i = int(t)
start_t = t % 1.0
first_t = None
first_i = None
first_p = None
trajectory = np.ascontiguousarray(trajectory)
for i in range(start_i, trajectory.shape[0] - 1):
start = trajectory[i, :]
end = trajectory[i + 1, :] + 1e-6
V = np.ascontiguousarray(end - start)
a = np.dot(V, V)
b = 2.0 * np.dot(V, start - point)
c = np.dot(start, start) + np.dot(point, point) - 2.0 * np.dot(start, point) - radius * radius
discriminant = b * b - 4 * a * c
if discriminant < 0:
continue
# print "NO INTERSECTION"
# else:
# if discriminant >= 0.0:
discriminant = np.sqrt(discriminant)
t1 = (-b - discriminant) / (2.0 * a)
t2 = (-b + discriminant) / (2.0 * a)
if i == start_i:
if t1 >= 0.0 and t1 <= 1.0 and t1 >= start_t:
first_t = t1
first_i = i
first_p = start + t1 * V
break
if t2 >= 0.0 and t2 <= 1.0 and t2 >= start_t:
first_t = t2
first_i = i
first_p = start + t2 * V
break
elif t1 >= 0.0 and t1 <= 1.0:
first_t = t1
first_i = i
first_p = start + t1 * V
break
elif t2 >= 0.0 and t2 <= 1.0:
first_t = t2
first_i = i
first_p = start + t2 * V
break
    # wrap around to the beginning of the trajectory if no intersection is found
if wrap and first_p is None:
for i in range(-1, start_i):
start = trajectory[i % trajectory.shape[0], :]
end = trajectory[(i + 1) % trajectory.shape[0], :] + 1e-6
V = end - start
a = np.dot(V, V)
b = 2.0 * np.dot(V, start - point)
c = np.dot(start, start) + np.dot(point, point) - 2.0 * np.dot(start, point) - radius * radius
discriminant = b * b - 4 * a * c
if discriminant < 0:
continue
discriminant = np.sqrt(discriminant)
t1 = (-b - discriminant) / (2.0 * a)
t2 = (-b + discriminant) / (2.0 * a)
if t1 >= 0.0 and t1 <= 1.0:
first_t = t1
first_i = i
first_p = start + t1 * V
break
elif t2 >= 0.0 and t2 <= 1.0:
first_t = t2
first_i = i
first_p = start + t2 * V
break
return first_p, first_i, first_t
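# Hedged usage sketch (illustrative only): finds where a circle of radius 1.0 around
# the origin first intersects a straight two-point trajectory. The waypoints below
# are made-up assumptions for demonstration.
def _demo_first_point_on_circle():
    traj = np.array([[0.0, 0.0], [2.0, 0.0]])
    point = np.array([0.0, 0.0])
    first_p, first_i, first_t = first_point_on_trajectory_intersecting_circle(point, 1.0, traj)
    # first_p should be approximately (1.0, 0.0): one radius along the first segment.
    return first_p, first_i, first_t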
# @njit(fastmath=False, cache=True)
def get_actuation(pose_theta, lookahead_point, position, lookahead_distance, wheelbase):
waypoint_y = np.dot(np.array([np.sin(-pose_theta), np.cos(-pose_theta)]), lookahead_point[0:2] - position)
speed = lookahead_point[2]
if np.abs(waypoint_y) < 1e-6:
return speed, 0.
radius = 1 / (2.0 * waypoint_y / lookahead_distance ** 2)
steering_angle = np.arctan(wheelbase / radius)
return speed, steering_angle
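# Hedged worked example (not from the original code): for a vehicle at the origin
# heading along +x, a lookahead point at (2, 1) with target speed 3 m/s gives
# lateral offset y = 1, turning radius R = L_d^2 / (2*y), and steering = atan(wheelbase / R).
# The wheelbase value below is an assumption for illustration only.
def _demo_get_actuation():
    position = np.array([0.0, 0.0])
    lookahead_point = np.array([2.0, 1.0, 3.0])          # (x, y, speed)
    lookahead_distance = np.linalg.norm(lookahead_point[0:2] - position)
    speed, steering_angle = get_actuation(0.0, lookahead_point, position,
                                          lookahead_distance, wheelbase=0.33)
    return speed, steering_angle                          # ~3.0 m/s, ~0.13 rad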
@njit(fastmath=False, cache=True)
def pi_2_pi(angle):
if angle > math.pi:
return angle - 2.0 * math.pi
if angle < -math.pi:
return angle + 2.0 * math.pi
return angle
class PurePursuitPlanner:
"""
Example Planner
"""
def __init__(self, conf, wb):
self.wheelbase = wb
self.conf = conf
self.load_waypoints(conf)
self.max_reacquire = 20.
def load_waypoints(self, conf):
# load waypoints
self.waypoints = np.loadtxt(conf.wpt_path, delimiter=conf.wpt_delim, skiprows=conf.wpt_rowskip)
def _get_current_waypoint(self, waypoints, lookahead_distance, position, theta):
wpts = np.vstack((self.waypoints[:, self.conf.wpt_xind], self.waypoints[:, self.conf.wpt_yind])).T
nearest_point, nearest_dist, t, i = nearest_point_on_trajectory(position, wpts)
if nearest_dist < lookahead_distance:
lookahead_point, i2, t2 = first_point_on_trajectory_intersecting_circle(position, lookahead_distance, wpts, i+t, wrap=True)
            if i2 is None:
return None
current_waypoint = np.empty((3, ))
# x, y
current_waypoint[0:2] = wpts[i2, :]
# speed
current_waypoint[2] = waypoints[i, self.conf.wpt_vind]
return current_waypoint
elif nearest_dist < self.max_reacquire:
return np.append(wpts[i, :], waypoints[i, self.conf.wpt_vind])
else:
return None
def plan(self, pose_x, pose_y, pose_theta, lookahead_distance, vgain):
position = np.array([pose_x, pose_y])
lookahead_point = self._get_current_waypoint(self.waypoints, lookahead_distance, position, pose_theta)
if lookahead_point is None:
return 4.0, 0.0
speed, steering_angle = get_actuation(pose_theta, lookahead_point, position, lookahead_distance, self.wheelbase)
speed = vgain * speed
return speed, steering_angle
class Controllers:
"""
    This is the Pure Pursuit algorithm that is tracking the desired path. In this case we are following the
    curvature-optimal raceline.
"""
def __init__(self, conf, wb):
self.wheelbase = wb
self.conf = conf
self.max_reacquire = 20.
self.vehicle_control_e_f = 0 # Control error
self.vehicle_control_error3 = 0
def _get_current_waypoint(self, lookahead_distance, position, traj_set, sel_action):
# Check which trajectory set is available and select one
for sel_action in ["right", "left", "straight", "follow"]: # try to force 'right', else try next in list
if sel_action in traj_set.keys():
break
        # Extract trajectory information from the current set: X-position, Y-position, velocity
path_x = traj_set[sel_action][0][:,1]
path_y = traj_set[sel_action][0][:, 2]
velocity = traj_set[sel_action][0][:, 5]
# Create waypoints based on the current path
wpts = np.vstack((np.array(path_x), np.array(path_y))).T
nearest_point, nearest_dist, t, i = nearest_point_on_trajectory(position, wpts)
#print ('nearest distance: ', nearest_dist)
if nearest_dist < lookahead_distance:
lookahead_point, i2, t2 = first_point_on_trajectory_intersecting_circle(position, lookahead_distance, wpts,
i + t, wrap=True)
            if i2 is None:
return None
current_waypoint = np.empty((3,))
# x, y
current_waypoint[0:2] = wpts[i2, :]
# speed
current_waypoint[2] = velocity[i2]
return current_waypoint
elif nearest_dist < self.max_reacquire:
return np.append(wpts[i, :], velocity[i])
else:
return None
def PurePursuit(self, pose_x, pose_y, pose_theta, lookahead_distance, vgain, traj_set, sel_action):
position = np.array([pose_x, pose_y])
lookahead_point = self._get_current_waypoint(lookahead_distance, position, traj_set,sel_action)
if lookahead_point is None:
return 4.0, 0.0
speed, steering_angle = get_actuation(pose_theta, lookahead_point, position, lookahead_distance, self.wheelbase)
speed = vgain * speed
return speed, steering_angle
def calc_theta_and_ef(self, vehicle_state, waypoints, goal_heading, goal_velocity):
"""
        Calculate theta and ef.
        Theta is the heading of the car, which must be minimized.
        ef is the crosstrack error: the distance from the optimal path, i.e. the lateral distance in the Frenet frame (front wheel).
"""
############# Calculate closest point to the front axle based on minimum distance calculation ################
# Calculate Position of the front axle of the vehicle based on current position
fx = vehicle_state[0] + self.wheelbase * math.cos(vehicle_state[2])
fy = vehicle_state[1] + self.wheelbase * math.sin(vehicle_state[2])
position_front_axle = np.array([fx, fy])
        # Find target index for the correct waypoint by finding the index with the lowest distance value (hypotenuse)
#wpts = np.vstack((self.waypoints[:, self.conf.wpt_xind], self.waypoints[:, self.conf.wpt_yind])).T
nearest_point_front, nearest_dist, t, target_index = nearest_point_on_trajectory(position_front_axle, waypoints)
# Calculate the Distances from the front axle to all the waypoints
distance_nearest_point_x = fx - nearest_point_front[0]
distance_nearest_point_y = fy - nearest_point_front[1]
        vec_dist_nearest_point = np.array([distance_nearest_point_x, distance_nearest_point_y])  # api: numpy.array
import cv2
import numpy as np
import math
import torch
import torch.nn.functional as F
import nn_cuda
import ransac_voting_gpu
import transforms3d.quaternions as txq
import transforms3d.euler as txe
def b_inv(b_mat):
'''
code from
https://stackoverflow.com/questions/46595157/how-to-apply-the-torch-inverse-function-of-pytorch-to-every-sample-in-the-batc
:param b_mat:
:return:
'''
eye = b_mat.new_ones(b_mat.size(-1)).diag().expand_as(b_mat)
if torch.__version__ >= '1.0.0':
b_inv, _ = torch.solve(eye, b_mat)
else:
b_inv, _ = torch.gesv(eye, b_mat)
# b_inv, _ = torch.gesv(eye, b_mat)
return b_inv
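# Hedged usage sketch: batched inverse of two 2x2 matrices via b_inv. This assumes a
# PyTorch version where torch.solve (or torch.gesv) is still available, as the helper
# above does; the matrices below are made up for demonstration.
def _demo_b_inv():
    b_mat = torch.tensor([[[2.0, 0.0], [0.0, 4.0]],
                          [[1.0, 1.0], [0.0, 1.0]]])
    inv = b_inv(b_mat)                      # shape [2, 2, 2]
    identity = torch.matmul(b_mat, inv)     # each product should be ~identity
    return inv, identity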
def ransac_voting_vertex(mask, vertex, round_hyp_num, inlier_thresh=0.999, confidence=0.99, max_iter=20,
min_num=5, max_num=30000):
'''
:param mask: [b,h,w]
:param vertex: [b,h,w,vn,2]
:param round_hyp_num:
:param inlier_thresh:
:return: [b,vn,2]
'''
b, h, w, vn, _ = vertex.shape
batch_win_pts = []
for bi in range(b):
hyp_num = 0
cur_mask = (mask[bi]).byte()
foreground_num = torch.sum(cur_mask)
# if too few points, just skip it
if foreground_num < min_num:
win_pts = torch.zeros([1, vn, 2], dtype=torch.float32, device=mask.device)
batch_win_pts.append(win_pts) # [1,vn,2]
continue
        # if there are too many foreground points, randomly downsample them
if foreground_num > max_num:
selection = torch.zeros(cur_mask.shape, dtype=torch.float32, device=mask.device).uniform_(0, 1)
selected_mask = (selection < (max_num / foreground_num.float()))
cur_mask *= selected_mask
coords = torch.nonzero(cur_mask).float() # [tn,2]
# print(coords.shape)
coords = coords[:, [1, 0]]
direct = vertex[bi].masked_select(torch.unsqueeze(torch.unsqueeze(cur_mask, 2), 3)) # [tn,vn,2]
direct = direct.view([coords.shape[0], vn, 2])
tn = coords.shape[0]
idxs = torch.zeros([round_hyp_num, vn, 2], dtype=torch.int32, device=mask.device).random_(0, direct.shape[0])
all_win_ratio = torch.zeros([vn], dtype=torch.float32, device=mask.device)
all_win_pts = torch.zeros([vn, 2], dtype=torch.float32, device=mask.device)
cur_iter = 0
while True:
# generate hypothesis
cur_hyp_pts = ransac_voting_gpu.generate_hypothesis(direct, coords, idxs) # [hn,vn,2]
# voting for hypothesis
cur_inlier = torch.zeros([round_hyp_num, vn, tn], dtype=torch.uint8, device=mask.device)
ransac_voting_gpu.voting_for_hypothesis(direct, coords, cur_hyp_pts, cur_inlier, inlier_thresh) # [hn,vn,tn]
# find max
cur_inlier_counts = torch.sum(cur_inlier, 2) # [hn,vn]
cur_win_counts, cur_win_idx = torch.max(cur_inlier_counts, 0) # [vn]
cur_win_pts = cur_hyp_pts[cur_win_idx, torch.arange(vn)]
cur_win_ratio = cur_win_counts.float() / tn
# update best point
larger_mask = all_win_ratio < cur_win_ratio
all_win_pts[larger_mask, :] = cur_win_pts[larger_mask, :]
all_win_ratio[larger_mask] = cur_win_ratio[larger_mask]
# check confidence
hyp_num += round_hyp_num
cur_iter += 1
cur_min_ratio = torch.min(all_win_ratio)
if (1 - (1 - cur_min_ratio ** 2) ** hyp_num) > confidence or cur_iter > max_iter:
break
# compute mean intersection again
normal = torch.zeros_like(direct) # [tn,vn,2]
normal[:, :, 0] = direct[:, :, 1]
normal[:, :, 1] = -direct[:, :, 0]
all_inlier = torch.zeros([1, vn, tn], dtype=torch.uint8, device=mask.device)
all_win_pts = torch.unsqueeze(all_win_pts, 0) # [1,vn,2]
ransac_voting_gpu.voting_for_hypothesis(direct, coords, all_win_pts, all_inlier, inlier_thresh) # [1,vn,tn]
# coords [tn,2] normal [vn,tn,2]
all_inlier=torch.squeeze(all_inlier.float(),0) # [vn,tn]
normal=normal.permute(1,0,2) # [vn,tn,2]
normal=normal*torch.unsqueeze(all_inlier,2) # [vn,tn,2] outlier is all zero
if torch.norm(normal, p=2) > 1e-6:
b=torch.sum(normal*torch.unsqueeze(coords,0),2) # [vn,tn]
ATA=torch.matmul(normal.permute(0,2,1),normal) # [vn,2,2]
ATb=torch.sum(normal*torch.unsqueeze(b,2),1) # [vn,2]
all_win_pts=torch.matmul(b_inv(ATA),torch.unsqueeze(ATb,2)) # [vn,2,1]
batch_win_pts.append(all_win_pts[None,:,:,0])
if len(batch_win_pts) > 0:
return 'success', torch.cat(batch_win_pts)
else:
return 'failed', None
def ransac_voting_vertex_v2(mask, vertex, round_hyp_num, inlier_thresh=0.999, confidence=0.99, max_iter=20,
min_num=30, max_num=30000, min_inlier_count=5, neighbor_radius=6.0):
'''
:param mask: [b,h,w]
:param vertex: [b,h,w,vn,2]
:param round_hyp_num:
:param inlier_thresh:
:return: [b,vn,2]
'''
b, h, w, vn, _ = vertex.shape
batch_win_pts = []
for bi in range(b):
hyp_num = 0
cur_mask = (mask[bi]).byte()
foreground_num = torch.sum(cur_mask)
# if too few points, just skip it
if foreground_num < min_num:
win_pts = torch.zeros([1, vn, 2], dtype=torch.float32, device=mask.device)
batch_win_pts.append(win_pts) # [1,vn,2]
continue
        # if there are too many foreground points, randomly downsample them
if foreground_num > max_num:
selection = torch.zeros(cur_mask.shape, dtype=torch.float32, device=mask.device).uniform_(0, 1)
selected_mask = (selection < (max_num / foreground_num.float()))
cur_mask *= selected_mask
coords = torch.nonzero(cur_mask).float() # [tn,2]
coords = coords[:, [1, 0]]
direct = vertex[bi].masked_select(torch.unsqueeze(torch.unsqueeze(cur_mask, 2), 3)) # [tn,vn,2]
direct = direct.view([coords.shape[0], vn, 2])
tn = coords.shape[0]
idxs = torch.zeros([round_hyp_num, vn, 2], dtype=torch.int32, device=mask.device).random_(0, direct.shape[0])
all_win_ratio = torch.zeros([vn], dtype=torch.float32, device=mask.device)
all_win_pts = torch.zeros([vn, 2], dtype=torch.float32, device=mask.device)
cur_iter = 0
while True:
# generate hypothesis
cur_hyp_pts = ransac_voting_gpu.generate_hypothesis(direct, coords, idxs) # [hn,vn,2]
# voting for hypothesis
cur_inlier = torch.zeros([round_hyp_num, vn, tn], dtype=torch.uint8, device=mask.device)
ransac_voting_gpu.voting_for_hypothesis(direct, coords, cur_hyp_pts, cur_inlier, inlier_thresh) # [hn,vn,tn]
# find max
cur_inlier_counts = torch.sum(cur_inlier, 2) # [hn,vn]
cur_win_counts, cur_win_idx = torch.max(cur_inlier_counts, 0) # [vn]
cur_win_pts = cur_hyp_pts[cur_win_idx, torch.arange(vn)]
cur_win_ratio = cur_win_counts.float() / tn
# update best point
larger_mask = all_win_ratio < cur_win_ratio
all_win_pts[larger_mask, :] = cur_win_pts[larger_mask, :]
all_win_ratio[larger_mask] = cur_win_ratio[larger_mask]
# check confidence
hyp_num += round_hyp_num
cur_iter += 1
cur_min_ratio = torch.min(all_win_ratio)
if (1 - (1 - cur_min_ratio ** 2) ** hyp_num) > confidence:
# curr_confidence = (1 - (1 - cur_min_ratio ** 2) ** hyp_num)
# print('stop by confidence', curr_confidence, 'cur_min_ratio', cur_min_ratio, 'final iter', cur_iter)
break
if cur_iter > max_iter:
# print('stop by max_iter, final iter', cur_iter)
break
# compute mean intersection again
normal = torch.zeros_like(direct) # [tn,vn,2]
normal[:, :, 0] = direct[:, :, 1]
normal[:, :, 1] = -direct[:, :, 0]
all_inlier = torch.zeros([1, vn, tn], dtype=torch.uint8, device=mask.device)
all_win_pts = torch.unsqueeze(all_win_pts, 0) # [1,vn,2]
ransac_voting_gpu.voting_for_hypothesis(direct, coords, all_win_pts, all_inlier, inlier_thresh) # [1,vn,tn]
# coords [tn,2] normal [vn,tn,2]
all_inlier=torch.squeeze(all_inlier.float(),0) # [vn,tn]
normal=normal.permute(1,0,2) # [vn,tn,2]
normal=normal*torch.unsqueeze(all_inlier,2) # [vn,tn,2] outlier is all zero
if torch.norm(normal, p=2) > 1e-6:
b=torch.sum(normal*torch.unsqueeze(coords,0),2) # [vn,tn]
ATA=torch.matmul(normal.permute(0,2,1),normal) # [vn,2,2]
ATb=torch.sum(normal*torch.unsqueeze(b,2),1) # [vn,2]
all_win_pts=torch.matmul(b_inv(ATA),torch.unsqueeze(ATb,2)) # [vn,2,1]
batch_win_pts.append(all_win_pts[None,:,:,0])
# iterative recompute new vertex from neighbor points
# neighbor_radius = 5.0
iter_time = 0
while True:
iter_time += 1
if iter_time > 20:
break
if len(batch_win_pts) <= 0: break
last_vertex = batch_win_pts[0].reshape(1, 2)
# last_vertex = torch.from_numpy(np.array(batch_win_pts[0].cpu()).reshape(1, 2)).to(coords.device)
# print(iter_time, last_vertex)
# compute distance
dist = torch.norm(coords - last_vertex, p=2, dim=1)
# we only use points near last vertex to compute new vertex
neighbor_idx = (dist < neighbor_radius).nonzero().squeeze()
if neighbor_idx.nelement() < min_inlier_count:
return 'failed', None
neighbor_coords = coords[neighbor_idx, :]
neighbor_direct = direct[neighbor_idx, :, :]
tn = neighbor_coords.shape[0]
idxs = torch.zeros([round_hyp_num, vn, 2], dtype=torch.int32, device=mask.device).random_(0, direct.shape[0])
all_win_ratio = torch.zeros([vn], dtype=torch.float32, device=mask.device)
all_win_pts = torch.zeros([vn, 2], dtype=torch.float32, device=mask.device)
# compute mean intersection again
normal = torch.zeros_like(neighbor_direct) # [tn,vn,2]
normal[:, :, 0] = neighbor_direct[:, :, 1]
normal[:, :, 1] = -neighbor_direct[:, :, 0]
# torch.ones! all neighbors will be regarded as inliers
all_inlier = torch.ones([1, vn, tn], dtype=torch.uint8, device=mask.device)
# coords [tn,2] normal [vn,tn,2]
batch_win_pts = []
all_inlier=torch.squeeze(all_inlier.float(),0) # [vn,tn]
normal=normal.permute(1,0,2) # [vn,tn,2]
normal=normal*torch.unsqueeze(all_inlier,2) # [vn,tn,2] outlier is all zero
if torch.norm(normal, p=2) > 1e-6:
b=torch.sum(normal*torch.unsqueeze(neighbor_coords,0),2) # [vn,tn]
ATA=torch.matmul(normal.permute(0,2,1),normal) # [vn,2,2]
ATb=torch.sum(normal*torch.unsqueeze(b,2),1) # [vn,2]
all_win_pts=torch.matmul(b_inv(ATA),torch.unsqueeze(ATb,2)) # [vn,2,1]
batch_win_pts.append(all_win_pts[None,:,:,0])
if len(batch_win_pts) > 0:
curr_vertex = batch_win_pts[0].reshape(1, 2)
# curr_vertex = torch.from_numpy(np.array(batch_win_pts[0].cpu()).reshape(1, 2)).to(coords.device)
iter_step = torch.norm(curr_vertex - last_vertex, p=2, dim=1)
if iter_step < 1e-3:
# print('iter stop at %d with step %.5e' % (iter_time, iter_step))
break
if len(batch_win_pts) > 0:
return 'success', torch.cat(batch_win_pts)
else:
return 'failed', None
def evaluate_segmentation(seg_pred, coding_book, size=None, use_own_nn=False):
# evaluate seg_pred
epsilon = 1e-8
seg_pred = seg_pred / (torch.norm(seg_pred, dim=1) + epsilon)[:, None, :, :].expand_as(seg_pred)
coding_book = coding_book / (torch.norm(coding_book, dim=1) + epsilon)[:, None].expand_as(coding_book)
n, c, h, w = seg_pred.shape
    if use_own_nn:
seg_mask = torch.zeros(n, h, w).cuda().float()
nn_cuda.NearestNeighbor(seg_pred.data.permute(0, 2, 3, 1).contiguous(), coding_book.data, seg_mask)
seg_pred = seg_mask.detach().squeeze().float()
else:
assert n == 1
e, _ = coding_book.shape
coding_book = coding_book.detach().unsqueeze(2).unsqueeze(3).expand(e, c, h, w)
        seg_pred = seg_pred.detach().expand(e, c, h, w)
seg_pred = torch.argmin((seg_pred - coding_book).pow(2).sum(1), dim=0).float()
seg_pred = F.interpolate(seg_pred.unsqueeze(0).unsqueeze(0), size=size, mode="nearest").squeeze().long()
return seg_pred, None
def evaluate_vertex(vertex_pred, seg_pred, id2center, round_hyp_num=256, inlier_thresh=0.999, max_num=10000, max_iter=30, min_mask_num=20):
vertex_pred = vertex_pred.permute(0, 2, 3, 1)
b, h, w, vn_2 = vertex_pred.shape
vertex_pred = vertex_pred.view(b, h, w, vn_2//2, 2)
unique_labels = torch.unique(seg_pred)
keypoint_preds = []
for label in unique_labels:
if label == 0: continue
mask = (seg_pred == label)
mask = mask.unsqueeze(0)
if mask.sum() < min_mask_num: continue
keypoint_preds.append((ransac_voting_vertex(mask, vertex_pred, round_hyp_num,
inlier_thresh=inlier_thresh, max_num=max_num, max_iter=max_iter), label))
pt3d_filter = []
pt2d_filter = []
idx_filter = []
for (status, pt2d_pred), idx in keypoint_preds:
if status == 'failed': continue
pt2d_pred = pt2d_pred.cpu().numpy()
if True in np.isnan(pt2d_pred): continue
pt3d_filter.append(id2center[idx])
pt2d_filter.append(pt2d_pred[0][0])
idx_filter.append(idx.data.item())
if len(pt3d_filter) > 0:
pt3d_filter = np.concatenate(pt3d_filter).reshape(-1, 3)
pt2d_filter = np.concatenate(pt2d_filter).reshape(-1, 2)
else:
pt3d_filter = np.array(pt3d_filter)
pt2d_filter = np.array(pt2d_filter)
idx_filter = np.array(idx_filter)
return pt3d_filter, pt2d_filter, idx_filter
def evaluate_vertex_v2(vertex_pred, seg_pred, id2center, round_hyp_num=256, inlier_thresh=0.999, max_num=10000, max_iter=30, min_mask_num=20, min_inlier_count=5, neighbor_radius=6.0):
vertex_pred = vertex_pred.permute(0, 2, 3, 1)
b, h, w, vn_2 = vertex_pred.shape
vertex_pred = vertex_pred.view(b, h, w, vn_2//2, 2)
unique_labels = torch.unique(seg_pred)
keypoint_preds = []
for label in unique_labels:
if label == 0: continue
mask = (seg_pred == label)
mask = mask.unsqueeze(0)
if mask.sum() < min_mask_num: continue
keypoint_preds.append((ransac_voting_vertex_v2(mask, vertex_pred, round_hyp_num,
inlier_thresh=inlier_thresh, max_num=max_num, max_iter=max_iter,
min_inlier_count=min_inlier_count, neighbor_radius=neighbor_radius), label))
pt3d_filter = []
pt2d_filter = []
idx_filter = []
for (status, pt2d_pred), idx in keypoint_preds:
if status == 'failed': continue
pt2d_pred = pt2d_pred.cpu().numpy()
if True in np.isnan(pt2d_pred): continue
pt3d_filter.append(id2center[idx])
pt2d_filter.append(pt2d_pred[0][0])
idx_filter.append(idx.data.item())
if len(pt3d_filter) > 0:
        pt3d_filter = np.concatenate(pt3d_filter)  # api: numpy.concatenate
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for dealing with HEALPix projections and mappings."""
import copy
import re
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.units import Quantity
from astropy import units as u
from astropy.utils import lazyproperty
from gammapy.utils.array import is_power2
from .geom import Geom, MapCoord, pix_tuple_to_idx, skycoord_to_lonlat
from .axes import MapAxes
from .utils import INVALID_INDEX, coordsys_to_frame, frame_to_coordsys
from .wcs import WcsGeom
# Not sure if we should expose this in the docs or not:
# HPX_FITS_CONVENTIONS, HpxConv
__all__ = ["HpxGeom"]
# Approximation of the size of HEALPIX pixels (in degrees) for a particular order.
# Used to convert from HEALPIX to WCS-based projections.
HPX_ORDER_TO_PIXSIZE = np.array(
[32.0, 16.0, 8.0, 4.0, 2.0, 1.0, 0.50, 0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.002]
)
class HpxConv:
"""Data structure to define how a HEALPIX map is stored to FITS."""
def __init__(self, convname, **kwargs):
self.convname = convname
self.colstring = kwargs.get("colstring", "CHANNEL")
self.firstcol = kwargs.get("firstcol", 1)
self.hduname = kwargs.get("hduname", "SKYMAP")
self.bands_hdu = kwargs.get("bands_hdu", "EBOUNDS")
self.quantity_type = kwargs.get("quantity_type", "integral")
self.frame = kwargs.get("frame", "COORDSYS")
def colname(self, indx):
return f"{self.colstring}{indx}"
@classmethod
def create(cls, convname="gadf"):
return copy.deepcopy(HPX_FITS_CONVENTIONS[convname])
@staticmethod
def identify_hpx_format(header):
"""Identify the convention used to write this file."""
# Hopefully the file contains the HPX_CONV keyword specifying
# the convention used
if "HPX_CONV" in header:
return header["HPX_CONV"].lower()
# Try based on the EXTNAME keyword
hduname = header.get("EXTNAME", None)
if hduname == "HPXEXPOSURES":
return "fgst-bexpcube"
elif hduname == "SKYMAP2":
if "COORDTYPE" in header.keys():
return "galprop"
else:
return "galprop2"
elif hduname == "xtension":
return "healpy"
# Check the name of the first column
colname = header["TTYPE1"]
if colname == "PIX":
colname = header["TTYPE2"]
if colname == "KEY":
return "fgst-srcmap-sparse"
elif colname == "ENERGY1":
return "fgst-template"
elif colname == "COSBINS":
return "fgst-ltcube"
elif colname == "Bin0":
return "galprop"
elif colname == "CHANNEL1" or colname == "CHANNEL0":
if hduname == "SKYMAP":
return "fgst-ccube"
else:
return "fgst-srcmap"
else:
raise ValueError("Could not identify HEALPIX convention")
HPX_FITS_CONVENTIONS = {}
"""Various conventions for storing HEALPIX maps in FITS files"""
HPX_FITS_CONVENTIONS[None] = HpxConv("gadf", bands_hdu="BANDS")
HPX_FITS_CONVENTIONS["gadf"] = HpxConv("gadf", bands_hdu="BANDS")
HPX_FITS_CONVENTIONS["fgst-ccube"] = HpxConv("fgst-ccube")
HPX_FITS_CONVENTIONS["fgst-ltcube"] = HpxConv(
"fgst-ltcube", colstring="COSBINS", hduname="EXPOSURE", bands_hdu="CTHETABOUNDS"
)
HPX_FITS_CONVENTIONS["fgst-bexpcube"] = HpxConv(
"fgst-bexpcube", colstring="ENERGY", hduname="HPXEXPOSURES", bands_hdu="ENERGIES"
)
HPX_FITS_CONVENTIONS["fgst-srcmap"] = HpxConv(
"fgst-srcmap", hduname=None, quantity_type="differential"
)
HPX_FITS_CONVENTIONS["fgst-template"] = HpxConv(
"fgst-template", colstring="ENERGY", bands_hdu="ENERGIES"
)
HPX_FITS_CONVENTIONS["fgst-srcmap-sparse"] = HpxConv(
"fgst-srcmap-sparse", colstring=None, hduname=None, quantity_type="differential"
)
HPX_FITS_CONVENTIONS["galprop"] = HpxConv(
"galprop",
colstring="Bin",
hduname="SKYMAP2",
bands_hdu="ENERGIES",
quantity_type="differential",
frame="COORDTYPE",
)
HPX_FITS_CONVENTIONS["galprop2"] = HpxConv(
"galprop",
colstring="Bin",
hduname="SKYMAP2",
bands_hdu="ENERGIES",
quantity_type="differential",
)
HPX_FITS_CONVENTIONS["healpy"] = HpxConv(
"healpy",
hduname=None,
colstring=None
)
def unravel_hpx_index(idx, npix):
"""Convert flattened global map index to an index tuple.
Parameters
----------
idx : `~numpy.ndarray`
Flat index.
npix : `~numpy.ndarray`
Number of pixels in each band.
Returns
-------
idx : tuple of `~numpy.ndarray`
Index array for each dimension of the map.
"""
if npix.size == 1:
return tuple([idx])
dpix = np.zeros(npix.size, dtype="i")
dpix[1:] = np.cumsum(npix.flat[:-1])
bidx = np.searchsorted(np.cumsum(npix.flat), idx + 1)
pix = idx - dpix[bidx]
return tuple([pix] + list(np.unravel_index(bidx, npix.shape)))
def ravel_hpx_index(idx, npix):
"""Convert map index tuple to a flattened index.
Parameters
----------
idx : tuple of `~numpy.ndarray`
Returns
-------
idx : `~numpy.ndarray`
"""
if len(idx) == 1:
return idx[0]
# TODO: raise exception for indices that are out of bounds
idx0 = idx[0]
idx1 = np.ravel_multi_index(idx[1:], npix.shape, mode="clip")
npix = np.concatenate((np.array([0]), npix.flat[:-1]))
return idx0 + np.cumsum(npix)[idx1]
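# Hedged round-trip sketch: ravel a (pixel, band) index tuple into flat global indices
# and unravel it back. The two-band npix array below is an assumption chosen only to
# illustrate the offset bookkeeping.
def _demo_ravel_unravel_hpx_index():
    npix = np.array([12, 48])                       # pixels per band
    idx = (np.array([3, 40]), np.array([0, 1]))     # (pixel index, band index)
    flat = ravel_hpx_index(idx, npix)               # -> array([ 3, 52])
    restored = unravel_hpx_index(flat, npix)        # -> (array([ 3, 40]), array([0, 1]))
    return flat, restored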
def coords_to_vec(lon, lat):
"""Converts longitude and latitude coordinates to a unit 3-vector.
Returns
-------
array(3,n) with v_x[i],v_y[i],v_z[i] = directional cosines
"""
phi = np.radians(lon)
theta = (np.pi / 2) - np.radians(lat)
sin_t = np.sin(theta)
cos_t = np.cos(theta)
x = sin_t * np.cos(phi)
y = sin_t * np.sin(phi)
z = cos_t
# Stack them into the output array
out = np.vstack((x, y, z)).swapaxes(0, 1)
return out
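# Hedged sanity check: coords_to_vec should return unit vectors; lon=0, lat=0 maps to
# approximately (1, 0, 0). The coordinates below are made up for illustration.
def _demo_coords_to_vec():
    vec = coords_to_vec(np.array([0.0, 90.0]), np.array([0.0, 0.0]))   # shape (2, 3)
    norms = np.linalg.norm(vec, axis=1)                                # ~[1.0, 1.0]
    return vec, norms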
def get_nside_from_pix_size(pixsz):
"""Get the NSIDE that is closest to the given pixel size.
Parameters
----------
pix : `~numpy.ndarray`
Pixel size in degrees.
Returns
-------
nside : `~numpy.ndarray`
NSIDE parameter.
"""
import healpy as hp
pixsz = np.array(pixsz, ndmin=1)
nside = 2 ** np.linspace(1, 14, 14, dtype=int)
nside_pixsz = np.degrees(hp.nside2resol(nside))
return nside[np.argmin(np.abs(nside_pixsz - pixsz[..., None]), axis=-1)]
def get_pix_size_from_nside(nside):
"""Estimate of the pixel size from the HEALPIX nside coordinate.
This just uses a lookup table to provide a nice round number
for each HEALPIX order.
"""
order = nside_to_order(nside)
if np.any(order < 0) or np.any(order > 13):
raise ValueError(f"HEALPIX order must be 0 to 13. Got: {order!r}")
return HPX_ORDER_TO_PIXSIZE[order]
def match_hpx_pix(nside, nest, nside_pix, ipix_ring):
"""TODO: document."""
import healpy as hp
ipix_in = np.arange(12 * nside * nside)
vecs = hp.pix2vec(nside, ipix_in, nest)
pix_match = hp.vec2pix(nside_pix, vecs[0], vecs[1], vecs[2]) == ipix_ring
return ipix_in[pix_match]
def parse_hpxregion(region):
"""Parse the ``HPX_REG`` header keyword into a list of tokens.
"""
m = re.match(r"([A-Za-z\_]*?)\((.*?)\)", region)
if m is None:
raise ValueError(f"Failed to parse hpx region string: {region!r}")
if not m.group(1):
return re.split(",", m.group(2))
else:
return [m.group(1)] + re.split(",", m.group(2))
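# Hedged usage sketch: tokenising an HPX_REG disk string. The concrete centre and
# radius values are arbitrary and chosen only for illustration.
def _demo_parse_hpxregion():
    tokens = parse_hpxregion("DISK(83.63,22.01,10.0)")
    # -> ['DISK', '83.63', '22.01', '10.0']
    return tokens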
def nside_to_order(nside):
"""Compute the ORDER given NSIDE.
Returns -1 for NSIDE values that are not a power of 2.
"""
nside = np.array(nside, ndmin=1)
order = -1 * np.ones_like(nside)
m = is_power2(nside)
order[m] = np.log2(nside[m]).astype(int)
return order
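# Hedged usage sketch: orders for a few NSIDE values; 12 is not a power of two,
# so it maps to -1 as documented above.
def _demo_nside_to_order():
    return nside_to_order(np.array([1, 2, 64, 12]))   # -> array([ 0,  1,  6, -1])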
def get_superpixels(idx, nside_subpix, nside_superpix, nest=True):
"""Compute the indices of superpixels that contain a subpixel.
Parameters
----------
idx : `~numpy.ndarray`
Array of HEALPix pixel indices for subpixels of NSIDE
``nside_subpix``.
nside_subpix : int or `~numpy.ndarray`
NSIDE of subpixel.
nside_superpix : int or `~numpy.ndarray`
NSIDE of superpixel.
nest : bool
If True, assume NESTED pixel ordering, otherwise, RING pixel
ordering.
Returns
-------
idx_super : `~numpy.ndarray`
Indices of HEALpix pixels of nside ``nside_superpix`` that
contain pixel indices ``idx`` of nside ``nside_subpix``.
"""
import healpy as hp
idx = np.array(idx)
nside_superpix = np.asarray(nside_superpix)
nside_subpix = np.asarray(nside_subpix)
if not nest:
idx = hp.ring2nest(nside_subpix, idx)
if np.any(~is_power2(nside_superpix)) or np.any(~is_power2(nside_subpix)):
raise ValueError("NSIDE must be a power of 2.")
ratio = np.array((nside_subpix // nside_superpix) ** 2, ndmin=1)
idx //= ratio
if not nest:
m = idx == INVALID_INDEX.int
idx[m] = 0
idx = hp.nest2ring(nside_superpix, idx)
idx[m] = INVALID_INDEX.int
return idx
def get_subpixels(idx, nside_superpix, nside_subpix, nest=True):
"""Compute the indices of subpixels contained within superpixels.
This function returns an output array with one additional
dimension of size N for subpixel indices where N is the maximum
number of subpixels for any pair of ``nside_superpix`` and
``nside_subpix``. If the number of subpixels is less than N the
remaining subpixel indices will be set to -1.
Parameters
----------
idx : `~numpy.ndarray`
Array of HEALPix pixel indices for superpixels of NSIDE
``nside_superpix``.
nside_superpix : int or `~numpy.ndarray`
NSIDE of superpixel.
nside_subpix : int or `~numpy.ndarray`
NSIDE of subpixel.
nest : bool
If True, assume NESTED pixel ordering, otherwise, RING pixel
ordering.
Returns
-------
idx_sub : `~numpy.ndarray`
Indices of HEALpix pixels of nside ``nside_subpix`` contained
within pixel indices ``idx`` of nside ``nside_superpix``.
"""
import healpy as hp
if not nest:
idx = hp.ring2nest(nside_superpix, idx)
idx = np.asarray(idx)
nside_superpix = np.asarray(nside_superpix)
nside_subpix = np.asarray(nside_subpix)
if np.any(~is_power2(nside_superpix)) or np.any(~is_power2(nside_subpix)):
raise ValueError("NSIDE must be a power of 2.")
# number of subpixels in each superpixel
npix = np.array((nside_subpix // nside_superpix) ** 2, ndmin=1)
x = np.arange(np.max(npix), dtype=int)
idx = idx * npix
if not np.all(npix[0] == npix):
x = np.broadcast_to(x, idx.shape + x.shape)
idx = idx[..., None] + x
idx[x >= np.broadcast_to(npix[..., None], x.shape)] = INVALID_INDEX.int
else:
idx = idx[..., None] + x
if not nest:
m = idx == INVALID_INDEX.int
idx[m] = 0
idx = hp.nest2ring(nside_subpix[..., None], idx)
idx[m] = INVALID_INDEX.int
return idx
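# Hedged round-trip sketch (requires healpy, as the helpers above do): the four
# NESTED subpixels of superpixel 0 at NSIDE=2 all map back to superpixel 0 at NSIDE=1.
def _demo_super_sub_pixels():
    sub = get_subpixels(np.array([0]), nside_superpix=1, nside_subpix=2, nest=True)
    # sub -> array([[0, 1, 2, 3]])
    back = get_superpixels(sub, nside_subpix=2, nside_superpix=1, nest=True)
    # back -> array([[0, 0, 0, 0]])
    return sub, back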
class HpxGeom(Geom):
"""Geometry class for HEALPIX maps.
This class performs mapping between partial-sky indices (pixel
number within a HEALPIX region) and all-sky indices (pixel number
within an all-sky HEALPIX map). Multi-band HEALPIX geometries use
a global indexing scheme that assigns a unique pixel number based
on the all-sky index and band index. In the single-band case the
global index is the same as the HEALPIX index.
By default the constructor will return an all-sky map.
Partial-sky maps can be defined with the ``region`` argument.
Parameters
----------
nside : `~numpy.ndarray`
HEALPIX nside parameter, the total number of pixels is
12*nside*nside. For multi-dimensional maps one can pass
either a single nside value or a vector of nside values
defining the pixel size for each image plane. If nside is not
a scalar then its dimensionality should match that of the
non-spatial axes.
nest : bool
True -> 'NESTED', False -> 'RING' indexing scheme
frame : str
Coordinate system, "icrs" | "galactic"
region : str or tuple
Spatial geometry for partial-sky maps. If none the map will
encompass the whole sky. String input will be parsed
according to HPX_REG header keyword conventions. Tuple
input can be used to define an explicit list of pixels
encompassed by the geometry.
axes : list
Axes for non-spatial dimensions.
"""
is_hpx = True
is_region = False
def __init__(
self, nside, nest=True, frame="icrs", region=None, axes=None
):
# FIXME: Require NSIDE to be power of two when nest=True
self._nside = np.array(nside, ndmin=1)
self._axes = MapAxes.from_default(axes, n_spatial_axes=1)
if self.nside.size > 1 and self.nside.shape != self.shape_axes:
raise ValueError(
"Wrong dimensionality for nside. nside must "
"be a scalar or have a dimensionality consistent "
"with the axes argument."
)
self._nest = nest
self._frame = frame
self._ipix = None
self._region = region
self._create_lookup(region)
self._npix = self._npix * np.ones(self.shape_axes, dtype=int)
def _create_lookup(self, region):
"""Create local-to-global pixel lookup table."""
if isinstance(region, str):
ipix = [
self.get_index_list(nside, self._nest, region)
for nside in self._nside.flat
]
self._ipix = [
ravel_hpx_index((p, i * np.ones_like(p)), np.ravel(self.npix_max))
for i, p in enumerate(ipix)
]
self._region = region
self._indxschm = "EXPLICIT"
self._npix = np.array([len(t) for t in self._ipix])
if self.nside.ndim > 1:
self._npix = self._npix.reshape(self.nside.shape)
self._ipix = np.concatenate(self._ipix)
elif isinstance(region, tuple):
region = [np.asarray(t) for t in region]
m = np.any(np.stack([t >= 0 for t in region]), axis=0)
region = [t[m] for t in region]
self._ipix = ravel_hpx_index(region, self.npix_max)
self._ipix = np.unique(self._ipix)
region = unravel_hpx_index(self._ipix, self.npix_max)
self._region = "explicit"
self._indxschm = "EXPLICIT"
if len(region) == 1:
self._npix = np.array([len(region[0])])
else:
self._npix = np.zeros(self.shape_axes, dtype=int)
idx = np.ravel_multi_index(region[1:], self.shape_axes)
cnt = np.unique(idx, return_counts=True)
self._npix.flat[cnt[0]] = cnt[1]
elif region is None:
self._region = None
self._indxschm = "IMPLICIT"
self._npix = self.npix_max
else:
raise ValueError(f"Invalid region string: {region!r}")
def local_to_global(self, idx_local):
"""Compute a local index (partial-sky) from a global (all-sky) index.
Returns
-------
idx_global : tuple
A tuple of pixel index vectors with global HEALPIX pixel indices
"""
if self._ipix is None:
return idx_local
if self.nside.size > 1:
idx = ravel_hpx_index(idx_local, self._npix)
else:
idx_tmp = tuple(
[idx_local[0]] + [np.zeros(t.shape, dtype=int) for t in idx_local[1:]]
)
idx = ravel_hpx_index(idx_tmp, self._npix)
idx_global = unravel_hpx_index(self._ipix[idx], self.npix_max)
return idx_global[:1] + tuple(idx_local[1:])
def global_to_local(self, idx_global, ravel=False):
"""Compute global (all-sky) index from a local (partial-sky) index.
Parameters
----------
idx_global : tuple
A tuple of pixel indices with global HEALPix pixel indices.
ravel : bool
Return a raveled index.
Returns
-------
idx_local : tuple
A tuple of pixel indices with local HEALPIX pixel indices.
"""
if (
isinstance(idx_global, int)
or (isinstance(idx_global, tuple) and isinstance(idx_global[0], int))
or isinstance(idx_global, np.ndarray)
):
idx_global = unravel_hpx_index(np.array(idx_global, ndmin=1), self.npix_max)
if self.nside.size == 1:
idx = np.array(idx_global[0], ndmin=1)
else:
idx = ravel_hpx_index(idx_global, self.npix_max)
if self._ipix is not None:
retval = np.full(idx.size, -1, "i")
m = np.isin(idx.flat, self._ipix)
retval[m] = np.searchsorted(self._ipix, idx.flat[m])
retval = retval.reshape(idx.shape)
else:
retval = idx
if self.nside.size == 1:
idx_local = tuple([retval] + list(idx_global[1:]))
else:
idx_local = unravel_hpx_index(retval, self._npix)
m = np.any(np.stack([t == INVALID_INDEX.int for t in idx_local]), axis=0)
for i, t in enumerate(idx_local):
idx_local[i][m] = INVALID_INDEX.int
if not ravel:
return idx_local
else:
return ravel_hpx_index(idx_local, self.npix)
def cutout(self, position, width, **kwargs):
"""Create a cutout around a given position.
Parameters
----------
position : `~astropy.coordinates.SkyCoord`
Center position of the cutout region.
width : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
Diameter of the circular cutout region.
Returns
-------
cutout : `~gammapy.maps.WcsNDMap`
Cutout map
"""
if not self.is_regular:
raise ValueError("Can only do a cutout from a regular map.")
width = u.Quantity(width, "deg").value
return self.create(
nside=self.nside,
nest=self.nest,
width=width,
skydir=position,
frame=self.frame,
axes=self.axes
)
def coord_to_pix(self, coords):
import healpy as hp
coords = MapCoord.create(coords, frame=self.frame, axis_names=self.axes.names).broadcasted
theta, phi = coords.theta, coords.phi
if self.axes:
idxs = self.axes.coord_to_idx(coords, clip=True)
bins = self.axes.coord_to_pix(coords)
# FIXME: Figure out how to handle coordinates out of
# bounds of non-spatial dimensions
if self.nside.size > 1:
nside = self.nside[tuple(idxs)]
else:
nside = self.nside
m = ~np.isfinite(theta)
theta[m] = 0.0
phi[m] = 0.0
pix = hp.ang2pix(nside, theta, phi, nest=self.nest)
pix = tuple([pix]) + bins
if np.any(m):
for p in pix:
p[m] = INVALID_INDEX.int
else:
pix = (hp.ang2pix(self.nside, theta, phi, nest=self.nest),)
return pix
def pix_to_coord(self, pix):
import healpy as hp
if self.axes:
bins = []
vals = []
for i, ax in enumerate(self.axes):
bins += [pix[1 + i]]
vals += [ax.pix_to_coord(pix[1 + i])]
idxs = pix_tuple_to_idx(bins)
if self.nside.size > 1:
nside = self.nside[idxs]
else:
nside = self.nside
ipix = np.round(pix[0]).astype(int)
m = ipix == INVALID_INDEX.int
ipix[m] = 0
theta, phi = hp.pix2ang(nside, ipix, nest=self.nest)
coords = [np.degrees(phi), np.degrees(np.pi / 2.0 - theta)]
coords = tuple(coords + vals)
if np.any(m):
for c in coords:
c[m] = INVALID_INDEX.float
else:
ipix = np.round(pix[0]).astype(int)
theta, phi = hp.pix2ang(self.nside, ipix, nest=self.nest)
coords = (np.degrees(phi), np.degrees(np.pi / 2.0 - theta))
return coords
def pix_to_idx(self, pix, clip=False):
# FIXME: Look for better method to clip HPX indices
# TODO: copy idx to avoid modifying input pix?
# pix_tuple_to_idx seems to always make a copy!?
idx = pix_tuple_to_idx(pix)
idx_local = self.global_to_local(idx)
for i, _ in enumerate(idx):
if clip:
if i > 0:
np.clip(idx[i], 0, self.axes[i - 1].nbin - 1, out=idx[i])
else:
np.clip(idx[i], 0, None, out=idx[i])
else:
if i > 0:
mask = (idx[i] < 0) | (idx[i] >= self.axes[i - 1].nbin)
np.putmask(idx[i], mask, -1)
else:
mask = (idx_local[i] < 0) | (idx[i] < 0)
np.putmask(idx[i], mask, -1)
return tuple(idx)
@property
def axes(self):
"""List of non-spatial axes."""
return self._axes
@property
def axes_names(self):
"""All axes names"""
return ["skycoord"] + self.axes.names
@property
def shape_axes(self):
"""Shape of non-spatial axes."""
return self.axes.shape
@property
def data_shape(self):
"""Shape of the Numpy data array matching this geometry."""
npix_shape = tuple([np.max(self.npix)])
return (npix_shape + self.axes.shape)[::-1]
@property
def data_shape_axes(self):
"""Shape of data of the non-spatial axes and unit spatial axes."""
return self.axes.shape[::-1] + (1,)
@property
def ndim(self):
"""Number of dimensions (int)."""
return len(self._axes) + 2
@property
def ordering(self):
"""HEALPix ordering ('NESTED' or 'RING')."""
return "NESTED" if self.nest else "RING"
@property
def nside(self):
"""NSIDE in each band."""
return self._nside
@property
def order(self):
"""ORDER in each band (``NSIDE = 2 ** ORDER``).
Set to -1 for bands with NSIDE that is not a power of 2.
"""
return nside_to_order(self.nside)
@property
def nest(self):
"""Is HEALPix order nested? (bool)."""
return self._nest
@property
def npix(self):
"""Number of pixels in each band.
For partial-sky geometries this can
be less than the number of pixels for the band NSIDE.
"""
return self._npix
@property
def npix_max(self):
"""Max. number of pixels"""
maxpix = 12 * self.nside ** 2
return maxpix * np.ones(self.shape_axes, dtype=int)
@property
def frame(self):
return self._frame
@property
def projection(self):
"""Map projection."""
return "HPX"
@property
def region(self):
"""Region string."""
return self._region
@property
def is_allsky(self):
"""Flag for all-sky maps."""
return self._region is None
@property
def is_regular(self):
"""Flag identifying whether this geometry is regular in non-spatial dimensions.
False for multi-resolution or irregular geometries.
If true all image planes have the same pixel geometry.
"""
if self.nside.size > 1 or self.region == "explicit":
return False
else:
return True
@property
def center_coord(self):
"""Map coordinates of the center of the geometry (tuple)."""
lon, lat, frame = skycoord_to_lonlat(self.center_skydir)
return tuple([lon, lat]) + self.axes.center_coord
@property
def center_pix(self):
"""Pixel coordinates of the center of the geometry (tuple)."""
return self.coord_to_pix(self.center_coord)
@property
def center_skydir(self):
"""Sky coordinate of the center of the geometry.
Returns
-------
center : `~astropy.coordinates.SkyCoord`
Center position
"""
# TODO: simplify
import healpy as hp
if self.is_allsky:
lon, lat = 0., 0.
elif self.region == "explicit":
idx = unravel_hpx_index(self._ipix, self.npix_max)
nside = self._get_nside(idx)
vec = hp.pix2vec(nside, idx[0], nest=self.nest)
vec = np.array([np.mean(t) for t in vec])
lonlat = hp.vec2ang(vec, lonlat=True)
lon, lat = lonlat[0], lonlat[1]
else:
tokens = parse_hpxregion(self.region)
if tokens[0] in ["DISK", "DISK_INC"]:
lon, lat = float(tokens[1]), float(tokens[2])
elif tokens[0] == "HPX_PIXEL":
nside_pix = int(tokens[2])
ipix_pix = int(tokens[3])
if tokens[1] == "NESTED":
nest_pix = True
elif tokens[1] == "RING":
nest_pix = False
else:
raise ValueError(f"Invalid ordering scheme: {tokens[1]!r}")
theta, phi = hp.pix2ang(nside_pix, ipix_pix, nest_pix)
lat = np.degrees((np.pi / 2) - theta)
lon = np.degrees(phi)
return SkyCoord(lon, lat, frame=self.frame, unit="deg")
def interp_weights(self, coords, idxs=None):
"""Get interpolation weights for given coords
Parameters
----------
coords : `MapCoord` or dict
Input coordinates
idxs : `~numpy.ndarray`
Indices for non-spatial axes.
Returns
-------
weights : `~numpy.ndarray`
Interpolation weights
"""
import healpy as hp
coords = MapCoord.create(coords, frame=self.frame).broadcasted
if idxs is None:
idxs = self.coord_to_idx(coords, clip=True)[1:]
theta, phi = coords.theta, coords.phi
m = ~np.isfinite(theta)
theta[m] = 0
phi[m] = 0
if not self.is_regular:
nside = self.nside[tuple(idxs)]
else:
nside = self.nside
pix, wts = hp.get_interp_weights(nside, theta, phi, nest=self.nest)
wts[:, m] = 0
pix[:, m] = INVALID_INDEX.int
if not self.is_regular:
pix_local = [self.global_to_local([pix] + list(idxs))[0]]
else:
pix_local = [self.global_to_local(pix, ravel=True)]
# If a pixel lies outside of the geometry set its index to the center pixel
m = pix_local[0] == INVALID_INDEX.int
if m.any():
coords_ctr = [coords.lon, coords.lat]
coords_ctr += [ax.pix_to_coord(t) for ax, t in zip(self.axes, idxs)]
idx_ctr = self.coord_to_idx(coords_ctr)
idx_ctr = self.global_to_local(idx_ctr)
pix_local[0][m] = (idx_ctr[0] * np.ones(pix.shape, dtype=int))[m]
pix_local += [np.broadcast_to(t, pix_local[0].shape) for t in idxs]
return pix_local, wts
@property
def ipix(self):
"""HEALPIX pixel and band indices for every pixel in the map."""
return self.get_idx()
def is_aligned(self, other):
"""Check if HEALPIx geoms and extra axes are aligned.
Parameters
----------
other : `HpxGeom`
Other geom.
Returns
-------
aligned : bool
Whether geometries are aligned
"""
for axis, otheraxis in zip(self.axes, other.axes):
if axis != otheraxis:
return False
if not self.nside == other.nside:
return False
elif not self.frame == other.frame:
return False
elif not self.nest == other.nest:
return False
else:
return True
def to_nside(self, nside):
"""Upgrade or downgrade the reoslution to a given nside
Parameters
----------
nside : int
Nside
Returns
-------
geom : `~HpxGeom`
A HEALPix geometry object.
"""
if not self.is_regular:
raise ValueError("Upgrade and degrade only implemented for standard maps")
axes = copy.deepcopy(self.axes)
return self.__class__(
nside=nside,
nest=self.nest,
frame=self.frame,
region=self.region,
axes=axes
)
def to_binsz(self, binsz):
"""Change pixel size of the geometry.
Parameters
----------
binsz : float or `~astropy.units.Quantity`
New pixel size. A float is assumed to be in degree.
Returns
-------
geom : `WcsGeom`
Geometry with new pixel size.
"""
binsz = u.Quantity(binsz, "deg").value
if self.is_allsky:
return self.create(
binsz=binsz,
frame=self.frame,
axes=copy.deepcopy(self.axes),
)
else:
return self.create(
skydir=self.center_skydir,
binsz=binsz,
width=self.width.to_value("deg"),
frame=self.frame,
axes=copy.deepcopy(self.axes),
)
def separation(self, center):
"""Compute sky separation wrt a given center.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
Center position
Returns
-------
separation : `~astropy.coordinates.Angle`
Separation angle array (1D)
"""
coord = self.to_image().get_coord()
return center.separation(coord.skycoord)
def to_swapped(self):
"""Geometry copy with swapped ORDERING (NEST->RING or vice versa).
Returns
-------
geom : `~HpxGeom`
A HEALPix geometry object.
"""
axes = copy.deepcopy(self.axes)
return self.__class__(
self.nside, not self.nest, frame=self.frame, region=self.region, axes=axes,
)
def to_image(self):
return self.__class__(
np.max(self.nside), self.nest, frame=self.frame, region=self.region
)
def to_cube(self, axes):
axes = copy.deepcopy(self.axes) + axes
return self.__class__(
np.max(self.nside),
self.nest,
frame=self.frame,
region=self.region,
axes=axes,
)
def _get_neighbors(self, idx):
import healpy as hp
nside = self._get_nside(idx)
idx_nb = (hp.get_all_neighbours(nside, idx[0], nest=self.nest),)
idx_nb += tuple([t[None, ...] * np.ones_like(idx_nb[0]) for t in idx[1:]])
return idx_nb
def _pad_spatial(self, pad_width):
if self.is_allsky:
raise ValueError("Cannot pad an all-sky map.")
idx = self.get_idx(flat=True)
idx_r = ravel_hpx_index(idx, self.npix_max)
# TODO: Pre-filter indices to find those close to the edge
idx_nb = self._get_neighbors(idx)
idx_nb = ravel_hpx_index(idx_nb, self.npix_max)
for _ in range(pad_width):
mask_edge = np.isin(idx_nb, idx_r, invert=True)
idx_edge = idx_nb[mask_edge]
idx_edge = np.unique(idx_edge)
idx_r = np.sort(np.concatenate((idx_r, idx_edge)))
idx_nb = unravel_hpx_index(idx_edge, self.npix_max)
idx_nb = self._get_neighbors(idx_nb)
idx_nb = ravel_hpx_index(idx_nb, self.npix_max)
idx = unravel_hpx_index(idx_r, self.npix_max)
return self.__class__(
self.nside.copy(),
self.nest,
frame=self.frame,
region=idx,
axes=copy.deepcopy(self.axes),
)
def crop(self, crop_width):
if self.is_allsky:
raise ValueError("Cannot crop an all-sky map.")
idx = self.get_idx(flat=True)
idx_r = ravel_hpx_index(idx, self.npix_max)
# TODO: Pre-filter indices to find those close to the edge
idx_nb = self._get_neighbors(idx)
idx_nb = ravel_hpx_index(idx_nb, self.npix_max)
for _ in range(crop_width):
# Mask of pixels that have at least one neighbor not
# contained in the geometry
mask_edge = np.any(np.isin(idx_nb, idx_r, invert=True), axis=0)
idx_r = idx_r[~mask_edge]
idx_nb = idx_nb[:, ~mask_edge]
idx = unravel_hpx_index(idx_r, self.npix_max)
return self.__class__(
self.nside.copy(),
self.nest,
frame=self.frame,
region=idx,
axes=copy.deepcopy(self.axes),
)
def upsample(self, factor):
if not is_power2(factor):
raise ValueError("Upsample factor must be a power of 2.")
if self.is_allsky:
return self.__class__(
self.nside * factor,
self.nest,
frame=self.frame,
region=self.region,
axes=copy.deepcopy(self.axes),
)
idx = list(self.get_idx(flat=True))
nside = self._get_nside(idx)
idx_new = get_subpixels(idx[0], nside, nside * factor, nest=self.nest)
for i in range(1, len(idx)):
idx[i] = idx[i][..., None] * np.ones(idx_new.shape, dtype=int)
idx[0] = idx_new
return self.__class__(
self.nside * factor,
self.nest,
frame=self.frame,
region=tuple(idx),
axes=copy.deepcopy(self.axes),
)
def downsample(self, factor):
if not is_power2(factor):
raise ValueError("Downsample factor must be a power of 2.")
if self.is_allsky:
return self.__class__(
self.nside // factor,
self.nest,
frame=self.frame,
region=self.region,
axes=copy.deepcopy(self.axes),
)
idx = list(self.get_idx(flat=True))
nside = self._get_nside(idx)
idx_new = get_superpixels(idx[0], nside, nside // factor, nest=self.nest)
idx[0] = idx_new
return self.__class__(
self.nside // factor,
self.nest,
frame=self.frame,
region=tuple(idx),
axes=copy.deepcopy(self.axes),
)
@classmethod
def create(
cls,
nside=None,
binsz=None,
nest=True,
frame="icrs",
region=None,
axes=None,
skydir=None,
width=None,
):
"""Create an HpxGeom object.
Parameters
----------
nside : int or `~numpy.ndarray`
HEALPix NSIDE parameter. This parameter sets the size of
the spatial pixels in the map.
binsz : float or `~numpy.ndarray`
Approximate pixel size in degrees. An NSIDE will be
            chosen that corresponds to a pixel size closest to this
value. This option is superseded by nside.
nest : bool
True for HEALPIX "NESTED" indexing scheme, False for "RING" scheme
frame : {"icrs", "galactic"}, optional
Coordinate system, either Galactic ("galactic") or Equatorial ("icrs").
skydir : tuple or `~astropy.coordinates.SkyCoord`
Sky position of map center. Can be either a SkyCoord
object or a tuple of longitude and latitude in deg in the
coordinate system of the map.
region : str
HPX region string. Allows for partial-sky maps.
width : float
Diameter of the map in degrees. If set the map will
encompass all pixels within a circular region centered on
``skydir``.
axes : list
List of axes for non-spatial dimensions.
Returns
-------
geom : `~HpxGeom`
A HEALPix geometry object.
Examples
--------
>>> from gammapy.maps import HpxGeom, MapAxis
>>> axis = MapAxis.from_bounds(0,1,2)
>>> geom = HpxGeom.create(nside=16)
>>> geom = HpxGeom.create(binsz=0.1, width=10.0)
>>> geom = HpxGeom.create(nside=64, width=10.0, axes=[axis])
>>> geom = HpxGeom.create(nside=[32,64], width=10.0, axes=[axis])
"""
if nside is None and binsz is None:
raise ValueError("Either nside or binsz must be defined.")
if nside is None and binsz is not None:
nside = get_nside_from_pix_size(binsz)
if skydir is None:
lon, lat = (0.0, 0.0)
elif isinstance(skydir, tuple):
lon, lat = skydir
elif isinstance(skydir, SkyCoord):
lon, lat, frame = skycoord_to_lonlat(skydir, frame=frame)
else:
raise ValueError(f"Invalid type for skydir: {type(skydir)!r}")
if region is None and width is not None:
region = f"DISK({lon},{lat},{width/2})"
return cls(nside, nest=nest, frame=frame, region=region, axes=axes)
@classmethod
def from_header(cls, header, hdu_bands=None, format=None):
"""Create an HPX object from a FITS header.
Parameters
----------
header : `~astropy.io.fits.Header`
The FITS header
hdu_bands : `~astropy.io.fits.BinTableHDU`
The BANDS table HDU.
format : str, optional
FITS convention. If None the format is guessed. The following
formats are supported:
- "gadf"
- "fgst-ccube"
- "fgst-ltcube"
- "fgst-bexpcube"
- "fgst-srcmap"
- "fgst-template"
- "fgst-srcmap-sparse"
- "galprop"
- "galprop2"
- "healpy"
Returns
-------
hpx : `~HpxGeom`
HEALPix geometry.
"""
if format is None:
format = HpxConv.identify_hpx_format(header)
conv = HPX_FITS_CONVENTIONS[format]
axes = MapAxes.from_table_hdu(hdu_bands, format=format)
if header["PIXTYPE"] != "HEALPIX":
raise ValueError(
f"Invalid header PIXTYPE: {header['PIXTYPE']} (must be HEALPIX)"
)
if header["ORDERING"] == "RING":
nest = False
elif header["ORDERING"] == "NESTED":
nest = True
else:
raise ValueError(
f"Invalid header ORDERING: {header['ORDERING']} (must be RING or NESTED)"
)
if hdu_bands is not None and "NSIDE" in hdu_bands.columns.names:
nside = hdu_bands.data.field("NSIDE").reshape(axes.shape).astype(int)
elif "NSIDE" in header:
nside = header["NSIDE"]
elif "ORDER" in header:
nside = 2 ** header["ORDER"]
else:
raise ValueError("Failed to extract NSIDE or ORDER.")
try:
frame = coordsys_to_frame(header[conv.frame])
except KeyError:
frame = header.get("COORDSYS", "icrs")
try:
region = header["HPX_REG"]
except KeyError:
try:
region = header["HPXREGION"]
except KeyError:
region = None
return cls(nside, nest, frame=frame, region=region, axes=axes)
@classmethod
def from_hdu(cls, hdu, hdu_bands=None):
"""Create an HPX object from a BinTable HDU.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
The FITS HDU
hdu_bands : `~astropy.io.fits.BinTableHDU`
The BANDS table HDU
Returns
-------
hpx : `~HpxGeom`
HEALPix geometry.
"""
# FIXME: Need correct handling of IMPLICIT and EXPLICIT maps
# if HPX region is not defined then geometry is defined by
# the set of all pixels in the table
if "HPX_REG" not in hdu.header:
pix = (hdu.data.field("PIX"), hdu.data.field("CHANNEL"))
else:
pix = None
return cls.from_header(hdu.header, hdu_bands=hdu_bands, pix=pix)
def to_header(self, format="gadf", **kwargs):
"""Build and return FITS header for this HEALPIX map."""
header = fits.Header()
format = kwargs.get("format", HPX_FITS_CONVENTIONS[format])
# FIXME: For some sparse maps we may want to allow EXPLICIT
# with an empty region string
indxschm = kwargs.get("indxschm", None)
if indxschm is None:
if self._region is None:
indxschm = "IMPLICIT"
elif self.is_regular == 1:
indxschm = "EXPLICIT"
else:
indxschm = "LOCAL"
if "FGST" in format.convname.upper():
header["TELESCOP"] = "GLAST"
header["INSTRUME"] = "LAT"
header[format.frame] = frame_to_coordsys(self.frame)
header["PIXTYPE"] = "HEALPIX"
header["ORDERING"] = self.ordering
header["INDXSCHM"] = indxschm
header["ORDER"] = np.max(self.order)
header["NSIDE"] = np.max(self.nside)
header["FIRSTPIX"] = 0
header["LASTPIX"] = np.max(self.npix_max) - 1
header["HPX_CONV"] = format.convname.upper()
if self.frame == "icrs":
header["EQUINOX"] = (2000.0, "Equinox of RA & DEC specifications")
if self.region:
header["HPX_REG"] = self._region
return header
def _make_bands_cols(self):
cols = []
if self.nside.size > 1:
cols += [fits.Column("NSIDE", "I", array=np.ravel(self.nside))]
return cols
@staticmethod
def get_index_list(nside, nest, region):
"""Get list of pixels indices for all the pixels in a region.
Parameters
----------
nside : int
HEALPIX nside parameter
nest : bool
True for 'NESTED', False = 'RING'
region : str
HEALPIX region string
Returns
-------
ilist : `~numpy.ndarray`
List of pixel indices.
"""
import healpy as hp
# TODO: this should return something more friendly than a tuple
# e.g. a namedtuple or a dict
tokens = parse_hpxregion(region)
reg_type = tokens[0]
if reg_type == "DISK":
lon, lat = float(tokens[1]), float(tokens[2])
radius = np.radians(float(tokens[3]))
vec = coords_to_vec(lon, lat)[0]
ilist = hp.query_disc(nside, vec, radius, inclusive=False, nest=nest)
elif reg_type == "DISK_INC":
lon, lat = float(tokens[1]), float(tokens[2])
radius = np.radians(float(tokens[3]))
vec = coords_to_vec(lon, lat)[0]
fact = int(tokens[4])
ilist = hp.query_disc(
nside, vec, radius, inclusive=True, nest=nest, fact=fact
)
elif reg_type == "HPX_PIXEL":
nside_pix = int(tokens[2])
if tokens[1] == "NESTED":
ipix_ring = hp.nest2ring(nside_pix, int(tokens[3]))
elif tokens[1] == "RING":
ipix_ring = int(tokens[3])
else:
raise ValueError(f"Invalid ordering scheme: {tokens[1]!r}")
ilist = match_hpx_pix(nside, nest, nside_pix, ipix_ring)
else:
raise ValueError(f"Invalid region type: {reg_type!r}")
return ilist
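    # Editorial example (comment only, not in the original): indices of all nside=64
    # NESTED pixels inside a 10 deg disk centred on (lon, lat) = (83.6, 22.0),
    # using the DISK region syntax handled above.
    #
    #     ipix = HpxGeom.get_index_list(64, True, "DISK(83.6,22.0,10.0)")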
@property
def width(self):
"""Width of the map"""
# TODO: simplify
import healpy as hp
if self.is_allsky:
width = 180.
elif self.region == "explicit":
idx = unravel_hpx_index(self._ipix, self.npix_max)
nside = self._get_nside(idx)
ang = hp.pix2ang(nside, idx[0], nest=self.nest, lonlat=True)
dirs = SkyCoord(ang[0], ang[1], unit="deg", frame=self.frame)
width = np.max(dirs.separation(self.center_skydir))
else:
tokens = parse_hpxregion(self.region)
if tokens[0] in {"DISK", "DISK_INC"}:
width = float(tokens[3])
elif tokens[0] == "HPX_PIXEL":
pix_size = get_pix_size_from_nside(int(tokens[2]))
width = 2.0 * pix_size
return u.Quantity(width, "deg")
def _get_nside(self, idx):
if self.nside.size > 1:
return self.nside[tuple(idx[1:])]
else:
return self.nside
def to_wcs_geom(self, proj="AIT", oversample=2, width_pix=None):
"""Make a WCS projection appropriate for this HPX pixelization.
Parameters
----------
proj : str
Projection type of WCS geometry.
oversample : float
Oversampling factor for WCS map. This will be the
approximate ratio of the width of a HPX pixel to a WCS
pixel. If this parameter is None then the width will be
set from ``width_pix``.
width_pix : int
Width of the WCS geometry in pixels. The pixel size will
be set to the number of pixels satisfying ``oversample``
or ``width_pix`` whichever is smaller. If this parameter
is None then the width will be set from ``oversample``.
Returns
-------
wcs : `~gammapy.maps.WcsGeom`
WCS geometry
"""
pix_size = get_pix_size_from_nside(self.nside)
binsz = np.min(pix_size) / oversample
width = 2.0 * self.width.to_value("deg") + np.max(pix_size)
if width_pix is not None and int(width / binsz) > width_pix:
binsz = width / width_pix
if width > 90.0:
width = min(360.0, width), min(180.0, width)
axes = copy.deepcopy(self.axes)
return WcsGeom.create(
width=width,
binsz=binsz,
frame=self.frame,
axes=axes,
skydir=self.center_skydir,
proj=proj,
)
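    # Editorial illustration (comment only): a WCS view whose pixels are roughly half
    # the HEALPix pixel size, but never more than 1000 pixels across. The result can
    # be fed to HpxToWcsMapping.create() defined further below.
    #
    #     wcs_geom = geom.to_wcs_geom(proj="AIT", oversample=2, width_pix=1000)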
def to_wcs_tiles(self, nside_tiles=4, margin="0 deg"):
"""Create WCS tiles geometries from HPX geometry with given nside.
The HEALPix geom is divide into superpixels defined by nside_tiles,
which are then represented by a WCS geometry using a tangential
projection. The number of WCS tiles is given by the number of pixels
for the given nside_tiles.
Parameters
----------
        nside_tiles : int
            Nside of the super pixel tiles; must be smaller than the nside of this geometry.
        margin : Angle
            Width margin of the WCS tile.
        Returns
        -------
wcs_tiles : list
List of WCS tile geoms.
"""
import healpy as hp
margin = u.Quantity(margin)
if nside_tiles >= self.nside:
raise ValueError(f"nside_tiles must be < {self.nside}")
if not self.is_allsky:
raise ValueError("to_wcs_tiles() is only supported for all sky geoms")
binsz = np.degrees(hp.nside2resol(self.nside)) * u.deg
hpx = self.to_image().to_nside(nside=nside_tiles)
wcs_tiles = []
for pix in range(int(hpx.npix)):
skydir = hpx.pix_to_coord([pix])
vtx = hp.boundaries(
nside=hpx.nside, pix=pix, nest=hpx.nest, step=1
)
lon, lat = hp.vec2ang(vtx.T, lonlat=True)
boundaries = SkyCoord(lon * u.deg, lat * u.deg, frame=hpx.frame)
# Compute maximum separation between all pairs of boundaries and take it
# as width
width = boundaries.separation(boundaries[:, np.newaxis]).max()
wcs_tile_geom = WcsGeom.create(
skydir=(float(skydir[0]), float(skydir[1])),
width=width + margin,
binsz=binsz,
frame=hpx.frame,
proj="TAN",
axes=self.axes
)
wcs_tiles.append(wcs_tile_geom)
return wcs_tiles
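    # Editorial illustration (comment only): tiling an all-sky NESTED geometry into
    # the 12 * nside_tiles**2 = 192 tangential tiles implied by nside_tiles=4.
    #
    #     geom = HpxGeom.create(nside=256, nest=True)
    #     tiles = geom.to_wcs_tiles(nside_tiles=4, margin="0.1 deg")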
def get_idx(self, idx=None, local=False, flat=False, sparse=False, mode="center", axis_name=None):
# TODO: simplify this!!!
if idx is not None and np.any(np.array(idx) >= np.array(self.shape_axes)):
raise ValueError(f"Image index out of range: {idx!r}")
# Regular all- and partial-sky maps
if self.is_regular:
pix = [np.arange(np.max(self._npix))]
if idx is None:
for ax in self.axes:
if mode == "edges" and ax.name == axis_name:
pix += [np.arange(-0.5, ax.nbin, dtype=float)]
else:
pix += [np.arange(ax.nbin, dtype=int)]
else:
pix += [t for t in idx]
pix = np.meshgrid(*pix[::-1], indexing="ij", sparse=sparse)[::-1]
pix = self.local_to_global(pix)
# Non-regular all-sky
elif self.is_allsky and not self.is_regular:
shape = (np.max(self.npix),)
if idx is None:
shape = shape + self.shape_axes
else:
shape = shape + (1,) * len(self.axes)
pix = [np.full(shape, -1, dtype=int) for i in range(1 + len(self.axes))]
for idx_img in np.ndindex(self.shape_axes):
if idx is not None and idx_img != idx:
continue
npix = self._npix[idx_img]
if idx is None:
s_img = (slice(0, npix),) + idx_img
else:
s_img = (slice(0, npix),) + (0,) * len(self.axes)
pix[0][s_img] = np.arange(self._npix[idx_img])
for j in range(len(self.axes)):
pix[j + 1][s_img] = idx_img[j]
pix = [p.T for p in pix]
# Explicit pixel indices
else:
if idx is not None:
npix_sum = np.concatenate(([0], np.cumsum(self._npix)))
idx_ravel = np.ravel_multi_index(idx, self.shape_axes)
s = slice(npix_sum[idx_ravel], npix_sum[idx_ravel + 1])
else:
s = slice(None)
pix_flat = unravel_hpx_index(self._ipix[s], self.npix_max)
shape = (np.max(self.npix),)
if idx is None:
shape = shape + self.shape_axes
else:
shape = shape + (1,) * len(self.axes)
pix = [np.full(shape, -1, dtype=int) for _ in range(1 + len(self.axes))]
for idx_img in np.ndindex(self.shape_axes):
if idx is not None and idx_img != idx:
continue
npix = int(self._npix[idx_img])
if idx is None:
s_img = (slice(0, npix),) + idx_img
else:
s_img = (slice(0, npix),) + (0,) * len(self.axes)
if self.axes:
m = np.all(
np.stack([pix_flat[i + 1] == t for i, t in enumerate(idx_img)]),
axis=0,
)
pix[0][s_img] = pix_flat[0][m]
else:
pix[0][s_img] = pix_flat[0]
for j in range(len(self.axes)):
pix[j + 1][s_img] = idx_img[j]
pix = [p.T for p in pix]
if local:
pix = self.global_to_local(pix)
if flat:
pix = tuple([p[p != INVALID_INDEX.int] for p in pix])
return pix
def region_mask(self, regions):
"""Create a mask from a given list of regions
The mask is filled such that a pixel inside the region is filled with
"True". To invert the mask, e.g. to create a mask with exclusion regions
the tilde (~) operator can be used (see example below).
Parameters
----------
regions : str, `~regions.Region` or list of `~regions.Region`
Region or list of regions (pixel or sky regions accepted).
            A region can be defined as a string in DS9 format as well.
See http://ds9.si.edu/doc/ref/region.html for details.
Returns
-------
        mask_map : `~gammapy.maps.HpxNDMap` of boolean type
            Boolean region mask.
"""
from . import Map, RegionGeom
if not self.is_regular:
raise ValueError("Multi-resolution maps not supported yet")
# TODO: use spatial coordinates only...
geom = RegionGeom.from_regions(regions)
coords = self.get_coord()
mask = geom.contains(coords)
return Map.from_geom(self, data=mask)
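    # Editorial sketch (comment only): an exclusion mask built from an illustrative
    # DS9-style circle string and inverted with ~, as described in the docstring above.
    #
    #     mask = geom.region_mask(["icrs;circle(83.63, 22.01, 0.5)"])
    #     exclusion_mask = ~mask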
def get_coord(self, idx=None, flat=False, sparse=False, mode="center", axis_name=None):
if mode == "edges" and axis_name is None:
raise ValueError("Mode 'edges' requires axis name to be defined")
pix = self.get_idx(idx=idx, flat=flat, sparse=sparse, mode=mode, axis_name=axis_name)
data = self.pix_to_coord(pix)
coords = MapCoord.create(
data=data, frame=self.frame, axis_names=self.axes.names
)
return coords
def contains(self, coords):
idx = self.coord_to_idx(coords)
return np.all(np.stack([t != INVALID_INDEX.int for t in idx]), axis=0)
def solid_angle(self):
"""Solid angle array (`~astropy.units.Quantity` in ``sr``).
The array has the same dimensionality as ``map.nside``
since all pixels have the same solid angle.
"""
import healpy as hp
return Quantity(hp.nside2pixarea(self.nside), "sr")
def __repr__(self):
lon, lat = self.center_skydir.data.lon.deg, self.center_skydir.data.lat.deg
return (
f"{self.__class__.__name__}\n\n"
f"\taxes : {self.axes_names}\n"
f"\tshape : {self.data_shape[::-1]}\n"
f"\tndim : {self.ndim}\n"
f"\tnside : {self.nside[0]}\n"
f"\tnested : {self.nest}\n"
f"\tframe : {self.frame}\n"
f"\tprojection : {self.projection}\n"
f"\tcenter : {lon:.1f} deg, {lat:.1f} deg\n"
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
if self.is_allsky and other.is_allsky is False:
return NotImplemented
# check overall shape and axes compatibility
if self.data_shape != other.data_shape:
return False
for axis, otheraxis in zip(self.axes, other.axes):
if axis != otheraxis:
return False
return (
self.nside == other.nside
and self.frame == other.frame
and self.order == other.order
and self.nest == other.nest
)
def __ne__(self, other):
return not self.__eq__(other)
class HpxToWcsMapping:
"""Stores the indices need to convert from HEALPIX to WCS.
Parameters
----------
hpx : `~HpxGeom`
HEALPix geometry object.
wcs : `~gammapy.maps.WcsGeom`
WCS geometry object.
"""
def __init__(self, hpx, wcs, ipix, mult_val, npix):
self._hpx = hpx
self._wcs = wcs
self._ipix = ipix
self._mult_val = mult_val
self._npix = npix
@property
def hpx(self):
"""HEALPIX projection."""
return self._hpx
@property
def wcs(self):
"""WCS projection."""
return self._wcs
@property
def ipix(self):
"""An array(nx,ny) of the global HEALPIX pixel indices for each WCS pixel."""
return self._ipix
@property
def mult_val(self):
"""An array(nx,ny) of 1/number of WCS pixels pointing at each HEALPIX pixel."""
return self._mult_val
@property
def npix(self):
"""A tuple(nx,ny) of the shape of the WCS grid."""
return self._npix
@lazyproperty
def lmap(self):
"""Array ``(nx, ny)`` mapping local HEALPIX pixel indices for each WCS pixel."""
return self.hpx.global_to_local(self.ipix, ravel=True)
@property
def valid(self):
"""Array ``(nx, ny)`` of bool: which WCS pixel in inside the HEALPIX region."""
return self.lmap >= 0
@classmethod
def create(cls, hpx, wcs):
"""Create HEALPix to WCS geometry pixel mapping.
Parameters
----------
hpx : `~HpxGeom`
HEALPix geometry object.
wcs : `~gammapy.maps.WcsGeom`
WCS geometry object.
Returns
-------
hpx2wcs : `~HpxToWcsMapping`
Mapping
"""
import healpy as hp
npix = wcs.npix
# FIXME: Calculation of WCS pixel centers should be moved into a
# method of WcsGeom
pix_crds = np.dstack(np.meshgrid(np.arange(npix[0]), np.arange(npix[1])))
pix_crds = pix_crds.swapaxes(0, 1).reshape((-1, 2))
sky_crds = wcs.wcs.wcs_pix2world(pix_crds, 0)
sky_crds *=
|
np.radians(1.0)
|
numpy.radians
|
'''
If you are looking for the drift algorithms, please check the
"algorithms" directory at the top level of this repo, which contains
easier to read versions written in Python, Matlab, and R. This file
contains versions of the algorithms that are adapted to our evaluation
and visualization pipelines.
'''
import numpy as np
from sklearn.cluster import KMeans
from scipy.optimize import minimize
from scipy.stats import norm
def correct_drift(method, fixation_XY, passage, return_line_assignments=False, **params):
function = globals()[method]
fixation_XY = np.array(fixation_XY, dtype=int)
line_positions = np.array(passage.midlines, dtype=int)
if method in ['compare', 'warp']:
word_centers = np.array(passage.word_centers(), dtype=int)
return function(fixation_XY, line_positions, word_centers, return_line_assignments=return_line_assignments, **params)
return function(fixation_XY, line_positions, return_line_assignments=return_line_assignments, **params)
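# Editorial usage note (not in the original file): `passage` is expected to expose
# integer line midpoints via `passage.midlines` and, for the 'compare' and 'warp'
# methods, word centres via `passage.word_centers()`. A typical call looks like:
#
#     corrected_XY = correct_drift('attach', fixation_XY, passage)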
def attach(fixation_XY, line_Y, return_line_assignments=False):
n = len(fixation_XY)
######################### FOR SIMULATIONS #########################
if return_line_assignments:
line_assignments = []
for fixation_i in range(n):
line_i = np.argmin(abs(line_Y - fixation_XY[fixation_i, 1]))
line_assignments.append(line_i)
return np.array(line_assignments, dtype=int)
###################################################################
for fixation_i in range(n):
line_i = np.argmin(abs(line_Y - fixation_XY[fixation_i, 1]))
fixation_XY[fixation_i, 1] = line_Y[line_i]
return fixation_XY
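# Editorial sketch (not part of the original algorithm suite): the nearest-line rule
# used by attach() expressed as a single broadcasted assignment, for readers who
# prefer vectorized NumPy over an explicit loop.
def attach_vectorized(fixation_XY, line_Y):
    fixation_XY = np.array(fixation_XY, dtype=int)
    line_Y = np.array(line_Y, dtype=int)
    # (n_fixations, n_lines) matrix of |y_fixation - y_line|, minimised along lines
    line_I = np.argmin(np.abs(fixation_XY[:, 1:2] - line_Y[np.newaxis, :]), axis=1)
    fixation_XY[:, 1] = line_Y[line_I]
    return fixation_XY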
def chain(fixation_XY, line_Y, x_thresh=192, y_thresh=32, return_line_assignments=False):
n = len(fixation_XY)
dist_X = abs(np.diff(fixation_XY[:, 0]))
dist_Y = abs(np.diff(fixation_XY[:, 1]))
end_chain_indices = list(np.where(np.logical_or(dist_X > x_thresh, dist_Y > y_thresh))[0] + 1)
end_chain_indices.append(n)
start_of_chain = 0
######################### FOR SIMULATIONS #########################
if return_line_assignments:
line_assignments = []
for end_of_chain in end_chain_indices:
mean_y = np.mean(fixation_XY[start_of_chain:end_of_chain, 1])
line_i = np.argmin(abs(line_Y - mean_y))
line_assignments.extend( [line_i] * (end_of_chain - start_of_chain) )
start_of_chain = end_of_chain
return np.array(line_assignments, dtype=int)
###################################################################
for end_of_chain in end_chain_indices:
mean_y = np.mean(fixation_XY[start_of_chain:end_of_chain, 1])
line_i = np.argmin(abs(line_Y - mean_y))
fixation_XY[start_of_chain:end_of_chain, 1] = line_Y[line_i]
start_of_chain = end_of_chain
return fixation_XY
def cluster(fixation_XY, line_Y, return_line_assignments=False):
m = len(line_Y)
fixation_Y = fixation_XY[:, 1].reshape(-1, 1)
clusters = KMeans(m, n_init=100, max_iter=300).fit_predict(fixation_Y)
centers = [fixation_Y[clusters == i].mean() for i in range(m)]
ordered_cluster_indices = np.argsort(centers)
######################### FOR SIMULATIONS #########################
if return_line_assignments:
line_assignments = []
for fixation_i, cluster_i in enumerate(clusters):
line_i = np.where(ordered_cluster_indices == cluster_i)[0][0]
line_assignments.append(line_i)
return np.array(line_assignments, dtype=int)
###################################################################
for fixation_i, cluster_i in enumerate(clusters):
line_i = np.where(ordered_cluster_indices == cluster_i)[0][0]
fixation_XY[fixation_i, 1] = line_Y[line_i]
return fixation_XY
def compare(fixation_XY, line_Y, word_XY, x_thresh=512, n_nearest_lines=3, return_line_assignments=False):
n = len(fixation_XY)
diff_X = np.diff(fixation_XY[:, 0])
end_line_indices = list(np.where(diff_X < -x_thresh)[0] + 1)
end_line_indices.append(n)
line_assignments = []
start_of_line = 0
for end_of_line in end_line_indices:
gaze_line = fixation_XY[start_of_line:end_of_line]
mean_y = np.mean(gaze_line[:, 1])
lines_ordered_by_proximity = np.argsort(abs(line_Y - mean_y))
nearest_line_I = lines_ordered_by_proximity[:n_nearest_lines]
line_costs = np.zeros(n_nearest_lines)
text_lines = []
warping_paths = []
for candidate_i in range(n_nearest_lines):
candidate_line_i = nearest_line_I[candidate_i]
text_line = word_XY[word_XY[:, 1] == line_Y[candidate_line_i]]
dtw_cost, warping_path = dynamic_time_warping(gaze_line[:, 0:1], text_line[:, 0:1])
line_costs[candidate_i] = dtw_cost
text_lines.append(text_line)
warping_paths.append(warping_path)
line_i = nearest_line_I[np.argmin(line_costs)]
if return_line_assignments:
line_assignments.extend( [line_i] * (end_of_line - start_of_line) )
fixation_XY[start_of_line:end_of_line, 1] = line_Y[line_i]
start_of_line = end_of_line
######################### FOR SIMULATIONS #########################
if return_line_assignments:
return np.array(line_assignments, dtype=int)
###################################################################
return fixation_XY
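# Editorial note: compare() above relies on a dynamic_time_warping(gaze_line, text_line)
# helper returning a (cost, warping_path) pair; that helper is not part of this excerpt
# and is assumed to be defined elsewhere in the module.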
phases = [{'min_i':3, 'min_j':3, 'no_constraints':False},
{'min_i':1, 'min_j':3, 'no_constraints':False},
{'min_i':1, 'min_j':1, 'no_constraints':False},
{'min_i':1, 'min_j':1, 'no_constraints':True}]
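# Editorial note: merge() below walks through these phases in order, first requiring
# both candidate sequences to contain at least three fixations, then progressively
# relaxing the length requirements, and finally dropping the gradient/error constraints
# so that the number of sequences can always be reduced to the number of text lines.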
def merge(fixation_XY, line_Y, y_thresh=32, g_thresh=0.1, e_thresh=20, return_line_assignments=False):
n = len(fixation_XY)
m = len(line_Y)
diff_X = np.diff(fixation_XY[:, 0])
dist_Y = abs(np.diff(fixation_XY[:, 1]))
sequence_boundaries = list(np.where(np.logical_or(diff_X < 0, dist_Y > y_thresh))[0] + 1)
sequences = [list(range(start, end)) for start, end in zip([0]+sequence_boundaries, sequence_boundaries+[n])]
for phase in phases:
while len(sequences) > m:
best_merger = None
best_error = np.inf
for i in range(len(sequences)):
if len(sequences[i]) < phase['min_i']:
continue
for j in range(i+1, len(sequences)):
if len(sequences[j]) < phase['min_j']:
continue
candidate_XY = fixation_XY[sequences[i] + sequences[j]]
gradient, intercept = np.polyfit(candidate_XY[:, 0], candidate_XY[:, 1], 1)
residuals = candidate_XY[:, 1] - (gradient * candidate_XY[:, 0] + intercept)
error = np.sqrt(sum(residuals**2) / len(candidate_XY))
if phase['no_constraints'] or (abs(gradient) < g_thresh and error < e_thresh):
if error < best_error:
best_merger = (i, j)
best_error = error
if not best_merger:
break
merge_i, merge_j = best_merger
merged_sequence = sequences[merge_i] + sequences[merge_j]
sequences.append(merged_sequence)
del sequences[merge_j], sequences[merge_i]
mean_Y = [fixation_XY[sequence, 1].mean() for sequence in sequences]
ordered_sequence_indices = np.argsort(mean_Y)
######################### FOR SIMULATIONS #########################
if return_line_assignments:
line_assignments = []
for line_i, sequence_i in enumerate(ordered_sequence_indices):
line_assignments.extend( [line_i] * len(sequences[sequence_i]) )
return np.array(line_assignments, dtype=int)
###################################################################
for line_i, sequence_i in enumerate(ordered_sequence_indices):
fixation_XY[sequences[sequence_i], 1] = line_Y[line_i]
return fixation_XY
def regress(fixation_XY, line_Y, k_bounds=(-0.1, 0.1), o_bounds=(-50, 50), s_bounds=(1, 20), return_line_assignments=False):
n = len(fixation_XY)
m = len(line_Y)
def fit_lines(params, return_line_assignments=False):
k = k_bounds[0] + (k_bounds[1] - k_bounds[0]) * norm.cdf(params[0])
o = o_bounds[0] + (o_bounds[1] - o_bounds[0]) * norm.cdf(params[1])
s = s_bounds[0] + (s_bounds[1] - s_bounds[0]) * norm.cdf(params[2])
predicted_Y_from_slope = fixation_XY[:, 0] * k
line_Y_plus_offset = line_Y + o
density = np.zeros((n, m))
for line_i in range(m):
fit_Y = predicted_Y_from_slope + line_Y_plus_offset[line_i]
density[:, line_i] = norm.logpdf(fixation_XY[:, 1], fit_Y, s)
if return_line_assignments:
return density.argmax(axis=1)
return -sum(density.max(axis=1))
best_fit = minimize(fit_lines, [0, 0, 0], method='powell')
line_assignments = fit_lines(best_fit.x, True)
######################### FOR SIMULATIONS #########################
if return_line_assignments:
return line_assignments
###################################################################
for fixation_i, line_i in enumerate(line_assignments):
fixation_XY[fixation_i, 1] = line_Y[line_i]
return fixation_XY
def segment(fixation_XY, line_Y, return_line_assignments=False):
n = len(fixation_XY)
m = len(line_Y)
diff_X = np.diff(fixation_XY[:, 0])
saccades_ordered_by_length = np.argsort(diff_X)
line_change_indices = saccades_ordered_by_length[:m-1]
current_line_i = 0
######################### FOR SIMULATIONS #########################
if return_line_assignments:
line_assignments = []
for fixation_i in range(n):
line_assignments.append(current_line_i)
if fixation_i in line_change_indices:
current_line_i += 1
return np.array(line_assignments, dtype=int)
###################################################################
for fixation_i in range(n):
fixation_XY[fixation_i, 1] = line_Y[current_line_i]
if fixation_i in line_change_indices:
current_line_i += 1
return fixation_XY
def slice(fixation_XY, line_Y, x_thresh=192, y_thresh=32, w_thresh=32, n_thresh=90, return_line_assignments=False):
n = len(fixation_XY)
line_height = np.mean(np.diff(line_Y))
proto_lines, phantom_proto_lines = {}, {}
dist_X = abs(np.diff(fixation_XY[:, 0]))
dist_Y = abs(np.diff(fixation_XY[:, 1]))
end_run_indices = list(np.where(np.logical_or(dist_X > x_thresh, dist_Y > y_thresh))[0] + 1)
run_starts = [0] + end_run_indices
run_ends = end_run_indices + [n]
runs = [list(range(start, end)) for start, end in zip(run_starts, run_ends)]
longest_run_i = np.argmax([fixation_XY[run[-1], 0] - fixation_XY[run[0], 0] for run in runs])
proto_lines[0] = runs.pop(longest_run_i)
while runs:
change_on_this_iteration = False
for proto_line_i, direction in [(min(proto_lines), -1), (max(proto_lines), 1)]:
proto_lines[proto_line_i + direction] = []
if proto_lines[proto_line_i]:
proto_line_XY = fixation_XY[proto_lines[proto_line_i]]
else:
proto_line_XY = phantom_proto_lines[proto_line_i]
run_differences = np.zeros(len(runs))
for run_i, run in enumerate(runs):
y_diffs = [y - proto_line_XY[np.argmin(abs(proto_line_XY[:, 0] - x)), 1] for x, y in fixation_XY[run]]
run_differences[run_i] = np.mean(y_diffs)
merge_into_current = list(np.where(abs(run_differences) < w_thresh)[0])
merge_into_adjacent = list(np.where(np.logical_and(
run_differences * direction >= w_thresh,
run_differences * direction < n_thresh
))[0])
for index in merge_into_current:
proto_lines[proto_line_i].extend(runs[index])
for index in merge_into_adjacent:
proto_lines[proto_line_i + direction].extend(runs[index])
if not merge_into_adjacent:
average_x, average_y = np.mean(proto_line_XY, axis=0)
adjacent_y = average_y + line_height * direction
phantom_proto_lines[proto_line_i + direction] = np.array([[average_x, adjacent_y]])
for index in sorted(merge_into_current + merge_into_adjacent, reverse=True):
del runs[index]
change_on_this_iteration = True
if not change_on_this_iteration:
break
for run in runs:
best_pl_distance = np.inf
best_pl_assignemnt = None
for proto_line_i in proto_lines:
if proto_lines[proto_line_i]:
proto_line_XY = fixation_XY[proto_lines[proto_line_i]]
else:
proto_line_XY = phantom_proto_lines[proto_line_i]
y_diffs = [y - proto_line_XY[np.argmin(abs(proto_line_XY[:, 0] - x)), 1] for x, y in fixation_XY[run]]
pl_distance = abs(np.mean(y_diffs))
if pl_distance < best_pl_distance:
best_pl_distance = pl_distance
best_pl_assignemnt = proto_line_i
proto_lines[best_pl_assignemnt].extend(run)
while len(proto_lines) > len(line_Y):
top, bot = min(proto_lines), max(proto_lines)
if len(proto_lines[top]) < len(proto_lines[bot]):
proto_lines[top + 1].extend(proto_lines[top])
del proto_lines[top]
else:
proto_lines[bot - 1].extend(proto_lines[bot])
del proto_lines[bot]
######################### FOR SIMULATIONS #########################
if return_line_assignments:
line_assignments = np.zeros(n, dtype=int)
for line_i, proto_line_i in enumerate(sorted(proto_lines)):
line_assignments[proto_lines[proto_line_i]] = line_i
return line_assignments
###################################################################
for line_i, proto_line_i in enumerate(sorted(proto_lines)):
fixation_XY[proto_lines[proto_line_i], 1] = line_Y[line_i]
return fixation_XY
def split(fixation_XY, line_Y, return_line_assignments=False):
n = len(fixation_XY)
diff_X = np.diff(fixation_XY[:, 0])
clusters = KMeans(2, n_init=10, max_iter=300).fit_predict(diff_X.reshape(-1, 1))
centers = [diff_X[clusters == 0].mean(), diff_X[clusters == 1].mean()]
sweep_marker = np.argmin(centers)
end_line_indices = list(np.where(clusters == sweep_marker)[0] + 1)
end_line_indices.append(n)
start_of_line = 0
######################### FOR SIMULATIONS #########################
if return_line_assignments:
line_assignments = []
for end_of_line in end_line_indices:
mean_y = np.mean(fixation_XY[start_of_line:end_of_line, 1])
line_i = np.argmin(abs(line_Y - mean_y))
line_assignments.extend( [line_i] * (end_of_line - start_of_line) )
start_of_line = end_of_line
return np.array(line_assignments, dtype=int)
###################################################################
for end_of_line in end_line_indices:
mean_y =
|
np.mean(fixation_XY[start_of_line:end_of_line, 1])
|
numpy.mean
|
from .utils.vector3 import *
from .utils.constants import *
from .utils.colour_functions import *
from .materials.diffuse import *
from .materials.emissive import *
from .ray import *
from .scene import *
from functools import reduce  # needed for reduce(np.minimum, ...) below, unless already re-exported by the wildcard imports above
import numpy as np
from PIL import Image
def trace_ray(scene : Scene, ray : Ray):
""" returns tuple (pos, norm) of vec3 objects """
w = scene.camera.screen_width
h = scene.camera.screen_height
inters = [s.intersect(ray.origin, ray.dir) for s in scene.collider_list]
distances, hit_orientation = zip(*inters)
nearest = reduce(np.minimum, distances)
normal = vec3(np.zeros_like(ray.origin), np.zeros_like(ray.origin), np.zeros_like(ray.origin))
for (coll, dis, orient) in zip(scene.collider_list, distances, hit_orientation):
hit_check = (nearest != FARAWAY) & (dis == nearest)
if np.any(hit_check):
hitted_rays = ray.extract(hit_check)
material = coll.assigned_primitive.material
hit_info = Hit(extract(hit_check, dis), extract(hit_check, orient), material, coll, coll.assigned_primitive)
hit_info.point = (hitted_rays.origin + hitted_rays.dir * hit_info.distance)
hit_normal = hit_info.material.get_Normal(hit_info)
normal = normal.place_values(np.reshape(hit_check, (h,w)), hit_normal)
position = ray.origin + ray.dir * nearest
return (position, normal)
def trace_ray_material(scene : Scene, ray : Ray):
""" returns tuple (pos, norm, emission, albedo) of vec3 objects """
inters = [s.intersect(ray.origin, ray.dir) for s in scene.collider_list]
distances, hit_orientation = zip(*inters)
nearest = reduce(np.minimum, distances)
normal = ray.origin.zeros_like()
emission = ray.origin.zeros_like()
albedo = ray.origin.zeros_like()
for (coll, dis, orient) in zip(scene.collider_list, distances, hit_orientation):
hit_check = (nearest != FARAWAY) & (dis == nearest)
if np.any(hit_check):
hitted_rays = ray.extract(hit_check)
material = coll.assigned_primitive.material
hit_info = Hit(extract(hit_check,dis), extract(hit_check,orient), material, coll, coll.assigned_primitive)
hit_info.point = (hitted_rays.origin + hitted_rays.dir * hit_info.distance)
hit_normal = hit_info.material.get_Normal(hit_info)
normal = normal.place_values(hit_check, hit_normal)
if isinstance(material, Diffuse):
color = material.diff_texture.get_color(hit_check)
albedo = albedo.place_values(hit_check, color)
if isinstance(material, Emissive):
color = material.texture_color.get_color(hit_check)
emission = emission.place_values(hit_check, color)
position = ray.origin + ray.dir * nearest
return (position, normal, emission, albedo)
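# Editorial note: both tracing helpers above follow the same pattern: intersect the
# ray bundle against every collider, keep the per-pixel minimum hit distance, and
# scatter per-material quantities (normals, emission, albedo) back into full-size
# buffers through place_values() on the boolean nearest-hit mask.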
def sample_sphere(shape):
# http://corysimon.github.io/articles/uniformdistn-on-sphere/
theta = 2 * np.pi * np.random.random_sample(size=shape)
phi = np.arccos(1 - 2 *
|
np.random.random_sample(size=shape)
|
numpy.random.random_sample
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
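# Editorial note: MetricTest above exercises the generic Metric plumbing (precompute /
# compute / compute_slices / postcompute / final_compute and the "|" pipeline operator);
# SimpleMetricTest below covers the concrete metrics (Count, Sum, Dot, Mean, Quantile,
# Variance, StandardDeviation, CV, Correlation and Cov) in plain, melted and split_by
# variants, with and without weights.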
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_quantile_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted quantile(X, 0.5)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame(
{
'Y-weighted quantile(X, 0.25)': [1.25, 0.5],
'Y-weighted quantile(X, 0.5)': [2., 1.25],
'sum(X)': [6, 3]
},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level=['Metric', 'grp'], inplace=True) # For Py2
expected = pd.DataFrame({'Value': [1.25, 0.5, 2., 1.25, 6., 3.]},
index=pd.MultiIndex.from_product(
([
'Y-weighted quantile(X, 0.25)',
'Y-weighted quantile(X, 0.5)', 'sum(X)'
], ['A', 'B']),
names=['Metric', 'grp']))
testing.assert_frame_equal(output, expected)
def test_variance_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var())
def test_variance_biased(self):
metric = metrics.Variance('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var(ddof=0))
def test_variance_split_by_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].var()
expected.name = 'var(X)'
testing.assert_series_equal(output, expected)
def test_variance_where(self):
metric = metrics.Variance('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].var()
self.assertEqual(output, expected)
def test_variance_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'var(X)': [self.df.X.var()]})
testing.assert_frame_equal(output, expected)
def test_variance_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.var()]}, index=['var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'var(X)': self.df.groupby('grp')['X'].var()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].var().values,
'grp': ['A', 'B']
},
index=['var(X)', 'var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_variance_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_variance_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 0.75)
def test_weighted_variance_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((2., 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted var(X)'
testing.assert_series_equal(output, expected)
def test_weighted_variance_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted var(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_variance_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted var(X)': [2., 1]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [2., 1],
'grp': ['A', 'B']
},
index=['Y-weighted var(X)', 'Y-weighted var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_standard_deviation_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std())
def test_standard_deviation_biased(self):
metric = metrics.StandardDeviation('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0))
def test_standard_deviation_split_by_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std()
expected.name = 'sd(X)'
testing.assert_series_equal(output, expected)
def test_standard_deviation_where(self):
metric = metrics.StandardDeviation('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std()
self.assertEqual(output, expected)
def test_standard_deviation_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sd(X)': [self.df.X.std()]})
testing.assert_frame_equal(output, expected)
def test_standard_deviation_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.std()]}, index=['sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sd(X)': self.df.groupby('grp')['X'].std()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].std().values,
'grp': ['A', 'B']
},
index=['sd(X)', 'sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_standard_deviation_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, np.sqrt(0.75))
def test_weighted_standard_deviation_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((np.sqrt(2), 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted sd(X)'
testing.assert_series_equal(output, expected)
def test_weighted_standard_deviation_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted sd(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted sd(X)': [np.sqrt(2), 1]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [np.sqrt(2), 1],
'grp': ['A', 'B']
},
index=['Y-weighted sd(X)', 'Y-weighted sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_cv_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.sqrt(1 / 3.))
def test_cv_biased(self):
metric = metrics.CV('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0) / np.mean(self.df.X))
def test_cv_split_by_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std() / [1, 2.75]
expected.name = 'cv(X)'
testing.assert_series_equal(output, expected)
def test_cv_where(self):
metric = metrics.CV('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std() / 2.75
self.assertEqual(output, expected)
def test_cv_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cv(X)': [np.sqrt(1 / 3.)]})
testing.assert_frame_equal(output, expected)
def test_cv_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [np.sqrt(1 / 3.)]}, index=['cv(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'cv(X)': [0, np.sqrt(1 / 8.25)]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
data={
'Value': [0, np.sqrt(1 / 8.25)],
'grp': ['A', 'B']
},
index=['cv(X)', 'cv(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_correlation(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.corrcoef(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.corr(self.df.Y))
def test_weighted_correlation(self):
metric = metrics.Correlation('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
cov = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)
expected = pd.DataFrame(
{'Y-weighted corr(X, Y)': [cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])]})
testing.assert_frame_equal(output, expected)
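  # Note: the reference value above is the weighted covariance normalized by the
  # weighted standard deviations, corr_w(X, Y) = cov_w(X, Y) / sqrt(var_w(X) * var_w(Y)),
  # with np.cov(..., aweights=...) supplying the weighted covariance matrix.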
def test_correlation_method(self):
metric = metrics.Correlation('X', 'Y', method='kendall')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.corr(self.df.Y, method='kendall'))
def test_correlation_kwargs(self):
metric = metrics.Correlation('X', 'Y', min_periods=10)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertTrue(pd.isnull(output))
def test_correlation_split_by_not_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
corr_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = pd.Series([corr_a, corr_b], index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'corr(X, Y)'
testing.assert_series_equal(output, expected)
def test_correlation_where(self):
metric = metrics.Correlation('X', 'Y', where='grp == "B"')
metric_no_filter = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'B'])
testing.assert_frame_equal(output, expected)
def test_correlation_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'corr(X, Y)': [self.df.X.corr(self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_correlation_split_by_df(self):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 2, 3, 4],
'Y': [3, 1, 1, 3, 4, 4, 3, 5],
'grp': ['A'] * 4 + ['B'] * 4
})
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(df, 'grp')
corr_a = metric.compute_on(df[df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(df[df.grp == 'B'], return_dataframe=False)
expected = pd.DataFrame({'corr(X, Y)': [corr_a, corr_b]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cov(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.cov(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.cov(self.df.Y))
def test_cov_bias(self):
metric = metrics.Cov('X', 'Y', bias=True)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_ddof(self):
metric = metrics.Cov('X', 'Y', ddof=0)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_kwargs(self):
metric = metrics.Cov('X', 'Y', fweights=self.df.Y)
output = metric.compute_on(self.df)
expected =
|
np.cov(self.df.X, self.df.Y, fweights=self.df.Y)
|
numpy.cov
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 10:29:42 2012
dsp_fpga_lib (Version 8)
@author: Muenker_2
"""
#
# Copyright (c) 2011 - 2015:
# <NAME>
# <NAME>
# <NAME> for CS506/606 "Special Topics: Speech Signal Processing" CSLU / OHSU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import string # needed for remezord?
import numpy as np
import numpy.ma as ma
from numpy import pi, asarray, absolute, sqrt, log10, arctan,\
ceil, hstack, mod
import scipy.signal as sig
from scipy import special # needed for remezord
import scipy.spatial.distance as sc_dist
import matplotlib.pyplot as plt
from matplotlib import patches
__version__ = "0.8"
def H_mag(zaehler, nenner, z, lim):
""" Calculate magnitude of H(z) or H(s) in polynomial form at the complex
coordinate z = x, 1j * y (skalar or array)
The result is clipped at lim."""
# limvec = lim * np.ones(len(z))
try: len(zaehler)
except TypeError:
z_val = abs(zaehler) # zaehler is a scalar
else:
z_val = abs(np.polyval(zaehler,z)) # evaluate zaehler at z
try: len(nenner)
except TypeError:
n_val = nenner # nenner is a scalar
else:
n_val = abs(np.polyval(nenner,z))
return np.minimum((z_val/n_val),lim)
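# Illustrative sketch (not part of the original module): evaluate H_mag on a few
# points of the upper unit circle for an arbitrary demo polynomial. The helper
# below is only defined, never called.
def _demo_H_mag():
    b = [1, 2, 3]                  # demo polynomial coefficients (highest power first)
    w = np.linspace(0, np.pi, 8)   # a few angular frequencies
    z = np.exp(1j * w)             # corresponding points on the unit circle
    return H_mag(b, 1, z, lim=10)  # clipped magnitude |b(z) / 1|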
#----------------------------------------------
# from scipy.sig.signaltools.py:
def cmplx_sort(p):
"sort roots based on magnitude."
p = np.asarray(p)
if np.iscomplexobj(p):
indx = np.argsort(abs(p))
else:
indx = np.argsort(p)
return np.take(p, indx, 0), indx
# adapted from scipy.signal.signaltools.py:
# TODO: comparison of real values has several problems (5 * tol ???)
def unique_roots(p, tol=1e-3, magsort = False, rtype='min', rdist='euclidian'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, default tol = 1e-3
The tolerance for two roots to be considered equal. Default is 1e-3.
magsort: Boolean, default = False
        When magsort = True, use the root magnitude as a sorting criterion (as in
        the version used in numpy < 1.8.2). This yields false results for roots
        with similar magnitudes (e.g. on the unit circle) but is significantly
        faster for a large number of roots (factor 20 for 500 double roots).
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max' or 'maximum': pick the maximum of those roots (magnitude ?).
- 'min' or 'minimum': pick the minimum of those roots (magnitude?).
- 'avg' or 'mean' : take the average of those roots.
- 'median' : take the median of those roots
    rdist : {'manhattan', 'euclid'}, optional
        How to measure the distance between roots: 'euclid' is the Euclidean
        distance. 'manhattan' is less common, giving the sum of the absolute
        differences of real and imaginary parts.
Returns
-------
pout : list
The list of unique roots, sorted from low to high (only for real roots).
mult : list
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = sp.signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
Find multiples of complex roots on the unit circle:
    >>> vals = np.roots([1, 2, 3, 2, 1])
    >>> uniq, mult = sp.signal.unique_roots(vals, rtype='avg')
"""
def manhattan(a,b):
"""
Manhattan distance between a and b
"""
return ma.abs(a.real - b.real) + ma.abs(a.imag - b.imag)
def euclid(a,b):
"""
        Euclidean distance between a and b
"""
return ma.abs(a - b)
    if rtype in ['max', 'maximum']:
        comproot = ma.max # ma.max ignores masked entries (nan's)
    elif rtype in ['min', 'minimum']:
        comproot = ma.min # ma.min ignores masked entries (nan's)
    elif rtype in ['avg', 'mean']:
        comproot = ma.mean # ma.mean ignores masked entries (nan's)
# elif rtype == 'median':
else:
raise TypeError(rtype)
if rdist in ['euclid', 'euclidian']:
dist_roots = euclid
elif rdist in ['rect', 'manhattan']:
dist_roots = manhattan
else:
raise TypeError(rdist)
mult = [] # initialize list for multiplicities
pout = [] # initialize list for reduced output list of roots
p = np.atleast_1d(p) # convert p to at least 1D array
tol = abs(tol)
if len(p) == 0: # empty argument, return empty lists
return pout, mult
elif len(p) == 1: # scalar input, return arg with multiplicity = 1
pout = p
mult = [1]
return pout, mult
else:
sameroots = [] # temporary list for roots within the tolerance
pout = p[np.isnan(p)].tolist() # copy nan elements to pout as list
mult = len(pout) * [1] # generate a list with a "1" for each nan
#p = ma.masked_array(p[~np.isnan(p)]) # delete nan elements, convert to ma
        p = np.ma.masked_where(np.isnan(p), p) # only masks nans, preferable?
if np.iscomplexobj(p) and not magsort:
for i in range(len(p)): # p[i] is current root under test
if not p[i] is ma.masked: # has current root been "deleted" yet?
tolarr = dist_roots(p[i], p[i:]) < tol # test against itself and
# subsequent roots, giving a multiplicity of at least one
mult.append(np.sum(tolarr)) # multiplicity = number of "hits"
sameroots = p[i:][tolarr] # pick the roots within the tolerance
p[i:] = ma.masked_where(tolarr, p[i:]) # and "delete" (mask) them
pout.append(comproot(sameroots)) # avg/mean/max of mult. root
else:
p,indx = cmplx_sort(p)
indx = -1
curp = p[0] + 5 * tol # needed to avoid "self-detection" ?
for k in range(len(p)):
tr = p[k]
# if dist_roots(tr, curp) < tol:
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots) # not correct for 'avg'
# of multiple (N > 2) root !
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return np.array(pout), np.array(mult)
##### original code ####
# p = asarray(p) * 1.0
# tol = abs(tol)
# p, indx = cmplx_sort(p)
# pout = []
# mult = []
# indx = -1
# curp = p[0] + 5 * tol
# sameroots = []
# for k in range(len(p)):
# tr = p[k]
# if abs(tr - curp) < tol:
# sameroots.append(tr)
# curp = comproot(sameroots)
# pout[indx] = curp
# mult[indx] += 1
# else:
# pout.append(tr)
# curp = tr
# sameroots = [tr]
# indx += 1
# mult.append(1)
# return array(pout), array(mult)
def zplane(b, a=1, pn_eps=1e-2, zpk=False, analog=False, plt_ax = None, pltLib='matplotlib',
verbose=False, style='square', anaCircleRad=0, lw=2,
mps = 10, mzs = 10, mpc = 'r', mzc = 'b', plabel = '', zlabel = ''):
"""
Plot the poles and zeros in the complex z-plane either from the
coefficients (`b,`a) of a discrete transfer function `H`(`z`) (zpk = False)
or directly from the zeros and poles (z,p) (zpk = True).
When only b is given, the group delay of the transversal (FIR)
filter specified by b is calculated.
Parameters
----------
b : array_like
Numerator coefficients (transversal part of filter)
a : array_like (optional, default = 1 for FIR-filter)
Denominator coefficients (recursive part of filter)
zpk : boolean (default: False)
When True, interpret parameter b as an array containing the
position of the poles and parameter a as an array with the
position of the zeros.
analog : boolean (default: False)
When True, create a P/Z plot suitable for the s-plane, i.e. suppress
the unit circle (unless anaCircleRad > 0) and scale the plot for
a good display of all poles and zeros.
pn_eps : float (default : 1e-2)
Tolerance for separating close poles or zeros
pltLib : string (default: 'matplotlib')
Library for plotting the P/Z plane. Currently, only matplotlib is
implemented. When pltLib = 'none' or when matplotlib is not
available, only pass the poles / zeros and their multiplicity
verbose : boolean (default: False)
When verbose == True, print poles / zeros and their multiplicity.
style : string (default: 'square')
Style of the plot, for style == 'square' make scale of x- and y-
axis equal.
    mps, mzs : marker sizes for poles / zeros (default: 10); mpc, mzc : marker
        colors for poles / zeros (default: 'r', 'b'); lw : linewidth (default: 2)
plabel, zlabel : string (default: '')
This string is passed to the plot command for poles and zeros and
can be displayed by legend()
Returns
-------
z : ndarray
The zeroes
p : ndarray
The poles
k : real
The gain factor
Notes
-----
"""
# TODO:
# - polar option
# - add keywords for size, color etc. of markers and circle -> **kwargs
# - add option for multi-dimensional arrays and zpk data
# Alternative:
# get a figure/plot
# [z,p,k] = scipy.signal.tf2zpk -> poles and zeros
    # Plot via
# scatter(real(p),imag(p))
# scatter(real(z),imag(z))
# Is input data given as zeros & poles (zpk = True) or
# as numerator & denominator coefficients (b, a) of system function?
if zpk == False:
        # If the maximum coefficient exceeds 1, normalize the coefficients
if np.max(b) > 1:
kn = np.max(b)
b = np.array(b)/float(kn) # make sure that b is an array
else:
kn = 1.
if np.max(a) > 1:
kd = np.max(a)
a = np.array(a)/abs(kd) # make sure that a is an array
else:
kd = 1.
# Calculate the poles, zeros and scaling factor
p = np.roots(a)
z = np.roots(b)
k = kn/kd
else:
z = b; p = a; k = 1.
# find multiple poles and zeros and their multiplicities
# print p, z
if len(p) < 1:
p = np.array(0,ndmin=1) # only zeros, create equal number of poles at z = 0
num_p = np.array(len(z),ndmin=1)
else:
#p, num_p = sig.signaltools.unique_roots(p, tol = pn_eps, rtype='avg')
p, num_p = unique_roots(p, tol = pn_eps, rtype='avg')
p = np.array(p)
if len(z) > 0:
z, num_z = unique_roots(z, tol = pn_eps, rtype='avg')
z = np.array(z)
#z, num_z = sig.signaltools.unique_roots(z, tol = pn_eps, rtype='avg')
else:
num_z = []
# print p,z
if not plt_ax:
# fig = plt.figure()
ax = plt.gca()# fig.add_subplot(111)
else:
ax = plt_ax
if analog == False:
# create the unit circle for the z-plane
uc = patches.Circle((0,0), radius=1, fill=False,
color='grey', ls='solid', zorder=1)
ax.add_patch(uc)
# ax.spines['left'].set_position('center')
# ax.spines['bottom'].set_position('center')
# ax.spines['right'].set_visible(True)
# ax.spines['top'].set_visible(True)
r = 1.1
plt.axis('equal'); plt.axis([-r, r, -r, r], aspect='equal')
else: # s-plane
if anaCircleRad > 0:
# plot a circle with radius = anaCircleRad
uc = patches.Circle((0,0), radius=anaCircleRad, fill=False,
color='grey', ls='solid', zorder=1)
ax.add_patch(uc)
# plot real and imaginary axis
ax.axhline(lw=2, color = 'k', zorder=1)
ax.axvline(lw=2, color = 'k', zorder=1)
# Plot the zeros
ax.scatter(z.real, z.imag, s=mzs*mzs, zorder=2, marker = 'o',
facecolor = 'none', edgecolor = mzc, lw = lw, label=zlabel)
# t1 = plt.plot(z.real, z.imag, 'go', ms=10, label=label)
# plt.setp( t1, markersize=mzs, markeredgewidth=2.0,
# markeredgecolor=mzc, markerfacecolor='none')
# Plot the poles
ax.scatter(p.real, p.imag, s=mps*mps, zorder=2, marker = 'x',
edgecolor = mpc, lw = lw, label=plabel)
# Print multiplicity of poles / zeros
for i in range(len(z)):
if verbose == True: print('z', i, z[i], num_z[i])
if num_z[i] > 1:
plt.text(np.real(z[i]), np.imag(z[i]),' (' + str(num_z[i]) +')',va = 'bottom')
for i in range(len(p)):
if verbose == True: print('p', i, p[i], num_p[i])
if num_p[i] > 1:
plt.text(np.real(p[i]), np.imag(p[i]), ' (' + str(num_p[i]) +')',va = 'bottom')
# increase distance between ticks and labels
# to give some room for poles and zeros
for tick in ax.get_xaxis().get_major_ticks():
tick.set_pad(12.)
tick.label1 = tick._get_text1()
for tick in ax.get_yaxis().get_major_ticks():
tick.set_pad(12.)
tick.label1 = tick._get_text1()
if style == 'square':
plt.axis('equal')
xl = ax.get_xlim(); Dx = max(abs(xl[1]-xl[0]), 0.05)
yl = ax.get_ylim(); Dy = max(abs(yl[1]-yl[0]), 0.05)
ax.set_xlim((xl[0]-Dx*0.05, max(xl[1]+Dx*0.05,0)))
ax.set_ylim((yl[0]-Dy*0.05, yl[1] + Dy*0.05))
# print(ax.get_xlim(),ax.get_ylim())
return z, p, k
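# Illustrative sketch (not part of the original module): minimal zplane() usage
# with arbitrary demo coefficients; the helper below is only defined, never called.
def _demo_zplane():
    b = [1, 2, 3]           # demo numerator coefficients
    a = [1, -0.5]           # demo denominator -> one real pole at z = 0.5
    z, p, k = zplane(b, a)  # draws into the current matplotlib axes
    return z, p, k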
#------------------------------------------------------------------------------
def impz(b, a=1, FS=1, N=1, step=False):
"""
Calculate impulse response of a discrete time filter, specified by
numerator coefficients b and denominator coefficients a of the system
function H(z).
When only b is given, the impulse response of the transversal (FIR)
filter specified by b is calculated.
Parameters
----------
b : array_like
Numerator coefficients (transversal part of filter)
a : array_like (optional, default = 1 for FIR-filter)
Denominator coefficients (recursive part of filter)
FS : float (optional, default: FS = 1)
Sampling frequency.
N : float (optional, default N=1 for automatic calculation)
Number of calculated points.
Default: N = len(b) for FIR filters, N = 100 for IIR filters
step: boolean (optional, default: step=False)
plot step response instead of impulse response
Returns
-------
hn : ndarray with length N (see above)
    td : ndarray containing the time steps with the same length as hn
Examples
--------
    >>> b = [1,2,3] # Coefficients of H(z) = 1 + 2 z^-1 + 3 z^-2
>>> h, n = dsp_lib.impz(b)
"""
try: len(a) #len_a = len(a)
except TypeError:
# a has len = 1 -> FIR-Filter
impulse = np.repeat(0.,len(b)) # create float array filled with 0.
try: len(b)
except TypeError:
print('No proper filter coefficients: len(a) = len(b) = 1 !')
else:
try: len(b)
except TypeError: b = [b,] # convert scalar to array with len = 1
impulse = np.repeat(0.,100) # IIR-Filter
if N > 1:
impulse = np.repeat(0.,N)
impulse[0] =1.0 # create dirac impulse
hn = np.array(sig.lfilter(b,a,impulse)) # calculate impulse response
td = np.arange(len(hn)) / FS
if step:
hn = np.cumsum(hn) # integrate impulse response to get step response
return hn, td
#==================================================================
def grpdelay(b, a=1, nfft=512, whole='none', analog=False, Fs=2.*pi):
#==================================================================
"""
Calculate group delay of a discrete time filter, specified by
numerator coefficients `b` and denominator coefficients `a` of the system
function `H` ( `z`).
When only `b` is given, the group delay of the transversal (FIR)
filter specified by `b` is calculated.
Parameters
----------
b : array_like
Numerator coefficients (transversal part of filter)
a : array_like (optional, default = 1 for FIR-filter)
Denominator coefficients (recursive part of filter)
    whole : string (optional, default : 'none')
        Only when whole = 'whole' is the group delay calculated around
        the complete unit circle (0 ... 2 pi)
    nfft : integer (optional, default: 512)
        Number of FFT points
    Fs : float (optional, default: Fs = 2*pi)
Sampling frequency.
Returns
-------
tau_g : ndarray
The group delay
w : ndarray
The angular frequency points where the group delay was computed
Notes
-----
The group delay :math:`\\tau_g(\\omega)` of discrete and continuous time
systems is defined by
.. math::
\\tau_g(\\omega) = - \\phi'(\\omega)
= -\\frac{\\partial \\phi(\\omega)}{\\partial \\omega}
= -\\frac{\\partial }{\\partial \\omega}\\angle H( \\omega)
A useful form for calculating the group delay is obtained by deriving the
*logarithmic* frequency response in polar form as described in [JOS]_ for
discrete time systems:
.. math::
\\ln ( H( \\omega))
= \\ln \\left({H_A( \\omega)} e^{j \\phi(\\omega)} \\right)
= \\ln \\left({H_A( \\omega)} \\right) + j \\phi(\\omega)
\\Rightarrow \\; \\frac{\\partial }{\\partial \\omega} \\ln ( H( \\omega))
= \\frac{H_A'( \\omega)}{H_A( \\omega)} + j \\phi'(\\omega)
where :math:`H_A(\\omega)` is the amplitude response. :math:`H_A(\\omega)` and
its derivative :math:`H_A'(\\omega)` are real-valued, therefore, the group
delay can be calculated from
.. math::
\\tau_g(\\omega) = -\\phi'(\\omega) =
-\\Im \\left\\{ \\frac{\\partial }{\\partial \\omega}
\\ln ( H( \\omega)) \\right\\}
=-\\Im \\left\\{ \\frac{H'(\\omega)}{H(\\omega)} \\right\\}
The derivative of a polynome :math:`P(s)` (continuous-time system) or :math:`P(z)`
(discrete-time system) w.r.t. :math:`\\omega` is calculated by:
.. math::
\\frac{\\partial }{\\partial \\omega} P(s = j \\omega)
= \\frac{\\partial }{\\partial \\omega} \\sum_{k = 0}^N c_k (j \\omega)^k
= j \\sum_{k = 0}^{N-1} (k+1) c_{k+1} (j \\omega)^{k}
= j P_R(s = j \\omega)
\\frac{\\partial }{\\partial \\omega} P(z = e^{j \\omega T})
= \\frac{\\partial }{\\partial \\omega} \\sum_{k = 0}^N c_k e^{-j k \\omega T}
= -jT \\sum_{k = 0}^{N} k c_{k} e^{-j k \\omega T}
= -jT P_R(z = e^{j \\omega T})
where :math:`P_R` is the "ramped" polynome, i.e. its `k` th coefficient is
multiplied by `k` resp. `k` + 1.
yielding:
.. math::
\\tau_g(\\omega) = -\\Im \\left\\{ \\frac{H'(\\omega)}{H(\\omega)} \\right\\}
\\quad \\text{ resp. } \\quad
\\tau_g(\\omega) = -\\Im \\left\\{ \\frac{H'(e^{j \\omega T})}
{H(e^{j \\omega T})} \\right\\}
    where::

        tau_g(w) = -Im{ H'(e^jwT) / H(e^jwT) }
                 = -Im{ -jT * H_R(e^jwT) / H(e^jwT) }
                 =   T * Re{ H_R(e^jwT) / H(e^jwT) }
where :math:`H(e^{j\\omega T})` is calculated via the DFT at NFFT points and
the derivative
of the polynomial terms :math:`b_k z^-k` using :math:`\\partial / \\partial w b_k e^-jkwT` = -b_k jkT e^-jkwT.
    This is equivalent to multiplying the polynomial with a ramp `k`,
yielding the "ramped" function H_R(e^jwT).
For analog functions with b_k s^k the procedure is analogous, but there is no
sampling time and the exponent is positive.
.. [JOS] <NAME>, "Numerical Computation of Group Delay" in
"Introduction to Digital Filters with Audio Applications",
Center for Computer Research in Music and Acoustics (CCRMA),
Stanford University, http://ccrma.stanford.edu/~jos/filters/Numerical_Computation_Group_Delay.html, referenced 2014-04-02,
.. [Lyons] <NAME>, "Understanding Digital Signal Processing", 3rd Ed.,
Prentice Hall, 2010.
Examples
--------
    >>> b = [1,2,3] # Coefficients of H(z) = 1 + 2 z^-1 + 3 z^-2
>>> tau_g, td = dsp_lib.grpdelay(b)
"""
## If the denominator of the computation becomes too small, the group delay
## is set to zero. (The group delay approaches infinity when
## there are poles or zeros very close to the unit circle in the z plane.)
##
## Theory: group delay, g(w) = -d/dw [arg{H(e^jw)}], is the rate of change of
## phase with respect to frequency. It can be computed as:
##
    ##        g(w) = [d/dw H(e^-jw)] / H(e^-jw)
##
## where
## H(z) = B(z)/A(z) = sum(b_k z^k)/sum(a_k z^k).
##
## By the quotient rule,
    ##        d/dw H(z) = [A(z) d/dw B(z) - B(z) d/dw A(z)] / [A(z) A(z)]
## Substituting into the expression above yields:
    ##        g(w) = (A dB - B dA) / (A B) = dB/B - dA/A
##
## Note that,
## d/dw B(e^-jw) = sum(k b_k e^-jwk)
## d/dw A(e^-jw) = sum(k a_k e^-jwk)
## which is just the FFT of the coefficients multiplied by a ramp.
##
## As a further optimization when nfft>>length(a), the IIR filter (b,a)
## is converted to the FIR filter conv(b,fliplr(conj(a))).
if whole !='whole':
nfft = 2*nfft
nfft = int(nfft)
#
w = Fs * np.arange(0, nfft)/nfft # create frequency vector
try: len(a)
except TypeError:
a = 1; oa = 0 # a is a scalar or empty -> order of a = 0
c = b
try: len(b)
except TypeError: print('No proper filter coefficients: len(a) = len(b) = 1 !')
else:
oa = len(a)-1 # order of denom. a(z) resp. a(s)
c = np.convolve(b,a[::-1]) # a[::-1] reverses denominator coeffs a
# c(z) = b(z) * a(1/z)*z^(-oa)
try: len(b)
except TypeError: b=1; ob=0 # b is a scalar or empty -> order of b = 0
else:
ob = len(b)-1 # order of b(z)
if analog:
a_b = np.convolve(a,b)
if ob > 1:
br_a = np.convolve(b[1:] * np.arange(1,ob), a)
else:
br_a = 0
ar_b = np.convolve(a[1:] * np.arange(1,oa), b)
num = np.fft.fft(ar_b - br_a, nfft)
den = np.fft.fft(a_b,nfft)
else:
oc = oa + ob # order of c(z)
cr = c * np.arange(0,oc+1) # multiply with ramp -> derivative of c wrt 1/z
num = np.fft.fft(cr,nfft) #
den = np.fft.fft(c,nfft) #
#
minmag = 10. * np.spacing(1) # equivalent to matlab "eps"
polebins = np.where(abs(den) < minmag)[0] # find zeros of denominator
# polebins = np.where(abs(num) < minmag)[0] # find zeros of numerator
if np.size(polebins) > 0: # check whether polebins array is empty
print('*** grpdelay warning: group delay singular -> setting to 0 at:')
for i in polebins:
print ('f = {0} '.format((Fs*i/nfft)))
num[i] = 0
den[i] = 1
if analog:
tau_g = np.real(num / den)
else:
tau_g = np.real(num / den) - oa
#
if whole !='whole':
nfft = nfft//2
tau_g = tau_g[0:nfft]
w = w[0:nfft]
return tau_g, w
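# Illustrative sketch (not part of the original module): a symmetric
# (linear-phase) FIR filter has a constant group delay of (len(b)-1)/2 samples,
# so for the demo coefficients b = [1, 2.5, 1] grpdelay() should return roughly
# one sample at every frequency. The helper below is only defined, never called.
def _demo_grpdelay():
    b = [1, 2.5, 1]                  # symmetric FIR -> linear phase
    tau_g, w = grpdelay(b, nfft=64)  # group delay in samples (Fs = 2*pi default)
    return np.allclose(tau_g, 1.0)   # expected: True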
def grp_delay_ana(b, a, w):
"""
    Calculate the group delay of an analog filter.
"""
w, H = sig.freqs(b, a, w)
H_angle = np.unwrap(np.angle(H))
# tau_g = np.zeros(len(w)-1)
tau_g = (H_angle[1:]-H_angle[:-1])/(w[0]-w[1])
return tau_g, w[:-1]
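# Note on grp_delay_ana(): the difference quotient above divides by
# (w[0] - w[1]), i.e. the negative of the (assumed uniform) frequency spacing,
# which builds the minus sign of tau_g = -d(phase)/dw into the denominator.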
#==================================================================
def format_ticks(xy, scale, format="%.1f"):
#==================================================================
"""
    Reformat numbers on the x- or y-axis. The scale can be changed to display
e.g. MHz instead of Hz. The number format can be changed as well.
Parameters
----------
xy : string, either 'x', 'y' or 'xy'
select corresponding axis (axes) for reformatting
    scale : real,
        scaling factor applied to the tick values
    format : string,
        defines C-style number formats
Returns
-------
nothing
Examples
--------
>>> format_ticks('x',1000.)
Scales all numbers of x-Axis by 1000, e.g. for displaying ms instead of s.
>>> format_ticks('xy',1., format = "%.2f")
Two decimal places for numbers on x- and y-axis
"""
if xy == 'x' or xy == 'xy':
locx,labelx = plt.xticks() # get location and content of xticks
plt.xticks(locx, map(lambda x: format % x, locx*scale))
if xy == 'y' or xy == 'xy':
        locy,labely = plt.yticks() # get location and content of yticks
plt.yticks(locy, map(lambda y: format % y, locy*scale))
#==================================================================
def lim_eps(a, eps):
#==================================================================
"""
Return min / max of an array a, increased by eps*(max(a) - min(a)).
Handy for nice looking axes labeling.
"""
mylim = (min(a) - (max(a)-min(a))*eps, max(a) + (max(a)-min(a))*eps)
return mylim
#========================================================
abs = absolute
def oddround(x):
"""Return the nearest odd integer from x."""
return x-mod(x,2)+1
def oddceil(x):
"""Return the smallest odd integer not less than x."""
return oddround(x+1)
def remlplen_herrmann(fp,fs,dp,ds):
"""
Determine the length of the low pass filter with passband frequency
fp, stopband frequency fs, passband ripple dp, and stopband ripple ds.
fp and fs must be normalized with respect to the sampling frequency.
Note that the filter order is one less than the filter length.
Uses approximation algorithm described by Herrmann et al.:
<NAME>, <NAME>, and <NAME>, Practical Design Rules for
Optimum Finite Impulse Response Low-Pass Digital Filters, Bell Syst. Tech.
Jour., 52(6):769-799, Jul./Aug. 1973.
"""
dF = fs-fp
a = [5.309e-3,7.114e-2,-4.761e-1,-2.66e-3,-5.941e-1,-4.278e-1]
b = [11.01217, 0.51244]
Dinf = log10(ds)*(a[0]*
|
log10(dp)
|
numpy.log10
|
#-------------------------------------------------------------------------------
# Main concept for testing returned arrays:
# 1). create ground truth e.g. with cross_val_predict
# 2). run vecstack
# 3). compare returned arrays with ground truth
# 4). compare arrays from file with ground truth
#-------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
import unittest
from numpy.testing import assert_array_equal
# from numpy.testing import assert_allclose
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from numpy.testing import assert_warns
import os
import glob
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
# from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.datasets import load_boston
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import make_scorer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from vecstack import stacking
from vecstack.core import model_action
n_folds = 5
temp_dir = 'tmpdw35lg54ms80eb42'
boston = load_boston()
X, y = boston.data, boston.target
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Make train/test split by hand to avoid strange errors probably related to the testing suite:
# https://github.com/scikit-learn/scikit-learn/issues/1684
# https://github.com/scikit-learn/scikit-learn/issues/1704
# Note: Python 2.7, 3.4 - OK, but 3.5, 3.6 - error
np.random.seed(0)
ind = np.arange(500)
np.random.shuffle(ind)
ind_train = ind[:400]
ind_test = ind[400:]
X_train = X[ind_train]
X_test = X[ind_test]
y_train = y[ind_train]
y_test = y[ind_test]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
class MinimalEstimator:
"""Has no get_params attribute"""
def __init__(self, random_state=0):
self.random_state = random_state
def __repr__(self):
return 'Demo string from __repr__'
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
def predict_proba(self, X):
return np.zeros(X.shape[0])
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
class TestFuncRegression(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
os.mkdir(temp_dir)
except:
print('Unable to create temp dir')
@classmethod
def tearDownClass(cls):
try:
os.rmdir(temp_dir)
except:
print('Unable to remove temp dir')
def tearDown(self):
# Remove files after each test
files = glob.glob(os.path.join(temp_dir, '*.npy'))
files.extend(glob.glob(os.path.join(temp_dir, '*.log.txt')))
try:
for file in files:
os.remove(file)
except:
print('Unable to remove temp file')
#---------------------------------------------------------------------------
# Testing returned and saved arrays in each mode
#---------------------------------------------------------------------------
def test_oof_pred_mode(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(X_train, y_train)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally, if cleaning is performed, there is only one .npy file at a given moment.
        # But if there is no cleaning, there may be more than one file, so we take the latest.
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_oof_mode(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
S_test_1 = None
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally, if cleaning is performed, there is only one .npy file at a given moment.
        # But if there is no cleaning, there may be more than one file, so we take the latest.
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_pred_mode(self):
model = LinearRegression()
S_train_1 = None
_ = model.fit(X_train, y_train)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally, if cleaning is performed, there is only one .npy file at a given moment.
        # But if there is no cleaning, there may be more than one file, so we take the latest.
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_oof_pred_bag_mode(self):
S_test_temp = np.zeros((X_test.shape[0], n_folds))
kf = KFold(n_splits = n_folds, shuffle = False, random_state = 0)
for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):
# Split data and target
X_tr = X_train[tr_index]
y_tr = y_train[tr_index]
X_te = X_train[te_index]
y_te = y_train[te_index]
model = LinearRegression()
_ = model.fit(X_tr, y_tr)
S_test_temp[:, fold_counter] = model.predict(X_test)
S_test_1 = np.mean(S_test_temp, axis = 1).reshape(-1, 1)
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred_bag', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally, if cleaning is performed, there is only one .npy file at a given moment.
        # But if there is no cleaning, there may be more than one file, so we take the latest.
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_pred_bag_mode(self):
S_test_temp = np.zeros((X_test.shape[0], n_folds))
kf = KFold(n_splits = n_folds, shuffle = False, random_state = 0)
for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):
# Split data and target
X_tr = X_train[tr_index]
y_tr = y_train[tr_index]
X_te = X_train[te_index]
y_te = y_train[te_index]
model = LinearRegression()
_ = model.fit(X_tr, y_tr)
S_test_temp[:, fold_counter] = model.predict(X_test)
S_test_1 = np.mean(S_test_temp, axis = 1).reshape(-1, 1)
S_train_1 = None
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'pred_bag', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally, if cleaning is performed, there is only one .npy file at a given moment.
        # But if there is no cleaning, there may be more than one file, so we take the latest.
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Testing <sample_weight> all ones
#---------------------------------------------------------------------------
def test_oof_pred_mode_sample_weight_one(self):
sw = np.ones(len(y_train))
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict',
fit_params = {'sample_weight': sw}).reshape(-1, 1)
_ = model.fit(X_train, y_train, sample_weight = sw)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0,
sample_weight = sw)
# Load OOF from file
        # Normally, if cleaning is performed, there is only one .npy file at a given moment.
        # But if there is no cleaning, there may be more than one file, so we take the latest.
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Test <sample_weight> all random
#---------------------------------------------------------------------------
def test_oof_pred_mode_sample_weight_random(self):
np.random.seed(0)
sw = np.random.rand(len(y_train))
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict',
fit_params = {'sample_weight': sw}).reshape(-1, 1)
_ = model.fit(X_train, y_train, sample_weight = sw)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0,
sample_weight = sw)
# Load OOF from file
        # Normally, if cleaning is performed, there is only one .npy file at a given moment.
        # But if there is no cleaning, there may be more than one file, so we take the latest.
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
|
assert_array_equal(S_test_1, S_test_3)
|
numpy.testing.assert_array_equal
|
import numpy as np
def test_update_mu_i():
from parameters_update_prior_terms import update_mu
mu_i_old = 3.0
c_1 = 2.0
v_i_old = 4.0
result = update_mu(mu_i_old, c_1, v_i_old)
expected_result = 11.0
assert result == expected_result
mu_i_old = np.array([1.0, 2.0, 3.0])
c_1 = np.array([1.0, 2.0, 2.0])
v_i_old = np.array([2.0, 4.0, 1.0])
result = update_mu(mu_i_old, c_1, v_i_old)
expected_result = np.array([3.0, 10.0, 5.0])
np.testing.assert_array_equal(result, expected_result)
def test_update_nu():
from parameters_update_prior_terms import update_nu
nu_old = np.array([2.0, 4.0, 1.0])
c_3 = np.array([1.0, 2.0, 2.0])
result = update_nu(nu_old, c_3)
expected_result = np.array([-2.0, -28.0, -1.0])
np.testing.assert_array_equal(result, expected_result)
def test_update_p_i():
from parameters_update_prior_terms import update_p_i
p_i_old = np.array([1.0, 2.0, 3.0])
G_1 = np.array([2.0, 2.0, 1.0])
G_0 =
|
np.array([1.0, 3.0, 2.0])
|
numpy.array
|
#!/usr/bin/env python
# coding=utf-8
from .traj_gen_base import TrajGen
import numpy as np
import casadi as ca # using casadi for the optimization problem
from qpsolvers import solve_qp # using qpsolvers library for the optimization problem
from scipy.linalg import block_diag, solve
class UAVTrajGen(TrajGen):
def __init__(self, knots_:np.array, order_:list, dim_=4, maxContiOrder_=[4, 3], pos_dim=3):
"""
initialize the class
Args:
knots_: time knots to define the fixed pins
            order_: polynomial order; should be a list which contains the
                    position polynomial order and the angle polynomial order
            dim_: dimension of the UAV trajectory, by default four (x, y, z, yaw)
maxContiOrder_: max continuity order
"""
super().__init__(knots_, dim_)
self.pos_order, self.ang_order = order_ # polynomial order
self.position_dim = pos_dim
self.maxContiOrder = maxContiOrder_
self.num_segments = knots_.shape[0] - 1 # segments which is knots - 1
self.num_pos_variables = (self.pos_order+1) * self.num_segments
self.num_ang_variables = (self.ang_order+1) * self.num_segments
self.pos_polyCoeffSet = np.zeros((self.position_dim, self.pos_order+1, self.num_segments))
self.ang_polyCoeffSet = np.zeros((dim_-self.position_dim, self.ang_order+1, self.num_segments))
self.segState = np.zeros((self.num_segments, 3)) # 0 dim -> how many fixed pins in this segment,
                                                # must be smaller than the polynomial order+1
# more fixed pins (higher order) will be ignored.
# 1 dim -> continuity degree. should be defined by
# user (maxContiOrder_+1)
## math functions
def scaleMat(self, delT, num_polys):
mat_ = np.diag([delT**i for i in range(num_polys)])
return mat_
def scaleMatBigInv(self, poly_type:str):
"""
        inverse of the block-diagonal time-scaling matrix
        Args:
            poly_type: 'pos' or 'ang'; selects the number of polynomial coefficients per segment
"""
mat_ = None
num_polys_ = self.pos_order + 1 if poly_type == 'pos' else self.ang_order + 1
for m in range(self.num_segments):
matSet_ = self.scaleMat(1/(self.Ts[m+1]-self.Ts[m]), num_polys_)
if mat_ is None:
mat_ = matSet_.copy()
else:
mat_ = block_diag(mat_, matSet_)
return mat_
## functional definition
def setDerivativeObj(self, pos_weights, ang_weights):
"""
        Set up which derivatives will be included in the cost function
"""
if pos_weights.shape[0] > self.pos_order:
print("Position order of derivative objective > order of poly. Higher terms will be ignored.")
self.pos_weight_mask = pos_weights[:self.pos_order]
else:
self.pos_weight_mask = pos_weights
if ang_weights.shape[0] > self.ang_order:
print("Angle order of derivative objective > order of poly. Higher terms will be ignored.")
self.ang_weight_mask = ang_weights[:self.ang_order]
else:
self.ang_weight_mask = ang_weights
def findSegInteval(self, t_):
idx_ = np.where(self.Ts<=t_)[0]
if idx_.shape[0]>0:
m_ = np.max(idx_)
if m_ >= self.num_segments:
if t_ != self.Ts[-1]:
print('Eval of t : geq TM. eval target = last segment')
m_ = self.num_segments-1
else:
print('Eval of t : leq T0. eval target = 1st segment')
m_ = 0
tau_ = (t_-self.Ts[m_])/(self.Ts[m_+1]-self.Ts[m_])
return m_, tau_
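    # Worked example for findSegInteval (illustrative values): with knots
    # Ts = [0.0, 1.0, 2.0] and t_ = 1.5 the segment index is m_ = 1 and the
    # normalized local time is tau_ = (1.5 - 1.0) / (2.0 - 1.0) = 0.5.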
def addPin(self, pin_):
t_ = pin_['t']
X_ = pin_['X']
super().addPin(pin_)
m, _ = self.findSegInteval(t_)
if len(X_.shape) == 2: # 2 dimension ==> loose pin
if m in self.loosePinSet.keys():
self.loosePinSet[m].append(pin_)
else:
self.loosePinSet[m] = [pin_]
elif len(X_.shape) == 1: # vector ==> fix pin
            assert (t_==self.Ts[m] or t_==self.Ts[-1]), 'Fix pin should be imposed only at knots'
if self.segState[m, 0] <= self.num_pos_variables+1:
if m in self.fixPinSet.keys():
self.fixPinSet[m].append(pin_)
self.fixPinOrder[m].append(pin_['d'])
else:
self.fixPinSet[m] = [pin_]
self.fixPinOrder[m] = [pin_['d']]
self.segState[m, 0] += 1
else:
print('FixPin exceed the dof of this segment. Pin ignored')
else:
print('Dim of pin value is invalid')
def nthCeoff(self, n, d):
""" Returns the nth order ceoffs (n=0...N) of time vector of d-th
derivative.
Args:
n(int): target order
d(int): order derivative
Returns:
val_: n-th ceoffs
"""
if d == 0:
val_ = 1
else:
accumProd_ = np.cumprod(np.arange(n, n-d, -1))
val_ = accumProd_[-1]*(n>=d)
return val_
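    # Worked example for nthCeoff (illustrative values): n = 3, d = 2 gives
    # cumprod([3, 2])[-1] = 6, i.e. d^2/dt^2 t^3 = 6 t; for n < d the factor
    # (n >= d) forces the result to 0.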
def IntDerSquard(self, d, poly_order):
"""
{x^(d)(t)}^2 = (tVec(t,d)'*Dp)'*(tVec(t,d)'*Dp)
Args:
d(int): order derivative
poly_order(int): max order of the polynomial
Returns:
mat_: matrix of the cost function
"""
mat_ = np.zeros((poly_order+1, poly_order+1))
if d > poly_order:
print("Order of derivative > poly order, return zeros-matrix \n")
for i in range(d, poly_order+1):
for j in range(d, poly_order+1):
mat_[i,j] = self.nthCeoff(i, d) * self.nthCeoff(j, d) / (i+j-2*d+1)
return mat_
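    # Worked example for IntDerSquard (illustrative values): d = 1, poly_order = 1
    # yields a single non-zero entry mat_[1, 1] = 1 * 1 / (1 + 1 - 2 + 1) = 1,
    # which is the integral of (d/dt t)^2 over [0, 1].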
def tVec(self, t_, d_, poly_order):
# time vector evaluated at time t with d-th order derivative.
vec_ = np.zeros((poly_order+1, 1))
for i in range(d_, poly_order+1):
vec_[i] = self.nthCeoff(i, d_)*t_**(i-d_)
return vec_
def fixPinMatSet(self, pin, PolyType):
t_ = pin['t']
X_ = pin['X']
d_ = pin['d']
m_, tau_ = self.findSegInteval(t_)
dTm_ = self.Ts[m_+1] - self.Ts[m_]
if PolyType == 'pos':
poly_order = self.pos_order
idxStart_ = m_*(poly_order+1)
idxEnd_ = (m_+1)*(poly_order+1)
aeqSet_ = np.zeros((self.position_dim, self.num_pos_variables))
beqSet_ = np.zeros((self.position_dim, 1))
for dd in range(self.position_dim):
aeqSet_[dd, idxStart_:idxEnd_] = self.tVec(tau_, d_, poly_order).flatten()/dTm_**d_#
beqSet_[dd] = X_[dd]
elif PolyType == 'ang':
poly_order = self.ang_order
idxStart_ = m_*(poly_order+1)
idxEnd_ = (m_+1)*(poly_order+1)
aeqSet_ = np.zeros((self.dim-self.position_dim, self.num_ang_variables))
beqSet_ = np.zeros((self.dim-self.position_dim, 1))
for dd in range(self.dim-self.position_dim):
aeqSet_[dd, idxStart_:idxEnd_] = self.tVec(tau_, d_, poly_order).flatten()/dTm_**d_#
beqSet_[dd] = X_[self.position_dim+dd]
else:
print("ERROR: please define polynomial for 'pos' or 'ang'")
return aeqSet_, beqSet_
def contiMat(self, m_, dmax, poly_order):
"""
        ensure the curve is continuous up to the dmax-th derivative,
        i.e. continuity is imposed for derivatives 0 to dmax
Args:
m_: index of the segment <= M-1
dmax: max conti-degree
poly_order: max poly order
"""
dmax_ = int(dmax)
if poly_order == self.pos_order:
aeq_ = np.zeros((dmax_+1, self.num_pos_variables))
else:
aeq_ = np.zeros((dmax_+1, self.num_ang_variables))
beq_ = np.zeros((dmax_+1, 1)) # different of the eq should be zero
idxStart_ = m_*(poly_order+1)
idxEnd_ = (m_+2)*(poly_order+1) # end of the next segment
dTm1_ = self.Ts[m_+1] - self.Ts[m_]
dTm2_ = self.Ts[m_+2] - self.Ts[m_+1]
for d in range(dmax_+1):
# the end of the first segment should be the same as the begin of the next segment at each derivative
aeq_[d, idxStart_:idxEnd_] = np.concatenate((self.tVec(1, d, poly_order)/dTm1_**d, - self.tVec(0, d, poly_order)/dTm2_**d), axis=0).flatten() #
return aeq_, beq_
def loosePinMatSet(self, pin_, poly_order):
"""
loose pin setup
"""
t_ = pin_['t']
X_ = pin_['X']
d_ = pin_['d']
m_, tau_ = self.findSegInteval(t_)
dTm_ = self.Ts[m_+1] - self.Ts[m_]
idxStart_ = m_*(poly_order+1)
idxEnd_ = (m_+1)*(poly_order+1)
if poly_order == self.pos_order:
aSet_ = np.zeros((self.position_dim, 2, self.num_pos_variables))
bSet_ = np.zeros((self.position_dim, 2, 1))
for dd in range(self.position_dim):
aSet_[dd, :, idxStart_:idxEnd_] = np.array([self.tVec(tau_, d_, poly_order)/dTm_**d_,-self.tVec(tau_, d_, poly_order)/dTm_**d_]).reshape(2, -1) #
bSet_[dd, :] = np.array([X_[dd, 1], -X_[dd, 0]]).reshape(2, -1)
else:
aSet_ = np.zeros((self.dim-self.position_dim, 2, self.num_ang_variables))
bSet_ = np.zeros((self.dim-self.position_dim, 2, 1))
for dd in range(self.dim-self.position_dim):
aSet_[dd, :, idxStart_:idxEnd_] = np.array([self.tVec(tau_, d_, poly_order)/dTm_**d_,-self.tVec(tau_, d_, poly_order)/dTm_**d_]).reshape(2, -1) #
bSet_[dd, :] = np.array([X_[dd, 1], -X_[dd, 0]]).reshape(2, -1)
return aSet_, bSet_
def coeff2endDerivatives(self, Aeq_):
assert Aeq_.shape[1] <= self.num_pos_variables, 'Pin + continuity constraints are already full. No dof for optim.'
mapMat_ = Aeq_.copy()
for m in range(self.num_segments):
freePinOrder_ = np.setdiff1d(np.arange(self.pos_order+1), self.fixPinOrder[m]) # free derivative (not defined by fixed pin)
dof_ = self.pos_order+1 - np.sum(self.segState[m, :2])
freeOrder = freePinOrder_[:int(dof_)]
for order in freeOrder:
virtualPin_ = {'t':self.Ts[m], 'X':np.zeros((self.position_dim, 1)), 'd':order}
aeqSet_, _ = self.fixPinMatSet(virtualPin_, 'pos')
aeq_ = aeqSet_[0] # only one dim is taken.
mapMat_ = np.concatenate((mapMat_, aeq_.reshape(-1, self.num_pos_variables)), axis=0)
return mapMat_
def getQPset(self, PolyType):
AeqSet = None
ASet = None
BeqSet = None
BSet = None
if PolyType == 'pos':
QSet = np.zeros((self.position_dim, self.num_pos_variables, self.num_pos_variables))
for dd in range(self.position_dim):
Q_ = np.zeros((self.num_pos_variables, self.num_pos_variables))
for d in range(1, self.pos_weight_mask.shape[0]+1):
if self.pos_weight_mask[d-1] > 0:
Qd_ = None
for m in range(self.num_segments):
dT_ = self.Ts[m+1] - self.Ts[m]
Q_m_ = self.IntDerSquard(d, self.pos_order)/dT_**(2*d-1)
if Qd_ is None:
Qd_ = Q_m_.copy()
else:
Qd_ = block_diag(Qd_, Q_m_)
Q_ = Q_ + self.pos_weight_mask[d-1]*Qd_
QSet[dd] = Q_
for m in range(self.num_segments):
## fix pin
if m in self.fixPinSet.keys():
for pin in self.fixPinSet[m]:
aeqSet, beqSet = self.fixPinMatSet(pin, 'pos')
if AeqSet is None:
AeqSet = aeqSet.reshape(self.position_dim, -1, self.num_pos_variables)
BeqSet = beqSet.reshape(self.position_dim, -1, 1)
else:
AeqSet = np.concatenate((AeqSet, aeqSet.reshape(self.position_dim, -1, self.num_pos_variables)), axis=1)
BeqSet = np.concatenate((BeqSet, beqSet.reshape(self.position_dim, -1, 1)), axis=1)
## continuity
if m < self.num_segments-1:
contiDof_ = min(self.maxContiOrder[0]+1, self.num_pos_variables+1-self.segState[m, 0])
self.segState[m, 1] = contiDof_
if contiDof_ != self.maxContiOrder[0]+1:
print('Connecting segment ({0},{1}) : lacks {2} dof for imposed {3} th continuity'.format(m, m+1, self.maxContiOrder[0]-contiDof_, self.maxContiOrder[0]))
if contiDof_ >0:
aeq, beq = self.contiMat(m, contiDof_-1, self.pos_order)
AeqSet = np.concatenate((AeqSet, aeq.reshape(1, -1, self.num_pos_variables).repeat(self.position_dim, axis=0)), axis=1)
BeqSet = np.concatenate((BeqSet, beq.reshape(1, -1, 1).repeat(self.position_dim, axis=0)), axis=1)
if m in self.loosePinSet.keys():
for pin in self.loosePinSet[m]:
aSet, bSet = self.loosePinMatSet(pin, self.pos_order)
if ASet is None:
ASet = aSet.copy()
BSet = bSet.copy()
else:
ASet = np.concatenate((ASet, aSet), axis=1)
BSet = np.concatenate((BSet, bSet), axis=1)
elif PolyType == 'ang':
QSet = np.zeros((self.dim-self.position_dim, self.num_ang_variables, self.num_ang_variables))
for dd in range(self.dim-self.position_dim):
Q_ = np.zeros((self.num_ang_variables, self.num_ang_variables))
for d in range(1, self.ang_weight_mask.shape[0]+1):
if self.ang_weight_mask[d-1] > 0:
Qd_ = None
for m in range(self.num_segments):
dT_ = self.Ts[m+1] - self.Ts[m]
Q_m_ = self.IntDerSquard(d, self.ang_order)/dT_**(2*d-1)
if Qd_ is None:
Qd_ = Q_m_.copy()
else:
Qd_ = block_diag(Qd_, Q_m_)
Q_ = Q_ + self.ang_weight_mask[d-1]*Qd_
QSet[dd] = Q_
for m in range(self.num_segments):
## fix pin
if m in self.fixPinSet.keys():
for pin in self.fixPinSet[m]:
aeqSet, beqSet = self.fixPinMatSet(pin, 'ang')
if AeqSet is None and aeqSet is not None:
AeqSet = aeqSet.reshape(self.dim-self.position_dim, -1, self.num_ang_variables)
BeqSet = beqSet.reshape(self.dim-self.position_dim, -1, 1)
elif aeqSet is not None:
AeqSet = np.concatenate((AeqSet, aeqSet.reshape(self.dim-self.position_dim, -1, self.num_ang_variables)), axis=1)
BeqSet = np.concatenate((BeqSet, beqSet.reshape(self.dim-self.position_dim, -1, 1)), axis=1)
else:
pass
# continuity
if m < self.num_segments-1:
contiDof_ = min(self.maxContiOrder[1]+1, self.num_ang_variables+1-self.segState[m, 0])
self.segState[m, 2] = contiDof_
if contiDof_ != self.maxContiOrder[1]+1:
print('Connecting segment ({0},{1}) : lacks {2} dof for imposed {3} th continuity'.format(m, m+1, self.maxContiOrder[1]-contiDof_, self.maxContiOrder[1]))
if contiDof_ >0:
aeq, beq = self.contiMat(m, contiDof_-1, self.ang_order)
AeqSet = np.concatenate((AeqSet, aeq.reshape(1, -1, self.num_ang_variables).repeat(self.dim-self.position_dim, axis=0)), axis=1)
BeqSet = np.concatenate((BeqSet, beq.reshape(1, -1, 1).repeat(self.dim-self.position_dim, axis=0)), axis=1)
else:
print("ERROR: please use 'pos' or 'ang'")
return QSet, ASet, BSet, AeqSet, BeqSet
def mapQP(self, QSet_, ASet_, BSet_, AeqSet_, BeqSet_):
        Afp_ = self.coeff2endDerivatives(AeqSet_[0]) # since all Aeq in each dim are the same
AfpInv_ = np.linalg.inv(Afp_)
Nf_ = int(AeqSet_[0].shape[0])
Qtemp_ = np.dot(np.dot(AfpInv_.T, QSet_[0]), AfpInv_)
# Qff_ = Qtemp_[:Nf_, :Nf_]
Qfp_ = Qtemp_[:Nf_, Nf_:]
Qpf_ = Qtemp_[Nf_:, :Nf_]
Qpp_ = Qtemp_[Nf_:, Nf_:]
QSet = np.zeros((self.position_dim, self.num_pos_variables-Nf_, self.num_pos_variables-Nf_))
HSet = np.zeros((self.position_dim, self.num_pos_variables-Nf_))
# check ASet ?
if ASet_ is not None:
ASet = np.zeros((self.position_dim, ASet_.shape[1], self.num_pos_variables-Nf_))
BSet = BSet_.copy()
dp_ = None
for dd in range(self.position_dim):
df_ = BeqSet_[dd]
QSet[dd] = 2*Qpp_
HSet[dd] = np.dot(df_.T, (Qfp_+Qpf_.T))
A_ = np.dot(ASet_[dd], AfpInv_)
ASet[dd] = A_[:, Nf_:]
BSet[dd] = BSet_[dd] - np.dot(A_[:, :Nf_], df_)
else:
ASet = None
BSet = None
# directly solving the problem without making an optimization problem
dp_ =
|
np.zeros((self.position_dim, self.num_pos_variables-Nf_))
|
numpy.zeros
|
# xyz Dec 2017
# Do 3d point cloud sample and group by block index
import tensorflow as tf
import os,sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(BASE_DIR+'/../utils')
from block_data_prep_util import GlobalSubBaseBLOCK
import geometric_util as geo_util
import geometric_tf_util as geo_tf_util
import tf_util
import numpy as np
DEBUG_TMP = True
# IS_merge_blocks_while_fix_bmap should be set exactly based on the bidxmap
# configuration. This is originally set in NETCONFIG. But the configuration is
# not obtained here from bxmap automatically. Should be set manually.
IS_merge_blocks_while_fix_bmap = 1
IsTolerateBug = True
InDropMethod = 'set0'
'''
Checking list:
new_xyz
'''
def shape_str(tensor_ls):
shape_str = ''
for i in range(len(tensor_ls)):
if tensor_ls[i] == None:
shape_str += '\t None'
else:
shape_str += '\t' + str( [s.value for s in tensor_ls[i].shape] )
if i < len(tensor_ls)-1:
shape_str += '\n'
return shape_str
def get_flatten_bidxmap_concat( flatten_bidxmaps, flatten_bm_extract_idx, cascade_id ):
'''
flatten_bidxmaps: (2, 26368, 2)
flatten_bm_extract_idx:
array([[ 0, 0],
[25600, 2],
[26112, 2],
[26368, 2]], dtype=int32)
'''
batch_size = flatten_bidxmaps.get_shape()[0].value
start = flatten_bm_extract_idx[cascade_id]
end = flatten_bm_extract_idx[cascade_id+1]
flatten_bidxmap_i = flatten_bidxmaps[ :,start[0]:end[0],: ]
batch_idx = tf.reshape( tf.range(batch_size),[batch_size,1,1] )
flatten_bidxmap_i_shape1 = flatten_bidxmap_i.get_shape()[1].value
batch_idx = tf.tile( batch_idx,[1,flatten_bidxmap_i_shape1,1] )
flatten_bidxmap_i_concat = tf.concat( [batch_idx,flatten_bidxmap_i],axis=-1,name="flatten_bidxmap%d_concat"%(cascade_id) )
return flatten_bidxmap_i_concat
def pointnet_sa_module(cascade_id, xyz, points, bidmap, mlp_configs, block_bottom_center_mm, configs, sgf_config_pls,
is_training, bn_decay,scope,bn=True, tnet_spec=None, use_xyz=True, IsShowModel=False):
'''
Input cascade_id==0:
xyz is grouped_points: (batch_size,nsubblock0,npoint_subblock0,6)
points: None
bidmap: None
Input cascade_id==1:
xyz: (batch_size,nsubblock0,3)
points: (batch_size,nsubblock0,channel)
bidmap: (batch_size,nsubblock1,npoint_subblock1)
Medium cascade_id==1:
grouped_xyz: (batch_size,nsubblock1,npoint_subblock1,3)
new_xyz: (batch_size,nsubblock1,3)
group_points: (batch_size,nsubblock1,npoint_subblock1,channel)
output cascade_id==1:
new_xyz: (batch_size,nsubblock1,3)
new_points: (batch_size,nsubblock1,channel)
'''
block_bottom_center_mm = tf.cast(block_bottom_center_mm, tf.float32, name='block_bottom_center_mm') # gpu_0/sa_layer3/block_bottom_center_mm:0
batch_size = xyz.get_shape()[0].value
with tf.variable_scope(scope) as sc:
cascade_num = configs['flatten_bm_extract_idx'].shape[0]-1 # include global here (Note: cascade_num does not include global in block_pre_util )
assert configs['sub_block_step_candis'].size == cascade_num-1
if cascade_id==0:
indrop_keep_mask = tf.get_default_graph().get_tensor_by_name('indrop_keep_mask:0') # indrop_keep_mask:0
assert len(xyz.shape) == 3
if bidmap==None:
grouped_xyz = tf.expand_dims( xyz, 1 )
grouped_points = tf.expand_dims( points, 1 )
new_xyz = None
valid_mask = None
else:
batch_idx = tf.reshape( tf.range(batch_size),[batch_size,1,1,1] )
nsubblock = bidmap.get_shape()[1].value
npoint_subblock = bidmap.get_shape()[2].value
batch_idx_ = tf.tile( batch_idx,[1,nsubblock,npoint_subblock,1] )
bidmap = tf.expand_dims( bidmap,axis=-1, name='bidmap' )
bidmap_concat = tf.concat( [batch_idx_,bidmap],axis=-1, name='bidmap_concat' ) # gpu_0/sa_layer0/bidmap_concat:0
# The value for invalid item in bidmap is -17.
            # On GPU, the corresponding grouped_xyz and grouped_points are 0.
            # This does NOT work on CPU !!!
            # Invalid indices come from merge_blocks_while_fix_bmap, which sets
            # point_indices_f for invalid points to
            # NETCONFIG['redundant_points_in_block'] (should be set < -500)
valid_mask = tf.greater( bidmap, tf.constant(-500,tf.int32), 'valid_mask' ) # gpu_0/sa_layer0/valid_mask:0
grouped_xyz = tf.gather_nd(xyz, bidmap_concat, name='grouped_xyz') # gpu_0/sa_layer0/grouped_xyz:0
grouped_points = tf.gather_nd(points,bidmap_concat, name='group_points')
if cascade_id==0 and len(indrop_keep_mask.get_shape()) != 0:
grouped_indrop_keep_mask = tf.gather_nd( indrop_keep_mask, bidmap_concat, name='grouped_indrop_keep_mask' ) # gpu_0/sa_layer0/grouped_indrop_keep_mask:0
# new_xyz is the "voxel center" or "mean position of points in the voxel"
if configs['mean_grouping_position'] and (not mlp_configs['block_learning']=='3DCNN'):
new_xyz = tf.reduce_mean(grouped_xyz,-2)
else:
new_xyz = block_bottom_center_mm[:,:,3:6] * tf.constant( 0.001, tf.float32 )
# the mid can be mean or block center, decided by configs['mean_grouping_position']
sub_block_mid = tf.expand_dims( new_xyz,-2, name = 'sub_block_mid' ) # gpu_1/sa_layer0/sub_block_mid
global_block_mid = tf.reduce_mean( sub_block_mid,1, keepdims=True, name = 'global_block_mid' )
grouped_xyz_submid = grouped_xyz - sub_block_mid
grouped_xyz_glomid = grouped_xyz - global_block_mid
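            # Assemble the xyz feature channels: raw coordinates and/or coordinates
            # re-centered on the sub-block mid and on the global block mid, as
            # selected by configs['xyz_elements'].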
grouped_xyz_feed = []
if 'raw' in configs['xyz_elements']:
grouped_xyz_feed.append( grouped_xyz )
if 'sub_mid' in configs['xyz_elements']:
grouped_xyz_feed.append( grouped_xyz_submid )
if 'global_mid' in configs['xyz_elements']:
grouped_xyz_feed.append( grouped_xyz_glomid )
grouped_xyz_feed = tf.concat( grouped_xyz_feed, -1 )
if cascade_id==0:
            # xyz must be the first element in feed_data_elements!
grouped_points = tf.concat( [grouped_xyz_feed, grouped_points[...,3:]],-1 )
if len(indrop_keep_mask.get_shape()) != 0:
if InDropMethod == 'set1st':
# set all the dropped item as the first item
tmp1 = tf.multiply( grouped_points, grouped_indrop_keep_mask )
points_1st = grouped_points[:,:,0:1,:]
points_1st = tf.tile( points_1st, [1,1,grouped_points.shape[2],1] )
indrop_mask_inverse = 1 - grouped_indrop_keep_mask
tmp2 = indrop_mask_inverse * points_1st
grouped_points = tf.add( tmp1, tmp2, name='grouped_points_droped' ) # gpu_0/sa_layer0/grouped_points_droped
#tf.add_to_collection( 'check', grouped_points )
elif InDropMethod == 'set0':
valid_mask = tf.logical_and( valid_mask, tf.equal(grouped_indrop_keep_mask,0), name='valid_mask_droped' ) # gpu_1/sa_layer0/valid_mask_droped
elif use_xyz:
grouped_points = tf.concat([grouped_xyz_feed, grouped_points],axis=-1)
tf.add_to_collection( 'grouped_xyz', grouped_xyz )
tf.add_to_collection( 'grouped_xyz_submid', grouped_xyz_submid )
tf.add_to_collection( 'grouped_xyz_glomid', grouped_xyz_glomid )
if cascade_id>0 and use_xyz and (not cascade_id==cascade_num-1):
grouped_points = tf.concat([grouped_xyz_feed, grouped_points],axis=-1)
nsample = grouped_points.get_shape()[2].value # the conv kernel size
if IsShowModel:
print('\n\npointnet_sa_module cascade_id:%d\n xyz:%s\n grouped_xyz:%s\n new_xyz:%s\n grouped_points:%s\n nsample:%d'%(
cascade_id, shape_str([xyz]), shape_str([grouped_xyz]), shape_str([new_xyz]), shape_str([grouped_points]), nsample))
new_points = grouped_points
    if valid_mask is not None:
new_points = new_points * tf.cast(valid_mask[:,:,:,0:1], tf.float32)
if 'growth_rate'in mlp_configs['point_encoder'][cascade_id]:
new_points = tf_util.dense_net( new_points, mlp_configs['point_encoder'][cascade_id], bn, is_training, bn_decay,\
scope = 'dense_cascade_%d_point_encoder'%(cascade_id) , is_show_model = IsShowModel )
else:
for i, num_out_channel in enumerate(mlp_configs['point_encoder'][cascade_id]):
new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
padding='VALID', stride=[1,1],
bn=bn, is_training=is_training,
scope='conv%d'%(i), bn_decay=bn_decay)
if configs['Cnn_keep_prob']<1:
if ( not configs['only_last_layer_ineach_cascade'] ) or i == len(mlp_configs['point_encoder'][cascade_id])-1:
new_points = tf_util.dropout(new_points, keep_prob=configs['Cnn_keep_prob'], is_training=is_training, scope='dropout', name='cnn_dp%d'%(i))
if IsShowModel:
print('point encoder1 %d, new_points:%s'%(i, shape_str([new_points])))
if cascade_id == 0:
root_point_features = new_points
#if InDropMethod == 'set0':
# if len(indrop_keep_mask.get_shape()) != 0:
# new_points = tf.identity(new_points,'points_before_droped') # gpu_0/sa_layer0/points_before_droped:0
# new_points = tf.multiply( new_points, grouped_indrop_keep_mask, name='droped_points' ) # gpu_0/sa_layer0/droped_points:0
else:
root_point_features = None
pooling = mlp_configs['block_learning']
if pooling == '3DCNN' and ( cascade_id == 0):
pooling = 'max'
#if pooling=='avg':
# new_points = tf_util.avg_pool2d(new_points, [1,nsample], stride=[1,1], padding='VALID', scope='avgpool1')
#elif pooling=='weighted_avg':
# with tf.variable_scope('weighted_avg1'):
# dists = tf.norm(grouped_xyz,axis=-1,ord=2,keep_dims=True)
# exp_dists = tf.exp(-dists * 5)
# weights = exp_dists/tf.reduce_sum(exp_dists,axis=2,keep_dims=True) # (batch_size, npoint, nsample, 1)
# new_points *= weights # (batch_size, npoint, nsample, mlps_0[-1])
# new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
if pooling=='max':
        # Even though grouped_points and grouped_xyz are 0 for invalid points, the
        # value after the MLP will not be. It has to be forced to 0 before pooling.
        if valid_mask is not None:
new_points = new_points * tf.cast(valid_mask[:,:,:,0:1], tf.float32)
new_points = tf.identity( new_points, 'points_before_max' ) # gpu_0/sa_layer0/points_before_max
new_points = tf.reduce_max(new_points, axis=[2], keepdims=True, name='points_after_max')
#elif pooling=='min':
# new_points = tf_util.max_pool2d(-1*new_points, [1,nsample], stride=[1,1], padding='VALID', scope='minpool1')
#elif pooling=='max_and_avg':
# avg_points = tf_util.max_pool2d(new_points, [1,nsample], stride=[1,1], padding='VALID', scope='maxpool1')
# max_points = tf_util.avg_pool2d(new_points, [1,nsample], stride=[1,1], padding='VALID', scope='avgpool1')
# new_points = tf.concat([avg_points, max_points], axis=-1)
elif pooling == '3DCNN':
new_points = grouped_points_to_voxel_points( cascade_id, new_points, valid_mask, block_bottom_center_mm, configs, grouped_xyz, IsShowVoxelModel=IsShowModel )
if IsShowModel:
print('voxel points:%s'%(shape_str([new_points])))
for i, num_out_channel in enumerate( mlp_configs['voxel_channels'][cascade_id] ):
kernel_i = [mlp_configs['voxel_kernels'][cascade_id][i]]*3
stride_i = [mlp_configs['voxel_strides'][cascade_id][i]]*3
if new_points.shape[1]%2 == 0:
padding_i = np.array([[0,0],[1,0],[1,0],[1,0],[0,0]]) * mlp_configs['voxel_paddings'][cascade_id][i]
else:
padding_i = np.array([[0,0],[1,1],[1,1],[1,1],[0,0]]) * mlp_configs['voxel_paddings'][cascade_id][i]
new_points = tf.pad( new_points, padding_i, "CONSTANT" )
if type(num_out_channel) == int:
new_points = tf_util.conv3d(new_points,
num_out_channel,
kernel_i,
scope = '3dconv_%d'%(i),
stride = stride_i,
padding = 'VALID',
bn=bn,
is_training = is_training,
bn_decay = bn_decay,
name = 'points_3dcnn_%d'%(i) )
if IsShowModel:
print('block learning by 3dcnn %d, new_points:%s'%(i, shape_str([new_points])))
elif num_out_channel == 'max':
new_points = tf_util.max_pool3d( new_points,
kernel_i,
scope = '3dmax_%d'%(i),
stride = stride_i,
padding = 'VALID')
if IsShowModel:
print('block learning max pooling %d, new_points:%s'%(i, shape_str([new_points])))
elif num_out_channel == 'avg':
new_points = tf_util.avg_pool3d( new_points,
kernel_i,
scope = '3dmax_%d'%(i),
stride = stride_i,
padding = 'VALID')
if IsShowModel:
print('block learning avg pooling %d, new_points:%s'%(i, shape_str([new_points])))
# gpu_0/sa_layer1/3dconv_0/points_3dcnn_0:0
if configs['Cnn_keep_prob']<1:
if ( not configs['only_last_layer_ineach_cascade'] ) or i == len(mlp_configs['voxel_channels'][cascade_id])-1:
new_points = tf_util.dropout(new_points, keep_prob=configs['Cnn_keep_prob'], is_training=is_training, scope='dropout', name='3dcnn_dp%d'%(i))
# gpu_0/sa_layer4/3dconv_0/points_3dcnn_0:0
new_points = tf.squeeze( new_points, [1,2,3] )
new_points = tf.reshape( new_points, [batch_size, -1, 1, new_points.shape[-1].value] )
if IsShowModel:
print('after %s, new_points:%s'%( pooling, shape_str([new_points])))
if 'growth_rate'in mlp_configs['block_encoder'][cascade_id]:
new_points = tf_util.dense_net( new_points, mlp_configs['block_encoder'][cascade_id], bn, is_training, bn_decay, scope = 'dense_cascade_%d_block_encoder'%(cascade_id) , is_show_model = IsShowModel )
else:
for i, num_out_channel in enumerate(mlp_configs['block_encoder'][cascade_id]):
new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
padding='VALID', stride=[1,1],
bn=bn, is_training=is_training,
scope='conv_post_%d'%(i), bn_decay=bn_decay)
if configs['Cnn_keep_prob']<1:
if ( not configs['only_last_layer_ineach_cascade'] ) or i == len(mlp_configs['block_encoder'][cascade_id])-1:
new_points = tf_util.dropout(new_points, keep_prob=configs['Cnn_keep_prob'], is_training=is_training, scope='dropout', name='cnn_dp%d'%(i))
if IsShowModel:
print('block encoder %d, new_points:%s'%(i, shape_str([new_points])))
# (2, 512, 1, 64)
new_points = tf.squeeze(new_points, [2]) # (batch_size, npoints, mlps_1[-1])
if IsShowModel:
print('pointnet_sa_module return\n new_xyz: %s\n new_points:%s\n\n'%(shape_str([new_xyz]),shape_str([new_points])))
#import pdb;pdb.set_trace()
# (2, 512, 64)
return new_xyz, new_points, root_point_features
def grouped_points_to_voxel_points(cascade_id, new_points, valid_mask, block_bottom_center_mm, configs, grouped_xyz, IsShowVoxelModel=False):
cascade_num = configs['sub_block_step_candis'].size+1
block_bottom_center_mm = tf.identity( block_bottom_center_mm,'block_bottom_center_mm' ) # gpu_0/sa_layer3/block_bottom_center_mm:0
new_points = tf.identity(new_points,name='points_tov') # gpu_0/sa_layer4/points_tov:0
c500 = tf.constant([500],tf.float32)
c1000 = tf.constant([1000],tf.float32)
c1 = tf.constant([1,1,1],tf.float32)
step_last_org = configs['sub_block_step_candis'][cascade_id-1] * c1
step_last = tf.minimum( step_last_org, configs['max_step_stride'], name='step_last' ) # gpu_0/sa_layer1/step_last:0
step_last = tf.expand_dims(step_last,1)
stride_last_org = configs['sub_block_stride_candis'][cascade_id-1] * c1
stride_last = tf.minimum( stride_last_org, configs['max_step_stride'], name='stride_last' ) # gpu_0/sa_layer1/stride_last:0
stride_last = tf.expand_dims(stride_last,1)
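    # step_last / stride_last are the voxel size and spacing of the previous cascade;
    # they are used below to convert point positions (mm) into integer voxel indices.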
voxel_bottom_xyz_mm = block_bottom_center_mm[:,:,0:3]
    # NOTE: c1=[1,1,1]*0.5 only holds when the sh5 step is the same in all three
    # dimensions; otherwise the stride at each cascade may also change.
min_point_bottom_xyz_mm = voxel_bottom_xyz_mm
min_point_bottom_xyz_mm = tf.expand_dims( min_point_bottom_xyz_mm, -2, name='min_point_bottom_xyz_mm' ) # gpu_0/sa_layer1/min_point_bottom_xyz_mm:0
grouped_bottom_xyz_mm = grouped_xyz * c1000 - step_last * c500 # gpu_0/sa_layer1/sub_1:0
# For ExtraGlobal layer, the step_last may be cropped, thus the point_indices_f is smaller.
point_indices_f = (grouped_bottom_xyz_mm - min_point_bottom_xyz_mm) / (stride_last*c1000) # gpu_0/sa_layer3/div:0
point_indices_f = tf.identity( point_indices_f, name='point_indices_f' ) # gpu_0/sa_layer4/point_indices_f:0
    # Invalid indices come from merge_blocks_while_fix_bmap;
    # point_indices_f for invalid points is set to
    # NETCONFIG['redundant_points_in_block'] (should be set < -500)
invalid_mask = tf.equal( valid_mask, False )
invalid_mask = tf.tile( invalid_mask, [1,1,1,3], name='invalid_mask') # gpu_0/sa_layer1/valid_mask:0
point_indices_f = tf.where( invalid_mask, tf.ones(shape=point_indices_f.shape,dtype=tf.float32)*tf.constant( -9999,dtype=tf.float32), point_indices_f )
point_indices = tf.rint( point_indices_f,'point_indices' ) # gpu_0/sa_layer3/point_indices:0
point_indices_checkmin = tf.where( invalid_mask, tf.ones(shape=point_indices_f.shape,dtype=tf.float32)*tf.constant(999,dtype=tf.float32), point_indices, name='point_indices_checkmin' )
# ------------------------------------------------------------------
# check indice err
Max_Assert_0 = 1e-4
point_indices_err = tf.abs( point_indices - point_indices_f, name='point_indices_err' ) # gpu_0/sa_layer3/point_indices_err:0
point_indices_maxerr = tf.reduce_max( point_indices_err, name='point_indices_maxerr_xyz' ) # gpu_0/sa_layer3/point_indices_maxerr_xyz:0
check_point_indices = tf.assert_less( point_indices_maxerr, Max_Assert_0, data=[cascade_id, point_indices_maxerr],
message='point indices in voxel check on cascade %d '%(cascade_id), name='check_point_indices' )
tf.add_to_collection( 'check', check_point_indices )
# check indice scope:
# Actually only works when IS_merge_blocks_while_fix_bmap=False
Max_Assert = 1e-4+5
batch_size = new_points.shape[0].value
block_num = new_points.shape[1].value
point_num = new_points.shape[2].value
channel_num = new_points.shape[3].value
if configs['dataset_name'] == 'MODELNET40':
IsTolerateBug = 2
IsTolerateBug = 0
else:
IsTolerateBug = 1
if cascade_id==cascade_num-1:
        # Only in this global cascade can the steps and strides differ between
        # dimensions.
if configs['dataset_name'] == 'MODELNET40' and configs['global_step'][0]==3.5:
configs['global_step'] = np.array( [2.3,2.3,2.3] )
max_indice_f = ( np.abs(configs['global_step']) - np.array([1,1,1])*configs['sub_block_step_candis'][cascade_id-1] ) / (np.array([1,1,1])*configs['sub_block_stride_candis'][cascade_id-1])
max_indice_v = np.rint( max_indice_f )
if configs['dataset_name'] != 'MODELNET40':
assert np.sum(np.abs(max_indice_f-max_indice_v)) < Max_Assert
max_indice_v += 1* IsTolerateBug
voxel_size = max_indice_v.astype(np.int32)+1
voxel_shape = [batch_size, block_num, voxel_size[0], voxel_size[1], voxel_size[2], channel_num]
point_indices_checkmin = tf.identity(point_indices_checkmin, 'point_indices_checkmin_A') #
point_indices_checkmin += (max_indice_v+2*IsTolerateBug) * IS_merge_blocks_while_fix_bmap
point_indices_checkmin = tf.identity(point_indices_checkmin, 'point_indices_checkmin_B') # gpu_1/sa_layer4/point_indices_checkmin_B:0
point_indices, first_unique_masks_global = unique_nd( point_indices )
for i in range(3):
real_max = tf.reduce_max(point_indices[:,:,:,i])
check_max_indice = tf.assert_less( real_max - max_indice_v[i], tf.constant(Max_Assert + IS_merge_blocks_while_fix_bmap * max_indice_v[i], dtype=tf.float32 ),
data=[cascade_id, i, real_max, max_indice_v[i]], name='check_max_indice_'+str(i) )
tf.add_to_collection( 'check', check_max_indice )
if IsShowVoxelModel:
print( 'cascade:%d (global) \tvoxel size:%s'%(cascade_id, voxel_size) )
else:
max_indice_f = ( configs['sub_block_step_candis'][cascade_id] - configs['sub_block_step_candis'][cascade_id-1] ) / configs['sub_block_stride_candis'][cascade_id-1]
max_indice_v = np.rint( max_indice_f ).astype(np.float32)
assert abs(max_indice_f-max_indice_v) < Max_Assert + IS_merge_blocks_while_fix_bmap * max_indice_v
voxel_size = max_indice_v.astype(np.int32)+1
voxel_shape = [batch_size, block_num, voxel_size, voxel_size, voxel_size, channel_num]
max_indice_1 = tf.constant(max_indice_v,tf.float32)
real_max = tf.reduce_max(point_indices)
check_max_indice = tf.assert_less( real_max - max_indice_1, tf.constant(Max_Assert + IS_merge_blocks_while_fix_bmap * max_indice_v, tf.float32 ),
data=[cascade_id, real_max, max_indice_1], name='check_max_indice' )
tf.add_to_collection( 'check', check_max_indice )
point_indices_checkmin += (max_indice_v) * IS_merge_blocks_while_fix_bmap + IsTolerateBug*1
if IsShowVoxelModel:
print( 'cascade:%d \tvoxel size:%s'%(cascade_id, voxel_size) )
point_indices_min = tf.reduce_min(point_indices_checkmin, name='point_indices_min') # gpu_0/sa_layer4/point_indices_min:0
check_min_indice = tf.assert_less( tf.constant(-Max_Assert, tf.float32),
point_indices_min, data=[cascade_id,point_indices_min], name='check_min_indice' )
tf.add_to_collection( 'check', check_min_indice )
# ------------------------------------------------------------------
point_indices = tf.cast( point_indices, tf.int32, name='point_indices' ) # gpu_0/sa_layer1/point_indices_1:0
batch_idx = tf.reshape( tf.range(batch_size),[batch_size,1,1,1] )
batch_idx = tf.tile( batch_idx, [1,block_num,point_num,1] )
bn_idx = tf.reshape( tf.range(block_num),[1,block_num,1,1] )
bn_idx = tf.tile( bn_idx, [batch_size,1,point_num,1] )
point_indices = tf.concat( [batch_idx, bn_idx, point_indices], -1, name='point_indices' ) # gpu_0/sa_layer4/point_indices_1:0
    # Note: if point_indices contains replicated items, tf.scatter_nd accumulates the
    # corresponding values, which leads to errors!
    # For the global cascade, replicated indices can come from replicated aim blocks of
    # the last gs cascade; this should be solved while generating point_indices for global in this function.
    # For other cascades, replicated indices can come from replicated points inside an
    # aim block in the bidxmap file; this should be solved by adding np.unique while merging blocks in bidxmap.
voxel_points = tf.scatter_nd( point_indices, new_points, shape=voxel_shape, name='voxel_points' ) # gpu_0/sa_layer1/voxel_points:0
    # check voxel: takes a long time; only perform for debugging
check_points = tf.gather_nd( voxel_points, point_indices, name='check_points' ) # gpu_0/sa_layer4/check_points:0
scatter_err = tf.abs( check_points - new_points) # gpu_0/sa_layer1/scatter_err:0
scatter_err = scatter_err * tf.cast(invalid_mask[:,:,:,0:1], tf.float32)
scatter_err = tf.identity( scatter_err, name='scatter_err' )
scatter_err_max = tf.reduce_max( scatter_err, name = 'scatter_err_max') # gpu_0/sa_layer1/scatter_err_max:0
points_check = tf.assert_less( scatter_err_max, Max_Assert, data=[cascade_id, scatter_err_max], name='scatter_check' )
if DEBUG_TMP and not IS_merge_blocks_while_fix_bmap:
tf.add_to_collection( 'check', points_check )
# ------------------------------------------------------------------
new_voxel_shape = tf.concat( [ tf.constant([batch_size*block_num],tf.int32), voxel_shape[2:6] ],0 )
voxel_points = tf.reshape( voxel_points, shape = new_voxel_shape )
if configs['aug_types']['RotateVox']:
voxel_points = rotate_voxel_randomly( voxel_points, configs )
return voxel_points
def rotate_voxel_randomly( voxel_points, configs ):
voxel_shape = np.array( voxel_points.shape[1:4].as_list() )
grid = np.indices( voxel_shape )
grid = np.transpose( grid,(1,2,3,0) )
version = 'tf'
#---------------------------------------------------------------------------
if version == 'numpy':
rz_angle = np.pi * 0.5
R = np.rint( geo_util.Rz( rz_angle ) ).astype(np.int32)
grid_r = np.matmul( grid, R )
        # The rotation center is not the voxel center but the bottom corner, so an offset is required.
        offset_mask = np.sum(R, 0)
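# --- hedged illustration (not from the original repo) -------------------------
# A minimal NumPy sketch of the 90-degree z-rotation being set up above: voxel
# indices are rotated with an integer Rz matrix, and the dimensions whose sign
# flips are shifted back so the grid starts at the bottom corner again. It
# assumes a cubic grid and a standard Rz; the real code uses geo_util.Rz.
import numpy as np

def rotate_voxel_z90_sketch(voxel):
    # voxel: (X, Y, Z, C), with X == Y
    shape = np.array(voxel.shape[0:3])
    grid = np.transpose(np.indices(shape), (1, 2, 3, 0))               # (X, Y, Z, 3)
    R = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=np.int32)   # Rz(90 deg), rounded
    grid_r = np.matmul(grid, R)
    offset_mask = np.sum(R, 0) < 0          # dimensions mirrored to negative indices
    grid_r = grid_r + offset_mask * (shape - 1)
    return voxel[grid_r[..., 0], grid_r[..., 1], grid_r[..., 2]]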
import pandas as pd
import math
import numpy as np
output_filename = 'translationoutput.csv'
input_filename = 'puf2.csv'
x = pd.read_csv(input_filename)
global dim
dim = len(x)
names = x.columns.values
y = {}
for n in names:
y[n] = np.array(x[n])
AGIR1 = y['agir1']
DSI = y['dsi']
EFI = y['efi']
EIC = y['eic']
ELECT = y['elect']
FDED = y['fded']
FLPDYR = y['flpdyr']
FLPDMO = y['flpdmo']
f2441 = y['f2441']
f3800 = y['f3800']
f6251 = y['f6251']
f8582 = y['f8582']
f8606 = y['f8606']
IE = y['ie']
MARS = y['mars']
MIdR = y['midr']
n20 = y['n20']
n24 = y['n24']
n25 = y['n25']
PREP = y['prep']
SCHB = y['schb']
SCHCF = y['schcf']
SCHE = y['sche']
STATE = y['state']
TFORM = y['tform']
TXST = y['txst']
XFPT = y['xfpt']
XFST = y['xfst']
XOCAH = y['xocah']
XOCAWH = y['xocawh']
XOODEP = y['xoodep']
XOPAR = y['xopar']
XTOT = y['xtot']
e00200 = y['e00200']
e00300 = y['e00300']
e00400 = y['e00400']
e00600 = y['e00600']
e00650 = y['e00650']
e00700 = y['e00700']
e00800 = y['e00800']
e00900 = y['e00900']
e01000 = y['e01000']
e01100 = y['e01100']
e01200 = y['e01200']
e01400 = y['e01400']
e01500 = y['e01500']
e01700 = y['e01700']
e02000 = y['e02000']
e02100 = y['e02100']
e02300 = y['e02300']
e02400 = y['e02400']
e02500 = y['e02500']
e03150 = y['e03150']
e03210 = y['e03210']
e03220 = y['e03220']
e03230 = y['e03230']
e03260 = y['e03260']
e03270 = y['e03270']
e03240 = y['e03240']
e03290 = y['e03290']
e03300 = y['e03300']
e03400 = y['e03400']
e03500 = y['e03500']
e00100 = y['e00100']
p04470 = y['p04470']
e04250 = y['e04250']
e04600 = y['e04600']
e04800 = y['e04800']
e05100 = y['e05100']
e05200 = y['e05200']
e05800 = y['e05800']
e06000 = y['e06000']
e06200 = y['e06200']
e06300 = y['e06300']
e09600 = y['e09600']
e07180 = y['e07180']
e07200 = y['e07200']
e07220 = y['e07220']
e07230 = y['e07230']
e07240 = y['e07240']
e07260 = y['e07260']
e07300 = y['e07300']
e07400 = y['e07400']
e07600 = y['e07600']
p08000 = y['p08000']
e07150 = y['e07150']
e06500 = y['e06500']
e08800 = y['e08800']
e09400 = y['e09400']
e09700 = y['e09700']
e09800 = y['e09800']
e09900 = y['e09900']
e10300 = y['e10300']
e10700 = y['e10700']
e10900 = y['e10900']
e59560 = y['e59560']
e59680 = y['e59680']
e59700 = y['e59700']
e59720 = y['e59720']
e11550 = y['e11550']
e11070 = y['e11070']
e11100 = y['e11100']
e11200 = y['e11200']
e11300 = y['e11300']
e11400 = y['e11400']
e11570 = y['e11570']
e11580 = y['e11580']
e11581 = y['e11581']
e11582 = y['e11582']
e11583 = y['e11583']
e10605 = y['e10605']
e11900 = y['e11900']
e12000 = y['e12000']
e12200 = y['e12200']
e17500 = y['e17500']
e18425 = y['e18425']
e18450 = y['e18450']
e18500 = y['e18500']
e19200 = y['e19200']
e19550 = y['e19550']
e19800 = y['e19800']
e20100 = y['e20100']
e19700 = y['e19700']
e20550 = y['e20550']
e20600 = y['e20600']
e20400 = y['e20400']
e20800 = y['e20800']
e20500 = y['e20500']
e21040 = y['e21040']
p22250 = y['p22250']
e22320 = y['e22320']
e22370 = y['e22370']
p23250 = y['p23250']
e24515 = y['e24515']
e24516 = y['e24516']
e24518 = y['e24518']
e24535 = y['e24535']
e24560 = y['e24560']
e24598 = y['e24598']
e24615 = y['e24615']
e24570 = y['e24570']
p25350 = y['p25350']
e25370 = y['e25370']
e25380 = y['e25380']
p25470 = y['p25470']
p25700 = y['p25700']
e25820 = y['e25820']
e25850 = y['e25850']
e25860 = y['e25860']
e25940 = y['e25940']
e25980 = y['e25980']
e25920 = y['e25920']
e25960 = y['e25960']
e26110 = y['e26110']
e26170 = y['e26170']
e26190 = y['e26190']
e26160 = y['e26160']
e26180 = y['e26180']
e26270 = y['e26270']
e26100 = y['e26100']
e26390 = y['e26390']
e26400 = y['e26400']
e27200 = y['e27200']
e30400 = y['e30400']
e30500 = y['e30500']
e32800 = y['e32800']
e33000 = y['e33000']
e53240 = y['e53240']
e53280 = y['e53280']
e53410 = y['e53410']
e53300 = y['e53300']
e53317 = y['e53317']
e53458 = y['e53458']
e58950 = y['e58950']
e58990 = y['e58990']
p60100 = y['p60100']
p61850 = y['p61850']
e60000 = y['e60000']
e62100 = y['e62100']
e62900 = y['e62900']
e62720 = y['e62720']
e62730 = y['e62730']
e62740 = y['e62740']
p65300 = y['p65300']
p65400 = y['p65400']
e68000 = y['e68000']
e82200 = y['e82200']
t27800 = y['t27800']
e27860 = y['s27860']
p27895 = y['p27895']
e87500 = y['e87500']
e87510 = y['e87510']
e87520 = y['e87520']
e87530 = y['e87530']
e87540 = y['e87540']
e87550 = y['e87550']
RECID = y['recid']
s006 = y['s006']
s008 = y['s008']
s009 = y['s009']
WSAMP = y['wsamp']
TXRT = y['txrt']
_adctcrt = np.array([0.15])
#Rate for additional ctc
_aged = np.array([[1500],[1200]])
#Extra std. ded. for aged
_almdep = np.array([6950])
#Child AMT Exclusion base
_almsp = np.array([179500])
#AMT bracket
_amex = np.array([3900])
#Personal Exemption
_amtage = np.array([24])
#Age for full AMT exclusion
_amtsep = np.array([232500])
#AMT Exclusion
_almsep = np.array([39375])
#Extra alminc for married sep
_agcmax = np.array([15000])
#??
_cgrate1 = np.array([0.10])
#Initial rate on long term gains
_cgrate2 = np.array([0.20])
#Normal rate on long term gains
_chmax = np.array([1000])
#Max Child Tax Credit per child
_crmax = np.array([[487],[3250],[5372],[6044]])
#Max earned income credit
_dcmax = np.array([3000])
#Max dependent care expenses
_dylim = np.array([3300])
#Limits for Disqualified Income
_ealim = np.array([3000])
#Max earn ACTC
_edphhs = np.array([63])
#End of educ phaseout - singles
_edphhm = np.array([126])
#End of educ phaseout - married
_feimax = np.array([97600])
#Maximum foreign earned income exclusion
#_hopelm = np.array([1200])
_joint = np.array([0])
#Extra to ymax for joint
_learn = np.array([10000])
#Expense limit for the LLC
_pcmax = np.array([35])
#Maximum Percentage for f2441
_phase = np.array([172250])
#Phase out for itemized
_rtbase = np.array([[0.0765], [0.3400], [0.4000], [0.4000]])
#EIC base rate
_rtless = np.array([[0.0765], [0.1598], [0.2106], [0.2106]])
#EIC _phaseout rate
_ssmax = np.array([115800])
#SS Maximum taxable earnings
_ymax = np.array([[7970], [17530], [17530], [17530]])
#Start of EIC _phaseout
_rt1 = np.array([0.1])
#10% rate
_rt2 = np.array([0.15])
#15% rate
_rt3 = np.array([0.25])
#25% rate
_rt4 = np.array([0.28])
#28% rate
_rt5 = np.array([0.33])
#33% rate
_rt6 = np.array([0.35])
#35% rate
_rt7 = np.array([0.396])
#39.6% rate
_amtys = np.array([112500, 150000, 75000, 112500, 150000, 75000])
#AMT Phaseout Start
_cphase = np.array([75000, 110000, 55000, 75000, 75000, 55000])
#Child Tax Credit Phase-Out
_thresx = np.array([200000, 250000, 125000, 200000, 250000, 125000])
#Threshold for add medicare
_ssb50 = np.array([25000, 32000, 0, 25000, 25000, 0])
#SS 50% taxability threshold
_ssb85 = np.array([34000, 44000, 0, 34000, 34000, 0])
#SS 85% taxability threshold
_amtex = np.array([[51900, 80750, 40375, 51900, 80750, 40375],
[0, 0, 0, 0, 0, 0]])
#AMT Exclusion
_exmpb = np.array([[200000, 300000, 150000, 250000, 300000, 150000],
[0, 0, 0, 0, 0, 0]])
#Personal Exemption Amount
_stded = np.array([[6100, 12200, 6100, 8950, 12200, 6100, 1000],
[0, 0, 0, 0, 0, 0, 0]])
#Standard Deduction
_brk1 = np.array([[8925, 17850, 8925, 12750, 17850, 8925],
[0, 0, 0, 0, 0, 0]])
#10% tax rate thresholds
_brk2 = np.array([[36250, 72500, 36250, 48600, 72500, 36250],
[0, 0, 0, 0, 0, 0]])
#15% tax rate thresholds
_brk3 = np.array([[87850, 146400, 73200, 125450, 146400, 73200],
[0, 0, 0, 0, 0, 0]])
#25% tax rate thresholds
_brk4 = np.array([[183250, 223050, 111525, 203150, 223050, 111525],
[0, 0, 0, 0, 0, 0]])
#28% tax rate thresholds
_brk5 = np.array([[398350, 398350, 199175, 398350, 398350, 199175],
[0, 0, 0, 0, 0, 0]])
#33% tax rate thresholds
_brk6 = np.array([[400000, 450000, 225000, 425000, 450000, 225000],
[0, 0, 0, 0, 0, 0]])
#35% tax rate thresholds
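# --- hedged illustration (not part of the original translation) ---------------
# A sketch of how the bracket thresholds (_brk1.._brk6) and marginal rates
# (_rt1.._rt7) above combine into ordinary income tax for one filer. The
# function name and the MARS-to-column mapping (filing status 1..6 -> column
# 0..5) are assumptions made for illustration only.
def _ordinary_tax_sketch(taxable_income, mars):
    rates = [_rt1[0], _rt2[0], _rt3[0], _rt4[0], _rt5[0], _rt6[0], _rt7[0]]
    tops = [_brk1[0, mars - 1], _brk2[0, mars - 1], _brk3[0, mars - 1],
            _brk4[0, mars - 1], _brk5[0, mars - 1], _brk6[0, mars - 1],
            float('inf')]
    tax, lower = 0.0, 0.0
    for rate, top in zip(rates, tops):
        tax += rate * max(0.0, min(taxable_income, top) - lower)
        lower = top
    return tax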
def Puf():
    #Run this function when the data input is the PUF file
global e35300_0
e35300_0 = np.zeros((dim,))
global e35600_0
e35600_0 = np.zeros((dim,))
global e35910_0
e35910_0 = np.zeros((dim,))
global x03150
x03150 = np.zeros((dim,))
global e03600
e03600 = np.zeros((dim,))
global e03280
e03280 = np.zeros((dim,))
global e03900
e03900 = np.zeros((dim,))
global e04000
e04000 = np.zeros((dim,))
global e03700
e03700 = np.zeros((dim,))
global c23250
c23250 = np.zeros((dim,))
global e22250
e22250 = np.zeros((dim,))
global e23660
e23660 = np.zeros((dim,))
global f2555
f2555 = np.zeros((dim,))
global e02800
e02800 = np.zeros((dim,))
global e02610
e02610 = np.zeros((dim,))
global e02540
e02540 = np.zeros((dim,))
global e02615
e02615 = np.zeros((dim,))
global SSIND
SSIND = np.zeros((dim,))
global e18400
e18400 = np.zeros((dim,))
global e18800
e18800 = np.zeros((dim,))
global e18900
e18900 = np.zeros((dim,))
global e20950
e20950 = np.zeros((dim,))
global e19500
e19500 = np.zeros((dim,))
global e19570
e19570 = np.zeros((dim,))
global e19400
e19400 = np.zeros((dim,))
global c20400
c20400 = np.zeros((dim,))
global e20200
e20200 = np.zeros((dim,))
global e20900
e20900 = np.zeros((dim,))
global e21000
e21000 = np.zeros((dim,))
global e21010
e21010 = np.zeros((dim,))
global e02600
e02600 = np.zeros((dim,))
global _exact
_exact = np.zeros((dim,))
global e11055
e11055 = np.zeros((dim,))
global e00250
e00250 = np.zeros((dim,))
global e30100
e30100 = np.zeros((dim,))
global _compitem
_compitem = np.zeros((dim,))
global e15360
e15360 = np.zeros((dim,))
global e04200
e04200 = np.zeros((dim,))
global e04470
e04470 = np.zeros((dim,))
global e37717
e37717 = np.zeros((dim,))
global e04805
e04805 = np.zeros((dim,))
global AGEP
AGEP = np.zeros((dim,))
global AGES
AGES = np.zeros((dim,))
global PBI
PBI = np.zeros((dim,))
global SBI
SBI = np.zeros((dim,))
global t04470
t04470 = np.zeros((dim,))
global e23250
e23250 = np.zeros((dim,))
global e58980
e58980 = np.zeros((dim,))
global c00650
c00650 = np.zeros((dim,))
global e24583
e24583 = np.zeros((dim,))
global _fixup
_fixup = np.zeros((dim,))
global _cmp
_cmp = np.zeros((dim,))
global e59440
e59440 = np.zeros((dim,))
global e59470
e59470 = np.zeros((dim,))
global e59400
e59400 = np.zeros((dim,))
global e10105
e10105 = np.zeros((dim,))
global e83200_0
e83200_0 = np.zeros((dim,))
global e59410
e59410 = np.zeros((dim,))
global e59420
e59420 = np.zeros((dim,))
global e74400
e74400 = np.zeros((dim,))
global x62720
x62720 = np.zeros((dim,))
global x60260
x60260 = np.zeros((dim,))
global x60240
x60240 = np.zeros((dim,))
global x60220
x60220 = np.zeros((dim,))
global x60130
x60130 = np.zeros((dim,))
global x62730
x62730 = np.zeros((dim,))
global e60290
e60290 = np.zeros((dim,))
global DOBYR
DOBYR = np.zeros((dim,))
global SDOBYR
SDOBYR = np.zeros((dim,))
global DOBMD
DOBMD = np.zeros((dim,))
global SDOBMD
SDOBMD = np.zeros((dim,))
global e62600
e62600 = np.zeros((dim,))
global x62740
x62740 = np.zeros((dim,))
global _fixeic
_fixeic = np.zeros((dim,))
global e32880
e32880 = np.zeros((dim,))
global e32890
e32890 = np.zeros((dim,))
global CDOB1
CDOB1 = np.zeros((dim,))
global CDOB2
CDOB2 = np.zeros((dim,))
global e32750
e32750 = np.zeros((dim,))
global e32775
e32775 = np.zeros((dim,))
global e33420
e33420 = np.zeros((dim,))
global e33430
e33430 = np.zeros((dim,))
global e33450
e33450 = np.zeros((dim,))
global e33460
e33460 = np.zeros((dim,))
global e33465
e33465 = np.zeros((dim,))
global e33470
e33470 = np.zeros((dim,))
global x59560
x59560 = np.zeros((dim,))
global EICYB1
EICYB1 = np.zeros((dim,))
global EICYB2
EICYB2 = np.zeros((dim,))
global EICYB3
EICYB3 = np.zeros((dim,))
global e83080
e83080 = np.zeros((dim,))
global e25360
e25360 = np.zeros((dim,))
global e25430
e25430 = np.zeros((dim,))
global e25470
e25470 = np.zeros((dim,))
global e25400
e25400 = np.zeros((dim,))
global e25500
e25500 = np.zeros((dim,))
global e26210
e26210 = np.zeros((dim,))
global e26340
e26340 = np.zeros((dim,))
global e26205
e26205 = np.zeros((dim,))
global e26320
e26320 = np.zeros((dim,))
global e87482
e87482 = np.zeros((dim,))
global e87487
e87487 = np.zeros((dim,))
global e87492
e87492 = np.zeros((dim,))
global e87497
e87497 = np.zeros((dim,))
global e87526
e87526 = np.zeros((dim,))
global e87522
e87522 = np.zeros((dim,))
global e87524
e87524 = np.zeros((dim,))
global e87528
e87528 = np.zeros((dim,))
global EDCRAGE
    EDCRAGE = np.zeros((dim,))
'''Visualization utilities: save labeled image grids, movies, and score histograms,
and push them to a Visdom server.
'''
import imageio
import scipy
import matplotlib
import matplotlib.pylab as plt
import matplotlib.patches as mpatches
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import visdom
from sklearn.manifold import TSNE
from tile_images import tile_raster_images
visualizer = None
matplotlib.use('Agg')
_options = dict(
use_tanh=False,
quantized=False,
img=None,
label_names=None,
is_caption=False,
is_attribute=False
)
CHAR_MAP = ['_', '\n', ' ', '!', '"', '%', '&', "'", '(', ')', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '8', '9', ':', ';', '=', '?', '\\', '`',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '*', '*', '*']
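# CHAR_MAP decodes integer caption tokens back into characters when rendering
# is_caption labels in save_images.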
def setup(server, port, use_tanh=None, quantized=None, img=None, label_names=None,
is_caption=False, is_attribute=False, env='main'):
global visualizer
visualizer = visdom.Visdom(server=server, port=port, env=env)
global _options
if use_tanh is not None:
_options['use_tanh'] = use_tanh
if quantized is not None:
_options['quantized'] = quantized
if img is not None:
_options['img'] = img
if label_names is not None:
_options['label_names'] = label_names
_options['is_caption'] = is_caption
_options['is_attribute'] = is_attribute
if is_caption and is_attribute:
raise ValueError('Cannot be both attribute and caption')
def dequantize(images):
images = np.argmax(images, axis=1).astype('uint8')
images_ = []
for image in images:
img2 = Image.fromarray(image)
img2.putpalette(_options['img'].getpalette())
img2 = img2.convert('RGB')
images_.append(np.array(img2))
images = np.array(images_).transpose(0, 3, 1, 2).astype(floatX) / 255.
return images
def save_images(images, num_x, num_y, out_file=None, labels=None,
margin_x=5, margin_y=5, image_id=0, caption='', title=''):
if labels is not None:
if _options['is_caption']:
margin_x = 80
margin_y = 50
elif _options['is_attribute']:
margin_x = 25
margin_y = 200
elif _options['label_names'] is not None:
margin_x = 20
margin_y = 25
else:
margin_x = 5
margin_y = 12
if out_file is None:
pass
else:
if _options['quantized']:
images = dequantize(images)
elif _options['use_tanh']:
images = 0.5 * (images + 1.)
images = images * 255.
dim_c, dim_x, dim_y = images.shape[-3:]
if dim_c == 1:
arr = tile_raster_images(
X=images, img_shape=(dim_x, dim_y), tile_shape=(num_x, num_y),
tile_spacing=(margin_y, margin_x), bottom_margin=margin_y)
fill = 255
else:
arrs = []
for c in xrange(dim_c):
arr = tile_raster_images(
X=images[:, c].copy(), img_shape=(dim_x, dim_y),
tile_shape=(num_x, num_y),
tile_spacing=(margin_y, margin_x),
bottom_margin=margin_y)
arrs.append(arr)
arr = np.array(arrs).transpose(1, 2, 0)
fill = (255, 255, 255)
im = Image.fromarray(arr)
if labels is not None:
try:
font = ImageFont.truetype(
'/usr/share/fonts/truetype/freefont/FreeSans.ttf', 9)
except:
font = ImageFont.truetype(
'/usr/share/fonts/truetype/liberation/LiberationSerif-Regular.ttf', 9)
idr = ImageDraw.Draw(im)
for i, label in enumerate(labels):
x_ = (i % num_x) * (dim_x + margin_x)
y_ = (i // num_x) * (dim_y + margin_y) + dim_y
if _options['is_caption']:
l_ = ''.join([CHAR_MAP[j] for j in label])
if len(l_) > 20:
l_ = '\n'.join(
[l_[x:x+20] for x in range(0, len(l_), 20)])
elif _options['is_attribute']:
attribs = [j for j, a in enumerate(label) if a == 1]
l_ = '\n'.join(_options['label_names'][a] for a in attribs)
elif _options['label_names'] is not None:
l_ = _options['label_names'][label]
l_ = l_.replace('_', '\n')
else:
l_ = str(label)
idr.text((x_, y_), l_, fill=fill, font=font)
arr = np.array(im)
if arr.ndim == 3:
arr = arr.transpose(2, 0, 1)
visualizer.image(arr, opts=dict(title=title, caption=caption),
win='image_{}'.format(image_id))
im.save(out_file)
def save_movie(images, num_x, num_y, env='main', out_file=None, movie_id=0):
if out_file is None:
pass
else:
images_ = []
for i, image in enumerate(images):
if _options['quantized']:
image = dequantize(image)
dim_c, dim_x, dim_y = image.shape[-3:]
image = image.reshape((num_x, num_y, dim_c, dim_x, dim_y))
image = image.transpose(0, 3, 1, 4, 2)
image = image.reshape(num_x * dim_x, num_y * dim_y, dim_c)
if _options['use_tanh']:
image = 0.5 * (image + 1.)
images_.append(image)
imageio.mimsave(out_file, images_)
visualizer.video(videofile=out_file, env=env,
win='movie_{}'.format(movie_id))
def save_hist(fake_scores, real_scores, out_file, env='main'):
    bins = np.linspace(np.min(np.array([fake_scores, real_scores])),
                       np.max(np.array([fake_scores, real_scores])),
                       100)  # NOTE: the max bound and the bin count are assumed, not from the source
"""
A generic sphere handle gizmo that moves along a single axis.
@author <NAME>
"""
import json
from os import path
from pathlib import Path
import numpy as np
from panda3d.core import LVector3f
from tools.envedit import helper
from tools.envedit.gizmos.gizmo_system import GizmoSystem
from tools.envedit.gizmos.mesh_gizmo import MeshGizmo
from tools.envedit.transform import Transform
class SphereHandleGizmo(MeshGizmo):
# axis: the axis in local space the sphere moves along
def __init__(self, axis):
MeshGizmo.__init__(self)
self.color = (0.2, 0.2, 0.8, 1)
self.start_mouse_world_pos = np.array([0, 0, 0, 1])
self.start_translate_callback = None
self.translate_callback = None
self.translate_finished_callback = None
self.component = None
        self.start_pos = np.array([0, 0, 0])
import numpy as np
def random_haplotype(n_variants, priors):
assert sum(priors) == 1, priors
return np.asanyarray(np.random.rand(n_variants) < priors[1], dtype="int")
def random_genotype(n_variants, priors):
priors = np.asanyarray(priors)
genotype = np.ones(n_variants, dtype="int")
r = np.random.rand(n_variants)
genotype[r<priors[0]] = 0
genotype[r>1-priors[-1]] = 2
return genotype
def simulate_genotype_matrix_from_founders(n_variants, n_individuals, founder_types, transition_prob=0.2):
a = simulate_haplotype_matrix_from_founders(n_variants, n_individuals, founder_types, transition_prob)
b = simulate_haplotype_matrix_from_founders(n_variants, n_individuals, founder_types, transition_prob)
return a+b
def simulate_haplotype_matrix_from_founders(n_variants, n_individuals, founder_types, transition_prob=0.2):
# founder_types = np.array([random_genotype(n_variants, priors) for _ in range(n_founders)]).T
n_founders = founder_types.shape[-1]
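    # Each simulated haplotype is a mosaic of the founder haplotypes: a switch to a
    # newly drawn founder happens at roughly transition_prob of the variants, and the
    # cumulative-sum trick below carries the chosen founder index forward until the
    # next switch point.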
founder_changes = (np.random.rand(n_variants*n_individuals)<transition_prob).reshape(n_individuals, n_variants)
founder_changes[:, 0] = 1
founder_idxs = np.random.randint(0, n_founders, np.count_nonzero(founder_changes))
idx_diffs = np.diff(founder_idxs)
founders = np.zeros(n_variants*n_individuals,dtype="int")
founders[np.flatnonzero(founder_changes.flatten())[1:]] = idx_diffs
founders[0] = founder_idxs[0]
founders = founders.cumsum().reshape(n_individuals, n_variants).T
return founder_types[np.arange(n_variants)[:, None], founders]
# print(simulate_genotype_matrix_from_founders(3, 4, 2, transition_prob=0.3))
def simulate_genotype_matrix(n_variants, n_individuals, transition_prob=0.2):
"""
Simulate genotype matrix
return n_variants x n_individuals matrix
"""
p = transition_prob
q = 1-p
transition_matrix = np.array([[q*q, 2*q*p, p**2],
[q*p, p*p+q*q, p*q],
[p**2, 2*q*p, q**2]])
cum_transition_matrix = np.cumsum(transition_matrix, axis=1)
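    # transition_matrix[g] gives the probabilities of moving from genotype g (0/1/2)
    # to each genotype at the next variant; comparing a uniform draw against the row's
    # cumulative probabilities below performs one step of this Markov chain.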
matrix = []
cur = np.random.choice([0, 1, 2], n_individuals)
for v in range(n_variants):
matrix.append(cur)
rand = np.random.rand(n_individuals)
new = np.ones_like(cur)
new[rand<cum_transition_matrix[cur, 0]] = 0
new[rand>cum_transition_matrix[cur, 1]] = 2
        cur = np.where(np.random.rand(n_individuals)
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Torch interface"""
import functools
import numpy as np
import pytest
torch = pytest.importorskip("torch")
import pennylane as qml
from pennylane.gradients import finite_diff, param_shift
from pennylane.interfaces.batch import execute
class TestTorchExecuteUnitTests:
"""Unit tests for torch execution"""
def test_jacobian_options(self, mocker, tol):
"""Test setting jacobian options"""
spy = mocker.spy(qml.gradients, "param_shift")
a = torch.tensor([0.1, 0.2], requires_grad=True)
dev = qml.device("default.qubit", wires=1)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn=param_shift,
gradient_kwargs={"shifts": [(np.pi / 4,)] * 2},
interface="torch",
)[0]
res.backward()
for args in spy.call_args_list:
assert args[1]["shift"] == [(np.pi / 4,)] * 2
def test_incorrect_mode(self):
"""Test that an error is raised if a gradient transform
is used with mode=forward"""
a = torch.tensor([0.1, 0.2], requires_grad=True)
dev = qml.device("default.qubit", wires=1)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
with pytest.raises(
ValueError, match="Gradient transforms cannot be used with mode='forward'"
):
execute([tape], dev, gradient_fn=param_shift, mode="forward", interface="torch")[0]
def test_forward_mode_reuse_state(self, mocker):
"""Test that forward mode uses the `device.execute_and_gradients` pathway
while reusing the quantum state."""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(dev, "execute_and_gradients")
a = torch.tensor([0.1, 0.2], requires_grad=True)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn="device",
gradient_kwargs={"method": "adjoint_jacobian", "use_device_state": True},
interface="torch",
)[0]
# adjoint method only performs a single device execution, but gets both result and gradient
assert dev.num_executions == 1
spy.assert_called()
def test_forward_mode(self, mocker):
"""Test that forward mode uses the `device.execute_and_gradients` pathway"""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(dev, "execute_and_gradients")
a = torch.tensor([0.1, 0.2], requires_grad=True)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn="device",
gradient_kwargs={"method": "adjoint_jacobian"},
interface="torch",
)[0]
# two device executions; one for the value, one for the Jacobian
assert dev.num_executions == 2
spy.assert_called()
def test_backward_mode(self, mocker):
"""Test that backward mode uses the `device.batch_execute` and `device.gradients` pathway"""
dev = qml.device("default.qubit", wires=1)
spy_execute = mocker.spy(qml.devices.DefaultQubit, "batch_execute")
spy_gradients = mocker.spy(qml.devices.DefaultQubit, "gradients")
a = torch.tensor([0.1, 0.2], requires_grad=True)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn="device",
mode="backward",
gradient_kwargs={"method": "adjoint_jacobian"},
interface="torch",
)[0]
assert dev.num_executions == 1
spy_execute.assert_called()
spy_gradients.assert_not_called()
res.backward()
spy_gradients.assert_called()
class TestCaching:
"""Test for caching behaviour"""
def test_cache_maxsize(self, mocker):
"""Test the cachesize property of the cache"""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(qml.interfaces.batch, "cache_execute")
def cost(a, cachesize):
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.probs(wires=0)
return execute(
[tape], dev, gradient_fn=param_shift, cachesize=cachesize, interface="torch"
)[0][0, 0]
params = torch.tensor([0.1, 0.2], requires_grad=True)
res = cost(params, cachesize=2)
res.backward()
cache = spy.call_args[0][1]
assert cache.maxsize == 2
assert cache.currsize == 2
assert len(cache) == 2
def test_custom_cache(self, mocker):
"""Test the use of a custom cache object"""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(qml.interfaces.batch, "cache_execute")
def cost(a, cache):
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.probs(wires=0)
return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface="torch")[0][
0, 0
]
custom_cache = {}
params = torch.tensor([0.1, 0.2], requires_grad=True)
res = cost(params, cache=custom_cache)
res.backward()
cache = spy.call_args[0][1]
assert cache is custom_cache
def test_caching_param_shift(self, tol):
"""Test that, with the parameter-shift transform,
Torch always uses the optimum number of evals when computing the Jacobian."""
dev = qml.device("default.qubit", wires=1)
def cost(a, cache):
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.probs(wires=0)
return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface="torch")[0][
0, 0
]
# Without caching, 5 evaluations are required to compute
# the Jacobian: 1 (forward pass) + (2 shifts * 2 params)
params = torch.tensor([0.1, 0.2], requires_grad=True)
torch.autograd.functional.jacobian(lambda p: cost(p, cache=None), params)
assert dev.num_executions == 5
# With caching, 5 evaluations are required to compute
# the Jacobian: 1 (forward pass) + (2 shifts * 2 params)
dev._num_executions = 0
torch.autograd.functional.jacobian(lambda p: cost(p, cache=True), params)
assert dev.num_executions == 5
@pytest.mark.parametrize("num_params", [2, 3])
def test_caching_param_shift_hessian(self, num_params, tol):
"""Test that, with the parameter-shift transform,
caching reduces the number of evaluations to their optimum
when computing Hessians."""
dev = qml.device("default.qubit", wires=2)
params = torch.tensor(np.arange(1, num_params + 1) / 10, requires_grad=True)
N = len(params)
def cost(x, cache):
with qml.tape.JacobianTape() as tape:
qml.RX(x[0], wires=[0])
qml.RY(x[1], wires=[1])
for i in range(2, num_params):
qml.RZ(x[i], wires=[i % 2])
qml.CNOT(wires=[0, 1])
qml.var(qml.PauliZ(0) @ qml.PauliX(1))
return execute(
[tape], dev, gradient_fn=param_shift, cache=cache, interface="torch", max_diff=2
)[0]
# No caching: number of executions is not ideal
hess1 = torch.autograd.functional.hessian(lambda x: cost(x, cache=None), params)
if num_params == 2:
# compare to theoretical result
x, y, *_ = params.detach()
expected = torch.tensor(
[
[2 * np.cos(2 * x) * np.sin(y) ** 2, np.sin(2 * x) * np.sin(2 * y)],
[np.sin(2 * x) * np.sin(2 * y), -2 * np.cos(x) ** 2 * np.cos(2 * y)],
]
)
assert np.allclose(expected, hess1, atol=tol, rtol=0)
expected_runs = 1 # forward pass
expected_runs += 2 * N # Jacobian
expected_runs += 4 * N + 1 # Hessian diagonal
expected_runs += 4 * N**2 # Hessian off-diagonal
assert dev.num_executions == expected_runs
# Use caching: number of executions is ideal
dev._num_executions = 0
hess2 = torch.autograd.functional.hessian(lambda x: cost(x, cache=True), params)
assert np.allclose(hess1, hess2, atol=tol, rtol=0)
expected_runs_ideal = 1 # forward pass
expected_runs_ideal += 2 * N # Jacobian
expected_runs_ideal += N + 1 # Hessian diagonal
expected_runs_ideal += 4 * N * (N - 1) // 2 # Hessian off-diagonal
assert dev.num_executions == expected_runs_ideal
assert expected_runs_ideal < expected_runs
def test_caching_adjoint_backward(self):
"""Test that caching reduces the number of adjoint evaluations
when mode=backward"""
dev = qml.device("default.qubit", wires=2)
params = torch.tensor([0.1, 0.2, 0.3])
def cost(a, cache):
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.RY(a[2], wires=0)
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliZ(1))
return execute(
[tape],
dev,
gradient_fn="device",
cache=cache,
mode="backward",
gradient_kwargs={"method": "adjoint_jacobian"},
interface="torch",
)[0]
# Without caching, 3 evaluations are required.
# 1 for the forward pass, and one per output dimension
# on the backward pass.
torch.autograd.functional.jacobian(lambda x: cost(x, cache=None), params)
assert dev.num_executions == 3
# With caching, only 2 evaluations are required. One
# for the forward pass, and one for the backward pass.
dev._num_executions = 0
torch.autograd.functional.jacobian(lambda x: cost(x, cache=True), params)
assert dev.num_executions == 2
torch_devices = [None]
if torch.cuda.is_available():
torch_devices.append(torch.device("cuda"))
execute_kwargs = [
{"gradient_fn": param_shift, "interface": "torch"},
{
"gradient_fn": "device",
"mode": "forward",
"gradient_kwargs": {"method": "adjoint_jacobian", "use_device_state": False},
"interface": "torch",
},
{
"gradient_fn": "device",
"mode": "forward",
"gradient_kwargs": {"method": "adjoint_jacobian", "use_device_state": True},
"interface": "torch",
},
{
"gradient_fn": "device",
"mode": "backward",
"gradient_kwargs": {"method": "adjoint_jacobian"},
"interface": "torch",
},
]
@pytest.mark.gpu
@pytest.mark.parametrize("torch_device", torch_devices)
@pytest.mark.parametrize("execute_kwargs", execute_kwargs)
class TestTorchExecuteIntegration:
"""Test the torch interface execute function
integrates well for both forward and backward execution"""
def test_execution(self, torch_device, execute_kwargs):
"""Test that the execute function produces results with the expected shapes"""
dev = qml.device("default.qubit", wires=1)
a = torch.tensor(0.1, requires_grad=True, device=torch_device)
b = torch.tensor(0.2, requires_grad=False, device=torch_device)
with qml.tape.JacobianTape() as tape1:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.expval(qml.PauliZ(0))
with qml.tape.JacobianTape() as tape2:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.expval(qml.PauliZ(0))
res = execute([tape1, tape2], dev, **execute_kwargs)
assert len(res) == 2
assert res[0].shape == (1,)
assert res[1].shape == (1,)
def test_scalar_jacobian(self, torch_device, execute_kwargs, tol):
"""Test scalar jacobian calculation by comparing two types of pipelines"""
a = torch.tensor(0.1, requires_grad=True, dtype=torch.float64, device=torch_device)
dev = qml.device("default.qubit", wires=2)
with qml.tape.JacobianTape() as tape:
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
res = execute([tape], dev, **execute_kwargs)[0]
res.backward()
# compare to backprop gradient
def cost(a):
with qml.tape.QuantumTape() as tape:
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
dev = qml.device("default.qubit.autograd", wires=2)
return dev.batch_execute([tape])[0]
expected = qml.grad(cost, argnum=0)(0.1)
assert torch.allclose(a.grad, torch.tensor(expected, device=torch_device), atol=tol, rtol=0)
def test_jacobian(self, torch_device, execute_kwargs, tol):
"""Test jacobian calculation by checking against analytic values"""
a_val = 0.1
b_val = 0.2
a = torch.tensor(a_val, requires_grad=True, device=torch_device)
b = torch.tensor(b_val, requires_grad=True, device=torch_device)
dev = qml.device("default.qubit", wires=2)
with qml.tape.JacobianTape() as tape:
qml.RZ(torch.tensor(0.543, device=torch_device), wires=0)
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
res = execute([tape], dev, **execute_kwargs)[0]
assert tape.trainable_params == [1, 2]
assert isinstance(res, torch.Tensor)
assert res.shape == (2,)
expected = torch.tensor(
[np.cos(a_val), -np.cos(a_val) * np.sin(b_val)], device=torch_device
)
assert torch.allclose(res.detach(), expected, atol=tol, rtol=0)
loss = torch.sum(res)
loss.backward()
        # NOTE: only -np.sin(a_val) is given in the source here; the remaining terms are
        # reconstructed from the analytic gradient of loss = cos(a) - cos(a)*sin(b).
        expected = torch.tensor(
            [-np.sin(a_val) + np.sin(a_val) * np.sin(b_val), -np.cos(a_val) * np.cos(b_val)],
            device=torch_device,
        )
import copy
import numpy
from slab.sound import Sound
from slab.signal import Signal
from slab.filter import Filter
from slab.hrtf import HRTF
class Binaural(Sound):
"""
Class for working with binaural sounds, including ITD and ILD manipulation. Binaural inherits all sound
    generation functions from the Sound class, but returns binaural signals. Recasting an object of class Sound,
    or a sound with 1 or 3+ channels, calls Sound.copychannel to return a binaural sound with two channels
    identical to the first channel of the original sound.
Arguments:
data (slab.Signal | numpy.ndarray | list | str): see documentation of slab.Sound for details. the `data` must
have either one or two channels. If it has one, that channel is duplicated
samplerate (int): samplerate in Hz, must only be specified when creating an instance from an array.
Attributes:
.left: the first data channel, containing the sound for the left ear.
.right: the second data channel, containing the sound for the right ear
.data: the data-array of the Sound object which has the shape `n_samples` x `n_channels`.
.n_channels: the number of channels in `data`. Must be 2 for a binaural sound.
.n_samples: the number of samples in `data`. Equals `duration` * `samplerate`.
.duration: the duration of the sound in seconds. Equals `n_samples` / `samplerate`.
"""
# instance properties
def _set_left(self, other):
if hasattr(other, 'samplerate'): # probably an slab object
self.data[:, 0] = other.data[:, 0]
else:
self.data[:, 0] = numpy.array(other)
def _set_right(self, other):
if hasattr(other, 'samplerate'): # probably an slab object
self.data[:, 1] = other.data[:, 0]
else:
self.data[:, 1] = numpy.array(other)
left = property(fget=lambda self: Sound(self.channel(0)), fset=_set_left,
doc='The left channel for a stereo sound.')
right = property(fget=lambda self: Sound(self.channel(1)), fset=_set_right,
doc='The right channel for a stereo sound.')
def __init__(self, data, samplerate=None):
if isinstance(data, (Sound, Signal)):
if data.n_channels == 1: # if there is only one channel, duplicate it.
self.data = numpy.tile(data.data, 2)
elif data.n_channels == 2:
self.data = data.data
else:
raise ValueError("Data must have one or two channel!")
self.samplerate = data.samplerate
elif isinstance(data, (list, tuple)):
if isinstance(data[0], (Sound, Signal)):
if data[0].n_samples != data[1].n_samples:
raise ValueError('Sounds must have same number of samples!')
if data[0].samplerate != data[1].samplerate:
raise ValueError('Sounds must have same samplerate!')
super().__init__([data[0].data[:, 0], data[1].data[:, 0]], data[0].samplerate)
else:
super().__init__(data, samplerate)
elif isinstance(data, str):
super().__init__(data, samplerate)
if self.n_channels == 1:
self.data = numpy.tile(self.data, 2) # duplicate channel if monaural file
else:
super().__init__(data, samplerate)
if self.n_channels == 1:
self.data = numpy.tile(self.data, 2) # duplicate channel if monaural file
if self.n_channels != 2:
            raise ValueError('Binaural sounds must have two channels!')
def itd(self, duration=None, max_lag=0.001):
"""
Either estimate the interaural time difference of the sound or generate a new sound with the specified
interaural time difference. The resolution for computing the ITD is 1/samplerate seconds. A negative
ITD value means that the right channel is delayed, meaning the sound source is to the left.
Arguments:
duration (None| int | float): Given None, the instance's ITD is computed. Given another value, a new sound
with the desired interaural time difference in samples (given an integer) or seconds (given a float)
is generated.
max_lag (float): Maximum possible value for ITD estimation. Defaults to 1 millisecond which is barely
outside the physiologically plausible range for humans. Is ignored if `duration` is specified.
Returns:
(int | slab.Binaural): The interaural time difference in samples or a copy of the instance with the
specified interaural time difference.
Examples::
sound = slab.Binaural.whitenoise()
lateral = sound.itd(duration=0.0005) # generate a sound with 0.5 ms ITD
lateral.itd() # estimate the ITD of the sound
"""
if duration is None:
return self._get_itd(max_lag)
return self._apply_itd(duration)
def _get_itd(self, max_lag):
max_lag = Sound.in_samples(max_lag, self.samplerate)
xcorr = numpy.correlate(self.data[:, 0], self.data[:, 1], 'full')
        lags = numpy.arange(-max_lag, max_lag + 1)
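# --- hedged illustration (not from slab) --------------------------------------
# A sketch of how a cross-correlation and lag axis like the ones computed in
# _get_itd above typically become an ITD estimate; slab's actual implementation
# may differ (e.g. in sign convention or interpolation), so treat this only as
# an illustration of the idea.
import numpy

def itd_from_xcorr_sketch(data, max_lag_samples):
    xcorr = numpy.correlate(data[:, 0], data[:, 1], 'full')
    lags = numpy.arange(-max_lag_samples, max_lag_samples + 1)
    mid = len(xcorr) // 2  # index of zero lag in the 'full' correlation
    window = xcorr[mid - max_lag_samples:mid + max_lag_samples + 1]
    return lags[numpy.argmax(window)]  # ITD in samples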
import numpy as np
import matplotlib.pyplot as plt
from plotUtils import *
import copy
from time import time
import math
from numba import cuda, float32, jit, int32
# Problem 1
# 1.a
def analysis(ori_arr):
"""Analyze the input array, return the first array and the second array."""
even_res = []
odd_res = []
for i in range(len(ori_arr)):
if i % 2 == 0:
even_res.append((ori_arr[i+1] + ori_arr[i]) / 2)
else:
odd_res.append((ori_arr[i] - ori_arr[i-1]) / 2)
    return np.array(even_res), np.array(odd_res)  # the second return value is inferred from the docstring
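# --- hedged illustration -------------------------------------------------------
# Because analysis() stores pairwise averages and half-differences, the input can
# be reconstructed exactly. This synthesis step is not part of the original
# assignment code; it is only meant to show the inverse relationship.
def synthesis_sketch(even_arr, odd_arr):
    rec = np.empty(2 * len(even_arr))
    rec[0::2] = np.asarray(even_arr) - np.asarray(odd_arr)   # a[2i]   = avg - diff
    rec[1::2] = np.asarray(even_arr) + np.asarray(odd_arr)   # a[2i+1] = avg + diff
    return rec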
import numpy as np
import pytest
def test_zernike_func_xx_corr(coeff_xx, noll_index_xx, eidos_data_xx):
""" Tests reconstruction of xx correlation against eidos """
from africanus.rime import zernike_dde
npix = 17
nsrc = npix ** 2
ntime = 1
na = 1
nchan = 1
ncorr = 1
thresh = 15
npoly = thresh
# Linear (l,m) grid
nx, ny = npix, npix
grid = (np.indices((nx, ny), dtype=float) - nx // 2) * 2 / nx
ll, mm = grid[0], grid[1]
lm = np.vstack((ll.flatten(), mm.flatten())).T
# Initializing coords, coeffs, and noll_indices
coords = np.empty((3, nsrc, ntime, na, nchan), dtype=float)
coeffs = np.empty((na, nchan, ncorr, npoly), dtype=np.complex128)
noll_indices = np.empty((na, nchan, ncorr, npoly))
parallactic_angles = np.zeros((ntime, na), dtype=np.float64)
    frequency_scaling = np.ones((nchan,), dtype=np.float64)
# coding: utf8
""" Unit tests:
    - :class:`TestMultivariateJacobiOPE` checks correct implementation of the corresponding class.
"""
import unittest
import numpy as np
from scipy.integrate import quad
from scipy.special import eval_jacobi
import sys
sys.path.append('..')
from dppy.multivariate_jacobi_ope import (MultivariateJacobiOPE,
compute_ordering_BaHa16,
compute_Gautschi_bounds)
from dppy.utils import inner1d, check_random_state
class TestMultivariateJacobiOPE(unittest.TestCase):
"""
"""
seed = 0
def test_ordering(self):
"""Make sure the ordering of multi-indices respects the one prescirbed by :cite:`BaHa16` Section 2.1.3
"""
ord_d2_N16 = [(0, 0),
(0, 1), (1, 0), (1, 1),
(0, 2), (1, 2), (2, 0), (2, 1), (2, 2),
(0, 3), (1, 3), (2, 3), (3, 0), (3, 1), (3, 2), (3, 3)]
ord_d3_N27 = [(0, 0, 0),
(0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1),
(0, 0, 2), (0, 1, 2), (0, 2, 0), (0, 2, 1), (0, 2, 2), (1, 0, 2), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)]
orderings = [ord_d2_N16, ord_d3_N27]
for idx, ord_to_check in enumerate(orderings):
N, d = len(ord_to_check), len(ord_to_check[0])
            self.assertEqual([tuple(idx) for idx in compute_ordering_BaHa16(N, d)], ord_to_check)
def test_square_norms(self):
N = 100
dims = np.arange(2, 5)
max_deg = 50 # to avoid quad warning in dimension 1
for d in dims:
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
pol_2_eval = dpp.poly_1D_degrees[:max_deg]
quad_square_norms =\
[[quad(lambda x:
(1-x)**a * (1+x)**b * eval_jacobi(n, a, b, x)**2,
-1, 1)[0]
for n, a, b in zip(deg,
dpp.jacobi_params[:, 0],
dpp.jacobi_params[:, 1])]
for deg in pol_2_eval]
self.assertTrue(np.allclose(
dpp.poly_1D_square_norms[pol_2_eval,
range(dpp.dim)],
quad_square_norms))
def test_Gautschi_bounds(self):
"""Test if bounds computed w/wo log scale coincide"""
N = 100
dims = np.arange(2, 5)
for d in dims:
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
with_log_scale = compute_Gautschi_bounds(dpp.jacobi_params,
dpp.ordering,
log_scale=True)
without_log_scale = compute_Gautschi_bounds(dpp.jacobi_params,
dpp.ordering,
log_scale=False)
self.assertTrue(np.allclose(with_log_scale, without_log_scale))
def test_kernel_symmetry(self):
"""
K(x) == K(x, x)
K(x, y) == K(y, x)
K(x, Y) == K(Y, x) = [K(x, y) for y in Y]
K(X) == [K(x, x) for x in X]
K(X, Y) == [K(x, y) for x, y in zip(X, Y)]
"""
N = 100
dims = np.arange(2, 5)
for d in dims:
jacobi_params = 0.5 -
|
np.random.rand(d, 2)
|
numpy.random.rand
|
import numpy as np
import matplotlib.pyplot as plt
import pyvista as pv
import pandas as pd
from skimage import measure
from scipy.integrate import simps
from scipy.interpolate import griddata
import geopandas as gpd
from shapely.geometry import MultiPolygon, Polygon
from zmapio import ZMAPGrid
def poly_area(x,y):
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
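# Illustrative check of the shoelace formula above (not part of the original module):
# a unit square traversed counter-clockwise has area 1.0, e.g.
# poly_area(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1])) -> 1.0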
class Surface:
def __init__(self, **kwargs):
self.x = kwargs.pop('x',None)
self.y = kwargs.pop('y',None)
self.z = kwargs.pop('z',None)
self.crs = kwargs.pop('crs',4326)
#Properties
@property
def x(self):
return self._x
@x.setter
def x(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._x = value
@property
def y(self):
return self._y
@y.setter
def y(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._y = value
@property
def z(self):
return self._z
@z.setter
def z(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._z = value
@property
def crs(self):
return self._crs
@crs.setter
def crs(self,value):
assert isinstance(value,(int,str,type(None))), f"{type(value)} not accepted. Name must be str. Example 'EPSG:3117'"
if isinstance(value,int):
value = f'EPSG:{value}'
elif isinstance(value,str):
assert value.startswith('EPSG:'), 'if crs is a string it must start with EPSG:. If an integer, it must be the EPSG coordinate reference system number, see http://epsg.io/'
self._crs = value
def contour(self,ax=None,**kwargs):
#Create the Axex
cax= ax or plt.gca()
return cax.contour(self.x,self.y,self.z,**kwargs)
def contourf(self,ax=None,**kwargs):
#Create the Axex
cax= ax or plt.gca()
return cax.contourf(self.x,self.y,self.z,**kwargs)
def structured_surface_vtk(self):
#Get a Pyvista Object StructedGrid
grid = pv.StructuredGrid(self.x, self.y, self.z).elevation()
return grid
def get_contours_bound(self,levels=None,zmin=None,zmax=None,n=10):
#define levels
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
levels = np.linspace(zmin,zmax,n)
xmax = np.nanmax(self.x)
ymax = np.nanmax(self.y)
xmin = np.nanmin(self.x)
ymin = np.nanmin(self.y)
#iterate over levels
contours = self.structured_surface_vtk().contour(isosurfaces=levels.tolist())
contours.points[:,2] = contours['Elevation']
df = pd.DataFrame(contours.points, columns=['x','y','z'])
#Organize the points according to their angle with respect to the centroid. This is done with the
#purpose of plotting the bounds continuously.
list_df_sorted = []
for i in df['z'].unique():
df_z = df.loc[df['z']==i,['x','y','z']]
centroid = df_z[['x','y']].mean(axis=0).values
df_z[['delta_x','delta_y']] = df_z[['x','y']] - centroid
df_z['angle'] = np.arctan2(df_z['delta_y'],df_z['delta_x'])
df_z.sort_values(by='angle', inplace=True)
list_df_sorted.append(df_z)
return pd.concat(list_df_sorted, axis=0)
def get_contours_area_bounds(self,levels=None,n=10,zmin=None,zmax=None,c=2.4697887e-4):
contours = self.get_contours_bound(levels=levels,zmin=zmin,zmax=zmax,n=n)
area_dict= {}
for i in contours['z'].unique():
poly = contours.loc[contours['z']==i,['x','y']]
area = poly_area(poly['x'],poly['y'])
area_dict.update({i:area*c})
return pd.DataFrame.from_dict(area_dict, orient='index', columns=['area'])
def get_contours_area_mesh(self,levels=None,n=10,zmin=None,zmax=None,c=2.4697887e-4):
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
levels = np.linspace(zmin,zmax,n)
dif_x = np.diff(self.x,axis=1).mean(axis=0)
dif_y = np.diff(self.y,axis=0).mean(axis=1)
dxx, dyy = np.meshgrid(dif_x,dif_y)
area_dict = {}
for i in levels:
z = self.z.copy()
z[(z<i)|(z>zmax)|(z<zmin)] = np.nan
z = z[1:,1:]
a = dxx * dyy * ~np.isnan(z) * c  # use the conversion factor argument rather than a hard-coded constant
area_dict.update({i:a.sum()})
return pd.DataFrame.from_dict(area_dict, orient='index', columns=['area'])
def get_contours(self,levels=None,zmin=None,zmax=None,n=10):
#define levels
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
levels = np.linspace(zmin,zmax,n)
zz = self.z
xmax = np.nanmax(self.x)
ymax = np.nanmax(self.y)
xmin = np.nanmin(self.x)
ymin = np.nanmin(self.y)
#iterate over levels
data = pd.DataFrame()
i = 0
for level in levels:
contours = measure.find_contours(zz,level)
if contours == []:
continue
else:
for contour in contours:
level_df = pd.DataFrame(contour, columns=['y','x'])
level_df['level'] = level
level_df['n'] = i
data = data.append(level_df,ignore_index=True)
i += 1
if not data.empty:
#re scale
data['x'] = (data['x']/zz.shape[1]) * (xmax - xmin) + xmin
data['y'] = (data['y']/zz.shape[0]) * (ymax - ymin) + ymin
return data
def get_contours_gdf(self,levels=None,zmin=None,zmax=None,n=10, crs="EPSG:4326"):
#define levels
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
levels =
|
np.linspace(zmin,zmax,n)
|
numpy.linspace
|
import os, sys
import numpy as np
import aqml.cheminfo.molecule.geometry as cmg
T,F = True,False
def get_mbtypes(zs):
"""
get many-body types
"""
# atoms that cannot be J in angle IJK or J/K in dihedral angle IJKL
zs1 = [1,9,17,35,53]
zs.sort()
nz = len(zs)
# 1-body
mbs1 = [ '%d'%zi for zi in zs ]
# 2-body
mbs2 = []
mbs2 += [ '%d-%d'%(zi,zi) for zi in zs ]
for i in range(nz):
for j in range(i+1,nz):
mbs2.append( '%d-%d'%(zs[i],zs[j]) )
# 3-body
mbs3 = []
zs2 = list( set(zs).difference( set(zs1) ) )
zs2.sort()
nz2 = len(zs2)
for j in range(nz2):
for i in range(nz):
for k in range(i,nz):
type3 = '%d-%d-%d'%(zs[i],zs2[j],zs[k])
if type3 not in mbs3: mbs3.append( type3 )
# 4-body
mbs4 = []
for j in range(nz2):
for k in range(j,nz2):
for i in range(nz):
for l in range(nz):
zj,zk = zs2[j],zs2[k]
zi,zl = zs[i],zs[l]
if j == k:
zi,zl = min(zs[i],zs[l]), max(zs[i],zs[l])
type4 = '%d-%d-%d-%d'%(zi,zj,zk,zl)
if type4 not in mbs4: mbs4.append( type4 )
return [mbs2,mbs3,mbs4]
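# Illustrative example (not part of the original module): for zs = [1, 6] the 2-body terms are
# ['1-1', '6-6', '1-6'] -- homonuclear pairs first, then the unique heteronuclear pair.
# Note that get_mbtypes sorts `zs` in place.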
def copy_class(objfrom, objto, names):
for n in names:
if hasattr(objfrom, n):
v = getattr(objfrom, n)
setattr(objto, n, v);
def set_cls_attr(obj, names, vals):
for i,n in enumerate(names):
setattr(obj, n, vals[i])
class NBody(object):
"""
get many body terms
"""
# reference coordination numbers
cnsr = { 1:1, 3:1, 4:2, 5:3, 6:4, 7:3, 8:2, 9:1, \
11:1, 12:2, 13:3, 14:4, 15:3, 16:2, 17:1, \
35:1}
def __init__(self, obj, g=None, pls=None, rpad=F, rcut=12.0, unit='rad', \
iconn=F, icn=F, iconj=F, icnb=F, plmax4conj=3, bob=F, plcut=None, \
iheav=F, ivdw=F, cns=None, ctpidx=F, idic4=F):
"""
iconj : distinguish between sigma and pi bond type in a bond with BO>1
icnb : allow conjugated & non-bonded atomic pair to be treated by a Morse pot
#i2b : allow a bond with BO>1 to be treated as a sigma and a pi bond
ctpidx: calculate topological idx? T/F
plcut : Path Length cutoff
rpad : if set to T, all bond distances associated with a torsion are
also part of the list to be returned
"""
self.plmax4conj = plmax4conj
self.ctpidx = ctpidx
self.plcut = plcut
self.bob = bob
self.rpad = rpad
self.idic4 = idic4
if isinstance(obj,(tuple,list)):
assert len(obj) == 2
zs, coords = obj
else:
#sa = obj.__str__() # aqml.cheminfo.core.molecules object
#if ('atoms' in sa) or ('molecule' in sa):
try:
zs, coords = obj.zs, obj.coords
except: #else:
raise Exception('#ERROR: no attributes zs/coords exist for `obj')
if iconj:
iconn, icn = T, T
if icn:
iconn = T
na = len(zs)
set_cls_attr(self, ['na','zs','coords','g','pls'], \
[na, zs, coords, g, pls] )
if iconn:
assert g is not None
if icnb:
assert pls is not None
set_cls_attr(self, ['rcut','unit','iheav','iconn','icn','iconj', 'icnb',], \
[rcut, unit, iheav, iconn, icn, iconj, icnb])
ias = np.arange(self.na)
self.ias = ias
ias_heav = ias[ self.zs > 1 ]
self.ias_heav = ias_heav
g_heav = g[ias_heav][:,ias_heav]
self.nb_heav = int( (g_heav > 0).sum()/2 )
iasr1, iasr2 = np.where( np.triu(g_heav) > 0 )
self.iasb = np.array([ias_heav[iasr1],ias_heav[iasr2]], int).T
if cns is None:
# in case that we're given as input a subgraph made up of heavy atoms only,
# then `cns info is incomplete (due to neglect of H's), you must manually
# specify `cns to rectify this.
cns = g.sum(axis=0)
self.cns = cns
self.geom = cmg.Geometry(coords)
self.vars2, self.vars3, self.vars4 = [], [], []
# is atom unsaturated?
self.ius = ( cns < np.array([self.cnsr[zi] for zi in zs]) )
def iza8(self, zs):
return np.all(np.array(zs)==8)
def is_conjugated(self,ia,ja, hyperconj=T):
""" are the i- and j-th atoms conjugated?
criteria:
cn_i < cn_ref, e.g., C_sp2, cn=3, cnr=4
if hyperconj is `True and one atom satisfies cni<cnr while
the other is O/N-sp3, the corresponding bond is also considered
to be conjugated
"""
istat = F
ius = self.ius[ [ia,ja] ]
#print(' -- saturated? i,j = ', ius)
zsp = self.zs[ [ia,ja] ]
if np.all(ius): #iu1 and iu2:
#if not self.iza8([z1,z2]):
# # exclude interaction between "=O" and "=O"
istat = T
else:
if hyperconj and np.any(ius):
z1 = zsp[ius][0]
z2 = zsp[1] if z1 == zsp[0] else zsp[0]
if z2 in [7,8]: # and (not self.iza8([z1,z2])):
istat = T
return istat
def get_atoms(self):
mbs1 = {}
for ia in range(self.na):
cni = self.cns[ia]
zi = self.zs[ia]
type1 = '%d_%d'%(zi,cni) if self.icn else '%d'%zi
if type1 in list(mbs1.keys()):
mbs1[type1] += [zi]
else:
mbs1[type1] = [zi]
return mbs1
@property
def cg(self):
if not hasattr(self, '_cg'):
self._cg = self.get_cg()
return self._cg
def get_cg(self, hyperconj=T):
"""
get conjugation graph, i.e., cg[i,j] = T if the i- and j-th atom
1) form a bond and 2) are in the same conjugation env
"""
cg = np.zeros((self.na,self.na)) # conjugation graph
for ia in range(self.na):
for ja in range(ia+1,self.na):
if self.g[ia,ja]:
cg[ia,ja] = cg[ja,ia] = self.is_conjugated(ia,ja, hyperconj=F)
return cg
@property
def tpidx(self):
""" toplogical idx calculated based on the molecular graph """
if not hasattr(self, '_tpidx'):
tpidx = np.zeros((self.na,self.na)) # topological index
cg = self.get_cg(hyperconj=F) ###### `hyperconj reset to False!!
dgrs =
|
np.sum(cg, axis=0)
|
numpy.sum
|
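The column sum in the truncated property above is the standard way to read vertex degrees off an adjacency-style matrix; a minimal, self-contained illustration:

import numpy as np

# three-atom chain A-B-C encoded as a symmetric 0/1 adjacency matrix
cg = np.array([[0, 1, 0],
               [1, 0, 1],
               [0, 1, 0]])
dgrs = np.sum(cg, axis=0)   # degree of each vertex
print(dgrs)                 # [1 2 1]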
import tempfile
import unittest
import numpy as np
import pandas as pd
from build_features import gimme_the_mean, remove_invalid_data
class TestBuildFeatures(unittest.TestCase):
def setUp(self):
self.file = tempfile.NamedTemporaryFile(delete=True)
self.path = self.file.name
rand_df = pd.DataFrame({'a': np.random.normal(0, 1, 100),
'b':
|
np.random.normal(0, 1, 100)
|
numpy.random.normal
|
import os
import random
import cv2
import idx2numpy
import numpy as np
from tqdm import tqdm
from random import randrange
test_path = ['../data/t10k-images-idx3-ubyte', '../data/t10k-labels-idx1-ubyte']
train_path = ['../data/train-images-idx3-ubyte', '../data/train-labels-idx1-ubyte']
output_dir = '../output_4'
label_file = 'labels.csv'
os.makedirs(output_dir, exist_ok=True)
n_samples_train = [0, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000]
n_samples_test = [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000]
cnt = 0
number_of_samples_per_class = 10
overlap_size = 30
scale_map = {1 : 0.7, 2 : 0.8, 3 : 0.9, 4 : 1}
def remove_zero_padding(arr, y_start_upper=True, scale=1):
"""
Remove all zero padding at the left and right boundaries of arr
:param arr: image as numpy array
:return: image as numpy array
"""
left_bounding = 0
right_bounding = 0
t = 0
for j in range(arr.shape[1]):
if t == 1:
break
for i in range(arr.shape[0]):
if not arr[i][j] == 0:
left_bounding = j
t = 1
break
t = 0
for j in reversed(range(arr.shape[1])):
if t == 1:
break
for i in range(arr.shape[0]):
if not arr[i][j] == 0:
right_bounding = j
t = 1
break
left_bounding = max(0, left_bounding - randrange(0,3))
right_bounding = min(right_bounding + randrange(0,3), arr.shape[1])
temp_arr = arr[:, left_bounding:right_bounding]
new_shape_x = max(1,int(temp_arr.shape[1]*scale))
new_shape_y = max(1,int(temp_arr.shape[0]*scale))
temp_arr2 = cv2.resize(temp_arr, (new_shape_x, new_shape_y))
im1 = np.zeros((28, temp_arr2.shape[1]))
diff_height = im1.shape[0] - temp_arr2.shape[0]
# start_y = randrange(diff_height+1)
start_y = 0
if y_start_upper == True:
start_y = 0
else:
start_y = diff_height
im1[start_y : start_y + temp_arr2.shape[0], :] = temp_arr2
return im1
def print_arr(arr):
"""
Print out numpy array
:param arr: numpy array
:return: void
"""
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
print(arr[i][j], end='')
print()
def concat(a, b, overlap=True, intersection_scalar=0.2):
"""
Concatenate 2 numpy arrays
:param a: numpy array
:param b: numpy array
:param overlap: whether the 2 arrays should overlap
:param intersection_scalar: percentage of overlap size
:return: numpy array
"""
assert a.shape[0] == b.shape[0]
if overlap is False:
return np.concatenate((a, b), axis=1)
sequence_length = a.shape[1] + b.shape[1]
intersection_size = int(intersection_scalar * min(a.shape[1], b.shape[1]))
im =
|
np.zeros((a.shape[0], sequence_length - intersection_size))
|
numpy.zeros
|
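The prompt above is cut right after allocating the output canvas. A hedged sketch of how the overlapping concatenation could proceed from there (an assumption, not the original code): place `a` on the left and add `b` shifted left by the overlap so the shared columns blend:

import numpy as np

def concat_overlap(a, b, intersection_scalar=0.2):
    assert a.shape[0] == b.shape[0]
    intersection_size = int(intersection_scalar * min(a.shape[1], b.shape[1]))
    width = a.shape[1] + b.shape[1] - intersection_size
    im = np.zeros((a.shape[0], width))
    im[:, :a.shape[1]] += a                        # left image
    im[:, a.shape[1] - intersection_size:] += b    # right image shifted into the overlap
    return np.clip(im, 0, 255)                     # keep pixel values in a valid range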
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
import numpy as np
import re, textwrap
from ..utils.misc import check_exist
from ..cell import utils as cell_utils
def get_info_from_block(data, key):
'''Provide data and a key, return info from the block indicated by key'''
block_MATCH = re.compile(r'''
[\w\W]*
(?i)begin [ ]* ''' + key.strip() + '''
(?P<content>
[\s\S]*?(?=\n.*?[ ] $|(?i)end) # match everything until next blank line or EOL
)
''', re.VERBOSE)
match = block_MATCH.match(data)
if match is not None:
return match['content']
else:
return match
def get_info_after_key(data, key):
'''Provide data and a key, return the value that follows the key'''
key_MATCH = re.compile(r'''
[\w\W]* ''' + key.strip() + ''' [ ]* [:=]+''' + '''(?P<value>[\s\S]*?(?=\n.*?[ ] $|[\n]))''', re.VERBOSE)
match = key_MATCH.match(data)
if match is not None:
return match['value']
else:
return match
def read_win(filename):
'''Read seedname.win'''
assert check_exist(filename), 'Cannot find : ' + filename
with open(filename, 'r') as data_file:
data = data_file.read()
unit_cell = get_info_from_block(data, 'Unit_Cell_Cart')
unit_cell = np.float64(unit_cell.split()).reshape(3,3)
abs_coords = None
atoms_cart = get_info_from_block(data, 'atoms_cart')
if atoms_cart is not None:
atoms_cart = atoms_cart.split()
natom = len(atoms_cart) // 4
atom = [atoms_cart[4*i] for i in range(natom)]
abs_coords = np.float64([atoms_cart[4*i + 1 : 4*i + 4] for i in range(natom)])
frac_coords = None
atoms_frac = get_info_from_block(data, 'atoms_frac')
if atoms_frac is not None:
atoms_frac = atoms_frac.split()
natom = len(atoms_frac) // 4
atom = [atoms_frac[4*i] for i in range(natom)]
frac_coords = np.float64([atoms_frac[4*i + 1 : 4*i + 4] for i in range(natom)])
mp_grid = np.int64(get_info_after_key(data, 'mp_grid').split()).tolist()
kpoint_path = get_info_from_block(data, 'kpoint_path')
kpath = None
if kpoint_path is not None:
kpoint_path_MATCH = re.compile(r'''
[ ]*
(?P<k1>\S+) [ ]* (?P<k1x>\S+) [ ]* (?P<k1y>\S+) [ ]* (?P<k1z>\S+) [ ]* (?P<k2>\S+) [ ]* (?P<k2x>\S+) [ ]* (?P<k2y>\S+) [ ]* (?P<k2z>\S+)
''', re.VERBOSE)
kpath = []
for kpoint in kpoint_path_MATCH.finditer(kpoint_path):
content = kpoint.groupdict()
k1 = [content['k1'], np.float64([content['k1x'], content['k1y'], content['k1z']])]
k2 = [content['k2'], np.float64([content['k2x'], content['k2y'], content['k2z']])]
kpath.append([k1, k2])
kpts = None
kpts_data = get_info_from_block(data, 'kpoints')
kpts = np.float64(kpts_data.split()).reshape(-1, 3)
out = {}
out['unit_cell'] = unit_cell
out['atom'] = atom
out['abs_coords'] = abs_coords
out['frac_coords'] = frac_coords
out['mp_grid'] = mp_grid
out['kpath'] = kpath
out['kpts'] = kpts
return out
def read_band(filename):
'''Read seedname_band.dat'''
assert check_exist(filename), 'Cannot find : ' + filename
with open(filename, 'r') as data_file:
data = data_file.read()
temp = data.split('\n \n')[:-1]
bands = []
for i, band in enumerate(temp):
formatted_band = np.float64(band.split()).reshape(-1, 2)
if i == 0: proj_kpath = formatted_band[:,0]
bands.append(formatted_band[:,1])
return proj_kpath, np.asarray(bands).T
def read_kpt(filename):
'''Read seedname_kpt.dat'''
assert check_exist(filename), 'Cannot find : ' + filename
with open(filename, 'r') as data_file:
data = data_file.read()
temp = data.split()
nkpts = int(temp[0])
kpath_frac =
|
np.float64(temp[1:])
|
numpy.float64
|
import tables
import pandas as pd
import numpy as np
import math
import pdb
import matplotlib.pyplot as plt
import sklearn
import copy
from sklearn.metrics import r2_score
from db import dbfunctions as dbfn
from scipy.signal import butter, lfilter, filtfilt
from scipy import stats
from riglib.filter import Filter
from ismore import brainamp_channel_lists, ismore_bmi_lib
from ismore.ismore_tests.eeg_feature_extraction import EEGMultiFeatureExtractor
from utils.constants import *
# Documentation
# Script to test the channel selection using an artificial signal (optimal signal)
## dataset
#hdf_ids = [3962,3964]
#hdf_ids = [4296, 4298, 4300, 4301]#4296, 4298, 4300, 4301 AI subject
#hdf_ids = [4329,4330,4331] # EL subject
hdf_ids = [4329]
channels_2visualize = brainamp_channel_lists.eeg32_filt #determine here the electrodes that we wanna visualize (either the filtered ones or the raw ones that will be filtered here)
#channels_2visualize = brainamp_channel_lists.emg14_filt
#db_name = "default"
db_name = "tubingen"
hdf_names = []
for id in hdf_ids:
te = dbfn.TaskEntry(id, dbname= db_name)
hdf_names.append(te.hdf_filename)
te.close_hdf()
# if '_filt' not in channels_2visualize[0]: #or any other type of configuration using raw signals
# filt_training_data = True
# else:
# filt_training_data = False
filt_training_data = True
# Set 'feature_names' to be a list containing the names of features to use
# (see emg_feature_extraction.py for options)
feature_names = ['AR'] # choose here the feature names that will be used to train the decoder
#frequency_resolution = 3#Define the frequency bins of interest (the ones to be used to train the decoder)
# Set 'feature_fn_kwargs' to be a dictionary where:
# key: name of a feature (e.g., 'ZC')
# value: a dictionary of the keyword arguments to be passed into the feature
# function (e.g., extract_ZC) corresponding to this feature
# (see emg_feature_extraction.py for how the feature function definitions)
freq_bands = dict()
# freq_bands['13_filt'] = [] #list with the freq bands of interest
# freq_bands['14_filt'] = []#[[2,7],[9,16]]
# freq_bands['18_filt'] = []
# freq_bands['19_filt'] = []
feature_fn_kwargs = {
'AR': {'freq_bands': freq_bands}, # choose here the feature names that will be used to train the decoder
# 'ZC': {'threshold': 30},
# 'SSC': {'threshold': 700},
}
# neighbour_channels = { #define the neighbour channels for each channel (for the Laplacian filter)
# '1': [2,3,4],
# '2': [5,6],
# '3': [4,5],
# }
neighbour_channels = { #define the neighbour channels for each channel (for the Laplacian filter)
'1_filt': channels_2visualize,
'2_filt': channels_2visualize,
'3_filt': channels_2visualize,
'4_filt': channels_2visualize,
'5_filt': channels_2visualize,
'6_filt': channels_2visualize,
'7_filt': channels_2visualize,
'8_filt': ['3_filt', '4_filt','12_filt', '13_filt'],
'9_filt': ['4_filt', '5_filt','13_filt', '14_filt'],
'10_filt': ['5_filt', '6_filt','14_filt', '15_filt'],
'11_filt': ['6_filt', '7_filt','15_filt', '16_filt'],
'12_filt': channels_2visualize,
'13_filt': ['8_filt', '9_filt','18_filt', '19_filt'],
'14_filt': ['9_filt', '10_filt','19_filt', '20_filt'],
'15_filt': ['10_filt', '11_filt','20_filt', '21_filt'],
'16_filt': channels_2visualize,
'17_filt': channels_2visualize,
'18_filt': ['12_filt', '13_filt','23_filt', '24_filt'],
'19_filt': ['13_filt', '14_filt','24_filt', '25_filt'],
'20_filt': ['14_filt', '15_filt','25_filt', '26_filt'],
'21_filt': ['15_filt', '16_filt','26_filt', '27_filt'],
'22_filt': channels_2visualize,
'23_filt': channels_2visualize,
'24_filt': ['18_filt', '19_filt','28_filt'],
'25_filt': ['19_filt', '20_filt','28_filt', '30_filt'],
'26_filt': ['20_filt', '21_filt','30_filt'],
'27_filt': channels_2visualize,
'28_filt': channels_2visualize,
'29_filt': channels_2visualize,
'30_filt': channels_2visualize,
'31_filt': channels_2visualize,
'32_filt': channels_2visualize,
}
channels_2train = ['13_filt','14_filt','18_filt','19_filt']
# All channels CAR filter
# for num, name in enumerate(neighbour_channels):
# neighbour_channels[name] = channels_2visualize
# neighbour_channels = { #define the neighbour channels for each channel (for the Laplacian filter)
# '1': channels_2visualize,
# '2': channels_2visualize,
# '3': channels_2visualize,
# '4': channels_2visualize,
# '5': channels_2visualize,
# '6': channels_2visualize,
# '7': channels_2visualize,
# '8': ['3', '4','12', '13'],
# '9': ['4', '5','13', '14'],
# '10': ['5', '6','14', '15'],
# '11': ['6', '7','15', '16'],
# '12': channels_2visualize,
# '13': ['8', '9','18', '19'],
# '14': ['9', '10','19', '20'],
# '15': ['10', '11','20', '21'],
# '16': channels_2visualize,
# '17': channels_2visualize,
# '18': ['12', '13','23', '24'],
# '19': ['13', '14','24', '25'],
# '20': ['14', '15','25', '26'],
# '21': ['15', '16','26', '27'],
# '22': channels_2visualize,
# '23': channels_2visualize,
# '24': ['18', '19','28'],
# '25': ['19', '20','28', '30'],
# '26': ['20', '21','30'],
# '27': channels_2visualize,
# '28': channels_2visualize,
# '29': channels_2visualize,
# '30': channels_2visualize,
# '31': channels_2visualize,
# '32': channels_2visualize,
# }
fs = 1000
channel_names = ['chan' + name for name in channels_2visualize]
neighbours = dict()
for chan_neighbour in neighbour_channels:#Add the ones used for the Laplacian filter here
neighbours['chan' + chan_neighbour] = []
for k,chans in enumerate(neighbour_channels[chan_neighbour]):
new_channel = 'chan' + chans
neighbours['chan' + chan_neighbour].append(new_channel)
neighbour_channels = copy.copy(neighbours)
# calculate coefficients for a 4th-order Butterworth band-pass filter from 0.5-80 Hz
band = [0.5, 80] # Hz
nyq = 0.5 * fs
low = band[0] / nyq
high = band[1] / nyq
bpf_coeffs = butter(4, [low, high], btype='band')
band = [48,52] # Hz
nyq = 0.5 * fs
low = band[0] / nyq
high = band[1] / nyq
notchf_coeffs = butter(2, [low, high], btype='bandstop')
extractor_cls = EEGMultiFeatureExtractor
f_extractor = extractor_cls(None, channels_2train = [], channels = channels_2visualize, feature_names = feature_names, feature_fn_kwargs = feature_fn_kwargs, fs=fs, neighbour_channels = neighbour_channels)
features = []
labels = []
for name in hdf_names:
# load EMG data from HDF file
hdf = tables.openFile(name)
eeg = hdf.root.brainamp[:][channel_names]
len_file = len(hdf.root.brainamp[:][channel_names[0]])
# Parameters to generate artificial EEG data
fsample = 1000.00 #Sample frequency in Hz
t = np.arange(0, 10, 1/fsample)  # time frames of 10 s for each state
time = np.arange(0, len_file/fsample, 1/fsample) #time vector for 5min
f = 10 # in Hz
rest_amp = 10
move_amp = 5; #mov state amplitude
#steps to follow to visualize the r2 plots
#1.- Filter the whole signal - only if non-filtered data is extracted from the source/HDF file.
# BP and NOTCH-filters might or might not be applied here, depending on what data (raw or filt) we are reading from the hdf file
cnt = 1
cnt_noise = 1
for k in range(len(channel_names)): #for loop on number of electrodes
if channel_names[k] in ['chan8_filt', 'chan9_filt', 'chan13_filt', 'chan14_filt', 'chan18_filt', 'chan19_filt']:
rest_noise = rest_amp*0.1*np.random.randn(len(t)) #10% of signal amplitude
rest_signal = np.zeros(len(t))
move_noise = move_amp*0.1*np.random.randn(len(t)) #10% of signal amplitude
move_signal = np.zeros(len(t))
for i in np.arange(len(t)):
rest_signal[i] = rest_amp*cnt * math.sin((f+cnt-1)*2*math.pi*t[i]) + rest_noise[i] #rest sinusoidal signal
move_signal[i] = move_amp*cnt * math.sin((f+cnt-1)*2*math.pi*t[i]) + move_noise[i]
cnt += 1
signal = []
# label = []
for i in np.arange(30):
signal = np.hstack([signal, rest_signal, move_signal])
# label = np.hstack([label, np.ones([len(rest_signal)]), np.zeros([len(move_signal)])])
else:
rest_signal = rest_amp*0.1*cnt_noise*np.random.randn(len(t)) #10% of signal amplitude. only noise
move_signal = rest_amp*0.1*cnt_noise*np.random.randn(len(t)) #10% of signal amplitude
cnt_noise += 1
signal = []
# label = []
for i in np.arange(30):
signal = np.hstack([signal, rest_signal, move_signal])
eeg[channel_names[k]]['data'] = signal[:len_file].copy()
eeg[channel_names[k]]['data'] = lfilter(bpf_coeffs[0],bpf_coeffs[1], eeg[channel_names[k]]['data'])
eeg[channel_names[k]]['data'] = lfilter(notchf_coeffs[0],notchf_coeffs[1], eeg[channel_names[k]]['data'])
# Laplacian filter - this has to be applied always, independently of using raw or filt data
#import pdb; pdb.set_trace()
# from scipy.io import savemat
# import os
# savemat(os.path.expandvars('$HOME/code/ismore/filtered_eeg15.mat'), dict(filtered_data = eeg['chan15']['data']))
# savemat(os.path.expandvars('$HOME/code/ismore/filtered_eeg7.mat'), dict(filtered_data = eeg['chan7']['data']))
#import pdb; pdb.set_trace()
eeg = f_extractor.Laplacian_filter(eeg)
# import os
# savemat(os.path.expandvars('$HOME/code/ismore/filtered_Laplace_eeg15.mat'), dict(filtered_data = eeg['chan15']['data']))
# savemat(os.path.expandvars('$HOME/code/ismore/filtered_Laplace_eeg7.mat'), dict(filtered_data = eeg['chan7']['data']))
#import pdb; pdb.set_trace()
#2.- Break down the signal into trial intervals (concatenate relax and right trials as they happen in the exp)
rest_start_idxs_eeg = np.arange(0,len(time), fsample *20)
mov_start_idxs_eeg = np.arange(10*1000,len(time), fsample *20)
rest_end_idxs_eeg = np.arange(10*1000-1,len(time), fsample *20)
mov_end_idxs_eeg = np.arange(20*1000-1,len(time), fsample *20)
if len(mov_end_idxs_eeg) < len(rest_end_idxs_eeg):
rest_end_idxs_eeg = rest_end_idxs_eeg[:len(mov_end_idxs_eeg)]
rest_start_idxs_eeg = rest_start_idxs_eeg[:len(mov_end_idxs_eeg)]
mov_start_idxs_eeg = mov_start_idxs_eeg[:len(mov_end_idxs_eeg)]
for chan_n, k in enumerate(channel_names):
r_features_ch = None
m_features_ch = None
# k = 'chan8_filt'
# chan_n = 7
found_index = k.find('n') + 1
chan_freq = k[found_index:]
#import pdb; pdb.set_trace()
for idx in range(len(rest_start_idxs_eeg)):
# check whether it is better to extract the features for all channels at once or channel by channel!
#rest_window = eeg[k][rest_start_times_eeg[idx]:rest_end_times_eeg[idx]]['data']
#import pdb; pdb.set_trace()
rest_window = eeg[k][rest_start_idxs_eeg[idx]:rest_end_idxs_eeg[idx]+1]['data']
mov_window = eeg[k][mov_start_idxs_eeg[idx]:mov_end_idxs_eeg[idx]+1]['data']
# import pdb; pdb.set_trace()
#3.- Take windows of 500ms every 50ms (i.e. overlap of 450ms)
#4.- Extract features (AR-psd) of each of these windows
n = 0
while n <= (len(rest_window) - 500) and n <= (len(mov_window) - 500):
# if k == 'chan8_filt':
# import pdb; pdb.set_trace()
r_feats = f_extractor.extract_features(rest_window[n:n+500],chan_freq)
m_feats = f_extractor.extract_features(mov_window[n:n+500],chan_freq)
# if k == 'chan8_filt':
# if chan_n == 7:
# import pdb; pdb.set_trace()
if r_features_ch is None:
r_features_ch = r_feats.copy()
m_features_ch = m_feats.copy()
else:
r_features_ch = np.vstack([r_features_ch, r_feats])
m_features_ch = np.vstack([m_features_ch, m_feats])
n +=50
# if chan_n == 12:
# mean_PSD_mov = np.mean(m_features_ch, axis = 0)
# mean_PSD_rest = np.mean(r_features_ch, axis = 0)
#plt.figure(); plt.plot(r_features_ch.T); plt.show()
#import pdb; pdb.set_trace()
# import os
# savemat(os.path.expandvars('$HOME/code/ismore/all_psds.mat'), dict(r_features_ch = r_features_ch, m_features_ch = m_features_ch))
#plt.figure(); plt.plot(mean_PSD_mov); plt.plot(mean_PSD_rest,color ='red'); plt.show()
# if chan_n == 12:
# import pdb; pdb.set_trace()
if len(features) < len(channel_names):
features.append(np.vstack([r_features_ch, m_features_ch]))
labels.append(np.vstack([np.zeros(r_features_ch.shape), np.ones(m_features_ch.shape)]))
else:
features[chan_n] = np.vstack([features[chan_n],np.vstack([r_features_ch, m_features_ch])])
labels[chan_n] = np.vstack([labels[chan_n],np.vstack([np.zeros(r_features_ch.shape), np.ones(m_features_ch.shape)])])
# import os
# from scipy.io import savemat
# savemat(os.path.expandvars('$HOME/code/ismore/labels_features_eegall.mat'), dict(labels = labels, features = features))
# compute r2 coeff for each channel and freq (using all the training datafiles)
#import pdb; pdb.set_trace()
r2 = np.zeros([len(channel_names),features[0].shape[1]])
for k in range(len(features)):
for kk in range(features[k].shape[1]):
r2[k,kk] = stats.pearsonr(labels[k][:,kk], features[k][:,kk])[0]**2
plt.figure('features-ch 8')
for i in np.arange(100):
plt.plot(features[7][i,:],'b'); plt.plot(features[7][-i-1,:],'r'); plt.show(block = False)
plt.figure('features-ch 9')
for i in np.arange(100):
plt.plot(features[8][i,:],'b'); plt.plot(features[8][-i-1,:],'r'); plt.show(block = False)
plt.figure('features-ch 10')
for i in np.arange(100):
plt.plot(features[9][i,:],'b'); plt.plot(features[9][-i-1,:],'r'); plt.show(block = False)
mov_trials = np.where(labels[12][:,0] == 1)
rest_trials = np.where(labels[12][:,0] == 0)
mov_feat_mean = np.mean(features[12][mov_trials[0],:],axis = 0)
rest_feat_mean = np.mean(features[12][rest_trials[0],:],axis = 0)
plt.figure('mean features- ch 13')
plt.plot(rest_feat_mean); plt.plot(mov_feat_mean, 'r'); plt.show(block = False)
#plt.hold(True);
# import os
# from scipy.io import savemat
# savemat(os.path.expandvars('$HOME/code/ismore/r2_values.mat'), dict(r2 = r2))
# import pdb; pdb.set_trace()
#r2[k] = r2_score(labels[k], features[k], multioutput = 'raw_values')
#r2[k] = np.corrcoef(labels[k], features[k], 0)[-1,252:]**2
# plot image of r2 values (freqs x channels)
plt.figure()
plt.imshow(r2, interpolation = 'none')
plt.axis([0,50,0,31])
plt.yticks(
|
np.arange(32)
|
numpy.arange
|
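The r2 map computed above is simply the squared Pearson correlation between the binary rest/move labels and each feature column; a small self-contained sketch of that computation:

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
labels = np.repeat([0.0, 1.0], 100)               # rest vs. move windows
feature = 3.0 * labels + rng.randn(labels.size)   # a feature that tracks the label
r2 = stats.pearsonr(labels, feature)[0] ** 2      # discriminability of this feature
print(round(r2, 2))                               # roughly 0.7 for this informative feature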
import itertools
import numpy as np
import pytest
from PartSegCore.multiscale_opening import MuType, PyMSO, calculate_mu, calculate_mu_mid
from PartSegCore.segmentation.watershed import NeighType, calculate_distances_array
from PartSegCore.sprawl_utils.euclidean_cython import calculate_euclidean
class TestMu:
def test_base_mu(self):
image = np.zeros((10, 10, 10), dtype=np.uint8)
image[2:8, 2:8, 2:8] = 10
res = calculate_mu(image, 2, 8, MuType.base_mu)
assert np.all(res == (image > 0).astype(np.float64))
res = calculate_mu(image, 5, 15, MuType.base_mu)
assert np.all(res == (image > 0).astype(np.float64) * 0.5)
image[4:6, 4:6, 4:6] = 20
res = calculate_mu(image, 5, 15, MuType.base_mu)
assert np.all(res == ((image > 0).astype(np.float64) + (image > 15).astype(np.float64)) * 0.5)
def test_base_mu_masked(self):
image = np.zeros((10, 10, 10), dtype=np.uint8)
image[2:8, 2:8, 2:8] = 10
res = calculate_mu(image, 2, 8, MuType.base_mu, image > 0)
assert np.all(res == (image > 0).astype(np.float64))
res = calculate_mu(image, 5, 15, MuType.base_mu, image > 0)
assert np.all(res == (image > 0).astype(np.float64) * 0.5)
image[4:6, 4:6, 4:6] = 20
res = calculate_mu(image, 5, 15, MuType.base_mu, image > 0)
assert np.all(res == ((image > 0).astype(np.float64) + (image > 15).astype(np.float64)) * 0.5)
def test_reversed_base_mu(self):
image = np.zeros((10, 10, 10), dtype=np.uint8)
image[2:8, 2:8, 2:8] = 10
res = calculate_mu(image, 8, 2, MuType.base_mu)
assert np.all(res == (image == 0).astype(np.float64))
res = calculate_mu(image, 15, 5, MuType.base_mu)
assert np.all(res == ((20 - image) / 20).astype(np.float64))
image[4:6, 4:6, 4:6] = 20
res = calculate_mu(image, 15, 5, MuType.base_mu)
assert np.all(res == (20 - image) / 20)
def test_reversed_base_mu_masked(self):
image = np.zeros((10, 10, 10), dtype=np.uint8)
image[2:8, 2:8, 2:8] = 10
mask = image > 0
res = calculate_mu(image, 8, 2, MuType.base_mu, mask)
assert np.all(res ==
|
np.zeros(image.shape, dtype=np.float64)
|
numpy.zeros
|
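The assertions above are consistent with base_mu being a linear ramp between the two thresholds, clipped to [0, 1]; a hedged reference sketch (an assumption about the behaviour, not PartSegCore's actual implementation):

import numpy as np

def base_mu_reference(image, lower, upper):
    # Linear ramp from 0 at `lower` to 1 at `upper`, clipped to [0, 1];
    # lower > upper gives the decreasing ramp used in the reversed tests.
    return np.clip((image.astype(np.float64) - lower) / (upper - lower), 0.0, 1.0)

img = np.zeros((10, 10, 10), dtype=np.uint8)
img[2:8, 2:8, 2:8] = 10
print(np.unique(base_mu_reference(img, 5, 15)))   # [0.  0.5], matching the expectations above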
from sklearn.decomposition import PCA
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import DataStream_Vis_Utils as utils
from moviepy.editor import *
import skvideo
import cv2
import imageio
import numpy as np
import viz_utils as vu
import scipy
from scipy.signal import find_peaks
from scipy.ndimage import gaussian_filter1d
import pdb
# Import ffmpeg to write videos here..
ffm_path = 'C:/Users/bassp/OneDrive/Desktop/ffmpeg/bin/'
skvideo.setFFmpegPath(ffm_path)
import skvideo.io
# Public functions
def get_principle_components(positions, vel=0, acc=0, num_pcs=3):
pca = PCA(n_components=num_pcs, whiten=True, svd_solver='full')
if vel:
if acc:
pos = np.asarray(positions)
vel = np.asarray(vel)
acc = np.asarray(acc)
pva = np.hstack((pos.reshape(pos.shape[1], pos.shape[0] * pos.shape[2]),
vel.reshape(vel.shape[1], vel.shape[0] * vel.shape[2]),
acc.reshape(acc.shape[1], acc.shape[0] * acc.shape[2])))
pc_vector = pca.fit_transform(pva)
else:
pos = np.asarray(positions)
vel = np.asarray(vel)
pv = np.hstack((pos.reshape(pos.shape[1], pos.shape[0] * pos.shape[2]),
vel.reshape(vel.shape[1], vel.shape[0] * vel.shape[2]
)))
pc_vector = pca.fit_transform(pv)
else:
if acc:
pos = np.asarray(positions)
acc = np.asarray(acc)
pa = np.hstack((pos.reshape(pos.shape[1], pos.shape[0] * pos.shape[2]),
acc.reshape(acc.shape[1], acc.shape[0] * acc.shape[2])))
pc_vector = pca.fit_transform(pa)
else:
pos = np.asarray(positions)
pc_vector = pca.fit_transform(pos.reshape(pos.shape[1], pos.shape[0] * pos.shape[2]))
return pc_vector
def gkern(input_vector, sig=1.):
"""\
Applies a 1-D Gaussian filter with standard deviation `sig` (mirror boundary handling) to the input vector.
"""
resulting_vector = gaussian_filter1d(input_vector, sig, mode='mirror')
return resulting_vector
# noinspection PyTypeChecker,PyBroadException
class ReachViz:
# noinspection SpellCheckingInspection
def __init__(self, date, session, data_path, block_vid_file, kin_path, rat):
self.preprocessed_rmse, self.outlier_list = [], []
self.probabilities, self.bi_reach_vector, self.trial_index, self.first_lick_signal, self.outlier_indexes = \
[], [], [], [], []
self.rat = rat
self.date = date
self.session = session
self.kinematic_data_path = kin_path
self.block_exp_df, self.d, self.kinematic_block, self.velocities, self.speeds, self.dim, self.save_dict = \
[], [], [], [], [], [], []
self.data_path = data_path
self.sensors, self.gen_p_thresh = 0, 0.5
self.load_data() # get exp/kin dataframes
self.reaching_dataframe = pd.DataFrame()
self.trial_start_vectors = 0
self.trial_stop_vectors = 0
self.arm_id_list = []
self.pos_pc, self.pos_v_pc, self.pos_v_a_pc, self.freq_decomp_pos = [], [], [], []
self.sstr, self.lick_index = 0, []
self.rat_gaps, self.total_ints, self.interpolation_rmse, self.outlier_rmse, self.valid_rmse = [], [], [], [], []
self.block_video_path = block_vid_file
# Obtain Experimental Block of Data
self.get_block_data()
# Find "reaching" peaks and tentative start times agnostic of trial time
# self.get_reaches_from_block()
# pdb.set_trace()
# Get Start/Stop of Trials
self.get_starts_stops()
# Initialize sensor variables
self.exp_response_sensor, self.trial_sensors, self.h_moving_sensor, self.reward_zone_sensor, self.lick, \
self.trial_num = 0, 0, 0, 0, 0, 0
self.time_vector, self.images, self.bout_vector = [], [], []
self.trial_rewarded = False
self.filename = None
self.total_outliers, self.rewarded, self.left_palm_f_x, self.right_palm_f_x = [], [], [], []
self.total_raw_speeds, self.total_preprocessed_speeds, self.total_probabilities = [], [], []
self.reach_start_time = []
self.reach_peak_time, self.right_palm_maxima = [], []
self.left_start_times = []
self.left_peak_times, self.left_palm_maxima = [], []
self.right_reach_end_time, self.right_reach_end_times = [], []
self.left_reach_end_time, self.left_reach_end_times = [], []
self.right_start_times, self.left_start_times = [], []
self.left_hand_speeds, self.right_hand_speeds, self.total_speeds, self.left_hand_raw_speeds, \
self.right_hand_raw_speeds = [], [], [], [], []
self.right_peak_times = []
self.bimanual_reach_times = []
self.k_length = 0
self.speed_holder, self.raw_speeds = [], []
self.left_arm_pc_pos, self.left_arm_pc_pos_v, self.left_arm_pc_pos_v_a, self.right_arm_pc_pos, \
self.right_arm_pc_pos_v, self.right_arm_pc_pos_v_a = [], [], [], [], [], []
self.uninterpolated_left_palm_v, self.uninterpolated_right_palm_v = [], []
# Initialize kinematic variables
self.left_palm_velocity, self.right_palm_velocity, self.lag, self.clip_path, self.lick_vector, self.reach_vector, \
self.prob_index, self.pos_index, self.seg_num, self.prob_nose, self.right_wrist_velocity, self.left_wrist_velocity \
= 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, [], []
self.nose_p, self.handle_p, self.left_shoulder_p, self.left_forearm_p, self.left_wrist_p = [], [], [], [], []
self.left_palm_p, self.left_index_base_p = [], []
self.left_index_tip_p, self.left_middle_base_p, self.left_middle_tip_p, self.left_third_base_p, \
self.left_third_tip_p, self.left_end_base_p, self.left_end_tip_p, self.right_shoulder_p, self.right_forearm_p, \
self.right_wrist_p, self.right_palm_p = [], [], [], [], [], [], [], [], [], [], []
self.right_index_base_p, self.right_index_tip_p, self.right_middle_base_p, self.right_middle_tip_p = [], [], [], []
self.right_third_base_p, self.right_third_tip_p, self.right_end_base_p, self.right_end_tip_p = [], [], [], []
self.fps = 20
# Kinematic variable initialization
self.nose_v, self.handle_v, self.left_shoulder_v, self.left_forearm_v, self.left_wrist_v, self.left_palm_v, self.left_index_base_v, \
self.left_index_tip_v, self.left_middle_base_v, self.left_middle_tip_v, self.left_third_base_v, self.left_third_tip_v, self.left_end_base_v, \
self.left_end_tip_v, self.right_shoulder_v, self.right_forearm_v, self.right_wrist_v, self.right_palm_v, self.right_index_base_v, \
self.right_index_tip_v, self.right_middle_base_v, self.right_middle_tip_v, self.right_third_base_v, self.right_third_tip_v, \
self.right_end_base_v, self.right_end_tip_v = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], \
[], [], [], [], [], [], [], [], [], [], []
self.nose_s, self.handle_s, self.left_shoulder_s, self.left_forearm_s, self.left_wrist_s, self.left_palm_s, self.left_index_base_s, \
self.left_index_tip_s, self.left_middle_base_s, self.left_middle_tip_s, self.left_third_base_s, self.left_third_tip_s, self.left_end_base_s, \
self.left_end_tip_s, self.right_shoulder_s, self.right_forearm_s, self.right_wrist_s, self.right_palm_s, self.right_index_base_s, \
self.right_index_tip_s, self.right_middle_base_s, self.right_middle_tip_s, self.right_third_base_s, self.right_third_tip_s, \
self.right_end_base_s, self.right_end_tip_s = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], \
[], [], [], [], [], [], [], [], []
self.nose_a, self.handle_a, self.left_shoulder_a, self.left_forearm_a, self.left_wrist_a, self.left_palm_a, self.left_index_base_a, \
self.left_index_tip_a, self.left_middle_base_a, self.left_middle_tip_a, self.left_third_base_a, self.left_third_tip_a, self.left_end_base_a, \
self.left_end_tip_a, self.right_shoulder_a, self.right_forearm_a, self.right_wrist_a, self.right_palm_a, self.right_index_base_a, \
self.right_index_tip_a, self.right_middle_base_a, self.right_middle_tip_a, self.right_third_base_a, self.right_third_tip_a, \
self.right_end_base_a, self.right_end_tip_a = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], \
[], [], [], [], [], [], [], [], []
self.nose_o, self.handle_o, self.left_shoulder_o, self.left_forearm_o, self.left_wrist_o, self.left_palm_o, self.left_index_base_o, \
self.left_index_tip_o, self.left_middle_base_o, self.left_middle_tip_o, self.left_third_base_o, self.left_third_tip_o, self.left_end_base_o, \
self.left_end_tip_o, self.right_shoulder_o, self.right_forearm_o, self.right_wrist_o, self.right_palm_o, self.right_index_base_o, \
self.right_index_tip_o, self.right_middle_base_o, self.right_middle_tip_o, self.right_third_base_o, self.right_third_tip_o, \
self.right_end_base_o, self.right_end_tip_o = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], \
[], [], [], [], [], [], [], [], []
# Initialize kinematic, positional and probability-indexed variables
self.prob_filter_index, self.left_arm_filter_index, self.right_arm_filter_index = [], [], []
self.right_index_base, self.right_index_tip, self.right_middle_base, self.right_middle_tip, \
self.right_third_base, self.right_third_tip, self.right_end_base, self.right_end_tip = [], [], [], [], [], [], [], []
self.left_index_base, self.left_index_tip, self.left_middle_base, self.left_middle_tip, self.left_third_base, \
self.left_third_tip, self.left_end_base, self.left_end_tip = [], [], [], [], [], [], [], []
self.start_trial_indice, self.trial_cut_vector, self.block_cut_vector, self.handle_velocity, \
self.bout_reach = [], [], [], [], []
self.handle_moved, self.gif_save_path, self.prob_right_index, self.prob_left_index, self.l_pos_index, \
self.r_pos_index = 0, 0, 0, 0, 0, 0
self.x_robot, self.y_robot, self.z_robot, self.uninterpolated_right_palm, \
self.uninterpolated_left_palm = [], [], [], [], []
self.prob_left_digit, self.prob_right_digit, self.left_digit_filter_index, \
self.right_digit_filter_index = [], [], [], []
self.reaching_mask, self.right_arm_speed, self.left_arm_speed, self.reprojections, \
self.interpolation_gaps = [], [], [], [], []
self.left_palm_speed, self.right_palm_speed, self.handle_speed, self.right_arm_velocity, \
self.left_arm_velocity = [], [], [], [], []
self.bout_flag, self.positions, self.sensor_data_list = False, [], []
self.nose, self.handle, self.body_prob, self.central_body_mass, self.prob_left_shoulder, self.prob_right_shoulder = [
0, 0, 0, 0, 0, 0]
self.left_shoulder, self.right_forearm, self.left_forearm, self.right_wrist, self.left_wrist, self.right_palm, self.left_palm = [
0, 0, 0, 0, 0, 0, 0]
self.prob_nose, self.prob_right_arm, self.prob_left_arm, self.right_digits, self.left_digits, self.right_shoulder = [
0, 0, 0, 0, 0, 0]
self.robot_handle_speed, self.reprojected_handle, self.reprojected_nose, self.reprojected_bhandle, self.reprojected_left_palm = [
0, 0, 0, 0, 0]
self.reprojected_right_palm, self.reprojected_left_wrist, self.reprojected_right_wrist, self.reprojected_left_shoulder, self.reprojected_right_shoulder = [
0, 0, 0, 0, 0]
self.prob_right_index, self.prob_left_index, self.bi_pos_index, self.r_reach_vector, self.l_reach_vector, \
self.left_prob_index, self.right_prob_index = [0, 0, 0, 0, 0, 0, 0]
self.prob_list, self.pos_list, self.interpolation_rmse_list, self.valid_rmse_list, \
self.outlier_rmse_list = [], [], [], [], []
return
def load_data(self):
""" Function to load per-rat database into ReachLoader. """
df = vu.import_robot_data(self.data_path)
self.sensors = df.reset_index(drop=True)
with (open(self.kinematic_data_path, "rb")) as openfile:
self.d = pickle.load(openfile)
return
def make_paths(self):
""" Function to construct a structured directory to save visual results. """
vu.mkdir_p(self.sstr)
vu.mkdir_p(self.sstr + '/data')
vu.mkdir_p(self.sstr + '/videos')
vu.mkdir_p(self.sstr + '/videos/reaches')
vu.mkdir_p(self.sstr + '/plots')
vu.mkdir_p(self.sstr + '/plots/reaches')
vu.mkdir_p(self.sstr + '/timeseries')
vu.mkdir_p(self.sstr + '/timeseries_analysis_plots')
vu.mkdir_p(self.sstr + '/timeseries_analysis_plots/reaches')
return
def get_block_data(self):
""" Function to fetch block positional and sensor data from rat database. """
for kin_items in self.d:
try:
sess = kin_items.columns.levels[1]
date = kin_items.columns.levels[2]
self.dim = kin_items.columns.levels[3]
except: # fetched a null dataframe (0 entry in list), avoid..
pass
if sess[0] in self.session:
if '_' in date[0][-1]:
if date[0][-3:-1] in self.date:
print('Hooked block positions for date ' + date[0] + ' and session ' + sess[0])
self.kinematic_block = kin_items
else:
if date[0][-2:] in self.date:
print('Hooked block positions for date ' + date[0] + ' and session ' + sess[0])
self.kinematic_block = kin_items
self.block_exp_df = self.sensors.loc[self.sensors['Date'] == self.date].loc[self.sensors['S'] == self.session]
return
def get_reaches_from_block(self):
self.reach_start_time = []
self.reach_peak_time = []
self.left_start_times = []
self.right_start_times = []
self.left_peak_times = []
self.right_peak_times = []
self.right_reach_end_times = []
self.left_reach_end_times = []
# Extract sensor data across whole block
self.extract_sensor_data(0, -1)
# Extract kinematic data across whole block
left_wrist = vu.norm_coordinates(
self.kinematic_block[self.kinematic_block.columns[15:18]].values) # 21 end
right_wrist = vu.norm_coordinates(
self.kinematic_block[self.kinematic_block.columns[51:54]].values) # 57 end
left_palm = vu.norm_coordinates(
self.kinematic_block[self.kinematic_block.columns[18:21]].values)
right_palm = vu.norm_coordinates(
self.kinematic_block[self.kinematic_block.columns[54:57]].values)
state_values = [left_wrist, right_wrist, left_palm, right_palm]
w = 81
left_wrist_p = np.mean(self.kinematic_block[self.kinematic_block.columns[15 + w:18 + w]].values,
axis=1) # 21 end
right_wrist_p = np.mean(self.kinematic_block[self.kinematic_block.columns[51 + w:54 + w]].values,
axis=1) # 57 end
left_palm_p = np.mean(self.kinematic_block[self.kinematic_block.columns[18 + w:21 + w]].values,
axis=1)
right_palm_p = np.mean(self.kinematic_block[self.kinematic_block.columns[54 + w:57 + w]].values,
axis=1)
prob_values = [left_wrist_p, right_wrist_p, left_palm_p, right_palm_p]
prob_nose = np.squeeze(
np.mean(self.kinematic_block[self.kinematic_block.columns[6 + 81:9 + 81]].values, axis=1))
prob_filter_index = np.where(prob_nose < 0.3)[0]
positions = []
velocities = []
accelerations = []
speed = []
filtered_pos = []
# Pre-Process input data
for idx, pos_val in enumerate(state_values):
p_val = prob_values[idx]
p_o = self.threshold_data_with_probabilities(p_val, p_thresh=0.6)
svd, acc, speeds = self.calculate_kinematics_from_position(np.copy(pos_val))
v_o = np.where(svd > 2)
possi, num_int, gap_ind = vu.interpolate_3d_vector(np.copy(pos_val), v_o, p_o)
# filtered_pos = vu.cubic_spline_smoothing(np.copy(possi), spline_coeff=0.1)
for ix in range(0, 3):
filtered_pos.append(gkern(np.copy(possi[ix, :]), 3))
filtered_pos_spline = vu.cubic_spline_smoothing(np.copy(filtered_pos), spline_coeff=0.99)
v, a, s = self.calculate_kinematics_from_position(np.copy(filtered_pos_spline))
# 0 out non-values
filtered_pos[prob_filter_index] = 0
v[prob_filter_index] = 0
s[prob_filter_index] = 0
a[prob_filter_index] = 0
positions.append(filtered_pos)
velocities.append(v)
accelerations.append(a)
speed.append(s)
# Find peaks across entire block
right_palm_peaks = find_peaks(speed[3], height=0.6, distance=15)[0]
left_palm_peaks = find_peaks(speed[2], height=0.6, distance=15)[0]
# Find minima for each peak across each hand
for ir in range(0, left_palm_peaks.shape[0]):
left_palm_below_thresh = \
np.where(speed[2][left_palm_peaks[ir] - 30:left_palm_peaks[ir]] < 0.1)[0]
left_wrist_below_thresh = \
np.where(speed[2][left_palm_peaks[ir] - 30:left_palm_peaks[ir]] < 0.1)[0]
left_palm_post_peak = np.where(speed[2][left_palm_peaks[ir]:left_palm_peaks[ir] + 30])
try:
start_time_l = left_palm_peaks[ir] - \
np.intersect1d(left_palm_below_thresh, left_wrist_below_thresh)[-1]
except:
start_time_l = left_palm_peaks[ir] - left_palm_below_thresh[-1]
self.left_start_times.append(start_time_l)
self.left_peak_times.append(left_palm_peaks[ir])
self.reach_peak_time.append(left_palm_peaks[ir]) # Record Peak
self.reach_start_time.append(start_time_l)
self.left_reach_end_time.append(left_palm_post_peak)
# Same, for right hand
for ir in range(0, right_palm_peaks.shape[0]):
right_palm_below_thresh = \
np.where(speed[3][right_palm_peaks[ir] - 30:right_palm_peaks[ir]] < 0.1)[0]
right_wrist_below_thresh = \
np.where(speed[3][right_palm_peaks[ir] - 30:right_palm_peaks[ir]] < 0.1)[0]
right_palm_post_peak = np.where(speed[3][right_palm_peaks[ir]:right_palm_peaks[ir] + 30] < 0.1)[0]
try:
start_time_r = right_palm_peaks[ir] - \
np.intersect1d(right_palm_below_thresh, right_wrist_below_thresh)[-1]
except:
start_time_r = right_palm_peaks[ir] - right_palm_below_thresh[-1]
self.right_start_times.append(start_time_r)
self.right_peak_times.append(right_palm_peaks[ir])
self.reach_peak_time.append(right_palm_peaks[ir]) # Record Peak
self.reach_start_time.append(start_time_r)
self.right_reach_end_time.append(right_palm_post_peak)
self.right_start_times = list(self.right_start_times)
self.right_peak_times = list(self.right_peak_times)
self.bimanual_reach_times = []
# For each tentative right-handed reach, check if there is a "left" start within 10 frames
for right_reach in self.right_start_times:
for left_reach in self.left_start_times:
if left_reach - 10 < right_reach < left_reach + 10: # Mark as bi-manual
self.bimanual_reach_times.append(np.asarray([right_reach, left_reach]))
self.split_videos_into_reaches()
return
def split_videos_into_reaches(self):
bi = self.block_video_path.rsplit('.')[0]
self.sstr = bi + '/reaching_split_trials'
for ir, r in enumerate(self.right_start_times):
print('Splitting video at' + str(r))
self.split_trial_video(r - 5, self.right_reach_end_time + 5, segment=True, num_reach=ir)
for ir, r in enumerate(self.left_start_times):
print('Splitting video at' + str(r))
self.split_trial_video(r - 5, self.left_reach_end_time + 5, segment=True, num_reach=ir + 3000)
return
def threshold_data_with_probabilities(self, p_vector, p_thresh):
""" Function to threshold input position vectors by the probability of this position being present. The mean
over multiple cameras is used to better estimate error.
"""
low_p_idx = np.where(p_vector < p_thresh) # Filter positions by ind p values
return np.asarray(low_p_idx)
def get_starts_stops(self):
""" Obtain the start and stop times of coarse behavior from the sensor block. """
self.trial_start_vectors = self.block_exp_df['r_start'].values[0]
self.trial_stop_vectors = self.block_exp_df['r_stop'].values[0]
print('Number of Trials: ' + str(len(self.trial_start_vectors)))
return
def extract_sensor_data(self, idxstrt, idxstp, filter_sensors=True, check_lick=True):
""" Function to extract probability thresholded sensor data from ReachMaster. Data is coarsely filtered.
"""
self.k_length = self.kinematic_block[self.kinematic_block.columns[3:6]].values.shape[0]
self.block_exp_df = self.sensors.loc[self.sensors['Date'] == self.date].loc[self.sensors['S'] == self.session]
self.h_moving_sensor = np.asarray(np.copy(self.block_exp_df['moving'].values[0][idxstrt:idxstp]))
self.lick_index = np.asarray(
|
np.copy(self.block_exp_df['lick'].values[0])
|
numpy.copy
|
#!/usr/bin/env python
# coding: utf-8
# # Collection of modules for image analysis
# ### March 13,2020
import numpy as np
import matplotlib.pyplot as plt
# import pandas as pd
import subprocess as sp
import os
import glob
import itertools
from scipy import fftpack
from matplotlib.colors import LogNorm, PowerNorm, Normalize
### Histogram modules
def f_plot_grid(arr,cols=16,fig_size=(15,5)):
''' Plot a grid of images
'''
size=arr.shape[0]
rows=int(np.ceil(size/cols))
print(rows,cols)
fig,axarr=plt.subplots(rows,cols,figsize=fig_size, gridspec_kw = {'wspace':0, 'hspace':0})
if rows==1: axarr=np.reshape(axarr,(rows,cols))
if cols==1: axarr=np.reshape(axarr,(rows,cols))
for i in range(min(rows*cols,size)):
row,col=int(i/cols),i%cols
try:
axarr[row,col].imshow(arr[i],origin='lower', cmap='YlGn', extent = [0, 128, 0, 128], norm=Normalize(vmin=-1., vmax=1.))
# Drop axis label
except Exception as e:
print('Exception:',e)
pass
temp=plt.setp([a.get_xticklabels() for a in axarr[:-1,:].flatten()], visible=False)
temp=plt.setp([a.get_yticklabels() for a in axarr[:,1:].flatten()], visible=False)
def f_plot_intensity_grid(arr,cols=5,fig_size=(12,12)):
'''
Module to plot the pixel intensity histograms for a set of images on a gird
'''
size=arr.shape[0]
assert cols<=size, "cols %s greater than array size %s"%(cols,size)
num=arr.shape[0]
rows=int(np.ceil(size/cols))
# print(rows,cols)
# print("Plotting %s images" %(rows*cols))
fig,axarr=plt.subplots(rows,cols,figsize=fig_size,constrained_layout=True)
for i in range(rows*cols):
row,col=int(i/cols),i%cols
### Get histogram
try:
img_arr=arr[i]
norm=False
hist, bin_edges = np.histogram(img_arr.flatten(), bins=25, density=norm)
centers = (bin_edges[:-1] + bin_edges[1:]) / 2
axarr[row,col].errorbar(centers,hist,fmt='o-')
# fig.subplots_adjust(left=0.01,bottom=0.01,right=0.1,top=0.1,wspace=0.001,hspace=0.0001)
except Exception as e:
print('error',e)
def f_batch_histogram(img_arr,bins,norm,hist_range):
''' Compute histogram statistics for a batch of images'''
## Extracting the range. This is important to ensure that the different histograms are compared correctly
if hist_range==None : ulim,llim=np.max(img_arr),np.min(img_arr)
else: ulim,llim=hist_range[1],hist_range[0]
# print(ulim,llim)
### array of histogram of each image
hist_arr=np.array([np.histogram(arr.flatten(), bins=bins, range=(llim,ulim), density=norm) for arr in img_arr],dtype=object) ## range is important
hist=np.stack(hist_arr[:,0]) # First element is histogram array
bin_list=np.stack(hist_arr[:,1]) # Second element is bin value
### Compute statistics over histograms of individual images
mean,err=np.mean(hist,axis=0),np.std(hist,axis=0)/np.sqrt(hist.shape[0])
bin_edges=bin_list[0]
centers = (bin_edges[:-1] + bin_edges[1:]) / 2
return mean,err,centers
def f_pixel_intensity(img_arr,bins=25,label='validation',mode='avg',normalize=False,log_scale=True,plot=True, hist_range=None):
'''
Module to compute and plot histogram for pixel intensity of images
Has 2 modes : simple and avg
simple mode: No errors. Just flatten the input image array and compute histogram of full data
avg mode(Default) :
- Compute histogram for each image in the image array
- Compute errors across each histogram
'''
norm=normalize # Whether to normalize the histogram
if plot:
plt.figure()
plt.xlabel('Pixel value')
plt.ylabel('Counts')
plt.title('Pixel Intensity Histogram')
if log_scale: plt.yscale('log')
if mode=='simple':
hist, bin_edges = np.histogram(img_arr.flatten(), bins=bins, density=norm, range=hist_range)
centers = (bin_edges[:-1] + bin_edges[1:]) / 2
if plot: plt.errorbar(centers, hist, fmt='o-', label=label)
return hist,None
elif mode=='avg':
### Compute histogram for each image.
mean,err,centers=f_batch_histogram(img_arr,bins,norm,hist_range)
if plot: plt.errorbar(centers,mean,yerr=err,fmt='o-',label=label)
return mean,err
def f_compare_pixel_intensity(img_lst,label_lst=['img1','img2'],bkgnd_arr=[],log_scale=True, normalize=True, mode='avg',bins=25, hist_range=None):
'''
Module to compute and plot histogram for pixel intensity of images
Has 2 modes : simple and avg
simple mode: No errors. Just flatten the input image array and compute histogram of full data
avg mode(Default) :
- Compute histogram for each image in the image array
- Compute errors across each histogram
    bkgnd_arr : histogram of this array is plotted with a +/- sigma band
'''
norm=normalize # Whether to normalize the histogram
plt.figure()
## Plot background distribution
if len(bkgnd_arr):
if mode=='simple':
hist, bin_edges = np.histogram(bkgnd_arr.flatten(), bins=bins, density=norm, range=hist_range)
centers = (bin_edges[:-1] + bin_edges[1:]) / 2
plt.errorbar(centers, hist, color='k',marker='*',linestyle=':', label='bkgnd')
elif mode=='avg':
### Compute histogram for each image.
mean,err,centers=f_batch_histogram(bkgnd_arr,bins,norm,hist_range)
plt.plot(centers,mean,linestyle=':',color='k',label='bkgnd')
plt.fill_between(centers, mean - err, mean + err, color='k', alpha=0.4)
### Plot the rest of the datasets
for img,label,mrkr in zip(img_lst,label_lst,itertools.cycle('o^*sDHPdpx_>')):
if mode=='simple':
hist, bin_edges = np.histogram(img.flatten(), bins=bins, density=norm, range=hist_range)
centers = (bin_edges[:-1] + bin_edges[1:]) / 2
plt.errorbar(centers, hist, fmt=mrkr+'-', label=label)
elif mode=='avg':
### Compute histogram for each image.
mean,err,centers=f_batch_histogram(img,bins,norm,hist_range)
# print('Centers',centers)
plt.errorbar(centers,mean,yerr=err,fmt=mrkr+'-',label=label)
if log_scale:
plt.yscale('log')
        plt.xscale('symlog', linthresh=50)  # 'linthreshx' was renamed to 'linthresh' in newer Matplotlib
plt.legend()
plt.xlabel('Pixel value')
plt.ylabel('Counts')
plt.title('Pixel Intensity Histogram')
# ## Spectral modules
# <!-- %%latex -->
# ### Formulae
# Image gives
# $$ I(x,y) $$
#
# Fourier transform
# $$ F(k_x, k_y) = \int I(x,y) \, e^{-2 \pi i (k_x x + k_y y)} \, dx \, dy $$
#
# 1D azimuthal average of the power spectrum
# $$ P(k) = \frac{1}{2 \pi} \int \left| F(k, \theta) \right|^2 \, d\theta $$
# In[4]:
def f_get_azimuthalAverage(image, center=None):
"""
Calculate the azimuthally averaged radial profile.
image - The 2D image
center - The [x,y] pixel coordinates used as the center. The default is
None, which then uses the center of the image (including
             fractional pixels).
source: https://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
"""
# Create a grid of points with x and y coordinates
y, x = np.indices(image.shape)
if not center:
center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
# Get the radial coordinate for every grid point. Array has the shape of image
r = np.hypot(x - center[0], y - center[1])
ind = np.argsort(r.flat) ### Get indices that sort the "r" array in ascending order.
r_sorted = r.flat[ind] ### Sort the "r" array
i_sorted = image.flat[ind] ### Sort the image points according to the radial coordinate
# Get the integer part of the radii (bin size = 1)
r_int = r_sorted.astype(int)
# Find all pixels that fall within each radial bin.
deltar = r_int[1:] - r_int[:-1] # Assumes all radii represented
rind = np.where(deltar)[0] # location of changed radius
    nr = rind[1:] - rind[:-1]        # number of pixels in each radius bin
# Cumulative sum to figure out sums for each radius bin
csim = np.cumsum(i_sorted, dtype=float)
tbin = csim[rind[1:]] - csim[rind[:-1]]
radial_prof = tbin / nr
return radial_prof
def f_radial_profile(data, center=None):
''' Module to compute radial profile of a 2D image '''
y, x = np.indices((data.shape)) # Get a grid of x and y values
if not center: center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0]) # compute centers
# get radial values of every pair of points
r = np.sqrt((x - center[0])**2 + (y - center[1])**2)
    r = r.astype(int)  # the np.int alias was removed from NumPy; use the builtin int
# Compute histogram of r values
tbin = np.bincount(r.ravel(), data.ravel())
nr = np.bincount(r.ravel())
radialprofile = tbin / nr
return radialprofile[1:-1]
def f_get_power_spectrum(image,GLOBAL_MEAN=0.9998563):
"""
Computes azimuthal average of 2D power spectrum of a np array image
GLOBAL_MEAN is the mean pixel value of the training+validation datasets
"""
    ### Compute 2D Fourier transform
F1 = fftpack.fft2((image - GLOBAL_MEAN)/GLOBAL_MEAN)
F2 = fftpack.fftshift(F1)
### Absolute value of F-transform
pspec2d = np.abs(F2)**2
### Compute azimuthal average
# P_k = f_get_azimuthalAverage(pspec2d)
P_k = f_radial_profile(pspec2d)
return P_k
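# Usage sketch (added illustration, not part of the original notebook): ties the formulae in
# the markdown cell above to the code path fft2 -> fftshift -> |F|^2 -> radial average.
# Assumes np and scipy's fftpack are already imported earlier in the notebook, as the
# functions above require; the random image and its own mean as GLOBAL_MEAN are made up.
_demo_img = np.random.default_rng(1).random((128, 128))
_demo_pk = f_get_power_spectrum(_demo_img, GLOBAL_MEAN=_demo_img.mean())
print('P(k) has one value per radial bin:', _demo_pk.shape)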
def f_batch_spectrum(arr):
"""Computes power spectrum for a batch of images"""
P_k=[f_get_power_spectrum(i) for i in arr]
return np.array(P_k)
def f_compute_spectrum(img_arr,plot=False,label='input',log_scale=True):
'''
Module to compute Average of the 1D spectrum
'''
num = img_arr.shape[0]
Pk = f_batch_spectrum(img_arr)
#mean,std = np.mean(Pk, axis=0),np.std(Pk, axis=0)/np.sqrt(Pk.shape[0])
mean,std = np.mean(Pk, axis=0),np.std(Pk, axis=0)
k=np.arange(len(mean))
if plot:
plt.figure()
plt.plot(k, mean, 'k:')
plt.plot(k, mean + std, 'k-',label=label)
plt.plot(k, mean - std, 'k-')
# plt.xscale('log')
if log_scale: plt.yscale('log')
plt.ylabel(r'$P(k)$')
plt.xlabel(r'$k$')
plt.title('Power Spectrum')
plt.legend()
return mean,std
def f_compare_spectrum(img_lst,label_lst=['img1','img2'],bkgnd_arr=[],log_scale=True):
'''
Compare the spectrum of 2 sets of images:
img_lst contains the set of images arrays, Each is of the form (num_images,height,width)
label_lst contains the labels used in the plot
'''
plt.figure()
## Plot background distribution
if len(bkgnd_arr):
Pk= f_batch_spectrum(bkgnd_arr)
mean,err = np.mean(Pk, axis=0),np.std(Pk, axis=0)/np.sqrt(Pk.shape[0])
k=np.arange(len(mean))
plt.plot(k, mean,color='k',linestyle='-',label='bkgnd')
plt.fill_between(k, mean - err, mean + err, color='k',alpha=0.8)
for img_arr,label,mrkr in zip(img_lst,label_lst,itertools.cycle('>^*sDHPdpx_')):
Pk= f_batch_spectrum(img_arr)
mean,err = np.mean(Pk, axis=0),np.std(Pk, axis=0)/np.sqrt(Pk.shape[0])
k=np.arange(len(mean))
# print(mean.shape,std.shape)
plt.fill_between(k, mean - err, mean + err, alpha=0.4)
plt.plot(k, mean, marker=mrkr, linestyle=':',label=label)
if log_scale: plt.yscale('log')
plt.ylabel(r'$P(k)$')
plt.xlabel(r'$k$')
plt.title('Power Spectrum')
plt.legend()
### 3D spectrum modules
# ### Spectral modules
## numpy code
def f_radial_profile_3d(data, center=None):
    ''' Module to compute radial profile of a 3D image '''
    z, y, x = np.indices((data.shape)) # Get a grid of x, y and z values
    if center is None:
        center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0, (z.max()-z.min())/2.0]) # compute centers
    # get radial distance of every grid point from the center
    r = np.sqrt((x - center[0])**2 + (y - center[1])**2 + (z - center[2])**2)
    r = r.astype(int)
# Compute histogram of r values
tbin = np.bincount(r.ravel(), data.ravel())
nr = np.bincount(r.ravel())
radialprofile = tbin / nr
return radialprofile[1:-1]
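# Usage sketch (added illustration): the 3D radial profile of a small random volume, with the
# center defaulting to the middle of the cube. The volume below is made up for the example.
_demo_vol = np.random.default_rng(2).random((32, 32, 32))
print('3D radial profile bins:', f_radial_profile_3d(_demo_vol).shape)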
def f_compute_spectrum_3d(arr):
'''
compute spectrum for a 3D image
'''
# GLOBAL_MEAN=1.0
# arr=((arr - GLOBAL_MEAN)/GLOBAL_MEAN)
y1=
|
np.fft.fftn(arr)
|
numpy.fft.fftn
|
import matplotlib.pyplot as plt
import numpy as np
import torchvision
from brancher.variables import RootVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, CategoricalVariable, EmpiricalVariable, RandomIndices
from brancher import inference
import brancher.functions as BF
# Data
number_pixels = 28*28
number_output_classes = 10
train = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=None)
test = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=None)
dataset_size = len(train)
input_variable = np.reshape(train.train_data.numpy(), newshape=(dataset_size, number_pixels, 1))
output_labels = train.train_labels.numpy()
# Data sampling model
minibatch_size = 5
minibatch_indices = RandomIndices(dataset_size=dataset_size, batch_size=minibatch_size, name="indices", is_observed=True)
x = EmpiricalVariable(input_variable, indices=minibatch_indices, name="x", is_observed=True)
labels = EmpiricalVariable(output_labels, indices=minibatch_indices, name="labels", is_observed=True)
# Architecture parameters
number_hidden_units = 20
b1 = NormalVariable(np.zeros((number_hidden_units, 1)),
10*np.ones((number_hidden_units, 1)), "b1")
b2 = NormalVariable(np.zeros((number_output_classes, 1)),
10*np.ones((number_output_classes, 1)), "b2")
weights1 = NormalVariable(np.zeros((number_hidden_units, number_pixels)),
10*np.ones((number_hidden_units, number_pixels)), "weights1")
weights2 = NormalVariable(np.zeros((number_output_classes, number_hidden_units)),
10*np.ones((number_output_classes, number_hidden_units)), "weights2")
# Forward pass
hidden_units = BF.tanh(BF.matmul(weights1, x) + b1)
final_activations = BF.matmul(weights2, hidden_units) + b2
k = CategoricalVariable(logits=final_activations, name="k")
# Probabilistic model
model = ProbabilisticModel([k])
# Observations
k.observe(labels)
# Variational Model
Qb1 = NormalVariable(np.zeros((number_hidden_units, 1)),
0.01*np.ones((number_hidden_units, 1)), "b1", learnable=True)
Qb2 = NormalVariable(np.zeros((number_output_classes, 1)),
0.01*
|
np.ones((number_output_classes, 1))
|
numpy.ones
|
import numpy, pandas
from scipy.special import entr
try:
import pygeos
except (ImportError, ModuleNotFoundError):
pass # gets handled in the _cast function.
# from nowosad and stepinski
# https://doi.org/10.1080/13658816.2018.1511794
def _overlay(a, b, return_indices=False):
"""
Compute geometries from overlaying a onto b
"""
tree = pygeos.STRtree(a)
bix, aix = tree.query_bulk(b)
overlay = pygeos.intersection(a[aix], b[bix])
if return_indices:
return aix, bix, overlay
return overlay
def _cast(collection):
"""
Cast a collection to a pygeos geometry array.
"""
try:
import pygeos, geopandas
except (ImportError, ModuleNotFoundError) as exception:
raise type(exception)(
"pygeos and geopandas are required for map comparison statistics."
)
if isinstance(collection, (geopandas.GeoSeries, geopandas.GeoDataFrame)):
return collection.geometry.values.data.squeeze()
elif pygeos.is_geometry(collection).all():
if isinstance(collection, (numpy.ndarray, list)):
return numpy.asarray(collection)
else:
return numpy.array([collection])
elif isinstance(collection, (numpy.ndarray, list)):
return pygeos.from_shapely(collection).squeeze()
else:
return numpy.array([pygeos.from_shapely(collection)])
def external_entropy(a, b, balance=0, base=numpy.e):
"""
The harmonic mean summarizing the overlay entropy of two
sets of polygons: a onto b and b onto a.
Called the v-measure in :cite:`nowosad2018`
Parameters
----------
a : geometry array of polygons
array of polygons
b : geometry array of polygons
array of polygons
    balance: float
        weight describing the relative importance of pattern a or pattern b.
        When large and positive, we weight the measure more to ensure polygons in b
        are fully contained by polygons in a. When large and negative,
        we weight the measure to ensure polygons in a are fully contained
        by polygons in b. Corresponds to the log of beta in :cite:`nowosad2018`.
base: float
base of logarithm to use throughout computation
Returns
--------
    float expressing the harmonic mean (v-measure) of the overlay
    entropies of a onto b and b onto a.
Example
-------
>>> r1 = geopandas.read_file('tests/regions.zip', layer='regions1')
>>> r2 = geopandas.read_file('tests/regions.zip', layer='regions2')
>>> external_entropy(r1, r2)
"""
a = _cast(a)
b = _cast(b)
beta = numpy.exp(balance)
aix, bix, ab = _overlay(a, b, return_indices=True)
a_areas = pygeos.area(a)
b_areas = pygeos.area(b)
ab_areas = pygeos.area(ab)
b_onto_a = _overlay_entropy(aix, a_areas, ab_areas, base=base) # SjZ
# SZ, as sabre has entropy.empirical(rowSums(xtab), unit='log2')
b_onto_a /= areal_entropy(areas=b_areas, local=False, base=base)
a_onto_b = _overlay_entropy(bix, b_areas, ab_areas, base=base) # SjR
# SR, as sabre has entropy.empirical(colSums(xtab), unit='log2')
a_onto_b /= areal_entropy(areas=a_areas, local=False, base=base)
c = 1 - numpy.average(b_onto_a, weights=a_areas)
h = 1 - numpy.average(a_onto_b, weights=b_areas)
return (1 + beta) * h * c / ((beta * h) + c)
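# Added note (illustration, not part of the original module): with balance=0, beta = exp(0) = 1,
# so the value returned above reduces to the usual harmonic mean 2*h*c/(h+c) of homogeneity h
# and completeness c, i.e. the classic v-measure. The scores below are made up.
def _v_measure_from_scores(h, c, balance=0):
    """Recompute the final expression of external_entropy from already-known h and c."""
    beta = numpy.exp(balance)
    return (1 + beta) * h * c / ((beta * h) + c)

assert abs(_v_measure_from_scores(0.8, 0.6) - 2 * 0.8 * 0.6 / (0.8 + 0.6)) < 1e-12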
def completeness(a, b, local=False, base=numpy.e):
"""
    The completeness of the partitions of polygons in a to those in b.
Closer to 1 when all polygons in a are fully contained within polygons in b.
From :cite:`nowosad2018`
Parameters
----------
a : geometry array of polygons
array of polygons
b : geometry array of polygons
array of polygons
local: bool (default: False)
whether or not to provide local scores for each polygon. If True, the
completeness for polygons in a are returned.
scale: bool (default: None)
whether to scale the completeness score(s). By default, completeness is
is scaled for local scores so that the average of the local scores is
the overall map completeness. If not local, then completeness is returned
unscaled. You can also set local=True and scale=False to get raw components
of the completeness, whose sum is the completeness for the entire map.
Global re-scaled scores (local=False & scale=True) are not supported.
    base: float (default=numpy.e)
        what base to use for the entropy calculations. The default is base e,
        which means entropy is measured in "nats."
Example
-------
>>> r1 = geopandas.read_file('tests/regions.zip', layer='regions1')
>>> r2 = geopandas.read_file('tests/regions.zip', layer='regions2')
>>> completeness(r1, r2)
"""
a = _cast(a)
b = _cast(b)
ohi = overlay_entropy(a, b, standardize=True, local=True, base=base)
a_areas = pygeos.area(a)
w = a_areas / a_areas.sum()
ci = (w * (1 - ohi)) / w.sum()
if local:
return ci
return ci.sum()
def homogeneity(a, b, local=False, base=numpy.e):
"""
The homogeneity of polygons from a partitioned by b.
From :cite:`nowosad2018`
This is equal to completeness(b,a).
It is closer to 1 when all polygons in b correspond well to polygons in a.
Parameters
----------
a : geometry array of polygons
array of polygons
b : geometry array of polygons
array of polygons
local: bool (default: False)
whether or not to provide local scores for each polygon. If True, the
homogeneity for polygons in b are returned.
scale: bool (default: None)
whether to scale the completeness score(s). By default, completeness is
is scaled for local scores so that the average of the local scores is
the overall map completeness. If not local, then completeness is returned
unscaled. You can also set local=True and scale=False to get raw components
of the completeness, whose sum is the completeness for the entire map.
Global re-scaled scores (local=False & scale=True) are not supported.
    base: float (default=numpy.e)
        what base to use for the entropy calculations. The default is base e,
        which means entropy is measured in "nats."
Example
-------
>>> r1 = geopandas.read_file('tests/regions.zip', layer='regions1')
>>> r2 = geopandas.read_file('tests/regions.zip', layer='regions2')
>>> homogeneity(r1, r2)
"""
return completeness(b, a, local=local, base=base)
def overlay_entropy(a, b, standardize=True, local=False, base=numpy.e):
"""
The entropy of how n zones in a are split by m partitions in b,
where n is the number of polygons in a and m is the number
of partitions in b. This is the "overlay entropy", since
the set of polygons constructed from intersection(a,b) is often
called the "overlay" of A onto B.
Larger when zones in a are uniformly split into many even pieces
by partitions in b, and small when zones in A correspond well to
zones in B.
Parameters
-----------
a : geometry array of polygons
a set of polygons (the "target") for whom the areal entropy is calculated
b : geometry array of polygons
a set of polygons (the "frame") that splits a
Returns
--------
(n,) array expressing the entropy of the areal distributions
of a's splits by partition b.
Example
-------
>>> r1 = geopandas.read_file('tests/regions.zip', layer='regions1')
>>> r2 = geopandas.read_file('tests/regions.zip', layer='regions2')
>>> overlay_entropy(r1, r2)
"""
a = _cast(a)
b = _cast(b)
aix, bix, ab = _overlay(a, b, return_indices=True)
a_areas = pygeos.area(a)
b_areas = pygeos.area(b)
h = _overlay_entropy(aix, a_areas, pygeos.area(ab), base=base)
if standardize:
h /= areal_entropy(None, areas=b_areas, local=False, base=base)
if local:
return h
return h.sum()
def _overlay_entropy(aix, a_areas, ab_areas, base):
"""
direct function to compute overlay entropies
"""
mapping = pandas.DataFrame.from_dict(
dict(
a=aix,
area=ab_areas,
a_area=a_areas[aix],
)
)
mapping["frac"] = mapping.area / mapping.a_area
mapping["entropy"] = entr(mapping.frac.values) /
|
numpy.log(base)
|
numpy.log
|
""" From: https://github.com/bazingagin/IBA/ """
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.stats import norm
from tqdm import tqdm
from thermostat.explain import ExplainerAutoModelInitializer
class IBASequential(nn.Sequential):
def forward(self, *inp):
for module in self._modules.values():
inp = module(*inp)
return inp
class Estimator:
"""
Useful to calculate the empirical mean and variance of intermediate feature maps.
"""
def __init__(self, layer):
self.layer = layer
self.M = None # running mean for each entry
self.S = None # running std for each entry
self.N = None # running num_seen for each entry
self.num_seen = 0 # total samples seen
self.eps = 1e-5
def feed(self, z: np.ndarray):
# Initialize if this is the first datapoint
if self.N is None:
self.M =
|
np.zeros_like(z, dtype=float)
|
numpy.zeros_like
|
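# Illustrative sketch (not the IBA Estimator above, whose feed() is truncated in this row):
# a common way to keep a running per-entry mean and variance over a stream of feature maps
# is Welford's online update. Class and attribute names here are assumptions for the example.
import numpy as np

class RunningStats:
    """Per-entry running mean/variance over a stream of equally shaped arrays."""
    def __init__(self):
        self.n = 0          # number of arrays seen so far
        self.mean = None    # running per-entry mean
        self.m2 = None      # running sum of squared deviations from the mean

    def feed(self, z: np.ndarray):
        if self.mean is None:
            self.mean = np.zeros_like(z, dtype=float)
            self.m2 = np.zeros_like(z, dtype=float)
        self.n += 1
        delta = z - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (z - self.mean)

    def variance(self):
        # unbiased estimate; guard against division by zero for n < 2
        return self.m2 / max(self.n - 1, 1)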
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" dataset """
from __future__ import division
import os
import numpy as np
import cv2
import mindspore.dataset as de
import mindspore.dataset.vision.c_transforms as C
from mindspore.mindrecord import FileWriter
from pycocotools.coco import COCO
from src.config import config
def _rand(a=0., b=1.):
"""Generate random."""
return np.random.rand() * (b - a) + a
class Augmenter:
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, flip_x=0.5):
if
|
np.random.rand()
|
numpy.random.rand
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
import torch.nn.functional as F
from op_tester import op_tester
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(Path(__file__).resolve().parent.parent)
import test_util as tu
@pytest.mark.parametrize("inplacing", [False, True])
@pytest.mark.parametrize(
"source_shape, dest_shape, N, source_offset, dest_offset", [
([6, 6], [3, 6], [1, 1, 1], [0, 2, 4], [0, 1, 2]),
([6, 6], [3, 6], [1, 2], [0, 4], [2, 0]),
([6, 6], [3, 6], [1, 2], [0, 4], [2, 0]),
([6, 6], [4, 6], [1, 1], [0, 3], [0, 2]),
])
def test_sequenceslice(op_tester, inplacing, source_shape, dest_shape, N,
source_offset, dest_offset):
source = np.arange(np.prod(source_shape)) + 10
source =
|
np.reshape(source, source_shape)
|
numpy.reshape
|
import os
import numpy as np
from skimage import color
import matplotlib.pylab as plt
def remove_files(files):
"""
Remove files from disk
args: files (str or list) remove all files in 'files'
"""
if isinstance(files, (list, tuple)):
for f in files:
if os.path.isfile(os.path.expanduser(f)):
os.remove(f)
elif isinstance(files, str):
if os.path.isfile(os.path.expanduser(files)):
os.remove(files)
def create_dir(dirs):
"""
Create directory
args: dirs (str or list) create all dirs in 'dirs'
"""
if isinstance(dirs, (list, tuple)):
for d in dirs:
if not os.path.exists(os.path.expanduser(d)):
os.makedirs(d)
elif isinstance(dirs, str):
if not os.path.exists(os.path.expanduser(dirs)):
os.makedirs(dirs)
def setup_logging(model_name):
model_dir = "../../models"
# Output path where we store experiment log and weights
model_dir = os.path.join(model_dir, model_name)
fig_dir = "../../figures"
# Create if it does not exist
create_dir([model_dir, fig_dir])
def plot_batch(color_model, q_ab, X_batch_black, X_batch_color, batch_size, h, w, nb_q, epoch):
# Format X_colorized
X_colorized = color_model.predict(X_batch_black / 100.)[:, :, :, :-1]
X_colorized = X_colorized.reshape((batch_size * h * w, nb_q))
X_colorized = q_ab[np.argmax(X_colorized, 1)]
X_a = X_colorized[:, 0].reshape((batch_size, 1, h, w))
X_b = X_colorized[:, 1].reshape((batch_size, 1, h, w))
X_colorized = np.concatenate((X_batch_black, X_a, X_b), axis=1).transpose(0, 2, 3, 1)
X_colorized = [np.expand_dims(color.lab2rgb(im), 0) for im in X_colorized]
X_colorized = np.concatenate(X_colorized, 0).transpose(0, 3, 1, 2)
X_batch_color = [np.expand_dims(color.lab2rgb(im.transpose(1, 2, 0)), 0) for im in X_batch_color]
X_batch_color = np.concatenate(X_batch_color, 0).transpose(0, 3, 1, 2)
list_img = []
for i, img in enumerate(X_colorized[:min(32, batch_size)]):
arr = np.concatenate([X_batch_color[i], np.repeat(X_batch_black[i] / 100., 3, axis=0), img], axis=2)
list_img.append(arr)
plt.figure(figsize=(20,20))
    list_img = [np.concatenate(list_img[4 * i: 4 * (i + 1)], axis=2) for i in range(len(list_img) // 4)]  # integer division for Python 3
arr =
|
np.concatenate(list_img, axis=1)
|
numpy.concatenate
|
import json
import numpy as np
# Returns the Pearson correlation score between user1 and user2
def pearson_score(dataset, user1, user2):
if user1 not in dataset:
raise TypeError('User ' + user1 + ' not present in the dataset')
if user2 not in dataset:
raise TypeError('User ' + user2 + ' not present in the dataset')
# Movies rated by both user1 and user2
rated_by_both = {}
for item in dataset[user1]:
if item in dataset[user2]:
rated_by_both[item] = 1
num_ratings = len(rated_by_both)
# If there are no common movies, the score is 0
if num_ratings == 0:
return 0
# Compute the sum of ratings of all the common preferences
user1_sum = np.sum([dataset[user1][item] for item in rated_by_both])
user2_sum = np.sum([dataset[user2][item] for item in rated_by_both])
# Compute the sum of squared ratings of all the common preferences
user1_squared_sum = np.sum([np.square(dataset[user1][item]) for item in rated_by_both])
user2_squared_sum = np.sum([np.square(dataset[user2][item]) for item in rated_by_both])
# Compute the sum of products of the common ratings
product_sum = np.sum([dataset[user1][item] * dataset[user2][item] for item in rated_by_both])
# Compute the Pearson correlation
Sxy = product_sum - (user1_sum * user2_sum / num_ratings)
Sxx = user1_squared_sum - np.square(user1_sum) / num_ratings
Syy = user2_squared_sum -
|
np.square(user2_sum)
|
numpy.square
|
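# Cross-check sketch (added illustration; the ratings below are made up): the Sxy/Sxx/Syy
# bookkeeping in pearson_score above yields the same Pearson correlation as np.corrcoef
# computed over the commonly rated items.
import numpy as np

_u1 = np.array([2.5, 3.5, 3.0, 3.5])   # hypothetical ratings by user1 on shared movies
_u2 = np.array([3.0, 3.5, 1.5, 5.0])   # hypothetical ratings by user2 on the same movies
_n = len(_u1)
_Sxy = np.sum(_u1 * _u2) - np.sum(_u1) * np.sum(_u2) / _n
_Sxx = np.sum(_u1**2) - np.sum(_u1)**2 / _n
_Syy = np.sum(_u2**2) - np.sum(_u2)**2 / _n
assert np.isclose(_Sxy / np.sqrt(_Sxx * _Syy), np.corrcoef(_u1, _u2)[0, 1])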
import math
import numpy as np
from scipy.linalg import solve_triangular
from scipy.optimize import minimize
from typing import List, Callable
from flare.env import AtomicEnvironment
from flare.struc import Structure
from flare.gp_algebra import get_ky_mat, get_ky_and_hyp, get_like_from_ky_mat,\
get_like_grad_from_mats, get_neg_likelihood, get_neg_like_grad
class GaussianProcess:
""" Gaussian Process Regression Model.
Implementation is based on Algorithm 2.1 (pg. 19) of
"Gaussian Processes for Machine Learning" by <NAME> Williams"""
def __init__(self, kernel: Callable,
kernel_grad: Callable, hyps: np.ndarray,
cutoffs: np.ndarray,
hyp_labels: List=None,
energy_force_kernel: Callable=None,
energy_kernel: Callable=None,
opt_algorithm: str='L-BFGS-B',
maxiter=10, par=False):
"""Initialize GP parameters and training data."""
self.kernel = kernel
self.kernel_grad = kernel_grad
self.energy_kernel = energy_kernel
self.energy_force_kernel = energy_force_kernel
self.kernel_name = kernel.__name__
self.hyps = hyps
self.hyp_labels = hyp_labels
self.cutoffs = cutoffs
self.algo = opt_algorithm
self.l_mat = None
self.alpha = None
self.training_data = []
self.training_labels = []
self.training_labels_np = np.empty(0, )
self.maxiter = maxiter
self.likelihood = None
self.likelihood_gradient = None
self.par = par
# TODO unit test custom range
def update_db(self, struc: Structure, forces: list,
custom_range: List[int] = ()):
"""Given structure and forces, add to training set.
:param struc: structure to add to db
:type struc: Structure
:param forces: list of corresponding forces to add to db
:type forces: list<float>
:param custom_range: Indices to use in lieu of the whole structure
:type custom_range: List[int]
"""
# By default, use all atoms in the structure
noa = len(struc.positions)
update_indices = custom_range or list(range(noa))
for atom in update_indices:
env_curr = AtomicEnvironment(struc, atom, self.cutoffs)
forces_curr = np.array(forces[atom])
self.training_data.append(env_curr)
self.training_labels.append(forces_curr)
# create numpy array of training labels
self.training_labels_np = self.force_list_to_np(self.training_labels)
@staticmethod
def force_list_to_np(forces: list) -> np.ndarray:
""" Convert list of forces to numpy array of forces.
:param forces: list of forces to convert
:type forces: list<float>
:return: numpy array forces
:rtype: np.ndarray
"""
forces_np = []
for m in range(len(forces)):
for n in range(3):
forces_np.append(forces[m][n])
forces_np = np.array(forces_np)
return forces_np
def train(self, monitor=False, custom_bounds=None):
""" Train Gaussian Process model on training data. """
x_0 = self.hyps
args = (self.training_data, self.training_labels_np,
self.kernel_grad, self.cutoffs, monitor,
self.par)
if self.algo == 'L-BFGS-B':
# bound signal noise below to avoid overfitting
bounds = np.array([(-np.inf, np.inf)] * len(x_0))
bounds[-1] = (1e-6, np.inf)
# Catch linear algebra errors and switch to BFGS if necessary
try:
res = minimize(get_neg_like_grad, x_0, args,
method='L-BFGS-B', jac=True, bounds=bounds,
options={'disp': False, 'gtol': 1e-4,
'maxiter': self.maxiter})
except:
print("Warning! Algorithm for L-BFGS-B failed. Changing to "
"BFGS for remainder of run.")
self.algo = 'BFGS'
if custom_bounds is not None:
res = minimize(get_neg_like_grad, x_0, args,
method='L-BFGS-B', jac=True, bounds=custom_bounds,
options={'disp': False, 'gtol': 1e-4,
'maxiter': self.maxiter})
elif self.algo == 'BFGS':
res = minimize(get_neg_like_grad, x_0, args,
method='BFGS', jac=True,
options={'disp': False, 'gtol': 1e-4,
'maxiter': self.maxiter})
elif self.algo == 'nelder-mead':
res = minimize(get_neg_likelihood, x_0, args,
method='nelder-mead',
options={'disp': False,
'maxiter': self.maxiter,
'xtol': 1e-5})
self.hyps = res.x
self.set_L_alpha()
self.likelihood = -res.fun
self.likelihood_gradient = -res.jac
def predict(self, x_t: AtomicEnvironment, d: int) -> [float, float]:
# get kernel vector
k_v = self.get_kernel_vector(x_t, d)
# get predictive mean
pred_mean = np.matmul(k_v, self.alpha)
# get predictive variance without cholesky (possibly faster)
self_kern = self.kernel(x_t, x_t, d, d, self.hyps,
self.cutoffs)
pred_var = self_kern - \
np.matmul(np.matmul(k_v, self.ky_mat_inv), k_v)
# # get predictive variance (possibly slow)
# v_vec = solve_triangular(self.l_mat, k_v, lower=True)
# self_kern = self.kernel(x_t, x_t, self.bodies, d, d, self.hyps,
# self.cutoffs)
# pred_var = self_kern - np.matmul(v_vec, v_vec)
return pred_mean, pred_var
def predict_local_energy(self, x_t: AtomicEnvironment) -> float:
"""Predict the sum of triplet energies that include the test atom.
:param x_t: Atomic environment of test atom
:type x_t: AtomicEnvironment
:return: local energy in eV (up to a constant)
:rtype: float
"""
k_v = self.en_kern_vec(x_t)
pred_mean = np.matmul(k_v, self.alpha)
return pred_mean
def predict_local_energy_and_var(self, x_t: AtomicEnvironment):
# get kernel vector
k_v = self.en_kern_vec(x_t)
# get predictive mean
pred_mean = np.matmul(k_v, self.alpha)
# get predictive variance
v_vec = solve_triangular(self.l_mat, k_v, lower=True)
self_kern = self.energy_kernel(x_t, x_t, self.hyps,
self.cutoffs)
pred_var = self_kern - np.matmul(v_vec, v_vec)
return pred_mean, pred_var
def get_kernel_vector(self, x: AtomicEnvironment,
d_1: int) -> np.ndarray:
""" Compute kernel vector.
:param x: data point to compare against kernel matrix
:type x: AtomicEnvironment
        :param d_1:
        :type d_1: int
:return: kernel vector
:rtype: np.ndarray
"""
ds = [1, 2, 3]
size = len(self.training_data) * 3
k_v = np.zeros(size, )
for m_index in range(size):
x_2 = self.training_data[int(math.floor(m_index / 3))]
d_2 = ds[m_index % 3]
k_v[m_index] = self.kernel(x, x_2, d_1, d_2,
self.hyps, self.cutoffs)
return k_v
def en_kern_vec(self, x: AtomicEnvironment) -> np.ndarray:
ds = [1, 2, 3]
size = len(self.training_data) * 3
k_v = np.zeros(size, )
for m_index in range(size):
x_2 = self.training_data[int(math.floor(m_index / 3))]
d_2 = ds[m_index % 3]
k_v[m_index] = self.energy_force_kernel(x_2, x, d_2,
self.hyps, self.cutoffs)
return k_v
def set_L_alpha(self):
hyp_mat, ky_mat = get_ky_and_hyp(self.hyps, self.training_data,
self.training_labels_np,
self.kernel_grad, self.cutoffs)
like, like_grad = \
get_like_grad_from_mats(ky_mat, hyp_mat, self.training_labels_np)
l_mat = np.linalg.cholesky(ky_mat)
l_mat_inv = np.linalg.inv(l_mat)
ky_mat_inv = l_mat_inv.T @ l_mat_inv
alpha = np.matmul(ky_mat_inv, self.training_labels_np)
self.ky_mat = ky_mat
self.l_mat = l_mat
self.alpha = alpha
self.ky_mat_inv = ky_mat_inv
self.l_mat_inv = l_mat_inv
self.like = like
self.like_grad = like_grad
def update_L_alpha(self):
n = self.l_mat_inv.shape[0]
N = len(self.training_data)
m = N - n//3 # number of data added
ky_mat = np.zeros((3*N, 3*N))
ky_mat[:n, :n] = self.ky_mat
k_v = np.array([[] for i in range(n)])
V_mat = np.zeros((3*m, 3*m))
# calculate kernels for all added data
for i in range(m):
x_t = self.training_data[-1-i]
k_vi = np.array([self.get_kernel_vector(x_t, d+1)
for d in range(3)]).T # (n+3m) x 3
k_vi = k_vi[:n, :]
k_v = np.hstack([k_v, k_vi]) # n x 3m
for d1 in range(3):
for j in range(i, m):
y_t = self.training_data[-1-j]
for d2 in range(3):
V_mat[3*i+d1, 3*j+d2] = \
self.kernel(x_t, y_t, d1+1, d2+1,
self.hyps, self.cutoffs)
V_mat[3*j+d2, 3*i+d1] = V_mat[3*i+d1, 3*j+d2]
ky_mat[:n, n:] = k_v
ky_mat[n:, :n] = k_v.T
sigma_n = self.hyps[-1]
ky_mat[n:, n:] = V_mat + sigma_n**2 * np.eye(3*m)
l_mat = np.linalg.cholesky(ky_mat)
l_mat_inv = np.linalg.inv(l_mat)
ky_mat_inv = l_mat_inv.T @ l_mat_inv
alpha =
|
np.matmul(ky_mat_inv, self.training_labels_np)
|
numpy.matmul
|
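# Added illustration (not the flare code above): GaussianProcess.predict follows the standard
# GP predictive equations, mean = k_*^T K^-1 y and var = k(x_*, x_*) - k_*^T K^-1 k_*.
# A toy 1-D RBF-kernel version of the same algebra, with made-up data:
import numpy as np

def _rbf(a, b, ls=1.0):
    return np.exp(-0.5 * (a[:, None] - b[None, :]) ** 2 / ls ** 2)

_X = np.array([-1.0, 0.0, 1.0])                      # hypothetical training inputs
_y = np.sin(_X)                                      # hypothetical training targets
_K = _rbf(_X, _X) + 1e-2 ** 2 * np.eye(len(_X))      # kernel matrix plus noise term
_K_inv = np.linalg.inv(_K)
_x_star = np.array([0.5])
_k_star = _rbf(_x_star, _X)                          # shape (1, 3)
_pred_mean = _k_star @ _K_inv @ _y                   # predictive mean
_pred_var = _rbf(_x_star, _x_star) - _k_star @ _K_inv @ _k_star.T   # predictive variance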
import copy
import math
import random
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
# from geomdl import fitting
# from geomdl.visualization import VisMPL as vis
from matplotlib import cm
from scipy.interpolate import RBFInterpolator
from sklearn.neighbors import KDTree
import config
import environment.bulletcdhelper as bcdhelper
import localenv.envloader as el
import surface as sfc
import trimesh.intersections as inc
import utils.comformalmapping_utils as cu
import utils.drawpath_utils as du
import utils.math_utils as mu
import utils.pcd_utils as pcdu
import utils.phoxi as phoxi
import utils.phoxi_locator as pl
import utils.run_utils as ru
import utiltools.robotmath as rm
def find_img_interior_rec(img, gray_threshold=1, toggledebug=False):
"""
:param img: rgb/gray image
:param toggledebug:
:return: width, height and center of the cutted image, as well as the cutted image
"""
img = copy.deepcopy(img)
try:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
except:
gray = img
img = np.stack((gray,) * 3, axis=-1)
# Create our mask by selecting the non-zero values of the picture
ret, mask = cv2.threshold(gray, gray_threshold, 255, cv2.THRESH_BINARY)
# Select the contour
cont, _ = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) # cv2.CHAIN_APPROX_NONE
# Get all the points of the contour
contour = cont[0].reshape(len(cont[0]), 2)
# we assume a rectangle with at least two points on the contour gives a 'good enough' result
# get all possible rectangles based on this hypothesis
rect = []
if toggledebug:
cv2.drawContours(gray, cont, -1, (255, 0, 0), 1)
cv2.imshow('Picture with contour', gray)
cv2.waitKey(0)
for i in range(len(contour)):
x1, y1 = contour[i]
for j in range(len(contour)):
x2, y2 = contour[j]
area = abs(y2 - y1) * abs(x2 - x1)
rect.append(((x1, y1), (x2, y2), area))
    # the first rect of all_rect has the biggest area, so it's the best solution if it fits in the picture
all_rect = sorted(rect, key=lambda x: x[2], reverse=True)
# we take the largest rectangle we've got, based on the value of the rectangle area
# only if the border of the rectangle is not in the black part
# if the list is not empty
if all_rect:
best_rect_found = False
index_rect = 0
nb_rect = len(all_rect)
# we check if the rectangle is a good solution
while not best_rect_found and index_rect < nb_rect:
rect = all_rect[index_rect]
(x1, y1) = rect[0]
(x2, y2) = rect[1]
valid_rect = True
            # we search a black area along the perimeter of the rectangle (top and bottom edges)
x = min(x1, x2)
while x < max(x1, x2) + 1 and valid_rect:
if mask[y1, x] == 0 or mask[y2, x] == 0:
# if we find a black pixel, that means a part of the rectangle is black
# so we don't keep this rectangle
valid_rect = False
x += 1
y = min(y1, y2)
while y < max(y1, y2) + 1 and valid_rect:
if mask[y, x1] == 0 or mask[y, x2] == 0:
valid_rect = False
y += 1
if valid_rect:
best_rect_found = True
index_rect += 1
if best_rect_found:
x_range = (min(x1, x2), max(x1, x2))
y_range = (min(y1, y2), max(y1, y2))
img[:y_range[0], :] = np.array([0, 0, 0])
img[y_range[1]:, :] = np.array([0, 0, 0])
img[:, :x_range[0]] = np.array([0, 0, 0])
img[:, x_range[1]:] = np.array([0, 0, 0])
w = x_range[1] - x_range[0]
h = y_range[1] - y_range[0]
center = (int((x_range[0] + x_range[1]) / 2), int((y_range[0] + y_range[1]) / 2))
if toggledebug:
print(x_range, y_range)
print(w, h, center)
cv2.rectangle(gray, (x_range[0], y_range[1]), (x_range[1], y_range[0]), (255, 0, 0), 1)
cv2.circle(gray, center, 1, (255, 0, 0), 1)
cv2.imshow('Picture with rectangle?', gray)
cv2.waitKey(0)
return w, h, center, img
else:
print('No rectangle fitting into the area')
return None, None, None, img
else:
print('No rectangle found')
return None, None, None, img
def resize_drawpath(drawpath, w, h, space=5):
"""
:param drawpath: draw path point list
:param w:
:param h:
:param space: space between drawing and rectangle edge
:return:
"""
def __sort_w_h(a, b):
if a > b:
return a, b
else:
return b, a
drawpath = remove_list_dup(drawpath)
p_narray = np.array(drawpath)
pl_w = max(p_narray[:, 0]) - min(p_narray[:, 0])
pl_h = max(p_narray[:, 1]) - min(p_narray[:, 1])
pl_w, pl_h = __sort_w_h(pl_w, pl_h)
w, h = __sort_w_h(w, h)
# if pl_w / w > 1 and pl_h / h > 1:
scale = max([pl_w / (w - space), pl_h / (h - space)])
p_narray = p_narray / scale
return list(p_narray)
def resize_drawpath_ms(drawpath_ms, w, h, space=5):
"""
:param drawpath_ms: draw path point list
:param w:
:param h:
:param space: space between drawing and rectangle edge
:return:
"""
def __sort_w_h(a, b):
if a > b:
return a, b
else:
return b, a
print('length of each stroke(dup):', [len(stroke) for stroke in drawpath_ms])
drawpath_ms = [remove_list_dup(stroke) for stroke in drawpath_ms]
stroke_len_list = [len(stroke) for stroke in drawpath_ms]
print('length of each stroke:', stroke_len_list)
p_narray = np.array([p for s in drawpath_ms for p in s])
pl_w = max(p_narray[:, 0]) - min(p_narray[:, 0])
pl_h = max(p_narray[:, 1]) - min(p_narray[:, 1])
pl_w, pl_h = __sort_w_h(pl_w, pl_h)
w, h = __sort_w_h(w, h)
# if pl_w / w > 1 and pl_h / h > 1:
scale = max([pl_w / (w - space), pl_h / (h - space)])
p_narray = p_narray / scale
drawpath_ms_resized = []
p_cnt = 0
while p_cnt < len(p_narray):
for stroke_len in stroke_len_list:
i = 0
stroke = []
while i < stroke_len:
stroke.append(p_narray[p_cnt])
i += 1
p_cnt += 1
drawpath_ms_resized.append(stroke)
return drawpath_ms_resized
def show_drawpath_on_img(p_list, img):
for point in p_list:
point = (int(point[0]), int(point[1]))
cv2.circle(img, point, radius=1, color=(0, 0, 255), thickness=0)
cv2.imshow('result', img)
cv2.waitKey(0)
def rayhitmesh_closest(obj, pfrom, pto, toggledebug=False):
mcm = bcdhelper.MCMchecker()
pos, nrml = mcm.getRayHitMeshClosest(pfrom=pfrom, pto=pto, objcm=obj)
if toggledebug:
print('------------------')
print('pfrom, pto:', pfrom, pto)
print('pos:', pos)
print('normal:', -nrml)
# base.pggen.plotArrow(base.render, spos=pfrom, epos=pto, length=100, rgba=(0, 1, 0, 0.5))
if pos is not None:
return np.array(pos), -np.array(nrml)
else:
return None, None
def rayhitmesh_drawpath_ss(obj_item, drawpath, direction=np.asarray((0, 0, 1)), toggledebug=False):
time_start = time.time()
print('--------------rayhit single stroke on mesh--------------')
print('draw path point num:', len(drawpath))
pos_nrml_list = []
pos_pre = []
for i, p in enumerate(drawpath):
# try:
# pos, nrml = rayhitmesh_point(obj_item.objcm, obj_item.drawcenter, p)
# if list(pos) != list(pos_pre):
# pos_nrml_list.append([pos, nrml])
# pos_pre = pos
# except:
# continue
pos, nrml = rayhitmesh_p(obj_item.objcm, obj_item.drawcenter, p, direction=direction)
pos_nrml_list.append([pos, nrml])
if toggledebug:
if i == 1:
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plot_edge = 5
plot_edge_z = 5
x_range = (pos[0] - plot_edge, pos[0] + plot_edge)
y_range = (pos[1] - plot_edge - 9, pos[1] + plot_edge)
z_range = (pos[2] - plot_edge_z - 9, pos[2] + plot_edge_z - .5)
pcd_edge = 0
pcd = [p for p in obj_item.pcd if
x_range[0] - pcd_edge < p[0] < x_range[1] + pcd_edge and
y_range[0] - pcd_edge < p[1] < y_range[1] + pcd_edge and
z_range[0] - pcd_edge < p[2] < z_range[1] + pcd_edge]
pcd = np.asarray(random.choices(pcd, k=3000))
ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], c='k', alpha=.2, s=1)
ax.scatter([pos_nrml_list[0][0][0]], [pos_nrml_list[0][0][1]], [pos_nrml_list[0][0][2]], c='r', s=10)
ax.scatter([pos_nrml_list[1][0][0]], [pos_nrml_list[1][0][1]], [pos_nrml_list[1][0][2]], c='g', s=10)
plt.show()
print('projection time cost', time.time() - time_start)
error, error_list = get_prj_error(drawpath, pos_nrml_list)
print('avg error', np.mean(error_list))
return pos_nrml_list, error_list
def rayhitmesh_drawpath_ms(obj_item, drawpath_ms, direction=np.asarray((0, 0, 1)), error_method='ED'):
def __get_mean(l):
l = [p for p in l if p is not None]
return np.mean(l)
time_start = time.time()
pos_nrml_list_ms = []
error_list_ms = []
print('--------------rayhit multiple strokes on mesh--------------')
for drawpath in drawpath_ms:
# print('draw path point num:', len(drawpath))
pos_nrml_list = []
for point in drawpath:
try:
pos, nrml = rayhitmesh_p(obj_item.objcm, obj_item.drawcenter, point, direction=direction)
pos_nrml_list.append([pos, nrml])
except:
pos_nrml_list.append([None, None])
if error_method == 'GD':
error, error_list = get_prj_error(drawpath, pos_nrml_list, method=error_method, obj_pcd=obj_item.pcd)
else:
error, error_list = get_prj_error(drawpath, pos_nrml_list)
pos_nrml_list_ms.append(pos_nrml_list)
error_list_ms.extend(error_list)
time_cost_total = time.time() - time_start
print('projection time cost', time_cost_total)
print('avg error', __get_mean(error_list_ms))
return pos_nrml_list_ms, error_list_ms, time_cost_total
def rayhitmesh_p(obj, center, p, direction=np.asarray((0, 0, 1))):
if abs(direction[0]) == 1:
pfrom = np.asarray((center[0], p[0] + center[1], p[1] + center[2])) + 50 * direction
pto = np.asarray((center[0], p[0] + center[1], p[1] + center[2])) - 50 * direction
elif abs(direction[1]) == 1:
pfrom = np.asarray((p[0] + center[0], center[1], p[1] + center[2])) + 50 * direction
pto = np.asarray((p[0] + center[0], center[1], p[1] + center[2])) - 50 * direction
elif abs(direction[2]) == 1:
pfrom = np.asarray((p[0] + center[0], p[1] + center[1], center[2])) + 50 * direction
pto = np.asarray((p[0] + center[0], p[1] + center[1], center[2])) - 50 * direction
else:
print('Wrong input direction!')
return None, None
base.pggen.plotArrow(base.render, spos=pfrom, epos=pto, length=100, rgba=(0, 1, 0, 1))
# base.run()
pos, nrml = rayhitmesh_closest(obj, pfrom, pto)
return pos, -nrml
def get_vecs_angle(v1, v2):
return math.degrees(np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))))
def get_knn_indices(p, kdt, k=3):
distances, indices = kdt.query([p], k=k, return_distance=True)
return indices[0]
def get_knn(p, kdt, k=3):
p_nearest_inx = get_knn_indices(p, kdt, k=k)
pcd = list(np.array(kdt.data))
return np.asarray([pcd[p_inx] for p_inx in p_nearest_inx])
def get_nn_indices_by_distance(p, kdt, step=1.0):
result_indices = []
distances, indices = kdt.query([p], k=1000, return_distance=True)
distances = distances[0]
indices = indices[0]
for i in range(len(distances)):
if distances[i] < step:
result_indices.append(indices[i])
return result_indices
def get_kdt(p_list, dimension=3):
time_start = time.time()
p_list = np.asarray(p_list)
p_narray = np.array(p_list[:, :dimension])
kdt = KDTree(p_narray, leaf_size=100, metric='euclidean')
# print('time cost(kdt):', time.time() - time_start)
return kdt, p_narray
def get_z_by_bilinearinp(p, kdt):
p_nearest_inx = get_knn_indices(p, kdt, k=4)
pcd = list(np.array(kdt.data))
p_list = [pcd[p_inx] for p_inx in p_nearest_inx]
return mu.bilinear_interp_2d(p[:2], [(p[0], p[1]) for p in p_list], [p[2] for p in p_list])
def __get_avg_dist(p_list):
dist_list = []
for i in range(1, len(p_list)):
dist = np.linalg.norm(np.array(p_list[i]) - np.array(p_list[i - 1]))
dist_list.append(dist)
return np.average(dist_list)
def get_intersec(p1, p2, plane_nrml, d):
p1_d = (np.vdot(p1, plane_nrml) + d) / np.sqrt(np.vdot(plane_nrml, plane_nrml))
p1_d2 = (np.vdot(p2 - p1, plane_nrml)) / np.sqrt(np.vdot(plane_nrml, plane_nrml))
n = p1_d2 / p1_d
return p1 + n * (p2 - p1)
def get_nrml_pca(knn):
pcv, pcaxmat = rm.computepca(knn)
return pcaxmat[:, np.argmin(pcv)]
def __find_nxt_p_pca(drawpath_p1, drawpath_p2, kdt_d3, p0, n0, max_nn=150, direction=np.array([0, 0, 1]),
toggledebug=False, pcd=None, snap=True):
v_draw = np.array(drawpath_p2) - np.array(drawpath_p1)
if abs(direction[0]) == 1:
v_draw = (0, v_draw[0], v_draw[1])
elif abs(direction[1]) == 1:
v_draw = (v_draw[0], 0, v_draw[1])
elif abs(direction[2]) == 1:
v_draw = (v_draw[0], v_draw[1], 0)
else:
print('Wrong input direction!')
return None
rotmat = rm.rotmat_betweenvector(direction, n0)
v_draw = np.dot(rotmat, v_draw)
pt = p0 + v_draw
knn = get_knn(pt, kdt_d3, k=max_nn)
center = pcdu.get_pcd_center(np.asarray(knn))
nrml = get_nrml_pca(knn)
if snap:
p_nxt = pt - np.dot((pt - center), nrml) * nrml
else:
p_nxt = copy.deepcopy(pt)
if np.dot(nrml, np.asarray([0, 0, 1])) < 0:
nrml = -nrml
# if np.dot(nrml, np.asarray([0, -1, 0])) < 0:
# nrml = -nrml
# if np.dot(nrml, np.asarray([-1, 0, 0])) < 0:
# nrml = -nrml
# if np.dot(nrml, np.asarray([0, -1, 0])) == -1:
# nrml = -nrml
if toggledebug:
fig = plt.figure()
ax = fig.gca(projection='3d')
plot_edge = 1.5
plot_edge_z = 1.5
knn_p0 = get_knn(p0, kdt_d3, k=max_nn)
x_range = (min(knn[:, 0].flatten()) - plot_edge, max(knn[:, 0].flatten()) + plot_edge)
y_range = (min(knn[:, 1].flatten()) - plot_edge, max(knn[:, 1].flatten()) + plot_edge)
z_range = (min(knn[:, 2].flatten()) - plot_edge_z, max(knn[:, 2].flatten()) + plot_edge_z)
ax.scatter(knn[:, 0], knn[:, 1], knn[:, 2], c='y', s=1, alpha=.1)
coef_l = mu.fit_plane(knn)
mu.plot_surface_f(ax, coef_l, x_range, y_range, z_range, dense=.5, c=cm.coolwarm)
x_range = (min(knn_p0[:, 0].flatten()) - plot_edge, max(knn_p0[:, 0].flatten()) + plot_edge)
y_range = (min(knn_p0[:, 1].flatten()) - plot_edge, max(knn_p0[:, 1].flatten()) + plot_edge)
z_range = (min(knn_p0[:, 2].flatten()) - plot_edge_z, max(knn_p0[:, 2].flatten()) + plot_edge_z)
ax.scatter(knn_p0[:, 0], knn_p0[:, 1], knn_p0[:, 2], c='y', s=1, alpha=.1)
coef_l_init = mu.fit_plane(knn_p0)
mu.plot_surface_f(ax, coef_l_init, x_range, y_range, z_range, dense=.5, c=cm.coolwarm)
if pcd is not None:
pcd_edge = 5
pcd = [p for p in pcd if
x_range[0] - pcd_edge < p[0] < x_range[1] + pcd_edge and
y_range[0] - pcd_edge < p[1] < y_range[1] + pcd_edge + 5 and
z_range[0] - pcd_edge < p[2] < z_range[1] + pcd_edge + 5]
pcd = np.asarray(random.choices(pcd, k=2000))
ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], c='k', alpha=.1, s=1)
ax.scatter([p0[0]], [p0[1]], [p0[2]], c='r', s=10, alpha=1)
ax.scatter([p_nxt[0]], [p_nxt[1]], [p_nxt[2]], c='g', s=10, alpha=1)
ax.scatter([pt[0]], [pt[1]], [pt[2]], c='b', s=10, alpha=1)
# ax.annotate3D('$q_0$', pcd_start_p, xytext=(3, 3), textcoords='offset points')
# ax.annotate3D('$q_1$', p_nxt, xytext=(3, 3), textcoords='offset points')
# ax.annotate3D('$q_t$', pt, xytext=(3, 3), textcoords='offset points')
# ax.arrow3D(pcd_start_p[0], pcd_start_p[1], pcd_start_p[2],
# pcd_start_n[0], pcd_start_n[1], pcd_start_n[2], mutation_scale=10, arrowstyle='->')
# ax.arrow3D(pt[0], pt[1], pt[2],
# nrml[0], nrml[1], nrml[2], mutation_scale=10, arrowstyle='->')
# ax.arrow3D(pcd_start_p[0], pcd_start_p[1], pcd_start_p[2],
# v_draw[0], v_draw[1], v_draw[2], mutation_scale=10, arrowstyle='->')
# ax.annotate3D('$N_0$', pcd_start_p + pcd_start_n, xytext=(3, 3), textcoords='offset points')
# ax.annotate3D('$N_t$', pt + nrml, xytext=(3, 3), textcoords='offset points')
# ax.annotate3D('$V_{draw}$', pcd_start_p + v_draw * 0.5, xytext=(3, 3), textcoords='offset points')
plt.show()
return p_nxt, nrml
def __find_nxt_p_psfc(drawpath_p1, drawpath_p2, kdt_d3, p0, n0, max_nn=150, direction=np.array([0, 0, 1]),
toggledebug=False, step=0.1, pcd=None, pca_trans=True, mode='rbf', snap=False):
v_draw = np.array(drawpath_p2) - np.array(drawpath_p1)
if abs(direction[0]) == 1:
v_draw = (0, v_draw[0], v_draw[1])
elif abs(direction[1]) == 1:
v_draw = (v_draw[0], 0, v_draw[1])
elif abs(direction[2]) == 1:
v_draw = (v_draw[0], v_draw[1], 0)
else:
print('Wrong input direction!')
return None
def __surface(pts, mode):
if mode == 'rbf':
surface = sfc.RBFSurface(pts[:, :2], pts[:, 2])
elif mode == 'gaussian':
surface = sfc.MixedGaussianSurface(pts[:, :2], pts[:, 2], n_mix=1)
elif mode == 'quad':
surface = sfc.QuadraticSurface(pts[:, :2], pts[:, 2])
else:
surface = None
return surface
rotmat = rm.rotmat_betweenvector(direction, n0)
v_draw = np.dot(rotmat, v_draw)
knn_p0 = get_knn(p0, kdt_d3, k=max_nn)
# pcdu.show_pcd(knn_p0)
if pca_trans:
knn_p0_tr, transmat = mu.trans_data_pcv(knn_p0, random_rot=False)
surface = __surface(knn_p0_tr, mode)
else:
transmat = np.eye(3)
surface = __surface(knn_p0, mode)
# surface_cm = surface.get_gometricmodel(rgba=[.5, .7, 1, .3])
# mat4 = np.eye(4)
# mat4[:3, :3] = transmat
# surface_cm.sethomomat(mat4)
# surface_cm.reparentTo(base.render)
# base.run()
tgt_len = np.linalg.norm(v_draw)
pm = np.dot(np.linalg.inv(transmat), p0)
tgt_len_list = [tgt_len]
p_nxt = p0
while True:
p_uv = (pm + np.dot(np.linalg.inv(transmat), v_draw) * step)[:2]
z = surface.get_zdata([p_uv])[0]
pt = np.asarray([p_uv[0], p_uv[1], z])
tgt_len -= np.linalg.norm(pt - pm)
tgt_len_list.append(tgt_len)
if abs(tgt_len_list[-1]) < abs(tgt_len_list[-2]):
pm = pt
p_nxt = pt
else:
break
p_nxt = np.dot(transmat, p_nxt)
knn = get_knn(p_nxt, kdt_d3, k=max_nn)
nrml = get_nrml_pca(knn)
if snap:
knn_pt = get_knn(p_nxt, kdt_d3, k=30)
center = pcdu.get_pcd_center(np.asarray(knn_pt))
nrml = get_nrml_pca(knn_pt)
p_nxt = p_nxt - np.dot((p_nxt - center), nrml) * nrml
if np.dot(nrml, np.asarray([0, 0, 1])) < 0:
nrml = -nrml
# if np.dot(nrml, np.asarray([0, -1, 0])) < 0:
# nrml = -nrml
# if np.dot(nrml, np.asarray([-1, 0, 0])) < 0:
# nrml = -nrml
if np.dot(nrml, np.asarray([0, -1, 0])) == -1:
nrml = -nrml
if toggledebug:
fig = plt.figure()
ax = fig.gca(projection='3d')
pcd_edge = 10
plot_edge = 5
plot_edge_z = 1.5
x_range = (min(knn_p0[:, 0].flatten()) - plot_edge, max(knn_p0[:, 0].flatten()) + plot_edge)
y_range = (min(knn_p0[:, 1].flatten()) - plot_edge, max(knn_p0[:, 1].flatten()) + plot_edge)
z_range = (min(knn_p0[:, 2].flatten()) - plot_edge_z, max(knn_p0[:, 2].flatten()) + plot_edge_z)
xgrid = np.mgrid[x_range[0]:x_range[1], y_range[0]:y_range[1]]
xflat = xgrid.reshape(2, -1).T
zflat = surface.get_zdata(xflat)
inp_pts = np.column_stack((xflat, zflat))
inp_pts = np.dot(transmat, inp_pts.T).T
Z = inp_pts[:, 2].reshape((xgrid.shape[1], xgrid.shape[2]))
ax.plot_surface(xgrid[0], xgrid[1], Z, rstride=1, cstride=1, alpha=.5, cmap='coolwarm')
# ax.scatter(inp_pts[:, 0], inp_pts[:, 1], inp_pts[:, 2], c='r', alpha=1, s=1)
pcd = [p for p in pcd if
x_range[0] - pcd_edge < p[0] < x_range[1] + pcd_edge and
y_range[0] - pcd_edge < p[1] < y_range[1] + pcd_edge + 5 and
z_range[0] - pcd_edge < p[2] < z_range[1] + pcd_edge + 5]
pcd = np.asarray(random.choices(pcd, k=2000))
ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], c='k', alpha=.1, s=1)
ax.scatter([p0[0]], [p0[1]], [p0[2]], c='r', s=10, alpha=1)
ax.scatter([p_nxt[0]], [p_nxt[1]], [p_nxt[2]], c='g', s=10, alpha=1)
plt.show()
return p_nxt, nrml
def __find_nxt_p_rbf_g(drawpath_p1, drawpath_p2, surface, transmat, kdt_d3, p0, n0, max_nn=150,
direction=np.array([0, 0, 1]), toggledebug=False, step=0.1, pcd=None, snap=False):
v_draw = np.array(drawpath_p2) - np.array(drawpath_p1)
if abs(direction[0]) == 1:
v_draw = (0, v_draw[0], v_draw[1])
elif abs(direction[1]) == 1:
v_draw = (v_draw[0], 0, v_draw[1])
elif abs(direction[2]) == 1:
v_draw = (v_draw[0], v_draw[1], 0)
else:
print('Wrong input direction!')
return None
rotmat = rm.rotmat_betweenvector(direction, n0)
v_draw = np.dot(rotmat, v_draw)
tgt_len = np.linalg.norm(v_draw)
pm = np.dot(np.linalg.inv(transmat), p0)
tgt_len_list = [tgt_len]
p_nxt = None
while True:
p_uv = (pm + np.dot(np.linalg.inv(transmat), v_draw) * step)[:2]
z = surface.get_zdata([p_uv])[0]
pt = np.asarray([p_uv[0], p_uv[1], z])
tgt_len -= np.linalg.norm(pt - pm)
tgt_len_list.append(tgt_len)
if abs(tgt_len_list[-1]) < abs(tgt_len_list[-2]):
pm = pt
p_nxt = pt
else:
break
p_nxt = np.dot(transmat, p_nxt)
knn = get_knn(p_nxt, kdt_d3, k=max_nn)
nrml = get_nrml_pca(knn)
if snap:
knn_pt = get_knn(p_nxt, kdt_d3, k=30)
center = pcdu.get_pcd_center(np.asarray(knn_pt))
nrml = get_nrml_pca(knn_pt)
p_nxt = p_nxt - np.dot((p_nxt - center), nrml) * nrml
if np.dot(nrml, np.asarray([0, 0, 1])) < 0:
nrml = -nrml
# if np.dot(nrml, np.asarray([0, -1, 0])) < 0:
# nrml = -nrml
# if np.dot(nrml, np.asarray([-1, 0, 0])) < 0:
# nrml = -nrml
# if np.dot(nrml, np.asarray([0, -1, 0])) == -1:
# nrml = -nrml
if toggledebug:
fig = plt.figure()
ax = fig.gca(projection='3d')
pcd_edge = 10
plot_edge = 5
plot_edge_z = 1.5
x_range = (p0[0] - plot_edge, p0[0] + plot_edge)
y_range = (p0[1] - plot_edge, p0[1] + plot_edge)
z_range = (p0[2] - plot_edge_z, p0[2] + plot_edge_z)
xgrid = np.mgrid[x_range[0]:x_range[1], y_range[0]:y_range[1]]
xflat = xgrid.reshape(2, -1).T
zflat = surface(xflat)
inp_pts = np.column_stack((xflat, zflat))
inp_pts = np.dot(transmat, inp_pts.T).T
Z = inp_pts[:, 2].reshape((xgrid.shape[1], xgrid.shape[2]))
ax.plot_surface(xgrid[0], xgrid[1], Z, rstride=1, cstride=1, alpha=.5, cmap='coolwarm')
# ax.scatter(inp_pts[:, 0], inp_pts[:, 1], inp_pts[:, 2], c='r', alpha=1, s=1)
pcd = [p for p in pcd if
x_range[0] - pcd_edge < p[0] < x_range[1] + pcd_edge and
y_range[0] - pcd_edge < p[1] < y_range[1] + pcd_edge + 5 and
z_range[0] - pcd_edge < p[2] < z_range[1] + pcd_edge + 5]
pcd = np.asarray(random.choices(pcd, k=2000))
ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], c='k', alpha=.1, s=1)
ax.scatter([p0[0]], [p0[1]], [p0[2]], c='r', s=10, alpha=1)
ax.scatter([p_nxt[0]], [p_nxt[1]], [p_nxt[2]], c='g', s=10, alpha=1)
plt.show()
return p_nxt, nrml
def __find_nxt_p_bp(drawpath_p1, drawpath_p2, kdt_d3, p0, n0, objcm, pcd, max_nn=150, direction=np.array([0, 0, 1])):
v_draw = np.array(drawpath_p2) - np.array(drawpath_p1)
if abs(direction[0]) == 1:
v_draw = (0, v_draw[0], v_draw[1])
elif abs(direction[1]) == 1:
v_draw = (v_draw[0], 0, v_draw[1])
elif abs(direction[2]) == 1:
v_draw = (v_draw[0], v_draw[1], 0)
else:
print('Wrong input direction!')
return None
if objcm is None:
if len(pcd) < 50000:
objcm = pcdu.reconstruct_surface(pcd)
else:
objcm = pcdu.reconstruct_surface(random.choices(pcd, k=50000))
trimesh = objcm.trimesh
objcm.reparentTo(base.render)
rotmat = rm.rotmat_betweenvector(direction, n0)
v_draw = np.dot(rotmat, v_draw)
tgt_len = np.linalg.norm(v_draw)
pm = p0
pt_list = [p0]
tgt_len_list = [tgt_len]
segs = inc.mesh_plane(trimesh, np.cross(n0, v_draw), p0)
seg_pts = flatten_nested_list(segs)
pcdu.show_pcd(seg_pts)
kdt_seg, _ = get_kdt(seg_pts)
while tgt_len > 0:
knn_seg_pts = get_knn(pm, kdt_seg, k=len(seg_pts))
for pt in knn_seg_pts:
# print(np.dot((pt - pm), v_draw), pt, pm)
if str(pt) != str(pm) and np.dot((pt - pm), v_draw) >= 0:
tgt_len -= np.linalg.norm(pt - pm)
tgt_len_list.append(tgt_len)
pt_list.append(pt)
pm = pt
break
else:
break
# print(pt_list)
# print(tgt_len_list)
p_nxt = pt_list[-2] + (pt_list[-1] - pt_list[-2]) * (tgt_len_list[-2] / np.linalg.norm(pt_list[-1] - pt_list[-2]))
# print(tgt_len_list)
# for p in pt_list:
# base.pggen.plotSphere(base.render, p, rgba=(1, 0, 0, 1))
# base.pggen.plotSphere(base.render, p_nxt, rgba=(0, 1, 0, 1))
# base.run()
knn = get_knn(p_nxt, kdt_d3, k=max_nn)
nrml = get_nrml_pca(knn)
if np.dot(nrml, np.asarray([0, 0, 1])) < 0:
nrml = -nrml
# if np.dot(nrml, np.asarray([0, -1, 0])) < 0:
# nrml = -nrml
# if np.dot(nrml, np.asarray([-1, 0, 0])) < 0:
# nrml = -nrml
# if np.dot(nrml, np.asarray([0, -1, 0])) == -1:
# nrml = -nrml
return p_nxt, nrml
def __find_nxt_p(drawpath_p1, drawpath_p2, kdt_d3, pcd_start_p, pcd_start_n, direction=np.array([0, 0, 1])):
v_draw = np.array(drawpath_p2) - np.array(drawpath_p1)
if abs(direction[0]) == 1:
v_draw = (0, v_draw[0], v_draw[1])
elif abs(direction[1]) == 1:
v_draw = (v_draw[0], 0, v_draw[1])
elif abs(direction[2]) == 1:
v_draw = (v_draw[0], v_draw[1], 0)
else:
print('Wrong input direction!')
return None
rotmat = rm.rotmat_betweenvector(direction, pcd_start_n)
pt = pcd_start_p + np.dot(rotmat, v_draw)
p_start_inx = get_knn_indices(pcd_start_p, kdt_d3, k=1)[0]
p_nxt_inx = get_knn_indices(pt, kdt_d3, k=1)[0]
if p_start_inx == p_nxt_inx:
p_nxt_inx = get_knn_indices(pt, kdt_d3, k=2)[1]
return p_nxt_inx
def __find_nxt_p_intg(drawpath_p1, drawpath_p2, kdt_d3, p0, n0, direction=np.array([0, 0, 1]), max_nn=150, snap=False,
toggledebug=False, pcd=None):
v_draw = np.array(drawpath_p2) - np.array(drawpath_p1)
v_draw_len = np.linalg.norm(v_draw)
if abs(direction[0]) == 1:
v_draw = (0, v_draw[0], v_draw[1])
elif abs(direction[1]) == 1:
v_draw = (v_draw[0], 0, v_draw[1])
elif abs(direction[2]) == 1:
v_draw = (v_draw[0], v_draw[1], 0)
else:
print('Wrong input direction!')
return None
rotmat = rm.rotmat_betweenvector(direction, np.asarray(n0))
v_draw = np.dot(rotmat, v_draw)
knn_p0 = get_knn(p0, kdt_d3, k=max_nn)
knn_p0_tr, transmat = mu.trans_data_pcv(knn_p0)
f_q = mu.fit_qua_surface(knn_p0_tr)
v_draw_tr = np.dot(transmat.T, v_draw)
f_l_base = mu.get_plane(n0, v_draw, p0)
f_l = mu.get_plane(n0, v_draw, p0, transmat=transmat.T)
# print('v_draw', v_draw, v_draw_tr)
itg_axis = list(abs(v_draw_tr)).index(max(abs(v_draw_tr[:2])))
if v_draw_tr[itg_axis] > 0:
pt, _, F, G, x_y = mu.cal_surface_intersc(f_q, f_l, np.dot(transmat.T, p0), tgtlen=v_draw_len, mode='ub',
itg_axis=['x', 'y', 'z'][itg_axis], toggledebug=False)
else:
pt, _, F, G, x_y = mu.cal_surface_intersc(f_q, f_l, np.dot(transmat.T, p0), tgtlen=v_draw_len, mode='lb',
itg_axis=['x', 'y', 'z'][itg_axis], toggledebug=False)
pt = np.dot(transmat, pt)
# print('next p(2D):', drawpath_p2)
# print('next p(3D):', pt)
if snap:
knn_pt = get_knn(pt, kdt_d3, k=max_nn)
center = pcdu.get_pcd_center(np.asarray(knn_pt))
nrml = get_nrml_pca(knn_pt)
p_nxt = pt - np.dot((pt - center), nrml) * nrml
else:
p_nxt = pt
if toggledebug:
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 2, 1, projection='3d')
plot_edge = 9
plot_edge_z = 5
x_range = (min(knn_p0[:, 0].flatten()) - plot_edge, max(knn_p0[:, 0].flatten()) + plot_edge)
y_range = (min(knn_p0[:, 1].flatten()) - plot_edge, max(knn_p0[:, 1].flatten()) + plot_edge)
z_range = (min(knn_p0[:, 2].flatten()) - plot_edge_z, max(knn_p0[:, 2].flatten()) + plot_edge_z)
# if pcd is not None:
# pcd_edge = 6.5
# pcd = [p for p in pcd if
# x_range[0] - pcd_edge < p[0] < x_range[1] + pcd_edge and
# y_range[0] - pcd_edge+5 < p[1] < y_range[1] + pcd_edge and
# z_range[0] - pcd_edge < p[2] < z_range[1] + pcd_edge]
# pcd = np.asarray(random.choices(pcd, k=2000))
# ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], c='k', alpha=.1, s=1)
ax.scatter(knn_p0[:, 0], knn_p0[:, 1], knn_p0[:, 2], c='y', s=1, alpha=.5)
f_q_base = mu.fit_qua_surface(knn_p0)
mu.plot_surface_f(ax, f_q_base, x_range, y_range, z_range, dense=.5, c=cm.coolwarm)
mu.plot_surface_f(ax, f_l_base, x_range, y_range, z_range, dense=.5, axis='x', alpha=.2)
if snap:
ax.scatter(knn_pt[:, 0], knn_pt[:, 1], knn_pt[:, 2], c='k', s=5, alpha=.1)
f_q_qt = mu.fit_qua_surface(knn_pt)
mu.plot_surface_f(ax, f_q_qt, x_range, y_range, z_range, dense=.5)
ax.scatter([pt[0]], [pt[1]], [pt[2]], c='g', s=10, alpha=1)
ax.annotate3D('$q_t$', pt, xytext=(3, 3), textcoords='offset points')
ax.scatter([p0[0]], [p0[1]], [p0[2]], c='r', s=10, alpha=1)
ax.scatter([p_nxt[0]], [p_nxt[1]], [p_nxt[2]], c='b', s=10, alpha=1)
# ax.annotate3D('$q_0$', p0, xytext=(3, 3), textcoords='offset points')
# ax.annotate3D('$q_1$', p_nxt, xytext=(3, 3), textcoords='offset points')
# ax.arrow3D(p0[0], p0[1], p0[2], n0[0], n0[1], n0[2], mutation_scale=10, arrowstyle='->', color='b')
# ax.arrow3D(p0[0], p0[1], p0[2], v_draw[0], v_draw[1], v_draw[2], mutation_scale=10, arrowstyle='->')
# ax.annotate3D('$N_0$', p0 + n0, xytext=(3, 3), textcoords='offset points')
# ax.annotate3D('$V_{draw}$', p0 + v_draw * 0.5, xytext=(3, 3), textcoords='offset pixels')
ax_tr = fig.add_subplot(1, 2, 2, projection='3d')
knn_p0_tr = np.dot(transmat.T, knn_p0.T).T
x_range = (min(knn_p0_tr[:, 0].flatten()) - plot_edge, max(knn_p0_tr[:, 0].flatten()) + plot_edge)
y_range = (min(knn_p0_tr[:, 1].flatten()) - plot_edge, max(knn_p0_tr[:, 1].flatten()) + plot_edge)
z_range = (min(knn_p0_tr[:, 2].flatten()) - plot_edge_z, max(knn_p0_tr[:, 2].flatten()) + plot_edge_z)
ax_tr.scatter(knn_p0_tr[:, 0], knn_p0_tr[:, 1], knn_p0_tr[:, 2], c='k', s=5, alpha=.1)
mu.plot_surface_f(ax_tr, f_q, x_range, y_range, z_range, dense=.5, c=cm.coolwarm)
mu.plot_surface_f(ax_tr, f_l, x_range, y_range, z_range, dense=.5, axis='y', alpha=.2)
p0_tr = np.dot(transmat.T, p0)
p_nxt_tr = np.dot(transmat.T, p_nxt)
n0_tr = np.dot(transmat.T, n0)
ax_tr.scatter([p0_tr[0]], [p0_tr[1]], [p0_tr[2]], c='r', s=10, alpha=1)
ax_tr.scatter([p_nxt_tr[0]], [p_nxt_tr[1]], [p_nxt_tr[2]], c='b', s=10, alpha=1)
# ax_tr.annotate3D('$q_0$', p0_tr, xytext=(3, 3), textcoords='offset points')
# ax_tr.annotate3D('$q_1$', p_nxt_tr, xytext=(3, 3), textcoords='offset points')
# ax_tr.arrow3D(p0_tr[0], p0_tr[1], p0_tr[2], n0_tr[0], n0_tr[1], n0_tr[2],
# mutation_scale=10, arrowstyle='->')
# ax_tr.arrow3D(p0_tr[0], p0_tr[1], p0_tr[2], v_draw_tr[0], v_draw_tr[1], v_draw_tr[2],
# mutation_scale=10, arrowstyle='->')
# ax_tr.annotate3D('$N_0$', p0_tr + n0_tr, xytext=(3, 3), textcoords='offset points')
# ax_tr.annotate3D('$V_{draw}$', p0_tr + v_draw_tr * 0.5, xytext=(3, 3), textcoords='offset points')
mu.plot_intersc(ax_tr, x_y, F, p0_tr, alpha=.5, c='k', plot_edge=10)
mu.plot_intersc(ax, x_y, F, p0_tr, transmat=transmat, alpha=.5, c='k', plot_edge=9)
plt.show()
knn_p_nxt = get_knn(p_nxt, kdt_d3, k=max_nn)
p_nxt_nrml = get_nrml_pca(knn_p_nxt)
angle = rm.angle_between_vectors(p_nxt_nrml, n0)
if abs(angle) > np.pi / 2:
p_nxt_nrml = -p_nxt_nrml
# print(angle, np.asarray(pt), np.asarray(p_nxt_nrml))
return np.asarray(p_nxt), np.asarray(p_nxt_nrml)
def __prj_stroke(stroke, drawcenter, pcd, pcd_nrmls, kdt_d3, mode='DI', objcm=None, pcd_start_p=None, error_method='ED',
pcd_start_n=None, surface=None, transmat=None, direction=np.asarray((0, 0, 1)), toggledebug=False):
"""
:param stroke:
:param drawcenter:
:param pcd:
:param pcd_nrmls:
:param kdt_d3:
    :param mode: 'DI', 'EI', 'QI', 'rbf', 'rbf_g', 'gaussian', 'quad', 'bp'
:param objcm:
:param pcd_start_p:
:param pcd_start_n:
:param direction:
:return:
"""
time_start = time.time()
if pcd_start_p is None:
if objcm is None:
inx = get_knn_indices(drawcenter, kdt_d3)[0]
pcd_start_p = pcd[inx]
pcd_start_n = pcd_nrmls[inx]
print('pcd_start_p:', pcd_start_p, 'pcd_start_n:', pcd_start_n)
else:
pcd_start_p, pcd_start_n = rayhitmesh_p(objcm, drawcenter, stroke[0], direction=direction)
# base.pggen.plotSphere(base.render, pcd_start_p, radius=2, rgba=(0, 0, 1, 1))
# base.run()
pos_nrml_list = [[pcd_start_p, pcd_start_n]]
for i in range(len(stroke) - 1):
p1, p2 = stroke[i], stroke[i + 1]
if mode == 'EI':
p_nxt, nrml = __find_nxt_p_pca(p1, p2, kdt_d3, pos_nrml_list[-1][0], pos_nrml_list[-1][1],
direction=direction, toggledebug=toggledebug, pcd=pcd)
pos_nrml_list.append([p_nxt, nrml])
elif mode == 'DI':
p_nxt_inx = __find_nxt_p(p1, p2, kdt_d3, pos_nrml_list[-1][0], pos_nrml_list[-1][1], direction=direction)
pos_nrml_list.append([pcd[p_nxt_inx], pcd_nrmls[p_nxt_inx]])
elif mode == 'QI':
p_nxt, p_nxt_nrml = __find_nxt_p_intg(p1, p2, kdt_d3, pos_nrml_list[-1][0], pos_nrml_list[-1][1],
snap=SNAP_QI, direction=direction, toggledebug=toggledebug, pcd=pcd)
pos_nrml_list.append([p_nxt, p_nxt_nrml])
# base.pggen.plotSphere(base.render, pos=p_nxt, rgba=(1, 0, 0, 1))
# base.pggen.plotArrow(base.render, spos=p_nxt, epos=p_nxt + p_nxt_nrml * 10, rgba=(1, 0, 0, 1))
elif mode in ['rbf', 'gaussian', 'quad']:
p_nxt, p_nxt_nrml = __find_nxt_p_psfc(p1, p2, kdt_d3, pos_nrml_list[-1][0], pos_nrml_list[-1][1],
direction=direction, toggledebug=toggledebug, pcd=pcd, step=.01,
mode=mode, snap=SNAP_SFC)
pos_nrml_list.append([p_nxt, p_nxt_nrml])
elif mode == 'rbf_g':
p_nxt, p_nxt_nrml = __find_nxt_p_rbf_g(p1, p2, surface, transmat, kdt_d3, pos_nrml_list[-1][0],
pos_nrml_list[-1][1], direction=direction, toggledebug=toggledebug,
pcd=pcd, step=.01, snap=SNAP_SFC_G)
pos_nrml_list.append([p_nxt, p_nxt_nrml])
elif mode == 'bp':
p_nxt, p_nxt_nrml = __find_nxt_p_bp(p1, p2, kdt_d3, pos_nrml_list[-1][0], pos_nrml_list[-1][1], objcm, pcd,
direction=direction)
pos_nrml_list.append([p_nxt, p_nxt_nrml])
else:
print("mode name must in ['DI', 'EI','QI','rbf','rbf-g', 'gaussian', 'quad']")
time_cost = time.time() - time_start
if error_method == 'GD':
error, error_list = get_prj_error(stroke, pos_nrml_list, method=error_method, kdt_d3=kdt_d3)
else:
error, error_list = get_prj_error(stroke, pos_nrml_list)
print(f'stroke error: {error}')
return pos_nrml_list, error, error_list, time_cost
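# Hypothetical usage sketch (added, not part of the original module): project a
# single 2-D stroke onto a point cloud with the quadratic-integration mode.
# get_kdt and __prj_stroke are the module functions defined above.
def _example_prj_stroke(stroke, drawcenter, pcd, pcd_nrmls):
    kdt_d3, _ = get_kdt(pcd.tolist(), dimension=3)
    return __prj_stroke(stroke, drawcenter, pcd, pcd_nrmls, kdt_d3, mode='QI')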
def _is_p_on_seg(p1, p2, q):
if min(p1[0], p2[0]) <= q[0] <= max(p1[0], p2[0]) and min(p1[1], p2[1]) <= q[1] <= max(p1[1], p2[1]) \
and min(p1[2], p2[2]) <= q[2] <= max(p1[2], p2[2]):
return True
else:
return False
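# Added note: _is_p_on_seg is an axis-aligned bounding-box test between p1 and
# p2; it does not verify that q is actually collinear with the segment.
def _example_is_p_on_seg():
    assert _is_p_on_seg((0, 0, 0), (2, 2, 2), (1, 1, 1))      # inside the box
    assert _is_p_on_seg((0, 0, 0), (2, 2, 2), (1, 2, 0))      # inside, but off the line
    assert not _is_p_on_seg((0, 0, 0), (2, 2, 2), (3, 1, 1))  # outside the box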
def get_prj_error(drawpath, pos_nrml_list, method='ED', kdt_d3=None, pcd=None, objcm=None,
surface=None, transmat=np.eye(4), max_nn=150):
"""
:param drawpath:
:param pos_nrml_list:
:param method: 'ED', 'GD', 'rbf'
:return:
"""
error_list = []
prj_len_list = []
real_len_list = []
if method == 'ED':
for i in range(1, len(drawpath)):
try:
real_len = np.linalg.norm(np.array(drawpath[i]) - np.array(drawpath[i - 1]))
prj_len = np.linalg.norm(np.array(pos_nrml_list[i][0]) - np.array(pos_nrml_list[i - 1][0]))
error_list.append(round((prj_len - real_len) / real_len, 5))
prj_len_list.append(prj_len)
real_len_list.append(real_len)
# print('project ED:', real_len, prj_len)
except:
error_list.append(None)
elif method == 'GD':
for i in range(1, len(drawpath)):
try:
real_len = np.linalg.norm(np.array(drawpath[i]) - np.array(drawpath[i - 1]))
p1 = np.asarray(pos_nrml_list[i - 1][0])
p2 = np.asarray(pos_nrml_list[i][0])
v_draw = p2 - p1
knn = get_knn(p1, kdt_d3, k=max_nn)
knn_tr, transmat = mu.trans_data_pcv(knn)
f_q = mu.fit_qua_surface(knn_tr)
f_l = mu.get_plane(pos_nrml_list[i][1], v_draw, p1, transmat=transmat.T)
v_draw_tr = np.dot(transmat.T, v_draw)
p1_tr = np.dot(transmat.T, p1)
p2_tr = np.dot(transmat.T, p2)
if abs(v_draw_tr[0]) > abs(v_draw_tr[1]):
prj_len = mu.cal_surface_intersc_p2p(f_q, f_l, p1_tr, p2_tr, itg_axis='x')
else:
prj_len = mu.cal_surface_intersc_p2p(f_q, f_l, p1_tr, p2_tr, itg_axis='y')
error_list.append(round((prj_len - real_len) / real_len, 5))
prj_len_list.append(prj_len)
real_len_list.append(real_len)
# print('project GD:', real_len, prj_len)
except:
error_list.append(None)
elif method == 'rbf':
for i in range(1, len(drawpath)):
try:
real_len = np.linalg.norm(np.array(drawpath[i]) - np.array(drawpath[i - 1]))
p1 = np.asarray(pos_nrml_list[i - 1][0])
p2 = np.asarray(pos_nrml_list[i][0])
v_draw = p2 - p1
knn = get_knn(p1, kdt_d3, k=max_nn)
knn_tr, transmat = mu.trans_data_pcv(knn, random_rot=False)
surface = sfc.RBFSurface(knn_tr[:, :2], knn_tr[:, 2])
pm = np.dot(np.linalg.inv(transmat), p1)
step = .05
iter_times = 1 / step
prj_len = 0
while iter_times > 0:
p_uv = (pm + np.dot(np.linalg.inv(transmat), v_draw) * step)[:2]
z = surface.get_zdata([p_uv])[0]
pt = np.asarray([p_uv[0], p_uv[1], z])
prj_len += np.linalg.norm(pt - pm)
pm = pt
iter_times -= 1
error_list.append(round((prj_len - real_len) / real_len, 5))
prj_len_list.append(prj_len)
real_len_list.append(real_len)
except:
error_list.append(None)
elif method == 'rbf-g':
for i in range(1, len(drawpath)):
try:
real_len = np.linalg.norm(np.array(drawpath[i]) - np.array(drawpath[i - 1]))
p1, n1 = np.asarray(pos_nrml_list[i - 1])
p2, n2 = np.asarray(pos_nrml_list[i])
v_draw = p2 - p1
pm = np.dot(np.linalg.inv(transmat), p1)
step = .05
iter_times = 1 / step
prj_len = 0
base.pggen.plotSphere(base.render, p1, rgba=(1, 0, 0, 1))
while iter_times > 0:
p_uv = (pm + np.dot(np.linalg.inv(transmat), v_draw) * step)[:2]
z = surface.get_zdata([p_uv])[0]
pt = np.asarray([p_uv[0], p_uv[1], z])
prj_len += np.linalg.norm(pt - pm)
pm = pt
iter_times -= 1
base.pggen.plotSphere(base.render, pt, rgba=(1, 0, 1, 1))
error = round((prj_len - real_len) / real_len, 4)
error_list.append(error)
prj_len_list.append(prj_len)
real_len_list.append(real_len)
except:
error_list.append(None)
# base.run()
elif method == 'inc':
if objcm is None:
if len(pcd) < 50000:
objcm = pcdu.reconstruct_surface(pcd)
else:
objcm = pcdu.reconstruct_surface(random.choices(pcd, k=50000))
trimesh = objcm.trimesh
objcm.reparentTo(base.render)
for i in range(1, len(drawpath)):
try:
real_len = np.linalg.norm(np.array(drawpath[i]) - np.array(drawpath[i - 1]))
p1, n1 = np.asarray(pos_nrml_list[i - 1])
p2, n2 = np.asarray(pos_nrml_list[i])
v_draw = p2 - p1
segs = inc.mesh_plane(trimesh, np.cross(n1, v_draw), p1)
seg_pts = flatten_nested_list(segs)
inp_list = [p1, p2]
for p in seg_pts:
if _is_p_on_seg(p1, p2, p):
inp_list.append(p)
# base.pggen.plotSphere(base.render, p, rgba=(1, 1, 0, 1))
kdt, _ = get_kdt(inp_list)
_, indices = kdt.query([p1], k=len(inp_list))
prj_len = 0
for i in range(len(indices[0]) - 1):
prj_len += np.linalg.norm(inp_list[indices[0][i]] - inp_list[indices[0][i + 1]])
error = round((prj_len - real_len) / real_len, 4)
error_list.append(error)
prj_len_list.append(prj_len)
real_len_list.append(real_len)
# base.pggen.plotSphere(base.render, p1, rgba=(1, 0, 0, 1))
# base.pggen.plotSphere(base.render, p2, rgba=(0, 1, 0, 1))
except:
error_list.append(None)
if sum(real_len_list) != 0:
error = round(abs(sum(real_len_list) - sum(prj_len_list)) / sum(real_len_list), 5)
# error = round(max(np.asarray(real_len_list) - np.asarray(prj_len_list)), 5)
else:
error = 0
return error, error_list
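# Illustrative sketch (added, not part of the original module): the 'ED' error
# above is the relative difference between the projected segment length and the
# drawn segment length; for one pair of consecutive points it reduces to:
def _ed_segment_error(draw_p1, draw_p2, prj_p1, prj_p2):
    real_len = np.linalg.norm(np.array(draw_p2) - np.array(draw_p1))
    prj_len = np.linalg.norm(np.array(prj_p2) - np.array(prj_p1))
    return round((prj_len - real_len) / real_len, 5)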
def prj_drawpath_ss_on_pcd(obj_item, drawpath, mode='DI', direction=np.asarray((0, 0, 1)), error_method='ED',
toggledebug=False):
base.pggen.plotBox(base.render, pos=(obj_item.drawcenter[0], obj_item.drawcenter[1], 80), x=120, y=120, z=1,
rgba=[1, 1, 1, .3])
for p in drawpath:
base.pggen.plotSphere(base.render, pos=(p[0] + obj_item.drawcenter[0], p[1] + obj_item.drawcenter[1], 80),
radius=1, rgba=(1, 0, 0, 1))
print('--------------map single stroke on pcd--------------')
print('pcd num:', len(obj_item.pcd))
print('draw path point num:', len(drawpath))
surface = None
transmat = np.eye(3)
time_cost_rbf = 0
pca_trans = True
if mode == 'rbf_g':
time_start = time.time()
# pcd = np.asarray(random.choices(pcd, k=5000))
if pca_trans:
pcd_tr, transmat = mu.trans_data_pcv(obj_item.pcd, random_rot=False)
surface = sfc.RBFSurface(pcd_tr[:, :2], pcd_tr[:, 2], kernel=KERNEL)
# surface = sfc.MixedGaussianSurface(pcd_tr[:, :2], pcd_tr[:, 2], n_mix=1)
else:
surface = sfc.RBFSurface(obj_item.pcd[:, :2], obj_item.pcd[:, 2], kernel=KERNEL)
time_cost_rbf = time.time() - time_start
print('time cost(rbf global):', time_cost_rbf)
kdt_d3, pcd_narray_d3 = get_kdt(obj_item.pcd.tolist(), dimension=3)
pos_nrml_list, error, error_list, time_cost = \
__prj_stroke(drawpath, obj_item.drawcenter, obj_item.pcd, obj_item.nrmls, kdt_d3, objcm=obj_item.objcm,
mode=mode, pcd_start_p=None, pcd_start_n=None, direction=direction, toggledebug=toggledebug,
error_method=error_method, surface=surface, transmat=transmat)
print('avg error', np.mean(error_list))
    print('projection time cost', time_cost + time_cost_rbf)
return pos_nrml_list, error_list, time_cost + time_cost_rbf
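# Hypothetical usage sketch (added): obj_item is assumed to expose .pcd, .nrmls,
# .drawcenter and .objcm, as used above.
# pos_nrml_list, error_list, t = prj_drawpath_ss_on_pcd(obj_item, drawpath,
#                                                       mode='rbf_g', error_method='ED')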
def prj_drawpath_ss_loop(obj_item, drawpath, mode='DI', direction=np.asarray((0, 0, 1)), error_method='ED',
toggledebug=False, step=1):
time_start = time.time()
loop_error_list = []
loop_pos_nrml_list = []
loop_time_cost_list = []
print('--------------map single stroke to pcd loop--------------')
print('pcd num:', len(obj_item.pcd))
print('draw path point num:', len(drawpath))
for i in range(0, len(drawpath), step):
print('loop:', i)
drawpath_tmp = drawpath[i:] + drawpath[:i]
kdt_d3, pcd_narray_d3 = get_kdt(obj_item.pcd.tolist(), dimension=3)
pos_nrml_list, error, error_list, time_cost = \
__prj_stroke(drawpath_tmp, obj_item.drawcenter, obj_item.pcd, obj_item.nrmls, kdt_d3,
objcm=obj_item.objcm, mode=mode, pcd_start_p=None, pcd_start_n=None, direction=direction,
toggledebug=toggledebug, error_method=error_method)
loop_error_list.append(error)
loop_pos_nrml_list.append(pos_nrml_list)
loop_time_cost_list.append(time_cost)
time_cost = time.time() - time_start
print('loop time cost', time_cost)
return loop_pos_nrml_list, loop_error_list, loop_time_cost_list
def prj_drawpath_ss_SI_loop(obj_item, drawpath, error_method='ED', toggledebug=False, step=1):
time_start = time.time()
loop_error_list = []
loop_pos_nrml_list = []
loop_time_cost_list = []
print('--------------map single stroke to pcd loop--------------')
print('pcd num:', len(obj_item.pcd))
print('draw path point num:', len(drawpath))
for i in range(0, len(drawpath), step):
print('loop:', i)
drawpath_tmp = drawpath[i:] + drawpath[:i]
time_start = time.time()
uvs, vs, nrmls, faces, avg_scale = cu.lscm_objcm(obj_item.objcm, toggledebug=toggledebug)
uv_center = cu.get_uv_center(uvs)
pos_nrml_list, error, error_list, time_cost = \
__prj_stroke_SI(drawpath_tmp, uv_center, uvs, vs, nrmls, faces, avg_scale, error_method=error_method)
print('avg error', error)
loop_error_list.append(error)
loop_pos_nrml_list.append(pos_nrml_list)
loop_time_cost_list.append(time_cost)
time_cost = time.time() - time_start
print('loop time cost', time_cost)
return loop_pos_nrml_list, loop_error_list, loop_time_cost_list
def prj_drawpath_ms_on_pcd(obj_item, drawpath_ms, mode='DI', step=1.0, direction=np.asarray((0, 0, 1)),
error_method='ED', toggledebug=False, pca_trans=True):
print(f'--------------map multiple strokes on pcd({mode})--------------')
print('pcd num:', len(obj_item.pcd))
print('stroke num:', len(drawpath_ms))
kdt_d3, point_narray_d3 = get_kdt(obj_item.pcd, dimension=3)
pos_nrml_list_ms = []
error_ms = []
error_list_ms = []
time_cost_total = 0
surface = None
transmat = np.eye(3)
time_cost_rbf = 0
# pcdu.show_pcd(obj_item.pcd)
# base.run()
if mode == 'rbf_g':
time_start = time.time()
pcd = obj_item.pcd
# print(len(obj_item.pcd))
# pcd = np.asarray(random.choices(pcd, k=50000))
if pca_trans:
pcd_tr, transmat = mu.trans_data_pcv(pcd, random_rot=False)
surface = sfc.RBFSurface(pcd_tr[:, :2], pcd_tr[:, 2], kernel=KERNEL)
# surface = sfc.MixedGaussianSurface(pcd_tr[:, :2], pcd_tr[:, 2], n_mix=1)
else:
surface = sfc.RBFSurface(pcd[:, :2], pcd[:, 2], kernel=KERNEL)
time_cost_rbf = time.time() - time_start
print('time cost(rbf global):', time_cost_rbf)
surface_cm = surface.get_gometricmodel(rgba=[.8, .8, .1, 1])
mat4 = np.eye(4)
mat4[:3, :3] = transmat
surface_cm.sethomomat(mat4)
surface_cm.reparentTo(base.render)
pcdu.show_pcd(pcd)
base.run()
for i, stroke in enumerate(drawpath_ms):
print('------------------------------')
print('stroke point num:', len(stroke))
if i > 0:
gotostart_stroke = mu.linear_interp_2d(drawpath_ms[i - 1][-1], stroke[0], step=step)
gotostart_pos_nrml_list, _, _, time_cost = \
__prj_stroke(gotostart_stroke, obj_item.drawcenter, obj_item.pcd, obj_item.nrmls, kdt_d3,
objcm=obj_item.objcm, mode=mode, direction=direction,
pcd_start_p=pos_nrml_list_ms[i - 1][-1][0], pcd_start_n=pos_nrml_list_ms[i - 1][-1][1],
toggledebug=toggledebug, surface=surface, transmat=transmat)
time_cost_total += time_cost
stroke_pos_nrml_list, error, error_list, time_cost = \
__prj_stroke(stroke, obj_item.drawcenter, obj_item.pcd, obj_item.nrmls, kdt_d3,
objcm=obj_item.objcm, mode=mode, error_method=error_method, direction=direction,
pcd_start_p=gotostart_pos_nrml_list[-1][0], pcd_start_n=gotostart_pos_nrml_list[-1][1],
toggledebug=toggledebug, surface=surface, transmat=transmat)
time_cost_total += time_cost
else:
stroke_pos_nrml_list, error, error_list, time_cost = \
__prj_stroke(stroke, obj_item.drawcenter, obj_item.pcd, obj_item.nrmls, kdt_d3,
objcm=obj_item.objcm, mode=mode, direction=direction, error_method=error_method,
toggledebug=toggledebug, surface=surface, transmat=transmat)
time_cost_total += time_cost
error_ms.append(error)
error_list_ms.extend(error_list)
pos_nrml_list_ms.append(stroke_pos_nrml_list)
print('avg error', np.mean(error_ms))
    print('time cost (projection)', time_cost_total + time_cost_rbf)
return pos_nrml_list_ms, error_list_ms, time_cost_total + time_cost_rbf
def __prj_stroke_II(stroke, drawcenter, vs, nrmls, kdt_uv, scale_list, use_binp=False):
time_start = time.time()
avg_scale = np.mean(scale_list)
stroke = np.array(stroke) / avg_scale
pos_nrml_list = []
stroke =
|
np.array([(-p[0], -p[1]) for p in stroke])
|
numpy.array
|
"""
Misc tools for implementing data structures
"""
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try:
from io import BytesIO
except ImportError: # pragma: no cover
# Python < 2.6
from cStringIO import StringIO as BytesIO
import itertools
from cStringIO import StringIO
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas._tseries as lib
from pandas.util import py3compat
import codecs
import csv
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
np.set_printoptions(suppress=True)
except Exception: # pragma: no cover
pass
class PandasError(Exception):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
def isnull(obj):
'''
Replacement for numpy.isnan / -numpy.isfinite which is suitable
for use on object arrays.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
if np.isscalar(obj) or obj is None:
return lib.checknull(obj)
from pandas.core.generic import PandasObject
from pandas import Series
if isinstance(obj, np.ndarray):
if obj.dtype.kind in ('O', 'S'):
# Working around NumPy ticket 1542
shape = obj.shape
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(obj.ravel())
result[:] = vec.reshape(shape)
if isinstance(obj, Series):
result = Series(result, index=obj.index, copy=False)
elif obj.dtype == np.datetime64:
# this is the NaT pattern
result = obj.view('i8') == lib.NaT
else:
result = -np.isfinite(obj)
return result
elif isinstance(obj, PandasObject):
# TODO: optimize for DataFrame, etc.
return obj.apply(isnull)
else:
return obj is None
def notnull(obj):
'''
Replacement for numpy.isfinite / -numpy.isnan which is suitable
for use on object arrays.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
res = isnull(obj)
if np.isscalar(res):
return not res
return -res
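# Illustrative usage sketch (added, not part of the original module): unlike
# plain np.isnan, isnull also treats None inside object arrays as missing.
def _example_isnull_usage():
    values = np.array([1.0, None, np.nan], dtype=object)
    missing = isnull(values)   # -> array([False,  True,  True])
    return values[~missing]    # -> array([1.0], dtype=object)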
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = BytesIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
return arr
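# Round-trip sketch (added for clarity): _pickle_array/_unpickle_array move an
# ndarray through NumPy's .npy serialization format rather than the pickle
# protocol itself.
def _example_array_roundtrip():
    original = np.arange(5)
    restored = _unpickle_array(_pickle_array(original))
    assert (restored == original).all()
    return restored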
def _take_1d_datetime(arr, indexer, out, fill_value=np.nan):
view = arr.view(np.int64)
outview = out.view(np.int64)
lib.take_1d_bool(view, indexer, outview, fill_value=fill_value)
def _take_2d_axis0_datetime(arr, indexer, out, fill_value=np.nan):
view = arr.view(np.int64)
outview = out.view(np.int64)
lib.take_1d_bool(view, indexer, outview, fill_value=fill_value)
def _take_2d_axis1_datetime(arr, indexer, out, fill_value=np.nan):
view = arr.view(np.uint8)
outview = out.view(np.uint8)
lib.take_1d_bool(view, indexer, outview, fill_value=fill_value)
def _view_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if na_override is not None and np.isnan(fill_value):
fill_value = na_override
view = arr.view(wrap_dtype)
outview = out.view(wrap_dtype)
f(view, indexer, outview, fill_value=fill_value)
return wrapper
_take1d_dict = {
'float64' : lib.take_1d_float64,
'int32' : lib.take_1d_int32,
'int64' : lib.take_1d_int64,
'object' : lib.take_1d_object,
'bool' : _view_wrapper(lib.take_1d_bool, np.uint8),
'datetime64[us]' : _view_wrapper(lib.take_1d_int64, np.int64,
na_override=lib.NaT),
}
_take2d_axis0_dict = {
'float64' : lib.take_2d_axis0_float64,
'int32' : lib.take_2d_axis0_int32,
'int64' : lib.take_2d_axis0_int64,
'object' : lib.take_2d_axis0_object,
'bool' : _view_wrapper(lib.take_2d_axis0_bool, np.uint8),
'datetime64[us]' : _view_wrapper(lib.take_2d_axis0_int64, np.int64,
na_override=lib.NaT),
}
_take2d_axis1_dict = {
'float64' : lib.take_2d_axis1_float64,
'int32' : lib.take_2d_axis1_int32,
'int64' : lib.take_2d_axis1_int64,
'object' : lib.take_2d_axis1_object,
'bool' : _view_wrapper(lib.take_2d_axis1_bool, np.uint8),
'datetime64[us]' : _view_wrapper(lib.take_2d_axis1_int64, np.int64,
na_override=lib.NaT),
}
def _get_take2d_function(dtype_str, axis=0):
if axis == 0:
return _take2d_axis0_dict[dtype_str]
else:
return _take2d_axis1_dict[dtype_str]
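# Illustrative sketch (added, not part of the original module): how the
# dtype-name dispatch above is meant to be used; bool and datetime64[us]
# entries go through _view_wrapper so the integer kernels can operate on
# reinterpreted views.
def _example_take2d_dispatch(arr, indexer):
    # arr: 2-D float64 ndarray, indexer: int32 row indexer (hypothetical inputs)
    out = np.empty((len(indexer), arr.shape[1]), dtype=arr.dtype)
    take_f = _get_take2d_function(arr.dtype.name, axis=0)  # e.g. lib.take_2d_axis0_float64
    take_f(arr, indexer, out, fill_value=np.nan)
    return out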
def take_1d(arr, indexer, out=None, fill_value=np.nan):
"""
Specialized Cython take which sets NaN values in one pass
"""
dtype_str = arr.dtype.name
n = len(indexer)
if not isinstance(indexer, np.ndarray):
# Cython methods expects 32-bit integers
indexer = np.array(indexer, dtype=np.int32)
indexer = _ensure_int32(indexer)
out_passed = out is not None
take_f = _take1d_dict.get(dtype_str)
if dtype_str in ('int32', 'int64', 'bool'):
try:
if out is None:
out =
|
np.empty(n, dtype=arr.dtype)
|
numpy.empty
|
from PIL import Image
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
from tools.prepare_things import get_name
import os
import torch
import numpy as np
class MakeList(object):
"""
    This class is used to build the train/validation data lists for model training and testing.
    Each entry is [image_path, label]; the label is derived from the image file-name prefix
    (the text before the first '_').
"""
def __init__(self, args, ratio=0.8):
self.image_root = args.dataset_dir
self.all_image = get_name(self.image_root, mode_folder=False)
self.category = sorted(set([i[:i.find('_')] for i in self.all_image]))
for c_id, c in enumerate(self.category):
print(c_id, '\t', c)
        self.ratio = ratio
def get_data(self):
all_data = []
for img in self.all_image:
label = self.deal_label(img)
all_data.append([os.path.join(self.image_root, img), label])
        train, val = train_test_split(all_data, random_state=1, train_size=self.ratio)
return train, val
def deal_label(self, img_name):
        category_no = img_name[:img_name.find('_')]
        back = self.category.index(category_no)
return back
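# Hypothetical usage sketch (added): args is assumed to expose .dataset_dir, a
# folder of images named '<class>_<index>.<ext>'.
# train_items, val_items = MakeList(args, ratio=0.8).get_data()
# each item is [image_path, label_index]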
class MakeListImage():
"""
    This class is used to build the data lists for ImageNet.
"""
def __init__(self, args):
self.image_root = args.dataset_dir
self.category = get_name(self.image_root + "train/")
self.used_cat = self.category[:args.num_classes]
# for c_id, c in enumerate(self.used_cat):
# print(c_id, '\t', c)
def get_data(self):
train = self.get_img(self.used_cat, "train")
val = self.get_img(self.used_cat, "val")
return train, val
def get_img(self, folders, phase):
record = []
for folder in folders:
current_root = os.path.join(self.image_root, phase, folder)
images = get_name(current_root, mode_folder=False)
for img in images:
record.append([os.path.join(current_root, img), self.deal_label(folder)])
return record
def deal_label(self, img_name):
back = self.used_cat.index(img_name)
return back
class ConText(Dataset):
"""read all image name and label"""
def __init__(self, data, transform=None):
self.all_item = data
self.transform = transform
def __len__(self):
return len(self.all_item)
def __getitem__(self, item_id): # generate data when giving index
        if not os.path.exists(self.all_item[item_id][0]):
            raise FileNotFoundError("image does not exist: " + self.all_item[item_id][0])
image_path = self.all_item[item_id][0]
image = Image.open(image_path).convert('RGB')
if image.mode == 'L':
image = image.convert('RGB')
if self.transform:
image = self.transform(image)
label = self.all_item[item_id][1]
label = torch.from_numpy(
|
np.array(label)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 18 10:30:35 2019
@author: kuangen
"""
from numpy import genfromtxt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import mat4py as m4p
from random import shuffle
import pandas as pd
import copy
import glob
def load_UCI_mat(data_path = 'data/1_dataset_UCI_DSADS/Raw/', X_dim = 4,
is_one_hot = False, is_normalized = False,
is_resize = False, leave_one_num = -1, sub_num = 8,
feature_length = 5625, sensor_num = 0):
idx_vec = list(range(sub_num))
if -1 == leave_one_num:
shuffle(idx_vec)
idx_train = idx_vec[:5]
idx_test = idx_vec[5:-1]
else:
idx_test = [copy.deepcopy(idx_vec[leave_one_num])]
idx_vec.pop(leave_one_num)
idx_train = idx_vec
# dataset:
# x_s_train, y_s_train, x_s_val, y_s_val, x_s_test, y_s_test, \
# x_t_train, y_t_train, x_t_val, y_t_val, x_t_test, y_t_test = \
dataset = []
for i in range(6):
dataset.append(
|
np.array([], dtype=np.float32)
|
numpy.array
|
import numpy as np
import cv2 as cv
import copy
import imutils
####################################################
def get_corners(contour):
epsilon = 0.05
count = 0
while True:
perimeter = cv.arcLength(contour,True)
perimeter = epsilon*perimeter
if perimeter > 100 or perimeter < 1:
return None
approx = cv.approxPolyDP(contour,perimeter,True)
print(perimeter)
hull = cv.convexHull(approx)
if len(hull) == 4:
return hull
else:
if len(hull) > 4:
epsilon += 0.01
else:
epsilon -= 0.01
        count += 1
        if count > 10:
            return []
###########################################################
def ar_tag_contours(contours, contour_hierarchy):
paper_contours_ind = []
ar_tag_contours = []
for ind, contour in enumerate(contour_hierarchy[0]):
if contour[3] == 0:
paper_contours_ind.append(ind)
if (len(paper_contours_ind) > 3):
return None
for ind in paper_contours_ind:
ar_tag_contour_ind = contour_hierarchy[0][ind][2]
ar_tag_contours.append(contours[ar_tag_contour_ind])
return ar_tag_contours
###############################################################
def arrange(corners):
corners = corners.reshape((4, 2))
new = np.zeros((4, 1, 2), dtype=np.int32)
add = corners.sum(1)
new[0] = corners[np.argmin(add)]
new[2] =corners[np.argmax(add)]
diff = np.diff(corners, axis=1)
new[1] =corners[np.argmin(diff)]
new[3] = corners[np.argmax(diff)]
return new
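# Added note: for an axis-aligned square with corners (0,0), (10,0), (10,10),
# (0,10) in image coordinates, arrange() orders them top-left, top-right,
# bottom-right, bottom-left: x+y is extremal at the TL/BR corners and y-x at
# the TR/BL corners.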
###########################################################
def homograph(src_plane, dest_plane):
A = []
for i in range(0, len(src_plane)):
x, y = src_plane[i][0], src_plane[i][1]
xp, yp = dest_plane[i][0], dest_plane[i][1]
A.append([-x, -y, -1, 0, 0, 0, x * xp, y * xp, xp])
A.append([0, 0, 0, -x, -y, -1, x * yp, y * yp, yp])
A =
|
np.asarray(A)
|
numpy.asarray
|
import os
import gym
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
from copy import deepcopy
from matplotlib import cm
from PIL import Image
IMPASSABLE_FOOD = 0 # TODO: does this make sense?
def update_shadow(position_x, position_y, shadow_in,
environment, environment_size_x, environment_size_y,
delta_time, relaxation_time, agent_influence_strength,
agent_influence_radius, number_of_agents, sigma):
"""
Define the update rule for shadow
"""
shadow_out = shadow_in*np.exp(-delta_time/relaxation_time)
for dummy_a in range(0, number_of_agents):
highest_x = int(
np.round(position_x[dummy_a]+sigma*agent_influence_radius))
lowest_x = int(
np.round(position_x[dummy_a] - sigma*agent_influence_radius))
highest_y = int(
np.round(position_y[dummy_a]+sigma*agent_influence_radius))
lowest_y = int(
np.round(position_y[dummy_a] - sigma*agent_influence_radius))
for dummy_x in range(max(0, lowest_x),
min(highest_x, environment_size_x)):
for dummy_y in range(max(0, lowest_y),
min(highest_y, environment_size_y)):
dummy_r = np.sqrt((dummy_x-position_x[dummy_a])**2 +
(dummy_y-position_y[dummy_a])**2)
shadow_out[dummy_x, dummy_y] = \
shadow_out[dummy_x, dummy_y]+agent_influence_strength *\
|
np.exp(-dummy_r**2/(2*agent_influence_radius**2))
|
numpy.exp
|
import numpy as np
from utils.Distance import distance
import concurrent.futures
import csv
from sklearn.model_selection import cross_val_score
from utils.ShapeletEval import ShapeletEval
from sklearn.model_selection import StratifiedKFold
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
from os.path import join, exists
import json
import logging
#logging.basicConfig( level=logging.DEBUG )
class SSMHelper(object):
def __init__( self, num_cpus: int = 1 ):
self.dist = distance
self.shapelets = []
self.thresholds = []
self.num_cpus = num_cpus
def compare_to_baseline( self, baseline_models: list, train_path: str, test_path: str, result_path: str, name: str, iteration: int, ground_truth: list = [], min_len: int = 10, max_len: int = 10, stride: int = 1, cv: int = 5, scoring: list = ['accuracy', 'precision', 'recall', 'f1'] ):
# Read Training Data
X_train = []
y_train = []
with open(train_path, 'r') as train_file:
reader = csv.reader(train_file, delimiter=',')
for row in reader:
X_train.append( (np.array(row[1:]).astype(float)) )
y_train.append(np.array(row[0]).astype(float).astype(int))
# Read Test Data
X_test = []
y_test = []
with open(test_path, 'r') as test_file:
reader = csv.reader(test_file, delimiter=',')
for row in reader:
X_test.append( (np.array(row[1:]).astype(float)) )
y_test.append(np.array(row[0]).astype(float).astype(int))
# Evaluate top k shapelets for SSM and basline methods
k = 10
result_path="./ssm_tmp"
if not exists(result_path):
os.makedirs(result_path)
#ssm_cross_val_metrics = self.cross_validate_ssm( X_train, y_train, name, result_path, stride, min_len, max_len, cv )
# Fit ssm
logging.info( "Train SSM" )
ssm_shapelets, ssm_parameters = self.run_ssm(train_path, test_path, result_path, name, stride=stride, min_len=min_len, max_len=max_len)
shapelet_eval = ShapeletEval( X_train, y_train, X_train, y_train )
# Create dict with SSM properties
ssm = {}
ssm_distances = []
ssm_aucs = []
ssm_p_val = []
ssm_top_candidates = []
ssm_acc= []
ssm_recall=[]
ssm_prec=[]
for i, ssm_shapelet in enumerate(ssm_shapelets):
if i < k:
ssm_top_candidates.append( ssm_shapelet['shapelet'] )
ssm_distances.append( self._subsequence_dist( ssm_shapelet['shapelet'], ground_truth ) )
#ssm_aucs.append( shapelet_eval.get_shapelet_auc(X_test, y_test, ssm_shapelet['shapelet']) )
all_metrics = shapelet_eval.evaluate( ssm_shapelet['shapelet'] )
sorted_pvals = sorted( all_metrics, key=lambda x: x['p_val'] )
ssm_p_val.append( sorted_pvals[0]['p_val'] )
ssm_acc.append( sorted_pvals[0]['acc'] )
ssm_recall.append( sorted_pvals[0]['recall'] )
ssm_prec.append( sorted_pvals[0]['prec'] )
else:
break
#ssm['cv'] = { 'num_folds': cv, 'results': ssm_cross_val_metrics }
ssm['top_k_gt_distances'] = ssm_distances
#ssm['top_k_aucs'] = ssm_aucs
ssm['top_k_pvals']= ssm_p_val
ssm['top_k_acc']= ssm_acc
ssm['top_k_prec']= ssm_prec
ssm['top_k_recall']= ssm_recall
ssm['top_candidates']= ssm_top_candidates
# Encode data to binary representation
encoded_train_data = self.fit_transform( X_train, y_train, min_len=min_len, max_len=max_len )
encoded_test_data = self.transform( X_test )
# Create dict with SSM properties
#Fit baseline models and get metrics
baselines = {}
shapelet_eval = ShapeletEval( X_train, y_train, X_train, y_train )
for i, model in enumerate( baseline_models ):
i = str(i)
baselines[ i ] = {}
# Get CV scores
#scores = self.cross_validate_general( model, encoded_train_data, y_train, n_splits=cv )
#baselines[ i ]['cv'] = scores
#scores = cross_val_score(model, X_train, y_train, cv=5, scoring='accuracy')
#baselines[ i ]['scores'] = scores
# Fit model
model.fit( encoded_train_data, y_train )
# Get coefficents
baselines[ i ]['model'] = model
if hasattr( model, 'coef_' ):
coef = model.coef_
if type(coef) == np.ndarray:
coef = coef.tolist()
baselines[ i ]['coef'] = coef
# Get rank of ground truth
'''abs_rank, rel_rank, closest_dist, closest_shapelet = self.find_ground_truth_rank( self.shapelets, model.coef_[0], ground_truth, shapelet_eval )
baselines[ i ]['rel_rank_gt'] = str(rel_rank)
baselines[ i ]['abs_rank_gt'] = str(abs_rank)
baselines[ i ]['min_dist_gt'] = str(closest_dist)
baselines[ i ]['min_dist_gt_seq'] = closest_shapelet'''
# Get distance between top k shapelets by coef
# Was only necessary for elastic net and lasso, dumb work around
if i == '2' or i == "3":
sorted_idx = np.argsort( np.array(model.coef_)**2 )
else:
sorted_idx = np.argsort( np.array(model.coef_[0])**2 )
model_top_k_ss = np.take( self.shapelets, sorted_idx[-k:], axis=0 ) #self.shapelets comes from the fit transform function
model_distances = []
model_aucs = []
model_p_val = []
model_acc = []
model_prec = []
model_recall = []
model_top_candidates = []
shapelet_eval = ShapeletEval( X_train, y_train, X_train, y_train )
for model_shapelet in model_top_k_ss:
if type(model_shapelet) == np.ndarray:
model_shapelet = model_shapelet.tolist()
model_top_candidates.append( model_shapelet )
model_distances.append( self._subsequence_dist( model_shapelet, ground_truth ) )
#model_aucs.append( shapelet_eval.get_shapelet_auc(X_test, y_test, model_shapelet) )
all_metrics = shapelet_eval.evaluate( model_shapelet )
sorted_pvals = sorted( all_metrics, key=lambda x: x['p_val'] )
model_p_val.append( sorted_pvals[0]['p_val'] )
model_acc.append( sorted_pvals[0]['acc'] )
model_prec.append( sorted_pvals[0]['prec'] )
model_recall.append( sorted_pvals[0]['recall'] )
baselines[i]['top_k_gt_distances'] = model_distances
#baselines[i]['top_k_aucs'] = model_aucs
baselines[i]['top_k_pvals'] = model_p_val
baselines[i]['top_k_acc'] = model_acc
baselines[i]['top_k_prec'] = model_prec
baselines[i]['top_k_recall'] = model_recall
baselines[i]['top_candidates'] = model_top_candidates
#Create plots
logging.info( "Create Plots" )
plt.close('all')
number_of_baselines = len(baselines.keys())
f, axarr = plt.subplots(number_of_baselines+2, sharex=False, figsize=(7,7))
axarr[0].plot(np.arange(len(ground_truth)), ground_truth)
if len(ssm['top_candidates']) > 0:
for cand in ssm['top_candidates']:
axarr[1].plot(np.arange(len(cand)), cand)
#ssm_mss = ssm['top_candidates'][0]
for i in np.arange( number_of_baselines ):
for cand in baselines[str(i)]['top_candidates']:
axarr[i+2].plot(np.arange(len(cand)), cand)
#top_cand_model_0 = baselines[str(0)]['top_candidates'][ len(baselines[str(0)]['top_candidates'])-1 ]
'''for cand in baselines[str(1)]['top_candidates']:
axarr[3].plot(np.arange(len(cand)), cand)
for cand in baselines[str(2)]['top_candidates']:
axarr[4].plot(np.arange(len(cand)), cand)'''
#top_cand_model_1 = baselines[str(1)]['top_candidates'][ len(baselines[str(1)]['top_candidates'])-1 ]
#axarr[3].plot(np.arange(len(top_cand_model_1)), top_cand_model_1)
plt.savefig( join( result_path, 'plots', "{}_{}.png".format(name, iteration)) )
return baselines, ssm
def find_ground_truth_rank( self, shapelet_list: list, coef: list, ground_truth: list, shapelet_eval ):
closest_shapelet = []
closest_idx = 0
closest_dist = np.inf
for i, shapelet in enumerate(shapelet_list):
d = shapelet_eval._subsequence_dist( shapelet, ground_truth )
if d < closest_dist:
closest_shapelet = shapelet
closest_dist = d
closest_idx = i
sorted_coef = np.argsort( np.array(coef)**2 )
abs_rank = sorted_coef[ closest_idx ]
rel_rank = sorted_coef[ closest_idx ] / float( len(sorted_coef) )
return abs_rank, rel_rank, closest_dist, closest_shapelet
def cross_validate_general( self, model, X_train: np.ndarray, y_train: list, n_splits: int = 5 ):
if not isinstance(X_train, np.ndarray):
X_train = np.array(X_train)
if not isinstance(y_train, np.ndarray):
y_train = np.array(y_train)
skf = StratifiedKFold(n_splits=n_splits)
cross_val_metrics = []
current_fold = 1
for train_index, val_index in skf.split(X_train, y_train):
logging.info( "Fitting model for fold {} with {} training and {} validation samples".format(current_fold, len(train_index), len(val_index)) )
model.fit( X_train[train_index], y_train[train_index] )
if hasattr( model, 'coef_' ):
# Get distance between top k shapelets by coef
sorted_idx = np.argsort( np.array(model.coef_[0])**2 )
logging.info( "Choosing most significant shapelet for evaluation metrics" )
most_sig_shapelet = np.take( self.shapelets, sorted_idx[ len(sorted_idx)-1 ], axis=0 )
# create instance of shapeletEval with current training/test split
shapelet_eval = ShapeletEval( X_train[train_index], y_train[train_index], X_train[val_index], y_train[val_index] )
stats = shapelet_eval.evaluate( most_sig_shapelet )
# calculate acc, p-val, f2, prec, recall, ...
cross_val_metrics.append(stats)
else:
logging.info( "Encountered classifier without coef_ attribute. Currently not supported for evaluation." )
current_fold += 1
return cross_val_metrics
def cross_validate_ssm( self, X_train: np.ndarray, y_train: list, name: str, result_path: str, stride: int = 1, min_len: int = 10, max_len: int = 10, n_splits: int = 5 ):
if not isinstance(X_train, np.ndarray):
X_train = np.array(X_train)
if not isinstance(y_train, np.ndarray):
y_train = np.array(y_train)
skf = StratifiedKFold(n_splits=n_splits)
cross_val_metrics = []
current_fold = 1
for train_index, val_index in skf.split(X_train, y_train):
# write to file system tmp
train_x_file_name = "{}-fold_tmp_{}".format(current_fold, name)
file_path = join( result_path, train_x_file_name )
X_y_merged = [ np.concatenate([[ y_train[idx] ], X_train[ idx ]]) for idx in train_index ]
np.savetxt( file_path, X_y_merged, delimiter=',' )
# run ssm
logging.info( "Running SSM for fold {} with {} training and {} validation samples".format(current_fold, len(train_index), len(val_index)) )
os.system("../ssm.sh -m {} -M {} -s {} -r {} -t {} -o {} -n {}".format( min_len, max_len, stride, file_path, file_path, result_path, train_x_file_name) )
# read resulting shapelets
with open( join( result_path, "{}.json".format(train_x_file_name) ) , 'r' ) as f:
ssm_result = json.load( f )
# create instance of shapeletEval with current training/test split
shapelet_eval = ShapeletEval( X_train[train_index], y_train[train_index], X_train[val_index], y_train[val_index] )
logging.info( "{} significant shapelet(s) found.".format(len(ssm_result['shapelets'])) )
if len(ssm_result['shapelets']) > 0:
ssm_shapelets_result = sorted( ssm_result['shapelets'], key=lambda x: x['p_val'])
logging.info( "Choosing most significant shapelet for evaluation metrics" )
most_sig_shapelet = ssm_shapelets_result[0]['shapelet']
stats = shapelet_eval.evaluate( most_sig_shapelet )
# calculate acc, p-val, f2, prec, recall, ...
cross_val_metrics.append(stats)
current_fold += 1
return cross_val_metrics
def run_ssm(self, train_path: str, test_path: str, result_path: str, name: str, min_len: int = 10, max_len: int = 10, stride: int = 1):
self.filename = "{}_{}_{}".format(name, min_len, max_len )
os.system("../ssm.sh -m {} -M {} -s {} -r {} -t {} -o {} -n {}".format( min_len, max_len, stride, train_path, test_path, result_path, self.filename) )
with open( join( result_path, "{}.json".format( self.filename ) ) , 'r' ) as f:
ssm_result = json.load( f )
return sorted( ssm_result['shapelets'], key=lambda x: x['p_val']), ssm_result['parameters']
def generate_candidates( self, data: np.ndarray, min_len: int = 10, max_len: int = 12, stride: int = 1 ):
data = SSMHelper._get_ndarray(data)
if max_len == 0:
max_len = min_len
print(max_len)
print(min_len)
pool = []
l = max_len
logging.info("Generate all possible candidates...")
while l >= min_len:
for i, time_series in enumerate(data):
for window_start in range(0, len(time_series) - l + 1, stride):
as_list = time_series[window_start:window_start + l].tolist()
if as_list not in pool:
pool.append(as_list)
l = l - 1
logging.info("generated {0} candidates".format(len(pool)))
return np.array(pool)
@staticmethod
def compute_info_content(y):
"""
        Compute the information content Info(D).
"""
y_copy = y.copy()
y_copy = np.concatenate([y_copy, [1,0]])
class_counts = np.unique( y_copy, return_counts=True )[1]
class_wise_prop = class_counts/float( sum(class_counts) )
log_prob = np.log2( class_wise_prop )
return -sum( ( class_wise_prop * log_prob ) )
def compute_info_a(self, y_split_left, y_split_right):
"""
compute conditional information content Info_A(D).
"""
left_class = (len(y_split_left) / float(self.dataset_length)) * SSMHelper.compute_info_content( y_split_left )
right_class = (len(y_split_right) / float(self.dataset_length)) * SSMHelper.compute_info_content( y_split_right )
return left_class + right_class
def compute_info_gain(self, y_split_left, y_split_right):
"""
compute information gain(A) = Info(D) - Info_A(D)
"""
return self.info_content - self.compute_info_a( y_split_left, y_split_right )
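    @staticmethod
    def _info_gain_example(y_split_left, y_split_right):
        """
        Illustrative sketch added for clarity (not used by the class): the same
        information gain Info(D) - Info_A(D) computed directly with numpy,
        without the [1, 0] padding applied in compute_info_content above.
        """
        def info(y):
            _, counts = np.unique(y, return_counts=True)
            p = counts / float(counts.sum())
            return -np.sum(p * np.log2(p))
        y_all = np.concatenate([y_split_left, y_split_right])
        n = float(len(y_all))
        info_a = (len(y_split_left) / n) * info(y_split_left) \
                 + (len(y_split_right) / n) * info(y_split_right)
        return info(y_all) - info_a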
def calc_distance_parallel( self, args ):
logging.info( "Calculate distances for shapelet chunk {}".format( args['chunk_idx'] ) )
shapelets = args['shapelets']
data = args['data']
y_data = args['y_data']
result = []
encoded_data = []
threshold_list= []
for i, shapelet in enumerate(shapelets):
shapelet_distances = []
for ts in data:
dist = self._subsequence_dist( ts, shapelet )
shapelet_distances.append( dist )
# Get mean splitpoints
splitpoints = self._get_splitpoints( shapelet_distances )
shapelet_distances = np.array( shapelet_distances )
# Iterate over splitpoints to find optimal information gain only if we did not fit data before
if len( self.chunked_thresholds ) == 0:
bsf_gain = 0
bsf_splitpoint = 0
for splitpoint in splitpoints:
left_labels = y_data[ shapelet_distances <= splitpoint ]
right_labels = y_data[ ~(shapelet_distances <= splitpoint) ]
info_gain = self.compute_info_gain( left_labels, right_labels )
if info_gain > bsf_gain:
bsf_gain = info_gain
bsf_splitpoint = splitpoint
threshold_list.append( [bsf_splitpoint] )
encoded_data.append( shapelet_distances )
# Simply use mean as threshold
#if len( self.chunked_thresholds ) == 0:
# threshold_list.append( [np.mean( shapelet_distances )] )
#encoded_data.append( shapelet_distances )
threshold_list = np.array(threshold_list)
encoded_data = np.array(encoded_data)
# Resulting matrices have number of shapelet rows and number of time series columns
assert encoded_data.shape == ( len(shapelets), len(data) )
if len( self.chunked_thresholds ) == 0:
encoded_data = (encoded_data > threshold_list).astype(int).T
else:
encoded_data = (encoded_data > self.chunked_thresholds[args['chunk_idx']]).astype(int).T
return { 'chunk_idx': args['chunk_idx'], 'encoded_data': encoded_data, 'threshold_list': threshold_list }
def _get_splitpoints( self, distances: list ):
logging.debug( "Get splitpoints." )
idx_distances =
|
np.argsort(distances)
|
numpy.argsort
|
print("tool.py...import keras")
import numpy as np
import keras
print("import keras")
import configuration
from real_time_detection.GUI.MLE_tool.FaceReader import FaceReader
from real_time_detection.GUI.MLE_tool.EEGReader import EEGReader
# from real_time_detection.GUI.MLE_tool.mytool import format_raw_images_data
# The function format_raw_images_data() should live in EmotionReader.py itself;
# if it were kept in mytool and mytool were imported in this file, it would
# create a circular import and break the file.
face_reader_obj = FaceReader(input_type='')
EEG_reader_obj = EEGReader(input_type='')
print("class EmotionReader")
# class EmotionReader
class EmotionReader:
'''
This class is used to return the emotion in real time.
    Attributes:
        input_type: 'file' indicates that the stream comes from a file;
            otherwise the stream comes from the default camera.
        face_model: the model for predicting emotion from faces.
        EEG_model: the model for predicting emotion from EEG.
        todiscrete_model: the model for transforming continuous emotion (valence
            and arousal) into discrete emotion.
        face_mean: the mean matrix for normalizing face data.
        EEG_mean: the mean matrix for normalizing EEG data.
        EEG_std: the std matrix for normalizing EEG data.
        valence_weigth: the valence weight for fusion.
        aoursal_weight: the arousal weight for fusion.
        cache_valence: the most recent valence, returned when there is no new
            data to predict from.
        cacha_arousal: the most recent arousal, used in the same way.
'''
def __init__(self, input_type):
'''
Arguments:
            input_type: 'file' indicates that the stream comes from a file;
                otherwise the stream comes from the default camera.
'''
self.graph = GET_graph()
self.input_type = input_type
self.face_model = keras.models.load_model(configuration.MODEL_PATH + 'CNN_face_regression.h5')
self.EEG_model = keras.models.load_model(configuration.MODEL_PATH + 'LSTM_EEG_regression.h5')
self.todiscrete_model = keras.models.load_model(configuration.MODEL_PATH + 'continuous_to_discrete.h5')
self.face_mean = np.load(configuration.DATASET_PATH + 'fer2013/X_mean.npy')
self.EEG_mean = np.load(configuration.MODEL_PATH + 'EEG_mean.npy')
self.EEG_std =
|
np.load(configuration.MODEL_PATH + 'EEG_std.npy')
|
numpy.load
|
import re
from pathlib import Path
import subprocess
import affine
import numpy as np
import pytest
from .conftest import requires_gdal31, requires_gdal35, gdal_version
import rasterio
from rasterio.drivers import blacklist
from rasterio.enums import MaskFlags, Resampling
from rasterio.env import Env
from rasterio.errors import RasterioIOError
def test_validate_dtype_None(tmpdir):
"""Raise TypeError if there is no dtype"""
name = str(tmpdir.join("lol.tif"))
with pytest.raises(TypeError):
rasterio.open(
name, 'w', driver='GTiff', width=100, height=100, count=1)
def test_validate_dtype_str(tmpdir):
name = str(tmpdir.join("lol.tif"))
with pytest.raises(TypeError):
rasterio.open(
name, 'w', driver='GTiff', width=100, height=100, count=1,
dtype='Int16')
def test_validate_dtype_float128(tmpdir, basic_image):
"""Raise TypeError if dtype is unsupported by GDAL."""
name = str(tmpdir.join('float128.tif'))
try:
basic_image_f128 = basic_image.astype('float128')
except TypeError:
pytest.skip("Unsupported data type")
height, width = basic_image_f128.shape
with pytest.raises(TypeError):
rasterio.open(name, 'w', driver='GTiff', width=width, height=height,
count=1, dtype=basic_image_f128.dtype)
def test_validate_count_None(tmpdir):
name = str(tmpdir.join("lol.tif"))
with pytest.raises(TypeError):
rasterio.open(
name, 'w', driver='GTiff', width=100, height=100, # count=None
dtype=rasterio.uint8)
def test_no_crs(tmpdir):
# A dataset without crs is okay.
name = str(tmpdir.join("lol.tif"))
with rasterio.open(
name, 'w', driver='GTiff', width=100, height=100, count=1,
dtype=rasterio.uint8) as dst:
dst.write(np.ones((100, 100), dtype=rasterio.uint8), indexes=1)
@pytest.mark.gdalbin
def test_context(tmpdir):
name = Path(str(tmpdir.join("test_context.tif"))).as_posix()
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=1,
dtype=rasterio.ubyte) as s:
assert s.name == name
assert s.driver == 'GTiff'
assert not s.closed
assert s.count == 1
assert s.width == 100
assert s.height == 100
assert s.shape == (100, 100)
assert s.indexes == (1,)
assert repr(s) == "<open DatasetWriter name='%s' mode='w'>" % name
assert s.closed
assert s.count == 1
assert s.width == 100
assert s.height == 100
assert s.shape == (100, 100)
assert repr(s) == "<closed DatasetWriter name='%s' mode='w'>" % name
info = subprocess.check_output(["gdalinfo", name]).decode('utf-8')
assert "GTiff" in info
assert "Size is 100, 100" in info
assert "Band 1 Block=100x81 Type=Byte, ColorInterp=Gray" in info
@pytest.mark.gdalbin
def test_write_ubyte(tmpdir):
name = str(tmpdir.mkdir("sub").join("test_write_ubyte.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=1,
dtype=a.dtype) as s:
s.write(a, indexes=1)
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=127.000, Maximum=127.000, Mean=127.000, StdDev=0.000" in info
@pytest.mark.gdalbin
def test_write_sbyte(tmpdir):
name = str(tmpdir.mkdir("sub").join("test_write_sbyte.tif"))
a = np.ones((100, 100), dtype=rasterio.sbyte) * -33
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=1,
dtype=a.dtype) as dst:
dst.write(a, indexes=1)
with rasterio.open(name) as dst:
assert (dst.read() == -33).all()
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=-33.000, Maximum=-33.000, Mean=-33.000, StdDev=0.000" in info
assert 'SIGNEDBYTE' in info
@pytest.mark.gdalbin
def test_write_ubyte_multi(tmpdir):
name = str(tmpdir.mkdir("sub").join("test_write_ubyte_multi.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=1,
dtype=a.dtype) as s:
s.write(a, 1)
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=127.000, Maximum=127.000, Mean=127.000, StdDev=0.000" in info
@pytest.mark.gdalbin
def test_write_ubyte_multi_list(tmpdir):
name = str(tmpdir.mkdir("sub").join("test_write_ubyte_multi_list.tif"))
a = np.array([np.ones((100, 100), dtype=rasterio.ubyte) * 127])
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=1,
dtype=a.dtype) as s:
s.write(a, [1])
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=127.000, Maximum=127.000, Mean=127.000, StdDev=0.000" in info
@pytest.mark.gdalbin
def test_write_ubyte_multi_3(tmpdir):
name = str(tmpdir.mkdir("sub").join("test_write_ubyte_multi_list.tif"))
arr = np.array(3 * [np.ones((100, 100), dtype=rasterio.ubyte) * 127])
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=3,
dtype=arr.dtype) as s:
s.write(arr)
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=127.000, Maximum=127.000, Mean=127.000, StdDev=0.000" in info
@pytest.mark.gdalbin
def test_write_float(tmpdir):
name = str(tmpdir.join("test_write_float.tif"))
a = np.ones((100, 100), dtype=rasterio.float32) * 42.0
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=2,
dtype=rasterio.float32) as s:
assert s.dtypes == (rasterio.float32, rasterio.float32)
s.write(a, indexes=1)
s.write(a, indexes=2)
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=42.000, Maximum=42.000, Mean=42.000, StdDev=0.000" in info
@pytest.mark.gdalbin
def test_write_crs_transform(tmpdir):
name = str(tmpdir.join("test_write_crs_transform.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
transform = affine.Affine(300.0379266750948, 0.0, 101985.0,
0.0, -300.041782729805, 2826915.0)
with rasterio.open(
name,
"w",
driver="GTiff",
width=100,
height=100,
count=1,
crs={
"units": "m",
"no_defs": True,
"datum": "WGS84",
"proj": "utm",
"zone": 18,
},
transform=transform,
dtype=rasterio.ubyte,
) as s:
s.write(a, indexes=1)
assert s.crs.to_epsg() == 32618
info = subprocess.check_output(["gdalinfo", name]).decode('utf-8')
# make sure that pixel size is nearly the same as transform
# (precision varies slightly by platform)
assert re.search(r'Pixel Size = \(300.03792\d+,-300.04178\d+\)', info)
@pytest.mark.gdalbin
def test_write_crs_transform_affine(tmpdir):
name = str(tmpdir.join("test_write_crs_transform.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
transform = affine.Affine(300.0379266750948, 0.0, 101985.0,
0.0, -300.041782729805, 2826915.0)
with rasterio.open(
name,
"w",
driver="GTiff",
width=100,
height=100,
count=1,
crs={
"units": "m",
"no_defs": True,
"datum": "WGS84",
"proj": "utm",
"zone": 18,
},
transform=transform,
dtype=rasterio.ubyte,
) as s:
s.write(a, indexes=1)
assert s.crs.to_epsg() == 32618
info = subprocess.check_output(["gdalinfo", name]).decode('utf-8')
# make sure that pixel size is nearly the same as transform
# (precision varies slightly by platform)
assert re.search(r'Pixel Size = \(300.03792\d+,-300.04178\d+\)', info)
@pytest.mark.gdalbin
def test_write_crs_transform_2(tmpdir, monkeypatch):
"""Using 'EPSG:32618' as CRS."""
monkeypatch.delenv('GDAL_DATA', raising=False)
name = str(tmpdir.join("test_write_crs_transform.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
transform = affine.Affine(300.0379266750948, 0.0, 101985.0,
0.0, -300.041782729805, 2826915.0)
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=1,
crs='EPSG:32618',
transform=transform,
dtype=rasterio.ubyte) as s:
s.write(a, indexes=1)
assert s.crs.to_epsg() == 32618
info = subprocess.check_output(["gdalinfo", name]).decode('utf-8')
assert 'UTM zone 18N' in info
# make sure that pixel size is nearly the same as transform
# (precision varies slightly by platform)
assert re.search(r'Pixel Size = \(300.03792\d+,-300.04178\d+\)', info)
@pytest.mark.gdalbin
def test_write_crs_transform_3(tmpdir):
"""Using WKT as CRS."""
name = str(tmpdir.join("test_write_crs_transform.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
transform = affine.Affine(300.0379266750948, 0.0, 101985.0,
0.0, -300.041782729805, 2826915.0)
wkt = 'PROJCS["WGS 84 / UTM zone 18N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-75],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32618"]]'
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=1,
crs=wkt,
transform=transform,
dtype=rasterio.ubyte) as s:
s.write(a, indexes=1)
assert s.crs.to_epsg() == 32618
info = subprocess.check_output(["gdalinfo", name]).decode('utf-8')
assert 'UTM zone 18N' in info
# make sure that pixel size is nearly the same as transform
# (precision varies slightly by platform)
assert re.search(r'Pixel Size = \(300.03792\d+,-300.04178\d+\)', info)
@pytest.mark.gdalbin
def test_write_meta(tmpdir):
name = str(tmpdir.join("test_write_meta.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
meta = dict(driver='GTiff', width=100, height=100, count=1)
with rasterio.open(name, 'w', dtype=a.dtype, **meta) as s:
s.write(a, indexes=1)
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=127.000, Maximum=127.000, Mean=127.000, StdDev=0.000" in info
@pytest.mark.gdalbin
def test_write_nodata(tmpdir):
name = str(tmpdir.join("test_write_nodata.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
with rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=2,
dtype=a.dtype, nodata=0) as s:
s.write(a, indexes=1)
s.write(a, indexes=2)
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "NoData Value=0" in info
def test_guard_nodata(tmpdir):
name = str(tmpdir.join("test_guard_nodata.tif"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
with pytest.raises(ValueError):
rasterio.open(
name, 'w',
driver='GTiff', width=100, height=100, count=2,
dtype=a.dtype, nodata=-1)
def test_write_noncontiguous(tmpdir):
name = str(tmpdir.join("test_write_nodata.tif"))
ROWS = 4
COLS = 10
BANDS = 6
# Create a 3-D random int array (rows, columns, bands)
total = ROWS * COLS * BANDS
arr = np.random.randint(
0, 10, size=total).reshape(
(ROWS, COLS, BANDS), order='F').astype(np.int32)
kwargs = {
'driver': 'GTiff',
'width': COLS,
'height': ROWS,
'count': BANDS,
'dtype': rasterio.int32
}
with rasterio.open(name, 'w', **kwargs) as dst:
for i in range(BANDS):
dst.write(arr[:, :, i], indexes=i + 1)
@pytest.mark.parametrize("driver", list(blacklist.keys()))
def test_write_blacklist(tmpdir, driver):
# Skip if we don't have driver support built in.
with Env() as env:
if driver not in env.drivers():
pytest.skip()
name = str(tmpdir.join("data.test"))
with pytest.raises(RasterioIOError) as exc_info:
rasterio.open(name, 'w', driver=driver, width=100, height=100,
count=1, dtype='uint8')
exc = str(exc_info.value)
assert exc.startswith("Blacklisted")
def test_creation_metadata_deprecation(tmpdir):
name = str(tmpdir.join("test.tif"))
with rasterio.open(name, 'w', driver='GTiff', height=1, width=1, count=1, dtype='uint8', BIGTIFF='YES') as dst:
dst.write(np.ones((1, 1, 1), dtype='uint8'))
assert dst.tags(ns='rio_creation_kwds') == {}
def test_wplus_transform(tmpdir):
"""Transform is set on a new dataset created in w+ mode (see issue #1359)"""
name = str(tmpdir.join("test.tif"))
transform = affine.Affine.translation(10.0, 10.0) * affine.Affine.scale(0.5, -0.5)
with rasterio.open(name, 'w+', driver='GTiff', crs='epsg:4326', transform=transform, height=10, width=10, count=1, dtype='uint8') as dst:
dst.write(np.ones((1, 10, 10), dtype='uint8'))
assert dst.transform == transform
def test_write_no_driver__issue_1203(tmpdir):
name = str(tmpdir.join("test.invalid"))
with pytest.raises(ValueError), rasterio.open(name, 'w', height=1, width=1, count=1, dtype='uint8'):
print("TEST FAILED IF THIS IS REACHED.")
@pytest.mark.parametrize("mode", ["w", "w+"])
def test_require_width(tmpdir, mode):
"""width and height are required for w and w+ mode"""
name = str(tmpdir.join("test.tif"))
with pytest.raises(TypeError):
with rasterio.open(name, mode, driver="GTiff", height=1, count=1, dtype='uint8'):
print("TEST FAILED IF THIS IS REACHED.")
def test_too_big_for_tiff(tmpdir):
"""RasterioIOError is raised when TIFF is too big"""
name = str(tmpdir.join("test.tif"))
with pytest.raises(RasterioIOError):
rasterio.open(name, 'w', driver='GTiff', height=100000, width=100000, count=1, dtype='uint8', BIGTIFF=False)
@pytest.mark.parametrize("extension, driver", [
('tif', 'GTiff'),
('tiff', 'GTiff'),
('png', 'PNG'),
('jpg', 'JPEG'),
('jpeg', 'JPEG'),
])
def test_write__autodetect_driver(tmpdir, extension, driver):
name = str(tmpdir.join("test.{}".format(extension)))
with rasterio.open(name, 'w', height=1, width=1, count=1, dtype='uint8') as rds:
assert rds.driver == driver
@pytest.mark.parametrize("driver", ["PNG", "JPEG"])
def test_issue2088(tmpdir, capsys, driver):
"""Write a PNG or JPEG without error messages"""
with rasterio.open(
str(tmpdir.join("test")),
"w",
driver=driver,
dtype="uint8",
count=1,
height=256,
width=256,
transform=affine.Affine.identity(),
) as src:
data = np.ones((256, 256), dtype=np.uint8)
src.write(data, 1)
captured = capsys.readouterr()
assert "ERROR 4" not in captured.err
assert "ERROR 4" not in captured.out
@requires_gdal31
def test_write_cog(tmpdir, path_rgb_byte_tif):
"""Show resolution of issue #2102"""
with rasterio.open(path_rgb_byte_tif) as src:
profile = src.profile
profile.update(driver="COG", extent=src.bounds, resampling=Resampling.bilinear)
with rasterio.open(str(tmpdir.join("test.tif")), "w", **profile) as cog:
cog.write(src.read())
def test_write_masked(tmp_path):
"""Verify that masked arrays are filled when written."""
data = np.ma.masked_less_equal(np.array([[0, 1, 2]], dtype="uint8"), 1)
data.fill_value = 3
with rasterio.open(
tmp_path / "test.tif",
"w",
driver="GTiff",
count=1,
width=3,
height=1,
dtype="uint8",
) as dst:
dst.write(data, indexes=1)
# Expect the masked array's fill_value in the first two pixels.
with rasterio.open(tmp_path / "test.tif") as src:
assert src.mask_flag_enums == ([MaskFlags.all_valid],)
arr = src.read()
assert list(arr.flatten()) == [3, 3, 2]
def test_write_masked_nodata(tmp_path):
"""Verify that masked arrays are filled with nodata when written."""
data = np.ma.masked_less_equal(np.array([[0, 1, 2]], dtype="uint8"), 1)
with rasterio.open(
tmp_path / "test.tif",
"w",
driver="GTiff",
count=1,
width=3,
height=1,
dtype="uint8",
nodata=0,
) as dst:
dst.write(data, indexes=1)
# Expect the dataset's nodata value in the first two pixels.
with rasterio.open(tmp_path / "test.tif") as src:
assert src.mask_flag_enums == ([MaskFlags.nodata],)
arr = src.read()
assert list(arr.flatten()) == [0, 0, 2]
def test_write_masked_true(tmp_path):
"""Verify that a mask is written when we write a masked array."""
data = np.ma.masked_less_equal(
|
np.array([[0, 1, 2]], dtype="uint8")
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 18:05:51 2019
@author: ben91
"""
from SimulationClasses import *
from TimeSteppingMethods import *
from FiniteVolumeSchemes import *
from FluxSplittingMethods import *
from InitialConditions import *
from Equations import *
from wholeNetworks import *
from LoadDataMethods import *
from keras import *
from keras.models import *
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anime
from matplotlib import style
from matplotlib import rcParams
import math
style.use('fivethirtyeight')
rcParams.update({'figure.autolayout': True})
'''
# Import modules/packages
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import math # needed for math.floor in specAnalysis below
plt.close('all') # close all open figures
# Define and set custom LaTeX style
styleNHN = {
"pgf.rcfonts":False,
"pgf.texsystem": "pdflatex",
"text.usetex": False, #TODO: might need to change this to false
"font.family": "serif"
}
mpl.rcParams.update(styleNHN)
xx = np.linspace(0,1,100)
yy = xx**2
# Plotting defaults
ALW = 0.75 # AxesLineWidth
FSZ = 12 # Fontsize
LW = 2 # LineWidth
MSZ = 5 # MarkerSize
SMALL_SIZE = 8 # Tiny font size
MEDIUM_SIZE = 10 # Small font size
BIGGER_SIZE = 14 # Large font size
plt.rc('font', size=FSZ) # controls default text sizes
plt.rc('axes', titlesize=FSZ) # fontsize of the axes title
plt.rc('axes', labelsize=FSZ) # fontsize of the x and y labels
plt.rc('xtick', labelsize=FSZ) # fontsize of the x-tick labels
plt.rc('ytick', labelsize=FSZ) # fontsize of the y-tick labels
plt.rc('legend', fontsize=FSZ) # legend fontsize
plt.rc('figure', titlesize=FSZ) # fontsize of the figure title
plt.rcParams['axes.linewidth'] = ALW # sets the default axes linewidth to ``ALW''
plt.rcParams["mathtext.fontset"] = 'cm' # Computer Modern mathtext font (applies when ``usetex=False'')
def discTrackStep(c,x,t,u,P,title, a, b, err):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
c: shock speed
x: x coordinates
t: time coordinates
u: velocity
P: periods advected for
title: plot title
a, b: x-axis limits for the plot
err: plot error if True, otherwise plot solution
'''
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - c*tg
plt.figure()
if err:
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = eex-u
'''
plt.contourf(xp,tg,u)
plt.colorbar()
plt.title(title)
plt.figure()
plt.contourf(xp,tg,eex)
'''
for i in range(-2,int(P)):
plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.7,20))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
else:
for i in range(-2,int(P)+1):
plt.contourf(xp+i*L,tg,u,np.linspace(-0.2,1.2,57))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
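# Illustrative usage (not in the original script): for a unit-speed advected
# solution u computed on coordinates x and times t, something like
#   discTrackStep(1.0, x, t, u, P=2, title='NN scheme', a=0, b=2, err=True)
# contours the pointwise error in the shock-tracking frame x - ct.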
def intError(c,x,t,u,title):
L = x[-1] - x[0] + x[1] - x[0]
dx = x[1] - x[0]
nx = np.size(x)
xg, tg = np.meshgrid(t,x)
xp = xg - c*tg
ons = np.ones_like(xp)
#eex = np.roll(np.greater(ons,xp%L),-1,axis = 0)
eex1 = xp/dx
eex1[eex1>=1] = 1
eex1[eex1<=0] = 0
eex2 = (-xp%L-L/2)/dx
eex2[eex2>=1] = 1
eex2[eex2<=0] = 0
eex3 = (-xp%L-L/2)/dx
eex3[eex3>(nx/2-1)] = -(eex3[eex3>(nx/2-1)]-nx/2)
eex3[eex3>=1] = 1
eex3[eex3<=0] = 0
er = eex3-u
ers = np.power(er,2)
ers0 = np.expand_dims(ers[0,:],axis = 0)
ers_aug = np.concatenate((ers,ers0), axis = 0)
err_int = np.trapz(ers_aug, dx = dx, axis = 0)
plt.plot(t,np.sqrt(err_int),'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('L2 Error')
#plt.ylim([0,0.02])
def totalVariation(t,u,title):#plot total variation over time
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
#plt.figure()
plt.plot(t,tv,'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('Total Variation')
#plt.ylim((1.999,2.01))
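# Worked example (for intuition): a periodic step profile that jumps 0 -> 1 and
# back to 0 once per period has np.sum(np.abs(u - np.roll(u, 1, axis=0))) == 2
# at every time, so growth of tv above 2 signals spurious oscillations.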
def totalEnergy(t,u, dx, title):#plot total energy
u0 = np.expand_dims(u[0,:],axis = 0)
u_aug = np.concatenate((u,u0), axis = 0)
energy = 0.5*np.trapz(np.power(u_aug,2), dx = dx, axis = 0)
plt.figure()
plt.plot(t,energy)
plt.title(title)
plt.xlabel('Time')
plt.ylabel('1/2*integral(u^2)')
plt.ylim([0,np.max(energy)*1.1])
def mwn(FVM):
'''
plot modified wavenumber of a finite volume scheme
Inputs:
FVM: finite volume method object to test
'''
nx = 100
nt = 10
L = 2
T = 0.00001
x = np.linspace(0,L,nx,endpoint=False)
t = np.linspace(0,T,nt)
dx = x[1]-x[0]
dt = t[1]-t[0]
sigma = T/dx
EQ = adv()
FS = LaxFriedrichs(EQ, 1)
RK = SSPRK3()
NK = int((np.size(x)-1)/2)
mwn = np.zeros(NK,dtype=np.complex_)
wn = np.zeros(NK)
A = 1
for k in range(2,NK):
IC = cosu(A,k/L)
testCos = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_cos = testCos.run()
u_f_cos = u_cos[:,0]
u_l_cos = u_cos[:,-1]
IC = sinu(A,k/L)
testSin = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_sin = testSin.run()
u_f_sin = u_sin[:,0]
u_l_sin = u_sin[:,-1]
u_h0 =np.fft.fft(u_f_cos+complex(0,1)*u_f_sin)
u_h = np.fft.fft(u_l_cos+complex(0,1)*u_l_sin)
v_h0 = u_h0[k]
v_h = u_h[k]
mwn[k] = -1/(complex(0,1)*sigma)*np.log(v_h/v_h0)
wn[k] = 2*k*np.pi/nx
plt.plot(wn,np.real(mwn))
#plt.hold
plt.plot(wn,wn)
plt.xlabel(r'$\phi$')
plt.ylabel('Modified Wavenumber (real part)')
plt.figure()
plt.plot(wn,np.imag(mwn))
plt.xlabel(r'$\phi$')
plt.ylabel('Modified Wavenumber (imaginary part)')
plt.figure()
plt.semilogy(wn,abs(wn-np.real(mwn)))
return wn
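# Illustrative usage (assumes the scheme classes imported above): e.g.
#   wn = mwn(WENO5())
# plots the real and imaginary parts of the modified wavenumber and the
# semilog dispersion error for the WENO5 finite volume scheme.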
def animateSim(x,t,u,pas):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
t: t coordinates
u: velocity
pas: how long to pause between frames
'''
for i in range(0,len(t)):
plt.plot(x,u[:,i])
plt.pause(pas)
plt.clf()
plt.plot(x,u[:,-1])
def specAnalysis(model, u, RKM,WENONN, NNNN, h, giveModel, makePlots):
'''
perform spectral analysis of a finite volume method when operating on a specific waveform
Finds eigenvalues of the spatial operator, then uses them to compute the maximum amplification factor
Inputs:
model: WENO5 neural network that will be analyzed
u: the data that is the input to the method
RKM: time stepping method object to analyze for space-time coupling
WENONN: name of the layer in model that gives the WENO5 coefficients (or the sub-model itself if giveModel is True)
NNNN: name of the layer in model that gives the NN coefficients (or the sub-model itself if giveModel is True)
h: CFL number used when evaluating the amplification factor
giveModel: whether we are passing sub-models (True) or layer names (False)
makePlots: plotting verbosity (0 = no plots, 1 = summary plots, 2 = additional detail)
'''
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
N = np.size(u)
M = 5#just assume stencil size is 5 for now
sortedU = np.zeros((N,M)) + 1j*np.zeros((N,M))
for i in range(0,M):#assume scheme is upwind or unbiased
sortedU[:,i] = np.roll(u,math.floor(M/2)-i)
def scale(sortedU, NNNN):
min_u = np.amin(sortedU,1)
max_u = np.amax(sortedU,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(sortedU[:,2])
u_tmp[:] = sortedU[:,2]
#for i in range(0,5):
# sortedU[:,i] = (sortedU[:,i]-min_u)/(max_u-min_u)
cff = NNNN.predict(sortedU)#compute \Delta u
cff[const_n,:] = np.array([1/30,-13/60,47/60,9/20,-1/20])
#print('fl: ', fl)
return cff
if(np.sum(np.iscomplex(u))>=1):
wec = WENONN.predict(np.real(sortedU)) + WENONN.predict(np.imag(sortedU))*1j
nnc = scale(np.real(sortedU), NNNN) + scale(np.imag(sortedU), NNNN)*1j
op_WENO5 = np.zeros((N,N)) + np.zeros((N,N))*1j
op_NN = np.zeros((N,N)) + np.zeros((N,N))*1j
else:
wec = WENONN.predict(np.real(sortedU))
nnc = scale(np.real(sortedU), NNNN)
op_WENO5 = np.zeros((N,N))
op_NN = np.zeros((N,N))
for i in range(0,N):
for j in range(0,M):
op_WENO5[i,(i+j-int(M/2))%N] -= wec[i,j]
op_WENO5[i,(i+j-int(M/2)-1)%N] += wec[(i-1)%N,j]
op_NN[i,(i+j-int(M/2))%N] -= nnc[i,j]
op_NN[i,(i+j-int(M/2)-1)%N] += nnc[(i-1)%N,j]
#print(i,': ', op_WENO5[i,:])
WEeigs, WEvecs = np.linalg.eig(op_WENO5)
NNeigs, NNvecs = np.linalg.eig(op_NN)
con_nn = np.linalg.solve(NNvecs, u)
#now do some rungekutta stuff
x = np.linspace(-3,3,301)
y = np.linspace(-3,3,301)
X,Y = np.meshgrid(x,y)
Z = X + Y*1j
g = abs(1 + Z + np.power(Z,2)/2 + np.power(Z,3)/6)
g_we = abs(1 + (h*WEeigs) + np.power(h*WEeigs,2)/2 + np.power(h*WEeigs,3)/6)
g_nn = abs(1 + (h*NNeigs) + np.power(h*NNeigs,2)/2 + np.power(h*NNeigs,3)/6)
#do some processing for that plot of the contributions vs the amplification factor
c_abs = np.abs(con_nn)
ords = np.argsort(c_abs)
g_sort = g_nn[ords]
c_sort = con_nn[ords]
c_norm = c_sort/np.linalg.norm(c_sort,1)
c_abs2 = np.abs(c_norm)
#do some processing for the most unstable mode
ordsG = np.argsort(g_nn)
unstb = NNvecs[:,ordsG[-1]]
if(makePlots>=1):
plt.figure()
plt.plot(np.sort(g_we),'.')
plt.plot(np.sort(g_nn),'.')
plt.legend(('WENO5','NN'))
plt.title('CFL = '+ str(h))
plt.xlabel('index')
plt.ylabel('|1+HL+(HL^2)/2+(HL^3)/6|')
plt.ylim([0,1.2])
plt.figure()
plt.plot(np.real(WEeigs),np.imag(WEeigs),'.')
plt.plot(np.real(NNeigs),np.imag(NNeigs),'.')
plt.title('Eigenvalues')
plt.legend(('WENO5','NN'))
plt.figure()
plt.plot(g_nn,abs(con_nn),'.')
plt.xlabel('Amplification Factor')
plt.ylabel('Contribution')
print('Max WENO g: ',np.max(g_we))
print('Max NN g: ',np.max(g_nn))
if(makePlots>=2):
plt.figure()
sml = 1E-2
plt.contourf(X, Y, g, [1-sml,1+sml])
plt.figure()
plt.plot(g_sort,c_abs2,'.')
plt.xlabel('Scaled Amplification Factor')
plt.ylabel('Contribution')
return g_nn, con_nn, unstb
#return np.max(g_we), np.max(g_nn)
#plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.025,20))
def specAnalysisData(model, u, RKM,WENONN, NNNN, CFL, giveModel):
nx, nt = np.shape(u)
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
maxWe = np.zeros(nt)
maxNN = np.zeros(nt)
for i in range(0,nt):
print(i)
maxWe[i], maxNN[i] = specAnalysis(model, u[:,i], RKM, WENONN, NNNN, CFL, True, False)
plt.figure()
plt.plot(maxWe)
plt.figure()
plt.plot(maxNN)
return maxWe, maxNN
def eigenvectorProj(model, u, WENONN, NNNN):
nx = np.shape(u)
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
def evalPerf(x,t,P,u,eex):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
t: time coordinates
P: periods advected for
u: velocity
eex: exact solution to compare against
Outputs:
tvm: max total variation in solution
swm: max shock width in solution
'''
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
tvm = np.max(tv)
u = np.transpose(u)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
#plot width of discontinuity over time for neural network and WENO5
'''
us = np.roll(u, 1, axis = 0)
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
plot width of discontinuity over time for neural network and WENO5
'''
u = np.transpose(u)
u_WE = np.transpose(u_WE)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
dx = x[1]-x[0]
'''
eex = (-xp%L-L/2)/dx
eex[eex>49] = -(eex[eex>49]-50)
eex[eex>=1] = 1
eex[eex<=0] = 0
'''
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
er_we = np.abs(eex-u_WE)
wdth = np.sum(np.greater(er,0.01),axis=1)*dx/2
wdth_we = np.sum(np.greater(er_we,0.01),axis=1)*dx/2
plt.figure()
plt.plot(t,wdth)
plt.plot(t,wdth_we)
plt.legend(('Neural Network','WENO5'))
plt.xlabel('t')
plt.ylabel('Discontinuity Width')
def convStudy():
'''
Test order of accuracy of an FVM
'''
nr = 21
errNN = np.zeros(nr)
errWE = np.zeros(nr)
errEN = np.zeros(nr)
dxs = np.zeros(nr)
for i in range(0,nr):
print(i)
nx = 10*np.power(10,0.1*i)
L = 2
x = np.linspace(0,L,int(nx),endpoint=False)
dx = x[1]-x[0]
FVM1 = NNWENO5dx(dx)
FVM2 = WENO5()
FVM3 = ENO3()
u = np.sin(4*np.pi*x) +
|
np.cos(4*np.pi*x)
|
numpy.cos
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive State Space Model Tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import AutoregressiveMovingAverageStateSpaceModel
from tensorflow_probability.python.sts import AutoregressiveStateSpaceModel
tfd = tfp.distributions
def arma_explicit_logp(y, ar_coefs, ma_coefs, level_scale):
"""Manual log-prob computation for arma(p, q) process."""
# Source: page 132 of
# http://www.ru.ac.bd/stat/wp-content/uploads/sites/25/2019/03/504_02_Hamilton_Time-Series-Analysis.pdf
p = len(ar_coefs)
q = len(ma_coefs)
t = len(y)
# For the first few steps of y, where previous values
# are not available, we model them as zero-mean with
# stddev `prior_scale`.
e = np.zeros([t])
for i in range(p):
zero_padded_y = np.zeros([p])
zero_padded_y[p - i:p] = y[:i]
pred_y = np.dot(zero_padded_y, ar_coefs[::-1])
e[i] = y[i] - pred_y
for i in range(p, len(y)):
pred_y = (np.dot(y[i - p:i], ar_coefs[::-1]) +
np.dot(e[i - q:i], ma_coefs[::-1]))
e[i] = y[i] - pred_y
lp = (-((t - p) / 2) * np.log(2 * np.pi)
- ((t - p) / 2) * np.log(level_scale ** 2)
- np.sum(e ** 2 / (2 * level_scale ** 2)))
return lp
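# Minimal sketch (not part of the original test suite): evaluating the manual
# ARMA(1, 1) log-prob on a tiny made-up series, purely for illustration.
def _example_arma_explicit_logp():
    y = np.array([0.5, -0.2, 0.1, 0.3])
    return arma_explicit_logp(y, ar_coefs=np.array([0.6]),
                              ma_coefs=np.array([0.2]), level_scale=1.0)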
class _AutoregressiveMovingAverageStateSpaceModelTest(test_util.TestCase):
def testEqualsAutoregressive(self):
# An ARMA(p, 0) process is just an AR(p) processes
num_timesteps = 10
observed_time_series = self._build_placeholder(
np.random.randn(num_timesteps, 1))
level_scale = self._build_placeholder(0.1)
# We'll test an AR1 process, and also (just for kicks) that the trivial
# embedding as an AR2 process gives the same model.
coefficients_order1 = np.array([1.]).astype(self.dtype)
coefficients_order2 = np.array([1., 1.]).astype(self.dtype)
ar1_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order1,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale]))
ar2_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order2,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
arma1_ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=coefficients_order1,
ma_coefficients=np.array([0.]).astype(self.dtype),
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
arma2_ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=coefficients_order2,
ma_coefficients=np.array([0.]).astype(self.dtype),
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
ar1_lp, arma1_lp, ar2_lp, arma2_lp = (
ar1_ssm.log_prob(observed_time_series),
arma1_ssm.log_prob(observed_time_series),
ar2_ssm.log_prob(observed_time_series),
arma2_ssm.log_prob(observed_time_series)
)
self.assertAllClose(ar1_lp, arma1_lp)
self.assertAllClose(ar2_lp, arma2_lp)
def testLogprobCorrectness(self):
# Compare the state-space model's log-prob to an explicit implementation.
num_timesteps = 10
observed_time_series_ =
|
np.random.randn(num_timesteps)
|
numpy.random.randn
|
from functools import lru_cache
import numpy as np
from barry.cosmology.pk2xi import PowerToCorrelationGauss
from barry.cosmology.power_spectrum_smoothing import validate_smooth_method, smooth
from barry.models.model import Model
from barry.models import PowerSpectrumFit
from scipy.interpolate import splev, splrep
from scipy import integrate
class CorrelationFunctionFit(Model):
""" A generic model for computing correlation functions."""
def __init__(self, name="BAO Correlation Polynomial Fit", smooth_type="hinton2017", fix_params=("om"), smooth=False, correction=None, isotropic=True):
""" Generic correlation function model
Parameters
----------
name : str, optional
Name of the model
smooth_type : str, optional
The sort of smoothing to use. Either 'hinton2017' or 'eh1998'
fix_params : list[str], optional
Parameter names to fix to their defaults. Defaults to just `[om]`.
smooth : bool, optional
Whether to generate a smooth model without the BAO feature. Defaults to `false`.
correction : `Correction` enum. Defaults to `Correction.SELLENTIN`.
"""
super().__init__(name, correction=correction, isotropic=isotropic)
self.parent = PowerSpectrumFit(fix_params=fix_params, smooth_type=smooth_type, correction=correction, isotropic=isotropic)
self.smooth_type = smooth_type.lower()
if not validate_smooth_method(smooth_type):
exit(0)
self.declare_parameters()
self.set_fix_params(fix_params)
# Set up data structures for model fitting
self.smooth = smooth
self.camb = None
self.PT = None
self.pk2xi = None
self.recon_smoothing_scale = None
self.cosmology = None
self.nmu = 100
self.mu = np.linspace(0.0, 1.0, self.nmu)
self.pk2xi_0 = None
self.pk2xi_2 = None
self.pk2xi_4 = None
def set_data(self, data):
""" Sets the models data, including fetching the right cosmology and PT generator.
Note that if you pass in multiple datas (ie a list with more than one element),
they need to have the same cosmology.
Parameters
----------
data : dict, list[dict]
A list of datas to use
"""
super().set_data(data)
self.pk2xi_0 = PowerToCorrelationGauss(self.camb.ks, ell=0)
self.pk2xi_2 = PowerToCorrelationGauss(self.camb.ks, ell=2)
self.pk2xi_4 = PowerToCorrelationGauss(self.camb.ks, ell=4)
def declare_parameters(self):
""" Defines model parameters, their bounds and default value. """
self.add_param("om", r"$\Omega_m$", 0.1, 0.5, 0.31) # Cosmology
self.add_param("alpha", r"$\alpha$", 0.8, 1.2, 1.0) # Stretch for monopole
self.add_param("b0", r"$b0$", 0.01, 10.0, 1.0) # Linear galaxy bias for monopole
if not self.isotropic:
self.add_param("epsilon", r"$\epsilon$", -0.2, 0.2, 0.0) # Stretch for multipoles
self.add_param("b2", r"$b2$", 0.01, 10.0, 1.0) # Linear galaxy bias for quadrupole
@lru_cache(maxsize=1024)
def compute_basic_power_spectrum(self, om):
""" Computes the smoothed linear power spectrum and the wiggle ratio.
Uses a fixed h0 as determined by the dataset cosmology.
Parameters
----------
om : float
The Omega_m value to generate a power spectrum for
Returns
-------
array
pk_smooth - The power spectrum smoothed out
array
pk_ratio - the ratio pk_lin / pk_smooth - 1.0
"""
# Get base linear power spectrum from camb
res = self.camb.get_data(om=om, h0=self.camb.h0)
pk_smooth_lin = smooth(self.camb.ks, res["pk_lin"], method=self.smooth_type, om=om, h0=self.camb.h0) # Get the smoothed power spectrum
pk_ratio = res["pk_lin"] / pk_smooth_lin - 1.0 # Get the ratio
return pk_smooth_lin, pk_ratio
@lru_cache(maxsize=32)
def get_alphas(self, alpha, epsilon):
""" Computes values of alpha_par and alpha_perp from the input values of alpha and epsilon
Parameters
----------
alpha : float
The isotropic dilation scale
epsilon: float
The anisotropic warping
Returns
-------
alpha_par : float
The dilation scale parallel to the line-of-sight
alpha_perp : float
The dilation scale perpendicular to the line-of-sight
"""
return alpha * (1.0 + epsilon) ** 2, alpha / (1.0 + epsilon)
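# Worked example: alpha = 1.0, epsilon = 0.01 gives
#   alpha_par  = 1.0 * 1.01**2 ~ 1.0201,  alpha_perp = 1.0 / 1.01 ~ 0.9901,
# i.e. a small anisotropic warping about an isotropic dilation of 1.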
@lru_cache(maxsize=32)
def get_sprimefac(self, epsilon):
""" Computes the prefactor to dilate a s value given epsilon, such that sprime = s * sprimefac * alpha
Parameters
----------
epsilon: float
The anisotropic warping
Returns
-------
sprimefac : np.ndarray
The mu dependent prefactor for dilating an s value
"""
musq = self.mu ** 2
epsilonsq = (1.0 + epsilon) ** 2
sprimefac = np.sqrt(musq * epsilonsq ** 2 + (1.0 - musq) / epsilonsq)
return sprimefac
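# Sanity check: epsilon = 0 gives sprimefac == 1 for every mu, so the
# dilation reduces to the purely isotropic sprime = alpha * s.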
@lru_cache(maxsize=32)
def get_muprime(self, epsilon):
""" Computes dilated values of mu given input values of epsilon for the correlation function
Parameters
----------
epsilon: float
The anisotropic warping
Returns
-------
muprime : np.ndarray
The dilated mu values
"""
musq = self.mu ** 2
muprime = self.mu / np.sqrt(musq + (1.0 - musq) / (1.0 + epsilon) ** 6)
return muprime
def compute_correlation_function(self, dist, p, smooth=False):
""" Computes the dilated correlation function multipoles at distance d given the supplied params
Parameters
----------
dist : np.ndarray
Array of distances in the correlation function to compute
p : dict
dictionary of parameter name to float value pairs
smooth : bool, optional
Whether or not to generate a smooth model without the BAO feature
Returns
-------
sprime : np.ndarray
distances of the computed xi
xi0 : np.ndarray
the model monopole interpolated to sprime.
xi2 : np.ndarray
the model quadrupole interpolated to sprime. Will be 'None' if the model is isotropic
"""
# Generate the power spectrum multipoles at the undilated k-values without shape additions
ks = self.camb.ks
kprime, pk0, pk2, pk4 = self.parent.compute_power_spectrum(ks, p, smooth=smooth, shape=False, dilate=False)
if self.isotropic:
sprime = p["alpha"] * dist
xi0 = p["b0"] * self.pk2xi_0.__call__(ks, pk0, sprime)
xi2 = None
xi4 = None
else:
# Construct the dilated 2D correlation function by splining the undilated multipoles. We could have computed these
# directly at sprime, but sprime depends on both s and mu, so splining is probably quicker
epsilon =
|
np.round(p["epsilon"], decimals=5)
|
numpy.round
|
import os
import pickle
import numpy as np
import torch
import lib
## Assumes:
## num_nan_policy=='mean'
## cat_nan_policy=='new'
## cat_policy=='indices'
class Normalization:
def __init__(self,args,seed):
print('Loading normalization data...')
dataset_dir = lib.get_path(args['data']['path'])
self.dataset_info = lib.load_json(dataset_dir / 'info.json')
normalization=args['data'].get('normalization')
num_nan_policy='mean'
cat_nan_policy='new'
cat_policy=args['data'].get('cat_policy', 'indices')
cat_min_frequency=args['data'].get('cat_min_frequency', 0.0)
normalizer_path = dataset_dir / f'normalizer_X__{normalization}__{num_nan_policy}__{cat_nan_policy}__{cat_policy}__{seed}.pickle'
encoder_path = dataset_dir / f'encoder_X__{normalization}__{num_nan_policy}__{cat_nan_policy}__{cat_policy}__{seed}.pickle'
if cat_min_frequency:
normalizer_path = normalizer_path.with_name(
normalizer_path.name.replace('.pickle', f'__{cat_min_frequency}.pickle')
)
encoder_path = encoder_path.with_name(
encoder_path.name.replace('.pickle', f'__{cat_min_frequency}.pickle')
)
### some of these files may not exist; e.g. num_new_values exists only if the training DS contains NAs
if os.path.exists(dataset_dir / f'num_new_values.npy'):
self.num_new_values = np.load(dataset_dir / f'num_new_values.npy')
else:
self.num_new_values = np.zeros(self.dataset_info['n_num_features'])
self.normalizer = pickle.load(open(normalizer_path, 'rb'))
self.encoder = pickle.load(open(encoder_path, 'rb'))
self.max_values = np.load(dataset_dir / f'max_values.npy')
## y_mean, y_std
self.y_mean_std = np.load(dataset_dir / f'y_mean_std.npy')
self.cat_values = np.load(dataset_dir / f'categories.npy').tolist()
self.n_num_features = self.dataset_info['n_num_features']
self.n_cat_features = self.dataset_info['n_cat_features']
def normalize_x(self,x_num,x_cat):
## (4.1) transform numerical data
## (4.1.1) replace nan by mean
num_nan_mask = np.isnan(x_num)
if num_nan_mask.any():
num_nan_indices = np.where(num_nan_mask)
x_num[num_nan_indices] = np.take(self.num_new_values, num_nan_indices[1])
## (4.1.2) normalize
x_num = self.normalizer.transform(x_num)
x_num = torch.as_tensor(x_num, dtype=torch.float)
## (4.2) transform categorical data
## (4.2.1) replace nan
x_cat = np.array(x_cat,dtype='<U32').reshape(1,-1)
cat_nan_mask = x_cat == 'nan'
if cat_nan_mask.any():
cat_nan_indices = np.where(cat_nan_mask)
x_cat[cat_nan_indices] = '___null___'
## (4.2.2) encode; fix values, since new data may be out of cat range
unknown_value = self.encoder.get_params()['unknown_value']
x_cat = self.encoder.transform(x_cat)
## this won't work, since the transformer can't handle max_values[column_idx]+1; make sure that train contains nan's if they're present
for column_idx in range(x_cat.shape[1]):
x_cat[x_cat[:,column_idx]==unknown_value,column_idx] = ( self.max_values[column_idx]+1 )
x_cat = torch.as_tensor(x_cat, dtype=torch.long)
return x_num, x_cat
def normalize_y(self,y_raw):
return (y_raw*self.y_mean_std[1])+self.y_mean_std[0]
def get_example_data(self):
if self.n_num_features == 8:
print("Assuming ACOTSP dataset.")
x_num=[1.83, 7.87, 0.69, 36.0, np.nan, np.nan, np.nan, 6.0 ]
x_cat=['129', 'as', '2', 'nan' ]
else:
print("Assuming LKH dataset.")
x_num=[255.0, 0.0, 5.0, 5.0, 4.0, 3.0, 12.0, 14.0, 20.0, 5.0, 986.0, 5.0]
x_cat=['121', 'NO', 'QUADRANT', 'QUADRANT', 'YES', 'YES', 'GREEDY', 'NO', 'NO', 'YES']
x_num=
|
np.array(x_num)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
data_loader.py - The data management module
===========================================
This module handles the fetching of the data from the local resources path, given in the configuration and arranging it
for our purposes of estimations. For instance, the data for Example no. 1 can be fetched by:
::
get_data(ExperimentType.ExampleNo1) - Creating the data for Example no. 1 of the paper.
"""
from scipy.linalg import qr
import numpy as np
from Infrastructure.enums import ExperimentType
from Infrastructure.utils import Dict, RowVector, Matrix, create_factory, Callable
from matrices_classes import MatInSVDForm, ExperimentNo5Form
def _random_orthonormal_cols(data_size: int, columns: int) -> Matrix:
return np.ascontiguousarray(qr(np.random.randn(data_size, columns), mode="economic", overwrite_a=True,
check_finite=False)[0])
def _get_first_3_examples_data(data_size: int, singular_values: RowVector) -> Matrix:
"""
A method which creates a random matrix of size data_size x data_size with given singular values.
Args:
data_size(int): The input data size n.
singular_values(RowVector): The singular values to be set for the matrix to create.
Returns:
A random data_size x data_size Matrix with the given singular values.
"""
rank: int = len(singular_values)
U: Matrix = _random_orthonormal_cols(data_size, rank)
V: Matrix = _random_orthonormal_cols(data_size, rank)
return MatInSVDForm(U, singular_values, V)
def _get_example_4_data(data_size: int, singular_values: RowVector) -> Matrix:
"""
A method which creates a data_size x data_size matrix whose singular values are the input values.
Args:
data_size(int): The input data size n.
singular_values(RowVector): The singular values to be set for the matrix to create.
Returns:
A data_size x data_size Matrix with the given singular values.
"""
U: Matrix = np.ascontiguousarray(
np.stack([
np.ones(data_size),
np.tile([1, -1], data_size // 2),
np.tile([1, 1, -1, -1], data_size // 4),
np.tile([1, 1, 1, 1, -1, -1, -1, -1], data_size // 8)]).T) / np.sqrt(data_size)
V: Matrix = np.ascontiguousarray(
np.stack([
np.concatenate([np.ones(data_size - 1), [0]]) / np.sqrt(data_size - 1),
np.concatenate([np.zeros(data_size - 1), [1]]),
np.concatenate([np.tile([1, -1], (data_size - 2) // 2) /
|
np.sqrt(data_size - 2)
|
numpy.sqrt
|
from scipy import stats
from numpy import linalg
import numpy as np
import sys
# NOTE: enable/disable smart quantization of weights and activations
smart_quantization = False
def quantize_arr(input_arr, min_val, max_val):
quantize_range = 256.0
input_range = max_val - min_val
mul_factor = input_range / quantize_range
v1 = np.subtract(input_arr, min_val)
v2 =
|
np.divide(v1, mul_factor)
|
numpy.divide
|
from abc import abstractmethod
from typing import Union
import numpy as np
class GLIDEError(Exception):
"""Raised when an error related to the ASF classes is encountered.
"""
class GLIDEBase:
"""
Implements the non-differentiable variant of GLIDE-II as proposed in
Ruiz, Francisco, <NAME>, and <NAME>.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Note:
Additional constraints produced by the GLIDE-II formulation are implemented
such that if the returned values are negative, the corresponding constraint is
violated. The returned value may be positive. In such cases, the returned value
is a measure of how close or far the corresponding feasible solution is from
violating the constraint.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
self.has_additional_constraints = False
self.utopian = utopian
self.nadir = nadir
self.rho = rho
self.required_keys: dict = {}
self.extras = kwargs
def __call__(self, objective_vector: np.ndarray, preference: dict) -> np.ndarray:
"""Evaluate the scalarization function value based on objective vectors and
DM preference.
Args:
objective_vector (np.ndarray): 2-dimensional array of objective values of solutions.
preference (dict): The preference given by the decision maker. The required
dictionary keys and their meanings can be found in self.required_keys variable.
Returns:
np.ndarray: The scalarized value obtained by using GLIDE-II over
objective_vector.
"""
self.preference = preference
self.objective_vector = objective_vector
f_minus_q = np.atleast_2d(objective_vector - self.q)
mu = np.atleast_2d(self.mu)
I_alpha = self.I_alpha
max_term = np.max(mu[:, I_alpha] * f_minus_q[:, I_alpha], axis=1)
sum_term = self.rho * np.sum(self.w * f_minus_q, axis=1)
return max_term + sum_term
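# i.e. the achievement scalarizing value
#   s(f) = max_{i in I_alpha} mu_i (f_i - q_i) + rho * sum_i w_i (f_i - q_i),
# evaluated row-wise for each objective vector.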
def evaluate_constraints(
self, objective_vector: np.ndarray, preference: dict
) -> Union[None, np.ndarray]:
"""Evaluate the additional contraints generated by the GLIDE-II formulation.
Note:
Additional constraints produced by the GLIDE-II formulation are implemented
such that if the returned values are negative, the corresponding constraint is
violated. The returned value may be positive. In such cases, the returned value
is a measure of how close or far the corresponding feasible solution is from
violating the constraint.
Args:
objective_vector (np.ndarray): [description]
preference (dict): [description]
Returns:
Union[None, np.ndarray]: [description]
"""
if not self.has_additional_constraints:
return None
self.preference = preference
self.objective_vector = objective_vector
constraints = (
self.epsilon[self.I_epsilon]
+ self.s_epsilon * self.delta_epsilon[self.I_epsilon]
- objective_vector[:, self.I_epsilon]
)
return constraints
@property
@abstractmethod
def I_alpha(self):
pass
@property
@abstractmethod
def I_epsilon(self):
pass
@property
@abstractmethod
def mu(self):
pass
@property
@abstractmethod
def q(self):
pass
@property
@abstractmethod
def w(self):
pass
@property
@abstractmethod
def epsilon(self):
pass
@property
@abstractmethod
def s_epsilon(self):
pass
@property
@abstractmethod
def delta_epsilon(self):
pass
class reference_point_method_GLIDE(GLIDEBase):
"""
Implements the reference point method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
<NAME>, <NAME>, and <NAME>.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
self.has_additional_constraints = False
self.__I_alpha = np.full_like(
utopian, dtype=np.bool_, fill_value=True
).flatten()
self.__I_epsilon = np.full_like(
utopian, dtype=np.bool_, fill_value=False
).flatten()
self.__w = 1
self.__mu = 1 / (nadir - utopian)
self.required_keys = {
"reference point": (
"Used to calculate the direction of improvement: "
"a line parallel to the nadir-utopian vector "
"and passing through the reference point. "
"(type: numpy.ndarray)"
)
}
@property
def I_epsilon(self):
return self.__I_epsilon
@property
def I_alpha(self):
return self.__I_alpha
@property
def mu(self):
return self.__mu
@property
def w(self):
return self.__w
@property
def q(self):
return self.preference["reference point"]
@property
def epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def s_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def delta_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
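# Minimal sketch (not part of the original module): scalarizing a toy
# bi-objective problem with the reference point variant; all numbers below
# are made up for illustration only.
def _example_reference_point_glide():
    utopian = np.array([0.0, 0.0])
    nadir = np.array([1.0, 1.0])
    scalarizer = reference_point_method_GLIDE(utopian=utopian, nadir=nadir)
    objective_vectors = np.array([[0.2, 0.8], [0.5, 0.5]])
    preference = {"reference point": np.array([0.3, 0.3])}
    # Smaller scalarized values indicate solutions better aligned with the
    # reference point.
    return scalarizer(objective_vectors, preference)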
class GUESS_GLIDE(GLIDEBase):
"""
Implements the GUESS method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
<NAME>, <NAME>, and <NAME>.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
self.has_additional_constraints = False
self.__I_alpha = np.full_like(
utopian, dtype=np.bool_, fill_value=True
).flatten()
self.__I_epsilon = np.full_like(
utopian, dtype=np.bool_, fill_value=False
).flatten()
self.__w = 0
self.required_keys = {
"reference point": (
"Used to calculate the direction of improvement: "
"a line going from the nadir point to the reference point. "
"(type: numpy.ndarray)"
)
}
@property
def I_epsilon(self):
return self.__I_epsilon
@property
def I_alpha(self):
return self.__I_alpha
@property
def mu(self):
return 1 / (self.nadir - self.preference["reference point"])
@property
def w(self):
return self.__w
@property
def q(self):
return self.preference["reference point"]
@property
def epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def s_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def delta_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
class AUG_GUESS_GLIDE(GUESS_GLIDE):
"""
Implements the Augmented GUESS method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
<NAME>, <NAME>, and <NAME>.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
# Note: `self.__w = 1` here would be name-mangled to _AUG_GUESS_GLIDE__w and
# would not affect the `w` property inherited from GUESS_GLIDE, so the
# parent's private attribute is set explicitly instead.
self._GUESS_GLIDE__w = 1
class NIMBUS_GLIDE(GLIDEBase):
"""
Implements the NIMBUS method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
<NAME>, <NAME>, and <NAME>.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
self.__mu = self.__w = 1 / (self.nadir - self.utopian)
self.has_additional_constraints = True
self.required_keys = {
"current solution": (
"A solution preferred by the DM currently. " "(type: numpy.ndarray)"
),
"classifications": (
"A list of same length as the number of objectives. Elements can only "
"include some or all of ['<', '<=', '=', '>=', '0']. These classify "
"the different objectives as defined in the NIMBUS or GLIDE-II paper. "
"(type: list)"
),
"levels": (
"A vector containing desirable levels of objectives or constraining bounds "
"depending on the classification. Same length as the number of objectives. "
"(type: numpy.ndarray)"
),
}
@property
def improve_unconstrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "<")[0]
indices[relevant] = True
return indices
@property
def improve_constrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "<=")[0]
indices[relevant] = True
return indices
@property
def satisfactory(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "=")[0]
indices[relevant] = True
return indices
@property
def relax_constrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == ">=")[0]
indices[relevant] = True
return indices
@property
def relax_unconstrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "0")[0]
indices[relevant] = True
return indices
@property
def I_alpha(self):
return self.improve_unconstrained + self.improve_constrained
@property
def I_epsilon(self):
return (
self.improve_unconstrained
+ self.improve_constrained
+ self.satisfactory
+ self.relax_constrained
)
@property
def w(self):
# This was in the paper
return self.__w
# This is what I think it should be. There may be division by zero errors here.
"""return (self.objective_vector / (self.objective_vector - self.q)) / (
self.nadir - self.utopian
)"""
@property
def mu(self):
return self.__mu
@property
def q(self):
q = np.full_like(self.utopian, fill_value=0, dtype=float)
q[self.improve_unconstrained] = self.utopian[self.improve_unconstrained]
q[self.improve_constrained] = self.preference["levels"][
self.improve_constrained
]
return q
@property
def epsilon(self):
e =
|
np.full_like(self.utopian, fill_value=np.nan, dtype=float)
|
numpy.full_like
|
import numpy as np
import torch
from finetuna.utils import compute_with_calc
import random
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# from finetuna.utils import write_to_db
from finetuna.offline_learner.offline_learner import OfflineActiveLearner
# from torch.multiprocessing import Pool
torch.multiprocessing.set_sharing_strategy("file_system")
class UncertaintyLearner(OfflineActiveLearner):
"""Offline Active Learner using an uncertainty enabled ML potential to query
data with the most uncertainty.
Parameters
----------
learner_settings: dict
Dictionary of learner parameters and settings.
trainer: object
An instance of a trainer that has train and predict methods.
training_data: list
A list of ase.Atoms objects that have attached calculators.
Used as the first set of training data.
parent_calc: ase Calculator object
Calculator used for querying training data.
base_calc: ase Calculator object
Calculator used to calculate delta data for training.
ensemble: int
The number of models in ensemble
"""
def query_func(self):
if self.iterations > 1:
uncertainty = np.array(
[atoms.info["max_force_stds"] for atoms in self.sample_candidates]
)
n_retrain = self.samples_to_retrain
query_idx =
|
np.argpartition(uncertainty, -1 * n_retrain)
|
numpy.argpartition
|
import numpy as np
import random
import SharedArray as SA
import torch
from util.voxelize import voxelize
from model.basic_operators import get_overlap
def sa_create(name, var):
x = SA.create(name, var.shape, dtype=var.dtype)
x[...] = var[...]
x.flags.writeable = False
return x
def collate_fn(batch):
"""
Args:
batch - [(xyz, feat, label, ...), ...] - each tuple from a sampler
Returns: [
xyz : [BxN, 3]
feat : [BxN, d]
label : [BxN]
...
offset : int
]
"""
batch_list = []
for sample in batch:
if isinstance(sample, list):
batch_list += sample
else:
batch_list.append(sample)
batch_list = list(zip(*batch_list)) # [[xyz, ...], [feat, ...], ...]
offset, count = [], 0
for item in batch_list[0]:
count += item.shape[0]
offset.append(count)
offset = torch.IntTensor(offset)
batch_list = [torch.cat(v) for v in batch_list]
return [*batch_list, offset]
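# Worked example (illustrative): two clouds with 5 and 7 points respectively are
# concatenated into tensors of length 12, and offset == torch.IntTensor([5, 12])
# marks where each cloud ends in the concatenated batch.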
def data_prepare(coord, feat, label, split='train', voxel_size=0.04, voxel_max=None, transform=None, shuffle_index=False, origin='min'):
""" coord, feat, label - an entire cloud
"""
if transform:
coord, feat, label = transform(coord, feat, label)
if voxel_size:
# voxelize the entire cloud
coord_min = np.min(coord, 0)
coord -= coord_min
uniq_idx = voxelize(coord, voxel_size)
coord, feat, label = coord[uniq_idx], feat[uniq_idx], label[uniq_idx]
if 'train' in split and voxel_max and label.shape[0] > voxel_max:
init_idx =
|
np.random.randint(label.shape[0])
|
numpy.random.randint
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 12:42:53 2018
@author: raymondmg
"""
import numpy as np
class Gaussian:
def __init__(self):
print("init gaussian function!")
def calc(self,image,sigma,height,width,channel=1):
twoSigma2 = 2.0 * sigma * sigma
halfKernelSize = int(np.ceil( 2.0 * sigma ))
if channel == 1:
gaussian_image =
|
np.zeros((height,width))
|
numpy.zeros
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Functions for event segmentation or modeling of dataset."""
__docformat__ = 'restructuredtext'
import copy
import numpy as np
from mvpa2.misc.support import Event, value2idx
from mvpa2.datasets import Dataset
from mvpa2.base.dataset import _expand_attribute
from mvpa2.mappers.fx import _uniquemerge2literal
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.base import warning, externals
def find_events(**kwargs):
"""Detect changes in multiple synchronous sequences.
Multiple sequence arguments are scanned for changes in the unique value
combination at corresponding locations. Each change in the combination is
taken as a new event onset. The length of an event is determined by the
number of identical consecutive combinations.
Parameters
----------
**kwargs : sequences
Arbitrary number of sequences that shall be scanned.
Returns
-------
list
Detected events, where each event is a dictionary with the unique
combination of values stored under their original name. In addition, the
dictionary also contains the ``onset`` of the event (as index in the
sequence), as well as the ``duration`` (as number of identical
consecutive items).
See Also
--------
eventrelated_dataset : event-related segmentation of a dataset
Examples
--------
>>> seq1 = ['one', 'one', 'two', 'two']
>>> seq2 = [1, 1, 1, 2]
>>> events = find_events(targets=seq1, chunks=seq2)
>>> for e in events:
... print e
{'chunks': 1, 'duration': 2, 'onset': 0, 'targets': 'one'}
{'chunks': 1, 'duration': 1, 'onset': 2, 'targets': 'two'}
{'chunks': 2, 'duration': 1, 'onset': 3, 'targets': 'two'}
"""
def _build_event(onset, duration, combo):
ev = Event(onset=onset, duration=duration, **combo)
return ev
events = []
prev_onset = 0
old_combo = None
duration = 1
# over all samples
for r in xrange(len(kwargs.values()[0])):
# current attribute combination
combo = dict([(k, v[r]) for k, v in kwargs.iteritems()])
# check if things changed
if not combo == old_combo:
# did we ever have an event
if not old_combo is None:
events.append(_build_event(prev_onset, duration, old_combo))
# reset duration for next event
duration = 1
# store the current samples as onset for the next event
prev_onset = r
# update the reference combination
old_combo = combo
else:
# current event is lasting
duration += 1
# push the last event in the pipeline
if not old_combo is None:
events.append(_build_event(prev_onset, duration, old_combo))
return events
def _events2dict(events):
evvars = {}
for k in events[0]:
try:
evvars[k] = [e[k] for e in events]
except KeyError:
raise ValueError("Each event property must be present for all "
"events (could not find '%s')" % k)
return evvars
def _evvars2ds(ds, evvars, eprefix):
for a in evvars:
if not eprefix is None and a in ds.sa:
# if there is already a samples attribute like this, it got mapped
# previously (e.g. by BoxcarMapper) and is multi-dimensional.
# We move it aside under a new `eprefix` name
ds.sa[eprefix + '_' + a] = ds.sa[a]
ds.sa[a] = evvars[a]
return ds
def _extract_boxcar_events(
ds, events=None, time_attr=None, match='prev',
eprefix='event', event_mapper=None):
"""see eventrelated_dataset() for docs"""
# relabel argument
conv_strategy = {'prev': 'floor',
'next': 'ceil',
'closest': 'round'}[match]
if not time_attr is None:
tvec = ds.sa[time_attr].value
# we are asked to convert onset time into sample ids
descr_events = []
for ev in events:
# do not mess with the input data
ev = copy.deepcopy(ev)
# best matching sample
idx = value2idx(ev['onset'], tvec, conv_strategy)
# store offset of sample time and real onset
ev['orig_offset'] = ev['onset'] - tvec[idx]
# rescue the real onset into a new attribute
ev['orig_onset'] = ev['onset']
ev['orig_duration'] = ev['duration']
# figure out how many samples we need
ev['duration'] = \
len(tvec[idx:][tvec[idx:] < ev['onset'] + ev['duration']])
# new onset is sample index
ev['onset'] = idx
descr_events.append(ev)
else:
descr_events = events
# convert the event specs into the format expected by BoxcarMapper
# take the first event as an example of contained keys
evvars = _events2dict(descr_events)
# checks
for p in ['onset', 'duration']:
if not p in evvars:
raise ValueError("'%s' is a required property for all events."
% p)
boxlength = max(evvars['duration'])
if __debug__:
if not max(evvars['duration']) == min(evvars['duration']):
warning('Boxcar mapper will use maximum boxlength (%i) of all '
'provided Events.'% boxlength)
# finally create, train and use the boxcar mapper
bcm = BoxcarMapper(evvars['onset'], boxlength, space=eprefix)
bcm.train(ds)
ds = ds.get_mapped(bcm)
if event_mapper is None:
# at last reflatten the dataset
# could we add some meaningful attribute during this mapping, i.e. would
# assigning 'inspace' do something good?
ds = ds.get_mapped(FlattenMapper(shape=ds.samples.shape[1:]))
else:
ds = ds.get_mapped(event_mapper)
# add samples attributes for the events, simply dump everything as a samples
# attribute
# special case onset and duration in case of conversion into discrete time
if not time_attr is None:
for attr in ('onset', 'duration'):
evvars[attr] = [e[attr] for e in events]
ds = _evvars2ds(ds, evvars, eprefix)
return ds
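# Typical call (sketch, assuming a dataset `ds` with 'targets'/'chunks' sample
# attributes and a 'time_coords' attribute holding acquisition times):
#   events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
#   evds = _extract_boxcar_events(ds, events=events, time_attr='time_coords')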
def _fit_hrf_event_model(
ds, events, time_attr, condition_attr='targets', design_kwargs=None,
glmfit_kwargs=None, regr_attrs=None):
if externals.exists('nipy', raise_=True):
from nipy.modalities.fmri.design_matrix import make_dmtx
from mvpa2.mappers.glm import NiPyGLMMapper
# Decide/devise condition attribute on which GLM will actually be done
if isinstance(condition_attr, basestring):
# must be a list/tuple/array for the logic below
condition_attr = [condition_attr]
glm_condition_attr = 'regressor_names' # actual regressors
glm_condition_attr_map = dict([(con, dict()) for con in condition_attr]) #
# to map back to original conditions
events = copy.deepcopy(events) # since we are modifying in place
for event in events:
if glm_condition_attr in event:
raise ValueError("Event %s already has %s defined. Should not "
"happen. Choose another name if defined it"
% (event, glm_condition_attr))
compound_label = event[glm_condition_attr] = \
'glm_label_' + '+'.join(
str(event[con]) for con in condition_attr)
# and mapping back to original values, without str()
# for each condition:
for con in condition_attr:
glm_condition_attr_map[con][compound_label] = event[con]
evvars = _events2dict(events)
add_paradigm_kwargs = {}
if 'amplitude' in evvars:
add_paradigm_kwargs['amplitude'] = evvars['amplitude']
# create paradigm
if 'duration' in evvars:
from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
# NiPy considers everything with a duration as a block paradigm
paradigm = BlockParadigm(
con_id=evvars[glm_condition_attr],
onset=evvars['onset'],
duration=evvars['duration'],
**add_paradigm_kwargs)
else:
from nipy.modalities.fmri.experimental_paradigm \
import EventRelatedParadigm
paradigm = EventRelatedParadigm(
con_id=evvars[glm_condition_attr],
onset=evvars['onset'],
**add_paradigm_kwargs)
# create design matrix -- all kinds of fancy additional regr can be
# auto-generated
if design_kwargs is None:
design_kwargs = {}
if not regr_attrs is None:
names = []
regrs = []
for attr in regr_attrs:
names.append(attr)
regrs.append(ds.sa[attr].value)
if len(regrs) < 2:
regrs = [regrs]
regrs = np.hstack(regrs).T
if 'add_regs' in design_kwargs:
design_kwargs['add_regs'] = np.hstack((design_kwargs['add_regs'],
regrs))
else:
design_kwargs['add_regs'] = regrs
if 'add_reg_names' in design_kwargs:
design_kwargs['add_reg_names'].extend(names)
else:
design_kwargs['add_reg_names'] = names
design_matrix = make_dmtx(ds.sa[time_attr].value,
paradigm,
**design_kwargs)
# push design into source dataset
glm_regs = [
(reg, design_matrix.matrix[:, i])
for i, reg in enumerate(design_matrix.names)]
# GLM
glm = NiPyGLMMapper([], glmfit_kwargs=glmfit_kwargs,
add_regs=glm_regs,
return_design=True, return_model=True, space=glm_condition_attr)
model_params = glm(ds)
# some regressors might be corresponding not to original condition_attr
# so let's separate them out
regressor_names = model_params.sa[glm_condition_attr].value
condition_regressors = np.array([v in glm_condition_attr_map.values()[0]
for v in regressor_names])
assert(condition_regressors.dtype == np.bool)
if not
|
np.all(condition_regressors)
|
numpy.all
|
"""Tests for bdpy.bdata"""
import unittest
import copy
import numpy as np
from numpy.testing import assert_array_equal
import bdpy
class TestBdata(unittest.TestCase):
"""Tests of 'bdata' module"""
def __init__(self, *args, **kwargs):
super(TestBdata, self).__init__(*args, **kwargs)
self.data = bdpy.BData()
x = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])
g = np.array([1, 2, 3, 4, 5])
self.data.add(x, 'VoxelData')
self.data.add(g, 'Group')
self.data.add_metadata('Mask_0:3', [1, 1, 1, 0, 0, 0, 0, 0, 0, 0], where='VoxelData')
self.data.add_metadata('Mask_3:3', [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], where='VoxelData')
self.data.add_metadata('Mask_6:3', [0, 0, 0, 0, 0, 0, 1, 1, 1, 0], where='VoxelData')
self.data.add_metadata('Mask_0:5', [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], where='VoxelData')
self.data.add_metadata('Val_A', [9, 7, 5, 3, 1, 0, 2, 4, 6, 8], where='VoxelData')
def test_add(self):
"""Test for add."""
colname = 'TestData'
data = np.random.rand(5, 10)
b = bdpy.BData()
b.add(data, colname)
np.testing.assert_array_equal(b.dataSet, data)
np.testing.assert_array_equal(b.metadata.get(colname, 'value'),
np.array([1] * 10))
def test_add_2(self):
"""Test for add."""
colnames = ['TestData1', 'TestData2']
datalist = [np.random.rand(5, 10),
np.random.rand(5, 3)]
b = bdpy.BData()
for c, d in zip(colnames, datalist):
b.add(d, c)
# Test
np.testing.assert_array_equal(b.dataSet, np.hstack(datalist))
np.testing.assert_array_equal(b.metadata.get(colnames[0], 'value'),
np.array([1] * 10 + [np.nan] * 3))
np.testing.assert_array_equal(b.metadata.get(colnames[1], 'value'),
np.array([np.nan] * 10 + [1] * 3))
def test_add_3(self):
"""Test for add."""
b = bdpy.BData()
data_a1 = np.random.rand(10, 10)
data_b = np.random.rand(10, 3)
data_a2 = np.random.rand(10, 5)
b.add(data_a1, 'TestDataA')
b.add(data_b, 'TestDataB')
b.add(data_a2, 'TestDataA')
np.testing.assert_array_equal(b.dataSet, np.hstack((data_a1, data_b, data_a2)))
np.testing.assert_array_equal(b.metadata.get('TestDataA', 'value'),
np.array([1] * 10 + [np.nan] * 3 + [1] * 5))
np.testing.assert_array_equal(b.metadata.get('TestDataB', 'value'),
np.array([np.nan] * 10 + [1] * 3 + [np.nan] * 5))
def test_add_metadata_1(self):
"""Test for add_metadata."""
md_key = 'TestMetaData'
md_desc = 'Metadata for test'
md_val = np.random.rand(10)
testdata = np.random.rand(10, 10)
b = bdpy.BData()
b.add(testdata, 'TestData')
b.add_metadata(md_key, md_val, md_desc)
assert_array_equal(b.dataSet, testdata)
assert_array_equal(b.metadata.get('TestData', 'value'),
np.array([1] * 10))
assert_array_equal(b.metadata.get('TestMetaData', 'value'),
md_val)
def test_add_metadata_2(self):
"""Test for add_metadata."""
md_key_1 = 'TestMetaData1'
md_desc_1 = 'Metadata for test 1'
md_val_1 = np.random.rand(10)
md_key_2 = 'TestMetaData2'
md_desc_2 = 'Metadata for test 2'
md_val_2 = np.random.rand(10)
testdata = np.random.rand(10, 10)
b = bdpy.BData()
b.add(testdata, 'TestData')
b.add_metadata(md_key_1, md_val_1, md_desc_1)
b.add_metadata(md_key_2, md_val_2, md_desc_2)
assert_array_equal(b.dataSet, testdata)
assert_array_equal(b.metadata.get('TestData', 'value'),
np.array([1] * 10))
assert_array_equal(b.metadata.get('TestMetaData1', 'value'),
md_val_1)
assert_array_equal(b.metadata.get('TestMetaData2', 'value'),
md_val_2)
def test_add_metadata_3(self):
"""Test for add_metadata."""
md_key = 'TestMetaData'
md_desc = 'Metadata for test'
md_val = np.random.rand(10)
testdata_a =
|
np.random.rand(10, 10)
|
numpy.random.rand
|
# -*- coding: utf-8 -*-
from __future__ import division
import sys
sys.path.insert(0,'../../')
sys.path.insert(0,'..')
import numpy as np
from bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows
#from bayes_opt import visualization
#from visualization import Visualization
from bayes_opt.gaussian_process import GaussianProcess
#from visualization import *
from bayes_opt.visualization import visualization
from bayes_opt.acquisition_maximization import acq_max,acq_max_with_name,acq_max_with_init
#from bayes_opt.visualization import vis_variance_reduction_search as viz
from sklearn.metrics.pairwise import euclidean_distances
from sklearn import cluster
from sklearn import mixture
import matplotlib.pyplot as plt
from scipy.ndimage import filters
import time
import copy
from matplotlib import rc
#======================================================================================================
#======================================================================================================
#======================================================================================================
#======================================================================================================
class BatchPVRS(object):
def __init__(self,gp_params, func_params, acq_params):
"""
Input parameters
----------
gp_params: GP parameters
        gp_params.theta: to compute the kernel
gp_params.delta: to compute the kernel
func_params: function to optimize
func_params.init bound: initial bounds for parameters
func_params.bounds: bounds on parameters
func_params.func: a function to be optimized
acq_params: acquisition function,
acq_params.acq_func['name']=['ei','ucb','poi','lei']
,acq['kappa'] for ucb, acq['k'] for lei
acq_params.opt_toolbox: optimization toolbox 'nlopt','direct','scipy'
Returns
-------
dim: dimension
bounds: bounds on original scale
scalebounds: bounds on normalized scale of 0-1
time_opt: will record the time spent on optimization
gp: Gaussian Process object
"""
try:
bounds=func_params['bounds']
except:
bounds=func_params['function'].bounds
self.dim = len(bounds)
# Create an array with parameters bounds
if isinstance(bounds,dict):
# Get the name of the parameters
self.keys = list(bounds.keys())
self.bounds = []
for key in list(bounds.keys()):
self.bounds.append(bounds[key])
self.bounds = np.asarray(self.bounds)
else:
self.bounds=np.asarray(bounds)
scalebounds=np.array([np.zeros(self.dim), np.ones(self.dim)])
self.scalebounds=scalebounds.T
self.max_min_gap=self.bounds[:,1]-self.bounds[:,0]
# acquisition function type
self.acq=acq_params['acq_func']
if 'debug' not in self.acq:
self.acq['debug']=0
if 'optimize_gp' not in acq_params:
self.optimize_gp=0
else:
self.optimize_gp=acq_params['optimize_gp']
if 'marginalize_gp' not in acq_params:
self.marginalize_gp=0
else:
self.marginalize_gp=acq_params['marginalize_gp']
# Some function to be optimized
self.function=func_params['function']
try:
self.f = func_params['function']['func']
except:
self.f = func_params['function'].func
# optimization toolbox
if 'opt_toolbox' not in acq_params:
self.opt_toolbox='scipy'
else:
self.opt_toolbox=acq_params['opt_toolbox']
# store the batch size for each iteration
self.NumPoints=[]
# Numpy array place holders
self.X_original= None
# scale the data to 0-1 fit GP better
        self.X = None # X = (X_original - min(bounds)) / (max(bounds) - min(bounds))
        self.Y = None # Y = (Y_original - mean(bounds)) / (max(bounds) - min(bounds))
self.Y_original = None
self.opt_time=0
self.L=0 # lipschitz
self.gp=GaussianProcess(gp_params)
self.gp_params=gp_params
# Acquisition Function
#self.acq_func = None
self.acq_func = AcquisitionFunction(acq=self.acq)
self.accum_dist=[]
# theta vector for marginalization GP
self.theta_vector =[]
if 'xstars' not in self.acq:
self.xstars=[]
else:
self.xstars=self.acq['xstars']
# PVRS before and after
self.PVRS_before_after=[]
self.xstars=[]
self.Y_original_maxGP=None
self.X_original_maxGP=None
def posterior(self, Xnew):
#xmin, xmax = -2, 10
ur = unique_rows(self.X)
self.gp.fit(self.X[ur], self.Y[ur])
mu, sigma2 = self.gp.predict(Xnew, eval_MSE=True)
return mu, np.sqrt(sigma2)
def init(self, n_init_points):
"""
Input parameters
----------
gp_params: Gaussian Process structure
n_init_points: # init points
"""
# Generate random points
l = [np.random.uniform(x[0], x[1], size=n_init_points) for x in self.bounds]
# Concatenate new random points to possible existing
# points from self.explore method.
#self.init_points += list(map(list, zip(*l)))
temp=np.asarray(l)
temp=temp.T
init_X=list(temp.reshape((n_init_points,-1)))
# Evaluate target function at all initialization
y_init=self.f(init_X)
# Turn it into np array and store.
self.X_original=np.asarray(init_X)
temp_init_point=np.divide((init_X-self.bounds[:,0]),self.max_min_gap)
self.X_original_maxGP= np.asarray(init_X)
self.X_original = np.asarray(init_X)
self.X = np.asarray(temp_init_point)
y_init=np.reshape(y_init,(n_init_points,1))
self.Y_original =
|
np.asarray(y_init)
|
numpy.asarray
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import pickle
import numpy as np
from tqdm import tqdm
from classifier import get_bogs
from sklearn.metrics import average_precision_score, f1_score
from sklearn import metrics
import time
import sys
sys.path.append('..')
from utils import bog_task_to_attribute, bog_attribute_to_task
def get_bogs(all_preds, all_labels):
total_labels = len(all_labels[0]) - 1
bog_tilde = np.zeros((total_labels, 2))
bog_gt_g =
|
np.zeros((total_labels, 2))
|
numpy.zeros
|
import vplanet
import vplot
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pathlib
import sys
from matplotlib import ticker
import re
import astropy.units as u
import glob
# Path hacks
path = pathlib.Path(__file__).parents[0].absolute()
sys.path.insert(1, str(path.parents[0]))
from get_args import get_args
def comp2huybers(plname, xrange=False, show=True):
"""
Creates plots of insolation, temperature, albedo, ice mass,
and bed rock height over the length of the simulation
Parameters
----------
plname : string
The name of the planet with .Climate data
Keyword Arguments
-----------------
xrange : float tuple, list, or numpy array
Range of x-values (time) to restrict plot
(default = False (no restriction))
orbit : bool
Plot orbital data (obliquity, eccentricity, COPP)
(default = False)
show : bool
Show plot in Python (default = True)
"""
fig = plt.figure(figsize=(8, 12))
fig.subplots_adjust(wspace=0.3, top=0.9, hspace=0.2)
# Run vplanet
out = vplanet.run(path / "vpl.in")
ctmp = 0
for p in range(len(out.bodies)):
if out.bodies[p].name == plname:
body = out.bodies[p]
ctmp = 1
else:
if p == len(out.bodies) - 1 and ctmp == 0:
raise Exception("Planet %s not found." % plname)
try:
ecc = body.Eccentricity
except:
ecc = np.zeros_like(body.Time) + getattr(out.log.initial, plname).Eccentricity
try:
inc = body.Inc
except:
inc = np.zeros_like(body.Time)
try:
obl = body.Obliquity
except:
obltmp = getattr(out.log.initial, plname).Obliquity
if obltmp.unit == "rad":
obltmp *= 180 / np.pi
obl = np.zeros_like(body.Time) + obltmp
plname_lower = plname.lower()
f = open(path / f"{plname_lower}.in", "r")
lines = f.readlines()
f.close()
pco2 = 0
for i in range(len(lines)):
if lines[i].split() != []:
if lines[i].split()[0] == "dRotPeriod":
P = -1 * np.float(lines[i].split()[1])
if lines[i].split()[0] == "dSemi":
semi = np.float(lines[i].split()[1])
if semi < 0:
semi *= -1
if lines[i].split()[0] == "dpCO2":
pco2 = np.float(lines[i].split()[1])
try:
longp = (body.ArgP + body.LongA + body.PrecA + 180)
except:
longp = body.PrecA
esinv = ecc * np.sin(longp)
lats = np.unique(body.Latitude)
nlats = len(lats)
ntimes = len(body.Time)
# plot temperature
temp = np.reshape(body.TempLandLat, (ntimes, nlats))
ax1 = plt.subplot(7, 1, 5)
c = plt.contourf(
body.Time / 1e6, lats[lats > 58 * u.deg], temp.T[lats > 58 * u.deg], 20
)
plt.ylabel("Latitude")
plt.ylim(60, 83)
plt.yticks([60, 70, 80])
if xrange == False:
left = 0
else:
left = xrange[0]
if xrange:
plt.xlim(xrange)
plt.xticks(visible=False)
clb = plt.colorbar(c, ax=plt.gca(), ticks=[-17, -15, -13, -11, -9, -7, -5],)
clb.set_label("Surface Temp.\n($^{\circ}$C)", fontsize=12)
# plot ice height
ice =
|
np.reshape(body.IceHeight + body.BedrockH, (ntimes, nlats))
|
numpy.reshape
|
"""
A simple implementation of linear regression.
"""
import math
import numpy as np
import matplotlib.pyplot as plt
def generate_sin_data(n_points,
theta_start=0,
theta_end=2*math.pi,
noise_sigma=0.1):
"""
Generates some test data from a sin wave with additive Gaussian noise.
Parameters
----------
n_points: int
The number of points to generate
    theta_start: float
        Where to start on the sin function.
    theta_end: float
        Where to end on the sin function.
noise_sigma: float
Standard deviation of noise to add
Returns
-------
X: ndarray, (N,)
The input points.
y: ndarray, (N,)
The output points.
"""
x = np.linspace(theta_start, theta_end, n_points)
    y = np.sin(x) + (np.random.randn(n_points) * noise_sigma)
return x, y
def partition_data(X, y, train_ratio=0.6, val_ratio=0.5):
"""
Partitions data into training, test and validation sets.
Parameters
----------
X: ndarray, (N, D)
X points.
y: ndarray (N,)
y points.
train_ratio: float
Amount of data to use for training
val_ratio: float
The ratio of data to use for validation set after the training data has
been removed.
Returns
-------
training_set, validation_set, test_set
"""
n_points = y.size
randind = list(range(n_points))
np.random.shuffle(randind)
train_ind = int(round(train_ratio * n_points))
val_ind = int(round(val_ratio * (n_points - train_ind)) + train_ind)
train_inds = randind[:train_ind]
val_inds = randind[train_ind:val_ind]
test_inds = randind[val_ind:]
partitioned_data = (
(X[train_inds], y[train_inds]),
(X[val_inds], y[val_inds]),
(X[test_inds], y[test_inds]))
return partitioned_data
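# Usage sketch (illustrative only, not part of the original script): draw noisy
# sine data with the generator above and split it 60/20/20 with partition_data.
def _demo_partition(n_points=200):
    X_demo, y_demo = generate_sin_data(n_points, noise_sigma=0.1)
    (X_tr, y_tr), (X_val, y_val), (X_te, y_te) = partition_data(X_demo, y_demo)
    # With the default ratios this yields 120/40/40 points for n_points=200.
    return (X_tr.shape[0], X_val.shape[0], X_te.shape[0])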
def mse(y, y_pred):
"""
Computes the mean squared error between two sets of points.
"""
    return np.mean((y - y_pred)**2)
def rbf_kernel(X1, X2, gamma=0.1):
"""
Computes radial basis functions between inputs in X1 and X2.
K(x, y) = exp(-gamma ||x - y||^2)
This is a slow implementation for illustrative purposes.
"""
n_samples_rr = X1.shape[0]
n_samples_cc = X2.shape[0]
pairwise_distances = np.zeros((n_samples_rr, n_samples_cc))
# Compute pairwise distances
for rr in range(n_samples_rr):
for cc in range(n_samples_cc):
pairwise_distances[rr, cc] = np.sum((X1[rr] - X2[cc])**2)
K = np.exp(-gamma * pairwise_distances)
return K
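# A vectorized sketch of the same kernel (an illustrative alternative, not used by
# the code below): relies on the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
# and NumPy broadcasting instead of the explicit double loop above.
def rbf_kernel_vectorized(X1, X2, gamma=0.1):
    X1 = np.asarray(X1, dtype=float)
    X2 = np.asarray(X2, dtype=float)
    if X1.ndim == 1:
        X1 = X1[:, np.newaxis]
    if X2.ndim == 1:
        X2 = X2[:, np.newaxis]
    # Pairwise squared Euclidean distances, clipped at 0 to absorb round-off.
    sq_dists = (np.sum(X1 ** 2, axis=1)[:, np.newaxis]
                + np.sum(X2 ** 2, axis=1)[np.newaxis, :]
                - 2.0 * X1.dot(X2.T))
    return np.exp(-gamma * np.maximum(sq_dists, 0.0))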
def plot_figure(x, y, x_pred, y_pred, display=False, filename=None):
"""
Plots the output of a 1D regression problem.
"""
fig = plt.figure()
fig.add_subplot(111, aspect='equal')
plt.plot(x, y, 'k+')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.plot(x_pred, y_pred, 'r-')
if display:
plt.show()
if filename is not None:
plt.savefig(filename, bbox_inches="tight")
return fig
class LinearRegression():
"""
Implements basic linear regression with L2 regularization.
"""
def __init__(self, alpha=0):
self.alpha_ = alpha
self.coef_ = None
def fit(self, X, y):
"""
Fit model.
"""
# Check input is the correct shape
if X.ndim == 1:
X = X[:,np.newaxis]
# Append a column of 1's for bias
X = np.hstack((np.ones((X.shape[0], 1)), X))
X = np.matrix(X) # Use a NumPy matrix for clarity
assert y.ndim == 1, "Only supports 1D y"
y =
|
np.matrix(y[:,np.newaxis])
|
numpy.matrix
|
import copy
import errno
import glob
import logging # as logging
import logging.config
import math
import os
import pickle
import struct
import sys
import time
import configuration
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
# from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
from frontend.parameter_generation import ParameterGeneration
from io_funcs.binary_io import BinaryIOCollection
# import matplotlib.pyplot as plt
# our custom logging class that can also plot
# from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, SingleWeightMatrixPlot
from lxml import etree
from utils.providers import ListDataProviderWithProjectionIndex, get_unexpanded_projection_inputs # ListDataProvider
from util import file_util, math_statis
from util.file_util import load_binary_file_frame
# from frontend.feature_normalisation_base import FeatureNormBase
## This should always be True -- tidy up later
expand_by_minibatch = True
if expand_by_minibatch:
proj_type = 'int32'
else:
proj_type = theano.config.floatX
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
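# Example of the helper above (hypothetical ids and paths, for illustration only):
#   prepare_file_path_list(['utt01', 'utt02'], '/data/cmp', '.cmp')
#   -> ['/data/cmp/utt01.cmp', '/data/cmp/utt02.cmp']
# The directory is created first when new_dir_switch is True.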
def visualize_dnn(dnn):
    layer_num = len(dnn.params) // 2 ## including input and output
for i in range(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i + 1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num - 1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i * 2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def infer_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
'''
Unlike the same function in run_tpdnn.py this *DOESN'T* save model at the
end -- just returns array of the learned projection weights
'''
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
######### data providers ##########
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list=train_x_file_list,
y_file_list=train_y_file_list, n_ins=n_ins, n_outs=n_outs,
buffer_size=buffer_size, shuffle=True,
index_to_project=index_to_project,
projection_insize=projection_insize,
indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list=valid_x_file_list,
y_file_list=valid_y_file_list, n_ins=n_ins, n_outs=n_outs,
buffer_size=buffer_size, shuffle=False,
index_to_project=index_to_project,
projection_insize=projection_insize,
indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
####################################
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
############## load existing dnn #####
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
####################################
logger.info('fine-tuning the %s model' % (model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
logger.info('fine-tuning the %s model' % (model_type))
dnn_model.initialise_projection_weights()
inference_epochs = 20 ## <-------- hard coded !!!!!!!!!!
current_finetune_lr = previous_finetune_lr = finetune_lr
warmup_epoch_3 = 10 # 10 ## <-------- hard coded !!!!!!!!!!
# warmup_epoch_3 = epoch + warmup_epoch_3
# inference_epochs += epoch
while (epoch < inference_epochs):
epoch = epoch + 1
current_momentum = momentum
if epoch > warmup_epoch_3:
previous_finetune_lr = current_finetune_lr
current_finetune_lr = previous_finetune_lr * 0.5
dev_error = []
sub_start_time = time.clock()
## osw -- inferring word reps on validation set in a forward pass in a single batch
        ## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('infer word representations for validation set')
valid_error = []
        n_valid_batches = valid_set_x.get_value().shape[0] // batch_size
for minibatch_index in range(n_valid_batches):
v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# valid_error = infer_projections_fn(current_finetune_lr, current_momentum)
# this_validation_loss = numpy.mean(valid_error)
# if plot:
# ## add dummy validation loss so that plot works:
# plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
# plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
#
sub_end_time = time.clock()
logger.info('INFERENCE epoch %i, validation error %f, time spent %.2f' % (
epoch, this_validation_loss, (sub_end_time - sub_start_time)))
# if cfg.hyper_params['model_type'] == 'TPDNN':
# if not os.path.isdir(cfg.projection_weights_output_dir):
# os.mkdir(cfg.projection_weights_output_dir)
# weights = dnn_model.get_projection_weights()
# fname = os.path.join(cfg.projection_weights_output_dir, 'proj_INFERENCE_epoch_%s'%(epoch))
# numpy.savetxt(fname, weights)
#
best_dnn_model = dnn_model ## always update
end_time = time.clock()
##cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
final_weights = dnn_model.get_projection_weights()
logger.info(
'overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
# if plot:
# plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
#
### ========================================================
# if cfg.hyper_params['model_type'] == 'TPDNN':
# os.system('python %s %s'%('/afs/inf.ed.ac.uk/user/o/owatts/scripts_NEW/plot_weights_multiple_phases.py', cfg.projection_weights_output_dir))
return final_weights
def dnn_generation_PROJECTION(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, cfg=None,
synth_mode='constant', projection_end=0, projection_weights_to_use=None,
save_weights_to_file=None):
'''
Use the (training/dev/test) projections learned in training, but shuffled, for test tokens.
-- projection_end is *real* value for last projection index (or some lower value)
-- this is so the samples / means are of real values learned on training data
'''
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation_PROJECTION')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
## 'remove' word representations by randomising them. As model is unpickled and
## not re-saved, this does not throw trained parameters away.
if synth_mode == 'sampled_training':
        ## use randomly chosen training projection -- shuffle in-place = same as sampling without replacement
P = dnn_model.get_projection_weights()
numpy.random.shuffle(P[:, :projection_end]) ## shuffle in place along 1st dim (reorder rows)
dnn_model.params[0].set_value(P, borrow=True)
elif synth_mode == 'uniform':
## generate utt embeddings uniformly at random within the min-max of the training set (i.e. from a (hyper)-rectangle)
P = dnn_model.get_projection_weights()
column_min = numpy.min(P[:, :projection_end], axis=0) ## vector like a row of P with min of its columns
column_max = numpy.max(P[:, :projection_end], axis=0)
random_proj = numpy.random.uniform(low=column_min, high=column_max, size=numpy.shape(P))
random_proj = random_proj.astype(numpy.float32)
dnn_model.params[0].set_value(random_proj, borrow=True)
elif synth_mode == 'constant':
## use mean projection
P = dnn_model.get_projection_weights()
mean_row = P[:, :projection_end].mean(axis=0)
print('mean row used for projection:')
print(mean_row)
P = numpy.ones(numpy.shape(P), dtype=numpy.float32) * mean_row ## stack mean rows
dnn_model.params[0].set_value(P, borrow=True)
elif synth_mode == 'inferred':
## DEBUG
assert projection_weights_to_use != None
old_weights = dnn_model.get_projection_weights()
## DEBUG:=========
# projection_weights_to_use = old_weights # numpy.array(numpy.random.uniform(low=-0.3, high=0.3, size=numpy.shape(old_weights)), dtype=numpy.float32)
## =============
assert numpy.shape(old_weights) == numpy.shape(projection_weights_to_use), [numpy.shape(old_weights),
numpy.shape(
projection_weights_to_use)]
dnn_model.params[0].set_value(projection_weights_to_use, borrow=True)
elif synth_mode == 'single_sentence_demo':
## generate utt embeddings from a uniform 10 x 10 grid within the min-max of the training set (i.e. from a rectangle)
P = dnn_model.get_projection_weights()
column_min = numpy.min(P[:, :projection_end], axis=0) ## vector like a row of P with min of its columns
column_max = numpy.max(P[:, :projection_end], axis=0)
assert len(column_min) == 2, 'Only 2D projections supported in mode single_sentence_demo'
ranges = column_max - column_min
nstep = 10
steps = ranges / (nstep - 1)
        grid_params = [numpy.array([1.0, 1.0])] ## padding to handle 0 index (reserved for defaults)
for x in range(nstep):
for y in range(nstep):
grid_params.append(column_min + (numpy.array([x, y]) * steps))
stacked_params = numpy.vstack(grid_params)
print(stacked_params)
print(numpy.shape(stacked_params))
print()
print()
proj = numpy.ones(numpy.shape(P))
proj[:101, :] = stacked_params
proj = proj.astype(numpy.float32)
dnn_model.params[0].set_value(proj, borrow=True)
elif synth_mode == 'uniform_sampled_within_std_1':
## points uniformly sampled from between the 1.8 - 2.0 stds of a diagonal covariance gaussian fitted to the data
P = dnn_model.get_projection_weights()
column_min = numpy.min(P[:, :projection_end], axis=0) ## vector like a row of P with min of its columns
column_max = numpy.max(P[:, :projection_end], axis=0)
std_val = numpy.std(P[:, :projection_end], axis=0)
dots = numpy.random.uniform(low=column_min, high=column_max, size=(100000, 2))
dots = within_circle(dots, radius=std_val * 2.0)
dots = outside_circle(dots, radius=std_val * 1.8)
m, n = numpy.shape(P)
dots = dots[:m, :]
dots = dots.astype(numpy.float32)
dnn_model.params[0].set_value(dots, borrow=True)
elif synth_mode == 'uniform_sampled_within_std_2':
## points uniformly sampled from between the 1.8 - 2.0 stds of a diagonal covariance gaussian fitted to the data
P = dnn_model.get_projection_weights()
column_min = numpy.min(P[:, :projection_end], axis=0) ## vector like a row of P with min of its columns
column_max = numpy.max(P[:, :projection_end], axis=0)
std_val = numpy.std(P[:, :projection_end], axis=0)
dots = numpy.random.uniform(low=column_min, high=column_max, size=(100000, 2))
dots = within_circle(dots, radius=std_val * 3.0)
dots = outside_circle(dots, radius=std_val * 2.8)
m, n = numpy.shape(P)
dots = dots[:m, :]
dots = dots.astype(numpy.float32)
dnn_model.params[0].set_value(dots, borrow=True)
elif synth_mode == 'uniform_sampled_within_std_3':
## points uniformly sampled from between the 1.8 - 2.0 stds of a diagonal covariance gaussian fitted to the data
P = dnn_model.get_projection_weights()
column_min = numpy.min(P[:, :projection_end], axis=0) ## vector like a row of P with min of its columns
column_max = numpy.max(P[:, :projection_end], axis=0)
std_val =
|
numpy.std(P[:, :projection_end], axis=0)
|
numpy.std
|
import numpy as np
from data_container import DataLoader, SceneDataLoaderNumpy, ObjectDataLoaderNumpy
from PIL import Image
import keras.backend as K
import tensorflow as tf
def align_input_output_image(inputs, target, pred):
x1 = np.concatenate(inputs, axis=1)
x2 = np.concatenate(target, axis=1)
x3 = np.concatenate(pred, axis=1)
xs = np.concatenate((x1, x2, x3), axis=0)
return xs
def save_pred_images(images, file_path):
x = images
x *= 255
x = np.clip(x, 0, 255)
x = x.astype('uint8')
new_im = Image.fromarray(x)
new_im.save("%s.png" % (file_path))
def test_few_models_and_export_image(model, data: DataLoader, file_name, folder_name, test_n=5,
single_model=False):
input_image_original, target_image_original, poseinfo = data.get_batched_data(test_n, single_model=single_model)
poseinfo_processed = model.process_pose_info(data, poseinfo)
pred_images = model.get_predicted_image((input_image_original, poseinfo_processed))
images = align_input_output_image(input_image_original, target_image_original, pred_images)
save_pred_images(images, "%s/%s" % (folder_name, file_name))
return images
def ssim_custom(y_true, y_pred):
return tf.image.ssim(y_pred, y_true, max_val=1.0, filter_sigma=0.5)
def mae_custom(y_true, y_pred):
return K.mean(K.abs(y_true - y_pred))
def test_for_random_scene(data: SceneDataLoaderNumpy, model, N=20000, batch_size=32):
mae = 0
ssim = 0
count = 0
while count < N:
input_image_original, target_image_original, pose_info = data.get_batched_data(batch_size=batch_size, is_train=False)
pose_info_per_model = model.process_pose_info(data, pose_info)
metrics = model.evaluate(input_image_original, target_image_original, pose_info_per_model)
mae += metrics[1] * batch_size
ssim += metrics[2] * batch_size
count += batch_size
mae /= count
ssim /= count
return mae, ssim
def test_for_all_scenes(data: SceneDataLoaderNumpy, model, batch_size=16):
scene_N = len(data.scene_list)
difference_N = 2 * data.max_frame_difference + 1
absolute_errors = np.zeros((difference_N, ), dtype=np.float32)
ssim_errors = np.zeros((difference_N, ), dtype=np.float32)
for difference in range(difference_N):
for i in range(len(data.scene_list)):
scene_id = data.scene_list[i]
index = 0
N = len(data.test_ids[scene_id])
while index < N:
M = min(index + batch_size, N)
input_image_original, target_image_original, pose_info = data.get_batched_data_i_j(
scene_id, difference - data.max_frame_difference, index, M)
pose_info_per_model = model.process_pose_info(data, pose_info)
metrics = model.evaluate(input_image_original, target_image_original, pose_info_per_model)
absolute_errors[difference] += metrics[1] * (M - index)
ssim_errors[difference] += metrics[2] * (M - index)
index += batch_size
total_N = 0
for scene_id in data.scene_list:
total_N += len(data.test_ids[scene_id])
absolute_errors /= total_N
ssim_errors /= total_N
absolute_errors_avg = np.mean(absolute_errors)
ssim_errors_avg = np.mean(ssim_errors)
return absolute_errors_avg, ssim_errors_avg, absolute_errors, ssim_errors
def test_for_all_objects(data: ObjectDataLoaderNumpy, model, batch_size=50):
absolute_errors =
|
np.zeros((18, 18))
|
numpy.zeros
|
#!/usr/bin/env python3
import xarray as xr
import numpy as np
def setup_surface(srfc_height, only_half_area, materials):
x=1000
verts =
|
np.empty((9,3), dtype=np.float64)
|
numpy.empty
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 18:05:51 2019
@author: ben91
"""
from SimulationClasses import *
from TimeSteppingMethods import *
from FiniteVolumeSchemes import *
from FluxSplittingMethods import *
from InitialConditions import *
from Equations import *
from wholeNetworks import *
from LoadDataMethods import *
from keras import *
from keras.models import *
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anime
from matplotlib import style
from matplotlib import rcParams
import math
style.use('fivethirtyeight')
rcParams.update({'figure.autolayout': True})
'''
# Import modules/packages
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.close('all') # close all open figures
# Define and set custom LaTeX style
styleNHN = {
"pgf.rcfonts":False,
"pgf.texsystem": "pdflatex",
"text.usetex": False, #TODO: might need to change this to false
"font.family": "serif"
}
mpl.rcParams.update(styleNHN)
xx = np.linspace(0,1,100)
yy = xx**2
# Plotting defaults
ALW = 0.75 # AxesLineWidth
FSZ = 12 # Fontsize
LW = 2 # LineWidth
MSZ = 5 # MarkerSize
SMALL_SIZE = 8 # Tiny font size
MEDIUM_SIZE = 10 # Small font size
BIGGER_SIZE = 14 # Large font size
plt.rc('font', size=FSZ) # controls default text sizes
plt.rc('axes', titlesize=FSZ) # fontsize of the axes title
plt.rc('axes', labelsize=FSZ) # fontsize of the x and y labels
plt.rc('xtick', labelsize=FSZ) # fontsize of the x-tick labels
plt.rc('ytick', labelsize=FSZ) # fontsize of the y-tick labels
plt.rc('legend', fontsize=FSZ) # legend fontsize
plt.rc('figure', titlesize=FSZ) # fontsize of the figure title
plt.rcParams['axes.linewidth'] = ALW # sets the default axes lindewidth to ``ALW''
plt.rcParams["mathtext.fontset"] = 'cm' # Computer Modern mathtext font (applies when ``usetex=False'')
def discTrackStep(c,x,t,u,P,title, a, b, err):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
c: shock speed
x: x coordinates
y: y coordinates
u: velocity
P: periods advected for
err: plot error if True, otherwise plot solution
'''
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - c*tg
plt.figure()
if err:
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = eex-u
'''
plt.contourf(xp,tg,u)
plt.colorbar()
plt.title(title)
plt.figure()
plt.contourf(xp,tg,eex)
'''
for i in range(-2,int(P)):
plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.7,20))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
else:
for i in range(-2,int(P)+1):
plt.contourf(xp+i*L,tg,u,np.linspace(-0.2,1.2,57))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
def intError(c,x,t,u,title):
L = x[-1] - x[0] + x[1] - x[0]
dx = x[1] - x[0]
nx = np.size(x)
xg, tg = np.meshgrid(t,x)
xp = xg - c*tg
ons = np.ones_like(xp)
#eex = np.roll(np.greater(ons,xp%L),-1,axis = 0)
eex1 = xp/dx
eex1[eex1>=1] = 1
eex1[eex1<=0] = 0
eex2 = (-xp%L-L/2)/dx
eex2[eex2>=1] = 1
eex2[eex2<=0] = 0
eex3 = (-xp%L-L/2)/dx
eex3[eex3>(nx/2-1)] = -(eex3[eex3>(nx/2-1)]-nx/2)
eex3[eex3>=1] = 1
eex3[eex3<=0] = 0
er = eex3-u
ers = np.power(er,2)
ers0 = np.expand_dims(ers[0,:],axis = 0)
ers_aug = np.concatenate((ers,ers0), axis = 0)
err_int = np.trapz(ers_aug, dx = dx, axis = 0)
plt.plot(t,np.sqrt(err_int),'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('L2 Error')
#plt.ylim([0,0.02])
def totalVariation(t,u,title):#plot total variation over time
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
#plt.figure()
plt.plot(t,tv,'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('Total Variation')
#plt.ylim((1.999,2.01))
def totalEnergy(t,u, dx, title):#plot total energy
u0 = np.expand_dims(u[0,:],axis = 0)
u_aug = np.concatenate((u,u0), axis = 0)
energy = 0.5*np.trapz(np.power(u_aug,2), dx = dx, axis = 0)
plt.figure()
plt.plot(t,energy)
plt.title(title)
plt.xlabel('Time')
plt.ylabel('1/2*integral(u^2)')
plt.ylim([0,np.max(energy)*1.1])
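# Note on the quantity above (my reading): totalEnergy approximates
# E(t) = 0.5 * integral of u(x,t)^2 dx with the trapezoid rule on the periodically
# extended samples (the first row is appended to close the period), so a decaying
# E(t) indicates numerical dissipation for the linear advection test problem.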
def mwn(FVM):
'''
plot modified wavenumber of a finite volume scheme
Inputs:
FVM: finite volume method object to test
'''
nx = 100
nt = 10
L = 2
T = 0.00001
x = np.linspace(0,L,nx,endpoint=False)
t = np.linspace(0,T,nt)
dx = x[1]-x[0]
dt = t[1]-t[0]
sigma = T/dx
EQ = adv()
FS = LaxFriedrichs(EQ, 1)
RK = SSPRK3()
NK = int((np.size(x)-1)/2)
mwn = np.zeros(NK,dtype=np.complex_)
wn = np.zeros(NK)
A = 1
for k in range(2,NK):
IC = cosu(A,k/L)
testCos = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_cos = testCos.run()
u_f_cos = u_cos[:,0]
u_l_cos = u_cos[:,-1]
IC = sinu(A,k/L)
testSin = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_sin = testSin.run()
u_f_sin = u_sin[:,0]
u_l_sin = u_sin[:,-1]
u_h0 =np.fft.fft(u_f_cos+complex(0,1)*u_f_sin)
u_h = np.fft.fft(u_l_cos+complex(0,1)*u_l_sin)
v_h0 = u_h0[k]
v_h = u_h[k]
mwn[k] = -1/(complex(0,1)*sigma)*np.log(v_h/v_h0)
wn[k] = 2*k*np.pi/nx
plt.plot(wn,np.real(mwn))
#plt.hold
plt.plot(wn,wn)
plt.xlabel('\phi')
plt.ylabel('Modified Wavenumber (real part)')
plt.figure()
plt.plot(wn,np.imag(mwn))
plt.xlabel('\phi')
plt.ylabel('Modified Wavenumber (imaginary part)')
plt.figure()
plt.semilogy(wn,abs(wn-np.real(mwn)))
return wn
def animateSim(x,t,u,pas):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
t: t coordinates
u: velocity
pas: how long to pause between frames
'''
for i in range(0,len(t)):
plt.plot(x,u[:,i])
plt.pause(pas)
plt.clf()
plt.plot(x,u[:,-1])
def specAnalysis(model, u, RKM,WENONN, NNNN, h, giveModel, makePlots):
'''
perform spectral analysis of a finite volume method when operating on a specific waveform
Finds eigenvalues, and then uses this to compute max
Inputs:
Model: WENO5 neural network that will be analyzed
u: the data that is the input to the method
RKM: time stepping method object to analyze for space-time coupling
wenoName: name of layer in model that gives WENO5 coefficicents
NNname: name of layer in model that gives NN coefficients
giveModel: whether or not we are passing layer names or model names
'''
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
N = np.size(u)
M = 5#just assume stencil size is 5 for now
sortedU = np.zeros((N,M)) + 1j*np.zeros((N,M))
for i in range(0,M):#assume scheme is upwind or unbiased
sortedU[:,i] = np.roll(u,math.floor(M/2)-i)
def scale(sortedU, NNNN):
min_u = np.amin(sortedU,1)
max_u = np.amax(sortedU,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(sortedU[:,2])
u_tmp[:] = sortedU[:,2]
#for i in range(0,5):
# sortedU[:,i] = (sortedU[:,i]-min_u)/(max_u-min_u)
cff = NNNN.predict(sortedU)#compute \Delta u
cff[const_n,:] = np.array([1/30,-13/60,47/60,9/20,-1/20])
#print('fl: ', fl)
return cff
if(np.sum(np.iscomplex(u))>=1):
wec = WENONN.predict(np.real(sortedU)) + WENONN.predict(np.imag(sortedU))*1j
nnc = scale(np.real(sortedU), NNNN) + scale(np.imag(sortedU), NNNN)*1j
op_WENO5 = np.zeros((N,N)) + np.zeros((N,N))*1j
op_NN = np.zeros((N,N)) + np.zeros((N,N))*1j
else:
wec = WENONN.predict(np.real(sortedU))
nnc = scale(np.real(sortedU), NNNN)
op_WENO5 = np.zeros((N,N))
op_NN = np.zeros((N,N))
for i in range(0,N):
for j in range(0,M):
op_WENO5[i,(i+j-int(M/2))%N] -= wec[i,j]
op_WENO5[i,(i+j-int(M/2)-1)%N] += wec[(i-1)%N,j]
op_NN[i,(i+j-int(M/2))%N] -= nnc[i,j]
op_NN[i,(i+j-int(M/2)-1)%N] += nnc[(i-1)%N,j]
#print(i,': ', op_WENO5[i,:])
WEeigs, WEvecs = np.linalg.eig(op_WENO5)
NNeigs, NNvecs = np.linalg.eig(op_NN)
con_nn = np.linalg.solve(NNvecs, u)
#now do some rungekutta stuff
x = np.linspace(-3,3,301)
y = np.linspace(-3,3,301)
X,Y = np.meshgrid(x,y)
Z = X + Y*1j
g = abs(1 + Z + np.power(Z,2)/2 + np.power(Z,3)/6)
g_we = abs(1 + (h*WEeigs) + np.power(h*WEeigs,2)/2 + np.power(h*WEeigs,3)/6)
g_nn = abs(1 + (h*NNeigs) + np.power(h*NNeigs,2)/2 + np.power(h*NNeigs,3)/6)
#do some processing for that plot of the contributions vs the amplification factor
c_abs = np.abs(con_nn)
ords = np.argsort(c_abs)
g_sort = g_nn[ords]
c_sort = con_nn[ords]
c_norm = c_sort/np.linalg.norm(c_sort,1)
c_abs2 = np.abs(c_norm)
#do some processing for the most unstable mode
ordsG = np.argsort(g_nn)
unstb = NNvecs[:,ordsG[-1]]
if(makePlots>=1):
plt.figure()
plt.plot(np.sort(g_we),'.')
plt.plot(np.sort(g_nn),'.')
plt.legend(('WENO5','NN'))
plt.title('CFL = '+ str(h))
plt.xlabel('index')
plt.ylabel('|1+HL+(HL^2)/2+(HL^3)/6|')
plt.ylim([0,1.2])
plt.figure()
plt.plot(np.real(WEeigs),np.imag(WEeigs),'.')
plt.plot(np.real(NNeigs),np.imag(NNeigs),'.')
plt.title('Eigenvalues')
plt.legend(('WENO5','NN'))
plt.figure()
plt.plot(g_nn,abs(con_nn),'.')
plt.xlabel('Amplification Factor')
plt.ylabel('Contribution')
print('Max WENO g: ',np.max(g_we))
print('Max NN g: ',np.max(g_nn))
if(makePlots>=2):
plt.figure()
sml = 1E-2
plt.contourf(X, Y, g, [1-sml,1+sml])
plt.figure()
plt.plot(g_sort,c_abs2,'.')
plt.xlabel('Scaled Amplification Factor')
plt.ylabel('Contribution')
return g_nn, con_nn, unstb
#return np.max(g_we), np.max(g_nn)
#plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.025,20))
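# Note on the amplification factors used in specAnalysis (my reading): one step of
# the third-order Runge-Kutta scheme scales the component along each eigenvector by
# R(h*lambda) with R(z) = 1 + z + z**2/2 + z**3/6, so requiring |R(h*lambda)| <= 1
# for every eigenvalue lambda of the semi-discrete operator is the linear stability
# condition that g, g_we and g_nn test above.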
def specAnalysisData(model, u, RKM,WENONN, NNNN, CFL, giveModel):
nx, nt = np.shape(u)
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
maxWe = np.zeros(nt)
maxNN = np.zeros(nt)
for i in range(0,nt):
print(i)
maxWe[i], maxNN[i] = specAnalysis(model, u[:,i], RKM, WENONN, NNNN, CFL, True, False)
plt.figure()
plt.plot(maxWe)
plt.figure()
plt.plot(maxNN)
return maxWe, maxNN
def eigenvectorProj(model, u, WENONN, NNNN):
nx = np.shape(u)
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
def evalPerf(x,t,P,u,eex):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
y: y coordinates
P: periods advected for
u: velocity
Outputs:
tvm: max total variation in solution
swm: max shock width in solution
'''
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
tvm = np.max(tv)
u = np.transpose(u)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
#plot width of discontinuity over time for neural network and WENO5
'''
us = np.roll(u, 1, axis = 0)
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
plot width of discontinuity over time for neural network and WENO5
'''
u = np.transpose(u)
u_WE = np.transpose(u_WE)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
dx = x[1]-x[0]
'''
eex = (-xp%L-L/2)/dx
eex[eex>49] = -(eex[eex>49]-50)
eex[eex>=1] = 1
eex[eex<=0] = 0
'''
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
er_we = np.abs(eex-u_WE)
wdth = np.sum(np.greater(er,0.01),axis=1)*dx/2
wdth_we = np.sum(np.greater(er_we,0.01),axis=1)*dx/2
plt.figure()
plt.plot(t,wdth)
plt.plot(t,wdth_we)
plt.legend(('Neural Network','WENO5'))
plt.xlabel('t')
plt.ylabel('Discontinuity Width')
def convStudy():
'''
Test order of accuracy of an FVM
'''
nr = 21
errNN = np.zeros(nr)
errWE = np.zeros(nr)
errEN = np.zeros(nr)
dxs = np.zeros(nr)
for i in range(0,nr):
print(i)
nx = 10*np.power(10,0.1*i)
L = 2
x = np.linspace(0,L,int(nx),endpoint=False)
dx = x[1]-x[0]
FVM1 = NNWENO5dx(dx)
FVM2 = WENO5()
FVM3 = ENO3()
u = np.sin(4*np.pi*x) + np.cos(4*np.pi*x)
du = 4*np.pi*(np.cos(4*np.pi*x)-np.sin(4*np.pi*x))
resNN = FVM1.evalF(u)
resWE = FVM2.evalF(u)
resEN = FVM3.evalF(u)
du_EN = (resNN-np.roll(resEN,1))/dx
du_NN = (resNN-np.roll(resNN,1))/dx
du_WE = (resWE-np.roll(resWE,1))/dx
errNN[i] = np.linalg.norm(du_NN-du,ord = 2)/np.sqrt(nx)
errEN[i] = np.linalg.norm(du_EN-du,ord = 2)/np.sqrt(nx)
errWE[i] = np.linalg.norm(du_WE-du,ord = 2)/np.sqrt(nx)
dxs[i] = dx
nti = 6
toRegDx = np.ones((nti,2))
toRegDx[:,1] = np.log10(dxs[-nti:])
toRegWe = np.log10(errWE[-nti:])
toRegNN = np.log10(errNN[-nti:])
toRegEN = np.log10(errEN[-nti:])
c_we, m_we = np.linalg.lstsq(toRegDx, toRegWe, rcond=None)[0]
c_nn, m_nn = np.linalg.lstsq(toRegDx, toRegNN, rcond=None)[0]
c_en, m_en = np.linalg.lstsq(toRegDx, toRegEN, rcond=None)[0]
print('WENO5 slope: ',m_we)
print('NN slope: ',m_nn)
print('ENO3 slope: ',m_en)
plt.loglog(dxs,errNN,'o')
plt.loglog(dxs,errWE,'o')
plt.loglog(dxs,errEN,'o')
plt.loglog(dxs,(10**c_we)*(dxs**m_we))
plt.loglog(dxs,(10**c_nn)*(dxs**m_nn))
plt.loglog(dxs,(10**c_en)*(dxs**m_en))
plt.legend(['WENO-NN','WENO5-JS','ENO3'])
plt.xlabel('$\Delta x$')
plt.ylabel('$E$')
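# Note on the slope estimates above (my reading): for a scheme of order p the error
# behaves like E ~ C * dx**p, so log10(E) = log10(C) + p*log10(dx) and the
# least-squares fits report the observed order p as m_we, m_nn and m_en.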
def plot_visc(x,t,uv,FVM,P,NN,contours):
nx, nt = np.shape(uv)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
def scheme(u,NN):
ust = np.zeros_like(u)
ust = ust + u
min_u = np.amin(u,1)
max_u = np.amax(u,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(u[:,2])
u_tmp[:] = u[:,2]
for i in range(0,5):
u[:,i] = (u[:,i]-min_u)/(max_u-min_u)
ep = 1E-6
#compute fluxes on sub stencils (similar to derivatives I guess)
f1 = 1/3*u[:,0]-7/6*u[:,1]+11/6*u[:,2]
f2 = -1/6*u[:,1]+5/6*u[:,2]+1/3*u[:,3]
f3 = 1/3*u[:,2]+5/6*u[:,3]-1/6*u[:,4]
#compute derivatives on sub stencils
justU = 1/30*ust[:,0]-13/60*ust[:,1]+47/60*ust[:,2]+9/20*ust[:,3]-1/20*ust[:,4]
dudx = 0*ust[:,0]+1/12*ust[:,1]-5/4*ust[:,2]+5/4*ust[:,3]-1/12*ust[:,4]
dudx = (dudx - np.roll(dudx,1))
d2udx2 = -1/4*ust[:,0]+3/2*ust[:,1]-2*ust[:,2]+1/2*ust[:,3]+1/4*ust[:,4]
d2udx2 = (d2udx2 - np.roll(d2udx2,1))
d3udx3 = 0*ust[:,0]-1*ust[:,1]+3*ust[:,2]-3*ust[:,3]+1*ust[:,4]
d3udx3 = (d3udx3 - np.roll(d3udx3,1))
#compute smoothness indicators
B1 = 13/12*np.power(u[:,0]-2*u[:,1]+u[:,2],2) + 1/4*np.power(u[:,0]-4*u[:,1]+3*u[:,2],2)
B2 = 13/12*np.power(u[:,1]-2*u[:,2]+u[:,3],2) + 1/4*np.power(u[:,1]-u[:,3],2)
B3 = 13/12*np.power(u[:,2]-2*u[:,3]+u[:,4],2) + 1/4*np.power(3*u[:,2]-4*u[:,3]+u[:,4],2)
#assign linear weights
g1 = 1/10
g2 = 3/5
g3 = 3/10
#compute the unscaled nonlinear weights
wt1 = g1/np.power(ep+B1,2)
wt2 = g2/np.power(ep+B2,2)
wt3 = g3/np.power(ep+B3,2)
wts = wt1 + wt2 + wt3
#scale the nonlinear weights
w1 = wt1/wts
w2 = wt2/wts
w3 = wt3/wts
#compute the coefficients
c1 = np.transpose(np.array([1/3*w1,-7/6*w1-1/6*w2,11/6*w1+5/6*w2+1/3*w3,1/3*w2+5/6*w3,-1/6*w3]))
#fl = np.multiply(fl,(max_u-min_u))+min_u
if(NN):
A1 = np.array([[-0.94130915, -0.32270527, -0.06769955],
[-0.37087336, -0.05059665, 0.55401474],
[ 0.40815187, -0.5602299 , -0.01871526],
[ 0.56200236, -0.5348897 , -0.04091108],
[-0.6982639 , -0.49512517, 0.52821904]])
b1 = np.array([-0.04064859, 0. , 0. ])
c2 = np.maximum(np.matmul(c1,A1)+b1,0)
A2 = np.array([[ 0.07149544, 0.9637294 , 0.41981453],
[ 0.75602794, -0.0222342 , -0.95690656],
[ 0.07406807, -0.41880417, -0.4687035 ]])
b2 = np.array([-0.0836111 , -0.00330033, -0.01930024])
c3 = np.maximum(np.matmul(c2,A2)+b2,0)
A3 = np.array([[ 0.8568574 , -0.5809458 , 0.04762125],
[-0.26066098, -0.23142155, -0.6449008 ],
[ 0.7623346 , 0.81388015, -0.03217626]])
b3 = np.array([-0.0133561 , -0.05374921, 0. ])
c4 = np.maximum(np.matmul(c3,A3)+b3,0)
A4 = np.array([[-0.2891752 , -0.53783405, -0.17556567, -0.7775279 , 0.69957024],
[-0.12895434, 0.13607207, 0.12294354, 0.29842544, -0.00198237],
[ 0.5356503 , 0.09317833, 0.5135357 , -0.32794708, 0.13765627]])
b4 = np.array([ 0.00881096, 0.01138764, 0.00464343, 0.0070305 , -0.01644066])
dc = np.matmul(c4,A4)+b4
ct = c1 - dc
Ac = np.array([[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2]])
bc = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
dc2 = np.matmul(ct,Ac)+bc
C = ct + dc2
Cons = C[:,0] + C[:,1] + C[:,2] + C[:,3] + C[:,4]
C_visc = -5/2*C[:,0] - 3/2*C[:,1] - 1/2*C[:,2] + 1/2*C[:,3] + 3/2*C[:,4]
C_visc2 = 19/6*C[:,0] + 7/6*C[:,1] + 1/6*C[:,2] + 1/6*C[:,3] + 7/6*C[:,4]
C_visc3 = -65/24*C[:,0] - 5/8*C[:,1] - 1/24*C[:,2] + 1/24*C[:,3] + 5/8*C[:,4]
C_visc = C_visc.flatten()
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
else:
Cons = c1[:,0] + c1[:,1] + c1[:,2] + c1[:,3] + c1[:,4]
C_visc = (-5/2*c1[:,0] - 3/2*c1[:,1] - 1/2*c1[:,2] + 1/2*c1[:,3] + 3/2*c1[:,4])
C_visc2 = (19/6*c1[:,0] + 7/6*c1[:,1] + 1/6*c1[:,2] + 1/6*c1[:,3] + 7/6*c1[:,4])
C_visc3 = (-65/24*c1[:,0] - 5/8*c1[:,1] - 1/24*c1[:,2] + 1/24*c1[:,3] + 5/8*c1[:,4])
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
return Cons,-C_visc,-C_visc2,-C_visc3, dudx, d2udx2, d3udx3
C_ = np.zeros_like(uv)
C_i =
|
np.zeros_like(uv)
|
numpy.zeros_like
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for building the features for the AlphaFold multimer model."""
import collections
import contextlib
import copy
import dataclasses
import json
import os
import tempfile
from typing import Mapping, MutableMapping, Sequence
from absl import logging
from alphafold.common import protein
from alphafold.common import residue_constants
from alphafold.data import feature_processing
from alphafold.data import msa_pairing
from alphafold.data import parsers
from alphafold.data import pipeline
from alphafold.data.tools import jackhmmer
import numpy as np
# Internal import (7716).
@dataclasses.dataclass(frozen=True)
class _FastaChain:
sequence: str
description: str
def _make_chain_id_map(
*,
sequences: Sequence[str],
descriptions: Sequence[str],
) -> Mapping[str, _FastaChain]:
"""Makes a mapping from PDB-format chain ID to sequence and description."""
if len(sequences) != len(descriptions):
raise ValueError('sequences and descriptions must have equal length. '
f'Got {len(sequences)} != {len(descriptions)}.')
if len(sequences) > protein.PDB_MAX_CHAINS:
raise ValueError(
'Cannot process more chains than the PDB format supports. '
f'Got {len(sequences)} chains.')
chain_id_map = {}
for chain_id, sequence, description in zip(protein.PDB_CHAIN_IDS,
sequences, descriptions):
chain_id_map[chain_id] = _FastaChain(sequence=sequence,
description=description)
return chain_id_map
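# Illustrative reading of the helper above (hypothetical inputs): chains are keyed
# by protein.PDB_CHAIN_IDS in order, e.g.
#   _make_chain_id_map(sequences=['MKTAYIAK', 'GSHMLEDP'],
#                      descriptions=['chain A desc', 'chain B desc'])
#   -> {'A': _FastaChain('MKTAYIAK', 'chain A desc'),
#       'B': _FastaChain('GSHMLEDP', 'chain B desc')}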
@contextlib.contextmanager
def temp_fasta_file(fasta_str: str):
with tempfile.NamedTemporaryFile('w', suffix='.fasta') as fasta_file:
fasta_file.write(fasta_str)
fasta_file.seek(0)
yield fasta_file.name
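# Usage sketch for the context manager above (illustrative FASTA content):
#   with temp_fasta_file('>query\nMKTAYIAKQR\n') as fasta_path:
#       ...  # hand fasta_path to a tool such as jackhmmer
# The temporary file is removed automatically when the with-block exits.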
def convert_monomer_features(monomer_features: pipeline.FeatureDict,
chain_id: str) -> pipeline.FeatureDict:
"""Reshapes and modifies monomer features for multimer models."""
converted = {}
converted['auth_chain_id'] = np.asarray(chain_id, dtype=np.object_)
unnecessary_leading_dim_feats = {
'sequence', 'domain_name', 'num_alignments', 'seq_length'
}
for feature_name, feature in monomer_features.items():
if feature_name in unnecessary_leading_dim_feats:
# asarray ensures it's a np.ndarray.
feature = np.asarray(feature[0], dtype=feature.dtype)
elif feature_name == 'aatype':
# The multimer model performs the one-hot operation itself.
feature =
|
np.argmax(feature, axis=-1)
|
numpy.argmax
|
import numpy as np
import openmdao.api as om
class PropulsionAssembly(om.ExplicitComponent):
"Connects the different aircraft components"
def initialize(self):
self.options.declare('num_nodes', types=int, default = 1)
def setup(self):
nn = self.options['num_nodes']
self.add_input('P_gen1', val=np.ones(nn), desc='power of generator 1', units='kW')
self.add_input('P_gen2', val=np.ones(nn), desc='power of generator 2', units='kW')
self.add_input('fuel_rate_gen1', val=
|
np.ones(nn)
|
numpy.ones
|
"""
Evaluates the reconstruction quality for varying number of output neurons.
MIT License
Copyright (c) 2019 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import utility as ut
import network as nt
from tqdm import tqdm as tqdm
from collections import deque
from copy import deepcopy
from data_generator import DataGenerator
delta_T = 1e-3
# parameters
spiking_input = False
labels = [0, 1, 2, 3]
n_outputs = 12
W, H = 24, 24
r_net = 50.0
t_max = 1000
n_inputs = W*H
m_k = 1.0/n_outputs
# load data
x, y = ut.load_mnist(h=H, w=W, labels=labels, train=False, frequencies=spiking_input)
def estimate_likelihood(estimation_duration=10.0):
log_likelihoods = deque([])
estimation_net = deepcopy(net)
estimation_net._current_time = 0
estimation_net._trace = deque([])
while estimation_net._current_time < estimation_duration:
estimation_net.step(lambda t: data_generator[t], update_weights=False)
pbar.n = int(net._current_time * 1000) / 1000
pbar.update(0)
# log likelihood
y = estimation_net._trace[-1][1].reshape((1, -1))
pi = ut.sigmoid(net._V)
log_likelihoods.append(
np.log(1.0 / n_outputs) + np.log(np.sum(np.prod(y * pi + (1 - y) * (1 - pi), axis=-1))))
return np.mean(log_likelihoods), np.std(log_likelihoods)
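# Worked reading of the estimate above (my interpretation of the code): with
# K = n_outputs equally weighted output neurons and pi_k = sigmoid(V_k) the vector
# of Bernoulli means for neuron k, the marginal likelihood of a binary input y is
#   p(y) = (1/K) * sum_k prod_j pi_kj**y_j * (1 - pi_kj)**(1 - y_j),
# and y*pi + (1-y)*(1-pi) is exactly that Bernoulli factor, so the line above is
# log p(y) accumulated over the estimation window.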
def reconstruct(net, input, t_image=0.250):
estimation_net = deepcopy(net)
estimation_net._current_time = 0
estimation_net._trace = deque([])
reconstruction =
|
np.zeros_like(input)
|
numpy.zeros_like
|
import unittest
import numpy as np
import h5py
from Cryptodome.Random import get_random_bytes
from adaptiveleak.energy_systems import get_group_target_bytes, EnergyUnit, convert_rate_to_energy
from adaptiveleak.utils import data_utils
from adaptiveleak.utils.constants import LENGTH_SIZE
from adaptiveleak.utils.encryption import AES_BLOCK_SIZE, encrypt_aes128, encrypt
from adaptiveleak.utils.data_types import EncryptionMode, CollectMode, PolicyType, EncodingMode
from adaptiveleak.utils.message import encode_standard_measurements
class TestQuantization(unittest.TestCase):
def test_to_fp_pos(self):
self.assertEqual(64, data_utils.to_fixed_point(0.25, precision=8, width=10))
self.assertEqual(256, data_utils.to_fixed_point(0.25, precision=10, width=12))
self.assertEqual(294, data_utils.to_fixed_point(0.28700833, precision=10, width=12))
self.assertEqual(974, data_utils.to_fixed_point(0.95151288, precision=10, width=12))
self.assertEqual(645, data_utils.to_fixed_point(0.63029945, precision=10, width=12))
self.assertEqual(5, data_utils.to_fixed_point(0.28700833, precision=4, width=6))
self.assertEqual(61, data_utils.to_fixed_point(0.95151288, precision=6, width=10))
self.assertEqual(161, data_utils.to_fixed_point(0.63029945, precision=8, width=15))
def test_to_fp_neg(self):
self.assertEqual(-64, data_utils.to_fixed_point(-0.25, precision=8, width=10))
self.assertEqual(-256, data_utils.to_fixed_point(-0.25, precision=10, width=12))
self.assertEqual(-294, data_utils.to_fixed_point(-0.28700833, precision=10, width=12))
self.assertEqual(-974, data_utils.to_fixed_point(-0.95151288, precision=10, width=12))
self.assertEqual(-645, data_utils.to_fixed_point(-0.63029945, precision=10, width=12))
self.assertEqual(-5, data_utils.to_fixed_point(-0.28700833, precision=4, width=6))
self.assertEqual(-61, data_utils.to_fixed_point(-0.95151288, precision=6, width=10))
self.assertEqual(-161, data_utils.to_fixed_point(-0.63029945, precision=8, width=15))
def test_to_fp_range(self):
self.assertEqual(511, data_utils.to_fixed_point(2, precision=8, width=10))
self.assertEqual(511, data_utils.to_fixed_point(7, precision=8, width=10))
self.assertEqual(255, data_utils.to_fixed_point(5, precision=6, width=9))
self.assertEqual(255, data_utils.to_fixed_point(12, precision=6, width=9))
self.assertEqual(-511, data_utils.to_fixed_point(-2, precision=8, width=10))
self.assertEqual(-511, data_utils.to_fixed_point(-7, precision=8, width=10))
self.assertEqual(-255, data_utils.to_fixed_point(-5, precision=6, width=9))
self.assertEqual(-255, data_utils.to_fixed_point(-12, precision=6, width=9))
def test_to_fp_neg_shift(self):
self.assertEqual(1, data_utils.to_fixed_point(2, precision=-1, width=6))
self.assertEqual(1, data_utils.to_fixed_point(4, precision=-2, width=7))
self.assertEqual(2, data_utils.to_fixed_point(5, precision=-1, width=5))
self.assertEqual(3, data_utils.to_fixed_point(12, precision=-2, width=3))
self.assertEqual(3, data_utils.to_fixed_point(15, precision=-2, width=3))
self.assertEqual(-1, data_utils.to_fixed_point(-2, precision=-1, width=6))
self.assertEqual(-1, data_utils.to_fixed_point(-4, precision=-2, width=7))
self.assertEqual(-2, data_utils.to_fixed_point(-5, precision=-1, width=5))
self.assertEqual(-3, data_utils.to_fixed_point(-12, precision=-2, width=3))
self.assertEqual(-3, data_utils.to_fixed_point(-15, precision=-2, width=3))
def test_array_to_fp(self):
array = np.array([0.25, -0.28700833, 0.95151288, 0.63029945])
result = data_utils.array_to_fp(array, precision=10, width=12)
expected = [256, -294, 974, 645]
self.assertEqual(expected, result.tolist())
def test_array_to_fp_shifted(self):
array = np.array([0.25, -1.28700833, 0.95151288, 0.63029945])
shifts = np.array([0, -1, -2, -2])
result = data_utils.array_to_fp_shifted(array, precision=3, width=6, shifts=shifts)
expected = [2, -21, 30, 20]
self.assertEqual(expected, result.tolist())
def test_to_float_pos(self):
self.assertTrue(np.isclose(0.25, data_utils.to_float(64, precision=8)))
self.assertTrue(np.isclose(0.25, data_utils.to_float(256, precision=10)))
self.assertTrue(np.isclose(0.2861328125, data_utils.to_float(293, precision=10)))
self.assertTrue(np.isclose(0.951171875, data_utils.to_float(974, precision=10)))
self.assertTrue(np.isclose(0.6298828125, data_utils.to_float(645, precision=10)))
self.assertTrue(np.isclose(0.25, data_utils.to_float(4, precision=4)))
self.assertTrue(np.isclose(0.9375, data_utils.to_float(60, precision=6)))
self.assertTrue(np.isclose(0.62890625, data_utils.to_float(161, precision=8)))
def test_to_float_neg(self):
self.assertTrue(np.isclose(-0.25, data_utils.to_float(-64, precision=8)))
self.assertTrue(np.isclose(-0.25, data_utils.to_float(-256, precision=10)))
self.assertTrue(np.isclose(-0.2861328125, data_utils.to_float(-293, precision=10)))
self.assertTrue(np.isclose(-0.951171875, data_utils.to_float(-974, precision=10)))
self.assertTrue(np.isclose(-0.6298828125, data_utils.to_float(-645, precision=10)))
self.assertTrue(np.isclose(-0.25, data_utils.to_float(-4, precision=4)))
self.assertTrue(np.isclose(-0.9375, data_utils.to_float(-60, precision=6)))
self.assertTrue(np.isclose(-0.62890625, data_utils.to_float(-161, precision=8)))
def test_to_float_neg_shift(self):
self.assertEqual(2, data_utils.to_float(1, precision=-1))
self.assertEqual(4, data_utils.to_float(1, precision=-2))
self.assertEqual(4, data_utils.to_float(2, precision=-1))
self.assertEqual(12, data_utils.to_float(3, precision=-2))
self.assertEqual(-2, data_utils.to_float(-1, precision=-1))
self.assertEqual(-4, data_utils.to_float(-1, precision=-2))
self.assertEqual(-4, data_utils.to_float(-2, precision=-1))
self.assertEqual(-12, data_utils.to_float(-3, precision=-2))
def test_array_to_float(self):
array = np.array([-256, 293, 974, -645])
result = data_utils.array_to_float(array, precision=10)
expected = np.array([-0.25, 0.2861328125, 0.951171875, -0.6298828125])
self.assertTrue(np.all(np.isclose(expected, result)))
def test_array_to_float_shifted(self):
array = [-2, 21, 30, -20]
shifts = np.array([0, -1, -2, -2])
result = data_utils.array_to_float_shifted(array, precision=3, shifts=shifts)
expected = [-0.25, 1.3125, 0.9375, -0.625]
self.assertEqual(expected, result.tolist())
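# Note (inferred from the expected values above, not library documentation):
# array_to_fp_shifted / array_to_float_shifted appear to use a per-element
# effective precision of (precision - shift), e.g. 21 / 2**(3 - (-1)) = 1.3125.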
def test_unsigned(self):
array = np.array([10, 255, 12, 17, 0, 256])
quantized = data_utils.array_to_fp(array, width=8, precision=0)
recovered = data_utils.array_to_float(array, precision=0)
self.assertTrue(np.all(np.isclose(array, recovered)))
def test_neg_end_to_end(self):
array = np.array([16, 32, 33, 48, 1024, 2047])
quantized = data_utils.array_to_fp(array, width=8, precision=-4)
recovered = data_utils.array_to_float(quantized, precision=-4)
expected_quantized = np.array([1, 2, 2, 3, 64, 127])
self.assertTrue(np.all(np.isclose(quantized, expected_quantized)))
expected = np.array([16, 32, 32, 48, 1024, 2032])
self.assertTrue(np.all(np.isclose(recovered, expected)))
def test_end_to_end_eq_precision(self):
value = -0.03125 # -1 / 32
width = 4
precision = 4
fixed_point = data_utils.to_fixed_point(value, width=width, precision=precision)
quantized = data_utils.to_float(fixed_point, precision=precision)
self.assertTrue(np.isclose(quantized, 0.0))
def test_end_to_end_higher_precision_lower(self):
value = -0.03125 # -1 / 32
width = 4
precision = 5
fixed_point = data_utils.to_fixed_point(value, width=width, precision=precision)
quantized = data_utils.to_float(fixed_point, precision=precision)
self.assertTrue(np.isclose(quantized, value))
def test_end_to_end_higher_precision_upper(self):
value = -0.25
width = 4
precision = 5
fixed_point = data_utils.to_fixed_point(value, width=width, precision=precision)
quantized = data_utils.to_float(fixed_point, precision=precision)
self.assertTrue(np.isclose(quantized, -0.21875))
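# A minimal sketch inferred from the expected values in TestQuantization above.
# This is an assumption about the arithmetic, not the actual adaptiveleak
# implementation of data_utils.to_fixed_point / to_float.
def _to_fixed_point_sketch(value, precision, width):
    # Scale by 2**precision (a negative precision divides), round to the
    # nearest integer, and saturate to the signed range of `width` bits.
    scaled = int(round(value * (2.0 ** precision)))
    limit = (1 << (width - 1)) - 1
    return max(-limit, min(limit, scaled))

def _to_float_sketch(fixed, precision):
    # Invert the scaling: a fixed-point value represents fixed * 2**(-precision).
    return fixed * (2.0 ** -precision)

# Example: _to_fixed_point_sketch(0.28700833, precision=10, width=12) == 294 and
# _to_float_sketch(-645, precision=10) == -0.6298828125, matching the assertions above.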
class TestRangeShift(unittest.TestCase):
def test_range_single_int(self):
old_width = 16
old_precision = 7
non_fractional = old_width - old_precision
new_width = 8
num_range_bits = 3
value = (1 << (old_precision + 1))
shift = data_utils.select_range_shift(measurement=value,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits,
prev_shift=1)
self.assertEqual(shift, -4)
float_value = 2.0
new_precision = (new_width - non_fractional) - shift
quantized = data_utils.to_fixed_point(float_value, width=new_width, precision=new_precision)
recovered = data_utils.to_float(quantized, precision=new_precision)
self.assertEqual(recovered, float_value)
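# Worked arithmetic for the case above: non_fractional = 16 - 7 = 9, so with
# shift = -4 the new precision is (8 - 9) - (-4) = 3 fractional bits. The value
# 2.0 then quantizes to 2.0 * 2**3 = 16, which fits in 8 signed bits, so the
# round trip is exact. How select_range_shift trades off range, precision and
# prev_shift is not spelled out here, so no sketch of it is attempted.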
def test_range_single_int_2(self):
old_width = 16
old_precision = 0
non_fractional = old_width - old_precision
new_width = 6
num_range_bits = 4
value = data_utils.to_fixed_point(93, width=old_width, precision=old_precision)
shift = data_utils.select_range_shift(measurement=value,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits,
prev_shift=1)
self.assertEqual(shift, -8)
float_value = 93.0
new_precision = (new_width - non_fractional) - shift
quantized = data_utils.to_fixed_point(float_value, width=new_width, precision=new_precision)
recovered = data_utils.to_float(quantized, precision=new_precision)
self.assertEqual(recovered, 92.0)
def test_range_single_float(self):
old_width = 16
old_precision = 13
non_fractional = old_width - old_precision
new_width = 14
num_range_bits = 3
float_value = 2.7236943
value = data_utils.to_fixed_point(float_value, width=old_width, precision=old_precision)
shift = data_utils.select_range_shift(measurement=value,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits,
prev_shift=-2)
self.assertEqual(shift, 0)
new_precision = (new_width - non_fractional) - shift
quantized = data_utils.to_fixed_point(float_value, width=new_width, precision=new_precision)
recovered = data_utils.to_float(quantized, precision=new_precision)
self.assertLess(abs(recovered - float_value), 1e-4)
def test_range_single_large(self):
old_width = 16
old_precision = 7
non_fractional = old_width - old_precision
new_width = 8
num_range_bits = 3
value = 0x4080
shift = data_utils.select_range_shift(measurement=value,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits,
prev_shift=1)
self.assertEqual(shift, 1)
float_value = 129.0
new_precision = (new_width - non_fractional) - shift
quantized = data_utils.to_fixed_point(float_value, width=new_width, precision=new_precision)
recovered = data_utils.to_float(quantized, precision=new_precision)
self.assertEqual(recovered, 128.0)
def test_range_single_large_2(self):
old_width = 20
old_precision = 8
non_fractional = old_width - old_precision
new_width = 13
num_range_bits = 3
float_value = -426.35
value = data_utils.to_fixed_point(float_value, width=old_width, precision=old_precision)
shift = data_utils.select_range_shift(measurement=value,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits,
prev_shift=1)
self.assertEqual(shift, -2)
new_precision = (new_width - non_fractional) - shift
quantized = data_utils.to_fixed_point(float_value, width=new_width, precision=new_precision)
recovered = data_utils.to_float(quantized, precision=new_precision)
self.assertEqual(recovered, -426.375)
def test_range_single_large_exact(self):
old_width = 16
old_precision = 7
non_fractional = old_width - old_precision
new_width = 8
num_range_bits = 3
value = 0x4100
shift = data_utils.select_range_shift(measurement=value,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits,
prev_shift=1)
self.assertEqual(shift, 0)
float_value = data_utils.to_float(value, precision=old_precision)
new_precision = (new_width - non_fractional) - shift
quantized = data_utils.to_fixed_point(float_value, width=new_width, precision=new_precision)
recovered = data_utils.to_float(quantized, precision=new_precision)
self.assertEqual(recovered, float_value)
def test_range_single_frac(self):
old_width = 16
old_precision = 7
non_fractional = old_width - old_precision
new_width = 8
num_range_bits = 3
value = 0x0011
shift = data_utils.select_range_shift(measurement=value,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits,
prev_shift=1)
self.assertEqual(shift, -4)
float_value = data_utils.to_float(value, precision=old_precision)
new_precision = (new_width - non_fractional) - shift
quantized = data_utils.to_fixed_point(float_value, width=new_width, precision=new_precision)
recovered = data_utils.to_float(quantized, precision=new_precision)
self.assertEqual(recovered, 0.125)
def test_range_arr_integers(self):
measurements = np.array([1.0, 2.0, -3.0, 4.0, -1.0, 3.0])
old_width = 16
old_precision = 7
non_fractional = old_width - old_precision
new_width = 8
num_range_bits = 3
shifts = data_utils.select_range_shifts_array(measurements=measurements,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits)
shifts_list = shifts.tolist()
self.assertEqual(shifts_list, [-4, -4, -4, -4, -4, -4])
new_precision = new_width - non_fractional
quantized = data_utils.array_to_fp_shifted(measurements, width=new_width, precision=new_precision, shifts=shifts)
recovered = data_utils.array_to_float_shifted(quantized, precision=new_precision, shifts=shifts)
recovered_list = recovered.tolist()
self.assertEqual(recovered_list, [1.0, 2.0, -3.0, 4.0, -1.0, 3.0])
def test_range_arr_integers_2(self):
measurements = np.array([0, 76, 153, 229, 306, 382, 23, 11, 11, 11, 11, 0, 79, 159, 238, 318, 398, 415, 455])
old_width = 16
old_precision = 0
non_fractional = old_width - old_precision
new_width = 8
num_range_bits = 4
shifts = data_utils.select_range_shifts_array(measurements=measurements,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits)
shifts_list = shifts.tolist()
def test_range_arr_mixed_one(self):
measurements = np.array([1.75, 2.0, -3.5, 4.75, -1.0, 0.1875])
old_width = 8
old_precision = 4
non_fractional = old_width - old_precision
new_width = 5
num_range_bits = 3
shifts = data_utils.select_range_shifts_array(measurements=measurements,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits)
shifts_list = shifts.tolist()
self.assertEqual(shifts_list, [-2, -1, -1, 0, 0, -4])
new_precision = new_width - non_fractional
quantized = data_utils.array_to_fp_shifted(measurements, width=new_width, precision=new_precision, shifts=shifts)
recovered = data_utils.array_to_float_shifted(quantized, precision=new_precision, shifts=shifts)
recovered_list = recovered.tolist()
self.assertEqual(recovered_list, [1.75, 2.0, -3.5, 5.0, -1.0, 0.1875])
def test_range_arr_mixed_two(self):
measurements = np.array([1.5, 2.05, -3.5, 1.03125])
old_width = 10
old_precision = 7
non_fractional = old_width - old_precision
new_width = 7
num_range_bits = 4
shifts = data_utils.select_range_shifts_array(measurements=measurements,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits)
shifts_list = shifts.tolist()
self.assertEqual(shifts_list, [-1, 0, 0, -1])
new_precision = new_width - non_fractional
quantized = data_utils.array_to_fp_shifted(measurements, width=new_width, precision=new_precision, shifts=shifts)
recovered = data_utils.array_to_float_shifted(quantized, precision=new_precision, shifts=shifts)
recovered_list = recovered.tolist()
self.assertEqual(recovered_list, [1.5, 2.0625, -3.5, 1.03125])
def test_range_arr_border(self):
measurements = np.array([1.75, 1.0])
old_width = 6
old_precision = 4
non_fractional = old_width - old_precision
new_width = 4
num_range_bits = 2
shifts = data_utils.select_range_shifts_array(measurements=measurements,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits)
shifts_list = shifts.tolist()
self.assertEqual(shifts_list, [0, 0])
new_precision = new_width - non_fractional
quantized = data_utils.array_to_fp_shifted(measurements, width=new_width, precision=new_precision, shifts=shifts)
recovered = data_utils.array_to_float_shifted(quantized, precision=new_precision, shifts=shifts)
recovered_list = recovered.tolist()
self.assertEqual(recovered_list, [1.75, 1.0])
def test_range_larger_integers(self):
measurements = np.array([65.0, 63.0, 64.0, 8192.0, 9216.0, 8192.0])
old_width = 16
old_precision = 0
non_fractional = old_width - old_precision
new_width = 13
num_range_bits = 3
shifts = data_utils.select_range_shifts_array(measurements=measurements,
old_width=old_width,
old_precision=old_precision,
new_width=new_width,
num_range_bits=num_range_bits)
shifts_list = shifts.tolist()
self.assertEqual(shifts_list, [-4, -4, -4, -1, -1, -1])
new_precision = new_width - non_fractional
quantized = data_utils.array_to_fp_shifted(measurements, width=new_width, precision=new_precision, shifts=shifts)
recovered = data_utils.array_to_float_shifted(quantized, precision=new_precision, shifts=shifts)
recovered_list = recovered.tolist()
self.assertEqual(recovered_list, measurements.tolist())
class TestExtrapolation(unittest.TestCase):
def test_one(self):
prev = np.array([1, 1, 1, 1], dtype=float)
curr = np.array([2, 2, 2, 2], dtype=float)
predicted = data_utils.linear_extrapolate(prev=prev, curr=curr, delta=1, num_steps=1)
expected = np.array([3, 3, 3, 3], dtype=float)
self.assertTrue(np.all(np.isclose(predicted, expected)))
def test_rand(self):
size = 10
rand = np.random.RandomState(seed=38)
m = rand.uniform(low=-1.0, high=1.0, size=size)
b = rand.uniform(low=-1.0, high=1.0, size=size)
t0 = np.ones_like(m) * 1.0
prev = m * t0 + b
t1 = np.ones_like(m) * 1.25
curr = m * t1 + b
predicted = data_utils.linear_extrapolate(prev=prev, curr=curr, delta=0.25, num_steps=1)
t2 = np.ones_like(m) * 1.5
expected = m * t2 + b
self.assertTrue(np.all(np.isclose(predicted, expected)))
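# A minimal sketch consistent with both extrapolation tests above (an assumed
# form, not the actual data_utils.linear_extrapolate implementation): estimate
# the slope from the last two samples spaced `delta` apart and project it
# forward by `num_steps` further steps of the same spacing.
def _linear_extrapolate_sketch(prev, curr, delta, num_steps):
    slope = (curr - prev) / delta
    return curr + slope * (delta * num_steps)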
class TestPadding(unittest.TestCase):
def test_round_is_multiple(self):
message = get_random_bytes(AES_BLOCK_SIZE)
key = get_random_bytes(AES_BLOCK_SIZE)
padded_size = data_utils.round_to_block(length=len(message), block_size=AES_BLOCK_SIZE)
padded_size += AES_BLOCK_SIZE # Account for the IV
expected_size = len(encrypt_aes128(message, key=key))
self.assertEqual(padded_size, expected_size)
def test_round_non_multiple(self):
message = get_random_bytes(9)
key = get_random_bytes(AES_BLOCK_SIZE)
padded_size = data_utils.round_to_block(length=len(message), block_size=AES_BLOCK_SIZE)
padded_size += AES_BLOCK_SIZE # Account for the IV
expected_size = len(encrypt_aes128(message, key=key))
self.assertEqual(padded_size, expected_size)
def test_pad_below(self):
message = get_random_bytes(9)
padded = data_utils.pad_to_length(message=message, length=AES_BLOCK_SIZE)
self.assertEqual(len(padded), AES_BLOCK_SIZE)
def test_pad_above(self):
message = get_random_bytes(20)
padded = data_utils.pad_to_length(message=message, length=AES_BLOCK_SIZE)
self.assertEqual(len(padded), 20)
def test_pad_equal(self):
message = get_random_bytes(AES_BLOCK_SIZE)
padded = data_utils.pad_to_length(message=message, length=AES_BLOCK_SIZE)
self.assertEqual(len(padded), AES_BLOCK_SIZE)
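# Sketch of the padding behaviour the three tests above imply (assumed, not the
# library code): pad up to `length` bytes when the message is shorter, and leave
# longer messages untouched. The tests only check lengths, so zero padding is an
# assumption here.
def _pad_to_length_sketch(message: bytes, length: int) -> bytes:
    if len(message) >= length:
        return message
    return message + bytes(length - len(message))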
class TestPacking(unittest.TestCase):
def test_pack_single_byte_ones(self):
values = [0xF, 0xF]
packed = data_utils.pack(values, width=4)
expected = bytes([0xFF])
self.assertEqual(packed, expected)
def test_pack_single_byte_diff(self):
values = [0x1, 0xF]
packed = data_utils.pack(values, width=4)
expected = bytes([0xF1])
self.assertEqual(packed, expected)
def test_pack_full_byte(self):
values = [0xAB, 0xCD]
packed = data_utils.pack(values, width=8)
expected = bytes([0xAB, 0xCD])
self.assertEqual(packed, expected)
def test_pack_multi_byte(self):
values = [0x1, 0x12]
packed = data_utils.pack(values, width=5)
expected = bytes([0x41, 0x2])
self.assertEqual(packed, expected)
def test_pack_multi_byte_three(self):
values = [0x1, 0x12, 0x06]
packed = data_utils.pack(values, width=5)
expected = bytes([0x41, 0x1A])
self.assertEqual(packed, expected)
def test_pack_multi_byte_values(self):
values = [0x101, 0x092]
packed = data_utils.pack(values, width=9)
expected = bytes([0x01, 0x25, 0x01])
self.assertEqual(packed, expected)
def test_unpack_single_byte_ones(self):
encoded = bytes([0xFF])
values = data_utils.unpack(encoded, width=4, num_values=2)
self.assertEqual(len(values), 2)
self.assertEqual(values[0], 0xF)
self.assertEqual(values[1], 0xF)
def test_unpack_single_byte_diff(self):
encoded = bytes([0xF1])
values = data_utils.unpack(encoded, width=4, num_values=2)
self.assertEqual(len(values), 2)
self.assertEqual(values[0], 0x1)
self.assertEqual(values[1], 0xF)
def test_unpack_full_byte(self):
encoded = bytes([0xAB, 0xCD])
values = data_utils.unpack(encoded, width=8, num_values=2)
expected = [0xAB, 0xCD]
self.assertEqual(values, expected)
def test_unpack_multi_byte(self):
encoded = bytes([0x41, 0x2])
values = data_utils.unpack(encoded, width=5, num_values=2)
self.assertEqual(len(values), 2)
self.assertEqual(values[0], 0x1)
self.assertEqual(values[1], 0x12)
def test_unpack_multi_byte_three(self):
encoded = bytes([0x41, 0x1A])
values = data_utils.unpack(encoded, width=5, num_values=3)
self.assertEqual(len(values), 3)
self.assertEqual(values[0], 0x1)
self.assertEqual(values[1], 0x12)
self.assertEqual(values[2], 0x06)
def test_unpack_multi_byte_values(self):
encoded = bytes([0x01, 0x25, 0x01])
values = data_utils.unpack(encoded, width=9, num_values=2)
self.assertEqual(len(values), 2)
self.assertEqual(values[0], 0x101)
self.assertEqual(values[1], 0x092)
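# A sketch of the bit layout the packing tests above imply (an inference, not
# the adaptiveleak implementation): each value occupies `width` bits, values are
# appended LSB-first, and the combined bit string is emitted as little-endian bytes.
def _pack_sketch(values, width):
    acc, num_bits = 0, 0
    for value in values:
        acc |= (value & ((1 << width) - 1)) << num_bits
        num_bits += width
    return acc.to_bytes((num_bits + 7) // 8, byteorder='little')

def _unpack_sketch(encoded, width, num_values):
    acc = int.from_bytes(encoded, byteorder='little')
    mask = (1 << width) - 1
    return [(acc >> (i * width)) & mask for i in range(num_values)]

# Example: _pack_sketch([0x1, 0x12, 0x06], width=5) == bytes([0x41, 0x1A]), the
# expected bytes in test_pack_multi_byte_three above.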
class TestGroupTargetBytes(unittest.TestCase):
def test_target_block(self):
encryption_mode = EncryptionMode.BLOCK
width = 7
num_features = 3
seq_length = 10
period = 10
rate = 0.2
energy_unit = EnergyUnit(policy_type=PolicyType.ADAPTIVE_HEURISTIC,
encoding_mode=EncodingMode.GROUP,
encryption_mode=encryption_mode,
collect_mode=CollectMode.LOW,
seq_length=seq_length,
num_features=num_features,
period=period)
target_energy = convert_rate_to_energy(collection_rate=rate,
width=width,
encryption_mode=encryption_mode,
collect_mode=CollectMode.LOW,
seq_length=seq_length,
num_features=num_features)
target_bytes = get_group_target_bytes(width=width,
collection_rate=rate,
num_features=num_features,
seq_length=seq_length,
encryption_mode=encryption_mode,
energy_unit=energy_unit,
target_energy=target_energy)
# Negative value occurs because we reduce the target by 2 frames
self.assertEqual(target_bytes, -14)
def test_target_stream(self):
encryption_mode = EncryptionMode.STREAM
width = 7
num_features = 3
seq_length = 10
period = 10
rate = 0.2
energy_unit = EnergyUnit(policy_type=PolicyType.ADAPTIVE_HEURISTIC,
encoding_mode=EncodingMode.GROUP,
encryption_mode=encryption_mode,
collect_mode=CollectMode.TINY,
seq_length=seq_length,
num_features=num_features,
period=period)
target_energy = convert_rate_to_energy(collection_rate=rate,
width=width,
encryption_mode=encryption_mode,
collect_mode=CollectMode.TINY,
seq_length=seq_length,
num_features=num_features)
target_bytes = get_group_target_bytes(width=width,
collection_rate=rate,
num_features=num_features,
seq_length=seq_length,
encryption_mode=encryption_mode,
energy_unit=energy_unit,
target_energy=target_energy)
# Negative target occurs because we reduce the target by 2 frames
self.assertEqual(target_bytes, -14)
def test_target_stream_large(self):
encryption_mode = EncryptionMode.STREAM
width = 16
num_features = 1
seq_length = 1250
period = 10
rate = 0.7
energy_unit = EnergyUnit(policy_type=PolicyType.ADAPTIVE_HEURISTIC,
encoding_mode=EncodingMode.GROUP,
encryption_mode=encryption_mode,
collect_mode=CollectMode.LOW,
seq_length=seq_length,
num_features=num_features,
period=period)
target_energy = convert_rate_to_energy(collection_rate=rate,
width=width,
encryption_mode=encryption_mode,
collect_mode=CollectMode.LOW,
seq_length=seq_length,
num_features=num_features)
target_bytes = get_group_target_bytes(width=width,
collection_rate=rate,
num_features=num_features,
seq_length=seq_length,
encryption_mode=encryption_mode,
energy_unit=energy_unit,
target_energy=target_energy)
self.assertEqual(target_bytes, 306)
class TestCalculateBytes(unittest.TestCase):
def test_byte_block(self):
# 42 bits -> 6 bytes of data + 2 byte sequence mask, padded to one 16 byte AES block; + 16 byte IV + 2 byte length = 34 bytes
data_bytes = data_utils.calculate_bytes(width=7,
num_features=3,
num_collected=2,
encryption_mode=EncryptionMode.BLOCK,
seq_length=10)
self.assertEqual(data_bytes, 34)
def test_byte_stream(self):
# 42 bits -> 6 bytes of data, 2 for sequence mask, 2 for length, 12 for nonce = 22 bytes
data_bytes = data_utils.calculate_bytes(width=7,
num_features=3,
num_collected=2,
seq_length=9,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(data_bytes, 22)
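# Worked check of the two counts above (assumed breakdown, consistent with the
# assertions): 7 bits * 3 features * 2 measurements = 42 bits -> 6 data bytes,
# plus a 2 byte collected-index mask. In STREAM mode that is 6 + 2 + 2 (length)
# + 12 (nonce) = 22 bytes; in BLOCK mode the 8 payload bytes are padded to one
# 16 byte AES block, then 16 (IV) + 2 (length) gives 34 bytes.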
def test_byte_stream_end_to_end_one(self):
# Encode and encrypt measurements
measurements = np.ones(shape=(2, 3))
collected_indices = [0, 6]
seq_length = 8
precision = 4
width = 8
encoded = encode_standard_measurements(measurements=measurements,
collected_indices=collected_indices,
seq_length=seq_length,
precision=precision,
width=width,
should_compress=False)
key = get_random_bytes(32)
encrypted = encrypt(message=encoded, key=key, mode=EncryptionMode.STREAM)
message_bytes = data_utils.calculate_bytes(width=width,
num_features=measurements.shape[1],
num_collected=measurements.shape[0],
seq_length=seq_length,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(message_bytes, len(encrypted) + LENGTH_SIZE)
def test_byte_stream_end_to_end_two(self):
# Encode and encrypt measurements
measurements = np.ones(shape=(2, 3))
collected_indices = [0, 6]
seq_length = 12
precision = 4
width = 12
encoded = encode_standard_measurements(measurements=measurements,
collected_indices=collected_indices,
seq_length=seq_length,
precision=precision,
width=width,
should_compress=False)
key = get_random_bytes(32)
encrypted = encrypt(message=encoded, key=key, mode=EncryptionMode.STREAM)
message_bytes = data_utils.calculate_bytes(width=width,
num_features=measurements.shape[1],
num_collected=measurements.shape[0],
seq_length=seq_length,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(message_bytes, len(encrypted) + LENGTH_SIZE)
def test_byte_block_end_to_end_one(self):
# Encode and encrypt measurements
measurements = np.ones(shape=(2, 3))
collected_indices = [0, 6]
seq_length = 8
precision = 4
width = 8
encoded = encode_standard_measurements(measurements=measurements,
collected_indices=collected_indices,
seq_length=seq_length,
precision=precision,
width=width,
should_compress=False)
key = get_random_bytes(AES_BLOCK_SIZE)
encrypted = encrypt(message=encoded, key=key, mode=EncryptionMode.BLOCK)
message_bytes = data_utils.calculate_bytes(width=width,
num_features=measurements.shape[1],
num_collected=measurements.shape[0],
seq_length=seq_length,
encryption_mode=EncryptionMode.BLOCK)
self.assertEqual(message_bytes, len(encrypted) + LENGTH_SIZE)
def test_byte_block_end_to_end_two(self):
# Encode and encrypt measurements
measurements = np.ones(shape=(2, 3))
collected_indices = [0, 6]
seq_length = 9
precision = 4
width = 12
encoded = encode_standard_measurements(measurements=measurements,
collected_indices=collected_indices,
seq_length=seq_length,
precision=precision,
width=width,
should_compress=False)
key = get_random_bytes(AES_BLOCK_SIZE)
encrypted = encrypt(message=encoded, key=key, mode=EncryptionMode.BLOCK)
message_bytes = data_utils.calculate_bytes(width=width,
num_features=measurements.shape[1],
num_collected=measurements.shape[0],
seq_length=seq_length,
encryption_mode=EncryptionMode.BLOCK)
self.assertEqual(message_bytes, len(encrypted) + LENGTH_SIZE)
def test_group_block(self):
# 11 bytes of data, 3 meta-data, 2 for sequence mask = 16 bytes -> 16 bytes + 16 byte IV + 2 byte length = 34 bytes
data_bytes = data_utils.calculate_grouped_bytes(widths=[6, 7],
num_features=3,
num_collected=4,
seq_length=10,
group_size=6,
encryption_mode=EncryptionMode.BLOCK)
self.assertEqual(data_bytes, 34)
def test_group_stream(self):
# 11 bytes of data, 3 meta-data, 2 for sequence mask, 12 for nonce, 2 for length = 30 bytes
data_bytes = data_utils.calculate_grouped_bytes(widths=[6, 7],
num_features=3,
num_collected=4,
seq_length=9,
group_size=6,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(data_bytes, 30)
def test_group_stream_unbalanced(self):
# 11 bytes of data, 3 meta-data, 2 for sequence mask, 12 for nonce, 2 for length = 30 bytes
data_bytes = data_utils.calculate_grouped_bytes(widths=[6, 7],
num_features=3,
num_collected=4,
seq_length=9,
group_size=6,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(data_bytes, 30)
def test_group_stream_large(self):
data_bytes = data_utils.calculate_grouped_bytes(widths=[7, 9],
num_features=6,
num_collected=26,
seq_length=50,
group_size=132,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(data_bytes, 167)
class TestGroupWidths(unittest.TestCase):
def test_widths_block_above(self):
widths = data_utils.get_group_widths(group_size=6,
num_collected=6,
num_features=3,
seq_length=10,
target_frac=0.5,
standard_width=8,
encryption_mode=EncryptionMode.BLOCK)
self.assertEqual(widths, [11, 11, 10])
def test_widths_block_below(self):
widths = data_utils.get_group_widths(group_size=6,
num_collected=3,
num_features=3,
seq_length=10,
target_frac=0.5,
standard_width=8,
encryption_mode=EncryptionMode.BLOCK)
self.assertEqual(widths, [24, 24])
def test_widths_stream_above(self):
widths = data_utils.get_group_widths(group_size=6,
num_collected=6,
num_features=3,
seq_length=10,
target_frac=0.5,
standard_width=8,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(widths, [5, 5, 5])
def test_widths_stream_below(self):
widths = data_utils.get_group_widths(group_size=6,
num_collected=3,
num_features=3,
seq_length=10,
target_frac=0.5,
standard_width=8,
encryption_mode=EncryptionMode.STREAM)
self.assertEqual(widths, [10, 10])
class TestPruning(unittest.TestCase):
def test_prune_two(self):
measurements = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 4.0], [3.5, 3.0]])
max_collected = 3
collected_indices = [1, 3, 5, 9, 10]
seq_length = 12
# Iter 0 -> Expected Diffs: [0, 2, 2.5, 2.0], Expected Diff Idx: [2, 4, 1, 2] -> Prune 1
# iter 1 -> Expected Diffs: [2, 2.5, 2.0], Expected Diff Idx: [4, 1, 2] -> Prune 3
# Errors -> [1.0, 0.5, 1.9] -> Prune 1, 2
pruned_features, pruned_indices = data_utils.prune_sequence(measurements=measurements,
max_collected=max_collected,
collected_indices=collected_indices,
seq_length=seq_length)
expected_features = np.array([[1.0, 1.0], [2.5, 4.0], [3.5, 3.0]])
expected_indices = [1, 9, 10]
self.assertTrue(np.all(np.isclose(pruned_features, expected_features)))
self.assertEqual(pruned_indices, expected_indices)
def test_prune_middle(self):
measurements = np.array([[1.0, 1.0], [1.5, 1.5], [3.0, 3.0], [3.2, 3.0], [3.5, 3.0]])
max_collected = 3
collected_indices = [1, 3, 5, 7, 10]
seq_length = 11
# Iter 0 -> Expected Diffs: [0, 2, 2.5, 2.0], Expected Diff Idx: [2, 2, 3, 1] -> Prune 1
# iter 1 -> Expected Diffs: [2, 2.5, 2.0], Expected Diff Idx: [4, 3, 1] -> Prune 4
# Errors: [1.0, 1.4, 0.0]
pruned_features, pruned_indices = data_utils.prune_sequence(measurements=measurements,
max_collected=max_collected,
collected_indices=collected_indices,
seq_length=seq_length)
expected_features =
|
np.array([[1.0, 1.0], [3.0, 3.0], [3.5, 3.0]])
|
numpy.array
|