| prompt | completion | api |
|---|---|---|
"""Test cases for recording basic functions."""
from unittest.mock import Mock, call
import numpy as np
import pytest
from zquantum.core.history.example_functions import (
Function2,
Function5,
function_1,
sum_of_squares,
)
from zquantum.core.history.recorder import recorder
from zquantum.core.history.save_conditions import SaveCondition, every_nth
@pytest.mark.parametrize(
"source_function,param",
[
(np.sin, 10),
(sum_of_squares, [1, 2, 3]),
(function_1,
| np.array([2, 3]) | numpy.array |
from math import atan2
from math import asin
from collections import namedtuple
import matplotlib
import matplotlib.pylab as plt
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
import numpy as np
def quat2euler(q):
qw = q[0]
qx = q[1]
qy = q[2]
qz = q[3]
qw2 = qw * qw
qx2 = qx * qx
qy2 = qy * qy
qz2 = qz * qz
x = atan2(2 * (qx * qw + qz * qy), (qw2 - qx2 - qy2 + qz2))
y = asin(2 * (qy * qw - qx * qz))
z = atan2(2 * (qx * qy + qz * qw), (qw2 + qx2 - qy2 - qz2))
return [x, y, z]
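# Quick sanity check (illustrative, not part of the original script): the identity
# quaternion [qw, qx, qy, qz] = [1, 0, 0, 0] should map to zero roll/pitch/yaw.
assert quat2euler([1.0, 0.0, 0.0, 0.0]) == [0.0, 0.0, 0.0]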
def crop_data(time, data, t_min, t_max):
crop_time = []
crop_data = []
idx = 0
for t in time:
if t >= t_min and t <= t_max:
crop_time.append(t)
crop_data.append(data[idx])
idx += 1
return (np.array(crop_time), np.array(crop_data))
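# Usage sketch for crop_data (illustrative values): keep only samples with 1.0 <= t <= 2.0.
t_crop, d_crop = crop_data([1.0, 1.5, 2.0, 2.5], [11, 12, 13, 14], 1.0, 2.0)
assert np.allclose(t_crop, [1.0, 1.5, 2.0]) and np.allclose(d_crop, [11, 12, 13])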
def lerp(a, b, t):
return a * (1.0 - t) + b * t
def slerp(p0, p1, t):
omega = np.arccos(np.dot(p0/np.linalg.norm(p0), p1/np.linalg.norm(p1)))
so =
| np.sin(omega) | numpy.sin |
import roslib
import sys
import rospy
import numpy as np
import datetime
import time
from geometry_msgs.msg import Pose
from dse_msgs.msg import PoseMarkers
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from dse_msgs.msg import InfFilterPartials
from dse_msgs.msg import InfFilterResults
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
import dse_constants
# Covariance based on distance as estimated for the SVGS short-range system
def svgs_R_from_range_SRT(range):
# Assuming linear error with a slope of:
# [x y z phi theta psi]
# x = [0.0515; 0.0515; 0.018; 0.1324; 0.1324; 0.1324]; # Degrees
x = np.transpose([0.0515, 0.0515, 0.018, 0.0023, 0.0023, 0.0023]) # Radians
# x = [0.0075; 0.0075; 0.0075; 0.0075; 0.0075; 0.0075]; # 5% of distance
# Slope values are for 3-sigma error, so dividing by 3
range = (range / 3) * np.eye(6)
r_std = np.multiply(range, x)
r_var = np.multiply(r_std, r_std)
# Compute variance from standard deviation
return r_var
# Covariance based on distance as estimated for the aruco system (Needs more testing)
def aruco_R_from_range(range):
# Assuming linear error with a slope of:
# [x y z phi theta psi]
# x = [0.0515; 0.0515; 0.018; 0.1324; 0.1324; 0.1324]; # Degrees
x = 2*np.transpose([0.01, 0.01, 0.01, 0.01, 0.01, 0.01]) # Radians
# x = [0.0075; 0.0075; 0.0075; 0.0075; 0.0075; 0.0075]; # 5% of distance
# Slope values are for 3-sigma error, so dividing by 3
range = range * np.eye(6)
r_std = np.multiply(range, x)
r_var = np.multiply(r_std, r_std)
# Compute variance from standard deviation
return r_var
# Covariance based on distance as estimated for the aruco system (Needs more testing)
def aruco_R_from_range_3D(range):
# Assuming linear error with a slope of:
# [x y z phi theta psi]
# x = [0.0515; 0.0515; 0.018; 0.1324; 0.1324; 0.1324]; # Degrees
### [x, y, theta] [m, m, radians]
# [0.2, 0.2, 0.2]
# was 20*
x = 20*np.transpose([0.01, 0.01, 0.01]) # Radians
# x = [0.0075; 0.0075; 0.0075; 0.0075; 0.0075; 0.0075]; # 5% of distance
# Slope values are for 3-sigma error, so dividing by 3
range = (range + 0.001) * np.eye(3)
r_std = np.multiply(range, x)
r_var = np.multiply(r_std, r_std)
# Compute variance from standard deviation
return r_var
# Covariance based on distance as estimated for the aruco system (Needs more testing)
def R_from_range(range, mult=None, add=None):
# Distance multiplicative noise:
# [x y z phi theta psi]
if mult is None:
mult = [0.05, 0.05, 0.05, 0.05, 0.05, 0.05] # per meter: 5cm xyz, 3 degrees ypr
# Additive noise
if add is None:
add = [0, 0, 0, 0, 0, 0] # per meter: 0m xyz, 0 degrees ypr
# Add 1mm so that even with zero range we don't explode
range = (range + 0.001) * np.eye(6)
r_std = np.multiply(range, mult) + add
r_var = np.multiply(r_std, r_std)
# Compute variance from standard deviation
return r_var
# Covariance based on distance as estimated for the aruco system (Needs more testing)
def R_from_range_3D(range, mult=None, add=None):
# Distance multiplicative noise:
# [x y z phi theta psi]
if mult is None:
mult = [0.05, 0.05, 0.05] # per meter: 5cm xy, 3 degrees yaw
# Additive noise
if add is None:
add = [0, 0, 0] # per meter: 0m xy, 0 degrees yaw
range = (range + 0.001) * np.eye(3)
r_std = np.multiply(range, mult) + add
r_var = np.multiply(r_std, r_std)
# Compute variance from standard deviation
return r_var
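# Example (illustrative numbers, not from the original code): a marker seen 2 m away with the
# default 5 %-per-meter multiplicative noise gives a diagonal variance of ~0.01 per axis.
def _R_from_range_3D_example():
    R_demo = R_from_range_3D(2.0)
    return np.allclose(np.diag(R_demo), ((2.0 + 0.001) * 0.05) ** 2)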
# Compute the 2D rotation matrix from the angle theta
def theta_2_rotm(theta):
R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
return R
# Compute the 2D rotation matrix from the angle theta
def rotm_2_theta(R):
theta = np.arctan2(-R[0, 1], R[0, 0])
return theta
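# Consistency sketch (illustrative, not from the original code): rotm_2_theta inverts
# theta_2_rotm for any angle in (-pi, pi].
def _rotm_round_trip_example(theta=0.3):
    return np.isclose(rotm_2_theta(theta_2_rotm(theta)), theta)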
# Converts a quaternion into euler angles, using the euler order described in dse_constants.py
def quat2eul(quat):
r = R.from_quat(quat)
eul = r.as_euler(dse_constants.EULER_ORDER)
return eul
# Converts euler angles into a quaternion, using the euler order described in dse_constants.py
def eul2quat(eul):
r = R.from_euler(dse_constants.EULER_ORDER, eul[:, 0])
quat = r.as_quat()
return quat
# Expects a quaternion in the form: orientation.x,y,z,w
def quat_from_pose2eul(orientation):
quat = [0, 0, 0, 0]
quat[0] = orientation.x
quat[1] = orientation.y
quat[2] = orientation.z
quat[3] = orientation.w
eul = quat2eul(quat)
return eul
# Expects a quaternion in the form: orientation.x,y,z,w
def euler2quat_from_pose(orientation, euler):
quat = eul2quat(euler)
orientation.x = quat[0]
orientation.y = quat[1]
orientation.z = quat[2]
orientation.w = quat[3]
return orientation
def state_12D_to_6D(x_12D):
num_objs = int(len(x_12D) / 12)
x_6D = np.zeros((num_objs * 6, 1))
for i in range(num_objs):
i_6D_low = 6 * i
i_12D_low = 12 * i
x_6D[i_6D_low + 0] = x_12D[i_12D_low + 0]
x_6D[i_6D_low + 1] = x_12D[i_12D_low + 1]
x_6D[i_6D_low + 2] = x_12D[i_12D_low + 3]
x_6D[i_6D_low + 3] = x_12D[i_12D_low + 6]
x_6D[i_6D_low + 4] = x_12D[i_12D_low + 7]
x_6D[i_6D_low + 5] = x_12D[i_12D_low + 9]
return x_6D
# Expects a pose in the form: x, y, z, w
def state_from_pose(pose):
euler_orientation = quat_from_pose2eul(pose.orientation)
x = np.array([pose.position.x, pose.position.y, pose.position.z, euler_orientation[0], euler_orientation[1], euler_orientation[2]])[:, None]
return x
# Expects a pose in the form: x, y, z, w
def state_from_pose_3D(pose):
euler_orientation = quat_from_pose2eul(pose.orientation)
x = np.array([pose.position.x, pose.position.y, euler_orientation[0]])[:, None]
return x
# Expects a state in the form: x, y, z, eul_z, eul_y, eul_x
def pose_from_state(x):
pose = Pose()
pose.position.x = x[0, 0]
pose.position.y = x[1, 0]
pose.position.z = x[2, 0]
pose.orientation = euler2quat_from_pose(pose.orientation, x[3:6])
return pose
# Expects a state in the form: x, y, eul_z
def pose_from_state_3D(x):
pose = Pose()
pose.position.x = x[0, 0]
pose.position.y = x[1, 0]
pose.position.z = 0
euler_angles = np.array([x[2, 0], 0, 0])[:, None]
pose.orientation = euler2quat_from_pose(pose.orientation, euler_angles)
return pose
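# Round-trip sketch (illustrative, not from the original code): packing a 2D state
# [x, y, yaw] into a geometry_msgs Pose and reading it back should be lossless.
def _pose_state_round_trip_example():
    x = np.array([[1.0], [2.0], [0.3]])
    return np.allclose(state_from_pose_3D(pose_from_state_3D(x)), x)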
# Fill and return a pose array with values from the state variable x
def pose_array_from_state(pose_array, x, dim_state, dim_obs):
num_objs = int(len(x) / dim_state)
for i in range(num_objs):
i_low = dim_state * i
i_high = i_low + dim_obs
x_i = x[i_low:i_high]
if dim_state == 6:
pose_array.poses.append(pose_from_state_3D(x_i))
else:
pose_array.poses.append(pose_from_state(x_i))
return pose_array
# Fill and return a pose array with values from the state variable x
def state_from_pose_array(pose_array, dim_state, dim_obs):
num_objs = np.shape(pose_array.poses)[0]
x = np.zeros((num_objs * dim_state, 1))
for i in range(num_objs):
i_low = dim_state * i
i_high = i_low + dim_obs
if dim_state == 6:
x[i_low:i_high] = state_from_pose_3D(pose_array.poses[i])
else:
x[i_low:i_high] = state_from_pose(pose_array.poses[i])
return x
# Expects a pose in the form: x, y, z, w
def measurement_from_pose(pose):
euler_orientation = quat_from_pose2eul(pose.orientation)
x = np.concatenate(([pose.position.x, pose.position.y, pose.position.z], euler_orientation))[:, None]
return x
# Expects a pose in the form: x, y, z, w
def measurement_from_pose_3D(pose):
euler_orientation = quat_from_pose2eul(pose.orientation)
x = np.array([pose.position.x, pose.position.y, euler_orientation[0]])[:, None]
return x
# Expects a state in the form: x, y, z, eul_z, eul_y, eul_x
def pose_from_measurement(x):
pose = Pose()
pose.position.x = x[0, 0]
pose.position.y = x[1, 0]
pose.position.z = x[2, 0]
pose.orientation = euler2quat_from_pose(pose.orientation, x[3:6])
return pose
# Expects a state in the form: x, y, eul_z
def pose_from_measurement_3D(x):
pose = Pose()
pose.position.x = x[0, 0]
pose.position.y = x[1, 0]
pose.position.z = 0
euler_angles = np.array([x[2, 0], 0, 0])[:, None]
pose.orientation = euler2quat_from_pose(pose.orientation, euler_angles)
return pose
# Fill and return a pose array with values from the measurement z
def pose_array_from_measurement(pose_array, z, dim_obs):
num_objs = int(len(z) / dim_obs)
for i in range(num_objs):
i_low = dim_obs * i
i_high = i_low + dim_obs
x_i = z[i_low:i_high]
if dim_obs == 3:
pose_array.poses.append(pose_from_measurement_3D(x_i))
else:
pose_array.poses.append(pose_from_measurement(x_i))
return pose_array
# Grab the relevant chunk from the input matrix
def sub_matrix(matrix, ids, id, size):
i = np.where(ids == id)[0][0]
i_min = i * size
i_max = i_min + size
return matrix[i_min:i_max, i_min:i_max]
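# Usage sketch (illustrative ids and sizes, not from the original code): pull agent 7's
# 3x3 block out of a stacked block-diagonal covariance over the agents [3, 7].
def _sub_matrix_example():
    ids = np.array([3, 7])
    P = np.diag([1.0, 1.0, 1.0, 2.0, 2.0, 2.0])
    return sub_matrix(P, ids, 7, 3)  # -> 2.0 * np.eye(3)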
# Fill in a multi-array ROS message type with a 2D input array
def multi_array_2d_input(mat, multi_arr):
multi_arr.layout.dim.append(MultiArrayDimension())
multi_arr.layout.dim.append(MultiArrayDimension())
multi_arr.layout.dim[0].label = 'rows'
multi_arr.layout.dim[0].size = np.shape(mat)[0]
multi_arr.layout.dim[0].stride = np.shape(mat)[0]*np.shape(mat)[1]
multi_arr.layout.dim[1].label = 'cols'
multi_arr.layout.dim[1].size = np.shape(mat)[1]
multi_arr.layout.dim[1].stride = np.shape(mat)[1]
multi_arr.layout.data_offset = 0
multi_arr.data = mat.flatten()
return multi_arr
# Grab and return a 2D array from a multi-array ROS message
def multi_array_2d_output(multi_arr):
arr = np.array(multi_arr.data)
shape = [multi_arr.layout.dim[0].size, multi_arr.layout.dim[1].size]
mat = arr.reshape(shape)
return mat
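# Round-trip sketch for the two helpers above (illustrative, not from the original code):
# a 2x3 matrix packed into a Float64MultiArray and unpacked again is unchanged.
def _multi_array_round_trip_example():
    mat = np.arange(6.0).reshape(2, 3)
    msg = multi_array_2d_input(mat, Float64MultiArray())
    return np.allclose(multi_array_2d_output(msg), mat)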
# def observe_agent2_from_agent1_Hz(agent1_global, agent2_global):
# H = dual_relative_obs_jacobian(agent1_global, agent2_global)
# z = H.dot(np.concatenate(agent1_global, agent2_global))
# return z
#
#
# def observe_agent2_from_agent1_Hz_3D(agent1_global, agent2_global):
# H = dual_relative_obs_jacobian_3D(agent1_global, agent2_global)
# z = H.dot(np.concatenate(agent1_global, agent2_global))
# return z
def agent2_to_frame_agent1(agent1_global, agent2_global):
t1 = agent1_global[0:3]
r1 = R.from_euler(dse_constants.EULER_ORDER, agent1_global[3:6, 0])
R1 = r1.as_matrix()
t2 = agent2_global[0:3]
r2 = R.from_euler(dse_constants.EULER_ORDER, agent2_global[3:6, 0])
R2 = r2.as_matrix()
tz = (np.transpose(R1).dot(t2) - np.transpose(R1).dot(t1))[:, 0]
Rz = np.transpose(R1).dot(R2)
rz = R.from_matrix(Rz)
rz = rz.as_euler(dse_constants.EULER_ORDER)
z = np.concatenate((tz, rz))[:, None]
return z
def agent2_to_frame_agent1_3D(agent1_global, agent2_global):
t1 = agent1_global[0:2, 0]
R1 = theta_2_rotm(agent1_global[2, 0])
t2 = agent2_global[0:2, 0]
R2 = theta_2_rotm(agent2_global[2, 0])
zt = np.transpose(R1).dot(t2) - np.transpose(R1).dot(t1)
zR = np.transpose(R1).dot(R2)
zr = [-np.arctan2(zR[0, 1], zR[0, 0])]
z = np.concatenate((zt, zr))[:, None]
return z
def agent2_from_frame_agent1(agent2_in_agent1, agent1_global):
t1 = agent1_global[0:3]
r1 = R.from_euler(dse_constants.EULER_ORDER, agent1_global[3:6, 0])
R1 = r1.as_matrix()
t2 = agent2_in_agent1[0:3]
r2 = R.from_euler(dse_constants.EULER_ORDER, agent2_in_agent1[3:6, 0])
R2 = r2.as_matrix()
tz = (R1.dot(t2) + t1)[:, 0]
Rz = R1.dot(R2)
rz = R.from_matrix(Rz)
rz = rz.as_euler(dse_constants.EULER_ORDER)
z = np.concatenate((tz, rz))[:, None]
return z
def agent2_from_frame_agent1_3D(agent1_global, agent2_in_agent1):
t1 = agent1_global[0:2, 0]
R1 = theta_2_rotm(agent1_global[2, 0])
t2 = agent2_in_agent1[0:2, 0]
R2 = theta_2_rotm(agent2_in_agent1[2, 0])
tz = (R1.dot(t2) + t1)
Rz = R1.dot(R2)
rz = [-np.arctan2(Rz[0, 1], Rz[0, 0])]
z = np.concatenate((tz, rz))[:, None]
return z
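# Consistency sketch (illustrative poses, not from the original code): expressing agent 2
# in agent 1's frame and mapping it back to the global frame recovers the original pose.
def _frame_transform_round_trip_example():
    agent1_global = np.array([[1.0], [2.0], [0.5]])   # x, y, theta
    agent2_global = np.array([[3.0], [-1.0], [1.2]])
    z = agent2_to_frame_agent1_3D(agent1_global, agent2_global)
    return np.allclose(agent2_from_frame_agent1_3D(agent1_global, z), agent2_global)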
def relative_states_from_global_3D(rel_id, ids, states, dim_state, dim_obs):
rel_index = np.where(ids == rel_id)[0][0]
obj_ids = ids[np.where(ids != rel_id)]
min_index = rel_index * dim_state
max_index = min_index + dim_state
rel_state = states[min_index:max_index]
indices = np.ones(np.shape(states)[0], dtype=bool)
indices[min_index:max_index] = np.zeros((dim_state))
obj_states = states[indices, :]
# print('agent id: ' + str(rel_id) + ' other ids: ' + str(obj_ids))
# print('agent state: ' + str(rel_state) + ' other states: ' + str(obj_states))
transformed_states = np.zeros(np.shape(obj_states))
for i in range(len(obj_ids)):
min_index = i * dim_state
max_index = min_index + dim_obs
obj_state = obj_states[min_index:max_index]
transformed_state = agent2_to_frame_agent1_3D(rel_state[0:dim_obs, :], obj_state)
transformed_states[min_index:max_index] = transformed_state
return obj_ids, transformed_states
# Compute the observation jacobian H for a 6D-obs system.
# Currently no functions for the angles, DO NOT USE
def dual_relative_obs_jacobian(state1, state2):
[x1, y1, z1, p1, t1, s1] = state1
[x2, y2, z2, p2, t2, s2] = state2
Jx = [-np.cos(s1) * np.cos(t1), -np.cos(t1) * np.sin(s1), np.sin(t1), 0, 0, 0,
np.cos(s1) * np.cos(t1), np.cos(t1) * np.sin(s1), -np.sin(t1), 0, 0, 0]
Jy = [np.cos(p1) * np.sin(s1) - np.cos(s1) * np.sin(p1) * np.sin(t1),
- np.cos(p1) * np.cos(s1) - np.sin(p1) * np.sin(s1) * np.sin(t1), -np.cos(t1) * np.sin(p1), 0, 0, 0,
np.cos(s1) * np.sin(p1) * np.sin(t1) - np.cos(p1) * np.sin(s1),
np.cos(p1) * np.cos(s1) + np.sin(p1) * np.sin(s1) * np.sin(t1), np.cos(t1) * np.sin(p1), 0, 0, 0]
Jz = [- np.sin(p1) * np.sin(s1) - np.cos(p1) * np.cos(s1) * np.sin(t1),
np.cos(s1) * np.sin(p1) - np.cos(p1) * np.sin(s1) * np.sin(t1), -np.cos(p1) * np.cos(t1), 0, 0, 0,
np.sin(p1) * np.sin(s1) + np.cos(p1) * np.cos(s1) * np.sin(t1),
np.cos(p1) * np.sin(s1) * np.sin(t1) - np.cos(s1) * np.sin(p1), np.cos(p1) * np.cos(t1), 0, 0, 0]
Jp = [0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0]
Jt = [0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0]
Js = [0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0]
J = [Jx, Jy, Jz, Jp, Jt, Js]
return J
# Compute the observation jacobian H for a 3D-observation system
# Given two state vectors in the global coordinate system, x1 and x2
# What is the jacobian of the local observation of x2 from x1
def dual_relative_obs_jacobian_3D(state1, state2):
[x1, y1, t1] = state1
[x2, y2, t2] = state2
Jx = [-np.cos(t1), -np.sin(t1), 0, np.cos(t1), np.sin(t1), 0]
Jy = [np.sin(t1), -np.cos(t1), 0, -np.sin(t1), np.cos(t1), 0]
Jt = [0, 0, -1, 0, 0, 1]
J = [Jx, Jy, Jt]
return J
# Compute the observation jacobian H for a 3D-observation system
# Given two state vectors in the global coordinate system, x1 and x2
# What is the jacobian of the local observation of x2 from x1
def jacobian_fixed_to_obs_3D(state1, state2):
# z = Hx
# z = meas from this to object (of object from this, object in frame this)
# x = meas from fixed to object (of object from fixed, object in frame fixed)
#z = agent2_to_frame_agent1_3D(state1=this_in_frame_fixed, state2=obj_in_frame_fixed)
[x1, y1, t1] = state1
[x2, y2, t2] = state2
Jx = [-np.cos(t1), -np.sin(t1), 0, np.cos(t1), np.sin(t1), 0]
Jy = [np.sin(t1), -np.cos(t1), 0, -np.sin(t1), np.cos(t1), 0]
Jt = [0, 0, -1, 0, 0, 1]
J = [Jx, Jy, Jt]
return J
# If the agent doesn't know about a newly observed agent, extend all variables to accommodate it
def extend_arrays(observed_ids, id_list, Y_11, y_11, dim_state):
# Compute x and P so that if there are no new agents it doesn't error out
x_11 = np.linalg.inv(Y_11).dot(y_11)
P_11 = np.linalg.inv(Y_11)
for id in observed_ids:
# If we found a new agent
if not np.isin(id, id_list):
id_list = np.concatenate((id_list, [id]))
dim = len(id_list) * dim_state
# Extend the information matrix Y
Y_11_tmp = dse_constants.INF_MATRIX_INITIAL * np.eye(dim)
Y_11_tmp[0:np.shape(Y_11)[0], 0:np.shape(Y_11)[0]] = Y_11
Y_11 = Y_11_tmp
# Extend the information vector y
y_11_tmp = dse_constants.INF_VECTOR_INITIAL * np.arange(1, dim+1)[:, None]
y_11_tmp[0:np.shape(y_11)[0]] = y_11
y_11 = y_11_tmp
# re-compute x and P to match
x_11 = np.linalg.inv(Y_11).dot(y_11)
P_11 = np.linalg.inv(Y_11)
return id_list, Y_11, y_11, P_11, x_11
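# Information-form sketch (illustrative numbers, not from the original code): the filter
# above stores (Y, y); the equivalent mean and covariance are x = inv(Y).y and P = inv(Y).
def _information_to_moments_example():
    Y = 4.0 * np.eye(3)
    y = np.array([[4.0], [8.0], [0.0]])
    x, P = np.linalg.inv(Y).dot(y), np.linalg.inv(Y)
    return np.allclose(x, [[1.0], [2.0], [0.0]]) and np.allclose(P, 0.25 * np.eye(3))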
# Fill in the matrices F and Q:
# F - Motion Jacobian
# Q - Motion Covariance
def fill_FQ(id_list, dt, x_11, dim_state, dim_obs):
n_stored = len(id_list)
F_0 = np.zeros((n_stored * dim_state, n_stored * dim_state))
Q_0 = np.zeros((n_stored * dim_state, n_stored * dim_state))
# Fill in Q and F (Different for waypoint vs. robot)
for i in range(len(id_list)):
i_low = dim_state * i
i_high = i_low + dim_state
# # If we are looking at ID <5, it is a waypoint and as such doesn't move (F is identity matrix)
# if id_list[i] < 5:
# #Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, 0.000001/dt)
# Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state)
# F_0[i_low:i_high, i_low:i_high] = f_eye(dim_state)
# else:
# # Else use the unicycle model
if dim_obs == 3:
# Q is a function of distance traveled in the last time step
#Q_0[i_low:i_high, i_low:i_high] = q_distance_3D(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, 0.000001)
F_0[i_low:i_high, i_low:i_high] = f_unicycle_3D(dt, x_11, i, dim_state)
else:
# Q is a function of distance traveled in the last time step
#Q_0[i_low:i_high, i_low:i_high] = q_distance(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, 0.000001)
F_0[i_low:i_high, i_low:i_high] = f_unicycle(dt, x_11, i, dim_state)
return F_0, Q_0
# Fill in the matrices F and Q:
# F - Motion Jacobian
# Q - Motion Covariance
def fill_FQ_no_control(id_list, this_agent_id, dt, x_11, dim_state, dim_obs):
n_stored = len(id_list)
F_0 = np.zeros((n_stored * dim_state, n_stored * dim_state))
Q_0 = np.zeros((n_stored * dim_state, n_stored * dim_state))
# Fill in Q and F (Different for waypoint vs. robot)
for i in range(len(id_list)):
i_low = dim_state * i
i_high = i_low + dim_state
# # If we are looking at ID <5, it is a waypoint and as such doesn't move (F is identity matrix)
# if id_list[i] < 5:
# #Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, 0.000001/dt)
# Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state)
# F_0[i_low:i_high, i_low:i_high] = f_eye(dim_state)
# else:
# # Else use the unicycle model
if dim_obs == 3:
# Q is a function of distance traveled in the last time step
# Q_0[i_low:i_high, i_low:i_high] = q_distance_3D(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, 0.000001)
F_0[i_low:i_high, i_low:i_high] = f_unicycle_3D(dt, x_11, i, dim_state)
else:
# Q is a function of distance traveled in the last time step
# Q_0[i_low:i_high, i_low:i_high] = q_distance(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, 0.000001)
F_0[i_low:i_high, i_low:i_high] = f_unicycle(dt, x_11, i, dim_state)
return F_0, Q_0
# Fill in the matrices F and Q:
# F - Motion Jacobian
# Q - Motion Covariance
def fill_FQ_our_control(id_list, this_agent_id, dt, x_11, dim_state):
n_stored = len(id_list)
F_0 = np.zeros((n_stored * dim_state, n_stored * dim_state))
Q_0 = np.zeros((n_stored * dim_state, n_stored * dim_state))
# Fill in Q and F (Different for waypoint vs. robot)
for i in range(len(id_list)):
i_low = dim_state * i
i_high = i_low + dim_state
if id_list[i] == this_agent_id:
if dim_state == 6:
# Q is a function of distance traveled in the last time step
# Q_0[i_low:i_high, i_low:i_high] = q_distance_3D(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, (0.01*dt)**2)
F_0[i_low:i_high, i_low:i_high] = f_unicycle_3D(dt, x_11, i, dim_state)
else:
# Q is a function of distance traveled in the last time step
# Q_0[i_low:i_high, i_low:i_high] = q_distance(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, (0.01*dt)**2)
F_0[i_low:i_high, i_low:i_high] = f_unicycle(dt, x_11, i, dim_state)
else:
if dim_state == 6:
# Q is a function of distance traveled in the last time step
# Q_0[i_low:i_high, i_low:i_high] = q_distance_3D(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, (0.04*dt)**2)
F_0[i_low:i_high, i_low:i_high] = f_unicycle_3D(dt, x_11, i, dim_state)
else:
# Q is a function of distance traveled in the last time step
# Q_0[i_low:i_high, i_low:i_high] = q_distance(dt, x_11, i, dim_state)
Q_0[i_low:i_high, i_low:i_high] = q_const(dim_state, (0.04*dt)**2)
F_0[i_low:i_high, i_low:i_high] = f_unicycle(dt, x_11, i, dim_state)
return F_0, Q_0
# Fill in the matrices R and H, as well as the vector z
# R - Measurement Covariance
# H - Measurement Jacobian
# z - The measurement itself
def fill_RHz(id_list, my_id, observed_ids, observed_poses, x_11, euler_order, dim_state, dim_obs, R_var = 0.001):
# Define the sizes of each variable
n_stored = len(id_list)
n_obs = len(observed_ids)
R_0 = R_var * np.eye(n_obs * dim_obs)
H_0 = np.zeros((n_obs * dim_obs, n_stored * dim_state))
z_0 = np.zeros((n_obs * dim_obs, 1))
# Fill in H and Z
for i in range(len(observed_ids)):
id = observed_ids[i]
index = np.where(id_list == id)[0][0] # Index of observed agent
obs_index = np.where(id_list == my_id)[0][0] # Index of observing agent
i_low = dim_obs * i
i_high = i_low + dim_obs
# Compute the euler angles from the quaternion passed in
quat = np.zeros(4)
quat[0] = observed_poses[i].pose.pose.orientation.x
quat[1] = observed_poses[i].pose.pose.orientation.y
quat[2] = observed_poses[i].pose.pose.orientation.z
quat[3] = observed_poses[i].pose.pose.orientation.w
r = R.from_quat(quat)
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_quat.html
z_eul = r.as_euler(euler_order)
# Different functions for 3D vs. 6D observation
if dim_obs == 3:
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y])
z_eul = [z_eul[0]]
R_0[i_low:i_high, i_low:i_high] = covariance_matrix_to_2D_meas_cov(ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance))
H_0 = h_camera_3D(H_0, x_11, i, obs_index, index, dim_state, dim_obs)
else:
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y, observed_poses[i].pose.pose.position.z])
R_0[i_low:i_high, i_low:i_high] = ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance)
H_0 = h_camera(H_0, x_11, i, obs_index, index, dim_state, dim_obs)
z_0[i_low:i_high] = np.concatenate((z_pos, z_eul))[:, None]
return R_0, H_0, z_0
# Fill in the matrices R and H, as well as the vector z
# R - Measurement Covariance
# H - Measurement Jacobian
# z - The measurement itself
def fill_RHz_fixed(id_list, my_id, observed_ids, observed_poses, x_11, euler_order, dim_state, dim_obs, fixed_ids, fixed_est):
# Define the sizes of each variable
n_stored = len(id_list)
n_obs = len(observed_ids)
R_var = 0.01
R_0 = R_var * np.eye(n_obs * dim_obs)
H_0 = np.zeros((n_obs * dim_obs, n_stored * dim_state))
z_0 = np.zeros((n_obs * dim_obs, 1))
zero_3D = np.zeros((3, 1))
zero_6D = np.zeros((6, 1))
# Fill in H and Z
for i in range(len(observed_ids)):
id = observed_ids[i]
i_low = dim_obs * i
i_high = i_low + dim_obs
# fixed_low = dim_obs * index
# fixed_high = fixed_low + dim_obs
# Compute the euler angles from the quaternion passed in
quat = np.zeros(4)
quat[0] = observed_poses[i].pose.pose.orientation.x
quat[1] = observed_poses[i].pose.pose.orientation.y
quat[2] = observed_poses[i].pose.pose.orientation.z
quat[3] = observed_poses[i].pose.pose.orientation.w
r = R.from_quat(quat)
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_quat.html
z_eul = r.as_euler(euler_order)
if id in fixed_ids:
obs_index = np.where(id_list == my_id)[0][0] # Index of observing agent
# Different functions for 3D vs. 6D observation
if dim_obs == 3:
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y])
z_eul = [z_eul[0]]
R_0[i_low:i_high, i_low:i_high] = covariance_matrix_to_2D_meas_cov(
ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance))
H_0 = h_camera_zero_3D(H_0, x_11, i, obs_index, dim_state, dim_obs)
# z_meas = state_from_pose_3D(observed_poses[i].pose.pose)
# # Compute the estimated position of this agent relative to the global zero.
# #print('tag ', id, ' in frame agent 0 = ', z_meas)
# agent2_in_frame_agent1 = agent2_to_frame_agent1_3D(z_meas, zero_3D)
# #print('inverted measurement ', agent2_in_frame_agent1)
# fixed_state = fixed_est[fixed_low:fixed_high, None]
# #print('tag 0 in global', fixed_state)
# z = agent2_from_frame_agent1_3D(fixed_state, agent2_in_frame_agent1)
# #print('agent 0 in frame tag ', id, ' = ', z)
# dist = np.linalg.norm(z[0:2, 0])
# R_0[i_low:i_high, i_low:i_high] = 1 * gazebo_R_from_range_3D(dist)
# H_0 = h_camera_zero_3D(H_0, x_11, i, obs_index, dim_state, dim_obs)
else:
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y, observed_poses[i].pose.pose.position.z])
R_0[i_low:i_high, i_low:i_high] = ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance)
H_0 = h_camera_zero(H_0, x_11, i, obs_index, dim_state, dim_obs)
# z_meas = state_from_pose(observed_poses[i].pose)
# # Compute the estimated position of this agent relative to the global zero.
# agent2_in_frame_agent1 = agent2_to_frame_agent1(z_meas, zero_6D)
# z = agent2_from_frame_agent1(fixed_est[index, None], agent2_in_frame_agent1)
# dist = np.linalg.norm(z[0:3, 0])
# R_0[i_low:i_high, i_low:i_high] = 1 * gazebo_R_from_range(dist)
# H_0 = h_camera_zero(H_0, x_11, i, obs_index, dim_state, dim_obs)
z_0[i_low:i_high] = np.concatenate((z_pos, z_eul))[:, None]
else:
index = np.where(id_list == id)[0][0] # Index of observed agent
obs_index = np.where(id_list == my_id)[0][0] # Index of observing agent
# Different functions for 3D vs. 6D observation
if dim_obs == 3:
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y])
z_eul = [z_eul[0]]
R_0[i_low:i_high, i_low:i_high] = covariance_matrix_to_2D_meas_cov(
ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance))
H_0 = h_camera_3D(H_0, x_11, i, obs_index, index, dim_state, dim_obs)
else:
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y,
observed_poses[i].pose.pose.position.z])
R_0[i_low:i_high, i_low:i_high] = ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance)
H_0 = h_camera(H_0, x_11, i, obs_index, index, dim_state, dim_obs)
z_0[i_low:i_high] = np.concatenate((z_pos, z_eul))[:, None]
return R_0, H_0, z_0
# Fill in the matrices R and H, as well as the vector z
# R - Measurement Covariance
# H - Measurement Jacobian
# z - The measurement itself
def fill_RHz_fixed_our_vel_2D(id_list, my_id, observed_ids, observed_poses, x_11, euler_order, dim_state, fixed_ids,
fixed_est, ctrl):
# Define the sizes of each variable
n_stored = len(id_list)
n_obs = len(observed_ids) + 1
R_var = 1
R_0 = R_var * np.eye(n_obs * 6)
H_0 = np.zeros((n_obs * 6, n_stored * dim_state))
z_0 = np.zeros((n_obs * 6, 1))
zero_3D = np.zeros((3, 1))
zero_6D = np.zeros((6, 1))
# Add in our velocity measurements
index = np.where(my_id == id_list)[0][0]
i_low = 6 * (n_obs - 1)
i_high = i_low + 6
# The control velocity is in our local frame, but the estimates are in the global frame, so we have to transform it.
# This is wrong, but it works. H should be the derivative of this rotation matrix, not just the identity.
xy_vel = np.array([ctrl.linear.x, ctrl.linear.y])
[local_x_vel, local_y_vel] = xy_vel.dot(theta_2_rotm(x_11[(6*index)+2]))
z_vel = np.array([local_x_vel, local_y_vel, ctrl.angular.z])
R_0[i_low+3:i_high, i_low+3:i_high] = np.diag([0.01**2, 0.01**2, 0.01**2])
H_0[i_low+3:i_high, (6*index)+3:(6*index)+6] = np.eye(3)
z_0[i_low+3:i_high] = z_vel[:, None]
# Fill in H and Z
for i in range(len(observed_ids)):
id = observed_ids[i]
i_low = 6 * i
i_high = i_low + 3
# fixed_low = dim_obs * index
# fixed_high = fixed_low + dim_obs
# Compute the euler angles from the quaternion passed in
quat = np.zeros(4)
quat[0] = observed_poses[i].pose.pose.orientation.x
quat[1] = observed_poses[i].pose.pose.orientation.y
quat[2] = observed_poses[i].pose.pose.orientation.z
quat[3] = observed_poses[i].pose.pose.orientation.w
r = R.from_quat(quat)
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_quat.html
z_eul = r.as_euler(euler_order)
if id in fixed_ids:
obs_index = np.where(id_list == my_id)[0][0] # Index of observing agent
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y])
z_eul = [z_eul[0]]
R_0[i_low:i_high, i_low:i_high] = covariance_matrix_to_2D_meas_cov(
ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance))
H_0 = h_camera_zero_3D(H_0, x_11, i, obs_index, dim_state, 6)
z_0[i_low:i_high] = np.concatenate((z_pos, z_eul))[:, None]
else:
index = np.where(id_list == id)[0][0] # Index of observed agent
obs_index = np.where(id_list == my_id)[0][0] # Index of observing agent
z_pos = np.array([observed_poses[i].pose.pose.position.x, observed_poses[i].pose.pose.position.y])
z_eul = [z_eul[0]]
R_0[i_low:i_high, i_low:i_high] = covariance_matrix_to_2D_meas_cov(
ros_covariance_to_6x6_covariance(observed_poses[i].pose.covariance))
H_0 = h_camera_3D(H_0, x_11, i, obs_index, index, dim_state, 6)
z_0[i_low:i_high] = np.concatenate((z_pos, z_eul))[:, None]
return R_0, H_0, z_0
# Fill in the matrices R and H, as well as the vector z
# R - Measurement Covariance
# H - Measurement Jacobian
# z - The measurement itself
def fill_RHz_gazebo(id_list, my_id, observed_ids, observed_poses, x_11, euler_order, dim_state, dim_obs, R_var=0.001):
# Define the sizes of each variable
n_stored = len(id_list)
n_obs = len(observed_ids)
R_0 = R_var * np.eye(n_obs * dim_obs)
H_0 = np.zeros((n_obs * dim_obs, n_stored * dim_state))
z_0 = np.zeros((n_obs * dim_obs, 1))
# Fill in H and Z
for i in range(len(observed_ids)):
id = observed_ids[i]
index = np.where(id_list == id)[0][0] # Index of observed agent
obs_index = np.where(id_list == my_id)[0][0] # Index of observing agent
i_low = dim_obs * i
i_high = i_low + dim_obs
# Compute the euler angles from the quaternion passed in
quat = np.zeros(4)
quat[0] = observed_poses[i].orientation.x
quat[1] = observed_poses[i].orientation.y
quat[2] = observed_poses[i].orientation.z
quat[3] = observed_poses[i].orientation.w
r = R.from_quat(quat)
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_quat.html
z_eul = r.as_euler(euler_order)
# Different functions for 3D vs. 6D observation
if dim_obs == 3:
z_pos = np.array([observed_poses[i].position.x, observed_poses[i].position.y])
z_eul = [z_eul[0]]
dist = np.linalg.norm(z_pos)
R_0[i_low:i_high, i_low:i_high] = 1 * aruco_R_from_range_3D(dist)
H_0 = h_camera_fixed_3D(H_0, x_11, i, obs_index, index, dim_state, dim_obs)
else:
return None
# z_pos = np.array(observed_poses[i].position.x, observed_poses[i].position.y, observed_poses[i].position.z)
# dist = np.linalg.norm(z_pos)
# R_0[i_low:i_high, i_low:i_high] = 1 * aruco_R_from_range(dist)
# H_0 = h_camera_fixed(H_0, fixed_state, x_11, i, obs_index, index, dim_state, dim_obs)
z_0[i_low:i_high] = np.concatenate((z_pos, z_eul))[:, None]
return R_0, H_0, z_0
# Fill in the matrix B and the vector u
# B - Control matrix
# u - Control signals
# This function is not ready and has not been tested
def fill_Bu(id_list, my_id, ctrl, dim_state, dim_obs):
# Define the sizes of each variable
n_stored = len(id_list)
B = np.zeros((n_stored * dim_state, n_stored * dim_state))
u = np.zeros((n_stored * dim_state, 1))
index = np.where(id_list == my_id)[0][0]
i_low = dim_state * index
i_high = i_low + dim_obs
B[i_low:i_high, i_low:i_high] = B_eye(dim_obs)
if dim_obs == 3:
ctrl_vect = np.array([ctrl.linear.x, ctrl.linear.y, ctrl.angular.z])
u[i_low+3:i_high+3] = ctrl_vect[:, None]
else:
ctrl_vect = np.array([ctrl.linear.x, ctrl.linear.y, ctrl.linear.z, ctrl.angular.x, ctrl.angular.y, ctrl.angular.z])
u[i_low+6:i_high+6] = ctrl_vect[:, None]
return B, u
# Fill in the matrix B and the vector u
# B - Control matrix
# u - Control signals
# This function is not ready and has not been tested
def fill_Bu_ours(id_list, my_id, dt, ctrl, dim_state):
# Define the sizes of each variable
n_stored = len(id_list)
B = np.eye(n_stored * dim_state)
u = np.zeros((n_stored * dim_state, 1))
index = np.where(id_list == my_id)[0][0]
i_low = dim_state * index
i_high = i_low + dim_state // 2
if dim_state == 6:
ctrl_vect = np.array([ctrl.linear.x, ctrl.linear.y, ctrl.angular.z]) * dt
u[i_low+3:i_high+3] = ctrl_vect[:, None]
else:
ctrl_vect = np.array([ctrl.linear.x, ctrl.linear.y, ctrl.linear.z, ctrl.angular.x, ctrl.angular.y, ctrl.angular.z]) * dt
u[i_low+6:i_high+6] = ctrl_vect[:, None]
return B, u
# Define the measurement jacobian for a camera (3D-observation)
def h_camera_3D(H, x, meas_index, agent1, agent2, dim_state, dim_obs):
agent1_row_min = dim_state * agent1
agent1_row_max = agent1_row_min + 3
agent2_row_min = dim_state * agent2
agent2_row_max = agent2_row_min + 3
meas_row_min = dim_obs * meas_index
meas_row_max = meas_row_min + 3
x1 = x[agent1_row_min:agent1_row_max]
x2 = x[agent2_row_min:agent2_row_max]
Jacobian = np.array(dual_relative_obs_jacobian_3D(x1, x2))
H[meas_row_min:meas_row_max, agent1_row_min:agent1_row_max] = Jacobian[:, 0:3]
H[meas_row_min:meas_row_max, agent2_row_min:agent2_row_max] = Jacobian[:, 3:6]
return H
# Define the measurement jacobian for a camera
def h_camera(H, x, meas_index, agent1, agent2, dim_state, dim_obs):
agent1_row_min = dim_state * agent1
agent1_row_max = agent1_row_min + dim_obs
agent2_row_min = dim_state * agent2
agent2_row_max = agent2_row_min + dim_obs
meas_row_min = dim_obs * meas_index
meas_row_max = meas_row_min + dim_obs
x1 = x[agent1_row_min:agent1_row_max]
x2 = x[agent2_row_min:agent2_row_max]
Jacobian = np.array(dual_relative_obs_jacobian(x1, x2))
H[meas_row_min:meas_row_max, agent1_row_min:agent1_row_max] = Jacobian[:, 0:dim_obs]
H[meas_row_min:meas_row_max, agent2_row_min:agent2_row_max] = Jacobian[:, dim_obs:2*dim_obs]
return H
# Define the measurement jacobian for a camera (3D-observation)
def h_camera_zero_3D(H, x, meas_index, agent, dim_state, dim_obs):
agent_row_min = dim_state * agent
agent_row_max = agent_row_min + dim_obs
meas_row_min = dim_obs * meas_index
meas_row_max = meas_row_min + dim_obs
H[meas_row_min:meas_row_max, agent_row_min:agent_row_max] = np.eye(dim_obs)
return H
# Define the measurement jacobian for a camera
def h_camera_zero(H, x, meas_index, agent, dim_state, dim_obs):
agent_row_min = dim_state * agent
agent_row_max = agent_row_min + dim_obs
meas_row_min = dim_obs * meas_index
meas_row_max = meas_row_min + dim_obs
H[meas_row_min:meas_row_max, agent_row_min:agent_row_max] = np.eye(dim_obs)
return H
# Define the measurement jacobian for a camera (3D-observation)
def h_camera_fixed_3D(H, x, meas_index, agent1, agent2, dim_state, dim_obs):
agent1_row_min = dim_state * agent1
agent1_row_max = agent1_row_min + dim_obs
agent2_row_min = dim_state * agent2
agent2_row_max = agent2_row_min + dim_obs
meas_row_min = dim_obs * meas_index
meas_row_max = meas_row_min + dim_obs
x1 = x[agent1_row_min:agent1_row_max]
x2 = x[agent2_row_min:agent2_row_max]
Jacobian = np.array(jacobian_fixed_to_obs_3D(x1, x2))
H[meas_row_min:meas_row_max, agent1_row_min:agent1_row_max] = Jacobian[:, 0:dim_obs]
H[meas_row_min:meas_row_max, agent2_row_min:agent2_row_max] = Jacobian[:, dim_obs:2*dim_obs]
return H
# Helper functions
def state_from_id(x, id_list, id, dim_state):
index = np.where(id_list == id)[0][0]
i_low = dim_state * index
i_high = i_low + dim_state
return x[i_low:i_high]
def cov_from_id(P, id_list, id, dim_state):
index = np.where(id_list == id)[0][0]
i_low = dim_state * index
i_high = i_low + dim_state
return P[i_low:i_high, i_low:i_high]
# Define motion jacobian for unicycle robot (3D-observation)
def f_unicycle_3D(dt, x, agent1, dim_state):
agent1_row_min = dim_state * agent1
agent1_row_max = agent1_row_min + dim_state
x1 = x[agent1_row_min:agent1_row_max]
F = np.eye(dim_state)
w = x1[5]
# Using the fundamental theorem of engineering, sin(x) = x,
# sin(a*x)/x = a (Really only when x is 0)
if w == 0:
F[0, 3] = dt
F[0, 4] = 0
F[1, 3] = 0
F[1, 4] = dt
F[3, 3] = 1
F[3, 4] = 0
F[4, 3] = 0
F[4, 4] = 1
else:
F[0, 3] = np.sin(w*dt) / w
F[0, 4] = -(1 - np.cos(w*dt)) / w
F[1, 3] = (1 - np.cos(w*dt)) / w
F[1, 4] = np.sin(w*dt) / w
F[3, 3] = np.cos(w*dt)
F[3, 4] = -np.sin(w*dt)
F[4, 3] = np.sin(w*dt)
F[4, 4] = np.cos(w*dt)
F[2, 5] = dt
return F
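# Small-angle sketch (illustrative, not from the original code): for a near-zero yaw rate
# the general branch of f_unicycle_3D converges to the constant-velocity w == 0 branch.
def _f_unicycle_3D_small_w_example(dt=0.1):
    x_zero = np.zeros((6, 1))
    x_small = np.zeros((6, 1))
    x_small[5] = 1e-8
    return np.allclose(f_unicycle_3D(dt, x_zero, 0, 6), f_unicycle_3D(dt, x_small, 0, 6))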
# Define motion jacobian for unicycle robot
def f_unicycle(dt, x, agent1, dim_state):
agent1_row_min = dim_state * agent1
agent1_row_max = agent1_row_min + dim_state
x1 = x[agent1_row_min:agent1_row_max]
F = np.eye(dim_state)
w = x1[9]
# Using the fundamental theorem of engineering, sin(x) = x,
# sin(a*x)/x = a (Really only when x is 0)
if w == 0:
F[0, 6] = dt
F[0, 7] = 0
F[1, 6] = 0
F[1, 7] = dt
F[6, 6] = 1
F[6, 7] = 0
F[7, 6] = 0
F[7, 7] = 1
else:
F[0, 6] = np.sin(w*dt) / w
F[0, 7] = -(1 - np.cos(w*dt)) / w
F[1, 6] = (1 - np.cos(w*dt)) / w
F[1, 7] = np.sin(w*dt) / w
F[6, 6] = np.cos(w*dt)
F[6, 7] = -np.sin(w*dt)
F[7, 6] = np.sin(w*dt)
F[7, 7] = np.cos(w*dt)
block = dt * np.eye(3)
F[3:6, 9:12] = block
return F
# Define stationary jacobian for waypoints
def f_eye(dim_state):
F = np.eye(dim_state)
return F
# Define motion model covariance (3D-observation, distance-based)
def q_distance_3D(dt, x, agent1, dim_state):
i_low = dim_state * agent1
i_high = i_low + dim_state
# Q is (dt * (x_dot + 0.1) * 5%) ^ 2
Q_pos = (dt * (np.linalg.norm(x[i_low+3:i_low+5]) + 0.1) * 0.05) ** 2
Q_theta = (dt * (np.linalg.norm(x[i_low+5]) + 0.1) * 0.05) ** 2
# Define the velocity covariance
Q = 1 * np.eye(dim_state)
Q[3:5, 3:5] = dse_constants.MOTION_BASE_COVARIANCE / (dt ** 2) * np.eye(2)
Q[5, 5] = dse_constants.MOTION_BASE_COVARIANCE / (dt ** 2)
# if Q_pos or Q_theta is <= 0, problems occur
if Q_pos > 0:
Q[0:2, 0:2] = Q_pos * np.eye(2)
if Q_theta > 0:
Q[2, 2] = Q_theta
return Q
# Define motion model covariance (distance-based)
def q_distance(dt, x, agent1, dim_state):
i_low = dim_state * agent1
i_high = i_low + dim_state
# Q is (dt * (x_dot + 0.001) * 5%) ^ 2
Q_pos = (dt * (np.linalg.norm(x[i_low+6:i_low+9]) + 0.001) * 0.05) ** 2
Q_theta = (dt * (np.linalg.norm(x[i_low+9:i_low+12]) + 0.001) * 0.05) ** 2
# Define the velocity covariance
Q = 1 * np.eye(dim_state)
Q[6:12, 6:12] = dse_constants.MOTION_BASE_COVARIANCE / (dt ** 2) * np.eye(6)
# if Q_pos or Q_theta is <= 0, problems occur
if Q_pos > 0:
Q[0:3, 0:3] = Q_pos * np.eye(3)
if Q_theta > 0:
Q[3:6, 3:6] = Q_theta * np.eye(3)
return Q
# Define motion model covariance (static)
def q_const(dim_state, var=0.0000001):
Q = var * np.eye(dim_state)
return Q
# Direct control matrix
def B_eye(dim_state):
B = np.eye(dim_state)
return B
# Unknown control matrix
def B_zeros(dim_state):
B = np.zeros((dim_state, dim_state))
return B
# Ensures that every agent has the same state variables in the same order
def get_sorted_agent_states(array_ids, array_Y, array_y, array_I, array_i, dim_state):
# Build combined list of ids
# Still trying to come up with a way to take in data of any form and return vector of ids
flat_list = [item for sublist in array_ids for item in sublist]
id_list = np.unique(flat_list)
id_list = np.sort(id_list)
n_agents = len(id_list)
# Ensure all agents' state variables match the master list
# For each agent that sent in data
for i in range(len(array_ids)):
# If the state variable isn't correct, re-order/extend it
if not np.array_equal(id_list, array_ids[i]):
# Build an empty set of variables
# Full-size, ready for data to be inserted
# Potentially change the initialization?
master_Y = 0.01 * np.eye(n_agents * dim_state)
master_y = np.zeros((n_agents * dim_state, 1))
master_I = np.zeros((n_agents * dim_state, n_agents * dim_state))
master_i = np.zeros((n_agents * dim_state, 1))
# Move the agents' values to the location specified in the master list
# Loop through the input data in chunks of (state_dim x state_dim)
# Take each block and move it to the correct location in the master arrays
for agent_row_index in range(len(array_ids[i])):
for agent_column_index in range(len(array_ids[i])):
# Given a chunk of data and a row and column index,
# grab the row and column ids of the input data
# Find the location of those ids in the master arrays
group_row_index = np.where(id_list == array_ids[i][agent_row_index])[0][0]
group_column_index = np.where(id_list == array_ids[i][agent_column_index])[0][0]
# Generate indices (to make the assignment step shorter)
g_row_lo = dim_state * group_row_index
g_row_hi = g_row_lo + dim_state
g_col_lo = dim_state * group_column_index
g_col_hi = g_col_lo + dim_state
a_row_lo = dim_state * agent_row_index
a_row_hi = a_row_lo + dim_state
a_col_lo = dim_state * agent_column_index
a_col_hi = a_col_lo + dim_state
# Move this chunk of data to the master arrays
master_Y[g_row_lo:g_row_hi, g_col_lo:g_col_hi] = array_Y[i][a_row_lo:a_row_hi, a_col_lo:a_col_hi]
master_I[g_row_lo:g_row_hi, g_col_lo:g_col_hi] = array_I[i][a_row_lo:a_row_hi, a_col_lo:a_col_hi]
master_y[g_row_lo:g_row_hi] = array_y[i][a_row_lo:a_row_hi]
master_i[g_row_lo:g_row_hi] = array_i[i][a_row_lo:a_row_hi]
array_ids[i] = id_list
array_Y[i] = master_Y
array_y[i] = master_y
array_I[i] = master_I
array_i[i] = master_i
return array_ids, array_Y, array_y, array_I, array_i
def state_to_xyzypr(state):
state = state[:, 0]
if len(state) == 6:
output = np.zeros(6)
output[0:2] = state[0:2]
output[5] = state[2]
else:
output = state[0:6]
return output
# Takes in a 6x6 x/y/yaw/dx/dy/dyaw covariance matrix, and returns a 6x6 x/y/z/yaw/pitch/roll covariance matrix
# Assumes that the unknown terms are zero
def state_cov_to_covariance_matrix(cov):
if np.shape(cov)[0] == 6:
output_3D = np.zeros((6, 6))
output_3D[0:2, 0:2] = cov[0:2, 0:2]
output_3D[0:2, 5] = cov[0:2, 2]
output_3D[5, 0:2] = cov[2, 0:2]
output_3D[5, 5] = cov[2, 2]
else:
output_3D = cov[0:6, 0:6]
return output_3D
# Takes in a 6x6 x/y/z/yaw/pitch/roll covariance matrix, and returns a 3x3 x/y/yaw covariance matrix
# Throws out all non-relevant terms
def covariance_matrix_to_2D_meas_cov(cov):
output_2D =
| np.zeros((3, 3)) | numpy.zeros |
"""
Module that condenses all the components of the EMS tested above
"""
# General Imports
import numpy as np
import numpy.matlib as matlib
from math import *
from decimal import *
from scipy import linalg
# Sklearn imports
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
import inspect
# Parameters that should be tuneable to configure the SoC
class socParams:
def __init__(self, e_th, p1, p2, q1, q2, alpha, kappa, beta, lambd, gamma, wc, wm, rww, rvv, n):
self._e_th = e_th;
self._p1 = p1;
self._p2 = p2;
self._q1 = q1;
self._q2 = q2;
self._alpha = alpha;
self._kappa = kappa;
self._beta = beta;
self._lambd = lambd;
self._gamma = gamma;
self._wc = wc;
self._wm = wm;
self._rww = rww;
self._rvv = rvv;
self._n = n;
# getter method
@property
def e_th(self):
return self._e_th;
@property
def p1(self):
return self._p1;
@property
def p2(self):
return self._p2;
@property
def q1(self):
return self._q1;
@property
def q2(self):
return self._q2;
@property
def alpha(self):
return self._alpha;
@property
def kappa(self):
return self._kappa;
@property
def beta(self):
return self._beta;
@property
def lambd(self):
return self._lambd;
@property
def gamma(self):
return self._gamma;
@property
def wc(self):
return self._wc;
@property
def wm(self):
return self._wm;
@property
def rww(self):
return self._rww;
@property
def rvv(self):
return self._rvv;
@property
def n(self):
return self._n;
# setter method
@e_th.setter
def e_th(self, x):
self._e_th = x;
@p1.setter
def p1(self, x):
self._p1 = x
@p2.setter
def p2(self, x):
self._p2 = x;
@q1.setter
def q1(self, x):
self._q1 = x;
@q2.setter
def q2(self, x):
self._q2 = x;
@alpha.setter
def alpha(self, x):
self._alpha = x;
@kappa.setter
def kappa(self, x):
self._kappa = x;
@beta.setter
def beta(self, x):
self._beta = x;
@lambd.setter
def lambd(self, x):
self._lambd = x;
@gamma.setter
def gamma(self, x):
self._gamma = x;
@wc.setter
def wc(self, x):
self._wc = x;
@wm.setter
def wm(self, x):
self._wm = x;
@rww.setter
def rww(self, x):
self._rww = x;
@rvv.setter
def rvv(self, x):
self._rvv = x;
@n.setter
def n(self, x):
self._n = x;
# Method for implementation of the UKF estimator based on a simple model for the battery bank
# Negative Current is for Discharging
# Positive current is for Charging
# State Equations
# SoC SoC(k+1)=SoC(k)+((eff*T)/C)*I(k) + Noise
# Resistor for discharge R-(k+1) = R-(k) + Noise
# Resistor for charge R+(k+1) = R+(k) + Noise
# Observation equation
# Voltage = Voc (Soc) + sig*R-*I + (1-sig)*R+*I;
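# Minimal sketch of the SoC state equation above (hypothetical helper, not part of the
# estimator; assumes T in seconds, C_n in Ah and I in A, matching the coefficient b below):
def soc_propagate(soc, i_k, t_sampling, c_n, eff=1.0):
    # SoC(k+1) = SoC(k) + ((eff * T) / (3600 * C_n)) * I(k), process noise omitted
    return soc + ((eff * t_sampling) / (3600.0 * c_n)) * i_k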
def socEstimation(p_soc,v_bat,i_prev,i_now,t_sampling,e_acum_in,states_in,rxx_in,c_n,f) :
# Initialization
# Local vars
e_acum = e_acum_in;
# Output vars
states_out = None;
rxx_out = None;
err = None;
e_acum_out = None;
##-------------------- Estimator turns off and it returns the previous values-----------------%
# Condition disabled, originally abs(i_now) <= 0.1
if False :
print('Current too small to be considered.');
states_out = states_in;
rxx_out = rxx_in;
err = 0;
e_acum_out = e_acum_in;
print('Returning values by default.');
return states_out, rxx_out, err, e_acum_out;
#-------------------------------------- Turns on the filter-----------------------------------%
#---------------------------------- Efficiency initialization---------------------------------%
# Efficiency = 1 for discharge; Less than one for charge
if i_prev <= 0 :
n_0 = 1;
else :
n_0 = 0.95;
#---------------------------------States and noise initialization-----------------------------%
# States Initialization
x_med = [states_in];
# Noise Process Initialization
rxx = [rxx_in];
#------------------------Definition of the Process state Equations----------------------------%
# First coeff of each equation
a = np.eye(p_soc.n,p_soc.n);
# Capacity in Amperes-seconds
b = [((t_sampling*n_0)/(3600*c_n)), 0, 0];
#------------------------------------- UKF Algorithm------------------------------------------%
#---------------------------------- OFCL implementation---------------------------------------%
#----- The process and observation noises are updated according to the accumulated error (e_acum)
#----- See equation in document and flowchart
# Enhanced OFCL
if e_acum <= p_soc.e_th :
g_std = [max(p_soc.p1*np.sqrt(p_soc.rww[0,0], dtype=np.float128),1.2e-15),
max(p_soc.p2*np.sqrt(p_soc.rww[1,1], dtype=np.float128),1e-10),
max(p_soc.p2*np.sqrt(p_soc.rww[2,2], dtype=np.float128),1e-9)];
p_soc.rww = np.array(np.diag([g_std[0]**2, g_std[1]**2, g_std[2]**2]), dtype=np.float128);
else :
g_std = [min(p_soc.q1*np.sqrt(p_soc.rww[0,0], dtype=np.float128),1.2e-3),
min(p_soc.q2*np.sqrt(p_soc.rww[1,1], dtype=np.float128),1e-1),
min(p_soc.q2*np.sqrt(p_soc.rww[2,2], dtype=np.float128),1e-1)];
p_soc.rww = np.array(np.diag([g_std[0]**2, g_std[1]**2, g_std[2]**2]), dtype=np.float128);
#---------------------Calculate the sigma points for the states--------------------------------%
try :
sxx = linalg.cholesky(rxx[0], lower=True, overwrite_a=False, check_finite=True);
sxx = [[Decimal(x) for x in y] for y in sxx];
sxx = np.array([[np.float128(x) for x in y] for y in sxx]);
except Exception as e:
print('Cholesky decomposition failed due to: '+ str(e));
print('Returning values by default.');
states_out = states_in;
rxx_out = rxx_in;
err = 0;
e_acum_out = e_acum_in;
return states_out, rxx_out, err, e_acum_out;
# Concatenate Cholesky factors
x_ = np.concatenate(([np.zeros(len(x_med[0]))], sxx, -sxx), axis=0).transpose();
# Multiply Gamma with Cholesky factors
x_ = p_soc.gamma*x_;
# Calculation of Sigma points
x_0 = matlib.repmat(np.array(x_med[0]),x_.shape[1],1).transpose();
x_ = np.add(x_,x_0, dtype=np.float128);
#-----------------------------States Prediction------------------------------------------------%
x = np.add(a.dot(x_),matlib.repmat(b,x_.shape[1],1).transpose()*i_prev, dtype=np.float128);
x_med.append(np.sum(matlib.repmat(p_soc.wm,p_soc.n,1)*x,axis=1, dtype=np.float128));
#------------------------Error Prediction of the States and covariance------------------------%
x_e = np.subtract(x,matlib.repmat(x_med[1],x_.shape[1],1).transpose(), dtype=np.float128);
rxx.append(np.add(p_soc.wm[0]*(np.array([x_e[:,0]])*np.array([x_e[:,0]]).transpose()),p_soc.rww, dtype=np.float128));
index = 1;
while(index < 2*p_soc.n+1) :
rxx[1] = np.add(p_soc.wm[index]*(np.array([x_e[:,index]])*np.array([x_e[:,index]]).transpose()), rxx[1], dtype=np.float128);
index += 1;
#---------------------Calculate the sigma points for the observations--------------------------%
try :
sxx = linalg.cholesky(rxx[1], lower=True, overwrite_a=False, check_finite=True);
sxx = [[Decimal(x) for x in y] for y in sxx];
sxx = np.array([[np.float128(x) for x in y] for y in sxx], dtype=np.float128);
except Exception as e:
print('Cholesky decomposition failed due to: '+ str(e));
print('Returning values by default.');
states_out = states_in;
rxx_out = rxx_in;
err = 0;
e_acum_out = e_acum_in;
return states_out, rxx_out, err, e_acum_out;
# Concatenate Cholesky factors
x_ = np.concatenate(([np.zeros(len(x_med[1]))], sxx, -sxx), axis=0).transpose();
# Multiply Gamma with Cholesky factors
x_ = p_soc.gamma*x_;
# Calculation of Sigma points
x_0 = matlib.repmat(np.array(x_med[1]),x_.shape[1],1).transpose();
x_ = np.add(x_,x_0, dtype=np.float128);
#--------------------------------Observation Prediction ---------------------------------------%
#----------VoC vs SoC is calculated with the laboratory method---------------------------------%
voc = (3.755*np.power(x_[0],3, dtype=np.float128) - 5.059*np.power(x_[0],2, dtype=np.float128) + 3.959*x_[0] + 17.064)*f;
# Identify the sign according to the current
if i_now <= 0 :
sig=1;
else :
sig=0;
# Calculate the voltage in the battery according to the simple model (to upgrade)
v_model = np.array([voc + sig*x_[1]*i_now + (1-sig)*x_[2]*i_now], dtype=np.float128);
# Average output
v_avg = np.array(p_soc.wm, dtype=np.float128).dot(v_model.transpose());
#--------------------- Residual prediction----------------------------------------------------%
v_e = np.subtract(v_model,matlib.repmat(v_avg,x_.shape[1],1).transpose(), dtype=np.float128);
ryy = np.add(p_soc.wc[0]*(np.array([v_e[:,0]])*np.array([v_e[:,0]]).transpose()),p_soc.rvv, dtype=np.float128);
index = 1;
while(index < 2*p_soc.n+1) :
ryy = np.add(p_soc.wc[index]*(np.array([v_e[:,index]])*
| np.array([v_e[:,index]]) | numpy.array |
import os, sys
from math import sqrt, copysign
import numpy as np
import helpers as nhp
from helpers import rotmat_dict
from LatticeModel import LatticeModel
from cached_property import cached_property
import random
import plotly as py
import plotly.graph_objs as go
class Lattice(LatticeModel):
"""Class containing all that pertains to a particular type of lattice (initialization, allowed moves etc.)
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.steric_hindrance_penalty = 55 # energy penalty for side chains at adjacent indices pointing the same way, Abeln 2014
self.hb_penalty = -50 # energy bonus for forming an H bond, Abeln 2014
self.ca_dist = 3.8 # Distance in A between CAs at adjacent coordinates
self.linker_dist = 20 # Distance tagged CA to dye todo: wild guess based on linker, should MD!
self.cacb_dist = 1.53 # Distance between CA and CB
self.n1_dist = 1.48 # estimate of distance N to CA
self.cm_dist = 4.75 # distance between adjacent centers of mass (deduced from human swissprot dataset)
self.tag_dist = 25
self.pairs_mat = kwargs['pairs_mat']
self.pro_penalty = kwargs.get('pro_penalty', 0.0)
self.sol_energy_factor = kwargs.get('sol_energy_factor', 1.0)
self.p_global = kwargs.get('p_global', 0.1)
self.no_anchoring = kwargs.get('no_anchoring', False)
self.coords = kwargs.get('coords', None)
self.res_coords_mod = kwargs.get('res_coords_mod', None)
self.correct_side_chains()
self.state = np.ones(self.seq_length, dtype=bool) # start off with all in coil state == 1
@property
def rg(self):
"""
Radius of gyration, based on the implementation for pymol: https://pymolwiki.org/index.php/Radius_of_gyration
Uses mass of amino acid, centered at center of mass (actually ca-coords but spaced as if centers of mass)
"""
coord_mods = self.coords[1:, :3] - self.coords[:-1, :3]
cm_list = np.split(coord_mods, axis=0, indices_or_sections=coord_mods.shape[0])
cm_coords = np.zeros_like(self.coords[:, :3], dtype=float)
cm_coords[0, :] = self.coords[0, :3]
for cmi, cm in enumerate(cm_list):
aa1 = nhp.aa_dict[self.aa_sequence[cmi]]
aa2 = nhp.aa_dict[self.aa_sequence[cmi + 1]]
cm_coords[cmi + 1, :] = cm_coords[cmi, :] + cm * nhp.cm_dist_df.loc[aa1, aa2]
# cm_coords = self.coords[:, :3] * self.cm_dist # note: commented, because unit coords are multiplied by modifiers above!
res_mass = np.expand_dims([nhp.aa_mass_dict[rn] for rn in self.aa_sequence], -1)
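# Rg^2 = sum(m_i * |r_i|^2) / M - |sum(m_i * r_i) / M|^2, i.e. the mass-weighted second
# moment about the centre of mass (parallel-axis form), computed below as rr/tmass - mm.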
cm = cm_coords * res_mass
tmass = np.sum(res_mass)
rr = np.sum(cm * cm_coords)
mm = np.sum(np.power(np.sum(cm, axis=0) / tmass, 2))
rg2 = rr / tmass - mm
if rg2 < 0:
return 0.0
return sqrt(rg2)
@cached_property
def start_pos(self):
"""
Placeholder vector for coordinates
"""
# mid_point = self.lattice_dims[0] // 2
# return np.tile((mid_point, mid_point, 0, 1), (self.seq_length, 1))
return np.tile((0, 0, 0), (self.seq_length, 1))
@property
def individual_energies(self):
"""
Energy cost function
"""
e_aa = 0
e_hb = 0
has_neighbor_bool = nhp.inNd(self.res_coords, self.coords)
# AA water contact contribution
e_sol_vec = np.delete(np.invert(has_neighbor_bool) * self.pairs_mat.loc[self.aa_sequence, 'HOH'].to_numpy(),
self.tagged_resi)
e_sol_vec[e_sol_vec < 0] *= self.sol_energy_factor
e_sol = np.sum(e_sol_vec)
# e_sol = np.sum(np.invert(has_neighbor_bool) * self.pairs_mat.loc[aa_seq_noTagged, 'HOH']) * self.sol_energy_factor
# Steric hindrance contribution
# sh_bool_fwd = np.all(self.res_coords_mod[1:, :] - self.res_coords_mod[:-1, :] == 0, axis=1)
# sh_bool_rev = np.all(self.res_coords_mod[:-1, :] - self.res_coords_mod[1:, :] == 0, axis=1)
# e_sh = np.sum(np.logical_or(sh_bool_fwd, sh_bool_rev)) * self.steric_hindrance_penalty
sh_bool = np.all(self.res_coords_mod[1:, :] - self.res_coords_mod[:-1, :] == 0, axis=1)
e_sh = np.sum(sh_bool) * self.steric_hindrance_penalty
for ci in range(self.seq_length):
cur_aa = self.aa_sequence[ci]
neighbor_bool = np.sum(np.abs(self.coords[ci, :] - self.coords), axis=1) == 1
if ci != 0: neighbor_bool[ci - 1] = False # Direct sequential neighbors do NOT count
if ci < self.seq_length - 1: neighbor_bool[ci + 1] = False
# H-bond contribution
if self.state[ci] == 0:
resdir_signed_bool = nhp.inNd(self._res_coords_mod,
self._res_coords_mod[ci, :]) # check: direction same
hbond_disallowed_neighbor_coords = np.vstack((self.coords[ci, :] + self.res_coords_mod[ci, :],
self.coords[ci, :] - self.res_coords_mod[ci, :]))
hbond_neighbor_bool = np.logical_and(neighbor_bool, np.invert(nhp.inNd(self.coords,
hbond_disallowed_neighbor_coords))) # check: neighbor positions, but not in same dimension as side chain extends
hbond_bool = np.logical_and(hbond_neighbor_bool, np.logical_and(resdir_signed_bool, np.invert(
self.state))) # check: in beta-strand state
e_hb += np.sum(hbond_bool) * self.hb_penalty * 0.5
# AA contact contribution
if ci in self.tagged_resi: continue # tagged aa's can't form contacts
if not np.any(neighbor_bool): continue
ni = np.where(neighbor_bool)[0] # neighbor index
# case 1: CA--R R--CA
res_opposite_bool = nhp.inNd(self._res_coords_mod[ni, :], self._res_coords_mod[ci, :] * -1)
res_on_bb_bool = nhp.inNd(self.coords[ni, :], self.res_coords[ci, :])
e_bool1 = np.logical_and(res_on_bb_bool, res_opposite_bool)
# case 2 parallel residues: CA^--R CA^--R
res_parallel_bool = nhp.inNd(self._res_coords_mod[ni, :], self._res_coords_mod[ci, :])
ca_dim = np.where(self.coords[ni, :3] != self.coords[ci, :3])[1]
res_dim = np.array([np.argwhere(self.res_coords[nii, :3] != self.res_coords[ci, :3])[0, 0] for nii in ni])
e_bool2 = np.logical_and(res_parallel_bool, ca_dim != res_dim)
e_bool = np.logical_or(e_bool1, e_bool2)
neighbor_aas = self.aa_sequence[ni[e_bool]]
# res_neighbor_bool = np.sum(np.abs(self.res_coords[ci, :] - self.res_coords), axis=1) == 1
# resmod_unsigned = np.row_stack((self._res_coords_mod[ci, :], self._res_coords_mod[ci, :] * -1))
# resdir_unsigned_bool = nhp.inNd(self._res_coords_mod, resmod_unsigned)
# e_bool = np.logical_and(neighbor_bool, np.logical_and(resdir_unsigned_bool, res_neighbor_bool))
# e_bool = np.logical_and(~self.tagged_resi_bool, e_bool)
# neighbor_aas = self.aa_sequence[e_bool]
e_aa += sum([self.pairs_mat.loc[cur_aa, na] for na in neighbor_aas]) * 0.5
return e_aa, e_hb, e_sol, e_sh
@property
def base_energy(self):
return sum(self.individual_energies)
@cached_property
def tagged_resi_bool(self):
bool_array = np.zeros(self.seq_length, dtype=bool)
bool_array[self.tagged_resi] = True
return bool_array
@staticmethod
def stretched_init(seq_length):
"""
Generate a stretched configuration for a given number of residues
"""
coords = np.zeros((seq_length, 3), dtype=int)
coords[:, 2] += np.arange(seq_length)
return coords
# --- Global mutations ---
@staticmethod
def branch_rotation(c, pivot, dim):
"""
:param c: coordinates to change
:param pivot: point around which to rotate
:param dim: signed dimension in which to perform rotation (1, 2 or 3), pos for fwd, neg for rev
:return: mutated coords
"""
return np.dot(rotmat_dict[dim], (c - pivot).T).T + pivot
@staticmethod
def corner_flip(c1, c2, c3):
return c2 + ((c1 + c3) - 2 * c2)
@staticmethod
def crankshaft_move(c, direction):
da = c[0, :] != c[3, :] # dim in which hinge points differ
db = np.all(c == c[0, :], axis=0) # dim in which all points are same
dc = np.logical_and(np.invert(da), np.invert(db)) # dim in which 'swing' differs from hinge points
c[(1, 2), dc] = c[(0, 3), dc]
c[(1, 2), db] = c[(1, 2), db] + direction
return c[1:3, :]
@staticmethod
def get_neighbors(c, d=1):
neighbors = np.tile(c, (6, 1))
neighbors += np.row_stack((np.eye(3, dtype=int) * d, np.eye(3, dtype=int) * -1) * d)
return neighbors
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, new_coords):
"""
Set coords to newly provided coords if provided, or set with a random walk
"""
# self._coords = self.start_pos
if new_coords is None:
nb_attempts = 500
# Allowed types:
# - stretched
# - serrated
# - free_random
# - anchored_random
if self.starting_structure == 'stretched':
self._coords = self.stretched_init(self.seq_length)
return
# if self.starting_structure == 'free_random':
# for attempt_idx in range(nb_attempts):
# if self.perform_random_walk([]): return
# elif self.starting_structure == 'anchored_random':
# anchors = [idx for idx, aa in enumerate(self.aa_sequence) if idx in self.tagged_resi]
# for attempt_idx in range(nb_attempts):
# if self.perform_random_walk(anchors): return
raise ValueError(f'No feasible random start state reached in {nb_attempts} attempts!')
else:
self._coords = new_coords
@property
def res_coords(self):
"""
Residue coords
"""
return self.coords + self._res_coords_mod
@property
def res_coords_mod(self):
"""
Modifier for residue coords, add to CA coords to get residue coords
"""
return self._res_coords_mod
@property
def res_coords_plottable(self):
"""
residue coords, fit for plotting only; shorter distances for untagged, arbitrarily longer for tagged
"""
coords_mod = self._res_coords_mod * 0.3
coords_mod[self.tagged_resi, :] *= 20
return self.coords + coords_mod
def get_distmat(self, coords, anchors):
"""
return the distances between all lattice points and a number of anchor coordinates
"""
if anchors.ndim == 1:
anchors = np.expand_dims(anchors, 0)
return np.column_stack([np.sum(
|
np.abs(coords - an)
|
numpy.abs
|
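A minimal, self-contained sketch (independent of the lattice class above) of how the completed np.abs call yields Manhattan distances from every lattice point to each anchor; the coordinates and array shapes below are illustrative assumptions, not values from the original code.
import numpy as np

# Hypothetical lattice points and anchor coordinates (3D integer lattice)
coords = np.array([[0, 0, 0], [1, 0, 0], [2, 1, 0], [3, 3, 1]])
anchors = np.array([[0, 0, 0], [2, 2, 2]])

# For each anchor, |coords - anchor| summed over the last axis is the
# L1 (Manhattan) distance from every lattice point to that anchor.
distmat = np.column_stack([np.sum(np.abs(coords - an), axis=-1) for an in anchors])
print(distmat.shape)  # (4, 2): one column of distances per anchor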
import numpy as np
import torch
from torch.autograd import Variable
from model.utils.cython_bbox import bbox_overlaps
def choose_gt(boxes, cls_prob, im_labels):
boxes = boxes[...,1:]
num_images, num_classes = im_labels.shape
assert num_images == 1, 'batch size should be equal to 1'
im_labels_tmp = im_labels[0, :]
gt_boxes = np.zeros((0, 5), dtype=np.float32)
if 21 == cls_prob.shape[2] :
cls_prob = cls_prob[:,:,1:]
for i in range(num_classes):
if im_labels_tmp[i] == 1:
gt_boxes_tmp = np.zeros((1, 5), dtype=np.float32)
cls_prob_tmp = cls_prob[:,:, i].data
max_index = np.argmax(cls_prob_tmp)
gt_boxes_tmp[:, 0:4] = boxes[:,max_index, :].reshape(1, -1)
gt_boxes_tmp[:, 4] = i+1
gt_boxes = np.vstack((gt_boxes, gt_boxes_tmp))
# choose pos samples by gt
overlaps = bbox_overlaps(
np.ascontiguousarray(boxes[0], dtype=np.float),
np.ascontiguousarray(gt_boxes, dtype=np.float))
max_overlaps = overlaps.max(axis=1)
fg_inds = np.where(max_overlaps >= 0.5)[0]
pos_samples = np.empty((0,4), dtype=np.float32)
if fg_inds.shape[0] != 0:
pos_samples =
|
np.vstack((pos_samples, boxes[0][fg_inds, :]))
|
numpy.vstack
|
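For reference, a small standalone example of the np.vstack pattern used above: growing an (N, 4) array of box coordinates by stacking newly selected rows onto an initially empty array. The boxes and indices here are made up purely for illustration.
import numpy as np

pos_samples = np.empty((0, 4), dtype=np.float32)  # start with zero rows
boxes = np.array([[ 0.,  0., 10., 10.],
                  [ 5.,  5., 20., 20.],
                  [30., 30., 50., 50.]], dtype=np.float32)
fg_inds = np.array([0, 2])  # indices of boxes passing some overlap threshold

# vstack concatenates along axis 0, so the empty (0, 4) array simply
# picks up the selected rows.
pos_samples = np.vstack((pos_samples, boxes[fg_inds, :]))
print(pos_samples.shape)  # (2, 4)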
"""
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019-2021 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: <NAME>
Example script for generating sine-sweep type excitations.
"""
import numpy as np
import matplotlib.pyplot as plt
import control
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
from Core import GenExcite
from Core import FreqTrans
from Core import Servo
# Constants
pi = np.pi
hz2rps = 2*pi
rps2hz = 1/hz2rps
rad2deg = 180/pi
deg2rad = 1/rad2deg
mag2db = control.mag2db
db2mag = control.db2mag
#%% Define the frequency selection and distribution of the frequencies into the signals
numChan = 1
freqRate_hz = 50;
timeDur_s = 10.0
numCycles = 3
freqMinDes_rps = (1/timeDur_s) * hz2rps * np.ones(numChan)
freqMaxDes_rps = 15 * hz2rps * np.ones(numChan)
freqStepDes_rps = (10 / 50) * hz2rps
methodSW = 'zip' # "zippered" component distribution
## Generate MultiSine Frequencies
freqExc_rps, sigIndx, time_s = GenExcite.MultiSineComponents(freqMinDes_rps, freqMaxDes_rps, freqRate_hz, numCycles, freqStepDes_rps, methodSW)
timeDur_s = time_s[-1] - time_s[0]
N = time_s.shape[-1]
M = freqExc_rps.shape[-1]
## Generate Schroeder MultiSine Signal
ampElem_nd = np.ones_like(freqExc_rps) ## Approximate relative signal amplitude, create flat
sigList, phaseElem_rad, sigElem = GenExcite.MultiSine(freqExc_rps, ampElem_nd, sigIndx, time_s, costType = 'Schroeder', phaseInit_rad = 0, boundPhase = True, initZero = True, normalize = 'rms');
sigPeakFactor = GenExcite.PeakFactor(sigList)
if False:
sigList[0], _, _ = GenExcite.Chirp(freqMinDes_rps, freqMaxDes_rps, time_s)
if False:
zPts = ampElem_nd * np.exp(1j*phaseElem_rad)
plt.plot(zPts.real, zPts.imag, '.')
# np.diff(np.diff(phaseElem_rad * 180/np.pi))
# plt.plot(np.diff(phaseElem_rad * 180/np.pi), '.')
#%%
# ampList = np.array([3, 5, 10, 20])
ampList = np.arange(1.0, 40.1, 0.5)
# ampList = np.array([1, 1.5, 2.0, 2.5, 20, 21, 22, 23, 24, 25])
# Create Servo Object (HiTec HS-225BB)
freqNat_hz = 6.0
freqNat_rps = freqNat_hz * hz2rps
objServo = Servo.Servo(1/freqRate_hz, freqNat_rps = freqNat_rps, damp = 0.8)
objServo.freeplay = 1.0 # @ 2.0
objServo.timeDelay_s = 50 / 1000 # this ends up rounded to an integer (timeDelay_s * freqRate_hz)
# objServo.cmdLim = 20
objServo.pLim = 20
objServo.vLim = 560 # (28 / sigPeakFactor) * (freqNat_rps)
# objServo.aLim = 46993 # (28 / sigPeakFactor) * (freqNat_rps**2)
# objServo.pwrLim = 2e6 # pwr = 0.5 * J * (amp**2 * freqNat_rps**3)
pCmdList = []
pOutList = []
lim = 0.0
for amp in ampList:
pCmd = amp * sigList[0]
pOut = np.zeros_like(pCmd)
p = 0; v = 0; a = 0; av = 0
objServo.Start() # Resets the servo states
for i, s in enumerate(pCmd):
pOut[i] = objServo.Update(s)
p = max(p, np.abs(objServo.pOut))
v = max(v, np.abs(objServo.v))
a = max(a, np.abs(objServo.a))
av = max(av, a*v)
print(amp, p, v, a, av)
pCmdList.append(pCmd)
pOutList.append(pOut)
if True:
plt.figure(1)
plt.plot(time_s, pCmd, time_s, pOut)
#%% Plot the Excitation Spectrum
optSpec = FreqTrans.OptSpect(dftType = 'czt', freqRate = freqRate_hz * hz2rps, freq = freqExc_rps, smooth = ('box', 3), winType = 'rect')
plt.figure(2)
TxyList = []
CxyList = []
PxxList = []
PyyList = []
PxyList = []
for i, pOut in enumerate(pOutList):
pCmd = pCmdList[i]
freq_rps, Txy, Cxy, Sxx, Syy, Sxy, Txn, SxxNull, Snn = FreqTrans.FreqRespFuncEstNoise(pCmd, pOut, optSpec)
# print(np.sum(SxxNull))
gain_mag, phase_deg = FreqTrans.GainPhase(Txy, magUnit='mag')
gain_dB = mag2db(gain_mag)  # convert magnitude to dB for the Bode-style plot
freq_hz = freq_rps * rps2hz
freq_hz = np.squeeze(freq_hz)
gain_dB = np.squeeze(gain_dB)
phase_deg = np.squeeze(phase_deg)
Cxy = np.squeeze(Cxy)
# Cxy = np.squeeze(np.abs(Cxy)**2)
TxyList.append(Txy)
CxyList.append(Cxy)
PxxList.append(Sxx)
PyyList.append(Syy)
PxyList.append(Sxy)
ax1 = plt.subplot(3,1,1); plt.grid(True)
ax1.semilogx(freq_hz, gain_dB, '-', label = 'Amplitude: ' + str(ampList[i]))
ax2 = plt.subplot(3,1,2); plt.grid(True)
ax2.semilogx(freq_hz, phase_deg, '-'); plt.ylim([-180, 180]);
ax3 = plt.subplot(3,1,3); plt.grid(True)
ax3.semilogx(freq_hz, Cxy, '-'); #plt.ylim([0, 1.2])
plt.subplot(3,1,1);
plt.legend()
#%%
TxyArray = np.array(TxyList)
CxyArray = np.array(CxyList)
PxxArray = np.array(PxxList)
PyyArray = np.array(PyyList)
PxyArray = np.array(PxyList)
# plt.figure()
# plt.plot(ampList, CxyArray)
# plt.plot(ampList, np.mean(CxyArray, axis=-1))
# plt.plot(ampList, np.min(CxyArray, axis=-1))
# plt.plot(ampList, np.max(CxyArray, axis=-1))
# plt.grid(True)
#%%
import numpy as np
import matplotlib.pyplot as plt
sat = 10
cmd = np.linspace(0, 11 * sat, 201)
def SatFunc(gam):
# A is a vector of amplitudes
# delta is a threshold
gam = np.clip(gam, -1, 1)
f = 2/np.pi * (np.arcsin(gam) + gam * np.sqrt(1 - gam**2))
return f
delta = sat
A = cmd
f = SatFunc(delta/A)
fig = plt.figure()
plt.plot(A/delta, f, '-', label = 'Saturation Function')
# plt.plot(A/delta, 1 - f, '-', label = 'Limiter Function')
plt.grid(True)
plt.xlim([0, 10])
plt.ylim([0, 1.1])
plt.xlabel('Input Amplitude [$A / \delta$]')
plt.ylabel('Output Amplitude [$|N(A)| / m$]')
plt.legend()
fig.set_size_inches([6.4, 3.6])
if False:
FreqTrans.PrintPrettyFig(fig, 'SaturationFunction.pgf')
#%%
def DF_Saturation(A, delta, m = 1):
# Saturation (Gelb #7)
n_r = m * SatFunc(delta/A)
n_i = 0.0
n = n_r + 1j * n_i
return n
def DF_TimeDelay(A, omega, tDelay_s):
# Time Delay (Gelb #33)
n_r = np.cos(omega * tDelay_s)
n_i = -np.sin(omega * tDelay_s)
n = (n_r + 1j * n_i) * np.ones_like(A)
return n
def DF_HardLimFreeplay(A, D, delta, m = 1):
# Hard-limit with Freeplay (Gelb #42)
# D is the position limit
# delta is half the freeplay
deltaPos = D/m + delta
deltaNeg = D/m - delta
A[A <= deltaPos] = np.nan
n_r = m/2 * (SatFunc(deltaPos/A) + SatFunc(deltaNeg/A))
n_i = -4*D*delta / (np.pi * A**2)
n = n_r + 1j * n_i
return n
def DF_CmdLimFreeplay(A, D, m = 1):
# Cmd-limit with Freeplay (Gelb #46)
n_r = m
n_i = -4 * D / (np.pi * A)
n = n_r + 1j * n_i
return n
def DF_BacklashFrict(A, b):
# Backlash (friction controlled) (Gelb #48)
A[np.abs(A) <= b/2] = np.nan
n_r = 1/2 * (1 + SatFunc(1 - b/A))
n_i = -1/np.pi * (2*b/A - (b/A)**2)
n = n_r + 1j * n_i
return n
def DF_RateLimit(A, rateLimit, omega):
# Rate Limit (Duda)
omegaOnset = rateLimit / A
omegaRatio = omegaOnset / omega
n = (4/np.pi) * omegaRatio * np.exp(-1j * np.arccos(np.pi/2 * omegaRatio))
return n
m = 1
freeplay = objServo.freeplay
cmdLim = objServo.cmdLim
defLim = objServo.pLim
vLim = objServo.vLim / sigPeakFactor
cmd = np.linspace(0, ampList.max(), 401)
# Saturation (Gelb #7)
A = np.copy(cmd)
nSat = DF_Saturation(A, defLim)
# Time Delay (Gelb #33)
A = np.copy(cmd)
omega = objServo.freqNat_rps * rps2hz
tDelay_s = objServo.timeDelay_s
nDelay = DF_TimeDelay(A, omega, tDelay_s)
# Hard-limit with Freeplay (#42)
delta = freeplay/2
D = defLim
A = np.copy(cmd)
nLim = DF_HardLimFreeplay(A, D, delta, m)
# nLim[np.abs(nLim) > 1] = 1.0
# Cmd-limit with Freeplay (Gelb #46)
D = m * freeplay/2
A = np.copy(cmd)
nCmdLim = DF_CmdLimFreeplay(A, D, m)
# Backlash (friction controlled) (Gelb #48)
b = freeplay
nBack = DF_BacklashFrict(A, b)
# Rate Limit
A = np.copy(cmd)
nRL = DF_RateLimit(A, vLim, freqNat_rps)
# Combine into a single DF response
nSat_temp = np.copy(nSat)
nSat_temp[np.isnan(nSat_temp)] = 1.0
nBack_temp = np.copy(nBack)
# nBack_temp[np.isnan(nBack_temp)] = 0.0
nRL_temp = np.copy(nRL)
nRL_temp[np.isnan(nRL_temp)] = 1.0
nDF = nSat_temp * nDelay * nBack_temp * nRL_temp
# Linear System
sysLin = control.tf([objServo.freqNat_rps**2], [1, 2*objServo.damp*objServo.freqNat_rps, objServo.freqNat_rps**2])
nLin = FreqTrans.FreqResp(sysLin, freqExc_rps)
#% Plot
fig = None
fig = FreqTrans.PlotGainType(cmd, np.abs(nSat), np.angle(nSat, deg=True), fig=fig, dB = False, label = 'Saturation Limit')
fig = FreqTrans.PlotGainType(cmd, np.abs(nDelay),
|
np.angle(nDelay, deg=True)
|
numpy.angle
|
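As a quick illustration of the gain/phase split used when plotting the describing functions above, here is a hedged, standalone sketch: np.abs gives the magnitude and np.angle (with deg=True) the phase of a complex response. The first-order lag below is only a stand-in, not one of the servo models from the script.
import numpy as np

# Hypothetical complex frequency response of a first-order lag, 1 / (1 + j*w*tau)
omega = np.linspace(0.1, 100.0, 5)   # rad/s
tau = 0.05
H = 1.0 / (1.0 + 1j * omega * tau)

gain_mag = np.abs(H)                 # magnitude
phase_deg = np.angle(H, deg=True)    # phase in degrees
print(gain_mag)
print(phase_deg)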
import logging
from mpl_toolkits.basemap import Basemap
import numpy as np
from datetime import timedelta
import itertools
import matplotlib
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from obspy.core import UTCDateTime, read, Stream
from obspy.geodetics import gps2dist_azimuth
from .tables1D import Pick, PickModified, Candidate, Associated
from .tt_stations_1D import Station1D
from .func1D import TTtable1D
log = logging.getLogger(__name__)
def add_subplot_axes(ax, rect, axisbg='w'):
fig = plt.gcf()
box = ax.get_position()
width = box.width
height = box.height
inax_position = ax.transAxes.transform(rect[0:2])
transFigure = fig.transFigure.inverted()
infig_position = transFigure.transform(inax_position)
x = infig_position[0]
y = infig_position[1]
width *= rect[2]
height *= rect[2]
subax = fig.add_axes([x, y, width, height], facecolor=axisbg)
x_labelsize = subax.get_xticklabels()[0].get_size()
y_labelsize = subax.get_yticklabels()[0].get_size()
x_labelsize *= rect[2] ** 0.5
y_labelsize *= rect[3] ** 0.5
subax.xaxis.set_tick_params(labelsize=x_labelsize)
subax.yaxis.set_tick_params(labelsize=y_labelsize)
return subax
class Plot:
def __init__(self, db_assoc, db_tt):
# Define travel time and associator database
engine_assoc = create_engine(db_assoc, echo=False)
# create a configuration file including paths
engine_tt_stations = create_engine(db_tt, echo=False)
Session1 = sessionmaker(bind=engine_assoc) # events table
Session2 = sessionmaker(bind=engine_tt_stations) # traveltime table
self.assoc_db = Session1()
self.tt_stations_db_1D = Session2()
def cluster_plot(self, assoc_ot_uncert=3):
# | | /\
# | | / \ /\
# | | /\ /\ / \ / \ /\
# _____|/\__|/ \ / \ / \ / \ / \ /\____
# | | \ / \ / \ / \ / \/
# | | \/ \ / \ / \/
# | | \/ \/
log.debug('Creating cluster plot')
matplotlib.rcParams["axes.labelsize"] = "large"
matplotlib.rcParams["axes.linewidth"] = 2.0
matplotlib.rcParams["xtick.major.size"] = 8
matplotlib.rcParams["ytick.major.size"] = 8
matplotlib.rcParams["ytick.minor.size"] = 5
matplotlib.rcParams["xtick.labelsize"] = "large"
matplotlib.rcParams["ytick.labelsize"] = "large"
dt_ot = timedelta(seconds=assoc_ot_uncert)
candidate_ots = self.assoc_db.query(Candidate).order_by(
Candidate.ot).all()
L_ots = len(candidate_ots)
arr = []
for i in range(L_ots):
cluster = self.assoc_db.query(Candidate).filter(
Candidate.ot >= candidate_ots[i].ot).filter(
Candidate.ot < (candidate_ots[i].ot + dt_ot)).order_by(
Candidate.ot).all()
cluster_sta = self.assoc_db.query(Candidate.sta).filter(
Candidate.ot >= candidate_ots[i].ot).filter(
Candidate.ot < (candidate_ots[i].ot + dt_ot)).order_by(
Candidate.ot).all()
l_cluster = len(set(cluster_sta))
arr.append((i, candidate_ots[i].ot, l_cluster, len(cluster)))
log.debug('DB query successful for canditate '
'origin time {}'.format(i))
log.debug('DB query successful for origin times and stations')
x1 = np.array(arr)[:, 0]
x2 =
|
np.array(arr)
|
numpy.array
|
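A short sketch of the pattern the cluster plot relies on: per-candidate tuples are collected in a Python list and converted with np.array so that columns can be sliced out. The tuple contents here are numeric placeholders (the original stores an origin-time object in the second slot).
import numpy as np

# (index, origin-time placeholder, n_unique_stations, n_candidates) per candidate
arr = [(0, 10.0, 3, 5),
       (1, 12.5, 4, 6),
       (2, 15.0, 2, 2)]

arr = np.array(arr)        # shape (n_candidates, 4)
x1 = arr[:, 0]             # candidate indices
x2 = arr[:, 2]             # number of distinct stations per cluster
print(x1, x2)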
"""
Demonstrates the following baseline anomaly-detection algorithms via sklearn
- One-class SVM
- Robust covariance estimate
- Isolation Forest
- Local Outlier Factor
The dataset is similar to the one described in Section 5.3 from
<NAME>, and <NAME>. "Anomaly Detection with Robust Deep Autoencoders." Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. ACM, 2017.
https://doi.org/10.1145/3097983.3098052
By default,
5000 datapoints are sampled from the MNIST dataset
95% (4750) consist of images of the digit '4'
5% (250) consist of anomalous images of other digits {'0', '7', '9'}
Reproducible sampling is ensured via setting a random seed and shuffle=False
"""
import numpy as np
import random
from sklearn.svm import OneClassSVM
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
#from sklearn.neighbors import LocalOutlierFactor
from tensorflow.examples.tutorials.mnist import input_data
random_seed = np.random.RandomState(1)
classifiers = {
"One-class SVM": OneClassSVM(
random_state=random_seed),
#"Robust covariance": EllipticEnvelope(
# contamination=0.05,
# random_state=random_seed),
"Isolation Forest": IsolationForest(
contamination=0.05,
random_state=random_seed)
#"Local Outlier Factor": LocalOutlierFactor(
# contamination=0.05)
}
# shuffle with fixed seed
# need to call random.seed(seed) everytime to reinitialize
def shuffle(data, seed=1):
random.seed(seed)
random.shuffle(data)
return data
# sample nominal and anomalous data from the MNIST dataset
def generate_mnist_anomaly_data(contamination=0.05, n_data=5000):
mnist = input_data.read_data_sets('./Data', one_hot=True)
nominal_label = {4}
anomalous_label = {0, 7, 9}
nominal_training_data = {'data':[], 'labels':[]}
anomalous_training_data = {'data':[], 'labels':[]}
total_training_size = n_data
anomalous_training_size = int(contamination * total_training_size)
nominal_training_size = total_training_size - anomalous_training_size
print("Generating {} total datapoint(s)...".format(total_training_size))
print("Generating {} nominal datapoint(s)...".format(nominal_training_size))
print("Generating {} anomalous datapoint(s)...".format(anomalous_training_size))
while len(nominal_training_data['data']) < nominal_training_size or len(anomalous_training_data['data']) < anomalous_training_size:
sample_data, sample_label = mnist.train.next_batch(1, shuffle=False)
sample_label = [[np.argmax(sample_label[0])]]
if len(nominal_training_data['data']) < nominal_training_size and sample_label[0][0] in nominal_label:
if len(nominal_training_data['data']) == 0:
nominal_training_data['data'] = sample_data
nominal_training_data['labels'] = sample_label
else:
nominal_training_data['data'] = np.concatenate((nominal_training_data['data'], sample_data))
nominal_training_data['labels'] =
|
np.concatenate((nominal_training_data['labels'], sample_label))
|
numpy.concatenate
|
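A minimal sketch of the batch-accumulation idiom used above: rows returned one at a time are appended with np.concatenate along axis 0. The 4-feature rows are invented for illustration; the MNIST images in the original are 784-wide.
import numpy as np

data = np.empty((0, 4))                 # accumulated samples, 4 features each
for _ in range(3):
    sample = np.random.rand(1, 4)       # one new row, e.g. from next_batch(1)
    if data.shape[0] == 0:
        data = sample
    else:
        data = np.concatenate((data, sample))  # stack along axis 0
print(data.shape)  # (3, 4)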
"""utility and helper functions / classes."""
import torch
import json
import os
import logging
from tqdm import tqdm
from sklearn.metrics import f1_score
import numpy as np
import random
from transformers import AutoTokenizer
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
def get_num_classes(DATASET: str) -> int:
"""Get the number of classes to be classified by dataset."""
if DATASET == 'MELD':
NUM_CLASSES = 7
elif DATASET == 'IEMOCAP':
NUM_CLASSES = 6
else:
raise ValueError
return NUM_CLASSES
def compute_metrics(eval_predictions) -> dict:
"""Return f1_weighted, f1_micro, and f1_macro scores."""
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
f1_weighted = f1_score(label_ids, preds, average='weighted')
f1_micro = f1_score(label_ids, preds, average='micro')
f1_macro = f1_score(label_ids, preds, average='macro')
return {'f1_weighted': f1_weighted, 'f1_micro': f1_micro, 'f1_macro': f1_macro}
def set_seed(seed: int) -> None:
"""Set random seed to a fixed value.
Set everything to be deterministic
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
|
np.random.seed(seed)
|
numpy.random.seed
|
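To make the determinism point concrete, a small hedged example: after np.random.seed with the same value, NumPy's global generator reproduces the same draws. (The original set_seed also seeds random and PYTHONHASHSEED and, presumably further down, torch.)
import numpy as np

np.random.seed(42)
a = np.random.rand(3)

np.random.seed(42)       # re-seeding resets the global generator
b = np.random.rand(3)

print(np.array_equal(a, b))  # True: identical sequences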
from __future__ import absolute_import, division, print_function
__copyright__ = "Copyright (C) 2017 - 2018 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import numpy as np
import loopy as lp
import pyopencl as cl
from scipy.interpolate import BarycentricInterpolator as Interpolator
from functools import partial
import volumential.list1_gallery as gallery
import volumential.singular_integral_2d as squad
logger = logging.getLogger('NearFieldInteractionTable')
def _self_tp(vec, tpd=2):
"""
Self tensor product
"""
assert len(vec.shape) == 1
if tpd == 1:
return vec
elif tpd == 2:
return vec.reshape([len(vec), 1]) * vec.reshape([1, len(vec)])
elif tpd == 3:
return (
vec.reshape([len(vec), 1, 1])
* vec.reshape([1, len(vec), 1])
* vec.reshape([1, 1, len(vec)])
)
else:
raise NotImplementedError
def _orthonormal(n, i):
eb = np.zeros(n)
eb[i] = 1
return eb
def constant_one(x, y=None, z=None):
return np.ones(np.array(x).shape)
# {{{ kernel function getters
def get_laplace(dim):
if dim != 2:
raise NotImplementedError(
"Kernel function Laplace" + str(dim) + "D not implemented."
)
else:
def laplace(x, y):
return -0.25 * np.log(np.array(x) ** 2 + np.array(y) ** 2) / np.pi
return laplace
def get_cahn_hilliard(dim, b=0, c=0, approx_at_origin=False):
if dim != 2:
raise NotImplementedError(
"Kernel function Laplace" + str(dim) + "D not implemented."
)
else:
def quadratic_formula_1(a, b, c):
return (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
def quadratic_formula_2(a, b, c):
return (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
def citardauq_formula_1(a, b, c):
return 2 * c / (-b - np.sqrt(b ** 2 - 4 * a * c))
def citardauq_formula_2(a, b, c):
return 2 * c / (-b + np.sqrt(b ** 2 - 4 * a * c))
def f(x):
return x ** 2 - b * x + c
root11 = quadratic_formula_1(1, -b, c)
root12 = citardauq_formula_1(1, -b, c)
if np.abs(f(root11)) < np.abs(f(root12)):
lam1 = np.sqrt(root11)
else:
lam1 = np.sqrt(root12)
root21 = quadratic_formula_2(1, -b, c)
root22 = citardauq_formula_2(1, -b, c)
if np.abs(f(root21)) < np.abs(f(root22)):
lam2 = np.sqrt(root21)
else:
lam2 = np.sqrt(root22)
# assert np.abs(f(lam1**2)) < 1e-12
# assert np.abs(f(lam2**2)) < 1e-12
lambdas = sorted([lam1, lam2], key=abs, reverse=True) # biggest first
lam1 = lambdas[0]
lam2 = lambdas[1]
import scipy.special as sp
def cahn_hilliard(x, y):
r = np.sqrt(np.array(x) ** 2 + np.array(y) ** 2)
# Leading order removed analytically
def k0_approx(rr, lam):
euler_constant = 0.57721566490153286060651209008240243104215933593992
r = rr * lam
return (
-(np.log(lam) + euler_constant)
- (np.log(r / 2) + euler_constant) * (r ** 2 / 4 + r ** 4 / 64)
+ (r ** 2 / 4 + r ** 4 * 3 / 128)
)
if approx_at_origin:
return (
-1
/ (2 * np.pi * (lam1 ** 2 - lam2 ** 2))
* (k0_approx(r, lam1) - k0_approx(r, lam2))
)
else:
return (
-1
/ (2 * np.pi * (lam1 ** 2 - lam2 ** 2))
* (sp.kn(0, lam1 * r) - sp.kn(0, lam2 * r))
)
return cahn_hilliard
def get_cahn_hilliard_laplacian(dim, b=0, c=0):
raise NotImplementedError(
"Transform method under construction, " "use DrosteSum instead"
)
# }}} End kernel function getters
def sumpy_kernel_to_lambda(sknl):
from sympy import Symbol, symbols, lambdify
var_name_prefix = "x"
var_names = " ".join([var_name_prefix + str(i) for i in range(sknl.dim)])
arg_names = symbols(var_names)
args = [Symbol(var_name_prefix + str(i)) for i in range(sknl.dim)]
def func(x, y=None, z=None):
coord = (x, y, z)
lmd = lambdify(
arg_names, sknl.get_expression(args) * sknl.get_global_scaling_const()
)
return lmd(*coord[: sknl.dim])
return func
# {{{ table data structure
class NearFieldInteractionTable(object):
"""Class for a near-field interaction table.
A near-field interaction table stores precomputed singular integrals
on template boxes and supports transforms to actual boxes on lookup.
The query process is done through scaling the entries based on actual
box sized.
Orientations are ordered counter-clockwise.
A template box is one of [0,1]^dim
"""
# {{{ constructor
def __init__(
self,
quad_order,
method="gauss-legendre",
dim=2,
kernel_func=None,
kernel_type=None,
sumpy_kernel=None,
build_method=None,
source_box_extent=1,
dtype=np.float64,
inverse_droste=False,
progress_bar=True,
**kwargs
):
"""
kernel_type determines how the kernel is scaled w.r.t. box size.
build_method can be "Transform" or "DrosteSum".
The source box is [0, source_box_extent]^dim
:arg inverse_droste True if computing with the fractional Laplacian kernel.
"""
self.quad_order = quad_order
self.dim = dim
self.dtype = dtype
self.inverse_droste = inverse_droste
assert source_box_extent > 0
self.source_box_extent = source_box_extent
self.center = np.ones(self.dim) * 0.5 * self.source_box_extent
self.build_method = build_method
if dim == 1:
if build_method == "Transform":
raise NotImplementedError("Use build_method=DrosteSum for 1d")
self.kernel_func = kernel_func
self.kernel_type = kernel_type
self.integral_knl = sumpy_kernel
elif dim == 2:
# Constant kernel can be used for fun/testing
if kernel_func is None:
kernel_func = constant_one
kernel_type = "const"
# for DrosteSum kernel_func is unused
if build_method == "Transform":
logger.warning("setting kernel_func to be constant.")
# Kernel function differs from OpenCL's kernels
self.kernel_func = kernel_func
self.kernel_type = kernel_type
self.integral_knl = sumpy_kernel
if build_method == "DrosteSum":
assert sumpy_kernel is not None
elif dim == 3:
if build_method == "Transform":
raise NotImplementedError("Use build_method=DrosteSum for 3d")
self.kernel_func = kernel_func
self.kernel_type = kernel_type
self.integral_knl = sumpy_kernel
else:
raise NotImplementedError
# number of quad points per box
# equals to the number of modes per box
self.n_q_points = self.quad_order ** dim
# Normalizers for polynomial modes
# Needed only when we want to rescale log type kernels
self.mode_normalizers = np.zeros(self.n_q_points, dtype=self.dtype)
# Exterior normalizers for hypersingular kernels
self.kernel_exterior_normalizers = np.zeros(
self.n_q_points, dtype=self.dtype)
# number of (source_mode, target_point) pairs between two boxes
self.n_pairs = self.n_q_points ** 2
# possible interaction cases
self.interaction_case_vecs, self.case_encode, self.case_indices = \
gallery.generate_list1_gallery(self.dim)
self.n_cases = len(self.interaction_case_vecs)
if method == "gauss-legendre":
# quad points in [-1,1]
import volumential.meshgen as mg
if 'queue' in kwargs:
queue = kwargs['queue']
else:
queue = None
q_points, _, _ = mg.make_uniform_cubic_grid(
degree=quad_order, level=1, dim=self.dim,
queue=queue)
# map to source box
mapped_q_points = np.array(
[
0.5 * self.source_box_extent * (qp + np.ones(self.dim))
for qp in q_points
]
)
# sort in dictionary order, preserve only the leading
# digits to prevent floating point errors from polluting
# the ordering.
q_points_ordering = sorted(
range(len(mapped_q_points)),
key=lambda i: list(np.floor(mapped_q_points[i] * 10000)),
)
self.q_points = mapped_q_points[q_points_ordering]
else:
raise NotImplementedError
self.data = np.empty(self.n_pairs * self.n_cases, dtype=self.dtype)
self.data.fill(np.nan)
total_evals = len(self.data) + self.n_q_points
if progress_bar:
from pytools import ProgressBar
self.pb = ProgressBar("Building table:", total_evals)
else:
self.pb = None
self.is_built = False
# }}} End constructor
# {{{ encode to table index
def get_entry_index(self, source_mode_index, target_point_index, case_id):
assert source_mode_index >= 0 and source_mode_index < self.n_q_points
assert target_point_index >= 0 and target_point_index < self.n_q_points
pair_id = source_mode_index * self.n_q_points + target_point_index
return case_id * self.n_pairs + pair_id
# }}} End encode to table index
# {{{ decode table index to entry info
def decode_index(self, entry_id):
"""This is the inverse function of get_entry_index()
"""
index_info = dict()
case_id = entry_id // self.n_pairs
pair_id = entry_id % self.n_pairs
source_mode_index = pair_id // self.n_q_points
target_point_index = pair_id % self.n_q_points
index_info["case_index"] = case_id
index_info["source_mode_index"] = source_mode_index
index_info["target_point_index"] = target_point_index
return index_info
# }}} End decode table index to entry info
# {{{ basis modes in the template box
def unwrap_mode_index(self, mode_index):
# NOTE: these two lines should be changed
# in accordance with the mesh generator
# to get correct xi (1d grid)
if self.dim == 1:
idx = [mode_index]
elif self.dim == 2:
idx = [mode_index // self.quad_order, mode_index % self.quad_order]
elif self.dim == 3:
idx = [
mode_index // (self.quad_order ** 2),
mode_index % (self.quad_order ** 2) // self.quad_order,
mode_index % (self.quad_order ** 2) % self.quad_order,
]
return idx
def get_template_mode(self, mode_index):
"""
Template modes are defined on an l_infty circle.
"""
assert mode_index >= 0 and mode_index < self.n_q_points
idx = self.unwrap_mode_index(mode_index)
xi = (
np.array([p[self.dim - 1] for p in self.q_points[: self.quad_order]])
/ self.source_box_extent
)
assert len(xi) == self.quad_order
yi = []
for d in range(self.dim):
yi.append(np.zeros(self.quad_order, dtype=self.dtype))
yi[d][idx[d]] = 1
axis_interp = [Interpolator(xi, yi[d]) for d in range(self.dim)]
def mode(*coords):
assert len(coords) == self.dim
if isinstance(coords[0], (int, float, complex)):
fvals = np.ones(1)
else:
fvals = np.ones(np.array(coords[0]).shape)
for d, coord in zip(range(self.dim), coords):
fvals = np.multiply(fvals, axis_interp[d](np.array(coord)))
return fvals
return mode
def get_mode(self, mode_index):
"""
normal modes are deined on the source box
"""
assert mode_index >= 0 and mode_index < self.n_q_points
idx = self.unwrap_mode_index(mode_index)
xi = np.array([p[self.dim - 1] for p in self.q_points[: self.quad_order]])
assert len(xi) == self.quad_order
yi = []
for d in range(self.dim):
yi.append(np.zeros(self.quad_order, dtype=self.dtype))
yi[d][idx[d]] = 1
axis_interp = [Interpolator(xi, yi[d]) for d in range(self.dim)]
def mode(*coords):
assert len(coords) == self.dim
if isinstance(coords[0], (int, float, complex)):
fvals = np.ones(1)
else:
fvals = np.ones(np.array(coords[0]).shape)
for d, coord in zip(range(self.dim), coords):
fvals = np.multiply(fvals, axis_interp[d](np.array(coord)))
return fvals
return mode
def get_mode_cheb_coeffs(self, mode_index, cheb_order):
"""
Cheb coeffs of a mode.
The projection process is performed on [0,1]^dim.
"""
import scipy.special as sps
cheby_nodes, _, cheby_weights = \
sps.chebyt(cheb_order).weights.T # pylint: disable=E1136,E0633
window = [0, 1]
cheby_nodes = cheby_nodes * (window[1] - window[0]) / 2 + np.mean(window)
cheby_weights = cheby_weights * (window[1] - window[0]) / 2
mode = self.get_template_mode(mode_index)
grid = np.meshgrid(
*[cheby_nodes for d in range(self.dim)],
indexing='ij')
mvals = mode(*grid)
from numpy.polynomial.chebyshev import Chebyshev
coef_scale = 2 *
|
np.ones(cheb_order)
|
numpy.ones
|
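A standalone sketch of the discrete Chebyshev projection that the coef_scale line above belongs to: sample a function at Chebyshev-Gauss nodes and weight the sums with a scale vector built from np.ones (2/N for every coefficient except the constant term, which gets 1/N). This is the classic quadrature rule, shown under that assumption, not a claim about volumential's exact implementation.
import numpy as np

cheb_order = 8
k = np.arange(cheb_order)
# Chebyshev-Gauss nodes on [-1, 1]
nodes = np.cos(np.pi * (k + 0.5) / cheb_order)

f = np.exp(nodes)                      # function to project, as an example

# Scale vector: 2/N everywhere, except the constant term which uses 1/N
coef_scale = 2 * np.ones(cheb_order) / cheb_order
coef_scale[0] /= 2

# c_m = scale_m * sum_k f(x_k) * T_m(x_k), with T_m(cos t) = cos(m t)
coeffs = np.array([
    coef_scale[m] * np.sum(f * np.cos(m * np.pi * (k + 0.5) / cheb_order))
    for m in range(cheb_order)
])
print(coeffs)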
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic, logistic_sigmoid
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
"""Check that extmath.randomized_svd is consistent with linalg.svd"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
"""Check that transposing the design matrix has limit impact"""
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(
|
np.dot(U1, V1)
|
numpy.dot
|
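A brief hedged sketch of why the test compares np.dot(U, V) products rather than the factors themselves: individual singular vectors are only defined up to a sign, but flipping the sign of a component in both U and V leaves their product unchanged, so two SVD results can be compared through it.
import numpy as np
from numpy.linalg import svd

rng = np.random.RandomState(0)
X = rng.randn(20, 8)

U, s, V = svd(X, full_matrices=False)

# Flip the sign of one singular component in both factors: X is unchanged,
# and so is the product U @ V, even though U and V individually differ.
U2, V2 = U.copy(), V.copy()
U2[:, 0] *= -1
V2[0, :] *= -1

print(np.allclose(np.dot(U, V), np.dot(U2, V2)))  # True
print(np.allclose(U, U2))                         # False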
"""Tables are sequences of labeled columns."""
__all__ = ['Table']
import abc
import collections
import collections.abc
import functools
import inspect
import itertools
import numbers
import urllib.parse
import warnings
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pandas
import IPython
import datascience.formats as _formats
import datascience.util as _util
from datascience.util import make_array
import datascience.predicates as _predicates
class Table(collections.abc.MutableMapping):
"""A sequence of string-labeled columns."""
plots = collections.deque(maxlen=10)
def __init__(self, labels=None, formatter=_formats.default_formatter):
"""Create an empty table with column labels.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles
letter | count | points
Args:
``labels`` (list of strings): The column labels.
``formatter`` (Formatter): An instance of :class:`Formatter` that
formats the columns' values.
"""
self._columns = collections.OrderedDict()
self._formats = dict()
self.formatter = formatter
labels = labels if labels is not None else []
columns = [[] for _ in labels]
self._num_rows = 0 if len(columns) == 0 else len(columns[0])
# Add each column to table
for column, label in zip(columns, labels):
self[label] = column
self.take = _RowTaker(self)
self.exclude = _RowExcluder(self)
# Deprecated
@classmethod
def empty(cls, labels=None):
"""Creates an empty table. Column labels are optional. [Deprecated]
Args:
``labels`` (None or list): If ``None``, a table with 0
columns is created.
If a list, each element is a column label in a table with
0 rows.
Returns:
A new instance of ``Table``.
"""
warnings.warn("Table.empty(labels) is deprecated. Use Table(labels)", FutureWarning)
if labels is None:
return cls()
values = [[] for label in labels]
return cls(values, labels)
# Deprecated
@classmethod
def from_rows(cls, rows, labels):
"""Create a table from a sequence of rows (fixed-length sequences). [Deprecated]"""
warnings.warn("Table.from_rows is deprecated. Use Table(labels).with_rows(...)", FutureWarning)
return cls(labels).with_rows(rows)
@classmethod
def from_records(cls, records):
"""Create a table from a sequence of records (dicts with fixed keys).
Args:
records: A list of dictionaries with same keys.
Returns:
If the list is empty, it will return an empty table.
Otherwise, it will return a table with the dictionary's keys as the column name, and the corresponding data.
If the dictionaries do not have identical keys, the keys of the first dictionary in the list is used.
"""
if not records:
return cls()
labels = sorted(list(records[0].keys()))
columns = [[rec[label] for rec in records] for label in labels]
return cls().with_columns(zip(labels, columns))
# Deprecated
@classmethod
def from_columns_dict(cls, columns):
"""Create a table from a mapping of column labels to column values. [Deprecated]"""
warnings.warn("Table.from_columns_dict is deprecated. Use Table().with_columns(...)", FutureWarning)
return cls().with_columns(columns.items())
@classmethod
def read_table(cls, filepath_or_buffer, *args, **vargs):
"""Read a table from a file or web address.
filepath_or_buffer -- string or file handle / StringIO; The string
could be a URL. Valid URL schemes include http,
ftp, s3, and file.
"""
# Look for .csv at the end of the path; use "," as a separator if found
try:
path = urllib.parse.urlparse(filepath_or_buffer).path
if 'data8.berkeley.edu' in filepath_or_buffer:
raise ValueError('data8.berkeley.edu requires authentication, '
'which is not supported.')
except AttributeError:
path = filepath_or_buffer
try:
if 'sep' not in vargs and path.endswith('.csv'):
vargs['sep'] = ','
except AttributeError:
pass
df = pandas.read_csv(filepath_or_buffer, *args, **vargs)
return cls.from_df(df)
def _with_columns(self, columns):
"""Create a table from a sequence of columns, copying column labels."""
table = type(self)()
for label, column in zip(self.labels, columns):
self._add_column_and_format(table, label, column)
return table
def _add_column_and_format(self, table, label, column):
"""Add a column to table, copying the formatter from self."""
label = self._as_label(label)
table[label] = column
if label in self._formats:
table._formats[label] = self._formats[label]
@classmethod
def from_df(cls, df, keep_index=False):
"""Convert a Pandas DataFrame into a Table.
`keep_index` -- keeps the index of the DataFrame
and turns it into a column called `index` in
the new Table
"""
t = cls()
if keep_index:
t.append_column("index", df.index.values)
labels = df.columns
for label in labels:
t.append_column(label, df[label])
return t
@classmethod
def from_array(cls, arr):
"""Convert a structured NumPy array into a Table.
Args:
arr: A structured numpy array
Returns:
A table with the field names as the column names and the corresponding data.
"""
return cls().with_columns([(f, arr[f]) for f in arr.dtype.names])
#################
# Magic Methods #
#################
def __getitem__(self, index_or_label):
return self.column(index_or_label)
def __setitem__(self, index_or_label, values):
self.append_column(index_or_label, values)
def __delitem__(self, index_or_label):
label = self._as_label(index_or_label)
del self._columns[label]
if label in self._formats:
del self._formats[label]
def __len__(self):
return len(self._columns)
def __iter__(self):
return iter(self.labels)
# Deprecated
def __getattr__(self, attr):
"""Return a method that applies to all columns or a table of attributes. [Deprecated]
E.g., t.sum() on a Table will return a table with the sum of each column.
"""
if self.columns and all(hasattr(c, attr) for c in self.columns):
warnings.warn("Implicit column method lookup is deprecated.", FutureWarning)
attrs = [getattr(c, attr) for c in self.columns]
if all(callable(attr) for attr in attrs):
@functools.wraps(attrs[0])
def method(*args, **vargs):
"""Create a table from the results of calling attrs."""
columns = [attr(*args, **vargs) for attr in attrs]
return self._with_columns(columns)
return method
else:
return self._with_columns([[attr] for attr in attrs])
else:
msg = "'{0}' object has no attribute '{1}'".format(type(self).__name__, attr)
raise AttributeError(msg)
####################
# Accessing Values #
####################
@property
def num_rows(self):
"""
Computes the number of rows in a table
Returns:
integer value stating number of rows
Example:
>>> t = Table().with_columns({
... 'letter': ['a', 'b', 'c', 'z'],
... 'count': [ 9, 3, 3, 1],
... 'points': [ 1, 2, 2, 10],
... })
>>> t.num_rows
4
"""
return self._num_rows
@property
def rows(self):
"""
Return a view of all rows.
Returns:
list-like Rows object that contains tuple-like Row objects
Example:
>>> t = Table().with_columns({
... 'letter': ['a', 'b', 'c', 'z'],
... 'count': [ 9, 3, 3, 1],
... 'points': [ 1, 2, 2, 10],
... })
>>> t.rows
Rows(letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10)
"""
return self.Rows(self)
def row(self, index):
"""Return a row."""
return self.rows[index]
@property
def labels(self):
"""
Return a tuple of column labels.
Returns:
tuple of labels
Example:
>>> t = Table().with_columns({
... 'letter': ['a', 'b', 'c', 'z'],
... 'count': [ 9, 3, 3, 1],
... 'points': [ 1, 2, 2, 10],
... })
>>> t.labels
('letter', 'count', 'points')
"""
return tuple(self._columns.keys())
# Deprecated
@property
def column_labels(self):
"""Return a tuple of column labels. [Deprecated]"""
warnings.warn("column_labels is deprecated; use labels", FutureWarning)
return self.labels
@property
def num_columns(self):
"""Number of columns."""
return len(self.labels)
@property
def columns(self):
"""
Return a tuple of columns, each with the values in that column.
Returns:
tuple of columns
Example:
>>> t = Table().with_columns({
... 'letter': ['a', 'b', 'c', 'z'],
... 'count': [ 9, 3, 3, 1],
... 'points': [ 1, 2, 2, 10],
... })
>>> t.columns
(array(['a', 'b', 'c', 'z'], dtype='<U1'),
array([9, 3, 3, 1]),
array([ 1, 2, 2, 10]))
"""
return tuple(self._columns.values())
def column(self, index_or_label):
"""Return the values of a column as an array.
table.column(label) is equivalent to table[label].
>>> tiles = Table().with_columns(
... 'letter', make_array('c', 'd'),
... 'count', make_array(2, 4),
... )
>>> list(tiles.column('letter'))
['c', 'd']
>>> tiles.column(1)
array([2, 4])
Args:
label (int or str): The index or label of a column
Returns:
An instance of ``numpy.array``.
Raises:
``ValueError``: When the ``index_or_label`` is not in the table.
"""
if (isinstance(index_or_label, str)
and index_or_label not in self.labels):
raise ValueError(
'The column "{}" is not in the table. The table contains '
'these columns: {}'
.format(index_or_label, ', '.join(self.labels))
)
if (isinstance(index_or_label, int)
and not 0 <= index_or_label < len(self.labels)):
raise ValueError(
'The index {} is not in the table. Only indices between '
'0 and {} are valid'
.format(index_or_label, len(self.labels) - 1)
)
return self._columns[self._as_label(index_or_label)]
@property
def values(self):
"""Return data in `self` as a numpy array.
If all columns are the same dtype, the resulting array
will have this dtype. If there are >1 dtypes in columns,
then the resulting array will have dtype `object`.
"""
dtypes = [col.dtype for col in self.columns]
if len(set(dtypes)) > 1:
dtype = object
else:
dtype = None
return np.array(self.columns, dtype=dtype).T
def column_index(self, label):
"""
Return the index of a column by looking up its label.
Args:
``label`` (str) -- label value of a column
Returns:
integer value specifying the index of the column label
Example:
>>> t = Table().with_columns({
... 'letter': ['a', 'b', 'c', 'z'],
... 'count': [ 9, 3, 3, 1],
... 'points': [ 1, 2, 2, 10],
... })
>>> t.column_index('letter')
0
"""
return self.labels.index(label)
def apply(self, fn, *column_or_columns):
"""Apply ``fn`` to each element or elements of ``column_or_columns``.
If no ``column_or_columns`` provided, ``fn`` is applied to each row.
Args:
``fn`` (function) -- The function to apply to each element
of ``column_or_columns``.
``column_or_columns`` -- Columns containing the arguments to ``fn``
as either column labels (``str``) or column indices (``int``).
The number of columns must match the number of arguments
that ``fn`` expects.
Raises:
``ValueError`` -- if ``column_label`` is not an existing
column in the table.
``TypeError`` -- if insufficient number of ``column_label`` passed
to ``fn``.
Returns:
An array consisting of results of applying ``fn`` to elements
specified by ``column_label`` in each row.
>>> t = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> t.apply(lambda x: x - 1, 'points')
array([0, 1, 1, 9])
>>> t.apply(lambda x, y: x * y, 'count', 'points')
array([ 9, 6, 6, 10])
>>> t.apply(lambda x: x - 1, 'count', 'points')
Traceback (most recent call last):
...
TypeError: <lambda>() takes 1 positional argument but 2 were given
>>> t.apply(lambda x: x - 1, 'counts')
Traceback (most recent call last):
...
ValueError: The column "counts" is not in the table. The table contains these columns: letter, count, points
Whole rows are passed to the function if no columns are specified.
>>> t.apply(lambda row: row[1] * 2)
array([18, 6, 6, 2])
"""
if not column_or_columns:
return np.array([fn(row) for row in self.rows])
else:
if len(column_or_columns) == 1 and \
_is_non_string_iterable(column_or_columns[0]):
warnings.warn(
"column lists are deprecated; pass each as an argument", FutureWarning)
column_or_columns = column_or_columns[0]
rows = zip(*self.select(*column_or_columns).columns)
return np.array([fn(*row) for row in rows])
def first(self, label):
"""
Return the zeroth item in a column.
Args:
``label`` (str) -- value of column label
Returns:
zeroth item of column
Example:
>>> t = Table().with_columns({
... 'letter': ['a', 'b', 'c', 'z'],
... 'count': [ 9, 3, 3, 1],
... 'points': [ 1, 2, 2, 10],
... })
>>> t.first('letter')
'a'
"""
return self.column(label)[0]
def last(self, label):
"""
Return the last item in a column.
Args:
``label`` (str) -- value of column label
Returns:
last item of column
Example:
>>> t = Table().with_columns({
... 'letter': ['a', 'b', 'c', 'z'],
... 'count': [ 9, 3, 3, 1],
... 'points': [ 1, 2, 2, 10],
... })
>>> t.last('letter')
'z'
"""
return self.column(label)[-1]
############
# Mutation #
############
def set_format(self, column_or_columns, formatter):
"""Set the format of a column."""
if inspect.isclass(formatter):
formatter = formatter()
if callable(formatter) and not hasattr(formatter, 'format_column'):
formatter = _formats.FunctionFormatter(formatter)
if not hasattr(formatter, 'format_column'):
raise Exception('Expected Formatter or function: ' + str(formatter))
for label in self._as_labels(column_or_columns):
if formatter.converts_values:
self[label] = formatter.convert_column(self[label])
self._formats[label] = formatter
return self
def move_to_start(self, column_label):
"""Move a column to the first in order."""
self._columns.move_to_end(self._as_label(column_label), last=False)
return self
def move_to_end(self, column_label):
"""Move a column to the last in order."""
self._columns.move_to_end(self._as_label(column_label))
return self
def append(self, row_or_table):
"""Append a row or all rows of a table. An appended table must have all
columns of self."""
if isinstance(row_or_table, np.ndarray):
row_or_table = row_or_table.tolist()
elif not row_or_table:
return
if isinstance(row_or_table, Table):
t = row_or_table
columns = list(t.select(self.labels)._columns.values())
n = t.num_rows
else:
if (len(list(row_or_table)) != self.num_columns):
raise Exception('Row should have '+ str(self.num_columns) + " columns")
columns, n = [[value] for value in row_or_table], 1
for i, column in enumerate(self._columns):
if self.num_rows:
self._columns[column] = np.append(self[column], columns[i])
else:
self._columns[column] = np.array(columns[i])
self._num_rows += n
return self
def append_column(self, label, values, formatter=None):
"""Appends a column to the table or replaces a column.
``__setitem__`` is aliased to this method:
``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to
``table['new_col'] = make_array(1, 2, 3)``.
Args:
``label`` (str): The label of the new column.
``values`` (single value or list/array): If a single value, every
value in the new column is ``values``.
If a list or array, the new column contains the values in
``values``, which must be the same length as the table.
``formatter`` (single formatter): Adds a formatter to the column being
appended. No formatter added by default.
Returns:
Original table with new or replaced column
Raises:
``ValueError``: If
- ``label`` is not a string.
- ``values`` is a list/array and does not have the same length
as the number of rows in the table.
>>> table = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> table.append_column('new_col1', make_array(10, 20, 30, 40))
letter | count | points | new_col1
a | 9 | 1 | 10
b | 3 | 2 | 20
c | 3 | 2 | 30
z | 1 | 10 | 40
>>> table.append_column('new_col2', 'hello')
letter | count | points | new_col1 | new_col2
a | 9 | 1 | 10 | hello
b | 3 | 2 | 20 | hello
c | 3 | 2 | 30 | hello
z | 1 | 10 | 40 | hello
>>> table.append_column(123, make_array(1, 2, 3, 4))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> table.append_column('bad_col', [1, 2])
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
# TODO(sam): Allow append_column to take in a another table, copying
# over formatter as needed.
if not isinstance(label, str):
raise ValueError('The column label must be a string, but a '
'{} was given'.format(label.__class__.__name__))
if not isinstance(values, np.ndarray):
# Coerce a single value to a sequence
if not _is_non_string_iterable(values):
values = [values] * max(self.num_rows, 1)
values = np.array(tuple(values))
if self.num_rows != 0 and len(values) != self.num_rows:
raise ValueError('Column length mismatch. New column does not have '
'the same number of rows as table.')
else:
self._num_rows = len(values)
self._columns[label] = values
if formatter is not None:
self.set_format(label, formatter)
return self
def relabel(self, column_label, new_label):
"""Changes the label(s) of column(s) specified by ``column_label`` to
labels in ``new_label``.
Args:
``column_label`` -- (single str or array of str) The label(s) of
columns to be changed to ``new_label``.
``new_label`` -- (single str or array of str): The label name(s)
of columns to replace ``column_label``.
Raises:
``ValueError`` -- if ``column_label`` is not in table, or if
``column_label`` and ``new_label`` are not of equal length.
``TypeError`` -- if ``column_label`` and/or ``new_label`` is not
``str``.
Returns:
Original table with ``new_label`` in place of ``column_label``.
>>> table = Table().with_columns(
... 'points', make_array(1, 2, 3),
... 'id', make_array(12345, 123, 5123))
>>> table.relabel('id', 'yolo')
points | yolo
1 | 12345
2 | 123
3 | 5123
>>> table.relabel(make_array('points', 'yolo'),
... make_array('red', 'blue'))
red | blue
1 | 12345
2 | 123
3 | 5123
>>> table.relabel(make_array('red', 'green', 'blue'),
... make_array('cyan', 'magenta', 'yellow', 'key'))
Traceback (most recent call last):
...
ValueError: Invalid arguments. column_label and new_label must be of equal length.
"""
if isinstance(column_label, numbers.Integral):
column_label = self._as_label(column_label)
if isinstance(column_label, str) and isinstance(new_label, str):
column_label, new_label = [column_label], [new_label]
if len(column_label) != len(new_label):
raise ValueError('Invalid arguments. column_label and new_label '
'must be of equal length.')
old_to_new = dict(zip(column_label, new_label)) # maps old labels to new ones
for label in column_label:
if not (label in self.labels):
raise ValueError('Invalid labels. Column labels must '
'already exist in table in order to be replaced.')
rewrite = lambda s: old_to_new[s] if s in old_to_new else s
columns = [(rewrite(s), c) for s, c in self._columns.items()]
self._columns = collections.OrderedDict(columns)
for label in column_label:
# TODO(denero) Error when old and new columns share a name
if label in self._formats:
formatter = self._formats.pop(label)
self._formats[old_to_new[label]] = formatter
return self
def remove(self, row_or_row_indices):
"""Removes a row or multiple rows of a table in place.
``row_or_row_indices`` is a single row index (``int``) or an iterable
of row indices. Returns the modified table.
"""
if not row_or_row_indices and not isinstance(row_or_row_indices, int):
return
if isinstance(row_or_row_indices, int):
rows_remove = [row_or_row_indices]
else:
rows_remove = row_or_row_indices
for col in self._columns:
# Keep the column as a numpy array so later operations behave consistently
self._columns[col] = np.array([elem for i, elem in enumerate(self[col]) if i not in rows_remove])
self._num_rows -= len(rows_remove)
return self
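# A minimal usage sketch for remove() (hypothetical table; kept as a comment
# so that nothing executes at import time):
#
# t = Table().with_columns('letter', make_array('a', 'b', 'c'),
# 'count', make_array(9, 3, 3))
# t.remove(1) # drops the second row in place
# t.remove([0, 1]) # drops several rows by index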
##################
# Transformation #
##################
def copy(self, *, shallow=False):
"""Return a copy of a table.
If ``shallow`` is True, the copy shares the original column arrays
instead of copying them.
"""
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table
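# A minimal usage sketch for copy() (hypothetical table; kept as a comment so
# that nothing executes at import time):
#
# t = Table().with_column('letter', make_array('a', 'b'))
# t2 = t.copy()
# t2['count'] = make_array(1, 2) # the (deep) copy leaves t unchanged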
def select(self, *column_or_columns):
"""Return a table with only the columns in ``column_or_columns``.
Args:
``column_or_columns``: Columns to select from the ``Table`` as
either column labels (``str``) or column indices (``int``).
Returns:
A new instance of ``Table`` containing only selected columns.
The columns of the new ``Table`` are in the order given in
``column_or_columns``.
Raises:
``KeyError`` if any of ``column_or_columns`` are not in the table.
>>> flowers = Table().with_columns(
... 'Number of petals', make_array(8, 34, 5),
... 'Name', make_array('lotus', 'sunflower', 'rose'),
... 'Weight', make_array(10, 5, 6)
... )
>>> flowers
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select('Number of petals', 'Weight')
Number of petals | Weight
8 | 10
34 | 5
5 | 6
>>> flowers # original table unchanged
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select(0, 2)
Number of petals | Weight
8 | 10
34 | 5
5 | 6
"""
labels = self._varargs_as_labels(column_or_columns)
table = type(self)()
for label in labels:
self._add_column_and_format(table, label, np.copy(self[label]))
return table
# These, along with a snippet below, are necessary for Sphinx to
# correctly load the `take` and `exclude` docstrings. The definitions
# will be over-ridden during class instantiation.
def take(self):
raise NotImplementedError()
def exclude(self):
raise NotImplementedError()
def drop(self, *column_or_columns):
"""Return a Table with only columns other than selected label or
labels.
Args:
``column_or_columns`` (string or list of strings): The header
names or indices of the columns to be dropped.
``column_or_columns`` must be an existing header name, or a
valid column index.
Returns:
An instance of ``Table`` with given columns removed.
>>> t = Table().with_columns(
... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),
... 'prices', make_array(6, 5, 5),
... 'calories', make_array(743, 651, 582))
>>> t
burgers | prices | calories
cheeseburger | 6 | 743
hamburger | 5 | 651
veggie burger | 5 | 582
>>> t.drop('prices')
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
>>> t.drop(['burgers', 'calories'])
prices
6
5
5
>>> t.drop('burgers', 'calories')
prices
6
5
5
>>> t.drop([0, 2])
prices
6
5
5
>>> t.drop(0, 2)
prices
6
5
5
>>> t.drop(1)
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
"""
exclude = _varargs_labels_as_list(column_or_columns)
return self.select([c for (i, c) in enumerate(self.labels)
if i not in exclude and c not in exclude])
def where(self, column_or_label, value_or_predicate=None, other=None):
"""
Return a new ``Table`` containing rows where ``value_or_predicate``
returns True for values in ``column_or_label``.
Args:
``column_or_label``: A column of the ``Table`` either as a label
(``str``) or an index (``int``). Can also be an array of booleans;
only the rows where the array value is ``True`` are kept.
``value_or_predicate``: If a function, it is applied to every value
in ``column_or_label``. Only the rows where ``value_or_predicate``
returns True are kept. If a single value, only the rows where the
values in ``column_or_label`` are equal to ``value_or_predicate``
are kept.
``other``: Optional additional column label for
``value_or_predicate`` to make pairwise comparisons. See the
examples below for usage. When ``other`` is supplied,
``value_or_predicate`` must be a callable function.
Returns:
If ``value_or_predicate`` is a function, returns a new ``Table``
containing only the rows where ``value_or_predicate(val)`` is True
for the ``val``s in ``column_or_label``.
If ``value_or_predicate`` is a value, returns a new ``Table``
containing only the rows where the values in ``column_or_label``
are equal to ``value_or_predicate``.
If ``column_or_label`` is an array of booleans, returns a new
``Table`` containing only the rows where ``column_or_label`` is
``True``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue",
... "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular",
... "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.2
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 0
Green | Round | 2 | 3
Use a value to select matching rows
>>> marbles.where("Price", 1.3)
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
In general, a higher order predicate function such as the functions in
``datascience.predicates.are`` can be used.
>>> from datascience.predicates import are
>>> # equivalent to previous example
>>> marbles.where("Price", are.equal_to(1.3))
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
>>> marbles.where("Price", are.above(1.5))
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Round | 2 | 3
Use the optional argument ``other`` to apply predicates to compare
columns.
>>> marbles.where("Price", are.above, "Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 3
>>> marbles.where("Price", are.equal_to, "Amount") # empty table
Color | Shape | Amount | Price
"""
column = self._get_column(column_or_label)
if other is not None:
assert callable(value_or_predicate), "Predicate required for 3-arg where"
predicate = value_or_predicate
other = self._get_column(other)
column = [predicate(y)(x) for x, y in zip(column, other)]
elif value_or_predicate is not None:
if not callable(value_or_predicate):
predicate = _predicates.are.equal_to(value_or_predicate)
else:
predicate = value_or_predicate
column = [predicate(x) for x in column]
return self.take(np.nonzero(column)[0])
def sort(self, column_or_label, descending=False, distinct=False):
"""Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
"""
column = self._get_column(column_or_label)
if distinct:
_, row_numbers = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::-1])
return self.take(row_numbers)
def group(self, column_or_label, collect=None):
"""Group rows by unique values in a column; count or aggregate others.
Args:
``column_or_label``: values to group (column label or index, or array)
``collect``: a function applied to values in other columns for each group
Returns:
A Table with each row corresponding to a unique value in ``column_or_label``,
where the first column contains the unique values from ``column_or_label``, and the
second contains counts for each of the unique values. If ``collect`` is
provided, a Table is returned with all original columns, each containing values
calculated by first grouping rows according to ``column_or_label``, then applying
``collect`` to each set of grouped values in the other columns.
Note:
The grouped column will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.group("Color") # just gives counts
Color | count
Blue | 1
Green | 3
Red | 2
>>> marbles.group("Color", max) # takes the max of each grouping, in each column
Color | Shape max | Amount max | Price max
Blue | Rectangular | 12 | 2
Green | Round | 9 | 1.4
Red | Round | 7 | 1.75
>>> marbles.group("Shape", sum) # sum doesn't make sense for strings
Shape | Color sum | Amount sum | Price sum
Rectangular | | 27 | 4.7
Round | | 13 | 4.05
"""
# Assume that a call to group with a list of labels is a call to groups
if _is_non_string_iterable(column_or_label) and \
len(column_or_label) != self._num_rows:
return self.groups(column_or_label, collect)
self = self.copy(shallow=True)
collect = _zero_on_type_error(collect)
# Remove column used for grouping
column = self._get_column(column_or_label)
if isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral):
column_label = self._as_label(column_or_label)
del self[column_label]
else:
column_label = self._unused_label('group')
# Group by column
groups = self.index_by(column)
keys = sorted(groups.keys())
# Generate grouped columns
if collect is None:
labels = [column_label, 'count' if column_label != 'count' else self._unused_label('count')]
columns = [keys, [len(groups[k]) for k in keys]]
else:
columns, labels = [], []
for i, label in enumerate(self.labels):
labels.append(_collected_label(collect, label))
c = [collect(np.array([row[i] for row in groups[k]])) for k in keys]
columns.append(c)
grouped = type(self)().with_columns(zip(labels, columns))
assert column_label == self._unused_label(column_label)
grouped[column_label] = keys
grouped.move_to_start(column_label)
return grouped
def groups(self, labels, collect=None):
"""Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according
to values in the ``labels`` columns, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05
"""
# Assume that a call to groups with one label is a call to group
if not _is_non_string_iterable(labels):
return self.group(labels, collect=collect)
collect = _zero_on_type_error(collect)
columns = []
labels = self._as_labels(labels)
for label in labels:
if label not in self.labels:
raise ValueError("All labels must exist in the table")
columns.append(self._get_column(label))
grouped = self.group(list(zip(*columns)), lambda s: s)
grouped._columns.popitem(last=False) # Discard the column of tuples
# Flatten grouping values and move them to front
counts = [len(v) for v in grouped[0]]
for label in labels[::-1]:
grouped[label] = grouped.apply(_assert_same, label)
grouped.move_to_start(label)
# Aggregate other values
if collect is None:
count = 'count' if 'count' not in labels else self._unused_label('count')
return grouped.select(labels).with_column(count, counts)
else:
for label in grouped.labels:
if label in labels:
continue
column = [collect(v) for v in grouped[label]]
del grouped[label]
grouped[_collected_label(collect, label)] = column
return grouped
def pivot(self, columns, rows, values=None, collect=None, zero=None):
"""Generate a table with a column for each unique value in ``columns``,
with rows for each unique value in ``rows``. Each row counts/aggregates
the values that match both row and column based on ``collect``.
Args:
``columns`` -- a single column label or index, (``str`` or ``int``),
used to create new columns, based on its unique values.
``rows`` -- row labels or indices, (``str`` or ``int`` or list),
used to create new rows based on its unique values.
``values`` -- column label in table for use in aggregation.
Default None.
``collect`` -- aggregation function, used to group ``values``
over row-column combinations. Default None.
``zero`` -- zero value to use for non-existent row-column
combinations.
Raises:
TypeError -- if ``collect`` is passed in and ``values`` is not,
vice versa.
Returns:
New pivot table, with row-column combinations, as specified, with
aggregated ``values`` by ``collect`` across the intersection of
``columns`` and ``rows``. Simple counts provided if values and
collect are None, as default.
>>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95
... , 40, 80, 45), 'survival', make_array(0,0,0,1, 1, 1, 0, 1),
... 'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),
... 'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))
>>> titanic
age | survival | gender | prediction
21 | 0 | M | 0
44 | 0 | M | 0
56 | 0 | M | 1
89 | 1 | M | 1
95 | 1 | F | 0
40 | 1 | F | 1
80 | 0 | F | 0
45 | 1 | F | 1
>>> titanic.pivot('survival', 'gender')
gender | 0 | 1
F | 1 | 3
M | 3 | 1
>>> titanic.pivot('prediction', 'gender')
gender | 0 | 1
F | 2 | 2
M | 2 | 2
>>> titanic.pivot('survival', 'gender', values='age', collect = np.mean)
gender | 0 | 1
F | 80 | 60
M | 40.3333 | 89
>>> titanic.pivot('survival', make_array('prediction', 'gender'))
prediction | gender | 0 | 1
0 | F | 1 | 1
0 | M | 2 | 0
1 | F | 0 | 2
1 | M | 1 | 1
>>> titanic.pivot('survival', 'gender', values = 'age')
Traceback (most recent call last):
...
TypeError: values requires collect to be specified
>>> titanic.pivot('survival', 'gender', collect = np.mean)
Traceback (most recent call last):
...
TypeError: collect requires values to be specified
"""
if collect is not None and values is None:
raise TypeError('collect requires values to be specified')
if values is not None and collect is None:
raise TypeError('values requires collect to be specified')
columns = self._as_label(columns)
rows = self._as_labels(rows)
if values is None:
selected = self.select([columns] + rows)
else:
selected = self.select([columns, values] + rows)
grouped = selected.groups([columns] + rows, collect)
# Generate existing combinations of values from columns in rows
rows_values = sorted(list(set(self.select(rows).rows)))
pivoted = type(self)(rows).with_rows(rows_values)
# Generate other columns and add them to pivoted
by_columns = grouped.index_by(columns)
for label in sorted(by_columns):
tuples = [t[1:] for t in by_columns[label]] # Discard column value
column = _fill_with_zeros(rows_values, tuples, zero)
pivot = self._unused_label(str(label))
pivoted[pivot] = column
return pivoted
def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs) :
"""Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1.
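A minimal sketch (hypothetical data; counts depend on the chosen bin edges,
so the output is only indicative and skipped):
>>> t = Table().with_columns('key', make_array('a', 'a', 'b', 'b'),
... 'value', make_array(1, 2, 3, 4))
>>> t.pivot_bin('key', 'value', bins=make_array(0, 2, 4)) # doctest: +SKIP
bin | a | b
0 | 1 | 0
2 | 1 | 2
4 | 0 | 0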
"""
pivot_columns = _as_labels(pivot_columns)
selected = self.select(pivot_columns + [value_column])
grouped = selected.groups(pivot_columns, collect=lambda x:x)
# refine bins by taking a histogram over all the data
if bins is not None:
vargs['bins'] = bins
_, rbins = np.histogram(self[value_column],**vargs)
# create a table with these bins a first column and counts for each group
vargs['bins'] = rbins
binned = type(self)().with_column('bin',rbins)
for group in grouped.rows:
col_label = "-".join(map(str,group[0:-1]))
col_vals = group[-1]
counts,_ = np.histogram(col_vals,**vargs)
binned[col_label] = np.append(counts,0)
return binned
def stack(self, key, labels=None):
"""Takes k original columns and returns a table with three columns: the
``key`` column, a 'column' column holding the original column labels, and
a 'value' column holding the associated data. Only columns in ``labels``
(default: all columns) other than ``key`` are stacked.
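A minimal sketch of the reshaping (hypothetical table; output skipped since
exact formatting may vary):
>>> t = Table().with_columns('letter', make_array('a', 'b'),
... 'count', make_array(9, 3),
... 'points', make_array(1, 2))
>>> t.stack('letter') # doctest: +SKIP
letter | column | value
a | count | 9
a | points | 1
b | count | 3
b | points | 2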
"""
rows, labels = [], labels or self.labels
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for k, v in row.asdict().items()
if k != key and k in labels]
return type(self)([key, 'column', 'value']).with_rows(rows)
def join(self, column_label, other, other_label=None):
"""Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label``: label of column or array of labels in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label``: default None, in which case ``column_label`` is used.
Otherwise, the label of the column in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table.join(['a', 'b'], table2, ['a', 'd']) # joining on multiple columns
a | b | c | e
1 | 10 | 6 | 6
9 | 1 | 3 | 3
"""
if self.num_rows == 0 or other.num_rows == 0:
return None
if not other_label:
other_label = column_label
# checking to see if joining on multiple columns
if _is_non_string_iterable(column_label):
# then we are going to be joining multiple labels
return self._multiple_join(column_label, other, other_label)
# original single column join
return self._join(column_label, other, other_label)
def _join(self, column_label, other, other_label=[]):
"""joins when COLUMN_LABEL is a string"""
if self.num_rows == 0 or other.num_rows == 0:
return None
if not other_label:
other_label = column_label
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
return self._join_helper([column_label], self_rows, other, [other_label], other_rows)
def _multiple_join(self, column_label, other, other_label=[]):
"""joins when column_label is a non-string iterable"""
assert len(column_label) == len(other_label), 'unequal number of columns'
self_rows = self._multi_index(column_label)
other_rows = other._multi_index(other_label)
return self._join_helper(column_label, self_rows, other, other_label, other_rows)
def _join_helper(self, column_label, self_rows, other, other_label, other_rows):
# Gather joined rows from self_rows that have join values in other_rows
joined_rows = []
for v, rows in self_rows.items():
if v in other_rows:
joined_rows += [row + o for row in rows for o in other_rows[v]]
if not joined_rows:
return None
# Build joined table
self_labels = list(self.labels)
other_labels = [self._unused_label(s) for s in other.labels]
if (len(set(self_labels + other_labels)) != len(list(self_labels + other_labels))):
other_labels = [self._unused_label_in_either_table(s, other) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)(self_labels + other_labels).with_rows(joined_rows)
# Copy formats from both tables
joined._formats.update(self._formats)
for label in other._formats:
joined._formats[other_labels_map[label]] = other._formats[label]
# Remove redundant column, but perhaps save its formatting
for duplicate in other_label:
del joined[other_labels_map[duplicate]]
for duplicate in other_label:
if duplicate not in self._formats and duplicate in other._formats:
joined._formats[duplicate] = other._formats[duplicate]
for col in column_label[::-1]:
joined = joined.move_to_start(col).sort(col)
return joined
def stats(self, ops=(min, max, np.median, sum)):
"""Compute statistics for each column and place them in a table.
``ops`` is a sequence of aggregation functions applied to each column
(default: ``min``, ``max``, ``np.median``, ``sum``).
"""
names = [op.__name__ for op in ops]
ops = [_zero_on_type_error(op) for op in ops]
columns = [[op(column) for op in ops] for column in self.columns]
table = type(self)().with_columns(zip(self.labels, columns))
stats = table._unused_label('statistic')
table[stats] = names
table.move_to_start(stats)
return table
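# A minimal usage sketch for stats() (hypothetical table; kept as a comment so
# that nothing executes at import time):
#
# t = Table().with_column('count', make_array(9, 3, 3, 1))
# t.stats() # one row per op: min=1, max=9, median=3, sum=16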
def _as_label(self, index_or_label):
"""Convert index to label."""
if isinstance(index_or_label, str):
return index_or_label
if isinstance(index_or_label, numbers.Integral):
return self.labels[index_or_label]
else:
raise ValueError(str(index_or_label) + ' is not a label or index')
def _as_labels(self, label_or_labels):
"""Convert single label to list and convert indices to labels."""
return [self._as_label(s) for s in _as_labels(label_or_labels)]
def _varargs_as_labels(self, label_list):
"""Converts a list of labels or singleton list of list of labels into
a list of labels. Useful when labels are passed as varargs."""
return self._as_labels(_varargs_labels_as_list(label_list))
def _unused_label(self, label):
"""Generate an unused label."""
original = label
existing = self.labels
i = 2
while label in existing:
label = '{}_{}'.format(original, i)
i += 1
return label
def _unused_label_in_either_table(self, label, other):
original = label
existing_self = self.labels
existing_other = other.labels
i = 2
while label in existing_self:
label = '{}_{}'.format(original, i)
i += 1
while label in existing_other:
label = '{}_{}'.format(original, i)
i += 1
return label
def _get_column(self, column_or_label):
"""Convert label to column and check column length."""
c = column_or_label
if isinstance(c, collections.abc.Hashable) and c in self.labels:
return self[c]
elif isinstance(c, numbers.Integral):
return self[c]
elif isinstance(c, str):
raise ValueError('label "{}" not in labels {}'.format(c, self.labels))
else:
assert len(c) == self.num_rows, 'column length mismatch'
return c
def percentile(self, p):
"""Return a new table with one row containing the pth percentile for
each column.
Assumes that each column only contains one type of value.
Returns a new table with one row and the same column labels.
The row contains the pth percentile of the original column, where the
pth percentile of a column is the smallest value that is at least as
large as p% of the numbers in the column.
>>> table = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> table.percentile(80)
count | points
9 | 10
"""
percentiles = [[_util.percentile(p, column)] for column in self.columns]
return self._with_columns(percentiles)
def sample(self, k=None, with_replacement=True, weights=None):
"""Return a new table where k rows are randomly sampled from the
original table.
Args:
``k`` -- specifies the number of rows (``int``) to be sampled from
the table. Default is k equal to number of rows in the table.
``with_replacement`` -- (``bool``) By default True;
Samples ``k`` rows with replacement from table, else samples
``k`` rows without replacement.
``weights`` -- Array specifying probability the ith row of the
table is sampled. Defaults to None, which samples each row
with equal probability. ``weights`` must be a valid probability
distribution -- i.e. an array the length of the number of rows,
summing to 1.
Raises:
ValueError -- if ``weights`` is not length equal to number of rows
in the table; or, if ``weights`` does not sum to 1.
Returns:
A new instance of ``Table`` with ``k`` rows resampled.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.sample() # doctest: +SKIP
job | wage
b | 20
b | 20
a | 10
d | 8
>>> jobs.sample(with_replacement=True) # doctest: +SKIP
job | wage
d | 8
b | 20
c | 15
a | 10
>>> jobs.sample(k = 2) # doctest: +SKIP
job | wage
b | 20
c | 15
>>> ws = make_array(0.5, 0.5, 0, 0)
>>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP
job | wage
a | 10
a | 10
>>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))
Traceback (most recent call last):
...
ValueError: probabilities do not sum to 1
>>> jobs.sample(k=2, weights=make_array(1, 0, 0)) # Weights must be length of table.
Traceback (most recent call last):
...
ValueError: 'a' and 'p' must have same size
"""
n = self.num_rows
if k is None:
k = n
index = np.random.choice(n, k, replace=with_replacement, p=weights)
columns = [[c[i] for i in index] for c in self.columns]
sample = self._with_columns(columns)
return sample
def shuffle(self):
"""Return a new table where all the rows are randomly shuffled from the
original table.
Returns:
A new instance of ``Table`` with all of its rows shuffled.
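A minimal sketch (hypothetical table; the row order varies between runs):
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c'),
... 'wage', make_array(10, 20, 15))
>>> jobs.shuffle() # doctest: +SKIP
job | wage
b | 20
c | 15
a | 10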
"""
return self.sample(with_replacement=False)
def sample_from_distribution(self, distribution, k, proportions=False):
"""Return a new table with the same number of rows and a new column.
The values in the distribution column define a multinomial distribution.
The output contains a new column of sampled counts (or proportions)
drawn from that distribution.
>>> sizes = Table(['size', 'count']).with_rows([
... ['small', 50],
... ['medium', 100],
... ['big', 50],
... ])
>>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP
size | count | count sample
small | 50 | 239
medium | 100 | 496
big | 50 | 265
>>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP
size | count | count sample
small | 50 | 0.24
medium | 100 | 0.51
big | 50 | 0.25
"""
dist = self._get_column(distribution)
total = sum(dist)
assert total > 0 and np.all(dist >= 0), 'Counts or a distribution required'
dist = dist/sum(dist)
sample = np.random.multinomial(k, dist)
if proportions:
sample = sample / sum(sample)
label = self._unused_label(self._as_label(distribution) + ' sample')
return self.with_column(label, sample)
def split(self, k):
"""Return a tuple of two tables where the first table contains
``k`` rows randomly sampled and the second contains the remaining rows.
Args:
``k`` (int): The number of rows randomly sampled into the first
table. ``k`` must be between 1 and ``num_rows - 1``.
Raises:
``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.
Returns:
A tuple containing two instances of ``Table``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> sample, rest = jobs.split(3)
>>> sample # doctest: +SKIP
job | wage
c | 15
a | 10
b | 20
>>> rest # doctest: +SKIP
job | wage
d | 8
"""
if not 1 <= k <= self.num_rows - 1:
raise ValueError("Invalid value of k. k must be between 1 and the "
"number of rows - 1")
rows = np.random.permutation(self.num_rows)
first = self.take(rows[:k])
rest = self.take(rows[k:])
for column_label in self._formats:
first._formats[column_label] = self._formats[column_label]
rest._formats[column_label] = self._formats[column_label]
return first, rest
def with_row(self, row):
"""Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2
"""
self = self.copy()
self.append(row)
return self
def with_rows(self, rows):
"""Return a table with additional rows.
Args:
``rows`` (sequence of sequences): Each row has a value per column.
If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.
Raises:
``ValueError``: If a row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_rows(make_array(make_array('c', 2, 3),
... make_array('d', 4, 2)))
letter | count | points
c | 2 | 3
d | 4 | 2
"""
self = self.copy()
self.append(self._with_columns(zip(*rows)))
return self
def with_column(self, label, values, formatter=None):
"""Return a new table with an additional or replaced column.
Args:
``label`` (str): The column label. If an existing label is used,
the existing column will be replaced in the new table.
``values`` (single value or sequence): If a single value, every
value in the new column is ``values``. If sequence of values,
new column takes on values in ``values``.
``formatter`` (single value): Specifies formatter for the new column. Defaults to no formatter.
Raises:
``ValueError``: If
- ``label`` is not a valid column name
- ``label`` is not of type (str)
- ``values`` is a list/array that does not have the same
length as the number of rows in the table.
Returns:
copy of original table with new or replaced column
>>> alphabet = Table().with_column('letter', make_array('c','d'))
>>> alphabet = alphabet.with_column('count', make_array(2, 4))
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('permutes', make_array('a', 'g'))
letter | count | permutes
c | 2 | a
d | 4 | g
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('count', 1)
letter | count
c | 1
d | 1
>>> alphabet.with_column(1, make_array(1, 2))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> alphabet.with_column('bad_col', make_array(1))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
# with_columns forwards its keyword arguments here as a dict, so unwrap
# that dict; this also keeps direct calls to with_column working.
new_table = self.copy()
if formatter == {}:
formatter = None
elif isinstance(formatter, dict):
formatter = formatter["formatter"]
new_table.append_column(label, values, formatter)
return new_table
def with_columns(self, *labels_and_values, **formatter):
"""Return a table with additional or replaced columns.
Args:
``labels_and_values``: An alternating list of labels and values
or a list of label-value pairs. If one of the labels is in
existing table, then every value in the corresponding column is
set to that value. If label has only a single value (``int``),
every row of corresponding column takes on that value.
``formatter`` (single Formatter value): A single formatter value
that will be applied to all columns being added using this
function call.
Raises:
``ValueError``: If
- any label in ``labels_and_values`` is not a valid column
name, i.e if label is not of type (str).
- if any value in ``labels_and_values`` is a list/array and
does not have the same length as the number of rows in the
table.
``AssertionError``:
- 'incorrect columns format', if passed more than one sequence
(iterables) for ``labels_and_values``.
- 'even length sequence required' if missing a pair in
label-value pairs.
Returns:
Copy of original table with new or replaced columns. Columns added
in order of labels. Equivalent to ``with_column(label, value)``
when passed only one label-value pair.
>>> players = Table().with_columns('player_id',
... make_array(110234, 110235), 'wOBA', make_array(.354, .236))
>>> players
player_id | wOBA
110234 | 0.354
110235 | 0.236
>>> players = players.with_columns('salaries', 'N/A', 'season', 2016)
>>> players
player_id | wOBA | salaries | season
110234 | 0.354 | N/A | 2016
110235 | 0.236 | N/A | 2016
>>> salaries = Table().with_column('salary',
... make_array(500000, 15500000))
>>> players.with_columns('salaries', salaries.column('salary'),
... 'bonus', make_array(6, 1), formatter=_formats.CurrencyFormatter)
player_id | wOBA | salaries | season | bonus
110234 | 0.354 | $500,000 | 2016 | $6
110235 | 0.236 | $15,500,000 | 2016 | $1
>>> players.with_columns(2, make_array('$600,000', '$20,000,000'))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> players.with_columns('salaries', make_array('$600,000'))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
if not isinstance(self, Table):
raise TypeError('Use Table().with_columns() to create a new table, \
not Table.with_columns()')
if len(labels_and_values) == 1:
labels_and_values = labels_and_values[0]
if isinstance(labels_and_values, collections.abc.Mapping):
labels_and_values = list(labels_and_values.items())
if not isinstance(labels_and_values, collections.abc.Sequence):
labels_and_values = list(labels_and_values)
if not labels_and_values:
return self
first = labels_and_values[0]
if not isinstance(first, str) and hasattr(first, '__iter__'):
for pair in labels_and_values:
assert len(pair) == 2, 'incorrect columns format'
labels_and_values = [x for pair in labels_and_values for x in pair]
assert len(labels_and_values) % 2 == 0, 'Even length sequence required'
for i in range(0, len(labels_and_values), 2):
label, values = labels_and_values[i], labels_and_values[i+1]
self = self.with_column(label, values, formatter)
return self
def relabeled(self, label, new_label):
"""Return a new table with ``label`` specifying column label(s)
replaced by corresponding ``new_label``.
Args:
``label`` -- (str or array of str) The label(s) of
columns to be changed.
``new_label`` -- (str or array of str): The new label(s) of
columns to be changed. Same number of elements as label.
Raises:
``ValueError`` -- if ``label`` does not exist in
table, or if the ``label`` and ``new_label`` are not of
equal length. Also, raised if ``label`` and/or ``new_label``
are not ``str``.
Returns:
New table with ``new_label`` in place of ``label``.
>>> tiles = Table().with_columns('letter', make_array('c', 'd'),
... 'count', make_array(2, 4))
>>> tiles
letter | count
c | 2
d | 4
>>> tiles.relabeled('count', 'number')
letter | number
c | 2
d | 4
>>> tiles # original table unmodified
letter | count
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'count'),
... make_array('column1', 'column2'))
column1 | column2
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'number'),
... make_array('column1', 'column2'))
Traceback (most recent call last):
...
ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.
"""
copy = self.copy()
copy.relabel(label, new_label)
return copy
# Deprecated
def with_relabeling(self, *args):
warnings.warn("with_relabeling is deprecated; use relabeled", FutureWarning)
return self.relabeled(*args)
def bin(self, *columns, **vargs):
"""Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function.
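A minimal sketch (hypothetical column; the bin edges below are passed in
explicitly, so the output is only indicative and skipped):
>>> t = Table().with_column('value', make_array(1, 2, 2, 4))
>>> t.bin('value', bins=make_array(0, 2, 4)) # doctest: +SKIP
bin | value count
0 | 1
2 | 3
4 | 0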
"""
if columns:
self = self.select(*columns)
if 'normed' in vargs:
vargs.setdefault('density', vargs.pop('normed'))
density = vargs.get('density', False)
tag = 'density' if density else 'count'
cols = list(self._columns.values())
_, bins = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
counts, _ = np.histogram(self[label], bins=bins, density=density)
binned[label + ' ' + tag] = np.append(counts, 0)
return binned
def move_column(self, label, index):
"""Returns a new table with specified column moved to the specified column index.
Args:
``label`` (str) A single label of column to be moved.
``index`` (int) A single index of column to move to.
>>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95
... , 40, 80, 45), 'survival', make_array(0,0,0,1, 1, 1, 0, 1),
... 'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),
... 'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))
>>> titanic
age | survival | gender | prediction
21 | 0 | M | 0
44 | 0 | M | 0
56 | 0 | M | 1
89 | 1 | M | 1
95 | 1 | F | 0
40 | 1 | F | 1
80 | 0 | F | 0
45 | 1 | F | 1
>>> titanic.move_column('survival', 3)
age | gender | prediction | survival
21 | M | 0 | 0
44 | M | 0 | 0
56 | M | 1 | 0
89 | M | 1 | 1
95 | F | 0 | 1
40 | F | 1 | 1
80 | F | 0 | 0
45 | F | 1 | 1
"""
table = type(self)()
col_order = list(self._columns)
label_idx = col_order.index(self._as_label(label))
col_to_move = col_order.pop(label_idx)
col_order.insert(index, col_to_move)
for col in col_order:
table[col] = self[col]
return table
##########################
# Exporting / Displaying #
##########################
def __str__(self):
return self.as_text(self.max_str_rows)
__repr__ = __str__
def _repr_html_(self):
return self.as_html(self.max_str_rows)
def show(self, max_rows=0):
"""Display the table."""
IPython.display.display(IPython.display.HTML(self.as_html(max_rows)))
max_str_rows = 10
@staticmethod
def _use_html_if_available(format_fn):
"""Use the value's HTML rendering if available, overriding format_fn."""
def format_using_as_html(v, label=False):
if not label and hasattr(v, 'as_html'):
return v.as_html()
else:
return format_fn(v, label)
return format_using_as_html
def _get_column_formatters(self, max_rows, as_html):
"""Return one value formatting function per column.
Each function has the signature f(value, label=False) -> str
"""
formats = {s: self._formats.get(s, self.formatter) for s in self.labels}
cols = self._columns.items()
fmts = [formats[k].format_column(k, v[:max_rows]) for k, v in cols]
if as_html:
fmts = list(map(type(self)._use_html_if_available, fmts))
return fmts
def as_text(self, max_rows=0, sep=" | "):
"""Format table as text."""
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self._columns.keys()
fmts = self._get_column_formatters(max_rows, False)
rows = [[fmt(label, label=True) for fmt, label in zip(fmts, labels)]]
for row in itertools.islice(self.rows, max_rows):
rows.append([f(v, label=False) for v, f in zip(row, fmts)])
lines = [sep.join(row) for row in rows]
if omitted:
lines.append('... ({} rows omitted)'.format(omitted))
return '\n'.join([line.rstrip() for line in lines])
def as_html(self, max_rows=0):
"""Format table as HTML."""
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self.labels
lines = [
(0, '<table border="1" class="dataframe">'),
(1, '<thead>'),
(2, '<tr>'),
(3, ' '.join('<th>' + label + '</th>' for label in labels)),
(2, '</tr>'),
(1, '</thead>'),
(1, '<tbody>'),
]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [
(2, '<tr>'),
(3, ' '.join('<td>' + fmt(v, label=False) + '</td>' for
v, fmt in zip(row, fmts))),
(2, '</tr>'),
]
lines.append((1, '</tbody>'))
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p>'.format(omitted)))
return '\n'.join(4 * indent * ' ' + text for indent, text in lines)
def index_by(self, column_or_label):
"""Return a dict keyed by values in a column that contains lists of
rows corresponding to each value.
"""
column = self._get_column(column_or_label)
index = {}
for key, row in zip(column, self.rows):
if isinstance(key, tuple):
key_transformed = list(key)
else:
key_transformed = [key]
has_null = pandas.isnull(key_transformed)
if any(has_null):
for i in range(len(key_transformed)):
if pandas.isnull(key_transformed[i]):
key_transformed[i] = np.nan
key = tuple(key_transformed) if len(key_transformed) > 1 else key_transformed[0]
index.setdefault(key, []).append(row)
return index
def _multi_index(self, columns_or_labels):
"""Returns a dict keyed by a tuple of the values that correspond to
the selected COLUMNS_OR_LABELS, with each value being the list of rows
that share that key."""
columns = [self._get_column(col) for col in columns_or_labels]
index = {}
for key, row in zip(zip(*columns), self.rows):
index.setdefault(key, []).append(row)
return index
def to_df(self):
"""Convert the table to a Pandas DataFrame."""
return pandas.DataFrame(self._columns)
def to_csv(self, filename):
"""Creates a CSV file with the provided filename.
The CSV is created in such a way that if we run
``table.to_csv('my_table.csv')`` we can recreate the same table with
``Table.read_table('my_table.csv')``.
Args:
``filename`` (str): The filename of the output CSV file.
Returns:
None, outputs a file with name ``filename``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.to_csv('my_table.csv') # doctest: +SKIP
<outputs a file called my_table.csv in the current directory>
"""
# index=False avoids row numbers in the output
self.to_df().to_csv(filename, index=False)
def to_array(self):
"""Convert the table to a structured NumPy array."""
dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
arr = np.empty_like(self.columns[0], dt)
for label in self.labels:
arr[label] = self[label]
return arr
##################
# Visualizations #
##################
# As RGB tuples
chart_colors = (
(0.0, 30/256, 66/256),
(1.0, 200/256, 44/256),
(0.0, 150/256, 207/256),
(30/256, 100/256, 0.0),
(172/256, 60/256, 72/256),
)
chart_colors += tuple(tuple((x+0.7)/2 for x in c) for c in chart_colors)
default_alpha = 0.7
default_options = {
'alpha': default_alpha,
}
def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
"""Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
A line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price>
"""
options = self.default_options.copy()
options.update(vargs)
if column_for_xticks is not None:
x_data, y_labels = self._split_column_and_labels(column_for_xticks)
x_label = self._as_label(column_for_xticks)
else:
x_data, y_labels = None, self.labels
x_label = None
if select is not None:
y_labels = self._as_labels(select)
if x_data is not None:
self = self.sort(x_data)
x_data = np.sort(x_data)
def draw(axis, label, color):
if x_data is None:
axis.plot(self[label], color=color, **options)
else:
axis.plot(x_data, self[label], color=color, **options)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height)
def bar(self, column_for_categories=None, select=None, overlay=True, width=6, height=4, **vargs):
"""Plot bar charts for the table.
Each plot is labeled using the values in `column_for_categories` and
one plot is produced for every other column (or for the columns
designated by `select`).
Every selected column except `column_for_categories` must be numerical.
Args:
column_for_categories (str): A column containing x-axis categories
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
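A minimal sketch (hypothetical table; produces a plot, so the output is
skipped):
>>> t = Table().with_columns('fruit', make_array('apple', 'pear'),
... 'count', make_array(4, 6))
>>> t.bar('fruit') # doctest: +SKIP
<bar chart with fruit as categories and a bar per count value>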
"""
options = self.default_options.copy()
# Matplotlib tries to center the labels, but we already handle that
# TODO consider changing the custom centering code and using matplotlib's default
vargs['align'] = 'edge'
options.update(vargs)
xticks, labels = self._split_column_and_labels(column_for_categories)
if select is not None:
labels = self._as_labels(select)
index = np.arange(self.num_rows)
def draw(axis, label, color):
axis.bar(index-0.5, self[label], 1.0, color=color, **options)
def annotate(axis, ticks):
if (ticks is not None) :
tick_labels = [ticks[int(l)] if 0<=l<len(ticks) else '' for l in axis.get_xticks()]
axis.set_xticklabels(tick_labels, stretch='ultra-condensed')
self._visualize(column_for_categories, labels, xticks, overlay, draw, annotate, width=width, height=height)
def group_bar(self, column_label, **vargs):
"""Plot a bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``bar`` in that there is no need to specify
bar heights; the height of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``bar`` behaves more like ``plot`` or
``scatter`` (which require the height of each point to be specified).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
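A minimal sketch (hypothetical column; produces a plot, so the output is
skipped):
>>> t = Table().with_column('Color', make_array('Red', 'Red', 'Blue'))
>>> t.group_bar('Color') # doctest: +SKIP
<bar chart with one bar per unique Color, heights given by counts>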
"""
self.group(column_label).bar(column_label, **vargs)
def barh(self, column_for_categories=None, select=None, overlay=True, width=6, **vargs):
"""Plot horizontal bar charts for the table.
Args:
``column_for_categories`` (``str``): A column containing y-axis categories
used to create buckets for bar chart.
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.barh`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column except ``column_for_categories``
must be numerical.
Returns:
Horizontal bar graph with buckets specified by ``column_for_categories``.
Each plot is labeled using the values in ``column_for_categories``
and one plot is produced for every other column (or for the columns
designated by ``select``).
>>> t = Table().with_columns(
... 'Furniture', make_array('chairs', 'tables', 'desks'),
... 'Count', make_array(6, 1, 2),
... 'Price', make_array(10, 20, 30)
... )
>>> t
Furniture | Count | Price
chairs | 6 | 10
tables | 1 | 20
desks | 2 | 30
>>> t.barh('Furniture') # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
>>> t.barh('Furniture', 'Price') # doctest: +SKIP
<bar graph with furniture as categories and bars for price>
>>> t.barh('Furniture', make_array(1, 2)) # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
"""
options = self.default_options.copy()
# Matplotlib tries to center the labels, but we already handle that
# TODO consider changing the custom centering code and using matplotlib's default
vargs['align'] = 'edge'
options.update(vargs)
yticks, labels = self._split_column_and_labels(column_for_categories)
if select is not None:
labels = self._as_labels(select)
n = len(labels)
index = np.arange(self.num_rows)
margin = 0.1
bwidth = 1 - 2 * margin
if overlay:
bwidth /= len(labels)
if 'height' in options:
height = options.pop('height')
else:
height = max(4, len(index)/2)
def draw(axis, label, color):
if overlay:
ypos = index + margin + (1-2*margin)*(n - 1 - labels.index(label))/n
else:
ypos = index
# barh plots entries in reverse order from bottom to top
axis.barh(ypos, self[label][::-1], bwidth, color=color, **options)
ylabel = self._as_label(column_for_categories)
def annotate(axis, ticks):
axis.set_yticks(index+0.5) # Center labels on bars
# barh plots entries in reverse order from bottom to top
axis.set_yticklabels(ticks[::-1], stretch='ultra-condensed')
axis.set_xlabel(axis.get_ylabel())
axis.set_ylabel(ylabel)
self._visualize('', labels, yticks, overlay, draw, annotate, width=width, height=height)
def group_barh(self, column_label, **vargs):
"""Plot a horizontal bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``barh`` in that there is no need to specify
bar heights; the size of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``barh`` behaves more like ``plot`` or
``scatter`` (which require the second coordinate of each point to be
specified in another column).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
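A minimal sketch (hypothetical column; produces a plot, so the output is
skipped):
>>> t = Table().with_column('Color', make_array('Red', 'Red', 'Blue'))
>>> t.group_barh('Color') # doctest: +SKIP
<horizontal bar chart with one bar per unique Color, sized by counts>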
"""
self.group(column_label).barh(column_label, **vargs)
def scatter(self, column_for_x, select=None, overlay=True, fit_line=False,
group=None, labels=None, sizes=None, width=5, height=5, s=20,
colors=None, **vargs):
"""Creates scatterplots, optionally adding a line of best fit.
Args:
``column_for_x`` (``str``): The column to use for the x-axis values
and label of the scatter plots.
Kwargs:
``overlay`` (``bool``): If true, creates a chart with one color
per data column; if False, each plot will be displayed separately.
``fit_line`` (``bool``): draw a line of best fit for each set of points.
``vargs``: Additional arguments that get passed into `plt.scatter`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
for additional arguments that can be passed into vargs. These
include: `marker` and `norm`, to name a couple.
``group``: A column of categories to be used for coloring dots per
each category grouping.
``labels``: A column of text labels to annotate dots.
``sizes``: A column of values to set the relative areas of dots.
``s``: Size of dots. If sizes is also provided, then dots will be
in the range 0 to 2 * s.
``colors``: (deprecated) A synonym for ``group``. Retained
temporarily for backwards compatibility. This argument
will be removed in future releases.
Raises:
ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical
Returns:
Scatter plot of values of ``column_for_x`` plotted against
values for all other columns in self. Each plot uses the values in
`column_for_x` for horizontal positions. One plot is produced for
all other columns in self as y (or for the columns designated by
`select`).
>>> table = Table().with_columns(
... 'x', make_array(9, 3, 3, 1),
... 'y', make_array(1, 2, 2, 10),
... 'z', make_array(3, 4, 5, 6))
>>> table
x | y | z
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table.scatter('x') # doctest: +SKIP
<scatterplot of values in y and z on x>
>>> table.scatter('x', overlay=False) # doctest: +SKIP
<scatterplot of values in y on x>
<scatterplot of values in z on x>
>>> table.scatter('x', fit_line=True) # doctest: +SKIP
<scatterplot of values in y and z on x with lines of best fit>
"""
options = self.default_options.copy()
options.update(vargs)
x_data, y_labels = self._split_column_and_labels(column_for_x)
if group is not None and colors is not None and group != colors:
warnings.warn("Do not pass both colors and group to scatter().")
if group is None and colors is not None:
# Backward compatibility
group = colors
warnings.warn("scatter(colors=x) is deprecated. Use scatter(group=x)", FutureWarning)
if group is not None:
y_labels.remove(self._as_label(group))
if sizes is not None:
y_labels.remove(self._as_label(sizes))
if select is not None:
y_labels = self._as_labels(select)
if len(y_labels) > 1 and group is not None and overlay:
warnings.warn("Group and overlay are incompatible in a scatter")
overlay = False
def draw(axis, label, color):
if group is not None:
colored = sorted(np.unique(self.column(group)))
color_list = list(itertools.islice(itertools.cycle(self.chart_colors), len(colored)))
color_map = collections.OrderedDict(zip(colored, color_list))
color = [color_map[x] for x in self.column(group)]
elif 'color' in options:
color = options.pop('color')
y_data = self[label]
if sizes is not None:
max_size = max(self[sizes]) ** 0.5
size = 2 * s * self[sizes] ** 0.5 / max_size
else:
size = s
axis.scatter(x_data, y_data, color=color, s=size, **options)
if fit_line:
m, b = np.polyfit(x_data, self[label], 1)
minx, maxx = np.min(x_data),np.max(x_data)
axis.plot([minx,maxx],[m*minx+b,m*maxx+b], color=color)
if labels is not None:
for x, y, label in zip(x_data, y_data, self[labels]):
axis.annotate(label, (x, y),
xytext=(-20, 20),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.7),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0', color='black'))
if group is not None:
import matplotlib.patches as mpatches
group_col_name = self._as_label(group)
patches = [mpatches.Patch(color=c, label="{0}={1}".format(group_col_name, v)) \
for (v, c) in color_map.items()]
axis.legend(loc=2, bbox_to_anchor=(1.05, 1), handles=patches)
x_label = self._as_label(column_for_x)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height)
def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):
"""Generic visualization that overlays or separates the draw function.
Raises:
ValueError: The Table contains non-numerical values in columns
other than `column_for_categories`
"""
for label in y_labels:
if not all(isinstance(x, numbers.Real) for x in self[label]):
raise ValueError("The column '{0}' contains non-numerical "
"values. A plot cannot be drawn for this column."
.format(label))
n = len(y_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if overlay and n > 1:
_, axis = plt.subplots(figsize=(width, height))
if x_label is not None:
axis.set_xlabel(x_label)
for label, color in zip(y_labels, colors):
draw(axis, label, color)
if ticks is not None:
annotate(axis, ticks)
axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
fig, axes = plt.subplots(n, 1, figsize=(width, height*n))
if not isinstance(axes, collections.abc.Iterable):
axes=[axes]
for axis, y_label, color in zip(axes, y_labels, colors):
draw(axis, y_label, color)
axis.set_ylabel(y_label, fontsize=16)
if x_label is not None:
axis.set_xlabel(x_label, fontsize=16)
if ticks is not None:
annotate(axis, ticks)
type(self).plots.append(axis)
def _split_column_and_labels(self, column_or_label):
"""Return the specified column and labels of other columns."""
column = None if column_or_label is None else self._get_column(column_or_label)
labels = [label for i, label in enumerate(self.labels) if column_or_label not in (i, label)]
return column, labels
# Deprecated
def pivot_hist(self, pivot_column_label, value_column_label, overlay=True, width=6, height=4, **vargs):
"""Draw histograms of each category in a column. (Deprecated)"""
warnings.warn("pivot_hist is deprecated; use "
"hist(value_column_label, group=pivot_column_label), or "
"with side_by_side=True if you really want side-by-side "
"bars.")
pvt_labels = np.unique(self[pivot_column_label])
pvt_columns = [self[value_column_label][np.where(self[pivot_column_label] == pivot)] for pivot in pvt_labels]
n = len(pvt_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if overlay:
plt.figure(figsize=(width, height))
vals, bins, patches = plt.hist(pvt_columns, color=colors, **vargs)
plt.legend(pvt_labels)
else:
_, axes = plt.subplots(n, 1, figsize=(width, height * n))
vals = []
bins = None
for axis, label, column, color in zip(axes, pvt_labels, pvt_columns, colors):
if isinstance(bins, np.ndarray):
avals, abins, patches = axis.hist(column, color=color, bins=bins, **vargs)
else:
avals, abins, patches = axis.hist(column, color=color, **vargs)
axis.set_xlabel(label, fontsize=16)
vals.append(avals)
if not isinstance(bins, np.ndarray):
bins = abins
else:
assert bins.all() == abins.all(), "Inconsistent bins in hist"
t = type(self)()
t['start'] = bins[0:-1]
t['end'] = bins[1:]
for label, column in zip(pvt_labels,vals):
t[label] = column
def hist(self, *columns, overlay=True, bins=None, bin_column=None, unit=None, counts=None, group=None, side_by_side=False, left_end=None, right_end=None, width=6, height=4, **vargs):
"""Plots one histogram for each column in columns. If no column is
specified, plot all columns.
Kwargs:
overlay (bool): If True, plots 1 chart with all the histograms
overlaid on top of each other (instead of the default behavior
of one histogram for each column in the table). Also adds a
legend that matches each bar color to its column. Note that
if the histograms are not overlaid, they are not forced to the
same scale.
bins (list or int): Lower bound for each bin in the
histogram or number of bins. If None, bins will
be chosen automatically.
bin_column (column name or index): A column of bin lower bounds.
All other columns are treated as counts of these bins.
If None, each value in each row is assigned a count of 1.
counts (column name or index): Deprecated name for bin_column.
unit (string): A name for the units of the plotted column (e.g.
'kg'), to be used in the plot.
group (column name or index): A column of categories. The rows are
grouped by the values in this column, and a separate histogram is
generated for each group. The histograms are overlaid or plotted
separately depending on the overlay argument. If None, no such
grouping is done.
side_by_side (bool): Whether histogram bins should be plotted side by
side (instead of directly overlaid). Makes sense only when
plotting multiple histograms, either by passing several columns
or by using the group option.
left_end (int or float) and right_end (int or float): (Not supported
for overlayed histograms) The left and right edges of the shading of
the histogram. If only one of these is None, then that property
will be treated as the extreme edge of the histogram. If both are
left None, then no shading will occur.
vargs: Additional arguments that get passed into :func:plt.hist.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
for additional arguments that can be passed into vargs. These
include: `range`, `normed`/`density`, `cumulative`, and
`orientation`, to name a few.
>>> t = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> t.hist() # doctest: +SKIP
<histogram of values in count>
<histogram of values in points>
>>> t = Table().with_columns(
... 'value', make_array(101, 102, 103),
... 'proportion', make_array(0.25, 0.5, 0.25))
>>> t.hist(bin_column='value') # doctest: +SKIP
<histogram of values weighted by corresponding proportions>
>>> t = Table().with_columns(
... 'value', make_array(1, 2, 3, 2, 5 ),
... 'category', make_array('a', 'a', 'a', 'b', 'b'))
>>> t.hist('value', group='category') # doctest: +SKIP
<two overlaid histograms of the data [1, 2, 3] and [2, 5]>
"""
if counts is not None and bin_column is None:
warnings.warn("counts arg of hist is deprecated; use bin_column")
bin_column=counts
if columns:
columns_included = list(columns)
if bin_column is not None:
columns_included.append(bin_column)
if group is not None:
columns_included.append(group)
self = self.select(*columns_included)
if group is not None:
if bin_column is not None:
raise ValueError("Using bin_column and group together is "
"currently unsupported.")
if len(columns) > 1:
raise ValueError("Using group with multiple histogram value "
"columns is currently unsupported.")
# Check for non-numerical values and raise a ValueError if any found
for col in self:
if col != group and any(isinstance(cell, np.flexible) for cell in self[col]):
raise ValueError("The column '{0}' contains non-numerical "
"values. A histogram cannot be drawn for this table."
.format(col))
if bin_column is not None and bins is None:
bins = np.unique(self.column(bin_column))
if bins is not None:
vargs['bins'] = bins
# Matplotlib has deprecated the normed keyword.
# TODO consider changing this function to use density= instead too
if 'normed' not in vargs and 'density' not in vargs:
vargs['density'] = True
elif 'normed' in vargs and 'density' not in vargs:
vargs['density'] = vargs.pop('normed')
elif 'normed' in vargs and 'density' in vargs:
raise ValueError("You can't specify both normed and density. "
"Use one or the other.")
def prepare_hist_with_bin_column(bin_column):
# This code is factored as a function for clarity only.
weight_columns = [c for c in self.labels if c != bin_column]
bin_values = self.column(bin_column)
values_dict = [(w[:-6] if w.endswith(' count') else w, (bin_values, self.column(w))) \
for w in weight_columns]
return values_dict
def prepare_hist_with_group(group):
# This code is factored as a function for clarity only.
grouped = self.group(group, np.array)
if grouped.num_rows > 20:
warnings.warn("It looks like you're making a grouped histogram with "
"a lot of groups ({:d}), which is probably incorrect."
.format(grouped.num_rows))
return [("{}={}".format(group, k), (v[0][1],)) for k, v in grouped.index_by(group).items()]
# Populate values_dict: An ordered dict from column name to singleton
# tuple of array of values or a (values, weights) pair of arrays. If
# any values have weights, they all must have weights.
if bin_column is not None:
values_dict = prepare_hist_with_bin_column(bin_column)
elif group is not None:
values_dict = prepare_hist_with_group(group)
else:
values_dict = [(k, (self.column(k),)) for k in self.labels]
values_dict = collections.OrderedDict(values_dict)
if left_end is not None or right_end is not None:
if left_end is None:
if bins is not None and bins[0]:
left_end = bins[0]
else:
left_end = min([min(self.column(k)) for k in self.labels if np.issubdtype(self.column(k).dtype, np.number)])
elif right_end is None:
if bins is not None and bins[-1]:
right_end = bins[-1]
else:
right_end = max([max(self.column(k)) for k in self.labels if np.issubdtype(self.column(k).dtype, np.number)])
def draw_hist(values_dict):
with np.printoptions(legacy='1.13'):
# This code is factored as a function for clarity only.
n = len(values_dict)
colors = [rgb_color + (self.default_alpha,) for rgb_color in
itertools.islice(itertools.cycle(self.chart_colors), n)]
hist_names = list(values_dict.keys())
values = [v[0] for v in values_dict.values()]
weights = [v[1] for v in values_dict.values() if len(v) > 1]
if n > len(weights) > 0:
raise ValueError("Weights were provided for some columns, but not "
" all, and that's not supported.")
if vargs['density']:
y_label = 'Percent per ' + (unit if unit else 'unit')
percentage = plt.FuncFormatter(lambda x, _: "{:g}".format(100*x))
else:
y_label = 'Count'
if overlay and n > 1:
# Reverse because legend prints bottom-to-top
values = values[::-1]
weights = weights[::-1]
colors = list(colors)[::-1]
if len(weights) == n:
vargs['weights'] = weights
if not side_by_side:
vargs.setdefault('histtype', 'stepfilled')
figure = plt.figure(figsize=(width, height))
plt.hist(values, color=colors, **vargs)
axis = figure.get_axes()[0]
_vertical_x(axis)
axis.set_ylabel(y_label)
if vargs['density']:
axis.yaxis.set_major_formatter(percentage)
x_unit = ' (' + unit + ')' if unit else ''
if group is not None and len(self.labels) == 2:
#There's a grouping in place but we're only plotting one column's values
label_not_grouped = [l for l in self.labels if l != group][0]
axis.set_xlabel(label_not_grouped + x_unit, fontsize=16)
else:
axis.set_xlabel(x_unit, fontsize=16)
plt.legend(hist_names, loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
_, axes = plt.subplots(n, 1, figsize=(width, height * n))
if 'bins' in vargs:
bins = vargs['bins']
if isinstance(bins, numbers.Integral) and bins > 76 or hasattr(bins, '__len__') and len(bins) > 76:
# Use stepfilled when there are too many bins
vargs.setdefault('histtype', 'stepfilled')
if n == 1:
axes = [axes]
for i, (axis, hist_name, values_for_hist, color) in enumerate(zip(axes, hist_names, values, colors)):
axis.set_ylabel(y_label)
if vargs['density']:
axis.yaxis.set_major_formatter(percentage)
x_unit = ' (' + unit + ')' if unit else ''
if len(weights) == n:
vargs['weights'] = weights[i]
axis.set_xlabel(hist_name + x_unit, fontsize=16)
heights, bins, patches = axis.hist(values_for_hist, color=color, **vargs)
if left_end is not None and right_end is not None:
x_shade, height_shade, width_shade = _compute_shading(heights, bins.copy(), left_end, right_end)
axis.bar(x_shade, height_shade, width=width_shade,
color=self.chart_colors[1], align="edge")
_vertical_x(axis)
type(self).plots.append(axis)
draw_hist(values_dict)
def hist_of_counts(self, *columns, overlay=True, bins=None, bin_column=None,
group=None, side_by_side=False, width=6, height=4, **vargs):
"""
Plots one count-based histogram for each column in columns. The
heights of each bar will represent the counts, and all the bins
must be of equal size.
If no column is specified, plot all columns.
Kwargs:
overlay (bool): If True, plots 1 chart with all the histograms
overlaid on top of each other (instead of the default behavior
of one histogram for each column in the table). Also adds a
legend that matches each bar color to its column. Note that
if the histograms are not overlaid, they are not forced to the
same scale.
bins (array or int): Lower bound for each bin in the
histogram or number of bins. If None, bins will
be chosen automatically.
bin_column (column name or index): A column of bin lower bounds.
All other columns are treated as counts of these bins.
If None, each value in each row is assigned a count of 1.
group (column name or index): A column of categories. The rows are
grouped by the values in this column, and a separate histogram is
generated for each group. The histograms are overlaid or plotted
separately depending on the overlay argument. If None, no such
grouping is done.
side_by_side (bool): Whether histogram bins should be plotted side by
side (instead of directly overlaid). Makes sense only when
plotting multiple histograms, either by passing several columns
or by using the group option.
vargs: Additional arguments that get passed into :func:plt.hist.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
for additional arguments that can be passed into vargs. These
include: `range`, `cumulative`, and
`orientation`, to name a few.
>>> t = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> t.hist_of_counts() # doctest: +SKIP
<histogram of values in count with counts on y-axis>
<histogram of values in points with counts on y-axis>
>>> t = Table().with_columns(
... 'value', make_array(101, 102, 103),
... 'count', make_array(5, 10, 5))
>>> t.hist_of_counts(bin_column='value') # doctest: +SKIP
<histogram of values weighted by corresponding counts>
>>> t = Table().with_columns(
... 'value', make_array(1, 2, 3, 2, 5 ),
... 'category', make_array('a', 'a', 'a', 'b', 'b'))
>>> t.hist_of_counts('value', group='category') # doctest: +SKIP
<two overlaid histograms of the data [1, 2, 3] and [2, 5]>
"""
if bin_column is not None and bins is None:
bins = np.unique(self.column(bin_column))
# TODO ensure counts are integers even when `columns` is empty
for column in columns:
if not _is_array_integer(self.column(column)):
raise ValueError('The column {0} contains non-integer values. '
'When using hist_of_counts with bin_column, '
'all columns should contain counts.'
.format(column))
if vargs.get('normed', False) or vargs.get('density', False):
raise ValueError("hist_of_counts is for displaying counts only, "
"and should not be used with the normed or "
"density keyword arguments")
vargs['density'] = False
if bins is not None:
if len(bins) < 2:
raise ValueError("bins must have at least two items")
diffs = np.diff(sorted(bins))
# Diffs should all be equal (up to floating point error)
normalized_diff_deviances = np.abs((diffs - diffs[0])/diffs[0])
if np.any(normalized_diff_deviances > 1e-11):
raise ValueError("Bins of unequal size should not be used "
"with hist_of_counts. Please use hist() and "
"make sure to set normed=True")
return self.hist(*columns, overlay=overlay, bins=bins, bin_column=bin_column, group=group, side_by_side=side_by_side, width=width, height=height, **vargs)
def boxplot(self, **vargs):
"""Plots a boxplot for the table.
Every column must be numerical.
Kwargs:
vargs: Additional arguments that get passed into `plt.boxplot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot
for additional arguments that can be passed into vargs. These include
`vert` and `showmeans`.
Returns:
None
Raises:
ValueError: The Table contains columns with non-numerical values.
>>> table = Table().with_columns(
... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),
... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))
>>> table
test1 | test2
92.5 | 89
88 | 84
72 | 74
71 | 66
99 | 92
100 | 99
95 | 88
83 | 81
94 | 95
93 | 94
>>> table.boxplot() # doctest: +SKIP
<boxplot of test1 and boxplot of test2 side-by-side on the same figure>
"""
# Check for non-numerical values and raise a ValueError if any found
for col in self:
if any(isinstance(cell, np.flexible) for cell in self[col]):
raise ValueError("The column '{0}' contains non-numerical "
"values. A histogram cannot be drawn for this table."
.format(col))
columns = self._columns.copy()
vargs['labels'] = columns.keys()
values = list(columns.values())
plt.boxplot(values, **vargs)
###########
# Support #
###########
class Row(tuple):
_table = None # Set by subclasses in Rows
def __getattr__(self, column_label):
try:
return self[self._table.column_index(column_label)]
except ValueError: #adding support for NumPy v1.18.0 as per changes in https://github.com/numpy/numpy/pull/14745
raise AttributeError("Attribute ({0}) not found in row.".format(column_label))
def item(self, index_or_label):
"""Return the item at an index or label."""
if isinstance(index_or_label, numbers.Integral):
index = index_or_label
else:
index = self._table.column_index(index_or_label)
return self[index]
def __repr__(self):
return 'Row({})'.format(', '.join('{}={}'.format(
self._table.labels[i], v.__repr__()) for i, v in enumerate(self)))
def asdict(self):
return collections.OrderedDict(zip(self._table.labels, self))
class Rows(collections.abc.Sequence):
"""An iterable view over the rows in a table."""
def __init__(self, table):
self._table = table
self._labels = None
def __getitem__(self, i):
if isinstance(i, slice):
return (self[j] for j in range(*i.indices(len(self))))
labels = tuple(self._table.labels)
if labels != self._labels:
self._labels = labels
self._row = type('Row', (Table.Row, ), dict(_table=self._table))
return self._row(c[i] for c in self._table._columns.values())
def __len__(self):
return self._table.num_rows
def __repr__(self):
return '{0}({1})'.format(type(self).__name__, repr(self._table))
def _is_array_integer(arr):
"""Returns True if an array contains integers (integer type or near-int
float values) and False otherwise.
>>> _is_array_integer(np.arange(10))
True
>>> _is_array_integer(np.arange(7.0, 20.0, 1.0))
True
>>> _is_array_integer(np.arange(0, 1, 0.1))
False
"""
return issubclass(arr.dtype.type, np.integer) or np.allclose(arr, np.round(arr))
def _zero_on_type_error(column_fn):
"""Wrap a function on an np.ndarray to return 0 on a type error."""
if not column_fn:
return column_fn
if not callable(column_fn):
raise TypeError('column functions must be callable')
@functools.wraps(column_fn)
def wrapped(column):
try:
return column_fn(column)
except TypeError:
if isinstance(column, np.ndarray):
return column.dtype.type() # A typed zero value
else:
raise
return wrapped
def _compute_shading(heights, bins, left_end, right_end):
shade_start_idx = np.max(np.where(bins <= left_end)[0], initial=0)
shade_end_idx = np.max(np.where(bins < right_end)[0], initial=0) + 1
# x_shade are the bin starts, so ignore bins[-1], which is the RHS of the last bin
x_shade = bins[:-1][shade_start_idx:shade_end_idx]
height_shade = heights[shade_start_idx:shade_end_idx]
width_shade = np.diff(bins[shade_start_idx:(shade_end_idx+1)])
if left_end > x_shade[0]:
# shrink the width by the unshaded area, then move the bin start
width_shade[0] -= (left_end - x_shade[0])
x_shade[0] = left_end
original_ending = (x_shade[-1] + width_shade[-1])
if right_end < original_ending:
width_shade[-1] -= (original_ending - right_end)
return x_shade, height_shade, width_shade
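def _example_compute_shading():
    """A minimal sketch (not part of the original module) of how
    _compute_shading clips bars to a shaded interval; the bin edges and
    heights below are made-up illustration values."""
    heights = np.array([0.2, 0.5, 0.3])
    bins = np.array([0., 1., 2., 3.])
    # Shading [0.5, 2.0]: the first bar is narrowed to start at 0.5, the
    # second bar is kept whole, and the third bar is excluded entirely.
    return _compute_shading(heights, bins.copy(), 0.5, 2.0)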
def _fill_with_zeros(partials, rows, zero=None):
"""Find and return values from rows for all partials. In cases where no
row matches a partial, zero is assumed as value. For a row, the first
(n-1) fields are assumed to be the partial, and the last field,
the value, where n is the total number of fields in each row. It is
assumed that there is a unique row for each partial.
partials -- single field values or tuples of field values
rows -- table rows
zero -- value used when no rows match a particular partial
"""
assert len(rows) > 0
if not _is_non_string_iterable(partials):
# Convert partials to tuple for comparison against row slice later
partials = [(partial,) for partial in partials]
# Construct mapping of partials to values in rows
mapping = {}
for row in rows:
mapping[tuple(row[:-1])] = row[-1]
if zero is None:
# Try to infer zero from given row values.
array = np.array(tuple(mapping.values()))
if len(array.shape) == 1:
zero = array.dtype.type()
return np.array([mapping.get(partial, zero) for partial in partials])
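def _example_fill_with_zeros():
    """A minimal sketch (not part of the original module): each row pairs a
    partial key with a value, and partials with no matching row receive the
    inferred zero, giving array([1, 2, 0]) here."""
    rows = [('a', 1), ('b', 2)]
    partials = [('a',), ('b',), ('c',)]
    return _fill_with_zeros(partials, rows)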
def _as_labels(column_or_columns):
"""Return a list of labels for a label or labels."""
if not _is_non_string_iterable(column_or_columns):
return [column_or_columns]
else:
return column_or_columns
def _varargs_labels_as_list(label_list):
"""Return a list of labels for a list of labels or singleton list of list
of labels."""
if len(label_list) == 0:
return []
elif not _is_non_string_iterable(label_list[0]):
# Assume everything is a label. If not, it'll be caught later.
return label_list
elif len(label_list) == 1:
return label_list[0]
else:
raise ValueError("Labels {} contain more than list.".format(label_list),
"Pass just one list of labels.")
def _assert_same(values):
"""Assert that all values are identical and return the unique value."""
assert len(values) > 0
first, rest = values[0], values[1:]
for v in rest:
assert (v == first) or (pandas.isnull(v) and pandas.isnull(first))
return first
def _collected_label(collect, label):
"""Label of a collected column."""
if not collect.__name__.startswith('<'):
return label + ' ' + collect.__name__
else:
return label
def _is_non_string_iterable(value):
"""Whether a value is iterable."""
if isinstance(value, str):
return False
if hasattr(value, '__iter__'):
return True
if isinstance(value, collections.abc.Sequence):
return True
return False
def _vertical_x(axis, ticks=None, max_width=5):
"""Switch labels to vertical if they are long."""
if ticks is None:
ticks = axis.get_xticks()
if (
|
np.array(ticks)
|
numpy.array
|
from keras.layers import Input, Dense, Flatten, Concatenate
from keras.models import Model
import numpy as np
import pandas as pd
import keras
import pickle
df = pd.read_csv('sequence_data.tsv',sep='\t')
with open('all','rb') as f:
test = pickle.load(f)
status = df.iloc[test].status
status = np.array(status)
metrics = [keras.metrics.AUC(name='auc'),
keras.metrics.TruePositives(name='tp'),
keras.metrics.FalsePositives(name='fp'),
keras.metrics.TrueNegatives(name='tn'),
keras.metrics.FalseNegatives(name='fn'),
keras.metrics.BinaryAccuracy(name='accuracy'),
keras.metrics.Precision(name='precision'),
keras.metrics.Recall(name='recall')]
pos = 6931
neg = 101232
total = pos + neg
weight_0 = (1 / neg)*(total) / 2.0
weight_1 = (1 / pos)*(total) / 2.0
weights = {0: weight_0, 1: weight_1}
#weights = {0: 1., 1: 101232 / 6931}
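# Note on the class weights above (illustrative arithmetic only, no extra logic):
# scaling each class by total / (2 * count) keeps the average sample weight near 1
# while balancing the classes. With these counts, weight_0 is about
# 108163 / (2 * 101232) ~ 0.53 and weight_1 is about 108163 / (2 * 6931) ~ 7.8,
# so positives are up-weighted by roughly 101232 / 6931 ~ 14.6 relative to
# negatives, consistent with the commented-out alternative weighting.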
gen = np.load('genomic.npy')
gen = np.reshape(gen[test], (gen[test].shape[0], gen[test].shape[1], gen[test].shape[2]))
signal = np.load('signal.npy')
signal = np.reshape(signal[test], (signal[test].shape[0], signal[test].shape[1], signal[test].shape[2]))
clip = np.load('softclipped.npy')
clip = np.reshape(clip[test], (clip[test].shape[0], clip[test].shape[1], clip[test].shape[2]))
seq_shape = np.load('shape.npy')
seq_shape =
|
np.reshape(seq_shape[test], (seq_shape[test].shape[0], seq_shape.shape[1], seq_shape.shape[2]))
|
numpy.reshape
|
"""计算费米面上点的位置"""
import numpy
from scipy import optimize
from basics import Square, Point
from basics.point import get_absolute_angle
def get_patches(brlu: Square, npatch, dispfun):
'''Get the patches on the Fermi surface\n
dispfun is the dispersion relation\n
'''
gap = numpy.pi * 2 / npatch
angles = [gap * (idx + 0.5) for idx in range(npatch)]
#solve for the intersection with the Fermi surface at each angle
patches = []
#the maximum possible radius
maxv = brlu.width * 1.414 / 2.
for ang in angles:
xcoff = numpy.cos(ang)
ycoff = numpy.sin(ang)
def __raddisp(rad):
kxv = rad * xcoff
if numpy.abs(kxv) > numpy.pi:
kxv = numpy.sign(kxv) * numpy.pi
kyv = rad * ycoff
if numpy.abs(kyv) > numpy.pi:
kyv = numpy.sign(kyv) * numpy.pi
return dispfun(kxv, kyv)
rrad = optimize.bisect(
__raddisp,
0., maxv
)
patches.append(Point(rrad * xcoff, rrad * ycoff, 1))
return patches
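def _example_fermi_radius(ang, dispfun, maxrad=numpy.pi * 1.414):
    '''Illustrative sketch only (not part of the original module): find the
    Fermi radius along a single angle by bisecting the dispersion along the
    ray, the same step get_patches performs for every patch angle. It assumes
    dispfun changes sign between rad=0 and rad=maxrad on this ray.
    '''
    xcoff, ycoff = numpy.cos(ang), numpy.sin(ang)
    return optimize.bisect(lambda rad: dispfun(rad * xcoff, rad * ycoff), 0., maxrad)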
def find_patch(
pnt: Point, patches, dispfun, dispgdfun, step,
brlim=(numpy.pi, numpy.pi), mode=1
):
'''Find which patch this point belongs to\n
dispfun is the dispersion relation, dispgdfun is the gradient used to project onto the Fermi surface\n
note that step should preferably be smaller than pi / 2 * mesh\n
brlim is the Brillouin zone boundary, given here as the maximum absolute value\n
if mode=1, the algorithm projects onto the Fermi surface and then finds the patch; if mode=2,
it directly finds the nearest patch point
'''
if mode == 2:
return find_patch_mode2(pnt, patches)
if mode == 3:
return find_patch_mode3(pnt, patches)
#cast a line out from this point; stop once the two ends have opposite signs
kxv, kyv = pnt.coord
olddisp = dispfun(kxv, kyv)
while True:
cita = dispgdfun(kxv, kyv)
#print(kxv, kyv, cita)
#determine the direction
kxp = kxv + step * numpy.cos(cita)
kyp = kyv + step * numpy.sin(cita)
#clamp to the edge if it exceeds pi
if numpy.abs(kxp) > brlim[0]:
kxp = numpy.sign(kxp) * brlim[0]
if numpy.abs(kyp) > brlim[1]:
kyp = numpy.sign(kyp) * brlim[1]
newdispp = dispfun(kxp, kyp)
#the other direction
kxn = kxv - step * numpy.cos(cita)
kyn = kyv - step * numpy.sin(cita)
if numpy.abs(kxn) > brlim[0]:
kxn = numpy.sign(kxn) * brlim[0]
if numpy.abs(kyn) > brlim[1]:
kyn = numpy.sign(kyn) * brlim[1]
newdispn = dispfun(kxn, kyn)
#opposite signs; sometimes we hit 0 exactly. In that case, if old equals 0, gsign does not matter,
#and if the new value equals zero, moving toward it is also correct
if newdispp * olddisp <= 0:
gsign = +1
break
if newdispn * olddisp <= 0:
gsign = -1
break
#see which direction decreases faster
#if the p direction decreases faster
if numpy.abs(newdispn) > numpy.abs(newdispp):
kxv, kyv = kxp, kyp
else:#if the n direction decreases faster
kxv, kyv = kxn, kyn
#print(newdispn, newdispp)
#raise
if numpy.abs(kxv) > brlim[0] or numpy.abs(kyv) > brlim[1]:
raise ValueError('Moved outside the Brillouin zone')
#now the dispersion has opposite signs at (kxv, kyv) and at a step of length step along the cita direction
def __disp_by_dis(dis):
'''Energy at the position a distance dis from pnt along the slope direction'''
xdis = kxv + dis * numpy.cos(cita)
ydis = kyv + dis * numpy.sin(cita)
if numpy.abs(xdis) > brlim[0]:
xdis = numpy.sign(xdis) * brlim[0]
if numpy.abs(ydis) > brlim[1]:
ydis = numpy.sign(ydis) * brlim[1]
return dispfun(xdis, ydis)
rootd = optimize.bisect(__disp_by_dis, 0, gsign * step)
crsx = kxv + rootd *
|
numpy.cos(cita)
|
numpy.cos
|
import h5py
import numpy as np
from tqdm import tqdm
from neurokernel.LPU.InputProcessors.BaseInputProcessor import BaseInputProcessor
from neurokernel.LPU.InputProcessors.PresynapticInputProcessor import PresynapticInputProcessor
from .parse_arborization import NeuronArborizationParser
class BU_InputProcessor(BaseInputProcessor):
def __init__(self, shape, dt, dur, name, video_config, rf_config, neurons,
scale = 1.0,
record_file = None, record_interval = 1):
video_cls = Video_factory(video_config.get('type', 'moving_bar_l2r'))
self.video = video_cls(shape, dt, dur, video_config.get('bar_width', 50),
start = video_config.get('start', None),
stop = video_config.get('stop', None),
record_file = video_config.get('record', None),
record_interval = video_config.get('record_interval', 1))
uids = list(neurons.keys())
neuron_names = [neurons[n]['name'] for n in uids]
neuron_ids = np.array([int(name.split('/')[1][1:]) for name in neuron_names])
neuron_side = set([name.split('/')[1][0] for name in neuron_names])
if len(neuron_side) > 1:
raise ValueError('BU neurons must be on one side')
else:
self.hemisphere = neuron_side.pop()
self.fc = CircularGaussianFilterBank(
(shape[0], shape[1]),
rf_config.get('sigma', 0.05), 10,
hemisphere = self.hemisphere)
self.index = neuron_ids - 1
var_list = [('I', uids)]
self.name = name
self.scale = scale
# self.n_inputs = 80
#self.filter_filename = '{}_filters.h5'.format(self.name)
super(BU_InputProcessor, self).__init__(var_list,
sensory_file = self.video.record_file,
sensory_interval = self.video.record_interval,
input_file = record_file,
input_interval = record_interval)
def pre_run(self):
self.video.pre_run()
self.fc.create_filters()
# self.file = h5py.File('{}_inputs.h5'.format(self.name), 'w')
# self.file.create_dataset('I',
# (0, self.n_inputs),
# dtype = np.double,
# maxshape=(None, self.n_inputs))
def is_input_available(self):
return True
def update_input(self):
frame = self.video.run_step()
BU_input = self.fc.apply_filters(frame, scale = self.scale).reshape(-1)
self.variables['I']['input'] = BU_input[self.index]
# self.record_frame(BU_input)
# def record_frame(self, input):
# self.file['I'].resize((self.file['I'].shape[0]+1, self.n_inputs))
# self.file['I'][-1,:] = input
def __del__(self):
try:
self.close_file()
except:
pass
class PB_InputProcessor(BaseInputProcessor):
def __init__(self, shape, dt, dur, name, video_config, rf_config, neurons,
scale = 1.0,
record_file = None, record_interval = 1):
video_cls = Video_factory(video_config.get('type', 'moving_bar_l2r'))
self.video = video_cls(shape, dt, dur, video_config.get('bar_width', 50),
start = video_config.get('start', None),
stop = video_config.get('stop', None),
record_file = video_config.get('record', None),
record_interval = video_config.get('record_interval', 1))
num_glomeruli = rf_config.get('num_glomeruli', 8)
self.fr = RectangularFilterBank(shape, num_glomeruli)
self.scale = scale
uids = list(neurons.keys())
parser = NeuronArborizationParser()
new_uids = []
neuron_ids = []
for n in uids:
neuron_name = neurons[n]['name']
subregions = [u['regions'] for u in parser.parse(neuron_name) if u['neuropil'] == 'PB' and 's' in u['neurite']][0]
for region in subregions:
new_uids.append(n)
if int(region[1:]) == 1:
neuron_ids.append(1)
else:
neuron_ids.append( (num_glomeruli + 2 - int(region[1:])) \
if region[0] == 'L' else \
int(region[1:]))
self.index = np.array(neuron_ids, np.int32) - 1
var_list = [('I', new_uids)]
self.name = name
# self.n_inputs = 18
super(PB_InputProcessor, self).__init__(var_list,
sensory_file = self.video.record_file,
sensory_interval = self.video.record_interval,
input_file = record_file,
input_interval = record_interval)
def pre_run(self):
self.video.pre_run()
self.fr.create_filters()
# self.file = h5py.File('{}_inputs.h5'.format(self.name), 'w')
# self.file.create_dataset('I',
# (0, self.n_inputs),
# dtype = np.double,
# maxshape=(None, self.n_inputs))
def is_input_available(self):
return True
def update_input(self):
frame = self.video.run_step()
PB_input = self.fr.apply_filters(frame, scale = self.scale)
self.variables['I']['input'] = PB_input[self.index]
def __del__(self):
try:
self.close_file()
except:
pass
class PB_InputProcessorPaper(BaseInputProcessor):
def __init__(self, shape, dt, dur, name, video_config, rf_config, neurons,
scale = 1.0,
record_file = None, record_interval = 1):
video_cls = Video_factory(video_config.get('type', 'moving_bar_l2r'))
self.video = video_cls(shape, dt, dur, video_config.get('bar_width', 50),
start = video_config.get('start', None),
stop = video_config.get('stop', None),
record_file = video_config.get('record', None),
record_interval = video_config.get('record_interval', 1))
num_glomeruli = rf_config.get('num_glomeruli', 18)
self.fr = RectangularFilterBank(shape, num_glomeruli)
self.scale = scale
uids = list(neurons.keys())
parser = NeuronArborizationParser()
new_uids = []
neuron_ids = []
for n in uids:
neuron_name = neurons[n]['name']
subregions = [u['regions'] for u in parser.parse(neuron_name) if u['neuropil'] == 'PB' and 's' in u['neurite']][0]
for region in subregions:
new_uids.append(n)
neuron_ids.append(
(num_glomeruli//2+1 - int(region[1:])) \
if region[0] == 'L' else\
int(region[1:]) + num_glomeruli//2)
self.index = np.array(neuron_ids, np.int32) - 1
var_list = [('I', new_uids)]
self.name = name
# self.n_inputs = 18
super(PB_InputProcessorPaper, self).__init__(var_list,
sensory_file = self.video.record_file,
sensory_interval = self.video.record_interval,
input_file = record_file,
input_interval = record_interval)
def pre_run(self):
self.video.pre_run()
self.fr.create_filters()
def is_input_available(self):
return True
def update_input(self):
frame = self.video.run_step()
PB_input = self.fr.apply_filters(frame, scale = self.scale)
self.variables['I']['input'] = PB_input[self.index]
def __del__(self):
try:
self.close_file()
except:
pass
class EB_InputProcessor(BaseInputProcessor):
def __init__(self, shape, dt, dur, name, video_config, rf_config, neurons,
scale = 1.0,
record_file = None, record_interval = 1):
video_cls = Video_factory(video_config.get('type', 'moving_bar_l2r'))
self.video = video_cls(shape, dt, dur, video_config.get('bar_width', 50),
start = video_config.get('start', None),
stop = video_config.get('stop', None),
record_file = video_config.get('record', None),
record_interval = video_config.get('record_interval', 1))
num_glomeruli = rf_config.get('num_glomeruli', 16)
self.fr = RectangularFilterBank(shape, num_glomeruli)
self.scale = scale
uids = list(neurons.keys())
parser = NeuronArborizationParser()
new_uids = []
neuron_ids = []
for n in uids:
neuron_name = neurons[n]['name']
subregions = set()
for u in parser.parse(neuron_name):
if u['neuropil'] == 'EB' and 's' in u['neurite']:
subregions |= u['regions']
for region in subregions:
new_uids.append(n)
neuron_ids.append(
(num_glomeruli//2+1 - int(region[1:])) \
if region[0] == 'L' else\
int(region[1:]) + num_glomeruli//2)
self.index = np.array(neuron_ids, dtype = np.int32) - 1
var_list = [('I', new_uids)]
self.name = name
# self.n_inputs = 18
super(EB_InputProcessor, self).__init__(var_list,
sensory_file = self.video.record_file,
sensory_interval = self.video.record_interval,
input_file = record_file,
input_interval = record_interval)
def pre_run(self):
self.video.pre_run()
self.fr.create_filters()
def is_input_available(self):
return True
def update_input(self):
frame = self.video.run_step()
EB_input = self.fr.apply_filters(frame, scale = self.scale)
self.variables['I']['input'] = EB_input[self.index]
# self.record_frame(PB_input)
def __del__(self):
try:
self.close_file()
except:
pass
class EB_Kakaria_InputProcessor(PresynapticInputProcessor):
def __init__(self, shape, dt, dur, name, video_config, rf_config, neurons,
scale = 1.0,
record_file = None, record_interval = 1):
uids = list(neurons.keys())
parser = NeuronArborizationParser()
new_uids = []
neuron_ids = []
wedge_map = {'L{}'.format(i): (0+22.5*(i-1), 22.5*i) for i in range(1, 9)}
wedge_map.update({'R{}'.format(i): (-22.5*i, -22.5*(i-1)) for i in range(1, 9)})
input_mapping = {}
for n in uids:
neuron_name = neurons[n]['name']
subregions = set()
for u in parser.parse(neuron_name):
if u['neuropil'] == 'EB' and 's' in u['neurite']:
subregions |= u['regions']
if len(subregions) == 2:
new_uids.append(n)
input_mapping[n] = []
for region in subregions:
input_mapping[n].append(wedge_map[region])
steps = int(dur/dt)
# Define Inputs
Gposes = []
Gweights = []
Gpos = np.zeros(steps)
Gweight = np.zeros(steps)
t = np.arange(0, dur, dt)
Gweight[int(np.round(0.0/dt)):int(np.round(30.0/dt))] = 1.0
Gpos[int(np.round(0.0/dt)):int(np.round(1.0/dt))] = -180.
Gpos[int(np.round(1.0/dt)):int(np.round(17.0/dt))] = -180+22.5*np.arange(0,16, dt)
Gpos[int(np.round(17.0/dt)):int(np.round(30.0/dt))] = 180-22.5*np.arange(0,13, dt)
# Gpos[int(np.round(30.0/dt)):int(np.round(31.0/dt))] = -180+22.5*2
Gposes.append(Gpos)
Gweights.append(Gweight)
Gpos = np.zeros(steps)
Gweight = np.zeros(steps)
Gweight[int(np.round(0.0/dt)):int(np.round(33.0/dt))] = 0.4
Gpos[int(np.round(0.0/dt)):int(np.round(33.0/dt))] = 60.
Gposes.append(Gpos)
Gweights.append(Gweight)
x = np.arange(-180,181)
r = np.zeros((steps, len(input_mapping)))
inputs = np.empty((steps, len(x)))
for Gpos, Gweight in zip(Gposes, Gweights):
for i in tqdm(range(steps)):
inputs[i,:] = np.exp(50*np.cos((x-Gpos[i])/180*np.pi))/1.842577884719606e+21
for j, uid in enumerate(new_uids):
v =
|
np.zeros(steps)
|
numpy.zeros
|
"""image input/output functionalities."""
import datetime
import logging
import numbers
import os
import pathlib
import re
import shutil
from subprocess import run
from textwrap import dedent
import nibabel as nib
import numpy as np
import pydicom as dcm
from miutil.imio.nii import array2nii # NOQA: F401 # yapf: disable
from miutil.imio.nii import getnii # NOQA: F401 # yapf: disable
from miutil.imio.nii import nii_gzip # NOQA: F401 # yapf: disable
from miutil.imio.nii import nii_ugzip # NOQA: F401 # yapf: disable
from miutil.imio.nii import niisort # NOQA: F401 # yapf: disable
# > NiftyPET resources
from .. import resources as rs
log = logging.getLogger(__name__)
# possible extentions for DICOM files
dcmext = ('dcm', 'DCM', 'ima', 'IMA', 'img', 'IMG')
# > DICOM coding of PET isotopes
istp_code = {
'C-111A1': 'F18', 'C-105A1': 'C11', 'C-B1038': 'O15', 'C-128A2': 'Ge68', 'C-131A3': 'Ga68'}
def create_dir(pth):
if not os.path.exists(pth):
os.makedirs(pth)
def time_stamp(simple_ascii=False):
now = datetime.datetime.now()
if simple_ascii:
nowstr = str(now.year) + '-' + str(now.month) + '-' + str(now.day) + '_' + str(
now.hour) + 'h' + str(now.minute)
else:
nowstr = str(now.year) + '-' + str(now.month) + '-' + str(now.day) + ' ' + str(
now.hour) + ':' + str(now.minute)
return nowstr
def fwhm2sig(fwhm, voxsize=2.0):
return (fwhm/voxsize) / (2 * (2 * np.log(2))**.5)
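# fwhm2sig converts a full width at half maximum given in the same units as
# `voxsize` (e.g. mm) into a Gaussian sigma expressed in voxels, using
# sigma = FWHM / (2*sqrt(2*ln 2)) ~ FWHM / 2.355.
# Illustration only (not from the original module): fwhm2sig(4.0, voxsize=2.0)
# gives (4.0/2.0) / 2.355 ~ 0.849 voxels.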
def mgh2nii(fim, fout=None, output=None):
''' Convert `*.mgh` or `*.mgz` FreeSurfer image to NIfTI.
Arguments:
fim: path to the input MGH file
fout: path to the output NIfTI file, if None then
creates based on `fim`
output: if not None and an applicable string it will
output a dictionary or an array (see below)
Return:
None: returns nothing
'image' or 'im': outputs just the image
'affine': outputs just the affine matrix
'all': outputs all as a dictionary
'''
if not os.path.isfile(fim):
raise ValueError('The input path is incorrect!')
# > get the image dictionary
mghd = getmgh(fim, output='all')
im = mghd['im']
# > sort out the output
if fout is None:
fout = fim.parent / (fim.name.split('.')[0] + '.nii.gz')
out = fout
if output == 'image' or output == 'im':
out = fout, im
elif output == 'affine':
out = fout, mghd['affine']
elif output == 'all':
out = mghd
out['fout'] = fout
array2nii(
mghd['im'], mghd['affine'], fout,
trnsp=(mghd['transpose'].index(0), mghd['transpose'].index(1), mghd['transpose'].index(2)),
flip=mghd['flip'])
return out
def getmgh(fim, nan_replace=None, output='image'):
'''
Get image from `*.mgz` or `*.mgh` file (FreeSurfer).
Arguments:
fim: input file name for the MGH/Z image
output: option for choosing output: 'image', 'affine' matrix or
'all' for a dictionary with all the info.
Return:
'image': outputs just the image
'affine': outputs just the affine matrix
'all': outputs all as a dictionary
'''
if not os.path.isfile(fim):
raise ValueError('The input path is incorrect!')
mgh = nib.freesurfer.load(str(fim))
if output == 'image' or output == 'all':
imr = np.asanyarray(mgh.dataobj)
# replace NaNs if requested
if isinstance(nan_replace, numbers.Number):
imr[
|
np.isnan(imr)
|
numpy.isnan
|
import numpy as np
import math
data = np.genfromtxt('data25.csv',delimiter=',')
theta = []
for i in range(400):
theta.append(i*math.pi/200)
flag = 0
#data = np.concatenate ((data[357:400], data[0:357]),axis=0)
for i in range(400):
if (data[i]==0):
data[i] = 500
rmin=min(data)
rmin_ind=
|
np.argmin(data)
|
numpy.argmin
|
import _test
from nose.tools import *
import warnings
import numpy as np
import numdifftools
import sparsemax
def test_sparsemax_of_zero():
"""check sparsemax proposition 1, part 1"""
z = np.zeros((1, 10))
np.testing.assert_array_equal(
sparsemax.forward(z),
np.ones_like(z) / z.size
)
def test_sparsemax_of_inf():
"""check sparsemax proposition 1, part 2"""
z = np.random.uniform(low=-3, high=3, size=(100, 10))
# assume |A(z)| = 1, as z is continues random
z_sort_arg = np.argsort(z, axis=1)[:, ::-1]
z_sort = np.sort(z, axis=-1)[:, ::-1]
gamma_z = z_sort[:, 0] - z_sort[:, 1]
epsilon = (0.99 * gamma_z * 1).reshape(-1, 1)
# construct the expected 1_A(z) array
p_expected =
|
np.zeros((100, 10))
|
numpy.zeros
|
import pathlib
import numpy as np
import matplotlib.pyplot as plt
import gaus_example
plt.rcParams.update({'font.size': 16}) # Adjust 16 to the size you need
IMAGE_DIR = pathlib.Path(__file__).parent.parent.absolute() / "src/images/"
print(f"Saving images to {IMAGE_DIR}")
def plt_plot_example():
x = np.array([1, 2, 3, 4]) # Create a 1D numpy array
y = 2 * x # Use element-wise operations to generate f(x)
plt.plot(x, y) # Create a plot element on the current figure
plt.savefig(IMAGE_DIR / "plt_plot_example.png", dpi=200)
plt.clf()
def multiple_plot_example():
t = np.linspace(0, 2*np.pi, 100) # list representing time steps from 0 to 10 seconds
x = np.sin(t) # Position over time
v = np.cos(t) # Velocity over time
plt.plot(t, x)
plt.plot(t, v)
plt.savefig(IMAGE_DIR / "multiple_plot_example.png", dpi=200)
plt.clf()
def plt_scatter_example():
x = np.array([1, 2, 3])
y = np.array([2, 4, 6])
plt.scatter(x, y) # similar usage to plt.plot()
plt.scatter(x, y + 1, s=100) # s represents the size of dots plotted
plt.savefig(IMAGE_DIR / "plt_scatter_example.png", dpi=200)
plt.clf()
def plt_errorbar_example():
time = np.arange(0, 5)
temperature =
|
np.array([82, 71, 63, 56, 50])
|
numpy.array
|
# AUTOGENERATED! DO NOT EDIT! File to edit: src/01_Clustering.ipynb (unless otherwise specified).
__all__ = ['module_path', 'get_alpha_shape', 'set_colinear', 'collinear', 'get_segments', 'get_polygons_buf',
'labels_filtra', 'levels_from_strings', 'get_tag_level_df_labels', 'level_tag', 'get_dics_labels',
'get_label_clusters_df', 'get_mini_jaccars', 'jaccard_distance', 'mod_cid_label', 'retag_originals',
'clustering', 'recursive_clustering', 'recursive_clustering_tree', 'compute_dbscan', 'adaptative_DBSCAN',
'compute_hdbscan', 'compute_OPTICS', 'compute_Natural_cities', 'SSM', 'get_tree_from_clustering',
'generate_tree_clusterize_form']
# Cell
#export
import os
import sys
import numpy as np
import pandas as pd
import kneed
import itertools
import shapely
import random
import time
import re
from CGAL.CGAL_Alpha_shape_2 import *
from CGAL.CGAL_Kernel import Point_2
from sklearn.cluster import DBSCAN, OPTICS
from sklearn.preprocessing import StandardScaler
from shapely.geometry import LineString
from shapely.ops import polygonize, cascaded_union
from shapely.geometry import box
from shapely.geometry import Point, Polygon, MultiPolygon
from shapely.ops import polygonize_full, linemerge, unary_union
from scipy.spatial import cKDTree, Delaunay
import hdbscan
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from .TreeClusters import *
# Cell
#export
def get_alpha_shape(point_list):
"""
Returns a polygon representing the hull of the points sample.
:param list point_list: list of tuples with sample coordinates.
:returns shapely.Polygon: concave hull shapely polygon
"""
uni_po = np.unique(point_list, axis=0)
if len(uni_po) < 3:
raise ValueError('Alpha Shape needs at least 3 points')
if set_colinear(uni_po) == True:
raise ValueError('The set of points cannot be collinear')
list_of_points = [Point_2(l[0], l[1]) for l in point_list]
a = Alpha_shape_2()
a.make_alpha_shape(list_of_points)
a.set_mode(REGULARIZED)
alpha = a.find_optimal_alpha(1).next()
a.set_alpha(alpha)
edges = []
for it in a.alpha_shape_edges():
edges.append(a.segment(it))
lines = []
for e in edges:
source_p = (e.source().x(), e.source().y())
target_p = (e.target().x(), e.target().y())
lines.append(LineString([source_p, target_p]))
return cascaded_union(list(polygonize(lines)))
# Cell
#export
def set_colinear(list_points):
"""
Check if every triplet of points in the list
is collinear
:param list list_points: List of shapely Points
:returns bool: True if all the points are collinear
"""
for i in itertools.combinations(list_points, 3):
if collinear(i[0], i[1], i[2]) == False:
return False
return True
# Cell
#export
def collinear(p1, p2, p3):
"""
Check if the points are collinear
:param shapely Point p1: point to check if it is collinear
:param shapely Point p2: point to check if it is collinear
:param shapely Point p3: point to check if it is collinear
:return bool: True if they are collinear
"""
return (p1[1]-p2[1]) * (p1[0]-p3[0]) == (p1[1]-p3[1])*(p1[0]-p2[0])
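# Cell
def _collinear_example():
    """
    Illustrative sketch only (not part of the exported API): collinear
    compares the two cross-product terms of (p2 - p1) and (p3 - p1), so
    points on a common line return True and any other triple returns False.
    """
    assert collinear((0, 0), (1, 1), (2, 2))
    assert not collinear((0, 0), (1, 1), (2, 3))
    return True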
# Cell
#export
def get_segments(points):
"""
Get the edge segments from a Delaunay triangulation
:param points: Points to build the Delaunay triangulation from and extract edges
:return edges: list of LineString edges
"""
TIN = Delaunay(points)
# list of coordinates for each edge
edges = []
for tr in TIN.simplices:
for i in range(3):
edge_idx0 = tr[i]
edge_idx1 = tr[(i+1) % 3]
edges.append(LineString((Point(TIN.points[edge_idx0]),
Point(TIN.points[edge_idx1]))))
return edges
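# Cell
def _get_segments_example():
    """
    Illustrative sketch only (not part of the exported API): the four corners
    of a unit square triangulate into two triangles, so get_segments returns
    six LineString edges (three per simplex, shared edges duplicated).
    """
    pts = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    return len(get_segments(pts))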
# Cell
#export
def get_polygons_buf(lines):
"""
Obtain the polygons from the lines
:param list lines: List of lines
:returns shapely polygon: the buffered union of the polygons
formed by the edges (Polygon or MultiPolygon)
"""
linework = linemerge(lines)
linework = unary_union(linework)
result, _, _, _ = polygonize_full(linework)
result = unary_union(result)
result = result.buffer(0.0000001)
return result
# Cell
#export
def labels_filtra(point_points, multy_pol):
"""
Labels each point with the index of the polygon in multy_pol that contains it;
if no polygon contains a point, it is labeled as -1
:param shapely MultiPoint point_points: Points to check
:param multy_pol: Polygon or MultiPolygon to test containment against
:returns np.array: Label array with -1 for points not contained
in any polygon
"""
point_Po = [Point(i) for i in point_points]
labels_p=[]
if type(multy_pol)==shapely.geometry.MultiPolygon :
for po in point_Po:
if multy_pol.contains(po):
for num_pol, poly in enumerate( multy_pol):
if poly.contains(po):
labels_p.append(num_pol)
break
else:
labels_p.append(-1)
elif type(multy_pol)==shapely.geometry.Polygon :
for po in point_Po:
if multy_pol.contains(po):
labels_p.append(0)
else:
labels_p.append(-1)
else:
raise ValueError('The input is not MultiPolygon or Polygon type')
return np.array(labels_p)
# Cell
#export
def levels_from_strings(
string_tag,
level_str='l_',
node_str = 'n_',
**kwargs
):
"""
Returns the levels and the node id using the expected strings
that identify the level id and node id
:param str level_str: string for the level
:param str node_str: string for the nodes
:returns tuple (levels, nodeid):
"""
positions = [i.start() for i in re.finditer( level_str, string_tag )]
levels = [string_tag[i+len(level_str)] for i in positions ]
nodeid_positions = [i.start() for i in re.finditer( node_str, string_tag )]
nodeid = [string_tag[i+len(node_str)] for i in nodeid_positions ]
return levels, nodeid
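# Cell
def _levels_from_strings_example():
    """
    Illustrative sketch only (the tag below is hypothetical): each character
    following an 'l_' marker is collected as a level and each character
    following an 'n_' marker as a node id, giving (['0', '1'], ['3', '7']).
    """
    return levels_from_strings('root_l_0_n_3_l_1_n_7')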
# Cell
#export
def get_tag_level_df_labels(df, levels_int ):
"""
Get the tag for the cluster
:param Pandas.DataFrame df:
:param int levels_int:
:returns None:
"""
for i in range(levels_int):
df['level_'+ str(i) +'_cluster']= df['cluster_id'].apply(lambda l: level_tag(l,i))
# Cell
#export
def level_tag(list_tags, level_int ):
"""
Tags whether the entries are noise or signal
"""
if len(list_tags)==0:
return 'noise'
try:
return list_tags[level_int]
except:
return 'noise'
# Cell
#export
def get_dics_labels(tree_or, tree_res, level_get):
"""
Obtains a list of dictionaries to retag the original tree with their
correspondence in the tree_res on level level_get + 1
:param tree_or:
:param tree_res:
:param level_get:
:returns list:
"""
dic_list_levels= []
for i in range(level_get):
dic_level_df = get_label_clusters_df(tree_or, tree_res, i)
## Eliminate the clusters with nan
dic_level_df.dropna(axis=0, subset=['Sim_cluster'], inplace=True)
dic_lev = dic_level_df['Sim_cluster'].to_dict()
dic_list_levels.append({'level_ori':'level_'+str(i)+'_cluster', 'dict': dic_lev})
return dic_list_levels
# Cell
#export
def get_label_clusters_df(tree_1, tree_2, level_int):
"""
Obtains the dataframe with the label
:param TreeClusters tree_1:
:param TreeClusters tree_2:
:param int level_int:
:returns Pandas.DataFrame df_level_clus:
"""
level_all = tree_1.get_level(level_int)
df_level_clus = pd.DataFrame(level_all, columns=['Clusters'])
df_level_clus['Area'] = df_level_clus['Clusters'].apply(lambda l: l.polygon_cluster.area)
df_level_clus['Name'] = df_level_clus['Clusters'].apply(lambda l: l.name)
df_level_clus['Sim_cluster'] = df_level_clus['Clusters'].apply(lambda l: get_mini_jaccars(l, tree_2,level_int+1)) ###### Because of how the clusterings are built, the next level must be used
#print('', df_level_clus['Sim_cluster'].dtype)
df_level_clus= df_level_clus.sort_values(by ='Area', ascending=False)
df_level_clus['Sim_cluster'] = (df_level_clus['Sim_cluster']
.where(~df_level_clus.duplicated(subset=['Sim_cluster']), None))
#print(df_level_clus['Sim_cluster'].dtype)
level_2= tree_2.get_level(level_int+1)
df_level_clus['Sim_cluster_name'] =(df_level_clus['Sim_cluster']
.astype('int32', errors='ignore')
.replace({np.nan: ''})
.apply(lambda l: level_2[int(l)].name if l !='' else None) )
return df_level_clus
# Cell
#export
def get_mini_jaccars(cluster, tree_2, level_int):
"""
Find the most similar cluster in tree_2 at level level_int
:returns int: the index of the most similar polygon in the level
"""
tree_2_level= tree_2.get_level(level_int)
Jaccard_i= [jaccard_distance(cluster.polygon_cluster, j.polygon_cluster) for j in tree_2_level]
valu_min = Jaccard_i.index( min(Jaccard_i))
return valu_min
# Cell
#export
def jaccard_distance(p1, p2):
"""
Computes the Jaccard distance between two polygons.
param: p1 shapely Polygon
param: p2 shapely Polygon
return float Jaccard distance
"""
intersection_area = p1.intersection(p2).area
#print(intersection_area)
jacc= 1 - (intersection_area)/(p1.area + p2.area - intersection_area)
return jacc
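# Cell
def _jaccard_distance_example():
    """
    Illustrative sketch only (not part of the exported API): two unit squares
    overlapping in half their area have intersection 0.5 and union 1.5, so
    the Jaccard distance is 1 - 0.5 / 1.5 = 2 / 3.
    """
    return jaccard_distance(box(0., 0., 1., 1.), box(0.5, 0., 1.5, 1.))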
# Cell
#export
def mod_cid_label(dic_label):
"""
"""
dic_label={str(k):str(v) for k,v in dic_label.items()}
dic_label['noise'] = 'noise'
return dic_label
# Cell
#export
def retag_originals(df_fram_or , df_results, tag_original, tag_results, dic_tag_or_res):
"""
Retags the labels in the df_fram_or using the dictionary dic_tag_or_res to match
the tags with the corresponding tag in the df_result and all the labels that are
not in the dictionary get a new tag generated for them.
:param Pandas.DataFrame df_fram_or
:param Pandas.DataFrame df_results
:param tag_original
:param tag_results
:param Pandas.DataFrame dic_tag_or_res
"""
tag_plus= len(df_results[tag_results].unique()) +100 - len(df_results[tag_results].unique())%100
df_fram_or['re_tag_'+str(df_results.name)+'_'+tag_original] = df_fram_or[tag_original].apply(lambda l: dic_tag_or_res[l] if l in dic_tag_or_res.keys() else str(int(l) +tag_plus) )
# Cell
def clustering(
t_next_level_2,
level=None,
algorithm='dbscan',
**kwargs
):
"""Function to get the clusters for single group by
:param t_next_level_2: Dictionary with the points to compute the
cluster
:param level: None Level to compute (Default None)
:param str algorithm : Algorithm type is supported (Default= 'dbscan')
:param int min_points_cluster: minimum number of points to consider a cluster (Default 50)
:param double eps: Epsilon parameter In case is needed
:param bool return_noise: To return the noise (Default False)
:param bool verbose: Printing (Default False)
:returns list t_next_level_n: A list of dictionaries with the points, the parent, and the noise
"""
verbose= kwargs.get('verbose',False)
min_points = kwargs.get( 'min_points_cluster', 50)
ret_noise= kwargs.get('return_noise', True)
eps = kwargs.get('eps',0.8) # Epsilon value to dbscan
t_next_level_n = []
if level == None:
level = 0
for li_num, cluster_list_D in enumerate(t_next_level_2):
cluster_list = cluster_list_D['points']
cluster_list_pa = cluster_list_D['parent']
if verbose:
print("Size cluster list: ", len(cluster_list))
for c_num, cluster in enumerate(cluster_list):
if verbose:
print("Size cluster: ", len(cluster))
print('Algorithm: ', algorithm)
if len(cluster) > 5:
if algorithm == 'dbscan':
if verbose:
print("Epsilon Value: ", eps)
tmp = compute_dbscan(cluster,
eps_DBSCAN = eps,
debugg=verbose,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
elif algorithm == 'hdbscan':
tmp = compute_hdbscan(cluster,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
##########
elif algorithm == 'adaptative_DBSCAN':
#### If the number of cluster is too small
tmp = adaptative_DBSCAN(cluster, **kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
elif algorithm == 'optics':
tmp = compute_OPTICS(cluster,
eps_OPTICS = eps,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
##########
elif algorithm == 'natural_cities':
tmp = compute_Natural_cities(cluster,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
##########
else:
raise ValueError('Algorithm must be one of: dbscan, hdbscan, adaptative_DBSCAN, optics, natural_cities')
# sys.exit("1")
if verbose:
print("The number of resulting clusters is : ", len(tmp))
if ret_noise:
dic_clos = {'points': tmp,
'parent': cluster_list_pa + '_L_'+str(level) +
'_l_' + str(li_num) + '_c_'+str(c_num),
'noise_points':noise_points
}
else:
dic_clos = {'points': tmp, 'parent': cluster_list_pa +
'_L_'+str(level) + '_l_' + str(li_num) + '_c_'+str(c_num)}
t_next_level_n.append(dic_clos)
else:
if ret_noise:
dic_clos = {'points': [],
'parent': cluster_list_pa + '_L_'+str(level) +
'_l_' + str(li_num) + '_c_'+str(c_num),
'noise_points':cluster
}
else:
dic_clos = {'points': [], 'parent': cluster_list_pa +
'_L_'+str(level) + '_l_' + str(li_num) + '_c_'+str(c_num)}
t_next_level_n.append(dic_clos)
return t_next_level_n
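# Cell
def _clustering_input_example(points_xy):
    """
    Illustrative sketch only (hypothetical helper): clustering expects a list
    of dictionaries, each with a list of (N, 2) point arrays under 'points'
    and a tag under 'parent'; this wraps a single array in that structure and
    runs one DBSCAN pass over it.
    """
    first_level = [{'points': [points_xy], 'parent': 'root'}]
    return clustering(first_level, level=0, algorithm='dbscan',
                      eps=0.5, min_samples=10, return_noise=True)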
# Cell
def recursive_clustering(
this_level, # Dictionary with Points
to_process, # levels to process
cluster_tree, # to store the clusters
level = 0, # current level
**kwargs
):
"""
Performs the recursive clustering.
Calls compute_dbscan for each
list of clusters, keeping the structure, and then calls itself
until no more clusters satisfy the condition
:param dict this_level: level is the current level
:param int to_process: the max level to process
:param double eps: The epsilon parameter distance to pass to the needed algorithm
:param list cluster_tree : list of list to insert the levels
:param bool verbose : To print
:param double decay: In the use of dbscan the deacy parameter to reduce eps
:param int min_points_cluster: The min point for each cluster to pass to algorithm
:param str algorithm: The string of the algorithm name to use
"""
algorithm= kwargs.get('algorithm' ,'dbscan') # Algorithm to use
verbose= kwargs.get('verbose',False)
min_points = kwargs.get( 'min_points_cluster', 50)
decay = kwargs.get('decay', 0.7)
eps = kwargs.get('eps' ,0.8) # Epsilon distance to DBSCAN parameter
max_k_increase = kwargs.get('max_k_increase', None)
tmp = None
if level == 0:
kwargs['eps'] = eps
else:
kwargs['eps'] = eps * decay
if max_k_increase != None:
if level == 0:
kwargs['max_k_percent'] = 0.1
else:
kwargs['max_k_percent'] = kwargs['max_k_percent'] * max_k_increase
cluster_result_polygons = []
if level > to_process:
if verbose:
print('Done clustering')
return
######## Get the clusters for the current list of points
all_l = clustering(
this_level,
level=level,
**kwargs
)
##########
cluster_tree.append(all_l)
cluster_n = 0
for i in all_l:
cluster_n += len(i['points'])
if verbose:
print('At level ', level, ' the number of lists are ',
len(all_l), ' with ', cluster_n, 'clusters')
level += 1
if len(all_l) > 0:
return recursive_clustering(all_l,
to_process=to_process,
cluster_tree=cluster_tree,
level= level,
**kwargs
)
else:
if verbose:
print('done clustering')
return
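# Worked example of the eps decay (illustrative): with the defaults eps=0.8 and
# decay=0.7, successive recursion levels run the clustering with
# eps = 0.8, 0.56, 0.392, 0.2744, ... i.e. eps is multiplied by `decay` once
# per level below the first.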
# Cell
def recursive_clustering_tree(dic_points_ori, **kwargs):
"""
Obtaing the recursive tree using a specific algorithm
"""
levels_clustering= kwargs.get('levels_clustering',4)
cluster_tree = []
recursive_clustering([dic_points_ori], # Dictionary with Points
levels_clustering, # levels to process
cluster_tree, # to store the clusters
level=0, # current level
**kwargs
)
tree_clus= get_tree_from_clustering(cluster_tree)
tree_from_clus= TreeClusters()
tree_from_clus.levels_nodes = tree_clus
tree_from_clus.root= tree_from_clus.levels_nodes[0][0]
return tree_from_clus
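# Hedged usage sketch (parameter values are illustrative, not defaults taken
# from the original notebooks):
#     points = np.random.rand(1000, 2)
#     tree = recursive_clustering_tree({'points': [points], 'parent': 'root'},
#                                      algorithm='dbscan', eps=0.5,
#                                      levels_clustering=3,
#                                      min_points_cluster=30)
#     root = tree.root  # TreeClusters node covering the whole sample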
# Cell
def compute_dbscan(cluster, **kwargs):
"""
Sklearn DBSCAN wrapper.
:param cluster: a (N,2) numpy array containing the obsevations
:returns list with numpy arrays for all the clusters obtained
"""
eps = kwargs.get( 'eps_DBSCAN',.04)
debugg= kwargs.get( 'debugg',False)
min_samples= kwargs.get( 'min_samples',50)
ret_noise = kwargs.get('return_noise', False)
    # Standardize the sample
scaler = StandardScaler()
cluster = scaler.fit_transform(cluster)
if debugg:
print('epsilon distance to DBSCAN: ', eps)
print("min_samples to DBScan: ", min_samples )
print("Number of points to fit the DBScan: ",cluster.shape[0])
db = DBSCAN(eps=eps, min_samples=min_samples).fit(cluster) # Check if can be run with n_jobs = -1
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
l_unique_labels = len(set(labels)) - (1 if -1 in labels else 0)
unique_labels = set(labels)
cluster = scaler.inverse_transform(cluster)
clusters = []
if debugg:
print('Number of clusters:' ,l_unique_labels)
for l in unique_labels:
if l != -1:
class_member_mask = (labels == l)
clusters.append(cluster[class_member_mask])
elif l == -1 and debugg == True:
class_member_mask = (labels == l)
print("Muestras consideradas ruido: ", sum(class_member_mask))
if ret_noise == True:
class_member_mask = (labels == -1)
return clusters, cluster[class_member_mask]
return clusters
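# Minimal runnable sketch for compute_dbscan (assumptions: StandardScaler and
# DBSCAN are already imported at module level, as the function above requires;
# the synthetic data and parameter values below are illustrative only).
def _demo_compute_dbscan(seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    blob_a = rng.normal(loc=(0.0, 0.0), scale=0.05, size=(200, 2))
    blob_b = rng.normal(loc=(1.0, 1.0), scale=0.05, size=(200, 2))
    scatter = rng.uniform(-1.0, 2.0, size=(20, 2))
    pts = np.vstack([blob_a, blob_b, scatter])
    found, leftover = compute_dbscan(pts, eps_DBSCAN=0.2, min_samples=20,
                                     return_noise=True)
    print(len(found), 'clusters,', len(leftover), 'points flagged as noise')
    return found, leftover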
# Cell
def adaptative_DBSCAN(points2_clusters ,
**kwargs):
"""
The function use the knee and average to obtain a good value for epsilon and use
DBSCAN to obtain the clusters
:param list Points points2_clusters: Point to clusterize
:param int max_k: = (Default = len(points2_clusters)*.1)
:param int min_k: (Default =50)
:param int step_k: (Default = 50)
:param int leaf_size: (Default = 50)
:param bool scale_points: (Default = True)
:param bool debugg: (Default = False)
:param bool ret_noise: (Default = True)
:returns list : list of cluster. If ret_noise = True return tuple list of cluter and noise
"""
max_k = kwargs.get('max_k', int(len(points2_clusters)*.1))
max_k_percent = kwargs.get('max_k_percent', None)
min_k = kwargs.get('min_k', 50)
step_k = kwargs.get('step_k', 50)
leaf_size = kwargs.get('leaf_size',50)
scale_points= kwargs.get('scale_points',True)
debugg = kwargs.get('verbose',False)
ret_noise = kwargs.get('return_noise', True)
    ###### Some changes are needed for the case where the clusters are
    # smaller than the minimums established previously
    ##### Set the possible minimum and maximum values
if max_k > len(points2_clusters):
raise ValueError('The max_k value is too large for the number of points')
if max_k_percent != None:
max_k = int(len(points2_clusters)*max_k_percent)
if min_k > len(points2_clusters):
        print('The min_k value is too large for the number of points; returning empty clusters')
if ret_noise == True:
return [] , points2_clusters
else:
return []
if step_k > len(points2_clusters):
raise ValueError('The step_k value is too large for the number of points')
if min_k == max_k:
print('min_k reset to obtain at least 1 value')
min_k = max_k-1
if scale_points ==True:
scaler = StandardScaler()
points_arr = scaler.fit_transform(points2_clusters)
else:
points_arr = points2_clusters
kdt= cKDTree(points_arr, leafsize=leaf_size)
lits_appe_all_aver=[]
for j in range( min_k, max_k, step_k ):
dist_va, ind = kdt.query(points_arr, k=j, n_jobs =-1)
non_zero = dist_va[:, 1:]
non_zero = np.ndarray.flatten(non_zero)
non_zero =
|
np.sort(non_zero)
|
numpy.sort
|
import numpy as np
import os
# embedding the position
def pos_embed(x):
if x < -60:
return 0
if -60 <= x <= 60:
return x + 61
if x > 60:
return 122
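# Worked example (added for clarity): relative offsets are clipped to [-60, 60]
# and shifted into the index range [0, 122], i.e. 123 possible position ids.
def _check_pos_embed():
    assert pos_embed(-100) == 0    # clipped on the left
    assert pos_embed(-60) == 1
    assert pos_embed(0) == 61      # word sitting exactly on the entity
    assert pos_embed(60) == 121
    assert pos_embed(100) == 122   # clipped on the right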
# find the index of x in y, if x not in y, return -1
def find_index(x, y):
flag = -1
for i in range(len(y)):
if x != y[i]:
continue
else:
return i
return flag
# reading data
def init():
dim = 50
print('reading word embedding data...')
vec = []
word2id = {}
f = open('./origin_data/vectors.txt', encoding="utf-8")
f.readline()
    # read the word-vector table and collect all words
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
word2id[content[0]] = len(word2id)
content = content[1:]
content = [(float)(i) for i in content]
vec.append(content)
f.close()
    # some entities in the corpus do not exactly match the tokens in the text, so for simplicity the entity strings themselves are added to the vocabulary as words
f = open('./origin_data/train.txt', 'r', encoding="utf-8")
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
# get entity name
en1 = content[2]
en2 = content[3]
if en1 not in word2id.keys():
word2id[en1] = len(word2id)
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
if en2 not in word2id.keys():
word2id[en2] = len(word2id)
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
f.close()
f = open('./origin_data/test.txt', 'r', encoding="utf-8")
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
# get entity name
en1 = content[2]
en2 = content[3]
if en1 not in word2id.keys():
word2id[en1] = len(word2id)
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
if en2 not in word2id.keys():
word2id[en2] = len(word2id)
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
f.close()
word2id['UNK'] = len(word2id)
word2id['BLANK'] = len(word2id)
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
vec = np.array(vec, dtype=np.float32)
print('reading relation to id')
relation2id = {}
f = open('./origin_data/relation2id.txt', 'r', encoding="utf-8")
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
relation2id[content[0]] = int(content[1])
f.close()
# length of sentence is 70
fixlen = 70
# max length of position embedding is 60 (-60~+60)
maxlen = 60
train_sen = {} # {entity pair:[[[label1-sentence 1],[label1-sentence
# 2]...],[[label2-sentence 1],[label2-sentence 2]...]}
train_ans = {} # {entity pair:[label1,label2,...]} the label is one-hot
# vector
print('reading train data...')
f = open('./origin_data/train.txt', 'r', encoding="utf-8")
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
# get entity name
en1 = content[2]
en2 = content[3]
relation = 0
if content[4] not in relation2id:
relation = relation2id['NA']
else:
relation = relation2id[content[4]]
# put the same entity pair sentences into a dict
tup = (en1, en2)
label_tag = 0
if tup not in train_sen:
train_sen[tup] = []
train_sen[tup].append([])
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
train_ans[tup] = []
train_ans[tup].append(label)
else:
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
temp = find_index(label, train_ans[tup])
if temp == -1:
train_ans[tup].append(label)
label_tag = len(train_ans[tup]) - 1
train_sen[tup].append([])
else:
label_tag = temp
sentence = content[5:-1]
en1pos = 0
en2pos = 0
for i in range(len(sentence)):
if sentence[i] == en1:
en1pos = i
if sentence[i] == en2:
en2pos = i
output = []
for i in range(fixlen):
word = word2id['BLANK']
rel_e1 = pos_embed(i - en1pos)
rel_e2 = pos_embed(i - en2pos)
output.append([word, rel_e1, rel_e2])
for i in range(min(fixlen, len(sentence))):
word = 0
if sentence[i] not in word2id:
word = word2id['UNK']
else:
word = word2id[sentence[i]]
output[i][0] = word
train_sen[tup][label_tag].append(output)
print('reading test data ...')
test_sen = {} # {entity pair:[[sentence 1],[sentence 2]...]}
test_ans = {} # {entity pair:[labels,...]} the labels is N-hot vector (N
# is the number of multi-label)
f = open('./origin_data/test.txt', 'r', encoding="utf-8")
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
en1 = content[2]
en2 = content[3]
relation = 0
if content[4] not in relation2id:
relation = relation2id['NA']
else:
relation = relation2id[content[4]]
tup = (en1, en2)
if tup not in test_sen:
test_sen[tup] = []
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
test_ans[tup] = label
else:
y_id = relation
test_ans[tup][y_id] = 1
sentence = content[5:-1]
en1pos = 0
en2pos = 0
for i in range(len(sentence)):
if sentence[i] == en1:
en1pos = i
if sentence[i] == en2:
en2pos = i
output = []
for i in range(fixlen):
word = word2id['BLANK']
rel_e1 = pos_embed(i - en1pos)
rel_e2 = pos_embed(i - en2pos)
output.append([word, rel_e1, rel_e2])
for i in range(min(fixlen, len(sentence))):
word = 0
if sentence[i] not in word2id:
word = word2id['UNK']
else:
word = word2id[sentence[i]]
output[i][0] = word
test_sen[tup].append(output)
train_x = []
train_y = []
test_x = []
test_y = []
train_entity_pair = []
test_entity_pair = []
print('organizing train data')
# f = open('./data/train_q&a.txt', 'w', encoding="utf-8")
temp = 0
# train_sen : {entity pair:[[[label1-sentence 1],[label1-sentence 2]...],[[label2-sentence 1],[label2-sentence 2]...]}
# train_ans : {entity pair:[label1,label2,...]}
loss_train_sent = 0
loss_test_sent = 0
for i in train_sen:
if len(train_ans[i]) != len(train_sen[i]):
print('ERROR')
        lenth = len(train_ans[i]) # number of distinct label types for this entity pair
if i[0] not in word2id:
loss_train_sent += 1
continue
if i[1] not in word2id:
loss_train_sent += 1
continue
h, t = word2id[i[0]], word2id[i[1]]
for j in range(lenth):
            train_x.append(train_sen[i][j]) #group the sentences with the same entity pair and relation into one array
            train_y.append(train_ans[i][j]) #the relation label corresponding to this entity pair
train_entity_pair.append([h,t])
# f.write(str(temp) + '\t' + i[0] + '\t' + i[1] + '\t' + str(
# np.argmax(train_ans[i][j])) + '\n')
temp += 1
f.close()
print('organizing test data')
# f = open('./data/test_q&a.txt', 'w', encoding="utf-8")
temp = 0
    # iterate over every entity pair
for i in test_sen:
if i[0] not in word2id:
loss_test_sent += 1
continue
if i[1] not in word2id:
loss_test_sent += 1
continue
test_x.append(test_sen[i])
test_y.append(test_ans[i])
h, t = word2id[i[0]], word2id[i[1]]
test_entity_pair.append([h,t])
tempstr = ''
for j in range(len(test_ans[i])):
if test_ans[i][j] != 0:
tempstr = tempstr + str(j) + '\t'
# f.write(str(temp) + '\t' + i[0] + '\t' + i[1] + '\t' + tempstr + '\n')
temp += 1
f.close()
    print('dropped train sentences (entity not in vocabulary):', loss_train_sent)
    print('dropped test sentences (entity not in vocabulary):', loss_test_sent)
train_x = np.array(train_x)
train_y = np.array(train_y)
test_x = np.array(test_x)
test_y = np.array(test_y)
train_entity_pair = np.array(train_entity_pair)
test_entity_pair = np.array(test_entity_pair)
special_token = np.array([word2id['BLANK'], word2id['UNK']])
np.save('./data/vec.npy', vec)
np.save('./data/word2id.npy', word2id)
np.save('./data/train_x.npy', train_x)
np.save('./data/train_y.npy', train_y)
np.save('./data/testall_x.npy', test_x)
np.save('./data/testall_y.npy', test_y)
np.save('./data/train_entity_pair.npy', train_entity_pair)
np.save('./data/test_entity_pair.npy', test_entity_pair)
np.save('./data/special_token.npy', special_token)
# get test data for P@N evaluation, in which only entity pairs with more
# than 1 sentence exist
print('get test data for p@n test')
pone_test_x = []
pone_test_y = []
ptwo_test_x = []
ptwo_test_y = []
pall_test_x = []
pall_test_y = []
for i in range(len(test_x)):
if len(test_x[i]) > 1:
pall_test_x.append(test_x[i])
pall_test_y.append(test_y[i])
onetest = []
temp = np.random.randint(len(test_x[i]))
onetest.append(test_x[i][temp])
pone_test_x.append(onetest)
pone_test_y.append(test_y[i])
twotest = []
temp1 = np.random.randint(len(test_x[i]))
temp2 = np.random.randint(len(test_x[i]))
while temp1 == temp2:
temp2 = np.random.randint(len(test_x[i]))
twotest.append(test_x[i][temp1])
twotest.append(test_x[i][temp2])
ptwo_test_x.append(twotest)
ptwo_test_y.append(test_y[i])
pone_test_x = np.array(pone_test_x)
pone_test_y = np.array(pone_test_y)
ptwo_test_x = np.array(ptwo_test_x)
ptwo_test_y = np.array(ptwo_test_y)
pall_test_x = np.array(pall_test_x)
pall_test_y = np.array(pall_test_y)
np.save('./data/pone_test_x.npy', pone_test_x)
np.save('./data/pone_test_y.npy', pone_test_y)
np.save('./data/ptwo_test_x.npy', ptwo_test_x)
np.save('./data/ptwo_test_y.npy', ptwo_test_y)
|
np.save('./data/pall_test_x.npy', pall_test_x)
|
numpy.save
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from astropy.utils.data import get_pkg_data_filename
from astropy.table import Table
from astropy.io import fits
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
import math
from shapely.geometry import Polygon, Point
#I compute the covariance matrix from a set of clustering measurements in
#independent zones obtained with the jackknife technique
#the clustering measurements can be replaced by any other desired measurement
#I read the fits file to obtain the wanted data
event_filename = get_pkg_data_filename('60fields.fits')
events = Table.read(event_filename, hdu=1)
hdul = fits.open(event_filename)
data = hdul[1].data
#extract the columns from the table
dec = data['DEC']
ra = data['RA']
redshift = data['Z']
#some specific selection
dec_sel = dec[:1593]
ra_sel = ra[:1593]
redshift_sel = redshift[:1593]
#redshift selection from 3 to 6
select = (redshift_sel >= 3 ) & (redshift_sel <= 6.)
Zf_wide = redshift_sel[select]
DECf_wide = dec_sel[select]
RAf_wide = ra_sel[select]
#function to split the sample area in different jackknife zones
def dec_cut(ra_values,a,b):
    #a and b: slope and intercept of the line y = a*x + b
y=a*ra_values+b #equation of the line
return y
cm = plt.cm.get_cmap('jet')
fig = plt.figure().add_subplot(111)
plt.scatter(RAf_wide,DECf_wide, s=10, c=Zf_wide, marker='o', cmap=cm)
plt.gca().invert_xaxis()
plt.text(53.25,-27.73,'zone3')
plt.text(53.27,-27.825,'zone4')
plt.text(53.17,-27.71,'zone5')
plt.text(53.23,-27.87,'zone6')
plt.text(53.11,-27.73,'zone7')
plt.text(53.17,-27.88,'zone8')
plt.text(53.065,-27.75,'zone9')
plt.text(53.1,-27.9,'zone10')
colorbar=plt.colorbar()
plt.plot(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)]+0.004,dec_cut(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)],-2.664,113.95),color='k')
plt.plot(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)]-0.055,dec_cut(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)],-2.664,113.95),color='k')
plt.plot(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)]-0.115,dec_cut(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)],-2.664,113.95),color='k')
plt.plot(RAf_wide,dec_cut(RAf_wide,0.4192,-50.08213)+0.005,color='k')
colorbar.set_label('z')
colorbar.ax.tick_params( direction='in')
plt.clim(3., 6)
fig.xaxis.set_ticks_position('both')
fig.yaxis.set_ticks_position('both')
fig.xaxis.set_tick_params(direction='in', which='both')
fig.yaxis.set_tick_params(direction='in', which='both')
plt.xlabel("RA", fontsize=14)
plt.ylabel("Dec", fontsize=14)
plt.grid(False)
plt.tight_layout()
#plt.savefig('Jacknife zones',dpi=500)
plt.show()
#select all zones but one
RAf_wide3=np.hstack((RAf_wide[(RAf_wide<np.min(RAf_wide)+0.06*3)],RAf_wide[(DECf_wide<np.mean(DECf_wide))&(RAf_wide>np.min(RAf_wide)+0.06*3)]))
DECf_wide3=np.hstack((DECf_wide[(RAf_wide<np.min(RAf_wide)+0.06*3)],DECf_wide[(DECf_wide<np.mean(DECf_wide))&(RAf_wide>np.min(RAf_wide)+0.06*3)]))
Zf_wide3=np.hstack((Zf_wide[(RAf_wide<np.min(RAf_wide)+0.06*3)],Zf_wide[(DECf_wide<np.mean(DECf_wide))&(RAf_wide>np.min(RAf_wide)+0.06*3)]))
#calculate the clustering in the above subsample with the K-estimator from Adelberger et al. 2005
#transverse and radial separations, rij and zij
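#note added for clarity: the K-estimator below is the ratio of pair counts in a
#small line-of-sight window (0 < Zij < 7 h^-1 Mpc) to those in a larger one
#(0 < Zij < 45 h^-1 Mpc) per transverse-separation bin; without clustering the
#ratio tends to 7/45, which is the horizontal reference line in the plots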
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide3):
phi = np.sqrt((RAf_wide3[k]-RAf_wide3[k+1:])**2+(DECf_wide3[k]-DECf_wide3[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide3[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide3[k+1:]).value)*0.7)
#clustering
kab_wide3 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide3 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide3 = np.append(kab_wide3, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide3 = np.append(err_wide3, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp = np.append(binp, bini + (bins[k+1]-bini)/2)
ax = plt.figure().add_subplot(111)
ax.scatter(binp, kab_wide3, s=10, c = 'b', marker='o')
horiz_line = np.array([7/45 for m in range(len(kab_wide3))])
ax.errorbar(binp, kab_wide3, yerr=err_wide3, xerr=None, c = 'b', ls='None', capsize=2, elinewidth=1)
ax.plot(binp, horiz_line, 'k-', linewidth = 1)
plt.xlabel(r'$R_{ij}$ [$h^{-1}$Mpc]', fontsize=14)
plt.ylabel(r'$K^{0,7}_{7,45}$', fontsize=14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(direction='in', which='both')
ax.yaxis.set_tick_params(direction='in', which='both')
plt.tick_params(labelsize = 'large')
ax.set_xscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
plt.tight_layout()
plt.grid(False)
plt.show()
#second subsample, all zones but one (different one than previously)
RAf_wide4=np.hstack((RAf_wide[(RAf_wide<np.min(RAf_wide)+0.06*3)],RAf_wide[(DECf_wide>np.mean(DECf_wide))&(RAf_wide>np.min(RAf_wide)+0.06*3)] ))
DECf_wide4=np.hstack((DECf_wide[(RAf_wide<np.min(RAf_wide)+0.06*3)],DECf_wide[(DECf_wide>np.mean(DECf_wide))&(RAf_wide>np.min(RAf_wide)+0.06*3)] ))
Zf_wide4=np.hstack((Zf_wide[(RAf_wide<np.min(RAf_wide)+0.06*3)],Zf_wide[(DECf_wide>np.mean(DECf_wide))&(RAf_wide>np.min(RAf_wide)+0.06*3)] ))
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide4):
phi = np.sqrt((RAf_wide4[k]-RAf_wide4[k+1:])**2+(DECf_wide4[k]-DECf_wide4[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide4[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide4[k+1:]).value)*0.7)
kab_wide4 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide4 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide4 = np.append(kab_wide4, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide4 = np.append(err_wide4, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp = np.append(binp, bini + (bins[k+1]-bini)/2)
ax = plt.figure().add_subplot(111)
ax.scatter(binp, kab_wide4, s=10, c = 'b', marker='o')
horiz_line = np.array([7/45 for m in range(len(kab_wide4))])
ax.errorbar(binp, kab_wide4, yerr=err_wide4, xerr=None, c = 'b', ls='None', capsize=2, elinewidth=1)
ax.plot(binp, horiz_line, 'k-', linewidth = 1)
plt.xlabel(r'$R_{ij}$ [$h^{-1}$Mpc]', fontsize=14)
plt.ylabel(r'$K^{0,7}_{7,45}$', fontsize=14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(direction='in', which='both')
ax.yaxis.set_tick_params(direction='in', which='both')
plt.tick_params(labelsize = 'large')
ax.set_xscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
plt.tight_layout()
plt.grid(False)
plt.show()
#define polygons for the jackknife zones
#cross point of lines, vertices for a future polygon
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.97 # slope & intercept (left vertical line)
x1 = (b1-b2) / (m2-m1)
y1 = m1 * x1 + b1
print('intersecting point of lines',x1,y1)
m1, b1 = 0.4192, -50.00213 # slope & intercept (top horizontal line)
m2, b2 = -2.664, 113.97 # slope & intercept (left vertical line)
x2 = (b1-b2) / (m2-m1)
y2 = m1 * x2 + b1
print('intersecting point of lines',x2,y2)
m1, b1 = 0.4192, -50.00213 # slope & intercept (top horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x3 = (b1-b2) / (m2-m1)
y3 = m1 * x3 + b1
print('intersecting point of lines',x3,y3)
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x4 = (b1-b2) / (m2-m1)
y4 = m1 * x4 + b1
print('intersecting point of lines',x4,y4)
#Create a Polygon with the intersecting points, third subsample
coords = [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
poly = Polygon(coords)
RAf_wide5=np.array([])
DECf_wide5=np.array([])
Zf_wide5=np.array([])
for i, item in enumerate(RAf_wide):
if Point(item,DECf_wide[i]).within(poly)==False: #check if point is within the polygon: true false answer
RAf_wide5=np.append(RAf_wide5,item)
DECf_wide5=np.append(DECf_wide5,DECf_wide[i])
Zf_wide5=np.append(Zf_wide5,Zf_wide[i])
#clustering computation
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide5):
phi = np.sqrt((RAf_wide5[k]-RAf_wide5[k+1:])**2+(DECf_wide5[k]-DECf_wide5[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide5[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide5[k+1:]).value)*0.7)
kab_wide5 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide5 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide5 = np.append(kab_wide5, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide5 = np.append(err_wide5, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp = np.append(binp, bini + (bins[k+1]-bini)/2)
ax = plt.figure().add_subplot(111)
ax.scatter(binp, kab_wide5, s=10, c = 'b', marker='o')
horiz_line = np.array([7/45 for m in range(len(kab_wide5))])
ax.errorbar(binp, kab_wide5, yerr=err_wide5, xerr=None, c = 'b', ls='None', capsize=2, elinewidth=1)
ax.plot(binp, horiz_line, 'k-', linewidth = 1)
plt.xlabel(r'$R_{ij}$ [$h^{-1}$Mpc]', fontsize=14)
plt.ylabel(r'$K^{0,7}_{7,45}$', fontsize=14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(direction='in', which='both')
ax.yaxis.set_tick_params(direction='in', which='both')
plt.tick_params(labelsize = 'large')
ax.set_xscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
plt.tight_layout()
plt.grid(False)
plt.show()
#cross point of lines, vertices for a future polygon
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.97 # slope & intercept (left vertical line)
x1 = (b1-b2) / (m2-m1)
y1 = m1 * x1 + b1
print('intersecting point of lines',x1,y1)
m1, b1 = 0.4192, -50.14713 # slope & intercept (bottom horizontal line)
m2, b2 = -2.664, 113.97 # slope & intercept (left vertical line)
x2 = (b1-b2) / (m2-m1)
y2 = m1 * x2 + b1
print('intersecting point of lines',x2,y2)
m1, b1 = 0.4192, -50.14713 # slope & intercept (bottom horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x3 = (b1-b2) / (m2-m1)
y3 = m1 * x3 + b1
print('intersecting point of lines',x3,y3)
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x4 = (b1-b2) / (m2-m1)
y4 = m1 * x4 + b1
print('intersecting point of lines',x4,y4)
#Create a Polygon with the intersecting points, fourth subsample
coords = [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
poly = Polygon(coords)
RAf_wide6=np.array([])
DECf_wide6=np.array([])
Zf_wide6=np.array([])
for i, item in enumerate(RAf_wide):
if Point(item,DECf_wide[i]).within(poly)==False:
RAf_wide6=np.append(RAf_wide6,item)
DECf_wide6=np.append(DECf_wide6,DECf_wide[i])
Zf_wide6=np.append(Zf_wide6,Zf_wide[i])
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide6):
phi = np.sqrt((RAf_wide6[k]-RAf_wide6[k+1:])**2+(DECf_wide6[k]-DECf_wide6[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide6[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide6[k+1:]).value)*0.7)
kab_wide6 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide6 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide6 = np.append(kab_wide6, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide6 = np.append(err_wide6, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp = np.append(binp, bini + (bins[k+1]-bini)/2)
ax = plt.figure().add_subplot(111)
ax.scatter(binp, kab_wide6, s=10, c = 'b', marker='o')
horiz_line = np.array([7/45 for m in range(len(kab_wide6))])
ax.errorbar(binp, kab_wide6, yerr=err_wide6, xerr=None, c = 'b', ls='None', capsize=2, elinewidth=1)
ax.plot(binp, horiz_line, 'k-', linewidth = 1)
plt.xlabel(r'$R_{ij}$ [$h^{-1}$Mpc]', fontsize=14)
plt.ylabel(r'$K^{0,7}_{7,45}$', fontsize=14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(direction='in', which='both')
ax.yaxis.set_tick_params(direction='in', which='both')
plt.tick_params(labelsize = 'large')
ax.set_xscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
plt.tight_layout()
plt.grid(False)
plt.show()
#cross point of lines, vertices for a future polygon
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x1 = (b1-b2) / (m2-m1)
y1 = m1 * x1 + b1
print('intersecting point of lines',x1,y1)
m1, b1 = 0.4192, -50.00213 # slope & intercept (top horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x2 = (b1-b2) / (m2-m1)
y2 = m1 * x2 + b1
print('intersecting point of lines',x2,y2)
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x3 = (b1-b2) / (m2-m1)
y3 = m1 * x3 + b1
print('intersecting point of lines',x3,y3)
m1, b1 = 0.4192, -50.00213 # slope & intercept (top horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x4 = (b1-b2) / (m2-m1)
y4 = m1 * x4 + b1
print('intersecting point of lines',x4,y4)
#Create a Polygon with the intersecting points
coords = [(x1, y1), (x2, y2), (x4, y4), (x3, y3)] #careful bc the order of the vertices matters
poly = Polygon(coords)
RAf_wide7=np.array([])
DECf_wide7=np.array([])
Zf_wide7=np.array([])
for i, item in enumerate(RAf_wide):
if Point(item,DECf_wide[i]).within(poly)==False:
RAf_wide7=np.append(RAf_wide7,item)
DECf_wide7=np.append(DECf_wide7,DECf_wide[i])
Zf_wide7=np.append(Zf_wide7,Zf_wide[i])
#check that the zone was excluded correctly
cm = plt.cm.get_cmap('jet')
fig = plt.figure().add_subplot(111)
plt.scatter(RAf_wide7, DECf_wide7, s=10, c=Zf_wide7, marker='o', cmap=cm)
plt.gca().invert_xaxis()
plt.scatter(x1,y1,marker='x',color='r',s=30)
plt.scatter(x2,y2,marker='x',color='r',s=30)
plt.scatter(x3,y3,marker='x',color='r',s=30)
plt.scatter(x4,y4,marker='x',color='r',s=30)
plt.plot(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)]+0.01,dec_cut(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)],-2.664,113.95),color='k')
plt.plot(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)]-0.055,dec_cut(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)],-2.664,113.95),color='k')
plt.plot(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)]-0.115,dec_cut(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)],-2.664,113.95),color='k')
plt.plot(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)]-0.175,dec_cut(RAf_wide[(RAf_wide<53.25)&(RAf_wide>53.16)],-2.664,113.95),color='k')
plt.plot(RAf_wide,dec_cut(RAf_wide,0.4192,-50.07713),color='k')
plt.plot(RAf_wide,dec_cut(RAf_wide,0.4192,-50.00213),color='k')
plt.plot(RAf_wide,dec_cut(RAf_wide,0.4192,-50.14713),color='k')
fig.xaxis.set_ticks_position('both')
fig.yaxis.set_ticks_position('both')
fig.xaxis.set_tick_params(direction='in', which='both')
fig.yaxis.set_tick_params(direction='in', which='both')
plt.xlabel("RA", fontsize=14)
plt.ylabel("Dec", fontsize=14)
plt.grid(False)
plt.tight_layout()
plt.show()
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide7):
phi = np.sqrt((RAf_wide7[k]-RAf_wide7[k+1:])**2+(DECf_wide7[k]-DECf_wide7[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide7[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide7[k+1:]).value)*0.7)
kab_wide7 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide7 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide7 = np.append(kab_wide7, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide7 = np.append(err_wide7, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp = np.append(binp, bini + (bins[k+1]-bini)/2)
ax = plt.figure().add_subplot(111)
ax.scatter(binp, kab_wide7, s=10, c = 'b', marker='o')
horiz_line = np.array([7/45 for m in range(len(kab_wide7))])
ax.errorbar(binp, kab_wide7, yerr=err_wide7, xerr=None, c = 'b', ls='None', capsize=2, elinewidth=1)
ax.plot(binp, horiz_line, 'k-', linewidth = 1)
plt.xlabel(r'$R_{ij}$ [$h^{-1}$Mpc]', fontsize=14)
plt.ylabel(r'$K^{0,7}_{7,45}$', fontsize=14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(direction='in', which='both')
ax.yaxis.set_tick_params(direction='in', which='both')
plt.tick_params(labelsize = 'large')
ax.set_xscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
plt.tight_layout()
plt.grid(False)
plt.show()
#cross point of lines, vertices for a future polygon
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x1 = (b1-b2) / (m2-m1)
y1 = m1 * x1 + b1
print('intersecting point of lines',x1,y1)
m1, b1 = 0.4192, -50.14713 # slope & intercept (bottom horizontal line)
m2, b2 = -2.664, 113.82 # slope & intercept (middle1 vertical line)
x2 = (b1-b2) / (m2-m1)
y2 = m1 * x2 + b1
print('intersecting point of lines',x2,y2)
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x3 = (b1-b2) / (m2-m1)
y3 = m1 * x3 + b1
print('intersecting point of lines',x3,y3)
m1, b1 = 0.4192, -50.14713 # slope & intercept (bottom horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x4 = (b1-b2) / (m2-m1)
y4 = m1 * x4 + b1
print('intersecting point of lines',x4,y4)
#Create a Polygon with the intersecting points
coords = [(x1, y1), (x2, y2), (x4, y4), (x3, y3)] #careful bc the order of the vertices matters
poly = Polygon(coords)
RAf_wide8=np.array([])
DECf_wide8=np.array([])
Zf_wide8=np.array([])
for i, item in enumerate(RAf_wide):
if Point(item,DECf_wide[i]).within(poly)==False:
RAf_wide8=np.append(RAf_wide8,item)
DECf_wide8=np.append(DECf_wide8,DECf_wide[i])
Zf_wide8=np.append(Zf_wide8,Zf_wide[i])
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide8):
phi = np.sqrt((RAf_wide8[k]-RAf_wide8[k+1:])**2+(DECf_wide8[k]-DECf_wide8[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide8[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide8[k+1:]).value)*0.7)
kab_wide8 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide8 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide8 = np.append(kab_wide8, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide8 = np.append(err_wide8, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp = np.append(binp, bini + (bins[k+1]-bini)/2)
ax = plt.figure().add_subplot(111)
ax.scatter(binp, kab_wide8, s=10, c = 'b', marker='o')
horiz_line = np.array([7/45 for m in range(len(kab_wide8))])
ax.errorbar(binp, kab_wide8, yerr=err_wide8, xerr=None, c = 'b', ls='None', capsize=2, elinewidth=1)
ax.plot(binp, horiz_line, 'k-', linewidth = 1)
plt.xlabel(r'$R_{ij}$ [$h^{-1}$Mpc]', fontsize=14)
plt.ylabel(r'$K^{0,7}_{7,45}$', fontsize=14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(direction='in', which='both')
ax.yaxis.set_tick_params(direction='in', which='both')
plt.tick_params(labelsize = 'large')
ax.set_xscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
plt.tight_layout()
plt.grid(False)
plt.show()
#cross point of lines, vertices for a future polygon
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x1 = (b1-b2) / (m2-m1)
y1 = m1 * x1 + b1
print('intersecting point of lines',x1,y1)
m1, b1 = 0.4192, -50.00213 # slope & intercept (top horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x2 = (b1-b2) / (m2-m1)
y2 = m1 * x2 + b1
print('intersecting point of lines',x2,y2)
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.95-0.47 # slope & intercept (right vertical line)
x3 = (b1-b2) / (m2-m1)
y3 = m1 * x3 + b1
print('intersecting point of lines',x3,y3)
m1, b1 = 0.4192, -50.00213 # slope & intercept (top horizontal line)
m2, b2 = -2.664, 113.95-0.47 # slope & intercept (right vertical line)
x4 = (b1-b2) / (m2-m1)
y4 = m1 * x4 + b1
print('intersecting point of lines',x4,y4)
#Create a Polygon with the intersecting points
coords = [(x1, y1), (x2, y2), (x4, y4), (x3, y3)] #careful bc the order of the vertices matters
poly = Polygon(coords)
RAf_wide9=np.array([])
DECf_wide9=np.array([])
Zf_wide9=np.array([])
for i, item in enumerate(RAf_wide):
if Point(item,DECf_wide[i]).within(poly)==False:
RAf_wide9=np.append(RAf_wide9,item)
DECf_wide9=np.append(DECf_wide9,DECf_wide[i])
Zf_wide9=np.append(Zf_wide9,Zf_wide[i])
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide9):
phi = np.sqrt((RAf_wide9[k]-RAf_wide9[k+1:])**2+(DECf_wide9[k]-DECf_wide9[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide9[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide9[k+1:]).value)*0.7)
kab_wide9 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide9 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide9 = np.append(kab_wide9, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide9 = np.append(err_wide9, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp = np.append(binp, bini + (bins[k+1]-bini)/2)
ax = plt.figure().add_subplot(111)
ax.scatter(binp, kab_wide9, s=10, c = 'b', marker='o')
horiz_line = np.array([7/45 for m in range(len(kab_wide9))])
ax.errorbar(binp, kab_wide9, yerr=err_wide9, xerr=None, c = 'b', ls='None', capsize=2, elinewidth=1)
ax.plot(binp, horiz_line, 'k-', linewidth = 1)
plt.xlabel(r'$R_{ij}$ [$h^{-1}$Mpc]', fontsize=14)
plt.ylabel(r'$K^{0,7}_{7,45}$', fontsize=14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(direction='in', which='both')
ax.yaxis.set_tick_params(direction='in', which='both')
plt.tick_params(labelsize = 'large')
ax.set_xscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
plt.tight_layout()
plt.grid(False)
plt.show()
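#hedged sketch (not part of the original script): once the K-estimator has been
#measured with each jackknife zone removed in turn, the covariance matrix can
#be assembled with the standard jackknife formula
#C_ij = (N-1)/N * sum_k (K_k,i - <K_i>)*(K_k,j - <K_j>), with N jackknife zones
#the helper below only illustrates that formula and is not called automatically
def jackknife_covariance(k_measurements):
    k = np.asarray(k_measurements)   # shape (N_jackknife, N_bins)
    n_jk = k.shape[0]
    diff = k - k.mean(axis=0)        # deviation of each realisation from the mean
    return (n_jk - 1) / n_jk * diff.T @ diff
#e.g. cov = jackknife_covariance([kab_wide3, kab_wide4, kab_wide5, kab_wide6,
#                                 kab_wide7, kab_wide8, kab_wide9])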
#cross point of lines, vertices for a future polygon
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x1 = (b1-b2) / (m2-m1)
y1 = m1 * x1 + b1
print('intersecting point of lines',x1,y1)
m1, b1 = 0.4192, -50.07713 # slope & intercept (middle horizontal line)
m2, b2 = -2.664, 113.95-0.47 # slope & intercept (right vertical line)
x2 = (b1-b2) / (m2-m1)
y2 = m1 * x2 + b1
print('intersecting point of lines',x2,y2)
m1, b1 = 0.4192, -50.14713 # slope & intercept (bottom horizontal line)
m2, b2 = -2.664, 113.95-0.3 # slope & intercept (middle2 vertical line)
x3 = (b1-b2) / (m2-m1)
y3 = m1 * x3 + b1
print('intersecting point of lines',x3,y3)
m1, b1 = 0.4192, -50.14713 # slope & intercept (bottom horizontal line)
m2, b2 = -2.664, 113.95-0.47 # slope & intercept (right vertical line)
x4 = (b1-b2) / (m2-m1)
y4 = m1 * x4 + b1
print('intersecting point of lines',x4,y4)
#Create a Polygon with the intersecting points
coords = [(x1, y1), (x2, y2), (x4, y4), (x3, y3)] #careful bc the order of the vertices matters
poly = Polygon(coords)
RAf_wide10=np.array([])
DECf_wide10=np.array([])
Zf_wide10=np.array([])
for i, item in enumerate(RAf_wide):
if Point(item,DECf_wide[i]).within(poly)==False:
RAf_wide10=np.append(RAf_wide10,item)
DECf_wide10=np.append(DECf_wide10,DECf_wide[i])
Zf_wide10=np.append(Zf_wide10,Zf_wide[i])
zij = np.array([])
rij = np.array([])
phi = np.array([])
for k, zk in enumerate(Zf_wide10):
phi = np.sqrt((RAf_wide10[k]-RAf_wide10[k+1:])**2+(DECf_wide10[k]-DECf_wide10[k+1:])**2)*math.pi/180
rij= np.append(rij, cosmo.comoving_distance((zk + Zf_wide10[k+1:])/2).value * phi * 0.7)
zij = np.append(zij, abs(cosmo.comoving_distance(zk).value-cosmo.comoving_distance(Zf_wide10[k+1:]).value)*0.7)
kab_wide10 = np.array([])
bins=np.array([0.155,0.17,0.42,0.595,1.09,1.79,3.5,6.,11,20,35])
err_wide10 = np.array([])
binp = np.array([])
for k, bini in enumerate(bins):
if k < len(bins)-1:
idxtrans = (rij >= bini) & (rij < (bini+bins[k+1]))
idxlos1 = (zij > 0) & (zij < 7)
idxlos2 = (zij > 0) & (zij < 45)
kab_wide10 = np.append(kab_wide10, sum(idxtrans & idxlos1)/sum(idxtrans & idxlos2))
err_wide10 = np.append(err_wide10, math.sqrt(sum(idxtrans & idxlos1))/sum(idxtrans & idxlos2))
binp =
|
np.append(binp, bini + (bins[k+1]-bini)/2)
|
numpy.append
|
from __future__ import print_function
# Copyright (c) 2015, Danish Geodata Agency <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
######################################
# Grid class below - just a numpy array and some metadata + some useful methods
####################################
from builtins import range
from builtins import object
import numpy as np
import os
from osgeo import gdal
from osgeo import osr
import ctypes
try:
import scipy.ndimage as image
except:
HAS_NDIMAGE = False
else:
HAS_NDIMAGE = True
LIBDIR = os.path.realpath(os.path.join(os.path.dirname(__file__), "lib"))
LIBNAME = "libgrid"
XY_TYPE =
|
np.ctypeslib.ndpointer(dtype=np.float64, flags=['C', 'O', 'A', 'W'])
|
numpy.ctypeslib.ndpointer
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH " \
"- Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "Aug 21, 2021"
class Eschelby:
"""
Anisotropic elasticity theory for dislocations described by
[Eschelby](https://doi.org/10.1016/0001-6160(53)90099-6).
All notations follow the original paper.
"""
def __init__(self, elastic_tensor, burgers_vector):
self.elastic_tensor = elastic_tensor
self.burgers_vector = burgers_vector
self.fit_range = np.linspace(0, 1, 10)
self._p = None
self._Ak = None
self._D = None
def _get_pmat(self, x):
return (
self.elastic_tensor[:, 0, :, 0]
+ np.einsum(
'...,ij->...ij', x, self.elastic_tensor[:, 0, :, 1]+self.elastic_tensor[:, 1, :, 0]
)
+ np.einsum('...,ij->...ij', x**2, self.elastic_tensor[:, 1, :, 1])
)
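    # Note added for clarity: _get_pmat(p) assembles the matrix
    # c_{i1k1} + p*(c_{i1k2} + c_{i2k1}) + p^2*c_{i2k2}; the `p` property below
    # takes the roots of its determinant, i.e. it solves the sextic equation of
    # the Eshelby-Read-Shockley anisotropic dislocation theory.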
@property
def p(self):
if self._p is None:
coeff = np.polyfit(self.fit_range, np.linalg.det(self._get_pmat(self.fit_range)), 6)
self._p = np.roots(coeff)
self._p = self._p[
|
np.imag(self._p)
|
numpy.imag
|
#!/usr/bin/env python3
import logging
import numpy as np
import copy
import crosstalk
import gates
import predistortion
import pulses
import qubits
import readout
import tomography
# Allow logging to Labber's instrument log
log = logging.getLogger('LabberDriver')
# TODO Select qubits to benchmark (all sequences?) with check boxes
# TODO Add phase tracking of readout
# TODO Reduce calc of CZ by finding all unique TwoQubitGates in seq and calc.
# TODO Make I(width=None) have the width of the longest gate in the step
# TODO Add checks so that not both t0 and dt are given
# TODO test demod with some data
# TODO Two composite gates should be able to be parallell
# TODO implement eq test for gates
# TODO check number of qubits in seq and in gate added to seq
class GateOnQubit:
def __init__(self, gate, qubit, pulse=None):
self.gate = gate
self.qubit = qubit
self.pulse = pulse
if pulse is None:
self.duration = 0
else:
self.duration = pulse.total_duration()
def __str__(self):
return "Gate {} on qubit {}".format(self.gate, self.qubit)
def __repr__(self):
return self.__str__()
class Step:
"""Represent one step in a sequence.
Parameters
----------
n_qubit : int
        Number of qubits in the sequence.
t0 : float
Center of the sequence in seconds (the default is None).
dt : float
Spacing to previous pulse in seconds (the default is None). Use only
either t0 or dt.
align : str {'left', 'center', 'right'}
The alignment of pulses if they have different lengths,
(the default is 'center').
Attributes
----------
    gates : list of :obj:`GateOnQubit`
        The different gates in the step.
"""
def __init__(self, t0=None, dt=None, align='center'):
self.gates = []
self.align = align
self.t0 = t0
self.dt = dt
self.t_start = None
self.t_end = None
def add_gate(self, qubit, gate):
"""Add the given gate to the specified qubit(s).
The number of gates must equal the number of qubits.
        If the number of qubits given is less than the number of qubits in the
step, I gates are added to the other qubits.
Parameters
----------
qubit : int or list of int
The qubit indices.
gate : :obj:`BaseGate`
The gate(s).
"""
if gate.number_of_qubits() > 1 and not isinstance(qubit, list):
raise ValueError("Please provide a list of qubits for gates with more than one qubit")
if gate.number_of_qubits() > 1 and not gate.number_of_qubits() == len(qubit):
raise ValueError("Number of qubits in the gate must be equal to the number of qubit indices given")
if gate.number_of_qubits() == 1 and not isinstance(qubit, int):
raise ValueError("Please provide qubit as int for gates with one qubit")
if isinstance(qubit, int):
if self._qubit_in_step(qubit):
raise ValueError("Qubit {} already in step.".format(qubit))
else:
for n in qubit:
if self._qubit_in_step(n):
raise ValueError("Qubit {} already in step.".format(n))
self.gates.append(GateOnQubit(gate, qubit))
def time_shift(self, shift):
"""Shift the timings of the step.
Parameters
----------
shift : float
The amount of shift to apply in seconds.
"""
self.t_start += shift
self.t0 += shift
self.t_end += shift
def _qubit_in_step(self, qubit):
if not isinstance(qubit, int):
raise ValueError("Qubit index should be int.")
def _in(input_list, n):
flat_list = []
for sublist_or_el in input_list:
if isinstance(sublist_or_el, list):
if _in(sublist_or_el, n) == True:
return True
elif sublist_or_el == n:
return True
return False
return _in([x.qubit for x in self.gates], qubit)
def __str__(self):
return str(self.gates)
def __repr__(self):
return str(self.gates)
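# Illustrative sketch (the gate name gates.Xp is an assumption taken from the
# add_gates() docstring further down; this helper is not called anywhere):
def _example_step():
    step = Step(dt=10E-9, align='center')
    step.add_gate(0, gates.Xp)  # positive pi pulse around X on qubit 0
    step.add_gate(1, gates.Xp)  # the same pulse on qubit 1, in the same step
    return step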
class Sequence:
"""A multi qubit seqence.
Parameters
----------
    n_qubit : int
The number of qubits in the sequence.
Attributes
----------
sequences : list of :obj:`Step`
Holds the steps of the sequence.
perform_process_tomography : bool
Flag for performing process tomography.
perform_state_tomography : bool
Flag for performing state tomography.
readout_delay : float
Delay time between last pulse and readout, in seconds.
n_qubit
"""
def __init__(self, n_qubit):
self.n_qubit = n_qubit
self.sequence_list = []
# process tomography
self.perform_process_tomography = False
self._process_tomography = tomography.ProcessTomography()
# state tomography
self.perform_state_tomography = False
self._state_tomography = tomography.StateTomography()
# readout
self.readout_delay = 0.0
# Public methods
def generate_sequence(self, config):
"""Generate sequence by adding gates/pulses to waveforms.
Parameters
----------
config : dict
Configuration as defined by Labber driver configuration window.
"""
raise NotImplementedError()
def get_sequence(self, config):
"""Compile sequence and return it.
Parameters
----------
config : dict
Labber instrument configuration.
Returns
-------
list of :obj:`Step`
The compiled qubit sequence.
"""
self.sequence_list = []
if self.perform_process_tomography:
self._process_tomography.add_pulses(self)
self.generate_sequence(config)
if self.perform_state_tomography:
self._state_tomography.add_pulses(self)
if self.readout_delay > 0:
delay = gates.IdentityGate(width=self.readout_delay)
self.add_gate_to_all(delay, dt=0)
self.add_gate_to_all(gates.ReadoutGate(), dt=0, align='left')
return self
# Public methods for adding pulses and gates to the sequence.
def add_single_pulse(self, qubit, pulse, t0=None, dt=None,
align_left=False):
"""Add single qubit pulse to specified qubit.
        This function still exists only to not break existing
        functionality. You should really use the add_gate method.
t0 or dt can be used to override the global pulse spacing.
Parameters
----------
qubit : int
Qubit number, indexed from 0.
pulse : :obj:`Pulse`
Definition of pulse to add.
t0 : float, optional
Absolute pulse position.
dt : float, optional
Pulse spacing, referenced to the previous pulse.
align_left: bool, optional
If True, aligns the pulse to the left. Defaults to False.
"""
gate = gates.CustomGate(pulse)
if align_left is True:
self.add_gate(qubit, gate, t0, dt, 'left')
else:
self.add_gate(qubit, gate, t0, dt, 'center')
def add_single_gate(self, qubit, gate, t0=None, dt=None, align_left=False):
"""Add single gate to specified qubit sequence.
        Note, this function still exists only to not break existing
        functionality. You should really use the add_gate method.
t0 or dt can be used to override the global pulse spacing.
Parameters
----------
qubit : int
Qubit number, indexed from 0.
gate : :obj:`Gate`
Definition of gate to add.
t0 : float, optional
Absolute pulse position.
dt : float, optional
Pulse spacing, referenced to the previous pulse.
align_left : boolean, optional
If True, t0 is the start of the pulse, otherwise it is the center
of the pulse. False is the default.
"""
if align_left is True:
self.add_gate(qubit, gate, t0, dt, 'left')
else:
self.add_gate(qubit, gate, t0, dt, 'center')
def add_gate(self, qubit, gate, t0=None, dt=None, align='center', index=None):
"""Add a set of gates to the given qubit sequences.
        For the qubits with no specified gate, an IdentityGate will be given.
The length of the step is given by the longest pulse.
Parameters
----------
qubit : int or list of int
The qubit(s) to add the gate(s) to.
gate : :obj:`BaseGate` or list of :obj:`BaseGate`
The gate(s) to add.
t0 : float, optional
Absolute gate position (the default is None).
dt : float, optional
Gate spacing, referenced to the previous pulse
(the default is None).
align : str, optional
            If two or more qubits have different pulse lengths, `align`
specifies how those pulses should be aligned. 'Left' aligns the
start, 'center' aligns the centers, and 'right' aligns the end,
(the default is 'center').
index : int, optional
Where in the sequence to insert the new gate. Default is at the end.
"""
step = Step(t0=t0, dt=dt, align=align)
if isinstance(gate, list):
if len(gate) == 1:
raise ValueError("For single gates, don't provide gate as a list.")
if not isinstance(qubit, list):
raise ValueError("Please provide qubit indices as a list when adding more thab one gate.")
if len(gate) != len(qubit):
raise ValueError("Length of gate list must be equal to length of qubit list.")
for q, g in zip(qubit, gate):
step.add_gate(q, g)
else:
if gate.number_of_qubits() > 1:
if not isinstance(qubit, list):
raise ValueError("Please provide qubit list for gates with more than one qubit.")
else:
if not isinstance(qubit, int):
raise ValueError("For single gates, give qubit as int (not list).")
step.add_gate(qubit, gate)
if index is None:
self.sequence_list.append(step)
else:
self.sequence_list.insert(index+1, step)
def add_gate_to_all(self, gate, t0=None, dt=None, align='center'):
"""Add a single gate to all qubits.
Pulses are added at the end of the sequence, with the gate spacing set
        by either the spacing parameter or the absolute position.
"""
if isinstance(gate, list):
raise ValueError("Only single gates allowed.")
if isinstance(gate, (gates.BaseGate, gates.CompositeGate)):
if gate.number_of_qubits() > 1:
raise ValueError("Not clear how to add multi-qubit gates to all qubits.")
qubit = list(range((self.n_qubit)))
gate = [gate for n in range(self.n_qubit)]
# Single qubit gates shouldn't be lists
if len(qubit) == 1:
qubit = qubit[0]
gate = gate[0]
self.add_gate(qubit,
gate,
t0=t0,
dt=dt,
align=align)
def add_gates(self, gates):
"""Add multiple gates to the qubit waveform.
Pulses are added at the end of the sequence, with the gate spacing set
by the spacing parameter.
Examples
--------
Add three gates to a two-qubit sequence, first a positive pi-pulse
around X to qubit 1, then a negative pi/2-pulse to qubit 2, finally
simultaneous positive pi-pulses to qubits 1 and 2.
>>> add_gates([[gates.Xp, None ],
[None, gates.Y2m],
[gates.Xp, gates.Xp]])
Parameters
----------
gates : list of list of :obj:`BaseGate`
List of lists defining gates to add. The innermost list should
have the same length as number of qubits in the sequence.
"""
# make sure we have correct input
if not isinstance(gates, (list, tuple)):
raise Exception('The input must be a list of list with gates')
if len(gates) == 0:
return
if not isinstance(gates[0], (list, tuple)):
raise Exception('The input must be a list of list with gates')
# add gates sequence to waveforms
for gate in gates:
# add gate to specific qubit waveform
qubit = list(range(len(gate)))
# Single qubit gates shouldn't be lists
if len(qubit) == 1:
qubit = qubit[0]
gate = gate[0]
self.add_gate(qubit, gate)
def set_parameters(self, config={}):
"""Set base parameters using config from from Labber driver.
Parameters
----------
config : dict
Configuration as defined by Labber driver configuration window
"""
# sequence parameters
d = dict(Zero=0, One=1, Two=2, Three=3, Four=4, Five=5, Six=6, Seven=7,
Eight=8, Nine=9)
# If the number of qubits changed, we need to re-init
if self.n_qubit != d[config.get('Number of qubits')]:
self.__init__(d[config.get('Number of qubits')])
# Readout
self.readout_delay = config.get('Readout delay')
# process tomography prepulses
self.perform_process_tomography = \
config.get('Generate process tomography prepulse', False)
self._process_tomography.set_parameters(config)
# state tomography
self.perform_state_tomography = config.get(
'Generate state tomography postpulse', False)
self._state_tomography.set_parameters(config)
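# Hedged sketch of the intended usage pattern: a concrete sequence subclasses
# Sequence and implements generate_sequence(), which get_sequence() invokes
# between the optional tomography pre- and post-pulses. The gate name is again
# an assumption based on the docstrings above; this class is only an example.
class _ExamplePiPulseSequence(Sequence):
    def generate_sequence(self, config):
        # one pi pulse on every qubit; readout is appended by get_sequence()
        self.add_gate_to_all(gates.Xp)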
class SequenceToWaveforms:
"""Compile a multi qubit sequence into waveforms.
Parameters
----------
    n_qubit : int
The maximum number of qubits.
Attributes
----------
dt : float
Pulse spacing, in seconds.
local_xy : bool
If False, collate all waveforms into one.
simultaneous_pulses : bool
        If False, separate all pulses in time.
sample_rate : float
AWG Sample rate.
n_pts : float
Number of points in the waveforms.
first_delay : float
Delay between start of waveform and start of the first pulse.
trim_to_sequence : bool
If True, adjust `n_points` to just fit the sequence.
align_to_end : bool
Align the whole sequence to the end of the waveforms.
Only relevant if `trim_to_sequence` is False.
sequences : list of :obj:`Step`
The qubit sequences.
qubits : list of :obj:`Qubit`
Parameters of each qubit.
    wave_xy_delays : list of float
        Individual delays for the XY waveforms.
    wave_z_delays : list of float
        Individual delays for the Z waveforms.
n_qubit
"""
def __init__(self, n_qubit):
self.n_qubit = n_qubit
self.dt = 10E-9
self.local_xy = True
self.simultaneous_pulses = True
# waveform parameter
self.sample_rate = 1.2E9
self.n_pts = 240E3
self.first_delay = 100E-9
self.trim_to_sequence = True
self.align_to_end = False
self.sequence_list = []
self.qubits = [qubits.Qubit() for n in range(self.n_qubit)]
# waveforms
        self._wave_xy = [np.zeros(0, dtype=complex)
for n in range(self.n_qubit)]
self._wave_z = [np.zeros(0) for n in range(self.n_qubit)]
self._wave_gate = [np.zeros(0) for n in range(self.n_qubit)]
# waveform delays
self.wave_xy_delays = np.zeros(self.n_qubit)
self.wave_z_delays = np.zeros(self.n_qubit)
# define pulses
self.pulses_1qb_xy = [None for n in range(self.n_qubit)]
self.pulses_1qb_xy_12 = [None for n in range(self.n_qubit)]
self.pulses_1qb_SL = [None for n in range(self.n_qubit)]
self.pulses_1qb_z = [None for n in range(self.n_qubit)]
self.pulses_2qb = [None for n in range(self.n_qubit - 1)]
self.pulses_cplr = [None for n in range(self.n_qubit)]
self.pulses_tqb = [None for n in range(self.n_qubit)]
self.pulses_readout = [None for n in range(self.n_qubit)]
# cross-talk
self.compensate_crosstalk = False
self._crosstalk = crosstalk.Crosstalk()
# predistortion
self.perform_predistortion = False
self._predistortions = [predistortion.Predistortion(n)
for n in range(self.n_qubit)]
self._predistortions_z = [predistortion.ExponentialPredistortion(n)
for n in range(self.n_qubit)]
# gate switch waveform
self.generate_gate_switch = False
self.uniform_gate = False
self.gate_delay = 0.0
self.gate_overlap = 20E-9
self.minimal_gate_time = 20E-9
# readout trig settings
self.readout_trig_generate = False
# readout wave object and settings
self.readout = readout.Demodulation(self.n_qubit)
self.readout_trig = np.array([], dtype=float)
self.readout_iq = np.array([], dtype=np.complex)
def get_waveforms(self, sequence):
"""Compile the given sequence into waveforms.
Parameters
----------
sequence : :obj:`Sequence`
The qubit sequence to be compiled.
Returns
-------
dict
Dictionary of compiled waveforms with keys 'xy', 'z', 'gate', 'readout_trig' and 'readout_iq'.
"""
self.sequence = sequence
self.sequence_list = sequence.sequence_list
if not self.simultaneous_pulses:
self._seperate_gates()
self._explode_composite_gates()
self._add_pulses_and_durations()
self._add_timings()
self._init_waveforms()
if self.align_to_end:
shift = self._round((self.n_pts - 2) / self.sample_rate -
self.sequence_list[-1].t_end)
for step in self.sequence_list:
step.time_shift(shift)
self._perform_virtual_z()
self._generate_waveforms()
# collapse all xy pulses to one waveform if no local XY control
if not self.local_xy:
# sum all waveforms to first one
self._wave_xy[0] = np.sum(self._wave_xy[:self.n_qubit], 0)
# clear other waveforms
for n in range(1, self.n_qubit):
self._wave_xy[n][:] = 0.0
# if self.compensate_crosstalk:
# self._perform_crosstalk_compensation()
if self.perform_predistortion:
self._predistort_xy_waveforms()
if self.perform_predistortion_z:
self._predistort_z_waveforms()
if self.readout_trig_generate:
self._add_readout_trig()
if self.generate_gate_switch:
self._add_microwave_gate()
# Apply offsets
self.readout_iq += self.readout_i_offset + 1j * self.readout_q_offset
# create and return dictionary with waveforms
waveforms = dict()
waveforms['xy'] = self._wave_xy
waveforms['z'] = self._wave_z
waveforms['gate'] = self._wave_gate
waveforms['readout_trig'] = self.readout_trig
waveforms['readout_iq'] = self.readout_iq
return waveforms
def _seperate_gates(self):
new_sequences = []
for step in self.sequence_list:
if any(isinstance(gate, (gates.ReadoutGate, gates.IdentityGate))
for gate in step.gates):
# Don't separate I gates or readouts since we do
# multiplexed readout
new_sequences.append(step)
continue
for gate in step.gates:
if gate.gate is not None:
new_step = Step(t0=step.t_start,
dt=step.dt,
align=step.align)
new_step.add_gate(gate.qubit, gate.gate)
new_sequences.append(new_step)
self.sequence_list = new_sequences
def _add_timings(self):
t_start = 0
for step in self.sequence_list:
if step.dt is None and step.t0 is None:
# Use global pulse spacing
step.dt = self.dt
# Find longest gate in sequence
max_duration = np.max([x.duration for x in step.gates])
if step.t0 is None:
step.t_start = self._round(t_start + step.dt)
step.t0 = self._round(step.t_start + max_duration/2)
else:
step.t_start = self._round(step.t0 - max_duration/2)
step.t_end = self._round(step.t_start + max_duration)
t_start = step.t_end # Next step starts where this one ends
if max_duration == 0: # Avoid double spacing for steps with 0 duration
t_start = t_start - step.dt
# Make sure that the sequence is sorted chronologically.
# self.sequence_list.sort(key=lambda x: x.t_start) # TODO Fix this
# Make sure that the sequence starts at the first delay
time_diff = self._round(self.first_delay-self.sequence_list[0].t_start)
for step in self.sequence_list:
step.time_shift(time_diff)
def _add_pulses_and_durations(self):
for step in self.sequence_list:
for gate in step.gates:
if gate.pulse is None:
gate.pulse = self._get_pulse_for_gate(gate)
if gate.pulse is None:
gate.duration = 0
else:
gate.duration = gate.pulse.total_duration()
def _get_pulse_for_gate(self, gate):
qubit = gate.qubit
gate = gate.gate
# Virtual Z is special since it has no length
if isinstance(gate, gates.VirtualZGate):
pulse = None
# Get the corresponding pulse for other gates
elif isinstance(gate, gates.SingleQubitXYRotation):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_xy[qubit])
elif isinstance(gate, gates.SingleQubitXYRotation_12):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_xy_12[qubit])
elif isinstance(gate, gates.SpinlockingGate):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_SL[qubit])
elif isinstance(gate, gates.SingleQubitZRotation):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_z[qubit])
elif isinstance(gate, gates.IdentityGate):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_xy[qubit])
elif isinstance(gate, gates.RabiGate):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_xy[qubit])
elif isinstance(gate, gates.TwoQubitGate):
pulse = gate.get_adjusted_pulse(self.pulses_2qb[qubit[0]])
elif isinstance(gate, gates.CplrGate):
pulse = gate.get_adjusted_pulse(self.pulses_cplr[qubit])
elif isinstance(gate, gates.TQBGate):
pulse = gate.get_adjusted_pulse(self.pulses_tqb[qubit])
elif isinstance(gate, gates.ReadoutGate):
pulse = gate.get_adjusted_pulse(self.pulses_readout[qubit])
elif isinstance(gate, gates.CustomGate):
pulse = gate.get_adjusted_pulse(gate.pulse)
else:
raise ValueError('Please provide a pulse for {}'.format(gate))
return pulse
def _predistort_xy_waveforms(self):
"""Pre-distort the waveforms."""
# go through and predistort all xy waveforms
n_wave = self.n_qubit if self.local_xy else 1
for n in range(n_wave):
self._wave_xy[n] = self._predistortions[n].predistort(
self._wave_xy[n])
def _predistort_z_waveforms(self):
# go through and predistort all waveforms
for n in range(self.n_qubit):
self._wave_z[n] = self._predistortions_z[n].predistort(
self._wave_z[n])
def _perform_crosstalk_compensation(self):
"""Compensate for Z-control crosstalk."""
self._wave_z = self._crosstalk.compensate(self._wave_z)
def _explode_composite_gates(self):
# Keep looping through the sequence until all CompositeGates are removed
# Note that there could be nested CompositeGates
n = 0
while n < len(self.sequence_list):
step = self.sequence_list[n]
i = 0
while i < len(step.gates):
gate = step.gates[i]
if isinstance(gate.gate, gates.CompositeGate):
for m, g in enumerate(gate.gate.sequence):
new_gate = [x.gate for x in g.gates]
# Single gates shouldn't be lists
if len(new_gate) == 1:
new_gate = new_gate[0]
# Need to translate composite qubit number to device qubit number
new_qubit = [x.qubit for x in g.gates]
for j, q in enumerate(new_qubit):
if isinstance(q, int):
if isinstance(gate.qubit, int):
new_qubit[j] = gate.qubit
continue
new_qubit[j] = gate.qubit[q]
else:
new_qubit[j] = []
for k in q:
new_qubit[j].append(gate.qubit[k])
# Single qubit shouldn't be lists
if len(new_qubit) == 1:
new_qubit = new_qubit[0]
self.sequence.add_gate(new_qubit, new_gate, index=n+m)
del step.gates[i]
continue
i = i + 1
n = n + 1
# Remove any empty steps where the composite gates were
i = 0
while i < len(self.sequence_list):
step = self.sequence_list[i]
if len(step.gates) == 0:
del self.sequence_list[i]
continue
i = i + 1
def _perform_virtual_z(self):
"""Shifts the phase of pulses subsequent to virtual z gates."""
for qubit in range(self.n_qubit):
phase = 0
for step in self.sequence_list:
for gate in step.gates:
gate_obj = None
if qubit == gate.qubit: # TODO Allow for 2 qb
gate_obj = gate.gate
if isinstance(gate_obj, gates.VirtualZGate):
phase += gate_obj.theta
continue
if (isinstance(gate_obj, gates.SingleQubitXYRotation)
and phase != 0):
gate.gate = copy.copy(gate_obj)
gate.gate.phi += phase
# Need to recompute the pulse
gate.pulse = self._get_pulse_for_gate(gate)
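# Hedged illustration of the bookkeeping above (not from the original driver):
# a VirtualZGate with theta=np.pi/2 adds pi/2 to `phase`, so every later
# SingleQubitXYRotation on that qubit has its phi shifted by pi/2; a second
# virtual Z gate accumulates on top of the first.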
def _add_microwave_gate(self):
"""Create waveform for gating microwave switch."""
n_wave = self.n_qubit if self.local_xy else 1
# go through all waveforms
for n, wave in enumerate(self._wave_xy[:n_wave]):
if self.uniform_gate:
# the uniform gate is all ones
gate = np.ones_like(wave)
# if creating readout trig, turn off gate during readout
if self.readout_trig_generate:
gate[-int((self.readout_trig_duration -
self.gate_overlap -
self.gate_delay) * self.sample_rate):] = 0.0
else:
# non-uniform gate, find non-zero elements
gate = np.array(np.abs(wave) > 0.0, dtype=float)
# fix gate overlap
n_overlap = int(np.round(self.gate_overlap * self.sample_rate))
diff_gate = np.diff(gate)
indx_up = np.nonzero(diff_gate > 0.0)[0]
indx_down = np.nonzero(diff_gate < 0.0)[0]
# add extra elements to left and right for overlap
for indx in indx_up:
gate[max(0, indx - n_overlap):(indx + 1)] = 1.0
for indx in indx_down:
gate[indx:(indx + n_overlap + 1)] = 1.0
# fix gaps in gate shorter than min (look for 1>0)
diff_gate = np.diff(gate)
indx_up = np.nonzero(diff_gate > 0.0)[0]
indx_down = np.nonzero(diff_gate < 0.0)[0]
# ignore first transition if starting in zero
if gate[0] == 0:
indx_up = indx_up[1:]
n_down_up = min(len(indx_down), len(indx_up))
len_down = indx_up[:n_down_up] - indx_down[:n_down_up]
# find short gaps
short_gaps = np.nonzero(len_down < (self.minimal_gate_time *
self.sample_rate))[0]
for indx in short_gaps:
gate[indx_down[indx]:(1 + indx_up[indx])] = 1.0
# shift gate in time
n_shift = int(np.round(self.gate_delay * self.sample_rate))
if n_shift < 0:
n_shift = abs(n_shift)
gate = np.r_[gate[n_shift:], np.zeros((n_shift,))]
elif n_shift > 0:
gate = np.r_[np.zeros((n_shift,)), gate[:(-n_shift)]]
# make sure gate starts/ends in 0
gate[0] = 0.0
gate[-1] = 0.0
# store results
self._wave_gate[n] = gate
def _round(self, t, acc=1E-12):
"""Round the time `t` with a certain accuarcy `acc`.
Parameters
----------
t : float
The time to be rounded.
acc : float
The accuracy (the default is 1E-12).
Returns
-------
float
The rounded time.
"""
return int(np.round(t / acc)) * acc
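# Illustrative example (hedged, not part of the original driver): with the
# default acc of 1E-12, t = 1.23456789E-9 becomes
# int(np.round(1234.56789)) * 1E-12 = 1.235E-9.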
def _add_readout_trig(self):
"""Create waveform for readout trigger."""
trig = np.zeros_like(self.readout_iq)
start = (np.abs(self.readout_iq) > 0.0).nonzero()[0][0]
end = int(np.min((start +
self.readout_trig_duration * self.sample_rate,
self.n_pts_readout)))
trig[start:end] = self.readout_trig_amplitude
# make sure trig starts and ends in 0.
trig[0] = 0.0
trig[-1] = 0.0
self.readout_trig = trig
def _init_waveforms(self):
"""Initialize waveforms according to sequence settings."""
# To keep the first pulse delay, use the smallest delay as reference.
min_delay = np.min([self.wave_xy_delays[:self.n_qubit],
self.wave_z_delays[:self.n_qubit]])
self.wave_xy_delays -= min_delay
self.wave_z_delays -= min_delay
max_delay = np.max([self.wave_xy_delays[:self.n_qubit],
self.wave_z_delays[:self.n_qubit]])
# find the end of the sequence
# only include readout in size estimate if all waveforms have same size
if self.readout_match_main_size:
end = np.max([s.t_end for s in self.sequence_list]) + max_delay
else:
end = np.max([s.t_end for s in self.sequence_list[0:-1]]) + max_delay
# create empty waveforms of the correct size
if self.trim_to_sequence:
self.n_pts = int(np.ceil(end * self.sample_rate)) + 1
if self.n_pts % 2 == 1:
# Odd n_pts give spectral leakage in FFT
self.n_pts += 1
for n in range(self.n_qubit):
self._wave_xy[n] =
|
np.zeros(self.n_pts, dtype=np.complex)
|
numpy.zeros
|
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import rubin_sim.photUtils.Bandpass as Bandpass
import rubin_sim.photUtils.Sed as Sed
from make_bb import *
compw, compf = np.loadtxt('vandenberk_qsrcompspc.txt', usecols=(0,1), unpack=True)
def dcr_qsr(band, z, airmass, s_left, s_right, compw=compw, compf=compf):
# Read the LSST throughput curves.
filterlist = ['u', 'g', 'r', 'i', 'z', 'y']
filtercolors = {'u':'b', 'g':'c', 'r':'g', 'i':'orange', 'z':'r', 'y':'m'}
# Get the throughputs directory using the 'throughputs' package env variables.
#throughputsDir = os.getenv('LSST_THROUGHPUTS_BASELINE')
lsst = {}
for f in filterlist:
lsst[f] = Bandpass()
# Use os.path.join to join directory and filenames - it's safer.
#throughputsFile = os.path.join(throughputsDir, 'total_' + f + '.dat')
lsst[f].readThroughput('baseline/total_' + f + '.dat')
sb, w = lsst[band].sb*35, lsst[band].wavelen*10 #scale flux, conv nm to A
##Composite SED calculation
compw_c = np.copy(compw)
compw_c *= (1+z)
#take slice where band is non-zero
cleft = np.where(np.abs(compw_c - s_left) == np.abs(compw_c - s_left).min())[0][0]
cright = np.where(np.abs(compw_c - s_right) == np.abs(compw_c - s_right).min())[0][0]
wleft = np.where(np.abs(w - s_left) == np.abs(w - s_left).min())[0][0]
wright = np.where(np.abs(w - s_right) == np.abs(w - s_right).min())[0][0]
#Interp SED
f = interpolate.interp1d(w[wleft:wright],sb[wleft:wright], bounds_error=False, fill_value=0.0)
new_sb = f(compw_c[cleft:cright])
compf_clip = compf[cleft:cright]
compw_c_clip = compw_c[cleft:cright]
#Calc weff
w_eff = np.exp(np.sum(compf_clip * new_sb * np.log(compw_c_clip))/ np.sum(compf_clip * new_sb))
w_effm = w_eff / 1e4
#Calc index of refr
n = (10**-6 * (64.328 + (29498.1 / (146-(1/w_effm**2))) + (255.4 / (41 - (1/w_effm**2))))) + 1
#Calc R_0
R_0_sed = (n**2 - 1) / (2 * n**2)
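# Rough numeric check (hedged, illustrative only): for w_eff ~ 5000 A,
# w_effm = 0.5 um, so n - 1 ~ 2.8e-4 and R_0_sed ~ 2.8e-4 rad (~58 arcsec).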
##Calculate power-law stuff
pl_w = np.copy(compw_c)
pl_f = pl_w**(-0.5)
#take slice where band is non-zero
cleft = np.where(np.abs(pl_w - s_left) == np.abs(pl_w - s_left).min())[0][0]
cright = np.where(np.abs(pl_w - s_right) == np.abs(pl_w - s_right).min())[0][0]
'''
#Interp SED
f = interpolate.interp1d(w[wleft:wright],sb[wleft:wright], bounds_error=False, fill_value=0.0)
new_sb = f(pl_w[cleft:cright])
'''
pl_f_clip = pl_f[cleft:cright]
pl_w_clip = pl_w[cleft:cright]
#Calc weff
w_eff_pl = np.exp(np.sum(pl_f_clip * new_sb * np.log(pl_w_clip))/
|
np.sum(pl_f_clip * new_sb)
|
numpy.sum
|
"""
MiniMax Player with AlphaBeta pruning with light heuristic
"""
import time
from copy import deepcopy
import numpy as np
from SearchAlgos import GameUtils, GameState, AlphaBeta
from players.AbstractPlayer import AbstractPlayer
# TODO: you can import more modules, if needed
class Player(AbstractPlayer):
def __init__(self, game_time):
AbstractPlayer.__init__(self, game_time) # keep the inheritance of the parent's (AbstractPlayer) __init__()
# TODO: initialize more fields, if needed, and the AlphaBeta algorithm from SearchAlgos.py
self.utils = GameUtils
def set_game_params(self, board):
"""Set the game parameters needed for this player.
This function is called before the game starts.
(See GameWrapper.py for more info where it is called)
input:
- board: np.array, of the board.
No output is expected.
"""
# TODO: erase the following line and implement this function.
self.board = board
self.prev_board = None
self.my_pos = np.full(9, -1)
self.rival_pos = np.full(9, -1)
self.turn = 0
def make_move(self, time_limit):
"""Make move with this Player.
input:
- time_limit: float, time limit for a single turn.
output:
- direction: tuple, specifying the Player's movement
"""
# TODO: erase the following line and implement this function.
print(f'======================== Starting turn {self.turn} =========================')
state = GameState(deepcopy(self.board), self.prev_board, self.my_pos, self.rival_pos, self.turn,
time.time() + time_limit - 0.01, True)
search_algo = AlphaBeta(self.utils.utility_method, self.utils.successor_func, None, self.utils.check_goal)
depth = 4
best_move = (None, None)
print(f'trying depth {depth}')
# start_time = time.time()
temp_move = search_algo.search(state, depth, True)
# end_time = time.time()
# print(f'Depth: {depth}, Time: {end_time - start_time}')
# try:
# self.search_time_dict[self.turn].append(f'{depth}:{end_time - start_time}')
# self.search_time_list[self.turn].append(end_time - start_time)
# except KeyError:
# self.search_time_dict[self.turn] = [f'{depth}:{end_time - start_time}']
# self.search_time_list[self.turn] = [end_time - start_time]
if temp_move[1] is not None:
best_move = temp_move
else:
print(f'GOT NONE!')
move = best_move[1]
self.prev_board = deepcopy(self.board)
new_state = GameState(self.board, self.prev_board, self.my_pos, self.rival_pos, self.turn,
time.time() + time_limit)
GameUtils.perform_move(new_state, move, 1)
self.turn += 1
return move
def set_rival_move(self, move):
"""Update your info, given the new position of the rival.
input:
- move: tuple, the new position of the rival.
No output is expected
"""
# TODO: erase the following line and implement this function.
rival_pos, rival_soldier, my_dead_pos = move
if self.turn < 18:
# Currently, still in the first part of the game
# Update the board to include the new enemy soldier
self.board[rival_pos] = 2
# In the array containing the positions of all enemy soldiers, put in the index of the new soldier,
# it's position on the board
self.rival_pos[rival_soldier] = rival_pos
else:
# Now in the second part of the game
rival_prev_pos = self.rival_pos[rival_soldier]
self.board[rival_prev_pos] = 0
self.board[rival_pos] = 2
self.rival_pos[rival_soldier] = rival_pos
if my_dead_pos != -1:
# The enemy player has killed one of our soldiers
self.board[my_dead_pos] = 0
# Get from the board the index of the killed soldier
dead_soldier = int(
|
np.where(self.my_pos == my_dead_pos)
|
numpy.where
|
"""
Draw the curved-road simulation environment
author:ming.ustb
date:2019/4/3
"""
import numpy as np
import matplotlib.pyplot as plt
import model.simModel as vehicle
import math
# ==========================================
# Draw the lane line plot
# Basic parameters of the circle
r = 100.0
# Circle center coordinates
a, b = (0., 0.)
# ==========================================
# 参数方程
theta = np.arange(np.pi/2, np.pi, 0.01)
x = a + r * np.cos(theta)
y = b + r * np.sin(theta)
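# theta runs from pi/2 to pi, so (x, y) traces the upper-left quarter of the
# circle; the 3.75/2 offset below shifts the centerline inward by half a lane
# width (3.75, presumably metres).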
x2_center = a + (r-3.75/2) * np.cos(theta)
y2_center = b + (r-3.75/2) *
|
np.sin(theta)
|
numpy.sin
|
# Module for plotting and fitting EIS data
# (C) <NAME> 2020
import os
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize, basinhopping, differential_evolution, curve_fit, least_squares
from datetime import datetime, timedelta
import itertools
import re
from scipy.stats import iqr, mode
from scipy.special import binom
import inspect
from copy import copy
import warnings
from io import StringIO
#------------------
# Misc functions
#------------------
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def camel_case_split(identifier):
# from https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z0-9])|(?<=[A-Z0-9])(?=[A-Z0-9][a-z])|$)', identifier)
return [m.group(0) for m in matches]
def fit_r_squared(x,y,fit,weights=None):
"""
Calculate r squared for polynomial fit
Args:
x: x values
y: y values
fit: numpy polyfit output, or array of coefficients
weights: sample weights
"""
y_hat = np.polyval(fit,x)
return r_squared(y,y_hat,weights)
def r_squared(y,y_hat,weights=None):
"""
Calculate r squared for
Args:
y: y values
y_hat: predicted y values
weights: sample weights
"""
if weights is None:
ss_res = np.sum((y_hat-y)**2)#np.var(y_hat-y)
ss_tot = np.sum((y - np.mean(y))**2) #np.var(y)
else:
ss_res = np.sum(weights*(y_hat-y)**2)
ss_tot = np.sum(weights*(y-np.average(y,weights=weights))**2)
return 1-(ss_res/ss_tot)
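# Hedged sketch (not part of the original module): a tiny check of r_squared
# with made-up numbers, relying on the module-level numpy import.
def _example_r_squared():
    y = np.array([1.0, 2.0, 3.0, 4.0])
    y_hat = np.array([1.1, 1.9, 3.2, 3.8])
    # unweighted: 1 - SS_res / SS_tot
    return r_squared(y, y_hat)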
def reg_degree_polyfit(x,y,alpha,min_r2=0,weights=None,verbose=False):
"""
Regularized-degree polynomial fit. L2 regularization penalty applied to polynomial degree
Args:
x: x values
y: y values
alpha: regularization strength
min_r2: minimum r2. If specified, degree will be increased until this min value is achieved, even if overall score decreases
weights: weights for fit
verbose: if True, print info about best fit, plus previous and next degree fits
"""
best_score = -np.inf
deg = 1
r2 = -np.inf
while deg < len(x):
fit = np.polyfit(x,y,deg=deg,w=weights)
last_r2 = r2
r2 = fit_r_squared(x,y,fit,weights=weights)
score = r2 - alpha*deg**2
if score > best_score:# or r2 < min_r2:
#print(f'Deg {deg}, Case 1,','r2={},last r2={}'.format(round(r2,5),round(last_r2,5)))
best_fit = fit
best_score = score
best_deg = deg
deg += 1
elif last_r2 < min_r2:# and r2 >= min_r2:
#print(f'Deg {deg}, Case 2,','r2={},last r2={}'.format(round(r2,5),round(last_r2,5)))
best_fit = fit
best_score = score
best_deg = deg
deg += 1
#break
else:
break
if verbose==True:
print('Best score: degree={}, r2={}, score={}'.format(best_deg,round(fit_r_squared(x,y,best_fit,weights=weights),5), round(best_score,5)))
if best_deg > 1:
prev_r2 = fit_r_squared(x,y,np.polyfit(x,y,deg=deg-2,w=weights),weights=weights)
prev_score = prev_r2 - alpha*(deg-2)**2
print('Previous degree: degree={}, r2={}, score={}'.format(deg-2,round(prev_r2,5), round(prev_score,5)))
print('Next degree: degree={}, r2={}, score={}'.format(deg,round(fit_r_squared(x,y,fit,weights),5), round(score,5)))
return best_fit
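# Hedged usage sketch (synthetic data, illustrative alpha): fit a noisy cubic
# and let the degree penalty choose the polynomial order.
def _example_reg_degree_polyfit():
    x = np.linspace(-1, 1, 50)
    y = 2 * x**3 - x + 0.05 * np.random.RandomState(0).randn(50)
    fit = reg_degree_polyfit(x, y, alpha=1e-3)
    return np.polyval(fit, x)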
#---------------------
# File loading
#---------------------
def source_extension(source):
"""Get file extension for source"""
extensions = {'gamry':'.DTA','zplot':'.z'}
return extensions[source]
def get_file_source(file):
"""Determine file source"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
# determine format
if txt.split('\n')[0]=='EXPLAIN':
source = 'gamry'
elif txt.split('\n')[0]=='ZPLOT2 ASCII':
source='zplot'
return source
def get_timestamp(file):
"""Get experiment start timestamp from file"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source=='gamry':
date_start = txt.find('DATE')
date_end = txt[date_start:].find('\n') + date_start
date_line = txt[date_start:date_end]
date = date_line.split('\t')[2]
time_start = txt.find('TIME')
time_end = txt[time_start:].find('\n') + time_start
time_line = txt[time_start:time_end]
time = time_line.split('\t')[2]
timestr = date + ' ' + time
dt = datetime.strptime(timestr,"%m/%d/%Y %H:%M:%S")
elif source=='zplot':
date_start = txt.find('Date')
date_end = txt[date_start:].find('\n') + date_start
date_line = txt[date_start:date_end]
date = date_line.split()[1]
time_start = txt.find('Time')
time_end = txt[time_start:].find('\n') + time_start
time_line = txt[time_start:time_end]
time = time_line.split()[1]
timestr = date + ' ' + time
dt = datetime.strptime(timestr,"%m-%d-%Y %H:%M:%S")
return dt
def get_file_info(file,sequence=['file_type','T','aflow','cflow']):
"""
Get information from filename
Args:
file: filename (basename or full path)
sequence: list of identifiers in the order that they appear in the filename (separated by _)
"""
fname = os.path.basename(file).replace('.DTA','')
info = dict(zip(sequence,fname.split('_')))
info['T'] = int(info['T'][:info['T'].find('C')])
for flow in ('aflow','cflow'):
try:
if info[flow].find('sccm') > 0:
rate,gas = info[flow].split('sccm')
gas = ' '.join(camel_case_split(gas))
info[flow] = ' '.join([rate,'SCCM',gas])
else:
info[flow] = ' '.join(camel_case_split(info[flow]))
except KeyError:
pass
return info
def read_eis_zdata(file):
"""read EIS zcurve data from Gamry .DTA file"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source=='gamry':
#find start of zcurve data
zidx = txt.find('ZCURVE')
#check for experiment aborted flag
if txt.find('EXPERIMENTABORTED') > -1:
skipfooter = len(txt[txt.find('EXPERIMENTABORTED'):].split('\n')) - 1
else:
skipfooter = 0
#preceding text
pretxt = txt[:zidx]
#zcurve data
ztable = txt[zidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ztable.find('\n') + 1
header_end = header_start + ztable[header_start:].find('\n')
header = ztable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ztable[header_end + 1:].find('\n')
units = ztable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
# if extra tab at end of data rows, add an extra column to header to match (for Igor data)
first_data_row = ztable[unit_end+1: unit_end+ 1 + ztable[unit_end+1:].find('\n')]
if first_data_row.split('\t')[-1]=='':
header = header + ['extra_tab']
#read data to DataFrame
#python engine required to use skipfooter
data = pd.read_csv(file,sep='\t',skiprows=skiprows,header=None,names=header,usecols=usecols,skipfooter=skipfooter,engine='python')
elif source=='zplot':
#find start of zcurve data
zidx = txt.find('End Comments')
#preceding text
pretxt = txt[:zidx]
#z data
ztable = txt[zidx:]
#column headers are in line above "End Comments"
header = pretxt.split('\n')[-2].strip().split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n'))
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
# read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,header=None,names=header,usecols=usecols)
# rename to standard format
rename = {"Z'(a)":"Zreal","Z''(b)":"Zimag","Freq(Hz)":"Freq"}
data = data.rename(rename,axis=1)
# calculate Zmod and Zphz
Zmod, Zphz = bode_from_complex(data)
data['Zmod'] = Zmod
data['Zphz'] = Zphz
return data
def read_nleis_data(file):
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source=='gamry':
# get number of points measured
num_freq_start = txt.find('NUMFREQ')
num_freq_end = txt.find('\n',num_freq_start+1)
num_freq_line = txt[num_freq_start:num_freq_end]
num_freq = int(num_freq_line.split('\t')[2])
frequency_data = {}
for n in range(num_freq):
fra_start = txt.find(f'FREQUENCY {n}')
if n==num_freq-1:
fra_end = txt.find('ZCURVE')
else:
fra_end = txt.find('FREQUENCY {}'.format(n+1))
fra_txt = txt[fra_start:fra_end]
# get frequency
freq_line = fra_txt[:fra_txt.find('\n')]
requested_freq = float(freq_line.split('\t')[1].replace('Requested Freq (Hz):','').strip())
actual_freq = float(freq_line.split('\t')[2].replace('Actual Freq (Hz):','').strip())
# get header
header_start = fra_txt.find('\n',fra_txt.find('\n')+1) + 1
header_end = fra_txt.find('\n',header_start)
header = fra_txt[header_start:header_end].split('\t')
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
fra_table = fra_txt[fra_txt.find('\n',header_end+1) + 1:]
fra_data = pd.read_csv(StringIO(fra_table),sep='\t',header=None,names=header,usecols=usecols)
frequency_data[n] = {'requested_freq':requested_freq,'actual_freq':actual_freq,'data':fra_data}
return frequency_data
def read_jv_data(file,source='gamry'):
"""read from manual jV txt file"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
if source=='manual':
"""Manually created j-V txt file"""
jv_idx = txt.find('Current')
pretxt = txt[:jv_idx]
skiprows = len(pretxt.split('\n'))-1
data = pd.read_csv(file,sep='\t',skiprows=skiprows)
elif source=='gamry':
#find start of curve data
cidx = txt.find('CURVE\tTABLE')
#preceding text
pretxt = txt[:cidx]
#curve data
ctable = txt[cidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ctable.find('\n') + 1
header_end = header_start + ctable[header_start:].find('\n')
header = ctable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ctable[header_end + 1:].find('\n')
units = ctable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
#if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
#read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,header=None,names=header,usecols=usecols)
else:
raise ValueError(f"Invalid source {source}. Options are 'gamry', 'manual'")
return data
def read_ocv_data(file,file_type='auto'):
"""
read OCV data from Gamry .DTA file
Args:
file: file to read
file_type: file type. Options are 'ocv','eis'
"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
if file_type=='auto':
file_type = os.path.basename(file).split('_')[0].lower()[:3]
#find start (and end, if needed) of ocv data
if file_type=='ocv':
cidx = txt.find('CURVE\tTABLE')
skipfooter = 0
elif file_type=='eis':
cidx = txt.find('OCVCURVE\tTABLE')
post_txt = txt[txt.find('EOC\tQUANT'):]
skipfooter = len(post_txt.split('\n')) - 1
if cidx==-1:
# couldn't find OCV curve data in file
# return empty dataframe
return pd.DataFrame([])
else:
#preceding text
pretxt = txt[:cidx]
#ocv curve data
ctable = txt[cidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ctable.find('\n') + 1
header_end = header_start + ctable[header_start:].find('\n')
header = ctable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ctable[header_end + 1:].find('\n')
units = ctable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
#if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
#read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,skipfooter=skipfooter,header=None,names=header,usecols=usecols,engine='python')
#get timestamp
dt = get_timestamp(file)
#time_col = np.intersect1d(['Time','T'],data.columns) # EIS files in Repeating jv-EIS files have column named 'Time' instead of 'T'
data['timestamp'] = [dt + timedelta(seconds=t) for t in data['T']]
return data
def read_gen_curve_data(file):
"""
read generic curve data from Gamry .DTA file
Args:
file: file to read
"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
#find start of curve data
cidx = txt.find('CURVE\tTABLE')
skipfooter = 0
if cidx==-1:
# couldn't find curve data in file
# return empty dataframe
return pd.DataFrame([])
else:
#preceding text
pretxt = txt[:cidx]
#ocv curve data
ctable = txt[cidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ctable.find('\n') + 1
header_end = header_start + ctable[header_start:].find('\n')
header = ctable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ctable[header_end + 1:].find('\n')
units = ctable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
#if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
#read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,skipfooter=skipfooter,header=None,names=header,usecols=usecols,engine='python')
#get timestamp
dt = get_timestamp(file)
#time_col = np.intersect1d(['Time','T'],data.columns) # EIS files in Repeating jv-EIS files have column named 'Time' instead of 'T'
data['timestamp'] = [dt + timedelta(seconds=t) for t in data['T']]
return data
def read_lsv_data(file):
"""read LSV data from Gamry .DTA file"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
#find start of curve data
cidx = txt.find('CURVE\tTABLE')
#preceding text
pretxt = txt[:cidx]
#LSV curve data
ctable = txt[cidx:]
#column headers are next line after CURVE TABLE line
header_start = ctable.find('\n') + 1
header_end = header_start + ctable[header_start:].find('\n')
header = ctable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ctable[header_end + 1:].find('\n')
units = ctable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
#if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
#read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,header=None,names=header,usecols=usecols)
return data
def get_cell_name(datadir):
datadir = os.path.abspath(datadir)
if os.path.basename(os.path.split(datadir)[0])=='Win10 Gamry data':
# datadir is one level below Gamry data
celldir = os.path.basename(datadir)
else:
# datadir is two levels below Gamry data - need info from both top dir and subdir
celldir = os.path.join(os.path.basename(os.path.split(datadir)[0]),os.path.basename(datadir))
dirsplit = [txt.replace('-',' ') for txt in re.split('_|/|\\\\',celldir.strip('./'))]
cell = dirsplit[0] + ' ' + ' | '.join(dirsplit[1:])
return cell
#---------------------------------
# Data processing
#---------------------------------
def bode_from_complex(data):
if type(data)==pd.core.frame.DataFrame:
Zmod = (data['Zreal'].values**2 + data['Zimag'].values**2)**0.5
Zphz = (180/np.pi)*np.arctan(data['Zimag'].values/data['Zreal'].values)
elif type(data)==np.ndarray:
Zmod = ((data*data.conjugate())**0.5).real
Zphz = (180/np.pi)*np.arctan(data.imag/data.real)
return Zmod, Zphz
def complex_from_bode(data):
if type(data)==pd.core.frame.DataFrame:
Zmod = data['Zmod'].values
Zphz = data['Zphz'].values
elif type(data)==np.ndarray:
Zmod = data[:,0]
Zphz = data[:,1]
Zreal = Zmod*np.cos(np.pi*Zphz/180)
Zimag = Zmod*np.sin(np.pi*Zphz/180)
return Zreal,Zimag
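# Hedged sketch (not part of the original module): round-trip between the
# complex and Bode representations using a made-up impedance array.
def _example_bode_roundtrip():
    Z = np.array([10 - 5j, 8 - 2j])
    Zmod, Zphz = bode_from_complex(Z)
    Zreal, Zimag = complex_from_bode(np.column_stack([Zmod, Zphz]))
    return Zreal, Zimag  # should recover Z.real and Z.imag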
def lsv_resistance(df):
"""
Calculate resistance from LSV file
Args:
df: dataframe of LSV data
"""
return np.polyfit(df['Im'],df['Vf'],deg=1)[0]
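# Hedged sketch (synthetic data): the slope of a linear i-V sweep with a
# 2 ohm series resistance should come back as ~2.0.
def _example_lsv_resistance():
    df = pd.DataFrame({'Im': np.linspace(0, 0.5, 11)})
    df['Vf'] = 0.1 + 2.0 * df['Im']
    return lsv_resistance(df)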
def extract_eis_HFR(datadir,filter_func,ignore_files=[],**est_HFR_kw):
"""
Extract HFR from multiple EIS files and load into a DataFrame
Args:
datadir: data file directory
filter_func: function to select files to load. Should return True when passed desired filenames.
Ex: filter_func = lambda file: file.split('_')[2].replace('.DTA','')=='TimedRampDown'
ignore_files: list of filenames to ignore (exclude from loading even if they meet filter_func conditions)
est_HFR_kw: kwargs for estimate_HFR()
"""
files = [file for file in os.listdir(datadir) if filter_func(file)==True]
#sort files by time
files = sorted(files,key=lambda x: get_timestamp(os.path.join(datadir,x)).timestamp())
columns=['T_str','time','HFR','file']
df = pd.DataFrame(columns=columns)
for filename in files:
if filename not in ignore_files:
file = os.path.join(datadir,filename)
temp = filename.split('_')[1]
data = read_eis_zdata(file)
time = get_timestamp(file).timestamp()
HFR = estimate_HFR(data,**est_HFR_kw)
df = df.append(pd.Series([temp,time,HFR,filename],index=columns),ignore_index=True)
df['T'] = df['T_str'].str[:-1].astype(int)
return df
def flag_eis_diffs(df,n=3,iqr_bound=5,cols=['Zmod','Zphz'],scaling='unity',show_plots=False,direction=['fwd','rev']):
bad_idx = []
if scaling in ('modulus','mixed'):
# get frequency spacing
dx = mode(np.log(df['Freq']).values[1:] - np.log(df['Freq']).values[:-1]).mode[0]
# get variance of nth derivative
var_n = binom(2*n,n)*df['Zmod'].values**2/(dx**(2*n))
# weight is inverse std
mod_weights = 1/var_n**0.5
if type(direction)==str:
direction = [direction]
mod_df = df.copy()
plot_col_idx = {}
plot_col_diff = {}
for col in cols:
# do differences forward and in reverse to catch all points
for drct in direction:#,'rev']:
mod_df[f'{drct}_diff_{col}'] = np.nan
# filter by nth derivative
if drct=='fwd':
diff = np.diff(df[col],n=n)/(np.diff(np.log(df['Freq']),n=1)[n-1:])**n
if scaling=='unity':
weights = np.ones_like(diff)
elif scaling=='modulus':
weights = mod_weights[n:] #1/(df['Zmod'].values[n:]
elif scaling=='mixed':
if col=='Zphz':
weights = np.ones_like(diff)
else:
weights = mod_weights[n:]
diff *= weights
mod_df.loc[mod_df.index.min()+n:,f'{drct}_diff_{col}'] = diff
plot_col_diff[(col,drct)] = diff
else:
diff = np.diff(df[col].values[::-1],n=n)/(np.diff(np.log(df['Freq'].values[::-1]),n=1)[n-1:])**n
if scaling=='unity':
weights = np.ones_like(diff)
elif scaling=='modulus':
weights = mod_weights[::-1][n:] #1/(df['Zmod'].values[::-1][n:]*np.exp(9.825*n))
elif scaling=='mixed':
if col=='Zphz':
weights = np.ones_like(diff)
else:
weights = mod_weights[::-1][n:]
diff *= weights
mod_df.loc[:mod_df.index.max()-n,f'{drct}_diff_{col}'] = diff[::-1]
plot_col_diff[(col,drct)] = diff[::-1]
# get indexes of data outside bounds
col_idx = (np.where(np.abs(diff-np.median(diff)) > iqr_bound*iqr(diff))[0]).astype(int) # + np.ceil(n/2)).astype(int)
if drct=='rev':
col_idx = len(df) - col_idx - 1
# print(col,drct,sorted(col_idx))
bad_idx += list(col_idx)
plot_col_idx[(col,drct)] = col_idx.astype(int)
bad_idx = np.unique(bad_idx).astype(int)
mod_df['flag'] = 0
if len(bad_idx) > 0:
mod_df.iloc[bad_idx,-1] = 1
if show_plots:
fig, axes = plt.subplots(2,2,figsize=(10,8))
axes[0,0].get_shared_x_axes().join(axes[0,0],axes[0,1],axes[1,0],axes[1,1])
bad_df = mod_df[mod_df['flag']==1]
good_df = mod_df[mod_df['flag']==0]
unit_scale = get_unit_scale(mod_df)
# plot nth diff vs. frequency
for col, ax in zip(cols,axes[0]):
# obsolete
# diff = np.diff(df[col],n=n)/np.diff(np.log(df['Freq']),n=1)[n-1:]
# if weighting=='unity':
# weights = np.ones_like(diff)
# elif weighting=='modulus':
# weights = 1/df['Zmod'].values[n:]
# elif weighting=='mixed':
# if col=='Zphz':
# weights = np.ones_like(diff)
# else:
# weights = mod_weights[n:]
# diff *= weights
# # color the points that triggered the flag based on nth diff, not the points actually flagged
# col_idx = np.where(np.abs(diff-np.median(diff)) > iqr_bound*iqr(diff))[0] + n
for drct in direction:#,'rev']:
diff = plot_col_diff[(col,drct)]
col_idx = plot_col_idx[(col,drct)]
if drct=='fwd':
ax.scatter(df['Freq'][n:],diff,s=8,c='k',label='Fwd Diff')
ax.scatter(df.iloc[col_idx+n,:]['Freq'],diff[col_idx],s=20,c='k',edgecolor='r',linewidth=0.8)
ax.axhline(np.median(diff)-iqr_bound*iqr(diff),c='r',lw=1,label='Fwd Bound')
ax.axhline(np.median(diff)+iqr_bound*iqr(diff),c='r',lw=1)
# print(col,drct,iqr_bound*iqr(diff),diff[col_idx])
elif drct=='rev':
ax.scatter(df['Freq'][:-n],diff,s=8,c='gray',label='Rev Diff')
# print(col_idx[::-1]-2, bad_df.index)
# print(col,drct,np.median(diff),iqr_bound*iqr(diff),diff[col_idx-n])
ax.scatter(df.iloc[col_idx-n,:]['Freq'],diff[col_idx-n],s=20,c='gray',edgecolor='r',linewidth=0.8)
ax.axhline(np.median(diff)-iqr_bound*iqr(diff),c='r',lw=1,ls='--',label='Rev Bound')
ax.axhline(np.median(diff)+iqr_bound*iqr(diff),c='r',lw=1,ls='--')
ax.set_xscale('log')
ax.set_title(col)
ax.set_xlabel('Frequency')
ax.set_ylabel('nth Discrete Difference')
ax.legend(fontsize=9)
# plot bode with flagged points in red
plot_bode(good_df,axes=axes[1],cols=cols)
plot_bode(bad_df,axes=axes[1],cols=cols,c='r')
fig.tight_layout()
return mod_df
def flag_eis_points(df,n=3,iqr_bound=5,cols=['Zmod','Zphz'],scaling='unity',fill_rolling_mean=False,show_plots=False,direction=['fwd','rev'],
trim_method='direction',trim_offset=-2,axes=None,plot_kw={'s':8}): #,trim_start_consec_pts=5):
"""
Flag bad points in EIS data using finite differencing on Zmod and Zphz
Args:
df: data DataFrame
n: order of discrete differencing. Should be 3 for best results (I think - n>3 might also work)
iqr_bound: bound on nth discrete difference. Points that are above or below iqr_bound*iqr will be flagged
scaling: how to weight/scale differences. Options:
'unity': assume constant variance
'modulus': assume variance of both Zmod and Zphz is proportional to Zmod**2
'mixed': assume variance of Zmod is proportional to Zmod**2, variance of Zphz is constant
fill_rolling_mean: if True, fill in values for bad frequencies by interpolating rolling average. Not recommended
show_plots: if True, show plots illustrating data processing
Returns:
DataFrame with 'flag' column added. 'flag' value of 1 indicates bad data point
"""
bad_idx = []
if scaling in ('modulus','mixed'):
# get frequency spacing
dx = mode(np.log(df['Freq']).values[1:] - np.log(df['Freq']).values[:-1]).mode[0]
# get variance of nth derivative
var_n = binom(2*n,n)*df['Zmod'].values**2/(dx**(2*n))
# weight is inverse std
mod_weights = 1/var_n**0.5
if type(direction)==str:
direction = [direction]
trim_options = ['direction','full','none']
if trim_method not in trim_options:
raise ValueError(f'Invalid trim_method {trim_method}. Options: {trim_options}')
# number of points to trim from start and end of each range:
# n trims all points except supposed bad point. Very conservative, may leave actual bad points in
# n-1 leaves an extra point on each side of supposed bad point
# n-2 leaves 2 extra points on each side of supposed bad point
trim_len = n + trim_offset
plot_col_idx = {}
plot_col_diff = {}
for drct in direction:
# do differences forward and in reverse to catch all points
drct_idx = []
for col in cols:
# filter by nth derivative
if drct=='fwd':
diff = np.diff(df[col],n=n)/(np.diff(np.log(df['Freq']),n=1)[n-1:])**n
if scaling=='unity':
weights = np.ones_like(diff)
elif scaling=='modulus':
weights = mod_weights[n:] #1/(df['Zmod'].values[n:]
elif scaling=='mixed':
if col=='Zphz':
weights = np.ones_like(diff)
else:
weights = mod_weights[n:]
diff *= weights
plot_col_diff[(col,drct)] = diff
else:
diff = np.diff(df[col].values[::-1],n=n)/(np.diff(np.log(df['Freq'].values[::-1]),n=1)[n-1:])**n
if scaling=='unity':
weights = np.ones_like(diff)
elif scaling=='modulus':
weights = mod_weights[::-1][n:] #1/(df['Zmod'].values[::-1][n:]*np.exp(9.825*n))
elif scaling=='mixed':
if col=='Zphz':
weights = np.ones_like(diff)
else:
weights = mod_weights[::-1][n:]
diff *= weights
plot_col_diff[(col,drct)] = diff[::-1]
# get indexes of data outside bounds
# fluctuation in diff shows up 1 point after the errant point (subtract 1). Diff starts at nth point (add n). The errant point is at the diff index plus n minus 1
col_idx = (np.where(np.abs(diff-np.median(diff)) > iqr_bound*iqr(diff))[0]).astype(int) # + n-1).astype(int)
# for plotting, track the actual points that triggered the flags, not the flagged points. Align diff index with function index
plot_idx = col_idx #- (n-1)
# print('Pre-condense:',col,drct,col_idx)
# a single bad point cascades to n subsequent points in the diff. Condense the ranges accordingly
# Still flag one point on each side of the point thought to be "bad" as it may be unclear which point is actually bad
# (i.e., a transition from a bad point to a good point may make it look like the good point is actually the errant one)
# if len(col_idx) > 0:
# rng_end_idx = np.where(np.diff(col_idx)!=1)[0]
# rng_start_idx = np.insert((rng_end_idx + 1),0,0)
# rng_end_idx = np.append(rng_end_idx,len(col_idx)-1)
# trimmed_ranges = [np.arange(col_idx[start],max(col_idx[start]+1,col_idx[end] - (n-2))) for start,end in zip(rng_start_idx,rng_end_idx)]
# col_idx = np.concatenate([r for r in trimmed_ranges])
# print('Post-condense:',col,drct,col_idx)
# check last point - won't be flagged above due to centering
# if np.abs(diff[-1]-np.median(diff)) > iqr_bound*iqr(diff):
# col_idx = np.insert(col_idx, len(col_idx), n + len(diff) - 1)
if drct=='rev':
col_idx = len(df) - col_idx - 1 + (-1) #+((n-1)-1)
plot_idx = len(df) - plot_idx - 1
# print(col,drct,sorted(col_idx))
# concatenate all the flags determined in the same direction
drct_idx += list(col_idx)
plot_col_idx[(col,drct)] = plot_idx.astype(int)
drct_idx = np.unique(drct_idx)
if trim_method=='direction':
if len(drct_idx) > 0:
print('Pre-trim:',drct_idx)
# a single bad point cascades to n points in the diff. Trim the ranges accordingly
# do this after aggregating all flags in one direction to ensure that contiguous ranges of bad points are not lost by prematurely condensing ranges
rng_end_idx = np.where(np.diff(drct_idx)!=1)[0]
rng_start_idx = np.insert((rng_end_idx + 1),0,0)
rng_end_idx = np.append(rng_end_idx,len(drct_idx)-1)
# trim logic: trim the end unless it truncates the range to length zero.
trimmed_ranges = [np.arange( drct_idx[start],
max(drct_idx[start]+1, drct_idx[end] - (trim_len-1))
)
for start,end in zip(rng_start_idx,rng_end_idx)]
# the very last points in the spectra should not be trimmed
if drct_idx[-1]==len(df)-1:
start = rng_start_idx[-1]
end = rng_end_idx[-1]
# reset the range end to the last point
trimmed_ranges[-1] = np.arange(drct_idx[start],drct_idx[end])
drct_idx = np.concatenate([r for r in trimmed_ranges])
print('Post-trim:',drct_idx)
bad_idx += list(drct_idx)
bad_idx = np.unique(bad_idx).astype(int)
if trim_method=='full':
if len(bad_idx) > 0:
print('Pre-trim:',bad_idx)
# a single bad point cascades to n subsequent points in the diff. Condense (trim) the ranges accordingly
# do this after aggregating all flags to ensure that contiguous ranges of bad points are not lost by prematurely condensing ranges
rng_end_idx = np.where(np.diff(bad_idx)!=1)[0]
rng_start_idx = np.insert((rng_end_idx + 1),0,0)
rng_end_idx = np.append(rng_end_idx,len(bad_idx)-1)
# trim logic: use the trim unless it truncates the range to length zero.
# min(bad_idx[start] + trim, bad_idx[end]-trim): start at the smaller of start + trim and end - trim
# max( bad_idx[start], <above>): if end-trim < start, just start at start (i.e. don't extend the range just because it's short)
# same logic applies to end of range
trimmed_ranges = [np.arange( max(bad_idx[start],min(bad_idx[start]+trim_len,bad_idx[end]-trim_len)),
min(bad_idx[end]+1,max(bad_idx[start]+trim_len+1,bad_idx[end] - (trim_len-1)))
)
for start,end in zip(rng_start_idx,rng_end_idx)]
# the very first and very last points in the spectra should not be trimmed
if bad_idx[0]==0:
start = rng_start_idx[0]
end = rng_end_idx[0]
# reset the range start to point 0
trimmed_ranges[0] = np.arange(bad_idx[start],min(bad_idx[end]+1,max(bad_idx[start]+trim_len+1,bad_idx[end] - (trim_len-1))))
if bad_idx[-1]==len(df)-1:
start = rng_start_idx[-1]
end = rng_end_idx[-1]
# reset the range end to the last point
trimmed_ranges[-1] = np.arange(max(bad_idx[start],min(bad_idx[start]+trim_len,bad_idx[end]-trim_len)),bad_idx[rng_end_idx[-1]])
bad_idx = np.concatenate([r for r in trimmed_ranges])
print('Post-trim:',bad_idx)
# if len(bad_idx) >= trim_start_consec_pts:
# # if the first trim_start_consec_pts points are all flagged, also flag the first n-1 points
# if np.sum(bad_idx[:trim_start_consec_pts] == np.arange(n-1, n-1+trim_start_consec_pts))==trim_start_consec_pts:
# bad_idx = np.concatenate((np.arange(0,n-1),bad_idx))
#print(bad_idx)
mod_df = df.copy()
mod_df['flag'] = 0
if len(bad_idx) > 0:
mod_df.iloc[bad_idx,-1] = 1
if fill_rolling_mean:
# get rolling mean
ma = df.rolling(5,center=True).mean()
mod_df['Zmod_filled'] = mod_df['Zmod']
mod_df['Zphz_filled'] = mod_df['Zphz']
bad_df = mod_df[mod_df['flag']==1]
for col in cols:
# interpolate rolling mean to fill bad data points
mod_df.loc[bad_idx,col + '_filled'] = bad_df.apply(lambda r: np.interp(r['Freq'],ma['Freq'][::-1],ma[col][::-1]), axis=1)
mod_df['Zreal_filled'] = mod_df['Zmod_filled']*np.cos(2*np.pi*mod_df['Zphz_filled']/360)
mod_df['Zimag_filled'] = mod_df['Zmod_filled']*np.sin(2*np.pi*mod_df['Zphz_filled']/360)
if show_plots:
if axes is None:
fig, axes = plt.subplots(3,2,figsize=(10,10))
else:
fig = axes.ravel()[0].get_figure()
axes[0,0].get_shared_x_axes().join(axes[0,0],axes[0,1],axes[1,0],axes[1,1])
bad_df = mod_df[mod_df['flag']==1]
good_df = mod_df[mod_df['flag']==0]
unit_scale = get_unit_scale(mod_df)
# plot nth diff vs. frequency
for col, ax in zip(cols,axes[0]):
# obsolete
# diff = np.diff(df[col],n=n)/np.diff(np.log(df['Freq']),n=1)[n-1:]
# if weighting=='unity':
# weights = np.ones_like(diff)
# elif weighting=='modulus':
# weights = 1/df['Zmod'].values[n:]
# elif weighting=='mixed':
# if col=='Zphz':
# weights = np.ones_like(diff)
# else:
# weights = mod_weights[n:]
# diff *= weights
# # color the points that triggered the flag based on nth diff, not the points actually flagged
# col_idx = np.where(np.abs(diff-np.median(diff)) > iqr_bound*iqr(diff))[0] + n
for drct in direction:#,'rev']:
diff = plot_col_diff[(col,drct)]
col_idx = plot_col_idx[(col,drct)]
if drct=='fwd':
ax.scatter(df['Freq'][n:],diff,c='k',label='Fwd Diff',**plot_kw)
ax.scatter(df.iloc[col_idx+n,:]['Freq'],diff[col_idx],c='k',edgecolor='r',linewidth=0.8,**plot_kw)
ax.axhline(np.median(diff)-iqr_bound*iqr(diff),c='r',lw=1,label='Fwd Bound')
ax.axhline(np.median(diff)+iqr_bound*iqr(diff),c='r',lw=1)
# print(col,drct,iqr_bound*iqr(diff),diff[col_idx])
elif drct=='rev':
ax.scatter(df['Freq'][:-n],diff,c='gray',label='Rev Diff',**plot_kw)
# print(col_idx[::-1]-2, bad_df.index)
# print(col,drct,np.median(diff),iqr_bound*iqr(diff),diff[col_idx-n])
ax.scatter(df.iloc[col_idx-n,:]['Freq'],diff[col_idx-n],c='gray',edgecolor='r',linewidth=0.8,**plot_kw)
ax.axhline(np.median(diff)-iqr_bound*iqr(diff),c='r',lw=1,ls='--',label='Rev Bound')
ax.axhline(np.median(diff)+iqr_bound*iqr(diff),c='r',lw=1,ls='--')
ax.set_xscale('log')
ax.set_title(col)
ax.set_xlabel('Frequency')
ax.set_ylabel('nth Discrete Difference')
ax.legend(fontsize=9)
# plot bode with flagged points in red
plot_bode(good_df,axes=axes[1],cols=cols,**plot_kw)
plot_bode(bad_df,axes=axes[1],cols=cols,c='r',**plot_kw)
if fill_rolling_mean:
# plot interpolated points
fdf = mod_df.copy()
for col in ['Zreal','Zimag','Zmod','Zphz']:
fdf[col] = fdf[col + '_filled']
fdf = fdf.loc[bad_idx,:]
plot_bode(fdf,axes=axes[1],c='g')
# plot nyquist with flagged points in red
plot_nyquist(good_df,ax=axes[2,0],unit_scale=unit_scale,**plot_kw)
plot_nyquist(bad_df,ax=axes[2,0],c='r',unit_scale=unit_scale,**plot_kw)
if fill_rolling_mean:
plot_nyquist(fdf,ax=axes[2,0],c='g',unit_scale=unit_scale,**plot_kw)
axes[2,0].set_title('Nyquist')
axes[2,1].axis('off')
fig.tight_layout()
return mod_df
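# Hedged usage sketch: flag outliers in a measured spectrum loaded with
# read_eis_zdata. The file name here is hypothetical.
def _example_flag_eis_points(file='EIS_550C_Air.DTA'):
    df = read_eis_zdata(file)
    flagged = flag_eis_points(df, n=3, iqr_bound=5, scaling='mixed')
    return flagged[flagged['flag'] == 0]  # keep only unflagged points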
def calc_sigma(R,d,t,units='mm'):
"""
Calculate conductivity in S/cm given resistance and cell dimensions
Assumes button cell geometry
Args:
R: resistance (ohm)
d: diameter
t: thickness
units: units for d and t. Default mm. Options: 'mm', 'cm'
"""
#convert to cm
if units=='mm':
d,t = d/10, t/10
elif units!='cm':
raise ValueError(f"Units arg {units} not recognized. Valid units are 'mm', 'cm'")
a = np.pi*(d/2)**2
sigma = t/(R*a)
return sigma
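# Hedged usage sketch (illustrative values only): a 10 mm diameter, 1.2 mm
# thick button cell with an ohmic resistance of 2.5 ohm.
def _example_calc_sigma():
    return calc_sigma(R=2.5, d=10, t=1.2, units='mm')  # ~0.06 S/cm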
def aggregate_prop(df,by,property_col,aggregate):
grp_df = df.groupby(by)
if aggregate=='end':
prop = np.array([gdf.loc[gdf['time'].idxmax(),property_col] for name, gdf in grp_df])
elif aggregate=='start':
prop = np.array([gdf.loc[gdf['time'].idxmin(),property_col] for name, gdf in grp_df])
else:
prop = getattr(grp_df[property_col],aggregate)().values
return prop
def calc_G_act(df,property_col,aggregate,return_fit=False):
"""
Calculate activation energy (in eV) from EIS data
Args:
df: DataFrame with property by temperature (in C)
aggregate: function to use to aggregate multiple values for same temperature
property_col: column name of property for which to calculate activation energy
return_fit: if True, return fit coefficients in addition to G_act
Returns:
G_act: activation energy in eV
fit: fit coefficients for log(prop) vs. 1/T (if return_fit==True)
"""
prop_agg = aggregate_prop(df,'T',property_col,aggregate)
temps = np.unique(df['T'])
# grp_df = df.groupby('T')
# temps = np.array(list(grp_df.groups.keys())) #np.zeros(len(grp_df))
# sigma_agg = np.zeros(len(grp_df))
# for i, (T, df) in enumerate(grp_df):
# if aggregate in ['start','end']:
# if aggregate=='start':
# agg_time = df['time'].min()
# else:
# agg_time = df['time'].max()
# sigma_agg[i] = float(df[df['time']==agg_time][property_col])
# else:
# sigma_agg[i] = getattr(df[property_col],aggregate)()
T_inv = 1/(273+temps)
#print(sigma_agg)
fit = np.polyfit(T_inv,np.log(prop_agg),deg=1)
k_B = 8.617e-5 #eV/K
if return_fit==True:
return -k_B*fit[0], fit
else:
return -k_B*fit[0]
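# Hedged sketch with synthetic data: an Arrhenius-like series with a 1 eV
# barrier; the recovered activation energy should be close to 1.0 eV.
def _example_calc_G_act():
    T = np.array([500, 550, 600])  # deg C, as in the 'T' column above
    sigma = 1e3 * np.exp(-1.0 / (8.617e-5 * (T + 273)))
    df = pd.DataFrame({'T': T, 'sigma': sigma})
    return calc_G_act(df, 'sigma', 'mean')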
def get_unit_scale(df):
""" Get unit scale (mu, m, k, M, G) for EIS data"""
unit_map = {-2:'$\mu$',-1:'m',0:'',1:'k',2:'M',3:'G'}
Z_max = max(df['Zreal'].max(),df['Zimag'].abs().max())
Z_ord = np.floor(np.log10(Z_max)/3)
unit_scale = unit_map.get(Z_ord,'')
return unit_scale
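# Hedged sketch: impedance data in the kilo-ohm range should map to 'k'.
def _example_get_unit_scale():
    df = pd.DataFrame({'Zreal': [1.2e3, 3.4e3], 'Zimag': [-0.5e3, -1.1e3]})
    return get_unit_scale(df)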
def get_scale_factor(df):
Z_max = max(df['Zreal'].max(),df['Zimag'].abs().max())
Z_ord = np.floor(np.log10(Z_max)/3)
return 10**(3*Z_ord)
def get_common_unit_scale(df_list,aggregate='min'):
"""
Get common unit scale for multiple datasets
Parameters:
df_list: list of DataFrames
aggregate: method for choosing common scale. Defaults to min (smallest scale)
"""
unit_map = {-2:'$\mu$',-1:'m',0:'',1:'k',2:'M',3:'G'}
rev_map = {v:k for k,v in unit_map.items()}
units = [get_unit_scale(df) for df in df_list]
unit_nums = [rev_map[u] for u in units]
common_num = getattr(np,aggregate)(unit_nums)
common_unit = unit_map.get(common_num,'')
return common_unit
#---------------------------------
# Plotting
#---------------------------------
def plot_ocv(datadir, filter_func=None, files=None, ax=None, invert='auto', same_color=True,**plt_kw):
#get files
if filter_func is None and files is None:
#if no filter or files specified, get all OCV files
filter_func = lambda x: x[0:3]=='OCV' and x[-3:]=='DTA'
files = [f for f in os.listdir(datadir) if filter_func(f)]
elif files and not filter_func:
if type(files)==str:
#if single file specified, convert to 1-element list
files = [files]
elif filter_func and not files:
files = [f for f in os.listdir(datadir) if filter_func(f)]
elif filter_func and files:
raise ValueError('Both filter_func and files have been specified. Please specify only one')
dfs = [read_ocv_data(os.path.join(datadir,file)) for file in files]
dfs = [df for df in dfs if len(df) > 0]
start_times = [df['timestamp'][0] for df in dfs]
start_time = min(start_times)
ts_func = lambda ts: (ts - start_time).dt.total_seconds()/3600
if ax is None:
fig, ax = plt.subplots()
if invert=='auto':
# choose sign based on max voltage
tdf = pd.concat(dfs,ignore_index=True)
V_sign = np.sign(tdf.loc[tdf['Vf'].abs().idxmax(),'Vf'])
elif invert==True:
V_sign = -1
elif invert==False:
V_sign = 1
for df in dfs:
if 'c' not in plt_kw and 'color' not in plt_kw and same_color==True:
#if no color specified and same color desired, set color to first default color
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt_kw['c'] = default_colors[0]
ax.plot(ts_func(df['timestamp']),V_sign*df['Vf'],**plt_kw)
ax.set_xlabel('Time (h)')
ax.set_ylabel('OCV (V)')
def plot_jv(df,area=None,plot_pwr=False,ax=None,pwr_kw={'ls':'--'},**plt_kw):
if ax is None:
fig, ax = plt.subplots()
if area is not None:
# if area given, convert to densities
df = df.copy()
df['Im'] /= area
df['Pwr'] /= area
ax.plot(1000*df['Im'].abs(),df['Vf'].abs(),**plt_kw)
if area is None:
ax.set_xlabel('Current (mA)')
else:
ax.set_xlabel('Current Density (mA/cm$^2$)')
ax.set_ylabel('Voltage (V)')
if 'label' in plt_kw.keys():
ax.legend()
if plot_pwr is True:
# plot power on same axes
# get twin ax if already exists
for other_ax in ax.figure.axes:
if other_ax is ax:
ax2 = None
elif other_ax.bbox.bounds == ax.bbox.bounds:
ax2 = other_ax
break
else:
ax2 = None
if ax2 is None:
ax2 = ax.twinx()
ax2.plot(1000*df['Im'].abs(),1000*df['Pwr'].abs().values,**pwr_kw)
if area is None:
ax2.set_ylabel('Power (mW)')
else:
ax2.set_ylabel('Power Density (mW/cm$^2$)')
return ax
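# Hedged usage sketch: j-V curve with power overlay for a 0.5 cm2 cell. The
# file name is hypothetical and the Gamry curve table is assumed to contain
# 'Im', 'Vf' and 'Pwr' columns.
def _example_plot_jv(file='jV_550C.DTA'):
    df = read_jv_data(file, source='gamry')
    return plot_jv(df, area=0.5, plot_pwr=True, label='550C')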
def plot_nyquist(df,area=None,ax=None,label='',plot_func='scatter',unit_scale='auto',label_size=10,eq_xy=True,**kw):
"""
Nyquist plot
Args:
df: dataframe of impedance data
area: cell area in cm2. If None, plot raw impedance
label: series label
plot_func: pyplot plotting function. Options: 'scatter', 'plot'
eq_xy: if True, ensure that scale of x and y axes is the same (i.e. xmax-xmin = ymax-ymin)
kw: kwargs for plot_func
"""
df = df.copy()
if ax is None:
fig, ax = plt.subplots()
if area is not None:
# if area given, convert to ASR
df['Zreal'] *= area
df['Zimag'] *= area
# get/set unit scale
unit_map = {-2:'$\mu$',-1:'m',0:'',1:'k',2:'M',3:'G'}
if unit_scale=='auto':
unit_scale = get_unit_scale(df)
Z_ord = [k for k,v in unit_map.items() if v==unit_scale][0]
elif unit_scale is None:
unit_scale=''
Z_ord = 0
else:
Z_ord = [k for k,v in unit_map.items() if v==unit_scale][0]
# scale data
df['Zreal'] /= 10**(Z_ord*3)
df['Zimag'] /= 10**(Z_ord*3)
if plot_func=='scatter':
if 's' not in kw.keys():
# default point size
kw['s'] = 8
ax.scatter(df['Zreal'],-df['Zimag'],label=label,**kw)
elif plot_func=='plot':
ax.plot(df['Zreal'],-df['Zimag'],label=label,**kw)
else:
raise ValueError(f'Invalid plot type {plot_func}. Options are scatter, plot')
if area is not None:
ax.set_xlabel(f'$Z^\prime \, / \, \mathrm{{{unit_scale}}}\Omega\cdot \mathrm{{cm}}^2$',size=label_size)
ax.set_ylabel(f'$-Z^{{\prime\prime}} \, / \, \mathrm{{{unit_scale}}}\Omega\cdot \mathrm{{cm}}^2$',size=label_size)
else:
ax.set_xlabel(f'$Z^\prime \, / \, \mathrm{{{unit_scale}}}\Omega$',size=label_size)
ax.set_ylabel(f'$-Z^{{\prime\prime}} \, / \, \mathrm{{{unit_scale}}}\Omega$',size=label_size)
if label!='':
ax.legend()
if eq_xy:
# make scale of x and y axes the same
fig = ax.get_figure()
# get data range
yrng = ax.get_ylim()[1] - ax.get_ylim()[0]
xrng = ax.get_xlim()[1] - ax.get_xlim()[0]
# get axis dimensions
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
width, height
yscale = yrng/height
xscale = xrng/width
if yscale > xscale:
# expand the x axis
diff = (yscale - xscale)*width
xmin = max(0,ax.get_xlim()[0] - diff/2)
mindelta = ax.get_xlim()[0] - xmin
xmax = ax.get_xlim()[1] + diff - mindelta
ax.set_xlim(xmin,xmax)
elif xscale > yscale:
# expand the y axis
diff = (xscale - yscale)*height
if np.min(-df['Zimag']) >= 0:
# if -Zimag doesn't go negative, don't go negative on y-axis
ymin = max(0,ax.get_ylim()[0] - diff/2)
mindelta = ax.get_ylim()[0] - ymin
ymax = ax.get_ylim()[1] + diff - mindelta
else:
negrng = abs(ax.get_ylim()[0])
posrng = abs(ax.get_ylim()[1])
negoffset = negrng*diff/(negrng + posrng)
posoffset = posrng*diff/(negrng + posrng)
ymin = ax.get_ylim()[0] - negoffset
ymax = ax.get_ylim()[1] + posoffset
ax.set_ylim(ymin,ymax)
return ax
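# Example (hypothetical usage sketch): plot_nyquist expects a dataframe with
# 'Zreal' and 'Zimag' columns, e.g. as returned by read_eis_zdata. The file
# name and area below are illustrative.
# df = read_eis_zdata('EIS_550C.DTA')
# ax = plot_nyquist(df, area=0.5, label='550 C', unit_scale='auto')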
def plot_bode(df,area=None,axes=None,label='',plot_func='scatter',cols=['Zmod','Zphz'],unit_scale='auto',invert_Zimag=True,**kw):
"""
Bode plot
Args:
df: dataframe of impedance data
area: cell area in cm2. If None, plot raw impedance
label: series label
plot_func: pyplot plotting function. Options: 'scatter', 'plot'
cols: which columns to plot vs. frequency. Defaults to Zmod and Zphz
unit_scale: impedance scale. Options are 'auto', 'mu', 'm', '', 'k', 'M'
kw: kwargs for plot_func
"""
df = df.copy()
# formatting for columns
col_dict = {'Zmod':{'units':'$\Omega$','label':'$Z_{\mathrm{mod}}$','scale':'log'},
'Zphz':{'units':'$^\circ$','label':'$Z_{\mathrm{phz}}$','scale':'linear'},
'Zreal':{'units':'$\Omega$','label':'$Z^\prime$','scale':'linear'},
'Zimag':{'units':'$\Omega$','label':'$Z^{\prime\prime}$','scale':'linear'}
}
if axes is None:
fig, axes = plt.subplots(1,2,figsize=(8,3))
else:
fig = axes[0].get_figure()
ax1,ax2 = axes
if area is not None:
for col in ['Zreal','Zimag','Zmod']:
if col in df.columns:
df[col] *= area
# get/set unit scale
unit_map = {-2:'$\mu$',-1:'m',0:'',1:'k',2:'M',3:'G'}
if unit_scale=='auto':
unit_scale = get_unit_scale(df)
Z_ord = [k for k,v in unit_map.items() if v==unit_scale][0]
elif unit_scale is None:
unit_scale=''
Z_ord = 0
else:
Z_ord = [k for k,v in unit_map.items() if v==unit_scale][0]
# scale data
for col in ['Zreal','Zimag','Zmod']:
if col in df.columns:
df[col] /= 10**(Z_ord*3)
if invert_Zimag:
df['Zimag'] *= -1
if plot_func=='scatter':
if 's' not in kw.keys():
# default point size
kw['s'] = 8
ax1.scatter(df['Freq'],df[cols[0]],label=label,**kw)
ax2.scatter(df['Freq'],df[cols[1]],label=label,**kw)
elif plot_func=='plot':
ax1.plot(df['Freq'],df[cols[0]],label=label,**kw)
ax2.plot(df['Freq'],df[cols[1]],label=label,**kw)
else:
raise ValueError(f'Invalid plot type {plot_func}. Options are scatter, plot')
for ax in axes:
ax.set_xlabel('$f$ / Hz')
ax.set_xscale('log')
def ax_title(col,area):
cdict = col_dict.get(col,{})
if area is not None and cdict.get('units','')=='$\Omega$':
title = '{} / {}{}$\cdot\mathrm{{cm}}^2$'.format(cdict.get('label',col),unit_scale,cdict.get('units',''))
elif cdict.get('units','')=='$\Omega$':
title = '{} / {}{}'.format(cdict.get('label',col),unit_scale,cdict.get('units',''))
else:
title = '{} / {}'.format(cdict.get('label',col),cdict.get('units','a.u.'))
if col=='Zimag' and invert_Zimag:
title = '$-$' + title
return title
for col, ax in zip(cols,axes):
ax.set_ylabel(ax_title(col,area))
ax.set_yscale(col_dict.get(col,{}).get('scale','linear'))
for ax in axes:
# manually set x axis limits - sometimes matplotlib doesn't get them right
fmin = df['Freq'].min()
fmax = df['Freq'].max()
ax.set_xlim(fmin/5,fmax*5)
# if area is not None:
# ax1.set_ylabel('$Z_{\mathrm{mod}} \ (\Omega\cdot \mathrm{cm}^2)$')
# else:
# ax1.set_ylabel('$Z_{\mathrm{mod}} \ (\Omega)$')
# ax1.set_yscale('log')
# ax2.set_ylabel('$Z_{\mathrm{phz}} \ (^\circ)$')
fig.tight_layout()
return axes
def plot_full_eis(df,area=None,axes=None,label='',plot_func='scatter',unit_scale='auto',bode_cols=['Zmod','Zphz'],**kw):
if axes is None:
# fig = plt.figure(figsize=(8,8))
# ax1 = plt.subplot2grid((2,2),(0,0),colspan=2)
# ax2 = plt.subplot2grid((2,2),(1,0))
# ax3 = plt.subplot2grid((2,2),(1,1))
# axes = np.array([ax1,ax2,ax3])
fig,axes = plt.subplots(1,3,figsize=(9,2.5))
ax1,ax2,ax3 = axes.ravel()
else:
ax1,ax2,ax3 = axes.ravel()
fig = axes.ravel()[0].get_figure()
#Nyquist plot
plot_nyquist(df,area=area,label=label,ax=ax1,plot_func=plot_func,unit_scale=unit_scale,**kw)
#Bode plots
plot_bode(df,area=area,label=label,axes=(ax2,ax3),plot_func=plot_func,cols=bode_cols,unit_scale=unit_scale,**kw)
fig.tight_layout()
return axes
def compare_fits(data,gamry_params,py_params,model):
"""
Plot comparison of Nyquist and Bode plots for gamry fit and python fit
"""
w = data['Freq'].values
y = data.loc[:,['Zreal','Zimag']].values
weights = 1/(data['Zmod']).values
#Nyquist plot
fig, ax = plt.subplots(figsize=(8,6))
ax.scatter(data['Zreal'],-data['Zimag'],s=6,label='Measured')
Z_fc_gam = model(w,**gamry_params)
ax.plot(Z_fc_gam.real,-Z_fc_gam.imag,'k',label='Gamry fit')
Z_fc_py = model(w,**py_params)
ax.plot(Z_fc_py.real,-Z_fc_py.imag,'r',label='Python fit')
ax.legend()
ax.set_xlabel('$Z_{real}$ ($\Omega \cdot$cm$^2$)')
ax.set_ylabel('$-Z_{imag}$ ($\Omega \cdot$cm$^2$)')
#Bode plots
fig2, (ax1,ax2) = plt.subplots(1,2,figsize=(8,4))
ax1.semilogx(w,data['Zreal'],'.',label='Measured')
ax1.semilogx(w,Z_fc_gam.real,'k',label='Gamry fit')
ax1.semilogx(w,Z_fc_py.real,'r',label='Python fit')
ax1.set_title('Real')
ax1.set_xlabel('Frequency (Hz)')
ax1.set_ylabel('$Z_{real}$')
ax2.semilogx(w,-data['Zimag'],'.',label='Measured')
ax2.semilogx(w,-Z_fc_gam.imag,'k',label='Gamry fit')
ax2.semilogx(w,-Z_fc_py.imag,'r',label='Python fit')
ax2.set_title('Imag')
ax2.set_xlabel('Frequency (Hz)')
ax2.set_ylabel('-$Z_{imag}$')
fig2.tight_layout()
def plot_model(model,params,w=None,area=None,plot_type='all',plot_func='plot',axes=None,label='',unit_scale='auto',mark_peaks=False,c=None,**kw):
if w is None:
w = np.logspace(-2,6)
Z = np.array(model(w,**params))
data = pd.DataFrame(np.array([w,Z.real,Z.imag]).T,columns=['Freq','Zreal','Zimag'])
data['Zmod'] = ((Z*Z.conjugate())**0.5).real
data['Zphz'] = (180/np.pi)*np.arctan(Z.imag/Z.real)
# if area is not None:
# data['Zreal']*=area
# data['Zimag']*=area
# data['Zmod']*=area
if plot_type=='nyquist':
axes = plot_nyquist(data,ax=axes,label=label,plot_func=plot_func,area=area,unit_scale=unit_scale,c=c,**kw)
elif plot_type=='bode':
axes = plot_bode(data,axes=axes,label=label,plot_func=plot_func,area=area,c=c,**kw)
elif plot_type=='all':
if axes is None:
fig, axes = plt.subplots(1,3,figsize=(12,4))
plot_full_eis(data,axes=axes,label=label,plot_func=plot_func,area=area,unit_scale=unit_scale,c=c,**kw)
# plot_nyquist(data,ax=axes.ravel()[0],label=label,plot_func=plot_func,area=area,unit_scale=unit_scale,c=c,**kw)
# plot_bode(data,axes=axes.ravel()[1:],label=label,plot_func=plot_func,area=area,c=c,**kw)
else:
raise ValueError(f'Invalid plot type {plot_type}. Options are nyquist, bode, all')
if label != '':
if type(axes) in (tuple,list):
for tax in axes:
tax.legend()
elif type(axes)==np.ndarray:
for tax in axes.ravel():
tax.legend()
else:
axes.legend()
# plot model peak frequencies
if mark_peaks:
f_peaks = np.array(var_RC_peak_frequencies(params))
Z_peaks = model(f_peaks,**params)
peak_df = construct_eis_df(f_peaks,Z_peaks)
if plot_type=='nyquist':
plot_nyquist(peak_df,ax=axes,marker='x',s=50,unit_scale=unit_scale,area=area,c=c)
elif plot_type=='bode':
plot_bode(peak_df,axes=axes,marker='x',s=50,area=area,c=c)
elif plot_type=='all':
plot_nyquist(peak_df,ax=axes.ravel()[0],marker='x',s=50,unit_scale=unit_scale,area=area,c=c)
plot_bode(peak_df,axes=axes.ravel()[1:],marker='x',s=50,area=area,c=c)
return axes
def mark_model_peaks(params,model,area=None,plot_type='all',axes=None,label='',marker='x',s=50,c='r',unit_scale='auto',**kw):
"""
Mark peak RQ frequencies on Nyquist and/or Bode plots
Parameters:
params: dict of EC model parameters
model: EC model
area: cell area
plot_type: which type of plot(s) to generate. Options: 'nyquist', 'bode', 'all'
axes: axis or axes on which to plot
label: legend label for peak markers
marker: marker type
s: marker size
c: marker color
unit_scale: unit scale for Nyquist plot
kw: kwargs to pass to plt.scatter
"""
f_peaks = np.array(var_RC_peak_frequencies(params))
Z_peaks = model(f_peaks,**params)
peak_df = construct_eis_df(f_peaks,Z_peaks)
if plot_type=='nyquist':
axes = plot_nyquist(peak_df,ax=axes,label=label,area=area,unit_scale=unit_scale,marker=marker,s=s,c=c,**kw)
elif plot_type=='bode':
axes = plot_bode(peak_df,axes=axes,label=label,area=area,marker=marker,s=s,c=c,**kw)
elif plot_type=='all':
if axes is None:
fig, axes = plt.subplots(1,3,figsize=(12,4))
plot_nyquist(peak_df,ax=axes.ravel()[0],c='r',marker='x',s=50,label=label,unit_scale=unit_scale,area=area)
plot_bode(peak_df,axes=axes.ravel()[1:],c='r',marker='x',s=50,label=label,area=area)
else:
raise ValueError(f'Invalid plot type {plot_type}. Options are nyquist, bode, all')
if label != '':
if type(axes) in (tuple,list):
for tax in axes:
tax.legend()
elif type(axes)==np.ndarray:
for tax in axes.ravel():
tax.legend()
else:
axes.legend()
def plot_fit(data,params,model,f_model=None,axes=None,unit_scale='auto',area=None,bode_cols=['Zmod','Zphz'],mark_peaks=False,fit_color='k',fit_kw={},**data_kw):
w = data['Freq'].values
if f_model is None:
f_model = w
elif f_model == 'fill':
f_model = np.logspace(np.log10(np.min(w)),np.log10(np.max(w)), 100)
y = data.loc[:,['Zreal','Zimag']].values
weights = 1/(data['Zmod']).values
if axes is None:
fig,axes = plt.subplots(1,3,figsize=(9,3))
ax1,ax2,ax3 = axes
# fig = plt.figure(figsize=(8,8))
# ax1 = plt.subplot2grid((2,2),(0,0),colspan=2)
# ax2 = plt.subplot2grid((2,2),(1,0))
# ax3 = plt.subplot2grid((2,2),(1,1))
# axes = np.array([ax1,ax2,ax3])
else:
ax1,ax2,ax3 = axes.ravel()
fig = axes.ravel()[0].get_figure()
Z_fit = model(f_model,**params)
fit_df = pd.DataFrame(np.array([Z_fit.real,Z_fit.imag]).T,columns=['Zreal','Zimag'])
fit_df['Freq'] = f_model
fit_df['Zmod'] = (Z_fit*Z_fit.conjugate())**0.5
fit_df['Zphz'] = (180/np.pi)*np.arctan(Z_fit.imag/Z_fit.real)
if unit_scale=='auto':
unit_scale = get_unit_scale(data)
#Nyquist plot
#ax.scatter(data['Zreal'],-data['Zimag'],s=6,label='Measured')
plot_nyquist(data,label='Measured',ax=ax1,unit_scale=unit_scale,area=area,**data_kw)
plot_nyquist(fit_df,c=fit_color,ax=ax1,label='Fit',plot_func='plot',unit_scale=unit_scale,area=area,**fit_kw)
#ax1.plot(Z_fc.real,-Z_fc.imag,'k',label='Fit')
ax1.legend()
#Bode plots
plot_bode(data,axes=(ax2,ax3),label='Measured',area=area,cols=bode_cols,unit_scale=unit_scale,**data_kw)
plot_bode(fit_df,axes=(ax2,ax3),label='Fit',c=fit_color,plot_func='plot',area=area,cols=bode_cols,unit_scale=unit_scale,**fit_kw)
# plot model peak frequencies
if mark_peaks:
f_peaks = np.array(var_RC_peak_frequencies(params))
Z_peaks = model(f_peaks,**params)
peak_df = construct_eis_df(f_peaks,Z_peaks)
plot_nyquist(peak_df,ax=ax1,c=fit_color,marker='x',s=50,label='RQ Peak Frequencies',unit_scale=unit_scale,area=area)
plot_bode(peak_df,axes=(ax2,ax3),c=fit_color,marker='x',s=50,label='RQ Peak Frequencies',area=area,cols=bode_cols)
ax2.legend()
ax3.legend()
for ax in [ax2,ax3]:
# manually set x axis limits - sometimes matplotlib doesn't get them right
fmin = min(data['Freq'].min(),np.min(f_model))
fmax = max(data['Freq'].max(),np.max(f_model))
ax.set_xlim(fmin/5,fmax*5)
fig.tight_layout()
return axes
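# Example (hypothetical usage sketch; parameter values are illustrative, e.g.
# taken from a previous fit_ec_model call):
# data = read_eis_zdata('EIS_550C.DTA')
# params = {'HFR': 0.8, 'Lstray': 5e-7, 'R0': 1.2, 'Q0': 2e-3, 'n0': 0.85}
# plot_fit(data, params, Z_var_num_RC, f_model='fill')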
def jv_multiplot(files,color_by='T',aggregate=False,file_sequence=['file_type','T','aflow','cflow'],area=None,ax=None,label_color=None):
"""
Plot multiple j-V curves on same axes
Args:
files: files to plot
color_by: fieldname to determine series colors
aggregate: how to aggregate multiple files with same value of color_by field. Options:
False: plot all files, ordered by time
'max_pwr': plot file with max power
"""
if ax is None:
fig, ax = plt.subplots()
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
markers = ['.','^','v','s','D','*']
#temps = [int(os.path.basename(file).split('_')[1].replace('C','')) for file in files]
times = [get_timestamp(f) for f in files]
infos = [get_file_info(file,file_sequence) for file in files]
jvdf = pd.DataFrame(np.array([files,times]).T,columns=['file','time'])
jvdf = jvdf.join(pd.DataFrame(infos))
if label_color is None:
label_color = dict(zip(jvdf[color_by].unique(),default_colors))
if color_by=='T':
label_units = '$^\circ$C'
else:
label_units = ''
for label, gdf in jvdf.groupby(color_by):
if len(gdf) > 1:
if aggregate==False:
gdf = gdf.sort_values('time')
gdf.index = np.arange(len(gdf))
for i, row in gdf.iterrows():
df = read_jv_data(row['file'])
plot_jv(df,area=area,plot_pwr=True,ax=ax,label='{}{} ({})'.format(label,label_units,i+1),
marker=markers[i],markersize=5,c=label_color[label],
pwr_kw=dict(ls=':',marker=markers[i],markersize=3,c=label_color[label]))
else:
if aggregate=='max_pwr':
idx = gdf['file'].map(lambda x: read_jv_data(x)['Pwr'].abs().max()).idxmax()
elif aggregate=='min_pwr':
idx = gdf['file'].map(lambda x: read_jv_data(x)['Pwr'].abs().max()).idxmin()
elif aggregate=='first':
idx = gdf['file'].map(lambda x: get_timestamp(x)).idxmin()
elif aggregate=='last':
idx = gdf['file'].map(lambda x: get_timestamp(x)).idxmax()
else:
raise ValueError(f'Invalid aggregate method {aggregate} specified')
df = read_jv_data(gdf.loc[idx,'file'])
plot_jv(df,area=area,plot_pwr=True,ax=ax,label=f'{label}{label_units}',marker=markers[0],markersize=5,c=label_color[label],
pwr_kw=dict(ls=':',marker=markers[0],markersize=5,c=label_color[label]))
else:
df = read_jv_data(gdf['file'].min())
plot_jv(df,area=area,plot_pwr=True,ax=ax,label=f'{label}{label_units}',marker=markers[0],markersize=5,c=label_color[label],
pwr_kw=dict(ls=':',marker=markers[0],markersize=5,c=label_color[label]))
def generate_plots(plot_types,datadir=None,savefigs=False,savedir='./plots',area=None,
ocv_kw={},nyquist_kw={},bode_kw={},jv_kw={}):
"""
Generate basic plots for files in directory
Args:
plot_types: list of plot types to generate. Options: 'nyquist','bode','ocv','jv'
datadir: data directory
savefigs: if True, save generated figures
area: cell area (cm2)
plot_kw:
"""
#allowed_plot_types = ['nyquist','bode','ocv','jv']#,'nyquist-bode'
if datadir is None:
datadir = os.getcwd()
plotdir = os.path.join(datadir,savedir)
if not os.path.exists(plotdir):
os.mkdir(plotdir)
# get cell info from datadir
cell = get_cell_name(datadir)
# set kw defaults and update with any user-specified params
# so that user doesn't have to re-enter all defaults to change one thing
jv_default={'plot_pwr':True,'marker':'.','pwr_kw':{'ls':':','marker':'.'}}
jv_default.update(jv_kw)
jv_kw = jv_default
ocv_default = {'filter_func':lambda x: x[0:3] in ('OCV','EIS') and x[-3:]=='DTA'}
ocv_default.update(ocv_kw)
ocv_kw = ocv_default
for plot_type in plot_types:
if plot_type in ['nyquist','bode']:
start_str = 'EIS'
elif plot_type=='jv':
start_str = 'PWRPOLARIZATION'
if plot_type=='ocv':
plot_ocv(datadir,**ocv_kw)
plt.title(cell,wrap=True)
if savefigs is True:
fig = plt.gcf()
fig.savefig(os.path.join(plotdir,'OCV_plot.png'),dpi=500)
else:
files = glob.glob(os.path.join(datadir,start_str + '*.DTA'))
for file in files:
info = get_file_info(file)
if plot_type=='nyquist':
df = read_eis_zdata(file)
ax = plot_nyquist(df,area=area,**nyquist_kw)
ax.text(0.97,0.05,'{}$^\circ$C, {}, {}'.format(info['T'],info['aflow'],info['cflow']),transform=ax.transAxes,ha='right')
elif plot_type=='bode':
df = read_eis_zdata(file)
axes = plot_bode(df,area=area,**bode_kw)
ax = axes[0]
ax.text(0.03,0.9,'{}$^\circ$C, {}, {}'.format(info['T'],info['aflow'],info['cflow']),transform=ax.transAxes)
elif plot_type=='jv':
df = read_jv_data(file)
ax = plot_jv(df,area=area,**jv_kw)
ax.text(0.5,0.05,'{}$^\circ$C, {}, {}'.format(info['T'],info['aflow'],info['cflow']),transform=ax.transAxes,ha='center')
ax.set_title(cell,wrap=True)
if savefigs is True:
fig = plt.gcf()
fname = os.path.basename(file)
fig.savefig(os.path.join(plotdir,fname.replace('DTA','png')),dpi=500)
def plot_eis_prop(eis_df,property_col,label='',aggregate=['start','end','max','mean'],ax=None,**plt_kw):
"""
Plot EIS-derived property as a function of temperature
Args:
eis_df: DataFrame with time, temperature, and property to plot
property_col: column name of property to plot
label: label for legend. Aggregate name will be affixed to end of provided label
aggregate: list of aggregate functions to use to aggregate multiple property values for each temperature.
Options: start, end, or any built-in pandas aggregate function
ax: axis on which to plot
plt_kw: plot keyword args
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
grp_df = eis_df.groupby('T')
temps = np.array(list(grp_df.groups.keys()))
prop_agg = {}
for agg in aggregate:
prop_agg[agg] = aggregate_prop(eis_df,'T',property_col,agg) #np.zeros(len(grp_df))
# for i, (T, df) in enumerate(grp_df):
# for agg in aggregate:
# if agg in ['start','end']:
# if agg=='start':
# agg_time = df['time'].min()
# else:
# agg_time = df['time'].max()
# sigma_agg[agg][i] = float(df[df['time']==agg_time][property_col])
# else:
# sigma_agg[agg][i] = getattr(df[property_col],agg)()
T_inv = 1000/(273+temps)
for agg, prop in prop_agg.items():
if label in('',None):
lab_prefix = ''
else:
lab_prefix = label + ' '
ax.semilogy(T_inv,prop,label=lab_prefix+agg,**plt_kw)
if label is not None:
ax.legend()
#label untransformed T on top axis
if ax is not None:
#get twin ax if already exists
for other_ax in ax.figure.axes:
if other_ax is ax:
ax2 = None
elif other_ax.bbox.bounds == ax.bbox.bounds:
ax2 = other_ax
break
else:
ax2 = None
if ax2 is None:
#create twin ax
ax2 = ax.twiny()
#set to same scale
ax2.set_xlim(ax.get_xlim())
ax2.set_xticks(T_inv)
ax2.set_xticklabels(temps.astype(int))
ax.set_xlabel(r'$1000/T \ (\mathrm{K}^{-1})$')
ax2.set_xlabel(r'$T \ (^\circ \mathrm{C})$')
#ax.set_ylabel(r'$\sigma$ (S/cm)')
fig.tight_layout()
return ax
def plot_arrhenius_fit(df,property_col,aggregate,ax=None,**plt_kw):
G_act, fit = calc_G_act(df,property_col,aggregate,return_fit=True)
if ax is None:
fig, ax = plt.subplots()
temps = np.unique(df['T'])
T_inv = 1/(temps + 273)
y_fit = np.exp(np.polyval(fit,T_inv))
ax.plot(1000*T_inv,y_fit,**plt_kw)
if plt_kw.get('label','') != '':
ax.legend()
return ax
#---------------------------------
# Equivalent circuit modeling
#---------------------------------
def Z_cpe(w,Q,n):
"Impedance of CPE"
#Q=Y_0, n=a in Gamry
#Z = (1/(Q*w**n))*np.exp(-np.pi*n*1j/2)
Z = 1/((1j*w*2*np.pi)**n*Q)
#equiv to: Z = (1/(Q*w**n))*1j**(-n)
return Z
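# Note: limiting cases of the CPE as defined above: n = 1 reduces to an ideal
# capacitor, Z = 1/(j*2*pi*f*Q); n = 0 reduces to a pure resistor, Z = 1/Q.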
def Z_C(w,C):
return 1/(2*np.pi*1j*w*C)
def Z_L(w,L):
"Impedance of inductor"
return w*L*1j*2*np.pi
def Z_O(w,Y,B):
"Impedance of O diffusion element (porous bounded Warburg)"
return (1/(Y*(1j*w*2*np.pi)**0.5))*np.tanh(B*(1j*w*2*np.pi)**0.5)
def Z_fO(w,Y,t0,nf):
"Impedance of fractal O diffusion element (fractal porous bounded Warburg)"
return (1/(Y*(1j*t0*w*2*np.pi)**nf))*np.tanh((1j*w*t0*2*np.pi)**nf)
def Z_ger(w,Y,t0):
"Gerischer impedance"
return 1/(Y*np.sqrt(1 + 1j*2*np.pi*w*t0))
def Z_HN(w,Rct,t0,nu,beta):
"Havriliak-Negami impedance"
return Rct/(1+(1j*2*np.pi*w*t0)**nu)**beta
def Z_par(Z1,Z2):
"parallel impedance"
return 1/(1/Z1 + 1/Z2)
def Z_fuelcell(w,HFR,Rf_c,Yo_c,a_c,Rf_a,Yo_a,a_a,Lstray):
Z_a = Z_par(Z_cpe(w,Yo_a,a_a), Rf_a)
Z_c = Z_par(Z_cpe(w,Yo_c,a_c), Rf_c)
return Z_L(w,Lstray) + HFR + Z_a + Z_c
def Z_var_num_RC(w,HFR,Lstray,**RC_params):
"""
Impedance of circuit with 1-n parallel RC circuits in series with a resistor (HFR) and an inductor (Lstray)
Args:
w: frequency (Hz)
HFR: high-frequency resistance
Lstray: inductance
RC_params: parameters for each parallel RC circuit. keys: R, Q, n
"""
#Z_RC = [Z_par(Z_cpe(w,p['Q'],p['n']), p['R']) for p in RC_params]
num_RC = int(len(RC_params)/3)
Z_RC = [Z_par(Z_cpe(w,RC_params[f'Q{i}'],RC_params[f'n{i}']), RC_params[f'R{i}']) for i in range(num_RC)]
return Z_L(w,Lstray) + HFR + np.sum(Z_RC,axis=0)
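# Example (hypothetical values): RC_params are passed as flat keyword arguments
# indexed from 0, three per element (R, Q, n), e.g. for two elements:
# Z = Z_var_num_RC(f, HFR=0.5, Lstray=1e-7,
#                  R0=1.0, Q0=1e-3, n0=0.9, R1=2.0, Q1=1e-2, n1=0.7)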
def Z_var_num_RC_RL(w,HFR,Lstray,R_L,**RC_params):
"""
Impedance of circuit with 1-n parallel RC circuits in series with a resistor (HFR) and parallel RL element (R_L, Lstray)
Args:
w: frequency (Hz)
HFR: high-frequency resistance
Lstray: inductance
R_L: resistance of resistor in parallel with inductor
RC_params: parameters for each parallel RC circuit. keys: R, Q, n
"""
#Z_RC = [Z_par(Z_cpe(w,p['Q'],p['n']), p['R']) for p in RC_params]
num_RC = int(len(RC_params)/3)
Z_RC = [Z_par(Z_cpe(w,RC_params[f'Q{i}'],RC_params[f'n{i}']), RC_params[f'R{i}']) for i in range(num_RC)]
return Z_par(R_L,Z_L(w,Lstray)) + HFR + np.sum(Z_RC,axis=0)
def Z_var_num_RC_noL(w,HFR,**RC_params):
"""
Impedance of circuit with 1-n parallel RC circuits in series with a resistor (HFR) only
Args:
w: frequency (Hz)
HFR: high-frequency resistance
RC_params: parameters for each parallel RC circuit. keys: R, Q, n
"""
#Z_RC = [Z_par(Z_cpe(w,p['Q'],p['n']), p['R']) for p in RC_params]
num_RC = int(len(RC_params)/3)
Z_RC = [Z_par(Z_cpe(w,RC_params[f'Q{i}'],RC_params[f'n{i}']), RC_params[f'R{i}']) for i in range(num_RC)]
return HFR + np.sum(Z_RC,axis=0)
def Z_var_num_RC_RL_LRC(w,HFR,Lstray,R_L,R_lrc,L_lrc,C_lrc,**RC_params):
"""
Impedance of circuit with 1-n parallel RC circuits in series with a resistor (HFR) and parallel RL element (R_L, Lstray) and a parallel LRC element (R_lrc and C_lrc in parallel with L_lrc)
LRC circuit allows fitting of low-frequency curl that goes below x-axis and moves to left with decreasing frequency
Args:
w: frequency (Hz)
HFR: high-frequency resistance
Lstray: inductance
R_L: resistance of resistor in parallel with inductor
RC_params: parameters for each parallel RC circuit. keys: R, Q, n
"""
#Z_RC = [Z_par(Z_cpe(w,p['Q'],p['n']), p['R']) for p in RC_params]
num_RC = int(len(RC_params)/3)
Z_RC = [Z_par(Z_cpe(w,RC_params[f'Q{i}'],RC_params[f'n{i}']), RC_params[f'R{i}']) for i in range(num_RC)]
return Z_par(R_L,Z_L(w,Lstray)) + Z_par(Z_L(w,L_lrc), Z_C(w,C_lrc) + R_lrc) + HFR + np.sum(Z_RC,axis=0)
def Z_var_num_RC2(w,HFR,Lstray,**RC_params):
"""
Impedance of circuit with 1-n parallel RC circuits in series with a resistor (HFR) and an inductor (Lstray)
Args:
w: frequency (Hz)
HFR: high-frequency resistance
Lstray: inductance
RC_params: parameters for each parallel RC circuit. keys: R, Q, n, on (order matters!)
"""
def RC_switch(on):
if on >= 1:
return 1
else:
return 0
def Z_RC_element(w,el_params):
# params: R, Q, n, on
if RC_switch(el_params[3])==0:
return np.zeros_like(w)
else:
return Z_par(el_params[0],Z_cpe(w,el_params[1],el_params[2]))*RC_switch(el_params[3])
num_RC = int(len(RC_params)/4)
Z_rc = np.sum([Z_RC_element(w,list(RC_params.values())[i*4:i*4+4]) for i in range(num_RC)],axis=0)
return Z_L(w,Lstray) + HFR + Z_rc
def construct_eis_df(f,Z):
"""
Construct dataframe from complex impedance array
Parameters:
f: frequency array
Z: complex impedance array
"""
df = pd.DataFrame(f,columns=['Freq'])
df['Zreal'] = Z.real
df['Zimag'] = Z.imag
df['Zmod'] = ((Z*Z.conjugate())**0.5).real
df['Zphz'] = (180/np.pi)*np.arctan(Z.imag/Z.real)
return df
def chi_sq(y, y_fit, weights):
"""
Weighted sum of squared residuals
Parameters:
-----------
y: actual data points (nxp)
y_fit: fitted data points (nxp)
weights: weights to apply to squared residuals. n-vector or nx2
"""
if len(weights.shape)==1:
weights = weights.reshape(-1,1)
elif weights.shape[1]!=2:
raise ValueError('Invalid shape for weights: {}'.format(weights.shape))
x2 = np.sum(np.sum((y-y_fit)**2*weights**2,axis=1))
# x2 = np.sum(np.sum((y-y_fit)**2,axis=1)*weights**2)
return x2
def ec_chi_sq(params,w,y,weights,model,normalize='deg'):
"""
Chi squared for equivalent circuit model.
Parameters:
-----------
params: dict of model parameters
w: frequencies
y: measured impedance data: nx2 matrix of Zreal, Zimag
weights: weights for squared residuals (n-vector)
model: equivalent circuit model
normalize: normalization method. Options:
'deg': normalize by degrees of freedom, i.e. len(y) - len(params)
'n': normalize by number of observations, i.e. len(y)
False: don't normalize
"""
Zfit = model(w,**params)
y_fit = np.array([Zfit.real,Zfit.imag]).T
x2 = chi_sq(y,y_fit,weights) #+ np.sum((x < 0).astype(int)*1000)
if normalize=='deg':
x2 /= (len(y) - len(params))
elif normalize=='n':
x2 /= len(y)
elif normalize is not False:
raise ValueError(f"Invalid normalize option {normalize}. Options are 'deg', 'n', False")
return x2
def chi_sq_from_df(df,params,model,normalize='deg',weighting='modulus'):
"""
Convenience function for getting chi squared from dataframe
"""
w = df['Freq'].values
y = df[['Zreal','Zimag']].values
if weighting=='modulus':
weights = 1/(df['Zmod']).values
elif weighting=='proportional':
weights = 1/y
elif weighting=='hybrid_modulus':
weights = 1/(np.abs(y)*df['Zmod'].values.reshape(-1,1))**0.5
elif weighting=='unity':
weights = np.ones_like(w)
else:
raise ValueError("Invalid weighting {}. Options are 'modulus', 'proportional', 'hybrid_modulus', 'unity'".format(weighting))
return ec_chi_sq(params,w,y,weights,model,normalize=normalize)
def fuelcell_chi_sq(x,w,y,weights,est_HFR=0,alpha=0,normalize='deg'):
"""
Regularized chi squared for fuel cell EC model. Used for Nelder-Mead optimization
Regularization penalty is difference between model HFR and estimated HFR
Parameters:
-----------
x: vector of square roots of model parameters (square root necessary to bound Nelder-Mead to positive values)
Order: HFR, Rf_c, Yo_c, a_c, Rf_a, Yo_a, a_a, Lstray
w: frequencies
y: measured impedance data: nx2 matrix of Zreal, Zimag
weights: weights for squared residuals (n-vector)
est_HFR: estimated HFR. If alpha=0, this does not matter
alpha: regularization weight
"""
varnames = ['HFR','Rf_c','Yo_c','a_c','Rf_a','Yo_a','a_a','Lstray']
params = dict(zip(varnames,x**2))
return ec_chi_sq(params,w,y,weights,Z_fuelcell,normalize) + alpha*(params['HFR'] - est_HFR)**2
def estimate_HFR(data,n_pts_extrap=20,alpha=2e-4,min_r2=0,verbose=0,plot_fit=False,ax=None):
"""
Estimate HFR from impedance data by interpolating Zreal intercept
If data does not cross Zreal axis, extrapolate using regularized-degree polynomial fit
Parameters:
data: dataframe of impedance data read from Gamry DTA file
n_pts_extrap: if extrapolation required, number of high-frequency data points to fit
alpha: if extrapolation required, regularization strength for fit
verbose: 0: no messages; 1: print whether fitted or interpolated; 2: print whether fitted or interpolated and fit info
"""
end_idx = data[data['Zimag']<0].index.min()
if end_idx==0:
#if data starts above x axis, use first n points for fitting and extrapolation
fit_data = data.iloc[:n_pts_extrap,:]
extrap_flag = True
elif np.isnan(end_idx):
#if data never gets above x axis, use last n points for fitting and extrapolation
fit_data = data.iloc[-n_pts_extrap:,:]
extrap_flag = True
else:
extrap_flag = False
if extrap_flag:
#if high-frequency data does not cross Zreal axis, fit and extrapolate
#weighted fit - give higher weight to points closer to intercept
fit = reg_degree_polyfit(fit_data['Zreal'],fit_data['Zimag'],weights=(1/fit_data['Zimag'])**2,alpha=alpha,min_r2=min_r2)
roots = np.roots(fit)
real_roots = np.real(roots[np.iscomplex(roots)==False])
min_idx = np.abs(real_roots - fit_data['Zreal'].values[0]).argmin()
HFR = real_roots[min_idx]
if verbose==1:
print('Extrapolated HFR')
if verbose==2:
r2 = fit_r_squared(fit_data['Zreal'],fit_data['Zimag'],fit,w=(1/fit_data['Zimag'])**2)
print('Extrapolated HFR. Degree = {}, r2 = {}'.format(len(fit)-1,round(r2,5)))
else:
#else, simply sort and interpolate
#limit data to points up to first negative Zimag to avoid oscillation when sorting by Zimag
srt = data.loc[:end_idx,:].sort_values(by='Zimag',axis=0)
HFR = np.interp(0,srt['Zimag'],srt['Zreal'])
if verbose in (1,2):
print('Interpolated HFR')
if plot_fit is True:
if extrap_flag:
if ax is None:
fig, ax = plt.subplots()
ax.scatter(data['Zreal'][:n_pts_extrap+3],-data['Zimag'][:n_pts_extrap+3],s=10)
x = np.arange(HFR,fit_data['Zreal'].max(),0.1)
y_fit = np.polyval(fit,x)
ax.plot(x,-y_fit,ls='-')
deg = len(fit)
r2 = round(fit_r_squared(fit_data['Zreal'],fit_data['Zimag'],fit,w=(1/fit_data['Zimag'])**2),4)
rHFR = round(HFR,3)
ax.text(0.1,0.9,f'$R_{{\Omega}}$: {rHFR} $\Omega$\nDegree: {deg}\n$r^2$: {r2}',transform=ax.transAxes,va='top')
ax.set_ylabel(r'$-Z_{\mathrm{imag}} \ (\Omega)$')
ax.set_xlabel(r'$Z_{\mathrm{real}} \ (\Omega)$')
return HFR
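# Example (hypothetical usage sketch; the file name is illustrative):
# data = read_eis_zdata('EIS_550C.DTA')
# hfr = estimate_HFR(data, n_pts_extrap=20, plot_fit=True, verbose=1)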
def fit_ec_model(data,model,init_params=None,normalize='deg',alpha=0,n_restarts=10,est_HFR=True,weighting='modulus',return_result=False,simplex_params={},random_seed=None,**est_HFR_kw):
"""
Fit equivalent circuit model using Nelder-Mead downhill simplex method.
Adds random noise to the init_params (+/- 50%) and uses these as starting guesses for optimization.
Minimizes objective_func with regularization penalty for deviation of fitted HFR from estimated HFR
Runs with n_restarts different initial parameter sets and keeps best result to attempt to find global minimum.
Parameters:
-----------
data: dataframe of impedance data containing Freq, Zreal, Zimag, and Zmod columns
objective_func: cost function to minimize. Args: (x,w,y,weights, eHFR, alpha)
init_params: dict of model parameters from which to start optimization
alpha: regularization factor for HFR deviance from estimated HFR
n_restarts: number of times to restart optimization from randomized initial parameters
est_HFR: if True, interpolate Zdata to estimate HFR and use this estimate in init_params
est_HFR_kw: kwargs to pass to estimate_HFR
Returns scipy.optimize output for best result. result['x']**2 gives optimized parameters
"""
w = data['Freq'].values
y = data.loc[:,['Zreal','Zimag']].values
if weighting=='modulus':
weights = 1/(data['Zmod']).values
elif weighting=='proportional':
weights = 1/y
elif weighting=='hybrid_modulus':
weights = 1/(np.abs(y)*data['Zmod'].values.reshape(-1,1))**0.5
elif weighting=='unity':
weights = np.ones_like(w)
else:
raise ValueError("Invalid weighting {}. Options are 'modulus', 'proportional', 'hybrid_modulus', 'unity'".format(weighting))
if init_params is None:
# get param names from model argspec
param_names = inspect.getfullargspec(model)[0]
param_names.remove('w')
init_params = dict(zip(param_names,np.ones(len(param_names))))
else:
param_names = list(init_params.keys())
init_params = init_params.copy()
# # ensure that order of init_params matches argspec
# init_params = {k:init_params[k] for k in param_names}
def objective_func(x,w,y,weights, eHFR, alpha):
params = dict(zip(param_names,x**2))
cs = ec_chi_sq(params,w,y,weights,model,normalize=normalize)
# apply a hefty penalty to prevent non-physical n values
n_vals = np.array([v for k,v in params.items() if k[0]=='n'])# or k=='nu' or k=='beta')])
n_penalty = sum(n_vals[n_vals > 1])*1e3*cs
return cs + n_penalty
#estimate HFR if specified
if est_HFR==True:
eHFR = estimate_HFR(data,**est_HFR_kw)
init_params['HFR'] = eHFR
else:
eHFR = 0
if alpha!=0:
print('''Warning: alpha is non-zero but HFR is not being estimated. This should only be run this way if the HFR in init_params is a reasonably accurate estimate of the actual HFR.
Otherwise, set alpha to 0 or est_HFR to True''')
start_vals = np.array(list(init_params.values()))
simplex_defaults = {'shift_factor':2,'n_restarts':5}
simplex_defaults.update(simplex_params)
simplex_params = simplex_defaults
# randomly shift the starting parameters and optimize to attempt to find the global min
best_fun = np.inf
best_steps = 0
# initialize RandomState
randstate = np.random.RandomState(random_seed)
for i in range(simplex_params['n_restarts']):
if i==0:
# on first attempt, just use the starting parameters determined above
init_vals = start_vals
else:
# on subsequent attempts, randomly shift the starting paramters
rands = randstate.rand(len(start_vals))
# multiply or divide the start_vals by random factors up to shift_factor
# transform linear [0,1) range to logarithmic [1/shift_factor,shift_factor) range
factors = (1/simplex_params['shift_factor'])*np.exp(rands*2*np.log(simplex_params['shift_factor']))
init_vals = start_vals*factors # 0.95*(rands/np.max(np.abs(rands)))*start_vals
#print(init_vals)
result = minimize(fun=objective_func,x0=init_vals**(1/2), args=(w,y,weights,eHFR,alpha), method='Nelder-Mead', #tol=1e-10,
options=dict(maxiter=10000,adaptive=True))#,bounds=[(0,None)]*len(init_vals))
#print(result)
if result.fun < best_fun:
#init_vals = result.x.copy()**2
best_fun = copy(result.fun)
best_result = result.copy()
best_steps = i + 1
#else:
#init_vals = prev_vals
# print(dict(zip(init_params.keys(),result['x']**2)))
# print('fun: ',result.fun)
print('Best result {:.2e} achieved within {} restarts'.format(best_fun,best_steps))
best_params = dict(zip(param_names,best_result['x']**2))
if return_result:
return best_params, best_result
else:
return best_params
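# Example (hypothetical usage sketch): fit an L-R-(RQ) circuit to measured
# data; the file name and starting values below are illustrative.
# data = read_eis_zdata('EIS_550C.DTA')
# init = {'HFR': 1, 'Lstray': 1e-6, 'R0': 1, 'Q0': 1e-3, 'n0': 0.8}
# best_params, result = fit_ec_model(data, Z_var_num_RC, init_params=init,
#                                    weighting='modulus', return_result=True)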
def regfit_ec_model(data,objective_func,init_params,alpha,n_restarts=10,est_HFR=True,weighting='inverse',**est_HFR_kw):
# this should be incorporated into the standard fit_ec_model by simply adding a relax option
"""
Fit equivalent circuit model using Nelder-Mead downhill simplex method with initial regularization, but final fit unregularized
First runs n_restarts optimizations with randomized init_params and HFR regularization penalty, and keeps best regularized result.
Uses best regularized parameters as starting guess for unregularized final fit
Parameters:
-----------
data: dataframe of impedance data containing Freq, Zreal, Zimag, and Zmod columns
objective_func: cost function to minimize. Args: (x,w,y,weights, eHFR, alpha)
init_params: dict of model parameters from which to start optimization
alpha: regularization factor for HFR deviance from estimated HFR
n_restarts: number of times to restart optimization from randomized initial parameters
est_HFR: if True, interpolate Zdata to estimate HFR and use this estimate in init_params
Returns scipy.optimize output for final result. result['x']**2 gives optimized parameters
"""
w = data['Freq'].values
y = data.loc[:,['Zreal','Zimag']].values
if weighting=='inverse':
weights = 1/(data['Zmod']).values
elif weighting=='equal':
weights = np.ones_like(w)
else:
raise ValueError("Invalid weighting {}. Options are 'inverse', 'equal'".format(weighting))
#optimize regularized fit
best_result = fit_ec_model(data,objective_func,init_params,alpha,n_restarts,est_HFR,weighting,**est_HFR_kw)
#use best regularized result as starting point for unregularized fit
unreg_init = best_result['x'].copy()
result = minimize(fun=objective_func,x0=unreg_init, args=(w,y,weights,0,0), method='Nelder-Mead', #tol=1e-10,
options=dict(maxiter=10000,adaptive=True))#,bounds=[(0,None)]*len(init_vals))
print(f'Unregularized result: {result.fun}')
return result
#----------------------------
#variable RC circuit fitting
#----------------------------
def calculate_weights(data,weighting='modulus',split_character=0.5):
"""Calculate weights for complex impedance chi squared"""
f = data['Freq'].values
y = data.loc[:,['Zreal','Zimag']].values
if weighting=='modulus':
weights = 1/(data['Zmod']).values
elif weighting=='proportional':
weights = 1/(y**2)**0.5
elif weighting=='hybrid_modulus':
mod_root = np.abs(y)**split_character * data['Zmod'].values.reshape(-1,1)**(1-split_character)
weights = 1/((mod_root)**2)**0.5
# weights = 1/(np.abs(y)*data['Zmod'].values.reshape(-1,1))**0.5
elif weighting=='unity':
weights = np.ones_like(f)
else:
raise ValueError("Invalid weighting {}. Options are 'modulus', 'proportional', 'hybrid_modulus', 'unity'".format(weighting))
return weights
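# Weighting reference (worked example with hypothetical numbers): for a point
# with Zreal = 3 and Zimag = -4 (Zmod = 5), 'modulus' weights both components
# by 1/5, 'proportional' weights them by 1/3 and 1/4, and 'hybrid_modulus'
# with split_character=0.5 weights them by 1/sqrt(3*5) and 1/sqrt(4*5).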
def evaluate_param_window(data,params,model,param,normalize='n',weighting='modulus',bounds=(0.95,1.05),n_points=10):
factors = np.linspace(bounds[0],bounds[1],n_points)
param_vals = factors*params[param]
mod_params = params.copy()
func_vals = np.empty(len(factors))
for i,pv in enumerate(param_vals):
mod_params[param] = pv
func_vals[i] = chi_sq_from_df(data,mod_params,model,normalize,weighting)
return param_vals, func_vals
def plot_param_windows(data,params,model,plot_params='all',normalize='n',weighting='modulus',bounds=(0.95,1.05),n_points=10,ncol=3,subplot_dims=(3.5,3),sharey=False):
if plot_params=='all':
plot_params = list(params.keys())
nrow = int(np.ceil(len(plot_params)/ncol))
fig, axes = plt.subplots(nrow,ncol,figsize=(ncol*subplot_dims[0],nrow*subplot_dims[1]),sharey=sharey)
for param,ax in zip(plot_params,axes.ravel()):
pv,fv = evaluate_param_window(data,params,model,param,normalize,weighting,bounds,n_points)
ax.plot(pv,fv)
ax.set_xlabel(param)
ax.set_ylabel('Error')
ax.ticklabel_format(scilimits=(-3,3))
for ax in axes.ravel()[len(plot_params):]:
ax.axis('off')
fig.tight_layout()
def get_model_func(model):
nonRC_param_names = inspect.getfullargspec(model)[0]
nonRC_param_names.remove('w')
def model_func(w,*args):
params = dict(zip(nonRC_param_names,args))
RC_param_vals = args[len(nonRC_param_names):]
num_RC = int(len(RC_param_vals)/3)
RC_param_names = sum([[f'R{i}',f'Q{i}',f'n{i}'] for i in range(num_RC)],[])
RC_params = dict(zip(RC_param_names,RC_param_vals))
params.update(RC_params)
Z_model = model(w,**params)
return np.hstack([Z_model.real,Z_model.imag])
return model_func
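# Example (hypothetical usage sketch): the returned function has signature
# f(w, *param_values) and returns hstacked [real, imag] values, which is the
# form scipy.optimize.curve_fit expects. Initial values below are illustrative.
# from scipy.optimize import curve_fit
# func = get_model_func(Z_var_num_RC_RL)
# ydata = np.hstack([data['Zreal'].values, data['Zimag'].values])
# p0 = [1, 1e-6, 10, 1, 1e-3, 0.8]   # HFR, Lstray, R_L, R0, Q0, n0
# popt, pcov = curve_fit(func, data['Freq'].values, ydata, p0=p0)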
def model_func(w,HFR,Lstray,R_L,*RC_params):
"""
Impedance of circuit with 1-n parallel RC circuits in series with a resistor (HFR) and parallel RL element (R_L, Lstray)
Args:
w: frequency (Hz)
HFR: high-frequency resistance
Lstray: inductance
R_L: resistance of resistor in parallel with inductor
RC_params: flattened positional parameters for each parallel RC circuit, in order R, Q, n
"""
#Z_RC = [Z_par(Z_cpe(w,p['Q'],p['n']), p['R']) for p in RC_params]
num_RC = int(len(RC_params)/3)
Z_RC = [Z_par(RC_params[i*3],Z_cpe(w,RC_params[i*3+1],RC_params[i*3+2])) for i in range(num_RC)]
Z_model = Z_par(R_L,Z_L(w,Lstray)) + HFR + np.sum(Z_RC,axis=0)
return np.hstack([Z_model.real,Z_model.imag])
def fit_var_RC(data,alpha,max_fun,model=Z_var_num_RC,init_params=None,max_L=1e-5,min_geo_gain=5,min_ari_gain=0.05,min_num_RC=1,max_num_RC=3,est_HFR=True,relax=False,
method='simplex',direction='ascending',early_stop=True,
err_peak_log_sep=1,weighting='modulus',weight_split_character=0.5,frequency_bounds=None,random_seed=None,
simplex_params={'shift_factor':2,'n_restarts':5},grid_search_params={'grid_density':3,'n_best':10},
global_params={'algorithm':'basinhopping','n_restarts':1,'shift_factor':2,'algorithm_kwargs':{}},
return_info=False,return_history=False,**est_HFR_kw):
"""
Fit equivalent circuit model with variable number of parallel RC elements using Nelder-Mead downhill simplex method
Uses grid search or random parameter sampling to find a global minimum
Increase the number of RC elements until the target objective function is achieved
Parameters:
data: dataframe of impedance data containing Freq, Zreal, Zimag, and Zmod columns
alpha: regularization factor for HFR deviance from estimated HFR
max_fun: maximum acceptable value of objective function. RC elements will be added until the objective function is less than or equal to this value
model: EC model to fit. Options: Z_var_num_RC, Z_var_num_RC_RL
init_params: dict of model parameters from which to start optimization. Can contain parameters for any number of RC elements
max_L: maximum allowable inductance
max_num_RC: maximum number of RC elements to allow
min_geo_gain: minimum geometric improvement in objective function required to allow addition of an RC element (i.e. min_geo_gain=5 requires a 5-fold improvement in objective function to add an element)
min_ari_gain: minimum arithmetic improvement in objective function required to allow addition of an RC element (i.e. min_ari_gain=0.05 requires an improvement of 0.05 in objective function to add an element)
If both min_geo_gain and min_ari_gain are specified, any element addition that satisfies EITHER condition will be kept (not both!)
This allows for fitting noisy spectra, for which adding an additional element may significantly improve the fit with a large arithmetic gain but relatively small geometric gain,
as well as cleaner spectra, where an additional element may result in a small arithmetic gain due to the small value of the objective function, but a large geometric gain.
n_restarts (via simplex_params or global_params): number of times to restart optimization from randomized initial parameters
est_HFR: if True, interpolate Zdata to estimate HFR and use this estimate in init_params
relax: if True, perform a final parameter optimization without HFR regularization, using the best regularized fit as the starting point. Default False
method: optimization method. Options:
'simplex': Nelder-Mead simplex method. Can be run from multiple starting points in parameter space in order to increase likelihood of finding global min
'global': Use a global optimization method. Recommended method
'grid_search': Use a grid search to explore parameter space, then use several of the best parameter sets as starting points for local Nelder-Mead optimization. Not recommended
direction: direction for circuit element addition/subtraction
'ascending': start at min_num_RC and add elements up to max_num_RC. Best for methods 'simplex', 'curve_fit'
'descending': start at max_num_RC + 1 and remove elements down to min_num_RC. May be useful for method 'global'
'overshoot': start at min_num_RC and add elements up to max_num_RC + 1, then remove the final element and refit at max_num_RC. May be useful for method 'global'
early_stop: if True, stop increasing num_RC if 2 consecutive element additions fail to produce a qualifying reduction of the objective function
err_peak_log_sep: minimum logarithmic (base 10 - order of magnitude) frequency separation between error peaks. Used for identifying maximum error when initializing next RQ element
shift_factor: maximum factor by which to multiply/divide initial parameters if using random parameter sampling. E.g. a value of 2 allows initial parameter values to at most be doubled or halved
weighting: weighting to use for fit. Options:
'unity': weight = 1
'modulus': weight = 1/Zmod
'proportional': real weight = 1/Zreal, imag weight = 1/Zimag
'hybrid_modulus': mix between regular modulus and split modulus:
real weight = (((1/Zmod)^(1-split_character)*(1/Zreal)^split_character)^2)^1/2
imag weight = (((1/Zmod)^(1-split_character)*(1/Zimag)^split_character)^2)^1/2
weight_split_character: if weighting=='hybrid_modulus', determines how much split character the hybrid modulus has. 0 is regular modulus, 1 is split modulus, 0.5 is equal split and regular character
frequency_bounds: bounds on allowed RQ peak frequencies. (min,max) tuple. None indicates no bounds
random_seed: int or None. If int, initialize a RandomState with this seed and use it for random parameter sampling to guarantee repeatability
grid_search_params: dict of params for the 'grid_search' method.
grid_density: number of different values to test for each parameter
n_best: number of param sets from grid search to use as starting points for optimization
return_info: if True, return best_fun and num_RC after best_params
return_history: if True, return history of params and fun for each num_RC tested
est_HFR_kw: kwargs to pass to estimate_HFR
Returns:
return_info=False,return_history=False: best_params (dict of optimized parameters)
return_info=True,return_history=False: best_params, best_fun, num_RC
return_info=True,return_history=True: best_params, best_fun, num_RC, history
return_info=False,return_history=True: best_params, history
"""
# get non-RC param names from model argspec
nonRC_param_names = inspect.getfullargspec(model)[0]
nonRC_param_names.remove('w')
Zmag = data['Zmod'].max()
if frequency_bounds is None:
if 'Lstray' in nonRC_param_names:
def objective_func(param_roots,param_names,w,y,weights,eHFR,alpha):
params = dict(zip(param_names,param_roots**2))
err = ec_chi_sq(params,w,y,weights,model,normalize='n')
# apply a hefty penalty to prevent non-physical n values
n_vals = np.array([v for k,v in params.items() if (k[0]=='n' or k=='nu' or k=='beta')])
n_penalty = sum(n_vals[n_vals > 1])*1000*Zmag
# apply a hefty penalty to prevent high inductance
if params['Lstray'] > max_L:
L_penalty = 1e6*(params['Lstray'] - max_L)*Zmag
else:
L_penalty = 0
return err + n_penalty + alpha*(params['HFR'] - eHFR)**2 + L_penalty
else:
def objective_func(param_roots,param_names,w,y,weights,eHFR,alpha):
params = dict(zip(param_names,param_roots**2))
err = ec_chi_sq(params,w,y,weights,model,normalize='n')
# apply a hefty penalty to prevent non-physical n values
n_vals = np.array([v for k,v in params.items() if (k[0]=='n' or k=='nu' or k=='beta')])
n_penalty = sum(n_vals[n_vals > 1])*1000*Zmag
return err + n_penalty + alpha*(params['HFR'] - eHFR)**2
elif len(frequency_bounds)==2:
if 'Lstray' in nonRC_param_names:
def objective_func(param_roots,param_names,w,y,weights,eHFR,alpha):
params = dict(zip(param_names,param_roots**2))
err = ec_chi_sq(params,w,y,weights,model,normalize='n')
# apply a hefty penalty to prevent non-physical n values
n_vals = np.array([v for k,v in params.items() if (k[0]=='n' or k=='nu' or k=='beta')])
n_penalty = sum(n_vals[n_vals > 1])*1000*Zmag
# apply a hefty penalty to prevent high inductance
if params['Lstray'] > max_L:
L_penalty = 1e6*(params['Lstray'] - max_L)*Zmag
else:
L_penalty = 0
# apply a hefty penalty to keep peak frequencies inside bounds
f_peaks = var_RC_peak_frequencies(params)
above_freq = np.array([np.log(max(f,frequency_bounds[1])) - np.log(frequency_bounds[1]) for f in f_peaks])
below_freq = np.array([np.log(frequency_bounds[0]) - np.log(min(f,frequency_bounds[0])) for f in f_peaks])
fRQ_penalty = np.sum(above_freq*1000*Zmag + below_freq*1000*Zmag)
return err + n_penalty + alpha*(params['HFR'] - eHFR)**2 + L_penalty + fRQ_penalty
else:
def objective_func(param_roots,param_names,w,y,weights,eHFR,alpha):
params = dict(zip(param_names,param_roots**2))
err = ec_chi_sq(params,w,y,weights,model,normalize='n')
# apply a hefty penalty to prevent non-physical n values
n_vals = np.array([v for k,v in params.items() if (k[0]=='n' or k=='nu' or k=='beta')])
n_penalty = sum(n_vals[n_vals > 1])*1000*Zmag
# apply a hefty penalty to keep peak frequencies inside bounds
f_peaks = var_RC_peak_frequencies(params)
above_freq = np.array([np.log(max(f,frequency_bounds[1])) - np.log(frequency_bounds[1]) for f in f_peaks])
below_freq = np.array([np.log(frequency_bounds[0]) - np.log(min(f,frequency_bounds[0])) for f in f_peaks])
fRQ_penalty = np.sum(above_freq*1000*Zmag + below_freq*1000*Zmag)
return err + n_penalty + alpha*(params['HFR'] - eHFR)**2 + fRQ_penalty
else:
raise ValueError('Invalid argument for frequency_bounds. Must be a 2-length tuple or list, or None for no bounds')
"""Attempt to use correlation between real and imag errors as an additional penalty term to help find global minimum. Didn't work well"""
# def objective_func(param_roots,param_names,w,y,weights,eHFR,alpha,beta,sig_err):
# params = dict(zip(param_names,param_roots**2))
# # need Z_fit for error correlation - calculate explicitly instead of using ec_chi_sq to avoid calculating twice
# Zfit = model(w,**params)
# y_fit = np.array([Zfit.real,Zfit.imag]).T
# if len(weights.shape)==1:
# weights = weights.reshape(-1,1)
# elif weights.shape[1]!=2:
# raise ValueError('Invalid shape for weights: {}'.format(weights.shape))
# y_err = (y-y_fit)**2*weights**2
# x2 = np.sum(y_err)
# # normalize by number of points
# x2 /= len(y)
# # apply a hefty penalty to prevent non-physical n values
# n_vals = np.array([v for k,v in params.items() if k[0]=='n'])
# n_penalty = sum(n_vals[n_vals > 1])*1000
# # apply a penalty for real and imag error correlation
# y_tot = np.sum(y_err,axis=1)
# # limit to significant errors to avoid treating cluster of points near zero error as highly correlated
# y_err_sig = y_err[np.where(y_tot>=sig_err)]
# # calculate robust correlation coefficient between real and imag errors
# if len(y_err_sig) > 0:
# rs = spearmanr(y_err_sig)
# #print(rs.pvalue)
# corr_penalty = -beta*np.log(rs.pvalue)
# else:
# # if no errors meet significance threshold, no penalty
# corr_penalty = 0
# return x2 + n_penalty + alpha*(params['HFR'] - eHFR)**2 + corr_penalty
# get model func for curve_fit
"""placeholder"""
model_func = get_model_func(model)
# get order of max Zreal
R_ord = np.floor(np.log10(data['Zreal'].max()))
# get frequency, Z values, and weights
w = data['Freq'].values
y = data.loc[:,['Zreal','Zimag']].values
weights = calculate_weights(data,weighting=weighting,split_character=weight_split_character)
# initialize RandomState
randstate = np.random.RandomState(random_seed)
# initialize lists for storing results for each num_RC
history = {}
num = []
num_fun = []
num_params = []
num_new_params = []
idx_maxerr_hist = [] # history of max error indexes
if init_params is not None:
# avoid overwriting passed parameters
init_params = init_params.copy()
##---------------------------------
## Ascending
##---------------------------------
if direction in ('ascending','overshoot'):
if init_params is None:
init_params = {}
if 'HFR' in nonRC_param_names:
init_params['HFR'] = 1
if 'Lstray' in nonRC_param_names:
init_params['Lstray'] = 1e-6
# ensure that order of init_params matches argspec
init_params = {k:init_params.get(k,1) for k in nonRC_param_names}
# add params for first RC element
# set R to same order of magnitude as max Zreal
init_params.update({'R0':10**R_ord,'Q0':1e-3,'n0':0.5})
else:
init_num_RC = int((len(init_params) - len(nonRC_param_names))/3)
if init_num_RC==0:
default_RC = {'R0':10**R_ord,'Q0':1e-3,'n0':0.5}
init_params.update(default_RC)
param_names = nonRC_param_names + sum([[f'{k}{i}' for k in ['R','Q','n']] for i in range(init_num_RC)],[])
# ensure that order of init_params matches argspec. If RC params not specified, use defaults
# Don't use dict.get() in order to throw an error if params (besides 1st RC params) are missing
init_params = {k:init_params[k] for k in param_names}
#initial parameter values
#estimate HFR if specified
if est_HFR==True:
eHFR = estimate_HFR(data,**est_HFR_kw)
init_params['HFR'] = eHFR
if eHFR < 0:
init_params['HFR'] = 0
alpha = 0
print("""Warning: Estimated HFR is negative. Estimated HFR set to 0, alpha set to 0""")
else:
eHFR = 0
if alpha!=0:
print('''Warning: alpha is non-zero but HFR is not being estimated. This should only be run this way if the HFR in init_params is a reasonably accurate estimate of the actual HFR.
Otherwise, set alpha to 0 or est_HFR to True''')
print('Initial parameters: {}'.format(init_params))
print('Initial peak frequencies:',var_RC_peak_frequencies(init_params))
#determine # of RC elements in initial parameters
#First two params are HFR and Lstray. Each RC element has 3 params: R, Q, n
init_num_RC = max(int((len(init_params) - len(nonRC_param_names))/3), min_num_RC)
n = init_num_RC
if direction=='overshoot':
end_RC = max_num_RC + 1
else:
end_RC = max_num_RC
while n <= end_RC:
if n > int((len(init_params) - len(nonRC_param_names))/3):
while n > int((len(init_params) - len(nonRC_param_names))/3):
# add RQ elements until we reach desired number of elements
# find frequency with largest error in current fit
Z_fit = model(data['Freq'],**init_params)
y_fit = np.array([np.real(Z_fit),np.imag(Z_fit)]).T
if len(weights.shape)==1:
shaped_weights = weights.reshape(-1,1)
else:
shaped_weights = weights
y_errs = np.sum((y-y_fit)**2*shaped_weights**2,axis=1)
# aggregate local errors with moving average
y_errs_agg = np.array([np.mean(y_errs[max(i-5,0):min(i+5,len(y_errs))]) for i in range(len(y_errs))])
# ignore points below Nyquist x-axis (positive phase/inductive)
ignore_idx = list(np.where(data['Zimag'].values > 0)[0])
# don't initialize new elements outside user-specified frequency bounds
if frequency_bounds is not None:
ignore_idx += list(np.where((data['Freq'].values < frequency_bounds[0]) | (data['Freq'].values > frequency_bounds[1]))[0])
# don't initialize new elements at edges of measured frequency range
ignore_idx += [0,1]
ignore_idx += list(np.arange(len(y_errs_agg)-2,len(y_errs_agg),1).astype(int))
# don't initialize new elements in same location as previously initialized elements
for idx in idx_maxerr_hist:
add_idx = np.where(np.abs(np.log10(w)
import numpy as np
import pandas as pd
import warnings
from collections import defaultdict
from collections.abc import Sequence
from tqdm.autonotebook import tqdm
# plotting
import matplotlib.pyplot as plt
# ML algo
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.linear_model import Ridge, Lasso, LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.exceptions import ConvergenceWarning
# this shouldn't be a hard dependency
from IPython.core.display import display
class MLModels:
"""
Creates a class MLModels.
Parameters
----------
n_trials : int
default : 50
test_size : float
default : 0.25
random_state: int
default : None
Methods
--------
plot_accuracy : Plots and returns model train and test accuracies
train_test : Calculates the training and testing accuracy of the model
run_classifier : Runs the specified classifier algorithms
on the data provided
plot_pcc : Calculates the Proportion Chance Criteria,
Plots a bar chart of all classes
run_regression : Runs the specified regression algorithms
on the data provided
summarize : Displays in a dataframe the best performance
(highest accuracy) of the methods
"""
# safe to change
n_trials = 50
test_size = 0.25
random_state = None
# not so safe to change
model = None
_setting_name = None
def __init__(self):
self.training_accuracy = None
self.test_accuracy = None
self.training_std = None
self.test_std = None
self.coef = None
self.classes = None
self._setting = None
def plot_accuracy(self):
"""
Plots the train and test accuracy +- 1 standard deviation of the model.
"""
fig, ax = plt.subplots()
ax.plot(self._setting, self.training_accuracy,
label="training accuracy")
ax.plot(self._setting, self.test_accuracy, label="test accuracy")
ax.fill_between(self._setting,
self.training_accuracy-self.training_std,
self.training_accuracy+self.training_std, alpha=0.2)
ax.fill_between(self._setting, self.test_accuracy-self.test_std,
self.test_accuracy+self.test_std, alpha=0.2)
ax.set_ylabel("Accuracy")
ax.set_xlabel(self._setting_name)
ax.legend()
return ax
def train_test(self, X, y, scaler=None):
"""
Calculates the training and testing accuracy of the model.
Calculate mean accuracy for `self.n_trials` using the given parameter
`self._settings`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : array-like, shape = [n_samples]
Target vector relative to X
scaler : object
Scaling method to be applied to X
default : None
"""
train_accuracies = []
test_accuracies = []
has_coef = True
coef = defaultdict(lambda: np.array([np.nan] * X.shape[1]))
rs = (np.random.RandomState(seed=self.random_state) if
self.random_state else None)
with tqdm(total=self.n_trials*len(self._setting)) as pb:
for i in range(self.n_trials):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=self.test_size, random_state=rs)
if scaler is not None:
# scale using the training set
scaler_inst = scaler.fit(X_train)
X_train = scaler_inst.transform(X_train)
# apply the training set scale to the test set
X_test = scaler_inst.transform(X_test)
pb.set_description(f'Trial: {i + 1}')
training_accuracy = []
test_accuracy = []
feature_coef = []
for s in self._setting:
# build the model
self.model.__setattr__(self._setting_name, s)
self.model.__setattr__('random_state', rs)
self.model.__setattr__('n_jobs', -1)
clf = self.model
clf.fit(X_train, y_train)
# record training set accuracy
training_accuracy.append(clf.score(X_train, y_train))
# record generalization accuracy
test_accuracy.append(clf.score(X_test, y_test))
if has_coef:
try:
# classifiers have (1, N), while regressors have (N, )
coef[s] = np.nanmean([coef[s].reshape(clf.coef_.shape), clf.coef_], axis=0)
except AttributeError:
has_coef = False
pb.update(1)
train_accuracies.append(training_accuracy)
test_accuracies.append(test_accuracy)
self.training_accuracy = np.mean(train_accuracies, axis=0)
self.test_accuracy = np.mean(test_accuracies, axis=0)
self.training_std = np.std(train_accuracies, axis=0)
self.test_std = np.std(test_accuracies, axis=0)
if has_coef:
self.coef = coef[self._setting[np.argmax(self.test_accuracy)]]
try:
self.classes = clf.classes_
except AttributeError:
pass
@staticmethod
def run_classifier(X, labels, feature_names=None, C=None,
n_neighbors=None, scaler=None, methods='all'):
"""
Run classifier algorithms on the data provided.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
labels : array-like, shape = [n_samples]
Target vector relative to X
feature_names : list
List of column names to include in the training of the model
C : list
List of values of C for Logistic Regression and SVC
default : 1e-8, 1e-4, 1e-3, 0.1, 0.2, 0.4, 0.75,
1, 1.5, 3, 5, 10, 15, 20, 100, 300, 1000, 5000
n_neighbors : list
List of values of number of neighbors for KNN
default : 1 to 50
scaler : object
Scaling method to be applied to X
default : None
methods: Union[str, dict]
Dictionary of label -> MLModel to execute or a str corresponding
to a pre-defined list.
default : 'all'
str options : 'knn', 'logistic' or 'lr',
'svc' or 'svm', 'nsvc' or 'nsvm'
Returns
-------
Dictionary of MLModels
"""
C = [1e-8, 1e-4, 1e-3, 0.1, 0.2, 0.4, 0.75, 1, 1.5, 3, 5, 10, 15, 20,
100, 300, 1000, 5000] if C is None else C
n_nb = list(range(1, 51)) if n_neighbors is None else n_neighbors
if isinstance(methods, str):
algo = {methods} if methods != 'all' else {'knn', 'lr', 'svc'}
methods = {}
if algo.intersection({'knn'}):
methods['KNN'] = KNNClassifier(n_nb)
if algo.intersection({'logistic', 'lr'}):
methods['Logistic Regression (L1)'] =\
LogisticRegressor(C, 'l1')
methods['Logistic Regression (L2)'] =\
LogisticRegressor(C, 'l2')
if algo.intersection({'svc', 'svm'}):
methods['Linear SVM (L1)'] = LinearSVM(C, 'l1')
methods['Linear SVM (L2)'] = LinearSVM(C, 'l2')
if algo.intersection({'nsvc', 'nsvm'}):
methods['Polynomial SVM'] = PolynomialSVM({'C': C})
methods['RBF SVM'] = RadialBasisSVM({'gamma': C})
if not methods:
warnings.warn('methods is not a valid value')
return
MLModels.plot_pcc(labels)
plt.show()
return MLModels.__run_models(methods, X, labels, feature_names,
scaler=scaler)
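    # --- Illustrative usage sketch (not part of the original class). It
    # assumes a feature matrix `X` (e.g. a pandas DataFrame) and a label
    # vector `y` are already available; StandardScaler is just one possible
    # choice of scaler:
    #
    #   from sklearn.preprocessing import StandardScaler
    #   models = MLModels.run_classifier(X, y, feature_names=list(X.columns),
    #                                    scaler=StandardScaler(),
    #                                    methods='knn')
    #   models['KNN'].plot_accuracy()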
@staticmethod
def plot_pcc(labels):
"""
        Calculate the Proportion Chance Criterion (PCC) and display it in the
        plot title, together with a bar chart of the class frequencies.
Parameters
----------
labels : array-like, shape = [n_samples]
Target vector relative to X
"""
label, counts = np.unique(labels, return_counts=True)
N = np.sum(counts)
pcc = np.sum([(n/N)**2 for n in counts])
fig, ax = plt.subplots()
ax.bar(range(len(counts)), counts, tick_label=label)
        ax.set_title('PCC = %.2f (1.25*PCC = %.2f)' % (pcc, pcc*1.25))
ax.set_xlabel('labels')
ax.set_ylabel('frequency')
return ax
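    # --- Illustrative worked example (hypothetical counts, not part of the
    # original class): with three classes of sizes 50, 30 and 20,
    #   N   = 100
    #   PCC = (50/100)**2 + (30/100)**2 + (20/100)**2 = 0.38
    # and the title also reports 1.25*PCC = 0.475, a common benchmark that a
    # classifier's accuracy should exceed to beat chance comfortably.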
@staticmethod
def run_regression(X, labels, feature_names=None, alpha=None,
n_neighbors=None, scaler=None, algorithm=['all']):
"""
Runs the specified algorithms on the data provided.
Parameters
----------
X : {array-like, sparse matrix}
Training data
labels : array-like, shape = [n_samples]
Target vector relative to X
        feature_names : list
            List of column names to include in the training of the model
alpha : list
List of values of alpha for Linear Regression
            default : 1e-12, 1e-10, 1e-8, 1e-4, 1e-3, 0.1, 0.2, 0.4, 0.75,
1, 1.5, 3, 5, 10, 15, 20
n_neighbors : list
List of values of number of neighbors for KNN
default : 1 to 50
scaler : object
Scaling method to be applied to X
default : None
algorithm : list
default : 'all'
options : 'knn', 'linear' or 'linear regression'
Returns
-------
Dictionary of model objects
"""
        alpha = [1e-12, 1e-10, 1e-8, 1e-4, 1e-3, 0.1, 0.2, 0.4, 0.75,
1, 1.5, 3, 5, 10, 15, 20] if alpha is None else alpha
n_nb = list(range(1, 51)) if n_neighbors is None else n_neighbors
methods = {}
if isinstance(algorithm, list):
for algo in algorithm:
algo = algo.lower()
if algo == 'knn' or algo == 'all':
methods['KNN'] = KNNRegressor(n_nb)
if (algo == 'linear' or algo == 'linear regression' or algo
== 'all'):
methods['Linear Regression (L1)'] = LassoRegressor(
alpha=alpha)
methods['Linear Regression (L2)'] = RidgeRegressor(
alpha=alpha)
if (algo == 'linear1' or algo == 'linear regression1' or algo
== 'all'):
methods['Linear Regression (L1)'] = LassoRegressor(
alpha=alpha)
if (algo == 'linear2' or algo == 'linear regression2' or algo
== 'all'):
methods['Linear Regression (L2)'] = RidgeRegressor(
alpha=alpha)
if len(methods.keys()) == 0:
print(f'method {algo} not in options')
return MLModels.__run_models(methods, X, labels, feature_names,
scaler=scaler)
else:
print('Algorithms should be in a list')
@staticmethod
def __run_models(methods, X, labels, feature_names, scaler=None):
"""
Perform the models on X and return a summary.
Parameters
----------
methods: dictionary
Dictionary of objects (models)
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
labels : array-like, shape = [n_samples]
Target vector relative to X
feature_names : list
List of column names to include in the training of the model
scaler : object
Scaling method to be applied to X
default : None
Returns
-------
Dictionary of fitted classifiers
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ConvergenceWarning)
for k in methods:
print(k)
m = methods[k]
m.train_test(X, labels, scaler=scaler)
display(MLModels.summarize(methods, (
feature_names if feature_names is not None else X.columns)))
return methods
@staticmethod
def summarize(methods, feature_names):
"""
Displays the best performance (highest accuracy) of the methods
Parameters
----------
methods: dictionary
Dictionary of objects (models)
feature_names : list
List of column names to include in the training of the model
Returns
-------
Dataframe of the best performance (highest accuracy) of the methods
specified along with the best parameter and top predictor
"""
names = []
accuracies = []
parameters = []
features = []
for k in methods:
m = methods[k]
names.append(k)
accuracies.append(np.max(m.test_accuracy))
parameters.append('%s = %s' % (
m._setting_name, m._setting[
|
np.argmax(m.test_accuracy)
|
numpy.argmax
|
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Date: 2021-06-08 16:16:42
"""
import pickle
import numpy as np
def generate_label_q_cifar():
path = 'D:/Experiment Datasets/Image Datasets/CIFAR-FS/'
train_data_path = path + 'CIFAR_FS_train.pickle'
with open(train_data_path, 'rb') as fp:
train_data = pickle.load(fp, encoding='bytes')
train_labels = train_data[b'labels']
train_labels = np.unique(
|
np.array(train_labels)
|
numpy.array
|
from typing import NamedTuple, Tuple
import numba as nb
import numpy as np
from scipy import sparse
from .typing import X_EPSILON, BoolArray, FloatArray, IntArray, IntDType, SparseMatrix
class AdjacencyMatrix(NamedTuple):
indices: IntArray
indptr: IntArray
nnz: int
@nb.njit(inline="always")
def neighbors(A: AdjacencyMatrix, cell: int) -> IntArray:
start = A.indptr[cell]
end = A.indptr[cell + 1]
return A.indices[start:end]
# Conversion between dense and sparse
# -----------------------------------
def _to_ij(conn: IntArray, fill_value: int, invert: bool) -> Tuple[IntArray, IntArray]:
n, m = conn.shape
j = conn.ravel()
valid = j != fill_value
i = np.repeat(np.arange(n), m)[valid]
j = j[valid]
if invert:
return j, i
else:
return i, j
def _to_sparse(
conn: IntArray, fill_value: int, invert: bool, sort_indices: bool
) -> sparse.csr_matrix:
i, j = _to_ij(conn, fill_value, invert)
coo_content = (j, (i, j))
coo_matrix = sparse.coo_matrix(coo_content)
csr_matrix = coo_matrix.tocsr()
    # Conversion to csr format results in a sorting of indices. We require
    # only sorting of i, not j, since sorting j would e.g. mess up the
    # counterclockwise vertex orientation of the faces.
if not sort_indices:
order = np.argsort(i)
csr_matrix.indices = j[order]
csr_matrix.has_sorted_indices = False
return csr_matrix
def ragged_index(n: int, m: int, m_per_row: IntArray) -> BoolArray:
"""
Given an array of n rows by m columns, starting from left mark the values
True such that the number of True values equals m_per_row.
For example:
n = 3
m = 4
m_per_row = np.array([1, 2, 3])
    Then the result of ragged_index(n, m, m_per_row) is:
np.array([
[True, False, False, False],
[True, True, False, False],
[True, True, True, False],
])
This can be used as boolean index to set a variable number of values per
row.
"""
column_number = np.tile(np.arange(m), n).reshape((n, m))
return (column_number.T < m_per_row).T
def to_sparse(
conn: IntArray, fill_value: int, sort_indices: bool = True
) -> sparse.csr_matrix:
return _to_sparse(conn, fill_value, invert=False, sort_indices=sort_indices)
def to_dense(conn: SparseMatrix, fill_value: int) -> IntArray:
n, _ = conn.shape
m_per_row = conn.getnnz(axis=1)
m = m_per_row.max()
# Allocate 2D array and create a flat view of the dense connectivity
dense_conn = np.empty((n, m), dtype=IntDType)
flat_conn = dense_conn.ravel()
if (n * m) == conn.nnz:
# Shortcut if fill_value is not present, when all of same geom. type
# e.g. all triangles or all quadrangles
valid = slice(None) # a[:] equals a[slice(None)]
else:
valid = ragged_index(n, m, m_per_row).ravel()
flat_conn[~valid] = fill_value
if isinstance(conn, sparse.csr_matrix):
flat_conn[valid] = conn.indices
elif isinstance(conn, sparse.coo_matrix):
flat_conn[valid] = conn.col
else:
raise TypeError("Can only invert coo or csr matrix")
return dense_conn
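def _example_dense_sparse_roundtrip() -> IntArray:
    # Illustrative sketch, not part of the original module: a dense
    # face-node connectivity with one triangle and one quadrangle (the
    # fill value of -1 is an assumption of this example) survives a
    # to_sparse/to_dense round trip because its rows are already sorted.
    conn = np.array(
        [
            [0, 1, 2, -1],  # triangle, padded with the fill value
            [0, 2, 3, 4],  # quadrangle
        ]
    )
    csr = to_sparse(conn, fill_value=-1)
    roundtrip = to_dense(csr, fill_value=-1)
    assert np.array_equal(conn, roundtrip)
    return roundtrip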
# Inverting connectivities
# ------------------------
def invert_dense_to_sparse(
conn: IntArray, fill_value: int, sort_indices: bool = True
) -> sparse.csr_matrix:
return _to_sparse(conn, fill_value, invert=True, sort_indices=sort_indices)
def invert_dense(
conn: IntArray, fill_value: int, sort_indices: bool = True
) -> IntArray:
sparse_inverted = _to_sparse(
conn, fill_value, invert=True, sort_indices=sort_indices
)
return to_dense(sparse_inverted, fill_value)
def invert_sparse(conn: sparse.csr_matrix) -> sparse.csr_matrix:
coo = conn.tocoo()
j = coo.row
i = coo.col
coo_content = (j, (i, j))
inverted = sparse.coo_matrix(coo_content)
return inverted.tocsr()
def invert_sparse_to_dense(conn: sparse.csr_matrix, fill_value: int) -> IntArray:
inverted = invert_sparse(conn)
return to_dense(inverted, fill_value)
# Renumbering
# -----------
def renumber(a: IntArray) -> IntArray:
# Taken from https://github.com/scipy/scipy/blob/v1.7.1/scipy/stats/stats.py#L8631-L8737
# (scipy is BSD-3-Clause License)
arr = np.ravel(np.asarray(a))
sorter = np.argsort(arr, kind="quicksort")
inv = np.empty(sorter.size, dtype=IntDType)
inv[sorter] = np.arange(sorter.size, dtype=IntDType)
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv] - 1
return dense.reshape(a.shape)
def close_polygons(face_node_connectivity: IntArray, fill_value: int) -> Tuple[IntArray, BoolArray]:
    # Wrap around and create a closed polygon: put the first node at the end of the row.
    # In case of fill values, replace all fill values with the first node as well.
n, m = face_node_connectivity.shape
closed = np.full((n, m + 1), fill_value, dtype=IntDType)
closed[:, :-1] = face_node_connectivity
first_node = face_node_connectivity[:, 0]
# Identify fill value, and replace by first node also
isfill = closed == fill_value
closed.ravel()[isfill.ravel()] = np.repeat(first_node, isfill.sum(axis=1))
return closed, isfill
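def _example_close_polygons() -> None:
    # Illustrative sketch, not part of the original module (the fill value
    # of -1 is an assumption of this example): closing a padded triangle
    # repeats its first node at the end and also overwrites the padding.
    faces = np.array([[0, 1, 2, -1]])
    closed, isfill = close_polygons(faces, fill_value=-1)
    assert np.array_equal(closed, np.array([[0, 1, 2, 0, 0]]))
    assert np.array_equal(isfill, np.array([[False, False, False, True, True]]))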
def reverse_orientation(face_node_connectivity: IntArray, fill_value: int):
# We cannot simply reverse the rows with [:, ::-1], since there may be fill
# values present.
reversed_orientation = face_node_connectivity.copy()
in_reverse = face_node_connectivity[:, ::-1]
in_reverse = in_reverse[in_reverse != fill_value]
replace = face_node_connectivity != fill_value
reversed_orientation[replace] = in_reverse
return reversed_orientation
def counterclockwise(
face_node_connectivity: IntArray, fill_value: int, nodes: FloatArray
) -> IntArray:
# In principle, we need only compute the cross product of the first three
# vertices to determine whether a polygon is ordered clockwise (cw) or ccw.
# However, this fails if there are hanging nodes amongst the first few.
    # First, we try with just the first triangle (the first three vertices) of every face.
p = nodes[face_node_connectivity[:, :3]]
dxy = np.diff(p, axis=1)
normal = np.cross(dxy[:, 0], dxy[:, 1])
reverse = normal < 0
# Check whether there are any hanging nodes
hanging =
|
np.abs(normal)
|
numpy.abs
|
"""
infinite_conformal.py
====================================
Module for calculating :math:`C_I` using linear algebra,
based on components generated by the method of recursive images
using conformal mapping to obtain :math:`V_{I\infty}`
"""
import scipy
import scipy.integrate
import scipy.special
import numpy as np
import numpy.linalg
eps0= 8.854187817*10**-12
def fF(phi,k,accuracy_limit):
"""
    Calculates the incomplete elliptic integral of the first kind
    for a complex argument phi. This function uses Jacobi's form (phi) rather than the trigonometric form.
    Uses scipy.integrate.quad for integration.
    scipy.special.ellipkinc(phi, k*k) could have been used if phi were a real number.
Parameters
----------
phi : complex number
k : real number
accuracy_limit : limit for accuracy in quad
Returns
-------
complex number
value of incomplete integral
"""
outreal=scipy.integrate.quad(lambda x: fFargRe(k,phi,x), 0, 1,epsrel=accuracy_limit)
outimag=scipy.integrate.quad(lambda x: fFargIm(k,phi,x), 0, 1,epsrel=accuracy_limit)
return (outreal[0]+1j*outimag[0])*phi
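def _example_fF_real_phi():
    """Illustrative sanity check, not part of the original module: for a real
    phi the result of fF() should match scipy.special.ellipkinc(phi, k*k),
    as noted in the fF() docstring. The values of phi and k are arbitrary."""
    phi, k = 0.7, 0.5
    approx = fF(phi, k, accuracy_limit=1e-12)
    exact = scipy.special.ellipkinc(phi, k * k)
    assert abs(approx.real - exact) < 1e-8
    assert abs(approx.imag) < 1e-12
    return approx, exact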
def fFargRe(k,phi, x):
"""Real part of the argument for the integral in fF()
"""
theta=phi*x
return (1/np.sqrt(1-k*k*np.sin(theta)**2)).real
def fFargIm(k,phi, x):
"""Imaginary part of the argument for the integral in fF()
"""
theta=phi*x
return (1/np.sqrt(1-k*k*np.sin(theta)**2)).imag
def fdtDdz(k,lamda,z):
"""Function for calculating :math:`\\frac{dt}{dz}` in order to calculate the electric field in x and y direction.
"""
return (2*np.pi/(k*lamda))*np.cos(2*np.pi*z/lamda)
def fdwDdt(k,t):
"""Function for calculating :math:`\\frac{dw}{dz}` in order to calculate the electric field in x and y direction.
"""
out = -1/np.sqrt( (1-t**2)*(1-t**2*k**2) )
    # because of the sqrt, there are two possible solutions, need to make sure we select the right one
if out.imag>0:
return out
return -out
def fdwDdz(k,t,lamda,z):
"""Function for calculating :math:`\\frac{dt}{dz}` in order to calculate the electric field in x and y direction.
"""
#print(fdwDdt(k,t),fdtDdz(k,lamda,z))
return fdwDdt(k,t)*fdtDdz(k,lamda,z)
class single_recursive_images:
"""
A class that houses the potential, capacitance and electric fields from the method of recursive images.
The values obtained are unphysical, as the potential at the electrodes is not constant.
Parameters
----------
eta : float
cover fraction of the electrodes
interface_of_electrodes : int
interface for the electrodes
thickness_of_layers : list of floats
thicknesses of the layers, this list will be 2 shorter than eps_x_of_layers and eps_y_of_layers, as the outermost layers have no defined thickness, but are infinite
eps_x_of_layers : list of floats
in-plane dielectric constant of the layers
eps_y_of_layers : list of floats
out-of-plane dielectric constant of the layers
max_reflections : int, optional
        maximum number of reflections to consider, defaults to 8
accuracy_limit : float, optional
reflections with less than accuracy_limit are ignored, defaults to 10**-15
"""
def __init__(self,eta,interface_of_electrodes,thickness_of_layers,eps_x_of_layers,eps_y_of_layers,max_reflections=8,accuracy_limit=10**-15):
"""
Returns
-------
class object
"""
self.eta=eta
self.interface_of_electrodes=interface_of_electrodes
self.thickness_of_layers=thickness_of_layers
self.eps_x_of_layers=eps_x_of_layers
self.eps_y_of_layers=eps_y_of_layers
self.max_reflections=max_reflections
self.accuracy_limit=accuracy_limit
# calculate the y-coordinate of each interface
self.y_of_interfaces=[0]
for T in thickness_of_layers:
self.y_of_interfaces.append(self.y_of_interfaces[-1]+T)
self.number_of_layers=len(self.eps_y_of_layers)
# calculate the reflection coefficients for each interface
self.r_pos_dir=[] # reflection coefficients for potential going in positive direction
self.r_neg_dir=[] # reflection coefficients for potential going in negative direction
self.t_pos_dir=[] # transmission coefficients for potential going in positive direction
self.t_neg_dir=[] # transmission coefficients for potential going in negative direction
for i in range(self.number_of_layers-1):
eps1=np.sqrt(self.eps_y_of_layers[i]*self.eps_x_of_layers[i])
eps2=np.sqrt(self.eps_y_of_layers[i+1]*self.eps_x_of_layers[i+1])
self.r_pos_dir.append((eps1-eps2)/(eps1+eps2))
self.t_pos_dir.append(self.r_pos_dir[-1]+1)
self.r_neg_dir.append(-self.r_pos_dir[-1])
self.t_neg_dir.append(self.r_neg_dir[-1]+1)
# calculate eps_r of all layers
self.eps_r_of_layers=[]
for i in range(self.number_of_layers):
if self.eps_y_of_layers[i]>0:
self.eps_r_of_layers.append((self.eps_x_of_layers[i]/self.eps_y_of_layers[i])**0.5)
else:
self.eps_r_of_layers.append(1)
        # calculate k for the electrodes
        # k is the argument for the elliptic integrals
        self.k=np.sin(np.pi/2*self.eta)
        # Calculate the elliptic integrals used to calculate the capacitance
self.Kk= scipy.special.ellipk(float(self.k**2))
self.Kpk= scipy.special.ellipk(1-float(self.k**2))
def get_tree(self):
"""
get function that will make the tree if needed
"""
if not hasattr(self,'tree'):
self.make_tree()
return self.tree
def make_tree(self):
"""
Function for building the tree\n
The tree consists of a list of lists where
the main list iterates over the layers and
the sublists contain a series of cases defined as:\n
[dist prior, direction, amplitude]\n
dist prior: the distance traveled to get to the current layer\n
        direction: the direction of the potential through the layer (reflection switches the direction)\n
amplitude: multiplication of all the reflection and transmission coefficients so far\n
        The cases are calculated iteratively, starting with the initial projected fields
(with positive direction in the layer above the electrodes, and negative direction below them)\n
"""
''' tree will contain the full tree, and the reflections are added iteratively
temp_tree_1 and temp_tree_2 are used to keep track of what fields will be
            addressed in the next iteration'''
tree=[]
temp_tree_2=[]
for layer in range(self.number_of_layers):
tree.append([])
temp_tree_2.append([])
        #[dist prior, direction, amplitude]
''' add initial fields'''
temp_tree_2[self.interface_of_electrodes].append([0,-1,1])
tree[self.interface_of_electrodes].append([0,-1,1])
temp_tree_2[self.interface_of_electrodes+1].append([0,1,1])
tree[self.interface_of_electrodes+1].append([0,1,1])
for step in range(self.max_reflections):
temp_tree_1=temp_tree_2
temp_tree_2=[]
for layer in range(self.number_of_layers):
temp_tree_2.append([])
''' at this point
temp_tree_1 contains the fields that should generate additional reflections
temp_tree_2 is empty, and cases for the next iteration are added here'''
for layer in range(self.number_of_layers-2): #layer-2 because the outermost layers have infinite thickness and cannot generate reflections
for case in temp_tree_1[layer+1]:
dist_prior=case[0]
dist_add=self.thickness_of_layers[layer]*self.eps_r_of_layers[layer+1]
direction=case[1]
amplitude=case[2]
if abs(amplitude*np.exp(-np.pi*(dist_prior+dist_add)))>self.accuracy_limit:
if direction==1:
temp_tree_2[layer+1].append([dist_prior+dist_add,-direction,amplitude*self.r_pos_dir[layer+1]])
temp_tree_2[layer+2].append([dist_prior+dist_add,direction,amplitude*self.t_pos_dir[layer+1]])
tree[layer+1].append(temp_tree_2[layer+1][-1]) # add to actual tree
tree[layer+2].append(temp_tree_2[layer+2][-1]) # add to actual tree
if direction==-1:
temp_tree_2[layer+1].append([dist_prior+dist_add,-direction,amplitude*self.r_neg_dir[layer]])
temp_tree_2[layer].append([dist_prior+dist_add,direction,amplitude*self.t_neg_dir[layer]])
tree[layer+1].append(temp_tree_2[layer+1][-1]) # add to actual tree
tree[layer].append(temp_tree_2[layer][-1]) # add to actual tree
self.tree=tree
def get_C(self):
"""
Returns
-------
float
            capacitance
"""
eps_m_below=self.eps_y_of_layers[self.interface_of_electrodes]*self.eps_r_of_layers[self.interface_of_electrodes]
eps_m_above=self.eps_y_of_layers[self.interface_of_electrodes+1]*self.eps_r_of_layers[self.interface_of_electrodes+1]
return (eps_m_below+eps_m_above)*self.Kk/self.Kpk*eps0/2
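    # --- Illustrative usage sketch (hypothetical values, not part of the
    # original module): electrodes at the single interface between a
    # substrate and air, with 50% electrode coverage.
    #
    #   layers = single_recursive_images(
    #       eta=0.5,
    #       interface_of_electrodes=0,
    #       thickness_of_layers=[],
    #       eps_x_of_layers=[10.0, 1.0],
    #       eps_y_of_layers=[10.0, 1.0],
    #   )
    #   C = layers.get_C()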
def get_C_int_Ex(self):
"""
Function for calculating the capacitance by integrating :math:`\\varepsilon_xE_x` at :math:`x=0`.\n
        Used for testing, as it should give the same output as get_C().\n
For all practical applications get_C() should be used instead\n
Returns
-------
float
            capacitance
"""
if self.eps_x_of_layers[0]>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), -20, 0)
C= G*self.eps_x_of_layers[0]*eps0
else:
C=0
for i in range(self.number_of_layers-2):
if self.eps_x_of_layers[i+1]>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), self.y_of_interfaces[i], self.y_of_interfaces[i+1])
C+= G*self.eps_x_of_layers[i+1]*eps0
if self.eps_x_of_layers[-1]>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), self.y_of_interfaces[-1], 20)
C+= G*self.eps_x_of_layers[-1]*eps0
return C
def get_V_Ex_Ey(self,x,y,get_V=1,get_Ex=1,get_Ey=1): # accepts 'x' as a list, but 'y' must be single value
"""
        Function for calculating the potential and electric fields at coordinates (x,y)
Parameters
----------
x : float or list of floats
            x-coordinate(s)
        y : float
            y-coordinate
get_V : bool, optional
V is only calculated if this flag is set to True, default: True
get_Ex : bool, optional
Ex is only calculated if this flag is set to True, default: True
get_Ey : bool, optional
Ey is only calculated if this flag is set to True, default: True
Returns
-------
list of float for V, Ex, Ey
"""
tree=self.get_tree()
x=np.array(x)
x=np.atleast_1d(x)
        ''' the conformal mapping technique uses a coordinate system centered
            at the center of the electrode finger; the global coordinate system
            used here is centered on the gap between electrodes.
            We therefore transform between the two by adding +0.5'''
x=x+0.5
V=np.zeros(x.size)
Ex=np.zeros(x.size)
Ey=np.zeros(x.size)
layer=0
''' find the current layer '''
while layer<len(self.y_of_interfaces) and self.y_of_interfaces[layer]<=y:
layer+=1
if self.eps_y_of_layers[layer]==0:
return 0,0,0 # return zero if the dielectric constant is zero
''' run all cases '''
for case in tree[layer]:
#case=[dist prior,direction,amplitude]
dist_prior=case[0]
direction=case[1]
amplitude=case[2]
if amplitude==0: continue
if direction==1:
#interfaces[layer] is the interface below the layer
Y=dist_prior+self.eps_r_of_layers[layer]*(y-self.y_of_interfaces[layer-1])
else: #direction==-1
Y=dist_prior-self.eps_r_of_layers[layer]*(y-self.y_of_interfaces[layer])
####################################
for i,XX in enumerate(x):
# from symmetry we only need 0<x<0.5
# the period of the system is 2
XX=XX%2 # reduce to period
if XX>1:
XX=XX%1 # reduce to half the period
sign_1=-1 # we are on the 'negative' side of half period
else:
sign_1=1
if XX>0.5:
XX=1-XX # fold onto 0<x<0.5
sign_2=-1 # we are on the negative side of the fold
else:
sign_2=1
                z=XX+1j*abs(Y) # position on the z-plane
                t=1/self.k*np.sin(np.pi*z) # position on the t-plane
if get_Ex or get_Ey:
dwDdz=fdwDdz(self.k,t,2,z) # :math:`\\frac{dw}{dz}`
if get_Ex:
Ex[i]+=1/self.Kpk*dwDdz.imag*amplitude*sign_1
if get_Ey:
Ey[i]-=1/self.Kpk*dwDdz.real*direction*amplitude*sign_1*sign_2*self.eps_r_of_layers[layer]
if get_V:
F=fF(
|
np.arcsin(t)
|
numpy.arcsin
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file simo_rnn_tut.py
# @author <NAME> (<NAME> <<EMAIL>>
# @date 2020-09-07
#
# @brief Hierarchical indoor localization based on Wi-Fi fingerprinting
# and a single-input multi-output (SIMO) recurrent neural network
# (RNN) and an optional stacked denoising autoencoder (SDAE) with
# TUT dataset for PyTorch.
#
# @remarks The results are submitted to XXX (TBD).
import os
import sys
import pathlib
import argparse
import datetime
import logging
from collections import OrderedDict, namedtuple
from timeit import default_timer as timer
import numpy as np
from num2words import num2words
from numpy.linalg import norm
from sklearn.metrics import accuracy_score
# PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
# from torchsummary import summary
# user modules
sys.path.insert(0, '../models')
from sdae_pt import sdae_pt
sys.path.insert(0, '../utils')
from mean_ci import mean_ci
from tut import TUT
# create logger
logging.basicConfig() # to write to stdout
logger = logging.getLogger(__name__)
def build_fnn(input_size, hidden_size, num_layers, output_size, dropout):
all_layers = [input_size] + [hidden_size]*num_layers + [output_size]
layers = []
for i in range(num_layers+1):
layers.append(('bn'+str(i), nn.BatchNorm1d(num_features=all_layers[i])))
layers.append(('af'+str(i), nn.ReLU()))
layers.append(('do'+str(i), nn.Dropout(p=dropout)))
layers.append(('fc'+str(i), nn.Linear(all_layers[i], all_layers[i+1])))
return nn.Sequential(OrderedDict(layers))
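# --- Illustrative sketch (hypothetical sizes, not part of the original
# script): build_fnn(input_size=8, hidden_size=16, num_layers=2,
# output_size=4, dropout=0.1) returns a Sequential of the form
#   BatchNorm1d(8)  -> ReLU -> Dropout -> Linear(8, 16)
#   BatchNorm1d(16) -> ReLU -> Dropout -> Linear(16, 16)
#   BatchNorm1d(16) -> ReLU -> Dropout -> Linear(16, 4)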
class TutDataset(Dataset):
"""Convert TUT training dataset to a PyTorch dataset."""
def __init__(self, tut):
self.rss = tut.rss_scaled.astype('float32')
# convert one-hot encoded labels to class-index-based ones
# for loss processing based on nn.CrossEntropyLoss:
# https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
self.floor = np.argmax(tut.labels.floor, axis=1)
self.coord = tut.coord_scaled.astype('float32')
def __len__(self):
return len(self.rss)
def __getitem__(self, idx):
return (self.rss[idx], self.floor[idx], self.coord[idx])
class SimoRnnFnn(nn.Module):
""" SIMO RNN FNN for hierarchical indoor localization."""
def __init__(self, sdae, rnn, fnn_floor, fnn_coord, batch_size, device):
super(SimoRnnFnn, self).__init__()
self.sdae = sdae
self.rnn = rnn
self.fnn_floor = fnn_floor
self.fnn_coord = fnn_coord
self.batch_size = batch_size
self.device = device
def forward(self, input, hidden):
input = self.sdae(input)
x = torch.cat((input, torch.zeros(self.batch_size, 1).to(self.device)), dim=1) # augmented input to RNN
rnn_input_size = x.shape[1]
output, hidden = self.rnn(x.view(-1, 1, rnn_input_size), hidden)
output_floor = self.fnn_floor(output.view(self.batch_size, -1))
# update the augmented input based on predicted floor index
x = torch.cat((input, torch.argmax(output_floor, dim=1).to(self.device, torch.float32).view(self.batch_size, 1)), dim=1)
output, hidden = self.rnn(x.view(-1, 1, rnn_input_size), hidden)
output_coord = self.fnn_coord(output.view(self.batch_size, -1))
return output_floor, output_coord, hidden
def initHidden(self):
return torch.zeros(self.rnn.num_layers, self.batch_size, self.rnn.hidden_size)
def simo_rnn_tut_pt(
frac: float,
validation_split: float,
preprocessor: str,
batch_size: int,
epochs: int,
optimizer: str,
dropout: float,
corruption_level: float,
dae_hidden_layers: list,
sdae_hidden_layers: list,
cache: bool,
rnn_hidden_size: int,
rnn_num_layers: int,
floor_hidden_size: int,
floor_num_layers: int,
coordinates_hidden_size: int,
coordinates_num_layers: int,
floor_weight: float,
coordinates_weight: float,
log_level: str,
device: torch.device
):
"""Multi-building and multi-floor indoor localization based on hybrid
    building/floor classification and coordinates regression using SDAE and
SIMO RNN and TUT dataset.
Keyword arguments:
"""
# set logging level
if log_level == 'CRITICAL':
logger.setLevel(logging.CRITICAL)
elif log_level == 'ERROR':
logger.setLevel(logging.ERROR)
elif log_level == 'WARNING':
logger.setLevel(logging.WARNING)
elif log_level == 'INFO':
logger.setLevel(logging.INFO)
elif log_level == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.NOTSET)
# load datasets after scaling
logger.info("Loading the data ...")
tut = TUT(
cache=cache,
frac=frac,
preprocessor=preprocessor,
classification_mode='hierarchical')
flr_height = tut.floor_height
# training_df = tut.training_df
training_data = tut.training_data
# testing_df = tut.testing_df
testing_data = tut.testing_data
logger.info("Building the model ...")
rss = training_data.rss_scaled
coord = training_data.coord_scaled
coord_scaler = training_data.coord_scaler # for inverse transform
labels = training_data.labels
rss_size = rss.shape[1]
floor_size = labels.floor.shape[1]
coord_size = coord.shape[1]
if sdae_hidden_layers != '':
sdae = sdae_pt(
dataset='tut',
input_data=rss,
preprocessor=preprocessor,
hidden_layers=sdae_hidden_layers,
cache=cache,
model_fname=None,
optimizer=optimizer,
corruption_level=corruption_level,
batch_size=batch_size,
epochs=epochs,
# epochs=300,
validation_split=validation_split)
input_size = sdae_hidden_layers[-1] + 1 # 1 for floor index
else:
sdae = nn.Identity()
input_size = rss_size + 1 # 1 for floor index
rnn = nn.RNN(
input_size=input_size,
hidden_size=rnn_hidden_size,
num_layers=rnn_num_layers,
batch_first=True,
dropout=(dropout if rnn_num_layers > 1 else 0.0)) # to turn off RNN warning messages
fnn_floor = build_fnn(rnn_hidden_size,
floor_hidden_size, floor_num_layers,
floor_size, dropout)
fnn_coord = build_fnn(rnn_hidden_size,
coordinates_hidden_size, coordinates_num_layers,
coord_size, dropout)
model = SimoRnnFnn(sdae, rnn, fnn_floor, fnn_coord, batch_size,
device=device).to(device)
logger.info("Training the model ...")
startTime = timer()
# N.B.: CrossEntropyLoss combines nn.LogSoftmax() and nn.NLLLoss() in one
# single class. So we don't need softmax activation function in
# classification.
criterion_floor = nn.CrossEntropyLoss()
criterion_coord = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
dataset = TutDataset(tut.training_data)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
# dataloader = DataLoader(dataset, batch_size=1, shuffle=True, drop_last=True)
for epoch in range(epochs):
model.train()
running_loss = 0
for rss, floor, coord in dataloader:
hidden = model.initHidden()
# move data to GPU if available
hidden = hidden.to(device, non_blocking=True)
rss = rss.to(device, non_blocking=True)
floor = floor.to(device, non_blocking=True)
coord = coord.to(device, non_blocking=True)
optimizer.zero_grad()
# forward pass
output_floor, output_coord, hidden = model(rss, hidden)
loss = floor_weight*criterion_floor(output_floor, floor)
loss += coordinates_weight*criterion_coord(output_coord, coord)
loss.backward()
optimizer.step()
running_loss += loss.item()
logger.debug("[Epoch %3d] loss: %.3f", epoch+1, running_loss/len(dataloader))
elapsedTime = timer() - startTime
logger.info("Completed in %.4e s", elapsedTime)
logger.info("Evaluating the model ...")
model.eval()
rss = testing_data.rss_scaled
flrs = np.argmax(testing_data.labels.floor, axis=1)
coords = testing_data.coord # original coordinates
dataset = TutDataset(tut.testing_data)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=True)
# calculate the classification accuracies and localization errors
flrs_pred = list()
coords_scaled_pred = list()
for rss, _, _ in dataloader:
hidden = model.initHidden()
# move data to GPU if available
hidden = hidden.to(device, non_blocking=True)
rss = rss.to(device, non_blocking=True)
# run the model recursively twice for floor and location
for _ in range(2):
output_floor, output_coord, hidden = model(rss, hidden)
if device == torch.device("cuda"):
output_floor = output_floor.detach().cpu().clone().numpy()
output_coord = output_coord.detach().cpu().clone().numpy()
else:
output_floor = output_floor.detach().clone().numpy()
output_coord = output_coord.detach().clone().numpy()
flrs_pred.append(output_floor)
coords_scaled_pred.append(output_coord)
flrs_pred = np.argmax(
|
np.vstack(flrs_pred)
|
numpy.vstack
|
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse import random as sp_random
from scipy.sparse.linalg import lsqr
from pylops.utils import dottest
from pylops.basicoperators import MatrixMult, VStack, \
HStack, Block, BlockDiag, Real
par1 = {'ny': 101, 'nx': 101,
'imag': 0, 'dtype':'float64'} # square real
par2 = {'ny': 301, 'nx': 101,
'imag': 0, 'dtype':'float64'} # overdetermined real
par1j = {'ny': 101, 'nx': 101,
'imag': 1j, 'dtype':'complex128'} # square imag
par2j = {'ny': 301, 'nx': 101,
'imag': 1j, 'dtype':'complex128'} # overdetermined imag
@pytest.mark.parametrize("par", [(par1)])
def test_VStack_incosistent_columns(par):
"""Check error is raised if operators with different number of columns
are passed to VStack
"""
G1 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
G2 = np.random.normal(0, 10, (par['ny'], par['nx'] + 1)).astype(par['dtype'])
with pytest.raises(ValueError):
VStack([MatrixMult(G1, dtype=par['dtype']),
MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
@pytest.mark.parametrize("par", [(par1)])
def test_HStack_incosistent_columns(par):
"""Check error is raised if operators with different number of rows
    are passed to HStack
"""
G1 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
G2 = np.random.normal(0, 10, (par['ny'] + 1, par['nx'])).astype(par['dtype'])
with pytest.raises(ValueError):
HStack([MatrixMult(G1, dtype=par['dtype']),
MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_VStack(par):
"""Dot-test and inversion for VStack operator
"""
np.random.seed(0)
G1 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
G2 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
x = np.ones(par['nx']) + par['imag']*np.ones(par['nx'])
Vop = VStack([MatrixMult(G1, dtype=par['dtype']),
MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
assert dottest(Vop, 2*par['ny'], par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
xlsqr = lsqr(Vop, Vop * x, damp=1e-20, iter_lim=300, show=0)[0]
assert_array_almost_equal(x, xlsqr, decimal=4)
# use numpy matrix directly in the definition of the operator
V1op = VStack([G1, MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
assert dottest(V1op, 2 * par['ny'], par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
# use scipy matrix directly in the definition of the operator
G1 = sp_random(par['ny'], par['nx'], density=0.4).astype('float32')
V2op = VStack([G1, MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
assert dottest(V2op, 2 * par['ny'], par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
@pytest.mark.parametrize("par", [(par2), (par2j)])
def test_HStack(par):
"""Dot-test and inversion for HStack operator with numpy array as input
"""
np.random.seed(0)
G1 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32')
G2 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32')
x = np.ones(2*par['nx']) + par['imag']*np.ones(2*par['nx'])
Hop = HStack([G1, MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
assert dottest(Hop, par['ny'], 2*par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
xlsqr = lsqr(Hop, Hop * x, damp=1e-20, iter_lim=300, show=0)[0]
assert_array_almost_equal(x, xlsqr, decimal=4)
# use numpy matrix directly in the definition of the operator
H1op = HStack([G1, MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
assert dottest(H1op, par['ny'], 2 * par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
# use scipy matrix directly in the definition of the operator
G1 = sp_random(par['ny'], par['nx'], density=0.4).astype('float32')
H2op = HStack([G1, MatrixMult(G2, dtype=par['dtype'])],
dtype=par['dtype'])
assert dottest(H2op, par['ny'], 2 * par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Block(par):
"""Dot-test and inversion for Block operator
"""
np.random.seed(0)
G11 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
G12 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
G21 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
G22 = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
x = np.ones(2*par['nx']) + par['imag']*np.ones(2*par['nx'])
Bop = Block([[MatrixMult(G11, dtype=par['dtype']),
MatrixMult(G12, dtype=par['dtype'])],
[MatrixMult(G21, dtype=par['dtype']),
MatrixMult(G22, dtype=par['dtype'])]],
dtype=par['dtype'])
assert dottest(Bop, 2*par['ny'], 2*par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
xlsqr = lsqr(Bop, Bop * x, damp=1e-20, iter_lim=500, show=0)[0]
assert_array_almost_equal(x, xlsqr, decimal=3)
# use numpy matrix directly in the definition of the operator
B1op = Block([[G11,
MatrixMult(G12, dtype=par['dtype'])],
[MatrixMult(G21, dtype=par['dtype']),
G22]], dtype=par['dtype'])
assert dottest(B1op, 2 * par['ny'], 2 * par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
# use scipy matrix directly in the definition of the operator
G11 = sp_random(par['ny'], par['nx'], density=0.4).astype('float32')
B2op = Block([[G11,
MatrixMult(G12, dtype=par['dtype'])],
[MatrixMult(G21, dtype=par['dtype']),
G22]], dtype=par['dtype'])
assert dottest(B2op, 2 * par['ny'], 2 * par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_BlockDiag(par):
"""Dot-test and inversion for BlockDiag operator
"""
|
np.random.seed(0)
|
numpy.random.seed
|
"""
parse CP2K output to Trajectory
type 1: cp2k_shell log, generated by ASE, with "out" in the filename
type 2: ENERGY_FORCE type, input file and xyz force output still exist
type 3:
"""
import logging
import numpy as np
from glob import glob
from os.path import isfile, getctime
from thyme import Trajectory
from thyme._key import *
from thyme.parsers.monty import read_pattern, read_table_pattern
from thyme.routines.folders import find_folders, find_folders_matching
HARTREE = 27.2114
HARTREE_BOHR = 51.42208619083232
nc_fl_num = r"[+-]?\d+\.*\d*[eE]?[+-]?\d*"
fl_num = r"(" + nc_fl_num + ")"
nc_sfl_num = r"\s+" + nc_fl_num
sfl_num = r"\s+" + fl_num
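# Illustrative note (not part of the original module): fl_num is the capturing
# variant of nc_fl_num, so a call such as re.findall(fl_num, "energy -1.23e-4")
# would be expected to return ['-1.23e-4'].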
def get_childfolders(path):
return find_folders_matching(["*.xyz", "*.inp", "*out*"], path)
def pack_folder_trj(folder: str, data_filter):
"""
For one plain folder (without looking into its children folder),
search for all the simulation store in this folder
It can be a single CP2K run, multiple MD runs or multiple ASE call.
We assume all trajectories in this folder has the same number of atoms, species
And the same DFT set up.
Args:
folder (str): folder path for parsing
Return:
Trajectory instance
"""
has_xyz = len(glob(f"{folder}/*.xyz")) > 0
has_out = len(glob(f"{folder}/*out*")) > 0
has_inp = (len(glob(f"{folder}/*.inp")) > 0) or (
len(glob(f"{folder}/*.restart")) > 0
)
has_pair = (len(glob(f"{folder}/*.inp")) > 0) and (has_out or has_xyz)
has_xyzs = (len(glob(f"{folder}/*-frc*.xyz")) > 0) and (
len(glob(f"{folder}/*-pos*.xyz")) > 0
)
conditions = [has_xyzs, has_pair, has_out]
trj = Trajectory()
if np.sum(conditions) == 0:
return trj
# first identify force_eval pair
MD_xyz = {}
if has_out:
# sort files from new to old
outfile_list = []
outfile_mtime = []
for outfile in glob(f"{folder}/*out*"):
outfile_list += [outfile]
outfile_mtime += [-getctime(outfile)]
sort_id = np.argsort(outfile_mtime)
outfile_list = np.array(outfile_list, dtype=str)
outfile_list = outfile_list[sort_id]
for outfile in outfile_list:
if not isfile(outfile):
continue
outfile_dict = parse_std_out(outfile)
if "abort" in outfile_dict:
continue
_trj = Trajectory()
if "send" in outfile_dict:
logging.info(f"parsing {outfile} as shell product")
_trj = parse_ase_shell_out(folder, outfile)
elif "run_type" in outfile_dict:
run_type = outfile_dict["run_type"]
proj_name = outfile_dict["proj_name"]
if run_type == "ENERGY_FORCE":
logging.info(f"parsing {outfile} as force_eval type product")
_trj = parse_force_eval_pairs(folder, outfile, outfile_dict)
elif run_type == "MD":
if "xyzout" in outfile_dict:
MD_xyz[proj_name] = f"{folder}/" + outfile_dict["inputfile"]
else:
raise NotImplementedError(
f"cannot parse MD without xyz {run_type}"
)
else:
raise NotImplementedError(f"cannot parse RUN_TYPE {run_type}")
if _trj.nframes > 0:
logging.info(f"repr {repr(_trj)}")
logging.info(f"add {_trj}")
logging.info(f"to {trj}")
trj.add_trj(_trj)
for k in MD_xyz:
_trj = parse_md(folder, inp=MD_xyz[k], proj_name=k)
logging.info(f"repr {repr(_trj)}")
trj.add_trj(_trj)
if has_xyz and has_inp and len(MD_xyz) == 0:
mtime = 10e9
mfile = ""
proj_name = ""
# choose the earliest input
for inputfile in glob(f"{folder}/*.inp") + glob(f"{folder}/*.restart"):
_mtime = getctime(inputfile)
if _mtime < mtime:
metadata = parse_std_inp_metadata(inputfile)
if metadata["run_type"] == "MD":
mtime = _mtime
mfile = inputfile
proj_name = metadata["proj_name"]
if isfile(f"{folder}/{proj_name}-pos-1.xyz"):
_trj = parse_md(folder, inp=inputfile, proj_name=proj_name)
logging.info(f"repr {repr(_trj)}")
trj.add_trj(_trj)
logging.info(trj)
logging.info(repr(trj))
return trj
# elif np.sum(conditions) == 0:
# logging.info(f"! {folder} skip for no file matching")
# else:
# logging.info(f"! {folder} skip for incomplete files")
# if folder == "./":
# folder = "."
# return Trajectory()
def parse_md(folder: str, inp: str, proj_name: str):
"""
Args:
folder (str): path of the folder
inp (str): the input file for CP2K
proj_name (str): the CP2K project name.
The prefix used for MD dumps
"""
logging.info(f"parse md in folder {folder}")
# if above strings are found
find_input = False
try:
find_input = (
isfile(inp)
and isfile(f"{folder}/{proj_name}-pos-1.xyz")
and isfile(f"{folder}/{proj_name}-frc-1.xyz")
)
except Exception as e:
logging.info(f"It is not a MD {e}")
if not find_input:
return Trajectory()
metadata = parse_std_inp_metadata(inp)
data = parse_std_inp_pos(inp)
return parse_cp2k_xyzs(
f"{folder}/{proj_name}-pos-1.xyz",
f"{folder}/{proj_name}-frc-1.xyz",
data["cells"],
metadata,
)
def parse_std_out(filename):
logging.info(f"parse {filename}")
d = read_pattern(
filename,
{
"inputfile": r"Input file name\s+(\S+)",
"abort": r"(ABORT)",
"run_type": r"Run type\s+(\S+)",
"proj_name": r"Project name\s+(\S+)",
"xyzout": r"Coordinates\s+\d+\s+(\S+)",
"send": r"Sending: (GET_E)",
"receive": r"Received: * (READY)",
"energy": r"Total energy:" + sfl_num,
},
)
del_keys = []
for k in d:
d[k] = np.array(d[k], dtype=str).reshape([-1])
if len(d[k]) > 0:
d[k] = d[k][0]
else:
del_keys += [k]
for k in del_keys:
del d[k]
if "energy" in d:
d["energy"] = float(d["energy"]) * HARTREE
return d
def parse_force_eval_pairs(folder, outfile, outfile_dict):
logging.info(f"parse {outfile}")
trj = Trajectory()
trj.per_frame_attrs += ["forces", "total_energy", "positions"] # , 'symbols']
symbol, force = parse_forceeval_force(outfile)
# if above strings are found
find_input = False
try:
inp = outfile_dict["inputfile"]
inp = f"{folder}/{inp}"
run_type = outfile_dict["run_type"]
proj_name = outfile_dict["proj_name"]
find_input = (run_type == "ENERGY_FORCE") and isfile(inp)
except Exception as e:
logging.info(f"{outfile} is not a force_eval {e}")
if not find_input:
return trj
metadata = parse_std_inp_metadata(inp)
data = parse_std_inp_pos(inp)
data.update(metadata)
if symbol is None:
find_force = False
for name in metadata["filenames"]:
for filename in glob(f"{folder}/{proj_name}*{name}*.xyz"):
if not find_force:
_symbol, _force = parse_forceeval_force(filename)
if _symbol is not None:
if all(_symbol == data[SPECIES]):
symbol = _symbol
force = _force
find_force = True
else:
find_force = True
if find_force:
data[FORCE] = force
if "energy" in outfile_dict:
data[TOTAL_ENERGY] = np.array([outfile_dict["energy"]])
trj = Trajectory.from_dict(data)
return trj
def parse_forceeval_force(filename):
logging.info(f"parse {filename}")
header_pattern = r"\#\s+Atom\s+Kind\s+Element\s+X\s+Y\s+Z"
footer_pattern = r"SUM OF ATOMIC FORCES\s+" + nc_sfl_num * 4
d = read_pattern(
filename,
{
"header": r"\#\s+Atom\s+Kind\s+Element\s+X\s+Y\s+(Z)",
"footer": r"SUM OF ATOMIC FORCES\s+" + sfl_num * 4,
},
)
if len(d["footer"]) > 0:
force = read_table_pattern(
filename,
row_pattern=r"\d+\s+\d+\s+([A-Z][a-z]*?)" + sfl_num * 3,
header_pattern=header_pattern,
footer_pattern=footer_pattern,
last_one_only=False,
)
if len(force) > 0:
force = np.array(force[0], str)
symbol = force[:, 0]
force = (
np.array(force[:, 1:], dtype=float).reshape([1, -1, 3]) * HARTREE_BOHR
)
return symbol, force
return None, None
def parse_std_inp_pos(filename):
logging.info(f"parse {filename}")
data = {}
if "restart" in filename:
footer = r"\s+UNIT angstrom"
else:
footer = r"\s+\&END COORD\s+"
position = read_table_pattern(
filename,
header_pattern=r"\&COORD",
row_pattern=r"([A-Z][a-z]*?)" + sfl_num * 3,
footer_pattern=footer,
last_one_only=False,
)
try:
position = np.array(position[0], str)
data["species"] = position[:, 0].reshape([-1])
data["positions"] = np.array(position[:, 1:], dtype=float).reshape([1, -1, 3])
data["natom"] = data["positions"].shape[1]
except:
pass
if "restart" in filename:
footer = r"MULTIPLE_UNIT_CELL"
else:
footer = r"&END"
cell = read_table_pattern(
filename,
header_pattern=r"\&CELL",
row_pattern=r"[A-Ca-c]" + sfl_num * 3,
footer_pattern=footer,
last_one_only=False,
)
if len(cell) > 0:
data["cells"] = np.array(cell[0], dtype=float).reshape([1, 3, 3])
return data
def parse_std_inp_metadata(filename):
logging.info(f"parse {filename}")
data = {}
d = read_pattern(
filename,
{
"kpoints": r"SCHEME\s+MONKHORST-PACK\s+(\d+)\s(\d+)\s(\d+)",
"gamma": r"SCHEME\s+([gG][a-zA-Z]*)",
"cutoff": r"REL_CUTOFF" + sfl_num,
"thermostat": r"ENSEMBLE\s+(\w*)",
"dipole_correction": r"SURFACE_DIPOLE_CORRECTION\s+(\w+)",
"run_type": r"RUN_TYPE\s+(\S+)",
"project": r"PROJECT\s+(\S+)",
"proj_name": r"PROJECT_NAME\s+(\S+)",
"filenames": r"FILENAME\s+(\S+)",
"etemp": r"ELECTRONIC_TEMPERATURE\s+[K]\s+(\w+)",
},
)
# TO DO: COMPONENTS_TO_FIX cannot be handled yet
fix_range = read_table_pattern(
filename,
header_pattern=r"^\s+\&FIXED_ATOMS",
row_pattern=r"LIST\s+(\d+)\.\.(\d+)",
footer_pattern=r"^\s+&END",
last_one_only=False,
)
fix_ids = read_table_pattern(
filename,
header_pattern=r"^\s+\&FIXED_ATOMS",
row_pattern=r"LIST\s+([\d+\s?]+)",
footer_pattern=r"^\s+&END",
last_one_only=False,
)
if len(d["run_type"]) > 0:
data["run_type"] = d["run_type"][0][0]
else:
data["run_type"] = "unknown"
if len(d["proj_name"]) > 0:
data["proj_name"] = d["proj_name"][0][0]
if len(d["project"]) > 0:
data["proj_name"] = d["project"][0][0]
ids = []
if len(fix_range) > 0:
print(fix_range)
ids = np.arange(int(fix_range[0][0][0]) - 1, int(fix_range[0][0][1]))
if len(fix_ids) > 0:
ids = [int(idx) - 1 for idx in fix_ids[0][0][0].split()]
if len(ids) > 0:
data["fix_atoms"] = True
data["fix_atoms_id"] = ids
else:
data["fix_atoms"] = False
if len(d["kpoints"]) > 0:
data["kpoints"] = [int(i) for i in d["kpoints"][-1]]
if len(d["gamma"]) > 0:
data["gamma"] = True
if len(d["cutoff"]) > 0:
data["cutoff"] = float(d["cutoff"][-1][0])
if len(d["thermostat"]) > 0:
data["thermostat"] = d["thermostat"][-1][0]
data["aimd"] = True
else:
data["aimd"] = False
if len(d["dipole_correction"]) > 0:
data["dipole_correction"] = True
else:
data["dipole_correction"] = False
if len(d["etemp"]) > 0:
data["etemp"] = float(d["etemp"][0][0])
data["filenames"] = np.array(d["filenames"], dtype=str).reshape([-1])
return data
# 'meltedCu':false,
# 'started from fix bottom': true,
# 'mass': [$(grep -i mass $file|awk '{printf "%5.2f,", $2}') ],
# 'timestep': $(grep -i timestep $file|awk '{printf "%3.1f:", $2}') ,
# },
def parse_ase_shell_out(folder, filename):
"""
assume all frames share the same symbols
"""
logging.info(f"parse {filename}")
trj = Trajectory()
with open(filename) as fin:
lines = fin.readlines()
nlines = len(lines)
nconfigs = 0
i = 0
cell = None
position = None
energy = None
force = None
input_dict = None
species = None
for filename in glob(f"{folder}/*.inp"):
if species is None:
data = parse_std_inp_pos(filename)
species = data["species"]
data = {}
while i < nlines:
if "LOAD" in lines[i]:
inputfile = lines[i].split()[2]
elif "SET_CELL" in lines[i]:
cell = []
for icell in range(3):
i += 1
cell_line = lines[i].split()[1:]
cell += [[float(x) for x in cell_line]]
cell = np.array(cell).reshape([-1])
elif "SET_POS" in lines[i]:
i += 1
natom = int(lines[i].split()[1]) // 3
position = []
for iatom in range(natom):
i += 1
l = lines[i].split()
pos_line = l[1:]
position += [[float(x) for x in pos_line]]
if input_dict is None and isfile(inputfile):
input_dict = parse_std_inp_metadata(inputfile)
data.update(input_dict)
d = parse_std_inp_pos(inputfile)
species = d["species"]
elif "GET_E" in lines[i]:
i += 1
energy = float(lines[i].split()[1])
elif "GET_F" in lines[i]:
i += 1
natom = int(lines[i].split()[1]) // 3
force = []
for iatom in range(natom):
i += 1
force_line = lines[i].split()[1:]
force += [[float(x) for x in force_line]]
force = np.array(force).reshape([-1])
position = np.array(position).reshape([-1])
data[CELL] = np.copy(cell).reshape([1, 3, 3])
data[SPECIES] = species
data[POSITION] = np.copy(position).reshape([1, -1, 3])
data[TOTAL_ENERGY] = np.array([energy])
data[FORCE] =
|
np.copy(force)
|
numpy.copy
|
from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
from model.main import *
import json
import pandas as pd
import numpy as np
class DataStore():
model=None
model_month=None
sale_model=None
data = DataStore()
@app.route('/', methods=["GET"])
def home():
percent=percentageMethod()
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
with open('percent.json') as f:
file2 = json.load(f)
labels=file2['index']
data=file2['data']
if "username" in session:
return render_template('index.html', last_year=lastYear(), last_month=lastMonth(),dataset=data, label=labels, percent=percent,
month_index=month_index, month_data=month_data)
else:
return render_template('login.html')
# Register new user
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
registerUser()
return redirect(url_for("login"))
#Check if email already exists in the registration page
@app.route('/checkusername', methods=["POST"])
def check():
return checkusername()
# Everything Login (routes to render the page, check if the username exists, and verify the password through jQuery AJAX requests)
@app.route('/login', methods=["GET"])
def login():
if request.method == "GET":
if "username" not in session:
return render_template("login.html")
else:
return redirect(url_for("home"))
@app.route('/checkloginusername', methods=["POST"])
def checkUserlogin():
return checkloginusername()
@app.route('/checkloginpassword', methods=["POST"])
def checkUserpassword():
return checkloginpassword()
#The admin logout
@app.route('/logout', methods=["GET"]) # URL for logout
def logout(): # logout function
session.pop('username', None) # remove user session
return redirect(url_for("home")) # redirect to home page with message
#Forgot Password
@app.route('/forgot-password', methods=["GET"])
def forgotpassword():
return render_template('forgot-password.html')
#404 Page
@app.route('/404', methods=["GET"])
def errorpage():
return render_template("404.html")
#Blank Page
@app.route('/blank', methods=["GET"])
def blank():
return render_template('blank.html')
@app.route('/totalyear', methods=["GET"])
def total_year():
total_year=totalYear()
file1=pd.read_json('total_year.json',orient='index')
year_index=np.array(file1['year'])
year_data=np.array(file1['total'])
return render_template("total_year.html",year_index=year_index, year_data=year_data)
@app.route('/totalmonth', methods=["GET"])
def total_month():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
num=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
data=data1['total']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean Squared Error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
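# --- Illustrative worked example (hypothetical numbers, not part of the
# original app): with y = np.array([10, 20]) and yhat = np.array([12, 18]),
# the errors are e = [-2, 2], so test(y, yhat) gives
#   mse = 4.0, rmse = 2.0, mae = 2.0, mape = mean([0.2, 0.1]) = 0.15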
@app.route('/totalmonth', methods=["POST"])
def total_month_num():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_month"))
predict_rs, fitted_data=predict(data.model_month,num)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean Squared Error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["GET"])
def total_date():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
num=30
# Fit model
model_date=fit_model_date()
data.model=model_date
predict_rs_date, fitted_data_date=predict_date(model_date,30)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
#Test model
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=model_date, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def fit_model_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
data=data1['total']
result1 = fit_model_fast(data)
return result1
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=30
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean Squared Error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["POST"])
def total_date_num():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_date"))
predict_rs_date, fitted_data_date=predict_date(data.model,num)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=data.model, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=6
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
return mse, rmse, mae, mape
@app.route('/revenueyear', methods=["GET"])
def revenue_year():
sale_year=saleYear()
year_index=np.array(sale_year['year'])
year_data=np.array(sale_year['quantity'])
return render_template("revenue_year.html",year_index=year_index, year_data=year_data)
@app.route('/revenuemonth', methods=["GET"])
def revenue_month():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['quantity'])
num_sale=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['quantity'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale)
def check_stationary():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
data=data1['quantity']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','quantity']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','quantity']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean Squared Error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/revenuemonth', methods=["POST"])
def revenue_month_num():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=
|
np.array(file1['quantity'])
|
numpy.array
|
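A minimal standalone sketch of the error metrics the test() helpers above compute (MSE, RMSE, MAE, MAPE); the arrays here are made up rather than taken from the fitted ARIMA output, and the function name is illustrative only.
import numpy as np

def forecast_errors(y_true, y_pred):
    """Return (mse, rmse, mae, mape) for two equal-length arrays."""
    y_true = np.asarray(y_true, dtype=float)
    e = y_true - np.asarray(y_pred, dtype=float)
    mse = np.mean(e ** 2)                    # mean squared error
    rmse = np.sqrt(mse)                      # root mean squared error
    mae = np.mean(np.abs(e))                 # mean absolute error
    mape = np.mean(np.abs(e / y_true))       # mean absolute percentage error
    return mse, rmse, mae, mape

# Example with dummy monthly totals and a dummy forecast.
actual = [120.0, 135.0, 150.0]
forecast = [118.0, 140.0, 149.0]
print(forecast_errors(actual, forecast))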
import numpy as np
def quaternion(v):
# Take 3d input and turn into quaternion.
x,y,z = v
return np.array(([0,x,y,z]))
def rotation(theta,axis=None):
# Rotation quaternion of angle theta by axis x,y,z.
# Axis input should be of type [1,0,0].
if axis is None:
axis = np.array(([1,1,1]))
theta = np.deg2rad(theta)
i,j,k =
|
np.sin(theta/2)
|
numpy.sin
|
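To make the half-angle construction in rotation() above concrete, here is a self-contained sketch of the standard rotation quaternion q = [cos(theta/2), sin(theta/2)*n] for a normalized axis n; it illustrates the textbook formula rather than reproducing the snippet's exact return value.
import numpy as np

def rotation_quaternion(theta_deg, axis=(0.0, 0.0, 1.0)):
    """Unit quaternion [w, x, y, z] rotating by theta_deg about the given axis."""
    theta = np.deg2rad(theta_deg)
    n = np.asarray(axis, dtype=float)
    n = n / np.linalg.norm(n)            # normalize the rotation axis
    w = np.cos(theta / 2.0)              # scalar part
    x, y, z = np.sin(theta / 2.0) * n    # vector part
    return np.array([w, x, y, z])

q = rotation_quaternion(90.0, axis=(0.0, 0.0, 1.0))
print(q, np.linalg.norm(q))              # the norm should be 1.0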
import numpy as np
import sys
import open3d as o3d
import tensorflow as tf
import os
import pcp_utils
from pcp_utils.utils import Config
graspnet_path = pcp_utils.utils.get_6dof_graspnet_dir()
sys.path.append(graspnet_path)
import grasp_estimator
import tf_utils
from grasp_data_reader import regularize_pc_point_count
tf.set_random_seed(0)
def _regularize_pc_point_count(pcs, npoints):
return regularize_pc_point_count(pcs, npoints)
class GraspSampler:
class Config(Config):
# path for checkpoints
#vae_checkpoint_folder = f'{graspnet_path}/checkpoints/npoints_1024_train_evaluator_0_allowed_categories__ngpus_1_'
#vae_checkpoint_folder = f'{graspnet_path}/log/vae_finetune_20_gan1_model_gan1/'
#vae_checkpoint_folder = f'{graspnet_path}/log/vae_finetune_mug1_50_fv_na0_gan0_all_train_small_var10_lr5/'
vae_checkpoint_folder = f'{graspnet_path}/checkpoints/latent_size_2_ngpus_1_gan_1_confidence_weight_0.1_npoints_1024_num_grasps_per_object_256_train_evaluator_0_'
evaluator_checkpoint_folder = f'{graspnet_path}/checkpoints/npoints_1024_train_evaluator_1_allowed_categories__ngpus_8_/'
#evaluator_checkpoint_folder = f'{graspnet_path}/log/evaluator_finetune_mug1_50_fv_all_train_small_var10'
#evaluator_checkpoint_folder = f'{graspnet_path}/log/evaluator_finetune_20_gan1_40k'
gradient_based_refinement = False
grasp_conf_threshold = 0.8
cut_off_points = [0.3, 0.5, 0.5] #should remove
# camera params
camera_img_height = 128
camera_img_width = 128
camera_radius = 0.3
camera_fov_y = 45
camera_pitch = [40, 41, 2]
camera_yaw = [0, 350, 36]
camera_yaw_list = None #[0, 60, 300]
camera_save_image = False
camera_recon_scene = True
camera_lookat_pos = [1.3, 0.75, 0.4]
table_top = [1.3, 0.75, 0.4]
table_T_camR = [0, 0, 0]
cut_off_points = [0.3, 0.5, 0.5] # for cropping pointcloud
data_collection_mode = False
data_collection_from_trained_model = False
save_data_name = None
fix_view = False
def __init__(self, config:Config):
self.config = config
self.vae_checkpoint_folder = config.vae_checkpoint_folder
self.evaluator_checkpoint_folder = config.evaluator_checkpoint_folder
self.gradient_based_refinement = False
self.grasp_conf_threshold = 0.8
self.cut_off_points = config.cut_off_points
self.output_grasps_dir = "vae_generated_grasps"
self.fix_view = config.fix_view
##### Prepare the 6dof graspnet network for forward pass ######
cfg = grasp_estimator.joint_config(
self.vae_checkpoint_folder,
self.evaluator_checkpoint_folder,
)
cfg['threshold'] = self.grasp_conf_threshold
cfg['sample_based_improvement'] = 1 - int(self.gradient_based_refinement)
cfg['num_refine_steps'] = 10 if self.gradient_based_refinement else 20
if self.config.data_collection_mode:
if not self.config.data_collection_from_trained_model:
cfg["use_geometry_sampling"] = True
cfg['num_refine_steps'] = 0
cfg['grasp_selection_mode'] = "all"
#cfg['num_refine_steps'] = 0
self.num_refine_steps = cfg['num_refine_steps']
self.estimator = grasp_estimator.GraspEstimator(cfg)
self.sess = tf.Session()
self.estimator.build_network()
self.estimator.load_weights(self.sess)
if not os.path.exists(self.output_grasps_dir):
os.makedirs(self.output_grasps_dir)
# set camera for this:
self.camera_positions, self.camera_quats = pcp_utils.cameras.generate_new_cameras_hemisphere(radius=self.config.camera_radius,
lookat_point=self.config.camera_lookat_pos, pitch=self.config.camera_pitch, yaw=self.config.camera_yaw, yaw_list=self.config.camera_yaw_list)
self.n_cams = len(self.camera_positions)
# i don't think we need this
mujoco_T_adam = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]], dtype=np.float32)
        origin_T_camR_xpos = np.array(config.table_top, np.float32) + np.array(config.table_T_camR, np.float32)
origin_T_adam =
|
np.zeros((4,4), dtype=np.float32)
|
numpy.zeros
|
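The 4x4 origin_T_adam assembled above is a homogeneous transform; as a generic sketch (independent of the grasping code's exact frames), a 3x3 rotation and a translation can be packed into such a matrix like this, reusing the mujoco_T_adam axis swap and the table-top offset from the snippet purely as example values.
import numpy as np

def make_homogeneous(rotation_3x3, translation_xyz):
    """Pack a 3x3 rotation and a 3-vector translation into a 4x4 transform."""
    T = np.eye(4, dtype=np.float32)
    T[:3, :3] = np.asarray(rotation_3x3, dtype=np.float32)
    T[:3, 3] = np.asarray(translation_xyz, dtype=np.float32)
    return T

R = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]], dtype=np.float32)  # axis swap, as in mujoco_T_adam
t = np.array([1.3, 0.75, 0.4], dtype=np.float32)                    # e.g. the table-top offset
T = make_homogeneous(R, t)
point = np.array([0.1, 0.0, 0.0, 1.0], dtype=np.float32)            # homogeneous point
print(T @ point)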
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import math
from functools import partial
import numpy as np
import paddle
from paddle.metric import Metric, Accuracy, Precision, Recall
__all__ = ['AccuracyAndF1', 'Mcc', 'PearsonAndSpearman']
class AccuracyAndF1(Metric):
"""
Encapsulates Accuracy, Precision, Recall and F1 metric logic.
"""
def __init__(self,
topk=(1, ),
pos_label=1,
name='acc_and_f1',
*args,
**kwargs):
super(AccuracyAndF1, self).__init__(*args, **kwargs)
self.topk = topk
self.pos_label = pos_label
self._name = name
self.acc = Accuracy(self.topk, *args, **kwargs)
self.precision = Precision(*args, **kwargs)
self.recall = Recall(*args, **kwargs)
self.reset()
def compute(self, pred, label, *args):
self.label = label
self.preds_pos = paddle.nn.functional.softmax(pred)[:, self.pos_label]
return self.acc.compute(pred, label)
def update(self, correct, *args):
self.acc.update(correct)
self.precision.update(self.preds_pos, self.label)
self.recall.update(self.preds_pos, self.label)
def accumulate(self):
acc = self.acc.accumulate()
precision = self.precision.accumulate()
recall = self.recall.accumulate()
if precision == 0.0 or recall == 0.0:
f1 = 0.0
else:
# 1/f1 = 1/2 * (1/precision + 1/recall)
f1 = (2 * precision * recall) / (precision + recall)
return (
acc,
precision,
recall,
f1,
(acc + f1) / 2, )
def reset(self):
self.acc.reset()
self.precision.reset()
self.recall.reset()
self.label = None
self.preds_pos = None
def name(self):
"""
Return name of metric instance.
"""
return self._name
class Mcc(Metric):
"""
Matthews correlation coefficient
https://en.wikipedia.org/wiki/Matthews_correlation_coefficient.
"""
def __init__(self, name='mcc', *args, **kwargs):
super(Mcc, self).__init__(*args, **kwargs)
self._name = name
self.tp = 0 # true positive
self.fp = 0 # false positive
self.tn = 0 # true negative
self.fn = 0 # false negative
def compute(self, pred, label, *args):
preds = paddle.argsort(pred, descending=True)[:, :1]
return (preds, label)
def update(self, preds_and_labels):
preds = preds_and_labels[0]
labels = preds_and_labels[1]
if isinstance(preds, paddle.Tensor):
preds = preds.numpy()
if isinstance(labels, paddle.Tensor):
labels = labels.numpy().reshape(-1, 1)
sample_num = labels.shape[0]
for i in range(sample_num):
pred = preds[i]
label = labels[i]
if pred == 1:
if pred == label:
self.tp += 1
else:
self.fp += 1
else:
if pred == label:
self.tn += 1
else:
self.fn += 1
def accumulate(self):
if self.tp == 0 or self.fp == 0 or self.tn == 0 or self.fn == 0:
mcc = 0.0
else:
            # mcc = (tp*tn - fp*fn) / sqrt((tp+fp)(tp+fn)(tn+fp)(tn+fn))
mcc = (self.tp * self.tn - self.fp * self.fn) / math.sqrt(
(self.tp + self.fp) * (self.tp + self.fn) *
(self.tn + self.fp) * (self.tn + self.fn))
return (mcc, )
def reset(self):
self.tp = 0 # true positive
self.fp = 0 # false positive
self.tn = 0 # true negative
self.fn = 0 # false negative
def name(self):
"""
Return name of metric instance.
"""
return self._name
class PearsonAndSpearman(Metric):
"""
Pearson correlation coefficient
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
Spearman's rank correlation coefficient
https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient.
"""
def __init__(self, name='mcc', *args, **kwargs):
super(PearsonAndSpearman, self).__init__(*args, **kwargs)
self._name = name
self.preds = []
self.labels = []
def update(self, preds_and_labels):
preds = preds_and_labels[0]
labels = preds_and_labels[1]
if isinstance(preds, paddle.Tensor):
preds = preds.numpy()
if isinstance(labels, paddle.Tensor):
labels = labels.numpy()
preds = np.squeeze(preds.reshape(-1, 1)).tolist()
labels = np.squeeze(labels.reshape(-1, 1)).tolist()
self.preds.append(preds)
self.labels.append(labels)
def accumulate(self):
preds = [item for sublist in self.preds for item in sublist]
labels = [item for sublist in self.labels for item in sublist]
#import pdb; pdb.set_trace()
pearson = self.pearson(preds, labels)
spearman = self.spearman(preds, labels)
return (
pearson,
spearman,
(pearson + spearman) / 2, )
def pearson(self, preds, labels):
n = len(preds)
#simple sums
sum1 = sum(float(preds[i]) for i in range(n))
sum2 = sum(float(labels[i]) for i in range(n))
#sum up the squares
sum1_pow = sum([pow(v, 2.0) for v in preds])
sum2_pow = sum([pow(v, 2.0) for v in labels])
#sum up the products
p_sum = sum([preds[i] * labels[i] for i in range(n)])
numerator = p_sum - (sum1 * sum2 / n)
denominator = math.sqrt(
(sum1_pow - pow(sum1, 2) / n) * (sum2_pow - pow(sum2, 2) / n))
if denominator == 0:
return 0.0
return numerator / denominator
def spearman(self, preds, labels):
preds_rank = self.get_rank(preds)
labels_rank = self.get_rank(labels)
total = 0
n = len(preds)
for i in range(n):
total += pow((preds_rank[i] - labels_rank[i]), 2)
spearman = 1 - float(6 * total) / (n * (pow(n, 2) - 1))
return spearman
def get_rank(self, raw_list):
x = np.array(raw_list)
r_x = np.empty(x.shape, dtype=int)
y =
|
np.argsort(-x)
|
numpy.argsort
|
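A quick numerical cross-check of the correlation formulas in PearsonAndSpearman above: Pearson in centered form (algebraically equal to the raw-sum version used in the class) and Spearman via the rank-difference formula, compared against NumPy on arbitrary toy data with no ties.
import numpy as np

preds = [0.1, 0.4, 0.35, 0.8, 0.7]
labels = [0.0, 0.3, 0.5, 0.6, 0.9]
x, y = np.asarray(preds), np.asarray(labels)

# Pearson, centered form (equivalent to the raw-sum formula above).
pearson = np.sum((x - x.mean()) * (y - y.mean())) / np.sqrt(
    np.sum((x - x.mean()) ** 2) * np.sum((y - y.mean()) ** 2))

# Spearman: with no ties, the rank-difference formula equals Pearson on ranks.
rank = lambda v: np.argsort(np.argsort(v)).astype(float)
rx, ry = rank(x), rank(y)
d = rx - ry
n = len(x)
spearman = 1 - 6 * np.sum(d ** 2) / (n * (n ** 2 - 1))

print(pearson, np.corrcoef(x, y)[0, 1])      # should agree
print(spearman, np.corrcoef(rx, ry)[0, 1])   # should agree (no ties here)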
import gvar as gv
import numpy as np
data_file = 'data/callat_a09m310_test.h5'
fit_states = ['pion', 'kaon', 'proton', 'omega']
bs_seed = 'a12m220XL'
corr_lst = {
# PION
'pion':{
'dsets':['a09m310/piplus'],
'weights' :[1],
't_reverse':[False],
'fold' :True,
'snks' :['S', 'P'],
'srcs' :['S'],
'xlim' :[0,48.5],
'ylim' :[0.12,0.169],
'colors' :{'SS':'#70bf41','PS':'k'},
'type' :'cosh',
'ztype' :'z_snk z_src',
'z_ylim' :[0.055,0.26],
# fit params
'n_state' :3,
'T' :96,
't_range' :
|
np.arange(8,48)
|
numpy.arange
|
r"""@package motsfinder.axisym.curve.expcalc
Computation class storing interim results of expansion calculations.
The implementation here uses the formulas derived in
\ref thornburg2003_1 "[1]". Specifically, we make heavy use of the quantities
`A, B, C, D` defined in \ref thornburg2003_1 "[1]" in equation (12) to compute
the expansion \f$ \Theta \f$ using equation (11). See also
\ref pookkolb2018 "[2]" and the docstrings of the individual procedures.
In the base class ExpansionCalc defined in this module, we do not consider how
the used quantities \f$ s_i \f$ and \f$ \partial_i s_j \f$ are obtained. This
depends on how the surfaces are represented and hence is the responsibility of
subclasses to implement. Additionally, subclasses also need to supply surface
parameter derivatives defined in \ref thornburg2003_1 "[1]" as
\f$ X^u_i = \partial_i y^u \f$ and
\f$ X^u_{ij} = \partial_i\partial_j y^u \f$.
In the axisymmetric case considered here, we have only one parameter,
\f$ y^u = \lambda \f$ along the curve, and hence drop the `u` superscript.
Note that in this code, we call the covector field \f$ X_i \f$ simply `X` and
the 2nd rank tensor field \f$ X_{ij} \f$ simply `Y` (Python cannot
differentiate between objects based on how many indices you use).
@b Examples
See implementations starshapedcurve._StarShapedExpansionCalc and
refparamcurve._RefParamExpansionCalc.
@b References
\anchor thornburg2003_1 [1] <NAME>. "A fast apparent horizon finder
for three-dimensional Cartesian grids in numerical relativity." Classical
and quantum gravity 21.2 (2003): 743.
\anchor pookkolb2018 [2] <NAME>, <NAME>, <NAME> and <NAME>, "The existence and stability of marginally trapped surfaces."
arXiv:1811.10405 [gr-qc].
"""
from abc import ABCMeta, abstractmethod
from math import fsum
from six import add_metaclass
import numpy as np
from scipy import linalg
from scipy.misc import derivative
from ...utils import cache_method_results
from ...numutils import inverse_2x2_matrix_derivative
from ...metric import christoffel_symbols, christoffel_deriv
from ...metric import riemann_components
__all__ = []
@add_metaclass(ABCMeta)
class ExpansionCalc(object):
r"""Abstract base class for computing the expansion at one point.
This class serves as coordinator for computing the expansion and
functional derivatives w.r.t. the horizon function. Sub classes need only
implement a small number of computational methods.
The purpose of having a separate class hierarchy for computing the
expansion (as opposed to doing all the computations inside the curve
classes) is to be able to store a number of interim results valid only for
the results at one point of the surface. Including these as `cache` in the
curve classes would in principle be possible. To ease management of cache
invalidation (when computing at a different point), the complete cache
should live on one object. The ExpansionCalc class and its sub classes can
be interpreted as such a cache, with added functionality to do the
necessary computations using the cached values.
"""
def __init__(self, curve, h_fun, param, metric):
r"""Create a "calc" object for certain point of a curve.
The curve represents an axisymmetric surface.
@param curve (expcurve.ExpansionCurve)
The curve representing the (trial) surface on which to compute the
expansion and other quantities.
@param h_fun (exprs.numexpr.NumericExpression)
The (1D) "horizon" function. The subclasses implementing this
ExpansionCalc class are free to interpret as they wish.
@param param (float)
The parameter value along the `curve` at which the quantities
should be computed.
@param metric
The Riemannian 3-metric defining the geometry of the surrounding
space.
"""
## Step sizes for FD numerical differentiation of the expansion
## \wrt `h`, `h'`, ``h''``, respectively.
self.dx_hdiffs = (1e-6, 1e-6, 1e-3)
## Finite difference differentiation order.
self.fd_order = 3
## The curve representing the (trial) surface.
self.curve = curve
## Horizon function (in case we need higher derivatives than ``h''``).
self.h_fun = h_fun
## Value of horizon function `h` at the given parameter.
self.h = h_fun(param)
## Value of `h'` at the given parameter.
self.dh = h_fun.diff(param, n=1)
## Value of ``h''`` at the given parameter.
self.ddh = h_fun.diff(param, n=2)
## Parameter on the curve at which to do the computations.
self.param = param
point = curve(param, xyz=True)
## 3D point in `x`,`y`,`z` coordinates.
self.point = point
## Metric (tensor field).
self.metric = metric
## Metric tensor at the point to do computations at.
self.g = metric.at(point)
if curve.extr_curvature is None:
## Extrinsic curvature at the point to do computations at.
self.K = None
else:
self.K = curve.extr_curvature(point)
# Cached metric derivatives (computed on-demand).
self._dg = None
self._dg_inv = None
self._ddg = None
self._ddg_inv = None
## Derivatives \f$ \partial_i \ln\sqrt{g} \f$
self.dlnsqrtg = np.asarray(metric.diff_lnsqrtg(point))
s, ds, X, Y = self._compute_s_ds_X_Y()
## Normal covector (not normalized).
self.s = np.asarray(s)
## Derivative matrix \f$ \partial_i s_j \f$ of normal vector.
self.ds = np.asarray(ds)
## Derivative covector \f$ X_i := \partial_i \lambda(\vec x) \f$.
self.X = np.asarray(X)
## Second derivatives \f$ Y := X_{ij} := \partial_i\partial_j\lambda\f$.
self.Y = np.asarray(Y)
## Contravariant normal vector (not normalized).
self.s_up = self.g.raise_idx(s)
## Contravariant parameter derivative \f$ X^i := g^{ij}X_j \f$.
self.X_up = self.g.raise_idx(X)
ABCD, trK = self._compute_ABCDtrK()
## A, B, C, D terms of the Thornburg expansion formula.
self.ABCD = ABCD
## Trace of the extrinsic curvature.
self.trK = trK
## Cached expansion result.
self._Th = None
@property
def dg(self):
r"""Derivative of 3-metric components \wrt x,y,z."""
if self._dg is None:
self._dg = np.asarray(self.metric.diff(self.point, diff=1))
return self._dg
@property
def dg_inv(self):
r"""Derivative of inverse 3-metric components.
This is computed using
\f$0 = \partial_i \delta^a_b = \partial_i(g^{ac}g_{cb})\f$
from which we get
\f[
\partial_i g^{-1} = -g^{-1} (\partial_i g) g^{-1}.
\f]
"""
if self._dg_inv is None:
g_inv = self.g.inv
dg = self.dg
# explanation:
# X = g_inv.dot(dg) == g^ad partial_i g_db
# Y = X.dot(g_inv) == X^a_ib g^be
# => Y has indices Y[a,i,e] == (g^-1 partial_i g g^-1)^ae
# we want "i" to be the first axis => swapaxes(0, 1)
# equivalent to: -np.einsum('ic,acd,dj', _g_inv, _dg, _g_inv)
self._dg_inv = -(
g_inv.dot(dg).dot(g_inv).swapaxes(0, 1)
)
return self._dg_inv
@property
def ddg(self):
r"""Second derivatives of 3-metric components."""
if self._ddg is None:
self._ddg = np.asarray(self.metric.diff(self.point, diff=2))
return self._ddg
@property
def ddg_inv(self):
r"""Second derivatives of inverse 3-metric components.
As for `dg_inv`, using
\f$0 = \partial_i \partial_j \delta^a_b
= \partial_i \partial_j (g^{ac}g_{cb})\f$
we get
\f[
\partial_i \partial_j g^{-1}
= -g^{-1}\big[
(\partial_i \partial_j g) g^{-1}
+ (\partial_j g) (\partial_i g^{-1})
+ (\partial_i g) (\partial_j g^{-1})
\big].
\f]
"""
if self._ddg_inv is None:
g_inv = self.g.inv
dg = self.dg
dg_inv = self.dg_inv
ddg = self.ddg
# equivalent to:
# -(
# + np.einsum('ij,abjk,kl', g_inv, ddg, g_inv)
# + np.einsum('ij,bjk,akl', g_inv, dg, dg_inv)
# + np.einsum('ij,ajk,bkl', g_inv, dg, dg_inv)
# )
tmp = g_inv.dot(dg).dot(dg_inv)
self._ddg_inv = -(
+ np.moveaxis(g_inv.dot(ddg).dot(g_inv), [1,2,0], [0,1,2])
+ np.moveaxis(tmp, [2,1,0], [0,1,2])
+ np.moveaxis(tmp, [1,2,0], [0,1,2])
)
return self._ddg_inv
def _compute_ABCDtrK(self):
r"""Compute the A, B, C, D and trace(K) terms.
The computation only uses the cached covariant normal `s` and its
derivatives `ds` (in addition to the metric and extrinsic curvature,
of course). This means that any subclass only needs to implement
computing `s` and `ds` in order to use this function.
This computes the terms as defined in equation (12) in
\ref thornburg2003_1 "[1]".
"""
s, s_up, ds = self.s, self.s_up, self.ds
g, dg_inv, dlnsqrtg = self.g, self.dg_inv, self.dlnsqrtg
A = (
- ds.dot(s_up).dot(s_up)
- 0.5 * dg_inv.dot(s).dot(s).dot(s_up)
)
B = (
dg_inv.dot(s).diagonal().sum()
+ g.inv.dot(ds).diagonal().sum()
+ dlnsqrtg.dot(s_up)
)
if self.K is None:
trK = 0.0
C = 0.0
else:
trK = g.inv.dot(self.K).diagonal().sum()
C = self.K.dot(s_up).dot(s_up)
D = s.dot(s_up)
return (A, B, C, D), trK
def expansion(self, ingoing=False):
r"""Compute the expansion at the configured point.
This implements equation (11) in \ref thornburg2003_1 "[1]".
"""
if ingoing:
A, B, C, D = self.ABCD
return -A/D**1.5 - B/D**0.5 + C/D - self.trK
if self._Th is None:
A, B, C, D = self.ABCD
self._Th = A/D**1.5 + B/D**0.5 + C/D - self.trK
return self._Th
def diff(self, hdiff=0):
r"""Compute derivative of expansion \wrt `h`, `h'`, or ``h''``.
The argument `hdiff` controls the derivative order of `h` with
respect to which to differentiate the expansion, i.e. `hdiff=0` will
compute \f$ \partial_{h}\Theta \f$, while for `hdiff=2` we
compute \f$ \partial_{h''}\Theta \f$.
Numerical FD differentiation is performed if a `NotImplementedError`
is raised in one of the subroutines.
"""
try:
return self._diff(hdiff=hdiff)
except NotImplementedError:
return self._diff_FD(hdiff=hdiff)
def _diff_FD(self, hdiff):
r"""Compute derivatives of the expansion using finite differencing.
Since the expansion depends on `h` and its derivatives only
ultra-locally, a reasonable approximation to the variational
derivative of the expansion w.r.t. `h` can be obtained by varying `h`
(or derivatives) point-wise, i.e. compute the usual partial derivative
of the expansion w.r.t. `h`. This can be approximated using a finite
difference differentiation, which is done in this function. Note that
irrespective of the accuracy of this approximation, the test whether
the expansion has the desired value (e.g. 0.0 for a MOTS) is
independent of the results computed here.
"""
h_orig = self.curve.h
Th0 = self.expansion()
param = self.param
h_plus_eps = _FuncVariation(h_orig.evaluator(), diff=hdiff)
with self.curve.override_evaluator(h_plus_eps):
def f(eps):
if eps == 0:
return Th0
h_plus_eps.eps = eps
with self.curve.suspend_calc_obj():
return self.curve.expansion(param)
dx = self.dx_hdiffs[hdiff]
return derivative(f, x0=0.0, n=1, dx=dx, order=self.fd_order)
def _diff(self, hdiff):
r"""Compute analytical functional derivatives of the expansion.
This may raise a `NotImplementedError`, indicating that FD
differentiation needs to be performed.
@param hdiff
Derivative order of `h` to differentiate the expansion by (see
below). E.g., a value of `0` will compute \f$\partial_h \Theta\f$.
@b Notes
In general, due to the ultra-local dependency of the expansion on `h`
and its first two derivatives, we can treat the variational
differentiation like a simple partial differentiation. This can also
be seen by taking the definition
\f[
(\delta\Theta)(h)\Delta
:= \frac{d}{d\varepsilon}\Big|_{\varepsilon=0}
\Theta(h+\varepsilon\Delta)
\f]
and separating the terms based on the derivative order of
\f$\Delta\f$. The result will be of the form
\f[
(\delta\Theta)(h)\Delta =
\partial_h\Theta \Delta
+ \partial_{h'}\Theta \Delta'
+ \partial_{h''}\Theta \Delta''.
\f]
These three terms are computed here using
\f[
\partial_f \Theta =
\frac{A_f}{D^{3/2}}
- \frac{3}{2} \frac{A D_f}{D^{5/2}}
+ \frac{B_f}{D^{1/2}}
- \frac{1}{2} \frac{B D_f}{D^{3/2}}
+ \frac{C_f}{D}
- \frac{C D_f}{D^2}
- \partial_f \,\mathrm{tr} K,
\f]
where `f` is one of ``h, h', h''``.
The terms `A`, `B`, `C`, and `D` are defined in [1], but here we
repeat them for convenience:
\f{eqnarray*}{
A &:=& -s^i s^j \partial_i s_j - \frac{1}{2} s^i (\partial_i g^{kl}) s_k s_l \\
B &:=& (\partial_i g^{ij}) s_j + g^{ij} \partial_i s_j + (\partial_i \ln\sqrt{g}) s^i \\
C &:=& K^{ij} s_i s_j \\
D &:=& s_i s^i.
\f}
@b References
[1] <NAME>. "A fast apparent horizon finder for
three-dimensional Cartesian grids in numerical relativity."
Classical and quantum gravity 21.2 (2003): 743.
"""
if hdiff == 0: # del_h H
A, B, C, D = self.ABCD
dhA, dhB, dhC, dhD, dhtrK = self.get_dh_ABCDtrK()
return (
- 3 * A * dhD / (2*D**2.5) - B * dhD / (2*D**1.5)
- C/D**2 * dhD
+ dhC / D + dhB / np.sqrt(D) + dhA / D**1.5
- dhtrK
)
if hdiff == 1: # del_h' H
A, B, C, D = self.ABCD
dhpA, dhpB, dhpC, dhpD = self.get_dhp_ABCD()
return (
- 3 * A * dhpD / (2*D**2.5) - B * dhpD / (2*D**1.5)
- C/D**2 * dhpD
+ dhpC / D + dhpB / np.sqrt(D) + dhpA / D**1.5
)
if hdiff == 2: # del_h'' H
D = self.ABCD[-1]
dhppA, dhppB = self.get_dhpp_AB()
return (D * dhppB + dhppA) / D**1.5
raise NotImplementedError
def get_dh_ABCDtrK(self):
r"""Compute the derivative of A, B, C, D, tr(K) \wrt `h`.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
Refer to the definition of `A,B,C,D` in the documentation of _diff().
The terms computed here are:
\f[
\partial_h A = -2(\partial_h s^i) s^j \partial_i s_j
- s^i s^j \partial_h \partial_i s_j
- \frac{1}{2} (\partial_h s^i) (\partial_i g^{kl}) s_k s_l
- \frac{1}{2} s^i (\partial_h \partial_i g^{kl}) s_k s_l
- s^i (\partial_i g^{kl}) s_k \partial_h s_l
\f]
\f[
\partial_h B =
(\partial_h \partial_i g^{ij}) s_j
+ (\partial_i g^{ij}) \partial_h s_j
+ (\partial_h g^{ij}) \partial_i s_j
+ g^{ij} \partial_h \partial_i s_j
+ (\partial_h \partial_i \ln\sqrt{g}) s^i
+ (\partial_i \ln\sqrt{g}) \partial_h s^i
\f]
\f[
\partial_h C =
\big[(\partial_h g^{ik}) g^{jl} + g^{ik}(\partial_h g^{jl})\big]
K_{kl} s_i s_j
+ g^{ik} g^{jl} (\partial_h K_{kl}) s_i s_j
+ 2 g^{ik} g^{jl} K_{kl} s_i \partial_h s_j
\f]
\f[
\partial_h D =
(\partial_h g^{ij}) s_i s_j + 2 g^{ij} s_i \partial_h s_j
\f]
\f[
\partial_h \mathrm{tr}K =
(\partial_h g^{ij}) K_{ij} + g^{ij} \partial_h K_{ij}
\f]
The individual terms are computed by simply applying the chain rule.
We obtain for any quantity `f` which depends on the coordinates
`x,y,z`:
\f[
\partial_h f = (\partial_i f) (\partial_h\gamma)^i,
\f]
where \f$\gamma\f$ is the curve along which the computation takes
place.
"""
dh_gamma = self.curve.h_diff(self.param)
g_inv, dg_inv, dlnsqrtg = self.g.inv, self.dg_inv, self.dlnsqrtg
dg = self.dg
ddg = self.ddg
ddg_inv = self.ddg_inv
s, s_up, ds = self.s, self.s_up, self.ds
dds = self.compute_dds()
dhs = ds.dot(dh_gamma)
dhg_inv = np.einsum('aij,a', dg_inv, dh_gamma)
dhs_up = dhg_inv.dot(s) + g_inv.dot(dhs)
dhdg_inv = np.einsum('aikl,a', ddg_inv, dh_gamma)
dhds = dds.dot(dh_gamma)
dhdlnsqrtg = (
0.5 * np.einsum('icd,acd,a', dg_inv, dg, dh_gamma)
+ 0.5 * np.einsum('cd,iacd,a', g_inv, ddg, dh_gamma)
)
dhA = (
- 2 * np.einsum('i,j,ij', dhs_up, s_up, ds)
- np.einsum('i,j,ij', s_up, s_up, dhds)
- 0.5 * np.einsum('i,ikl,k,l', dhs_up, dg_inv, s, s)
- 0.5 * np.einsum('i,ikl,k,l', s_up, dhdg_inv, s, s)
- np.einsum('i,ikl,k,l', s_up, dg_inv, s, dhs)
)
dhB = (
np.einsum('iij,j', dhdg_inv, s)
+ np.einsum('iij,j', dg_inv, dhs)
+ dhg_inv.dot(ds).diagonal().sum()
+ g_inv.dot(dhds).diagonal().sum()
+ dhdlnsqrtg.dot(s_up)
+ dlnsqrtg.dot(dhs_up)
)
dhD = (
np.einsum('ij,i,j', dhg_inv, s, s)
+ 2 * np.einsum('ij,i,j', g_inv, s, dhs)
)
if self.K is None:
dhC = 0.0
dhtrK = 0.0
else:
K = self.K
dK = self.curve.extr_curvature(self.point, diff=1)
dhK = np.einsum('aij,a', dK, dh_gamma)
dhC = (
np.einsum('ik,jl,kl,i,j', dhg_inv, g_inv, K, s, s)
+ np.einsum('ik,jl,kl,i,j', g_inv, dhg_inv, K, s, s)
+ np.einsum('ik,jl,kl,i,j', g_inv, g_inv, dhK, s, s)
+ 2 * np.einsum('ik,jl,kl,i,j', g_inv, g_inv, K, s, dhs)
)
dhtrK = (
np.einsum('ij,ij', dhg_inv, K)
+ np.einsum('ij,ij', g_inv, dhK)
)
return dhA, dhB, dhC, dhD, dhtrK
def get_dhp_ABCD(self):
r"""Compute the derivative of A, B, C, D \wrt `h'`.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
This implementation is correct iff
\f{eqnarray*}{
\partial_{h'} s_i &=& - X_i\\
\partial_{h'} \partial_i s_j &=& - X_{ij},
\f}
where \f$X_i := \partial_i \lambda\f$ and
\f$X_{ij} := \partial_i \partial_j \lambda\f$.
The terms computed here then become (refer to _diff()):
\f{eqnarray*}{
\partial_{h'} A &=&
2 X^i s^j \partial_i s_j + s^i s^j X_{ij}
+ \frac{1}{2} (\partial_i g^{kl}) (X^i s_k s_l + 2 s^i X_k s_l)
\\
\partial_{h'} B &=&
-(\partial_i g^{ij}) X_j - g^{ij} X_{ij} - (\partial_i\ln\sqrt{g}) X^i
\\
\partial_{h'} C &=& -2 K_{ij} X^i s^j
\\
\partial_{h'} D &=& -2 X_i s^i
\f}
This method is agnostic as to how the surfaces are represented as long
as the quantities \f$s_i\f$, \f$\partial_i s_j\f$, \f$X_i\f$, and
\f$X_{ij}\f$ are available.
"""
g_inv, dg_inv, dlnsqrtg = self.g.inv, self.dg_inv, self.dlnsqrtg
s, s_up, ds = self.s, self.s_up, self.ds
X, X_up, Y = self.X, self.X_up, self.Y
dhpA = (
2 * ds.dot(X_up).dot(s_up)
+ Y.dot(s_up).dot(s_up)
+ 0.5 * dg_inv.dot(s).dot(s).dot(X_up)
+ dg_inv.dot(X).dot(s).dot(s_up)
)
dhpB = (
- dg_inv.dot(X).diagonal().sum()
- g_inv.dot(Y).diagonal().sum()
- dlnsqrtg.dot(X_up)
)
if self.K is None:
dhpC = 0.0
else:
dhpC = - 2 * self.K.dot(X_up).dot(s_up)
dhpD = - 2 * X.dot(s_up)
return dhpA, dhpB, dhpC, dhpD
def get_dhpp_AB(self):
r"""Compute the derivative of A and B \wrt ``h''``.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
This implementation is correct iff
\f{eqnarray*}{
\partial_{h''} s_i &=& 0\\
\partial_{h''} \partial_i s_j &=& - X_i X_j.
\f}
We compute here (see also _diff()):
\f{eqnarray*}{
\partial_{h''} A &=& s^i s^j X_i X_j \\
\partial_{h''} B &=& -X^i X_i \\
\partial_{h''} C &=& \partial_{h''} D = 0
\f}
This method is agnostic as to how the surfaces are represented as long
as the quantities \f$s_i\f$, \f$\partial_i s_j\f$, \f$X_i\f$, and
\f$X_{ij}\f$ are available.
"""
X, X_up = self.X, self.X_up
s_up = self.s_up
dhppA = np.outer(X, X).dot(s_up).dot(s_up)
dhppB = - X_up.dot(X)
return dhppA, dhppB
@abstractmethod
def _compute_s_ds_X_Y(self):
r"""Compute the terms we need to compute the expansion.
Subclasses need to interpret the horizon function and compute the
covariant normal (not normalized), its derivatives, and the parameter
first (`X = del_i lambda`) and second (`Y = del_i del_j lambda`)
derivatives.
"""
pass
def _compute_dds_Z(self):
r"""Compute second derivatives of the normal and third ones of lambda.
This computes \f$\partial_i\partial_j s_k\f$ and
\f$Z := X_{ijk} = \partial_i\partial_j\partial_k \lambda\f$.
@return Two elements, the first containing the derivatives of the
non-normalized covariant normal `s` and the second those of the
parameter \f$\lambda\f$.
"""
raise NotImplementedError
def _compute_d2_Y(self):
r"""Compute second derivatives of xi and lambda \wrt x,y,z."""
raise NotImplementedError
def _compute_d3_Z(self):
r"""Compute third derivatives of xi and lambda \wrt x,y,z."""
raise NotImplementedError
def ricci_scalar(self):
r"""Compute the Ricci scalar of the surface represented by the curve.
The Ricci scalar of a 2-surface is defined as (see e.g. [1])
\f$R = q^{AB}R_{AB}\f$, where `q` is the induced metric
\f$q_{ab} = g_{ab} - \nu_a \nu_b\f$, \f$R_{AB}\f$ is the Ricci tensor
\f$R_{AB} = R^C_{\ A\,CB}\f$ and \f$\nu\f$ the covariant outward unit
normal of the surface.
Here, \f$R^A_{\ B\,CD}\f$ is the Riemann tensor.
Note that `A,B` run over the coordinates \f$(\lambda,\varphi)\f$ on
the surface and `a,b` over `x,y,z`.
See induced_metric() for a bit more details on the induced metric `q`
and the coordinate transformation to get the components \f$q_{AB}\f$
we need here.
It is convenient to compute the Ricci scalar from the purely covariant
Riemann tensor \f$R_{AB\,CD} = q_{AE}R^E_{\ B\,CD}\f$ as this is
antisymmetric in the first and last two index pairs, i.e. it has only
one independent component \f$R_{\lambda\varphi\,\lambda\varphi}\f$ in
two dimensions.
A short calculation reveals
\f[
R = q^{AB}R_{AB}
= 2 R_{\lambda\varphi\,\lambda\varphi}
(q^{\lambda\lambda}q^{\varphi\varphi} - (q^{\lambda\varphi})^2).
\f]
@b References
[1] <NAME>. General relativity. Springer Science &
Business Media, 2004.
"""
R_0101 = self.covariant_riemann()
q_inv = self.induced_metric(inverse=True)
return 2 * R_0101 * (q_inv[0,0]*q_inv[1,1] - q_inv[0,1]**2)
def induced_metric(self, diff=0, inverse=False):
r"""Compute the induced metric on the surface.
This method computes the components of the induced metric in
\f$(\lambda,\varphi)\f$ coordinates as well as the components of the
inverse (i.e. indices upstairs) and derivatives of these components.
Since this class assumes axisymmetry throughout, this method requires
(without loss of generality) that the point at which the metric is to
be returned is located at `phi=0`, i.e. `y=0` and `x>0`.
@param diff
Derivative order to compute. Default is `0`.
@param inverse
Whether to return the (derivatives of the) inverse of the induced
metric. Default is `False`.
@return NumPy array with ``2+diff`` axes, such that the indices
``[A1,A2,...,B,C]`` correspond to
\f$\partial_{A_1}\partial_{A_2}\ldots q_{BC}\f$ for
            `inverse==False` and with upstairs indices for `inverse==True`.
@b Notes
The induced 2-metric `q` on the surface \f$\sigma\f$ is formally given
by
\f[
q = \Pi_\sigma g = g\big|_\sigma - \underline{\nu} \otimes \underline{\nu},
\qquad
q_{ab} = g_{ab} - \nu_a \nu_b,
\f]
where \f$\nu\f$ is the outward pointing normal of \f$\sigma\f$ and
\f$\underline{\nu} = g(\nu,\,\cdot\,)\f$.
The induced metric can easily be expressed in terms of the components
of the 3-metric `g` by expanding these into the cobasis fields of the
coordinates \f$\lambda, \varphi\f$ on the 2-surface (and thereby
dropping any transversal components). As a result, we get the simple
formula
\f[
q_{AB} = g_{ij}\ (\partial_A x^i)\ (\partial_B x^j),
\f]
where `A,B = 1,2` and
\f$(\partial_A) = (\partial_\lambda, \partial_\varphi)\f$.
The derivatives of the Cartesian coordinates `x,y,z` are computed in
diff_xyz_wrt_laph().
From this, we easily get the first and second derivatives by applying
the chain and product rule:
\f{eqnarray*}{
\partial_A q_{CD} &=&
(\partial_A g_{ij}) x_C^i x_D^j
+ g_{ij} (x_{CA}^i x_D^j + x_C^i x_{DA}^j)
\\
\partial_A\partial_B q_{CD} &=&
(\partial_A\partial_B g_{ij}) x_C^i x_D^j
+ (\partial_A g_{ij}) (x_{CB}^i x_D^j + x_C^i x_{DB}^j)
+ (\partial_B g_{ij}) (x_{CA}^i x_D^j + x_C^i x_{DA}^j)
\\&&
+ g_{ij} (x_{CAB}^i x_D^j + x_{CA}^i x_{DB}^j
+ x_{CB}^i x_{DA}^j + x_C^i x_{DAB}^j).
\f}
Here, \f$x_{A}^i := \partial_A x^i\f$, etc.
"""
return self._induced_metric(diff, bool(inverse))
@cache_method_results()
def _induced_metric(self, diff, inverse):
if inverse:
q = self.induced_metric(diff=0)
if diff == 0:
return linalg.inv(q)
dq = self.induced_metric(diff=1)
if diff == 1:
dq_inv = inverse_2x2_matrix_derivative(q, dq, diff=1)
return dq_inv
ddq = self.induced_metric(diff=2)
if diff == 2:
ddq_inv = inverse_2x2_matrix_derivative(q, dq, ddq, diff=2)
return ddq_inv
raise NotImplementedError
dx = self.diff_xyz_wrt_laph(diff=1)
g = self.g.mat
if diff == 0:
q = np.einsum('ij,ai,bj', g, dx, dx)
return q
ddx = self.diff_xyz_wrt_laph(diff=2)
dg = self.dg
dg_laph = np.einsum('ak,kij', dx, dg)
if diff == 1:
dq = (
np.einsum('aij,bi,cj', dg_laph, dx, dx)
+ np.einsum('ij,bai,cj', g, ddx, dx)
+ np.einsum('ij,bi,caj', g, dx, ddx)
)
return dq
d3x = self.diff_xyz_wrt_laph(diff=3)
ddg = self.ddg
ddg_laph = (
|
np.einsum('abk,kij', ddx, dg)
|
numpy.einsum
|
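The identity behind the dg_inv property above, d(g^-1) = -g^-1 (dg) g^-1, can be sanity-checked numerically with a central finite difference on a toy parameter-dependent matrix; this sketch is independent of the module's metric classes.
import numpy as np

def g(t):
    """Toy symmetric positive-definite 3x3 matrix depending on a parameter t."""
    return np.array([[1.0 + t, 0.2 * t, 0.0],
                     [0.2 * t, 2.0, 0.1 * t],
                     [0.0, 0.1 * t, 3.0 - t]])

t0, eps = 0.3, 1e-6
g_inv = np.linalg.inv(g(t0))
dg = (g(t0 + eps) - g(t0 - eps)) / (2 * eps)        # finite-difference dg/dt
analytic = -g_inv @ dg @ g_inv                      # -g^-1 (dg) g^-1
numeric = (np.linalg.inv(g(t0 + eps)) - np.linalg.inv(g(t0 - eps))) / (2 * eps)
print(np.max(np.abs(analytic - numeric)))           # should be close to machine precision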
import numpy as np
import networkx as nx
from numba import *
from kmmi.utils.utils import sub_sum
from kmmi.heuristics.neighborhood_search import ls_one_n_beam
@njit
def initialize_degree_vecs(A, H=None):
n = A.shape[0]
if H is not None:
U = np.zeros(n, dtype=bool_)
U[H] = True
alpha = np.sum(A[:,U], axis=1, dtype=np.float64)
beta = np.sum(A[:,U!=True], axis=1, dtype=np.float64)
else:
U = np.ones(n, dtype=bool_)
alpha = np.sum(A, axis=1, dtype=np.float64)
beta = np.zeros(n, dtype=np.float64)
return U, alpha, beta
def __weighted_degree_rank(A, beta_ratio):
n = A.shape[0]
ws = np.sum(A, axis=0)
ds = np.sum(A > 0, axis=0)
_, s_ranking = zip(*sorted(zip(sorted(list(zip(np.arange(n), ws)),
key=lambda x: x[1]),range(n))))
_, d_ranking = zip(*sorted(zip(sorted(list(zip(np.arange(n), ds)),
key=lambda x: x[1]),range(n))))
s_ranking = (np.array(s_ranking) + 1) / n
d_ranking = (np.array(d_ranking) + 1) / n
beta_1 = beta_ratio
beta_2 = 1.0 - beta_1
scores = beta_1*s_ranking + beta_2*d_ranking
p_w = scores / scores.sum()
_, score_order = zip(*sorted(zip(scores, range(n)))[::-1])
return score_order, p_w
def init_solution_weighted_degree_ranking(A: np.array, k: int, beta_ratio: float=0.5):
"""Construct a k sized subgraph based on the degree rank order heuristic.
"""
score_order, p_w = __weighted_degree_rank(A, beta_ratio)
idxs = np.array(score_order[:k])
H = np.zeros(A.shape[0], dtype=bool)
H[idxs] = True
return H, p_w
def init_solution_weighted_degree_ranking_fs(A: np.array, k: int, fss: list,
beta_ratio: float=0.25):
"""Construct a k sized subgraph based on the degree rank order heuristic
taking into account the set of force selected nodes.
"""
n = A.shape[0]
H_fs = np.zeros(n, dtype=bool)
idx = np.argmax([sub_sum(A, s) for i,s in enumerate(fss)])
H_fs[fss[idx]] = True
score_order, p_w = __weighted_degree_rank(A, beta_ratio)
H =
|
np.zeros(n, dtype=bool)
|
numpy.zeros
|
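A compact sketch of the score behind __weighted_degree_rank above: node strength (weighted degree) and plain degree are each turned into normalized ranks and blended with a beta weight. The small adjacency matrix is made up, and the double argsort is a rough stand-in for the snippet's zip-based ranking (ties may be ordered differently).
import numpy as np

A = np.array([[0.0, 1.0, 2.0, 0.0],
              [1.0, 0.0, 0.5, 0.0],
              [2.0, 0.5, 0.0, 3.0],
              [0.0, 0.0, 3.0, 0.0]])
n = A.shape[0]
ws = A.sum(axis=0)                  # weighted degree (strength)
ds = (A > 0).sum(axis=0)            # unweighted degree

# Rank each node by strength and by degree, normalized to (0, 1].
s_rank = (np.argsort(np.argsort(ws)) + 1) / n
d_rank = (np.argsort(np.argsort(ds)) + 1) / n

beta = 0.5
scores = beta * s_rank + (1 - beta) * d_rank
print(np.argsort(scores)[::-1])     # node ids from highest to lowest blended rank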
"""Base class for DataSet """
import numbers
import numpy as np
class BaseDataSet():
"""Base class for DataSet """
def __init__(self, standard_df, truth=None, idx=None):
if truth is not None:
self.truth = truth
self.standard_df = standard_df
if idx is None and self.standard_df is not None:
idx = np.arange(len(self.standard_df))
self.idx = idx
def __getattr__(self, item):
"""Easier access to nudge and outcomes"""
if item in ["nudge", "outcome"] and item in self.standard_df:
return self.standard_df[item].values
return self.truth[item]
def write_interim(self, path):
"""Write interim data (standard format) to csv file"""
try:
if self.goal == "decrease":
self.standard_df["outcome"] = -self.standard_df["outcome"]
except AttributeError:
pass
self.standard_df.to_csv(path, index=False)
@property
def ate(self):
"""Compute the Average Treatment Effect"""
ones = np.where(self.nudge == 1)[0]
zeros = np.where(self.nudge == 0)[0]
return np.mean(self.outcome[ones])-
|
np.mean(self.outcome[zeros])
|
numpy.mean
|
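The ate property above is just a difference of group means; a standalone sketch with synthetic nudge/outcome arrays:
import numpy as np

nudge = np.array([1, 0, 1, 0, 1, 0])
outcome = np.array([5.0, 3.0, 6.0, 2.5, 5.5, 3.5])

treated = outcome[nudge == 1].mean()   # mean outcome of the nudged group
control = outcome[nudge == 0].mean()   # mean outcome of the control group
ate = treated - control                # average treatment effect
print(ate)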
import os
import time
import numpy as np
import cv2
import constants
def process_actions(nonlocal_variables, trainer, logger, robot, workspace_limits, heightmap_resolution,
heuristic_bootstrap, save_visualizations):
"""
The main function of a thread that will process grasp predictions and execute actions.
:param nonlocal_variables: Main connection between this thread and the main thread.
A (hopefully thread-safe dictionary).
:param trainer: Trainer object.
:param logger: Logger object.
:param robot: Robot object.
:param workspace_limits: Workspace limits.
:param heightmap_resolution: Height map resolution.
:param heuristic_bootstrap: Should we use a heuristic grasping algorithm after a certain number of actions
that have no effects.
:param save_visualizations: Save visualizations.
:return: None.
"""
while True:
# print('!-- Running process actions loop')
if nonlocal_variables[constants.EXECUTING_ACTION]:
print("executing action--child")
grasp_predictions = nonlocal_variables[constants.GRASP_PREDICTIONS]
valid_depth_heightmap = nonlocal_variables[constants.VALID_DEPTH_HEIGHTMAP]
color_heightmap = nonlocal_variables[constants.COLOR_HEIGHTMAP]
# Determine whether grasping or pushing should be executed based on network predictions
# best_push_conf = np.max(push_predictions)
best_grasp_conf = np.max(grasp_predictions)
print('Primitive confidence scores: %f (grasp)' % best_grasp_conf)
nonlocal_variables[constants.PRIMITE_ACTION] = 'grasp'
explore_actions = False
trainer.is_exploit_log.append([0 if explore_actions else 1])
logger.write_to_log('is-exploit', trainer.is_exploit_log)
if nonlocal_variables[constants.PRIMITE_ACTION] != 'grasp':
raise ValueError("Pushing not used in this project.")
print("valid depth heightmap size:",
valid_depth_heightmap.shape)
# If heuristic bootstrapping is enabled: if change has not been detected more than 2 times, execute heuristic algorithm to detect grasps/pushes
# NOTE: typically not necessary and can reduce final performance.
if heuristic_bootstrap and nonlocal_variables[constants.PRIMITE_ACTION] == 'grasp' and nonlocal_variables[constants.NO_CHANGE_COUNT] >= 2:
print('Change not detected for more than two grasps. Running heuristic grasping.')
nonlocal_variables[constants.BEST_PIX_IND] = trainer.grasp_heuristic(valid_depth_heightmap)
nonlocal_variables[constants.NO_CHANGE_COUNT] = 0
predicted_value = grasp_predictions[nonlocal_variables[constants.BEST_PIX_IND]]
use_heuristic = True
else:
use_heuristic = False
# Get pixel location and rotation with highest affordance prediction from heuristic algorithms (rotation, y, x)
nonlocal_variables[constants.BEST_PIX_IND] = np.unravel_index(
np.argmax(grasp_predictions), grasp_predictions.shape)
predicted_value = np.max(grasp_predictions)
trainer.use_heuristic_log.append([1 if use_heuristic else 0])
logger.write_to_log('use-heuristic', trainer.use_heuristic_log)
# Save predicted confidence value
trainer.predicted_value_log.append([predicted_value])
logger.write_to_log('predicted-value',
trainer.predicted_value_log)
# Compute 3D position of pixel
print('!------------------ Action: %s at (%d, %d, %d)' % (
nonlocal_variables[constants.PRIMITE_ACTION], nonlocal_variables[constants.BEST_PIX_IND][0],
nonlocal_variables[constants.BEST_PIX_IND][1], nonlocal_variables[constants.BEST_PIX_IND][2])
)
best_rotation_angle = np.deg2rad(
nonlocal_variables[constants.BEST_PIX_IND][0] * (360.0 / trainer.model.num_rotations)
)
best_pix_x = nonlocal_variables[constants.BEST_PIX_IND][2]
best_pix_y = nonlocal_variables[constants.BEST_PIX_IND][1]
# NOTE: original
# TODO: why is it outof bound by one? indexing error
if best_pix_x == valid_depth_heightmap.shape[1]:
best_pix_x -= 1
if best_pix_y == valid_depth_heightmap.shape[0]:
best_pix_y -= 1
primitive_position = [best_pix_x * heightmap_resolution +
workspace_limits[0][0], best_pix_y *
heightmap_resolution +
workspace_limits[1][0],
valid_depth_heightmap[best_pix_y][best_pix_x]
+ workspace_limits[2][0]]
# Visualize executed primitive, and affordances
if save_visualizations:
# TODO: ValueError: operands could not be broadcast together with shapes (364,273,3) (364,364,3)
grasp_pred_vis = trainer.get_prediction_vis(
grasp_predictions, color_heightmap, nonlocal_variables[constants.BEST_PIX_IND]
)
logger.save_visualizations(trainer.iteration, grasp_pred_vis, 'grasp')
#cv2.imwrite('visualization.grasp.png', grasp_pred_vis)
# Initialize variables that influence reward
nonlocal_variables[constants.GRASP_SUCCESS] = False
# Execute primitive
if nonlocal_variables[constants.PRIMITE_ACTION] == 'grasp':
# ! TODO
nonlocal_variables[constants.GRASP_SUCCESS] = robot.grasp(
primitive_position, best_rotation_angle, workspace_limits)
print('Grasp successful: %r' % (nonlocal_variables[constants.GRASP_SUCCESS]))
nonlocal_variables[constants.EXECUTING_ACTION] = False
print('!-- no longer executing action')
time.sleep(0.01)
def training_step(prev_primitive_action, prev_reward_value, trainer, logger):
"""
Run a single experience replay training step.
:param prev_primitive_action: Previous primitive action.
:param prev_reward_value: Previous reward.
:param trainer: Trainer object.
:param logger: Logger object.
:return: None.
"""
sample_primitive_action = prev_primitive_action
if sample_primitive_action == 'grasp':
sample_primitive_action_id = 1
sample_reward_value = 0 if prev_reward_value == 1 else 1
# Get samples of the same primitive but with different results
sample_ind = np.argwhere(
np.logical_and(np.asarray(trainer.reward_value_log)[1:trainer.iteration, 0] == sample_reward_value, np.asarray(
trainer.executed_action_log)[1:trainer.iteration, 0] == sample_primitive_action_id))
if sample_ind.size > 0:
# Find sample with highest surprise value
sample_surprise_values = np.abs(np.asarray(trainer.predicted_value_log)[
sample_ind[:, 0]] - np.asarray(trainer.label_value_log)[
sample_ind[:, 0]])
sorted_surprise_ind = np.argsort(
sample_surprise_values[:, 0])
sorted_sample_ind = sample_ind[sorted_surprise_ind, 0]
pow_law_exp = 2
rand_sample_ind = int(
np.round(np.random.power(pow_law_exp, 1) * (sample_ind.size - 1)))
sample_iteration = sorted_sample_ind[rand_sample_ind]
print('Experience replay: iteration %d (surprise value: %f)' % (
sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]]))
# Load sample RGB-D heightmap
sample_color_heightmap = cv2.imread(os.path.join(
logger.color_heightmaps_directory, '%06d.0.color.png' % (sample_iteration)))
sample_color_heightmap = cv2.cvtColor(
sample_color_heightmap, cv2.COLOR_BGR2RGB)
sample_depth_heightmap = cv2.imread(os.path.join(
logger.depth_heightmaps_directory, '%06d.0.depth.png' % (sample_iteration)), -1)
sample_depth_heightmap = sample_depth_heightmap.astype(
np.float32) / 100000
# Compute forward pass with sample
sample_push_predictions, sample_grasp_predictions, sample_state_feat = trainer.forward(
sample_color_heightmap, sample_depth_heightmap, is_volatile=True)
# Load next sample RGB-D heightmap
next_sample_color_heightmap = cv2.imread(os.path.join(
logger.color_heightmaps_directory, '%06d.0.color.png' % (sample_iteration + 1)))
next_sample_color_heightmap = cv2.cvtColor(
next_sample_color_heightmap, cv2.COLOR_BGR2RGB)
next_sample_depth_heightmap = cv2.imread(os.path.join(
logger.depth_heightmaps_directory, '%06d.0.depth.png' % (sample_iteration + 1)), -1)
next_sample_depth_heightmap = next_sample_depth_heightmap.astype(
np.float32) / 100000
sample_push_success = sample_reward_value == 0.5
sample_grasp_success = sample_reward_value == 1
sample_change_detected = sample_push_success
new_sample_label_value, _ = trainer.get_label_value(sample_primitive_action, sample_push_success,
sample_grasp_success, sample_change_detected,
sample_push_predictions, sample_grasp_predictions,
next_sample_color_heightmap, next_sample_depth_heightmap)
# Get labels for sample and backpropagate
sample_best_pix_ind = (np.asarray(trainer.executed_action_log)[
sample_iteration, 1:4]).astype(int)
trainer.backprop(sample_color_heightmap, sample_depth_heightmap, sample_primitive_action,
sample_best_pix_ind, trainer.label_value_log[sample_iteration])
# Recompute prediction value and label for replay buffer
if sample_primitive_action == 'push':
trainer.predicted_value_log[sample_iteration] = [
|
np.max(sample_push_predictions)
|
numpy.max
|
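The pixel-to-workspace conversion in process_actions above maps a heightmap pixel to a 3D position using the heightmap resolution and the workspace origin plus the depth value; the numbers below (resolution, limits, heightmap size) are assumptions for illustration only.
import numpy as np

heightmap_resolution = 0.002                       # metres per pixel (assumed)
workspace_limits = np.array([[-0.7, -0.3],         # x min/max (assumed)
                             [-0.2,  0.2],         # y min/max
                             [ 0.0,  0.3]])        # z min/max
depth_heightmap = np.zeros((224, 224), dtype=np.float32)
depth_heightmap[100, 150] = 0.05                   # fake object height at one pixel

best_pix_y, best_pix_x = 100, 150
position = [best_pix_x * heightmap_resolution + workspace_limits[0][0],
            best_pix_y * heightmap_resolution + workspace_limits[1][0],
            depth_heightmap[best_pix_y, best_pix_x] + workspace_limits[2][0]]
print(position)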
# -*- coding: utf-8 -*-
"""
A python class that defines a basic gridworld environment
Written to be compatible with OpenAI Gym environments
Example usage:
gw = gridworld.GridWorld()
gw.reset()
gw.ax.scatter(gw.flatgrid[gw.initialstate][0],gw.flatgrid[gw.initialstate][1],s=100)
actions = ['right', 'left', 'up', 'down']
for _ in range(10):
act = np.random.randint(4, size=1)[0]
gw.step(act) # take a random action
print(actions[act],gw.oldstate,gw.newstate)
gw.render()
print('Total Reward: {}'.format(gw.cumlreward))
plt.show()
Created on Fri Jan 31 15:24:57 2020
@author: kiranvad
"""
import numpy as np
import pdb
import matplotlib.pyplot as plt
from itertools import product
np.random.seed()
from gym import spaces
class GridWorld:
"""
A Simple grid world environment with a determinstic transition probability
"""
def __init__(self,size=[6,6]):
"""
Initiate your grid world with the following:
size : Size of the square grid world
actions : Different actions that you can take
rewards : Various rewards possible in the environment
"""
self.nA = 4
self.nS = np.prod(size)
self.size = size
# Just to be in sync with GYM environments
self.action_space = spaces.Discrete(self.nA)
self.observation_space = spaces.Discrete(self.nS)
self.actions = ['R', 'L', 'U', 'D']
self._grid = list(product(np.arange(self.size[0]), np.arange(self.size[0])))
self._terminal_states = [self._grid[0],self._grid[-1]]
self.Tstates = [0,self.observation_space.n-1]
self._det_next_state = [[1,0],[-1,0],[0,1],[0,-1]] # in the order (right, left, up, down)
# Define reset for environment
def reset(self, init_state = None):
# initiate it to a random seed
if init_state is None:
self.oldstate = np.random.choice(np.arange(1,self.nS-1), 1)[0]
else:
self.oldstate = init_state
self.initialstate = self.oldstate
self.s = self.oldstate
# Define how to take a step
def step(self, action):
state = self.s
prob = np.zeros(self.action_space.n)
current_trans_probs = self.P[self.s][action]
for action,tp in enumerate(current_trans_probs):
prob[action] = tp[0][0]
coin = np.random.choice([0,1,2,3], 1, p=prob)[0]
tpms = self.P[self.s][action][coin][0]
self.newstate = tpms[1]
self.recentaction = coin
self.cumlreward += tpms[2]
self.s = tpms[1]
return tpms[1], tpms[2], tpms[3]
# Define a render to plot grid world
def render(self):
'''
This function does the following:
0. Use the environment plot axis to add arrow of trjectories
1. Shows the current state and cumulative reward on top of it
'''
diff = tuple(i-j for i,j in zip(self.flatgrid[self.newstate],self.flatgrid[self.oldstate]))
self.ax.arrow(self.flatgrid[self.oldstate][0], self.flatgrid[self.oldstate][1], diff[0],diff[1],\
head_width=0.2, head_length=0.2, fc='lightblue', ec='black')
self.oldstate = self.newstate
def _plotenv(self, showgridids=False):
#fig = plt.figure()
ax = plt.gca()
for i in range(self.size[0] + 1):
ax.plot(np.arange(self.size[0] + 1) - 0.5, np.ones(self.size[0] + 1) * i - 0.5, color='k')
for i in range(self.size[1] + 1):
ax.plot(np.ones(self.size[1] + 1) * i - 0.5,
|
np.arange(self.size[1] + 1)
|
numpy.arange
|
# -*- coding:utf-8 -*-
import numpy as np
# from sklearn.cluster import KMeans
class Kmeans:
def __init__(self, k, c=0.0):
self.k = k
self.c = c
self.X = None
self.labels = None
self.centers = None
def fit(self, X):
self.X = X
self.labels, self.centers = self._fit()
def transform(self):
for i, x in enumerate(self.X):
label = int(self.labels[i])
center = self.centers[label]
self.X[i] = self.fill(x, center)
return self.X
def fill(self, x, y):
        # Ensure y has no missing values
assert not np.any(np.isnan(y))
for i in range(len(x)):
if np.isnan(x[i]):
x[i] = y[i]
return x
def get_distance(self, a, b):
a, b = a[(np.logical_not(np.isnan(a))) & (np.logical_not(np.isnan(b)))], \
b[(np.logical_not(np.isnan(a))) & (np.logical_not(
|
np.isnan(b)
|
numpy.isnan
|
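The get_distance/fill pattern above compares two vectors only on dimensions where neither is NaN and imputes missing entries from a reference vector (e.g. a cluster center); a small standalone sketch, with helper names that are illustrative only:
import numpy as np

def masked_distance(a, b):
    """Euclidean distance over the dimensions where both a and b are observed."""
    mask = ~np.isnan(a) & ~np.isnan(b)
    if not mask.any():
        return np.inf
    return np.sqrt(np.sum((a[mask] - b[mask]) ** 2))

def fill_from(x, center):
    """Replace NaNs in x with the corresponding entries of center."""
    out = x.copy()
    nan_idx = np.isnan(out)
    out[nan_idx] = center[nan_idx]
    return out

x = np.array([1.0, np.nan, 3.0])
c = np.array([0.9, 2.1, 2.8])
print(masked_distance(x, c), fill_from(x, c))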
from exetera.core.operations import INVALID_INDEX
import unittest
from io import BytesIO
import numpy as np
import tempfile
import os
from exetera.core import session
from exetera.core import fields
from exetera.core import persistence as per
from exetera.core import dataframe
class TestDataFrameCreateFields(unittest.TestCase):
def test_dataframe_init(self):
bio = BytesIO()
with session.Session() as s:
dst = s.open_dataset(bio, 'w', 'dst')
# init
df = dst.create_dataframe('dst')
self.assertTrue(isinstance(df, dataframe.DataFrame))
numf = df.create_numeric('numf', 'uint32')
df2 = dst.create_dataframe('dst2', dataframe=df)
self.assertTrue(isinstance(df2, dataframe.DataFrame))
# add & set & contains
self.assertTrue('numf' in df)
self.assertTrue('numf' in df2)
cat = s.create_categorical(df2, 'cat', 'int8', {'a': 1, 'b': 2})
self.assertFalse('cat' in df)
self.assertFalse(df.contains_field(cat))
df['cat'] = cat
self.assertTrue('cat' in df)
# list & get
self.assertEqual(id(numf), id(df.get_field('numf')))
self.assertEqual(id(numf), id(df['numf']))
# list & iter
dfit = iter(df)
self.assertEqual('numf', next(dfit))
self.assertEqual('cat', next(dfit))
# del & del by field
del df['numf']
self.assertFalse('numf' in df)
with self.assertRaises(ValueError, msg="This field is owned by a different dataframe"):
df.delete_field(cat)
self.assertFalse(df.contains_field(cat))
def test_dataframe_create_numeric(self):
bio = BytesIO()
with session.Session() as s:
dst = s.open_dataset(bio, 'r+', 'dst')
df = dst.create_dataframe('dst')
num = df.create_numeric('num', 'uint32')
num.data.write([1, 2, 3, 4])
self.assertEqual([1, 2, 3, 4], num.data[:].tolist())
num2 = df.create_numeric('num2', 'uint32')
num2.data.write([1, 2, 3, 4])
def test_dataframe_create_numeric(self):
bio = BytesIO()
with session.Session() as s:
np.random.seed(12345678)
values = np.random.randint(low=0, high=1000000, size=100000000)
dst = s.open_dataset(bio, 'r+', 'dst')
df = dst.create_dataframe('dst')
a = df.create_numeric('a','int32')
a.data.write(values)
total =
|
np.sum(a.data[:], dtype=np.int64)
|
numpy.sum
|
import json
import math
import base64
import glob
from io import BytesIO
import numpy as np
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import imgaug as ia
from imgaug import augmenters as iaa
from tqdm import tqdm
from keras.utils import Sequence
import cv2
from sklearn.utils import shuffle
from skimage.transform import AffineTransform, warp
from skimage.draw import circle
from skimage.transform import rotate, resize
import copy
R_t = lambda theta: np.array([[math.cos(theta), -math.sin(theta)],
[math.sin(theta), math.cos(theta)]], dtype=np.float32)
class APTDataset(Sequence):
def __init__(self, prefix, input_shape, output_shape, batch_size=8, c_r=3.2, is_training=False):
super().__init__()
self.is_training = is_training
self.c_r = c_r
self.input_shape = input_shape
self.output_shape = output_shape
self.batch_size = batch_size
self.json_files = glob.glob(prefix+'/**/*.json', recursive=True)
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
self.seq = iaa.Sequential(
[
# execute 0 to 3 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 3),
[
iaa.OneOf([
iaa.GaussianBlur((0, 1.0)), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(k=(3, 5)), # blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(k=(3, 5)), # blur image using local medians with kernel sizes between 2 and 7
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
iaa.Emboss(alpha=(0, 0.3), strength=(0, 2.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.5, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
])),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
iaa.OneOf([
iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
]),
iaa.Invert(0.05, per_channel=True), # invert color channels
iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.Multiply((0.8, 1.2), per_channel=0.5),
iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
iaa.Grayscale(alpha=(0.0, 1.0))
],
random_order=True
)
],
random_order=True
)
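# For illustration: the pipeline above is applied one image at a time,
# e.g. augmented = self.seq.augment_image(img) for an HxWx3 uint8 array,
# which is how it is used in __getitem__ below.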
def __len__(self):
return int(np.ceil(float(len(self.json_files))/self.batch_size))
def __getitem__(self, i):
l_bound = i * self.batch_size
r_bound = (i+1) * self.batch_size
if r_bound>len(self.json_files): # ensure every iteration has the same batch size
r_bound = len(self.json_files)
l_bound = r_bound - self.batch_size
dat_que = np.empty((self.batch_size, *self.input_shape), dtype=np.float32)
lab_que = np.empty((self.batch_size, *self.output_shape), dtype=np.float32)
for n, index in enumerate(range(l_bound, r_bound)):
with open(self.json_files[index]) as f:
data = json.load(f)
# Decode image from base64 imageData
img = Image.open(BytesIO(base64.b64decode(data['imageData'])))
img = img.convert('RGB')
srcW, srcH = img.size
dstH, dstW = self.output_shape[:2]
img = np.array(img, dtype=np.uint8)
crop_ratio = np.zeros(4, dtype=np.float32)
if self.is_training and np.random.rand() < 0.3:
crop_ratio = np.random.uniform(0.01, 0.1, size=4)
u, r, d, l = np.round(crop_ratio * np.array([srcH, srcW, srcH, srcW])).astype(np.int32) # pixel crop offsets
img = img[u:srcH-d,l:srcW-r] # crop image
fx = self.input_shape[1] / float(img.shape[1])
fy = self.input_shape[0] / float(img.shape[0])
img = cv2.resize(img, self.input_shape[:2][::-1], interpolation=cv2.INTER_AREA) # resize first...
# Sort the corners by clockwise
# while the first corner is the most top-lefted
corners = np.float32(data['shapes'][0]['points'])
if self.is_training and np.sum(crop_ratio)>0:
corners[:, 0] -= l
corners[:, 1] -= u
corners[:, 0] *= fx
corners[:, 1] *= fy
if self.is_training and np.random.rand() < .3:
angle = np.random.uniform(-30,30)
cx = int(img.shape[1]//2)
cy = int(img.shape[0]//2)
M = cv2.getRotationMatrix2D((cx,cy),angle,1)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
(h, w) = img.shape[:2]
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cx
M[1, 2] += (nH / 2) - cy
img = np.clip(cv2.warpAffine(img,M,(nW, nH), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=np.random.randint(25)), 0, 255)
x_scale = self.input_shape[1] / nW
y_scale = self.input_shape[0] / nH
if img.shape != self.input_shape:
img = cv2.resize(img, self.input_shape[:2][::-1], interpolation=cv2.INTER_AREA) # resize first...
# img = resize(img, self.input_shape[:2], mode='constant', cval=0, clip=True, preserve_range=True, order=0)
R = R_t(-angle*np.pi/180.0)
corners[:, 0] -= cx
corners[:, 1] -= cy
corners = (R @ corners.T).T
corners[:, 0] *= x_scale
corners[:, 1] *= y_scale
corners[:, 0] += cx
corners[:, 1] += cy
corners[:, 0] = np.round(np.clip(corners[:, 0] * dstW/self.input_shape[1], 0, dstW-1))
corners[:, 1] = np.round(np.clip(corners[:, 1] * dstH/self.input_shape[0], 0, dstH-1))
corners = corners.astype(np.int32)
lab = np.zeros(self.output_shape, dtype=np.float32)
for (x, y) in corners:
rr, cc = circle(y, x, self.c_r, shape=self.output_shape[:2])
lab[rr, cc, 0] = 1 # markers
if self.is_training:
if np.random.rand() < 0.3: # heavy augmentation (slow)
img = self.seq.augment_image(img) # data augmentation
else: # light augmentation (fast)
img = img.astype(np.float32) / 255.0 # normalize first
# random amplify each channel
a = .2 # amplitude
t = [np.random.uniform(-a,a)]
t += [np.random.uniform(-a,a)]
t += [np.random.uniform(-a,a)]
t = np.array(t)
img = np.clip(img * (1. + t), 0, 1) # channel wise amplify
up = np.random.uniform(0.8, 1.2) # change gamma
img = np.clip(img**up, 0, 1) # apply gamma correction (image stays in range [0, 1])
# additive random noise
sigma = np.random.rand()*0.05
img = np.clip(img +
|
np.random.randn(*img.shape)
|
numpy.random.randn
|
import argparse
import sys
import tempfile
import math
import random
import pandas as pd
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import estimator
import tflearn
import numpy as np
from sklearn import metrics
from collections import Counter
from datetime import datetime
import wordvecdata as wvd
"""
Implements a neural classifier in TensorFlow. Also implements the metrics used in the work.
Disclaimer: File probably needs tons of cleaning up...
"""
COLUMNS = ["node1", "node2"]
LABEL_COLUMN = "label"
def build_estimator(model_dir, model_type, embeddings,index_map, combination_method):
"""Build an estimator."""
# Continuous base columns.
node1 = tf.contrib.layers.real_valued_column("node1")
deep_columns = [node1]
if model_type == "regressor":
tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
if combination_method == 'concatenate':
net = tflearn.input_data(shape=[None, embeddings.shape[1]*2])
else:
net = tflearn.input_data(shape=[None, embeddings.shape[1]] )
net = tflearn.fully_connected(net, 100, activation='relu')
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
m = tflearn.DNN(net)
else:
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100])
return m
def get_input(df, embeddings,index_map, combination_method='hadamard', data_purpose='train'):
"""Input builder function."""
# Converts the label column into a constant Tensor.
label_values = df[LABEL_COLUMN].values
indexed_labels = []
original_labels = np.array(label_values)
labels = [[0, 0] for i in range(len(label_values))]
for label_lst, value in zip(labels, label_values):
label_lst[value] = 1
indexed_labels = labels
if data_purpose not in ['map', 'test']:
vocab_size = embeddings.shape[0]
embedding_dim = embeddings.shape[1]
W = tf.Variable(tf.constant(0.0, shape=[vocab_size, embedding_dim]), trainable=False, name="W")
embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_dim])
embedding_init = W.assign(embeddings)
feature_cols = {}
column_tensors = []
col_keys = []
for i in COLUMNS:
words = [value for value in df[i].values]
col_keys.append(words)
#print("%s words in index map." % len(index_map))
ids = [index_map[word] for word in words]
column_tensors.append([embeddings[id_] for id_ in ids])
keys = []
for entity1, entity2 in zip(col_keys[0], col_keys[1]):
keys.append("%s::%s" % (entity1, entity2))
assert(combination_method in ['hadamard','average', 'weighted_l1', 'weighted_l2', 'concatenate']), "Invalid combination Method %s" % combination_method
features = column_tensors[0]
no_output = ['map']
for i in range(1, len(column_tensors)):
if combination_method == 'hadamard':
if data_purpose not in no_output:
print("Combining with Hadamard.")
features = np.multiply(features, column_tensors[i])
elif combination_method == 'average':
if data_purpose not in no_output:
print("Combining with Average.")
features = np.mean(np.array([ features, column_tensors[i] ]), axis=0)
elif combination_method == 'weighted_l1':
if data_purpose not in no_output:
print("Combining with Weighted L1.")
features = np.absolute(np.subtract(features, column_tensors[i]))
elif combination_method == 'weighted_l2':
if data_purpose not in no_output:
print("Combining with Weighted L2.")
features = np.square(np.absolute(np.subtract(features, column_tensors[i])))
elif combination_method == 'concatenate':
if data_purpose not in no_output:
print("Combining with Concatenate.")
features = np.concatenate([features, column_tensors[i]], 1)
return features, original_labels, indexed_labels, keys
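# For illustration (hypothetical embedding vectors, not real data): for u = [1, 2]
# and v = [3, 4] the combination_method branches above give
# hadamard -> [3, 8], average -> [2, 3], weighted_l1 -> [2, 2],
# weighted_l2 -> [4, 4], concatenate -> [1, 2, 3, 4].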
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name, positive_labels, combination_method, method):
"""Train and evaluate the model."""
index_map, weights = wvd.load(train_embeddings_file_name)
#Get positive labels
positive_labels = positive_labels.split(',')
print("reading data...")
train_file_name = train_data
df_train = pd.read_table(train_file_name, dtype={'node1':str, 'node2':str})
df_train = df_train.sample(frac=1)
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (
df_train["label"].apply(lambda x: label_func(x, positive_labels))).astype(int)
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
print("model directory = %s" % model_dir)
train_x, _, train_y, _ = get_input(df_train, weights, index_map, combination_method)
print("\nBuilding model...")
m = build_estimator(model_dir, model_type, weights, index_map, combination_method)
print("\nTraining model...")
if model_type == "regressor":
m.fit(train_x, train_y, n_epoch=train_steps, show_metric=True, snapshot_epoch=False)
print("\nTesting model...")
index_map, weights = wvd.load(test_embeddings_file_name)
print("reading data...")
test_file_name = test_data
df_test = pd.read_table(test_file_name, dtype={'node1':str, 'node2':str})
df_test = df_test.sample(frac=1)
# remove NaN elements
df_test = df_test.dropna(how='any', axis=0)
df_test[LABEL_COLUMN] = (
df_test["label"].apply(lambda x: label_func(x, positive_labels))).astype(int)
if model_type == "regressor":
test_x, test_original_y, test_index_y, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')
node_sets = get_node_sets(test_original_x, test_original_y)
print("\nPredicting:")
model_predictions = m.predict(test_x)
model_predictions = list(model_predictions)
#Convert back to 1 and 0
predictions = []
model_predictions_probs = []
for prediction in model_predictions:
predictions.append(prediction[1]) #non-thresholded value of positive class
model_predictions_probs.append(prediction[1])
k = int(len([i for i in test_original_y if i == 1]) * 0.3)
do_evaluations([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets,
positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method)
#Uncomment to log ranked links
#log_predictions([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets,
# positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method,
# outfilename=combination_method, method=method)
def do_evaluations(test_x, test_y, predictions, k, node_sets, positive_labels=None, model=None, graph=None, vertices=None,
weights=None, index_map=None, combination_method=None, bipartite=False, sim_ind=None, error_anlysis=False):
#Area under ROC
roc_auc = metrics.roc_auc_score(test_y, predictions, average='micro')
print("ROC AUC: %s" % roc_auc)
#Area under Precision-recall curve
avg_prec = metrics.average_precision_score(test_y, predictions, average='micro')
print("Overall Average precision (corresponds to AUPRC): %s" % avg_prec)
predictions_cpy = [pred for pred in predictions]
test_y_cpy = [y for y in test_y]
test_x_cpy = [x for x in test_x]
k_cpy = k
#Mean Average Precision (MAP)
print("Calculating Averaged R-Precision and MAP")
total_rp = 0.0
total_ap = 0.0
total_rp_error = 0.0
total_ap_error = 0.0
no_pos_nodes = 0
if model:
#TODO: Check for these values being present without raising the Value error about any and all for truth value of array being ambiguous
#assert weights != None and index_map != None and positive_labels != None and \
# combination_method, "If model is specified, weights, index_map, combination method and positive_labels must be given."
for node, dict_ in node_sets.iteritems():
map_df_test = pd.DataFrame(dict_)
map_df_test[LABEL_COLUMN] = (
map_df_test["label"].apply(lambda x: label_func(x, positive_labels))).astype(int)
test_x, test_original_y, test_index_y, test_original_x = get_input(map_df_test, weights, index_map, combination_method, data_purpose='map')
model_predictions = model.predict(test_x)
model_predictions = list(model_predictions)
predictions = []
for prediction in model_predictions:
predictions.append(prediction[1]) #non-thresholded value of positive class
test_original_y = [y for y in test_original_y]
node_pos_cnt = len([i for i in test_original_y if i ==1])
pos_indices = [ind for ind, i in enumerate(test_original_y) if i ==1]
rp = r_precision([p for p in predictions], pos_indices, node_pos_cnt, lambda ind,rel_indices: ind in rel_indices)
if rp < 1.0 and node_pos_cnt != 0:
total_rp_error += (1.0 - rp)
if error_anlysis:
top_k = get_top_k([p for p in predictions], node_pos_cnt)
print("Pos Gold indices: %s. Pos predicted indices: %s. RP: %s." % (pos_indices, [ind for ind in top_k], rp))
ap = metrics.average_precision_score(np.array(test_original_y), np.array(predictions), average='micro')
if ap < 1.0 and node_pos_cnt != 0:
total_ap_error += (1.0 - ap)
if error_anlysis:
top_k = get_top_k([p for p in predictions], len(predictions))
print("Pos Gold indices: %s. Pos predicted indices: %s. AP: %s." % (pos_indices, [ind for ind in top_k], ap))
if str(ap) == 'nan':
ap = 0.0
if node_pos_cnt < 1:
no_pos_nodes += 1 #This node had no positive labels
total_rp += rp
total_ap += ap
elif graph:
assert sim_ind, "Similarity Index must be specified."
if bipartite:
print("Evaluations of graph. Processing graph as bipartite.")
for node, dict_ in node_sets.iteritems():
node1_lst = dict_['node1']
node2_lst = dict_['node2']
label_lst = []
for l in dict_['label']:
if l == 'O':
label_lst.append(0)
else:
label_lst.append(1)
assert len(node1_lst) == len(node2_lst) == len(label_lst), "Nodes and labels lists of unequal length: %s, %s, %s" % (len(node1_lst), len(node2_lst), len(label_lst))
predictions = []
for entity1, entity2 in zip(node1_lst, node2_lst):
entity1_set = set(graph[entity1])
entity2_set = set(graph[entity2])
if bipartite:
assert vertices, "Vertices must be passed with bipartite graphs."
#Get neighbours of neighbours of this node to use, so do new entity2_set
entity2_lst = []
for node in entity2_set:
if node in vertices:
entity = str(vertices[node])
entity2_lst += graph[entity]
entity2_set = set(entity2_lst)
#Calculate similarity index
assert sim_ind in ['common_neighbours', 'jaccard_coefficient', 'adamic_adar'], "Invalid similarity index %s" % sim_ind
cn = len(entity2_set.intersection(entity1_set)) #Calculate common neighbours which all metrics use
if sim_ind == 'common_neighbours':
si = cn
elif sim_ind == 'jaccard_coefficient':
neighbours_union_len = len(entity2_set.union(entity1_set))
if neighbours_union_len > 0:
si = cn / float(neighbours_union_len)
else:
si = 0.0
elif sim_ind == 'adamic_adar':
if cn > 1:
si = 1.0/math.log(cn)
else:
si = 0.0
predictions.append(float(si))
node_pos_cnt = len([i for i in label_lst if i ==1])
pos_indices = [ind for ind, i in enumerate(label_lst) if i ==1]
rp = r_precision([p for p in predictions], pos_indices, node_pos_cnt, lambda ind,rel_indices: ind in rel_indices)
if rp < 1.0 and node_pos_cnt != 0:
total_rp_error += (1.0 - rp)
if error_anlysis:
top_k = get_top_k([p for p in predictions], node_pos_cnt)
print("\nPos Gold indices: %s. Pos predicted indices: %s. RP: %s" % (pos_indices, [ind for ind in top_k], rp))
ap = metrics.average_precision_score(
|
np.array(label_lst)
|
numpy.array
|
import numpy as np
import scipy.linalg
import matplotlib.pyplot as plt
from matplotlib import cm
from random import randint
import scipy.optimize as optimize
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import mean_squared_error, r2_score
class StablePlot:
"""
Find the best-fitting curve for the given function
"""
fig_index = 1
funcs = []
def __init__(self,
data_path: str):
self.p_values = []
self.pop_sizes = []
self.data = []
with open(data_path, "r") as data_file:
index = 0
for line in data_file.readlines():
if index == 0:
self.pop_sizes = [int(i.replace("population size = ", "")) for i in line.split(",")[1:-1]]
index += 1
continue
items = line.split(",")[:-1]
self.p_values.append(int(items[0]))
self.data.append([int(i) for i in items[1:]])
index += 1
def fit_points(self):
"""
find the best fit for the sampled points
:return: the fitted parameters (also saves a file)
"""
x = []
y = []
z = []
for p_index, p_value in enumerate(self.p_values):
for pop_index, pop_size in enumerate(self.pop_sizes):
x.append(p_value)
y.append(pop_size)
z.append(self.data[p_index][pop_index])
data = np.c_[x, y, z]
mn = np.min(data, axis=0)
mx = np.max(data, axis=0)
X, Y = np.meshgrid(np.linspace(mn[0], mx[0], 100), np.linspace(mn[1], mx[1], 100))
XX = X.flatten()
YY = Y.flatten()
# best-fit linear plane
A = np.c_[data[:, 0], data[:, 1],
|
np.ones(data.shape[0])
|
numpy.ones
|
import torch
import cv2
import numpy as np
import math
from itertools import product as product
def images_to_writer(writer, images, prefix='image', names='image', epoch=0):
if isinstance(names, str):
names = [names+'_{}'.format(i) for i in range(len(images))]
for image, name in zip(images, names):
writer.add_image('{}/{}'.format(prefix, name), image, epoch)
def to_grayscale(image):
"""
input is (d, w, h)
converts a 3D image tensor to a single grayscale image by averaging over the channel dimension
"""
# print(image.shape)
channel = image.shape[0]
image = torch.sum(image, dim=0)
# print(image.shape)
image = torch.div(image, channel)
# print(image.shape)
# assert False
return image
def to_image_size(feature, target_img):
height, width, _ = target_img.shape
resized_feature = cv2.resize(feature, (width, height))
return resized_feature
def features_to_grid(features):
num, height, width, channel = (len(features), len(features[0]), len(features[0][0]), len(features[0][0][0]))
rows = math.ceil(np.sqrt(num))
output = np.zeros([rows*(height+2),rows*(width+2), 3],dtype=np.float32)
for i, feature in enumerate(features):
row = i % rows
col = math.floor(i / rows)
output[row*(2+height)+1:(row+1)*(2+height)-1, col*(2+width)+1:(col+1)*(2+width)-1] = feature
return output
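# For illustration: features_to_grid tiles N feature maps onto a roughly square
# ceil(sqrt(N)) x ceil(sqrt(N)) grid with a 1-pixel border around each tile,
# e.g. 10 maps of 32x32 are laid out on a 4x4 grid of 34x34-pixel cells.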
def viz_feature_maps(writer, feature_maps, module_name='base', epoch=0, prefix='module_feature_maps'):
feature_map_visualization = []
for i in feature_maps:
i = i.squeeze(0)
temp = to_grayscale(i)
feature_map_visualization.append(temp.data.cpu().numpy())
names, feature_map_heatmap = [], []
for i, feature_map in enumerate(feature_map_visualization):
feature_map = (feature_map * 255)
heatmap = cv2.applyColorMap(feature_map.astype(np.uint8), cv2.COLORMAP_JET)
feature_map_heatmap.append(heatmap[..., ::-1])
names.append('{}.{}'.format(module_name, i))
images_to_writer(writer, feature_map_heatmap, prefix, names, epoch)
def viz_grads(writer, model, feature_maps, target_image, target_mean, module_name='base', epoch=0, prefix='module_grads'):
grads_visualization = []
names = []
for i, feature_map in enumerate(feature_maps):
model.zero_grad()
# print()
feature_map.backward(torch.Tensor(np.ones(feature_map.size())), retain_graph=True)
# print(target_image.grad)
grads = target_image.grad.data.clamp(min=0).squeeze(0).permute(1,2,0)
# print(grads)
# assert False
grads_visualization.append(grads.cpu().numpy()+target_mean)
names.append('{}.{}'.format(module_name, i))
images_to_writer(writer, grads_visualization, prefix, names, epoch)
def viz_module_feature_maps(writer, module, input_image, module_name='base', epoch=0, mode='one', prefix='module_feature_maps'):
output_image = input_image
feature_maps = []
for i, layer in enumerate(module):
output_image = layer(output_image)
feature_maps.append(output_image)
if mode == 'grid':
pass
elif mode == 'one':
viz_feature_maps(writer, feature_maps, module_name, epoch, prefix)
return output_image
def viz_module_grads(writer, model, module, input_image, target_image, target_mean, module_name='base', epoch=0, mode='one', prefix='module_grads'):
output_image = input_image
feature_maps = []
for i, layer in enumerate(module):
output_image = layer(output_image)
feature_maps.append(output_image)
if mode == 'grid':
pass
elif mode == 'one':
viz_grads(writer, model, feature_maps, target_image, target_mean, module_name, epoch, prefix)
return output_image
def viz_prior_box(writer, prior_box, image=None, epoch=0):
if image is None:
image = np.random.random((prior_box.image_size[0], prior_box.image_size[1], 3))
elif isinstance(image, str):
image = cv2.imread(image, -1)
# image = cv2.resize(image, (prior_box.image_size[0], prior_box.image_size[1]))
image = cv2.resize(image, (prior_box.image_size[1], prior_box.image_size[0]))
for k, f in enumerate(prior_box.feature_maps):
bbxs = []
image_show = image.copy()
for i, j in product(range(f[0]), range(f[1])):
cx = j * prior_box.steps[k][1] + prior_box.offset[k][1]
cy = i * prior_box.steps[k][0] + prior_box.offset[k][0]
# aspect_ratio: 1 Min size
s_k = prior_box.scales[k]
bbxs += [cx, cy, s_k, s_k]
# # aspect_ratio: 1 Max size
# # rel size: sqrt(s_k * s_(k+1))
# s_k_prime = sqrt(s_k * self.scales[k+1])
# bbxs += [cx, cy, s_k_prime, s_k_prime]
# # rest of aspect ratios
# for ar in self.aspect_ratios[k]:
# ar_sqrt = sqrt(ar)
# bbxs += [cx, cy, s_k*ar_sqrt, s_k/ar_sqrt]
# bbxs += [cx, cy, s_k/ar_sqrt, s_k*ar_sqrt]
scale = [prior_box.image_size[1], prior_box.image_size[0], prior_box.image_size[1], prior_box.image_size[0]]
bbxs = np.array(bbxs).reshape((-1, 4))
archors = bbxs[:, :2] * scale[:2]
bbxs = np.hstack((bbxs[:, :2] - bbxs[:, 2:4]/2, bbxs[:, :2] + bbxs[:, 2:4]/2)) * scale
archors = archors.astype(np.int32)
bbxs = bbxs.astype(np.int32)
for archor, bbx in zip(archors, bbxs):
cv2.circle(image_show,(archor[0],archor[1]), 2, (0,0,255), -1)
if archor[0] == archor[1]:
cv2.rectangle(image_show, (bbx[0], bbx[1]), (bbx[2], bbx[3]), (0, 255, 0), 1)
writer.add_image('example_prior_boxs/feature_map_{}'.format(k), image_show, epoch)
def add_pr_curve_raw(writer, tag, precision, recall, epoch=0):
num_thresholds = len(precision)
writer.add_pr_curve_raw(
tag=tag,
true_positive_counts = -np.ones(num_thresholds),
false_positive_counts = -np.ones(num_thresholds),
true_negative_counts = -np.ones(num_thresholds),
false_negative_counts = -
|
np.ones(num_thresholds)
|
numpy.ones
|
import torch
from Third_Party.smoothing_adversarial.attacks import PGD_L2, DDN
import numpy as np
import gc
import pandas as pd
from torch.nn.functional import softmax
from scipy.stats import rankdata
from numpy.random import default_rng
from scipy.stats.mstats import mquantiles
from scipy.stats import norm
from tqdm import tqdm
from typing import List
# function to calculate accuracy of the model
def calculate_accuracy(model, dataloader, device):
model.eval() # put in evaluation mode
total_correct = 0
total_images = 0
with torch.no_grad():
for data in dataloader:
images, labels = data
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total_images += labels.size(0)
total_correct += (predicted == labels).sum().item()
model_accuracy = total_correct / total_images
return model_accuracy
def Smooth_Adv(model, x, y, noises, N_steps=20, max_norm=0.125, device='cpu', GPU_CAPACITY=1024, method='PGD'):
# create attack model
if method == 'PGD':
attacker = PGD_L2(steps=N_steps, device=device, max_norm=max_norm)
elif method == "DDN":
attacker = DDN(steps=N_steps, device=device, max_norm=max_norm)
# create container for the adversarial examples
x_adv = torch.zeros_like(x)
# get number of data points
n = x.size()[0]
# number of permutations to estimate mean
num_of_noise_vecs = noises.size()[0] // n
# calculate maximum batch size according to gpu capacity
batch_size = GPU_CAPACITY // num_of_noise_vecs
# calculate number of batches
if n % batch_size != 0:
num_of_batches = (n // batch_size) + 1
else:
num_of_batches = (n // batch_size)
# start generating examples for each batch
print("Generating Adverserial Examples:")
for j in tqdm(range(num_of_batches)):
#GPUtil.showUtilization()
# get inputs and labels of batch
inputs = x[(j * batch_size):((j + 1) * batch_size)]
labels = y[(j * batch_size):((j + 1) * batch_size)]
# duplicate batch according to the number of added noises and send to device
# the first num_of_noise_vecs samples will be duplicates of x[0] and etc.
tmp = torch.zeros((len(labels) * num_of_noise_vecs, *inputs.shape[1:]))
x_tmp = inputs.repeat((1, num_of_noise_vecs, 1, 1)).view(tmp.shape).to(device)
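# Note: for a batch of 2 inputs with num_of_noise_vecs == 3 this repeat/view
# yields 6 rows ordered [x0, x0, x0, x1, x1, x1], i.e. all noisy copies of a
# sample are contiguous; the [::num_of_noise_vecs] stride below relies on this.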
# send labels to device
y_tmp = labels.to(device).long()
# generate random Gaussian noise for the duplicated batch
noise = noises[(j * (batch_size * num_of_noise_vecs)):((j + 1) * (batch_size * num_of_noise_vecs))].to(device)
# noise = torch.randn_like(x_tmp, device=device) * sigma_adv
# generate adversarial examples for the batch
x_adv_batch = attacker.attack(model, x_tmp, y_tmp,
noise=noise, num_noise_vectors=num_of_noise_vecs,
no_grad=False,
)
# take only the one example for each point
x_adv_batch = x_adv_batch[::num_of_noise_vecs]
# move back to CPU
x_adv_batch = x_adv_batch.to(torch.device('cpu'))
# put in the container
x_adv[(j * batch_size):((j + 1) * batch_size)] = x_adv_batch.detach().clone()
# return adversarial examples
return x_adv
def evaluate_predictions(S, X, y, conditional=False, coverage_on_label=False, num_of_classes=10):
# get numbers of points
#n = np.shape(X)[0]
# get points to a matrix of the format nxp
#X = np.vstack([X[i, 0, :, :].flatten() for i in range(n)])
# Marginal coverage
marg_coverage = np.mean([y[i] in S[i] for i in range(len(y))])
# If desired calculate coverage for each class
if coverage_on_label:
sums = np.zeros(num_of_classes)
size_sums = np.zeros(num_of_classes)
lengths = np.zeros(num_of_classes)
for i in range(len(y)):
lengths[y[i]] = lengths[y[i]] + 1
size_sums[y[i]] = size_sums[y[i]] + len(S[i])
if y[i] in S[i]:
sums[y[i]] = sums[y[i]] + 1
coverage_given_y = sums/lengths
lengths_given_y = size_sums/lengths
# Conditional coverage not implemented
wsc_coverage = None
# Size and size conditional on coverage
size = np.mean([len(S[i]) for i in range(len(y))])
idx_cover = np.where([y[i] in S[i] for i in range(len(y))])[0]
size_cover = np.mean([len(S[i]) for i in idx_cover])
# Combine results
out = pd.DataFrame({'Coverage': [marg_coverage], 'Conditional coverage': [wsc_coverage],
'Size': [size], 'Size cover': [size_cover]})
# If desired, save coverage for each class
if coverage_on_label:
for i in range(num_of_classes):
out['Coverage given '+str(i)] = coverage_given_y[i]
out['Size given '+str(i)] = lengths_given_y[i]
return out
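# For illustration (hypothetical prediction sets, not real results): with
# S = [[0, 1], [2], [1, 3]] and y = [1, 2, 0], the marginal coverage above is
# 2/3, the average set size is 5/3, and the average size over covered points
# is 3/2 (X is not used by these marginal metrics).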
# calculate accuracy of the smoothed classifier
def calculate_accuracy_smooth(model, x, y, noises, num_classes, k=1, device='cpu', GPU_CAPACITY=1024):
# get size of the test set
n = x.size()[0]
# number of permutations to estimate mean
n_smooth = noises.size()[0] // n
# create container for the outputs
smoothed_predictions = torch.zeros((n, num_classes))
# calculate maximum batch size according to gpu capacity
batch_size = GPU_CAPACITY // n_smooth
# calculate number of batches
if n % batch_size != 0:
num_of_batches = (n // batch_size) + 1
else:
num_of_batches = (n // batch_size)
# get predictions over all batches
for j in range(num_of_batches):
# get inputs and labels of batch
inputs = x[(j * batch_size):((j + 1) * batch_size)]
labels = y[(j * batch_size):((j + 1) * batch_size)]
# duplicate batch according to the number of added noises and send to device
# the first n_smooth samples will be duplicates of x[0] and etc.
tmp = torch.zeros((len(labels) * n_smooth, *inputs.shape[1:]))
x_tmp = inputs.repeat((1, n_smooth, 1, 1)).view(tmp.shape).to(device)
# generate random Gaussian noise for the duplicated batch
noise = noises[(j * (batch_size * n_smooth)):((j + 1) * (batch_size * n_smooth))].to(device)
# add noise to points
noisy_points = x_tmp + noise
# get classifier predictions on noisy points
model.eval() # put in evaluation mode
with torch.no_grad():
noisy_outputs = model(noisy_points).to(torch.device('cpu'))
# transform the output into probabilities vector
noisy_outputs = softmax(noisy_outputs, dim=1)
# get smoothed prediction for each point
for m in range(len(labels)):
smoothed_predictions[(j * batch_size) + m, :] = torch.mean(
noisy_outputs[(m * n_smooth):((m + 1) * n_smooth)], dim=0)
# transform results to numpy array
smoothed_predictions = smoothed_predictions.numpy()
# get label ranks to calculate top k accuracy
label_ranks = np.array([rankdata(-smoothed_predictions[i, :], method='ordinal')[y[i]] - 1 for i in range(n)])
# get probabilities of correct labels
label_probs = np.array([smoothed_predictions[i, y[i]] for i in range(n)])
# calculate accuracy
top_k_accuracy = np.sum(label_ranks <= (k - 1)) / float(n)
# calculate average inverse probability score
score = np.mean(1 - label_probs)
# calculate the 90% quantile
quantile = mquantiles(1-label_probs, prob=0.9)
return top_k_accuracy, score, quantile
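# Note: label_ranks is the 0-based position of the true label when classes are
# sorted by descending smoothed probability, so top-k accuracy is simply the
# fraction of samples with label_ranks <= k - 1 (rank 0 means the true class
# received the highest smoothed probability).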
def smooth_calibration(model, x_calib, y_calib, noises, alpha, num_of_classes, scores_list, correction, base=False, device='cpu', GPU_CAPACITY=1024):
# size of the calibration set
n_calib = x_calib.size()[0]
# number of permutations to estimate mean
n_smooth = noises.size()[0] // n_calib
# create container for the scores
if base:
scores_simple = np.zeros((len(scores_list), n_calib))
else:
smoothed_scores = np.zeros((len(scores_list), n_calib))
scores_smoothed = np.zeros((len(scores_list), n_calib))
# create container for the calibration thresholds
thresholds = np.zeros((len(scores_list), 3))
# calculate maximum batch size according to gpu capacity
batch_size = GPU_CAPACITY // n_smooth
# calculate number of batches
if n_calib % batch_size != 0:
num_of_batches = (n_calib // batch_size) + 1
else:
num_of_batches = (n_calib // batch_size)
# create container for smoothed and base classifier outputs
if base:
simple_outputs = np.zeros((n_calib, num_of_classes))
else:
smooth_outputs = np.zeros((n_calib, num_of_classes))
# initiate random uniform variables for inverse quantile score
rng = default_rng()
uniform_variables = rng.uniform(size=n_calib, low=0.0, high=1.0)
# pass all points to model in batches and calculate scores
for j in range(num_of_batches):
# get inputs and labels of batch
inputs = x_calib[(j * batch_size):((j + 1) * batch_size)]
labels = y_calib[(j * batch_size):((j + 1) * batch_size)]
if base:
noise = noises[(j * batch_size):((j + 1) * batch_size)].to(device)
noisy_points = inputs.to(device) + noise
else:
# duplicate batch according to the number of added noises and send to device
# the first n_smooth samples will be duplicates of x[0] and etc.
tmp = torch.zeros((len(labels) * n_smooth, *inputs.shape[1:]))
x_tmp = inputs.repeat((1, n_smooth, 1, 1)).view(tmp.shape).to(device)
# generate random Gaussian noise for the duplicated batch
noise = noises[(j * (batch_size * n_smooth)):((j + 1) * (batch_size * n_smooth))].to(device)
# add noise to points
noisy_points = x_tmp + noise
# get classifier predictions on noisy points
model.eval() # put in evaluation mode
with torch.no_grad():
noisy_outputs = model(noisy_points).to(torch.device('cpu'))
# transform the output into probabilities vector
noisy_outputs = softmax(noisy_outputs, dim=1).numpy()
# get smoothed score for each point
if base:
simple_outputs[(j * batch_size):((j + 1) * batch_size), :] = noisy_outputs
else:
for k in range(len(labels)):
# get all the noisy outputs of a specific point
point_outputs = noisy_outputs[(k * n_smooth):((k + 1) * n_smooth)]
# get smoothed classifier output of this point
smooth_outputs[(j * batch_size) + k, :] = np.mean(point_outputs, axis=0)
# get smoothed score of this point
# generate random variable for inverse quantile score
u = np.ones(n_smooth) * uniform_variables[(j * batch_size) + k]
# run over all scores functions and compute smoothed scores
for p, score_func in enumerate(scores_list):
# get smoothed score
tmp_scores = score_func(point_outputs, labels[k], u, all_combinations=True)
smoothed_scores[p, (j * batch_size) + k] = np.mean(tmp_scores)
# run over all scores functions and compute scores of smoothed and base classifier
for p, score_func in enumerate(scores_list):
if base:
scores_simple[p, :] = score_func(simple_outputs, y_calib, uniform_variables, all_combinations=False)
else:
scores_smoothed[p, :] = score_func(smooth_outputs, y_calib, uniform_variables, all_combinations=False)
# Compute thresholds
level_adjusted = (1.0 - alpha) * (1.0 + 1.0 / float(n_calib))
bounds = np.zeros((len(scores_list), 2))
for p in range(len(scores_list)):
if base:
thresholds[p, 0] = mquantiles(scores_simple[p, :], prob=level_adjusted)
else:
thresholds[p, 1] = mquantiles(scores_smoothed[p, :], prob=level_adjusted)
thresholds[p, 2] = mquantiles(smoothed_scores[p, :], prob=level_adjusted)
# calculate lower and upper bounds of correction of smoothed score
upper_thresh = norm.cdf(norm.ppf(thresholds[p, 2], loc=0, scale=1)+correction, loc=0, scale=1)
lower_thresh = norm.cdf(norm.ppf(thresholds[p, 2], loc=0, scale=1)-correction, loc=0, scale=1)
bounds[p, 0] = np.size(smoothed_scores[p, :][smoothed_scores[p, :] <= lower_thresh])/np.size(smoothed_scores[p, :])
bounds[p, 1] = np.size(smoothed_scores[p, :][smoothed_scores[p, :] <= upper_thresh]) / np.size(smoothed_scores[p, :])
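# Note: the corrected thresholds are Phi(Phi^-1(tau) +/- correction), where tau
# is the calibrated quantile of the smoothed scores and Phi is the standard
# normal CDF; bounds[p] records the fraction of calibration scores at or below
# each corrected threshold.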
return thresholds, bounds
def smooth_calibration_ImageNet(model, x_calib, y_calib, n_smooth, sigma_smooth, alpha, num_of_classes, scores_list, correction, base=False, device='cpu', GPU_CAPACITY=1024):
# size of the calibration set
n_calib = x_calib.size()[0]
# create container for the scores
if base:
scores_simple = np.zeros((len(scores_list), n_calib))
else:
smoothed_scores = np.zeros((len(scores_list), n_calib))
scores_smoothed = np.zeros((len(scores_list), n_calib))
# create container for the calibration thresholds
thresholds = np.zeros((len(scores_list), 3))
# calculate maximum batch size according to gpu capacity
batch_size = GPU_CAPACITY // n_smooth
# calculate number of batches
if n_calib % batch_size != 0:
num_of_batches = (n_calib // batch_size) + 1
else:
num_of_batches = (n_calib // batch_size)
# create container for smoothed and base classifier outputs
if base:
simple_outputs = np.zeros((n_calib, num_of_classes))
else:
smooth_outputs = np.zeros((n_calib, num_of_classes))
# initiate random uniform variables for inverse quantile score
rng = default_rng()
uniform_variables = rng.uniform(size=n_calib, low=0.0, high=1.0)
# pass all points to model in batches and calculate scores
for j in range(num_of_batches):
# get inputs and labels of batch
inputs = x_calib[(j * batch_size):((j + 1) * batch_size)]
labels = y_calib[(j * batch_size):((j + 1) * batch_size)]
if base:
noise = (torch.randn_like(inputs)*sigma_smooth).to(device)
noisy_points = inputs.to(device) + noise
else:
# duplicate batch according to the number of added noises and send to device
# the first n_smooth samples will be duplicates of x[0] and etc.
tmp = torch.zeros((len(labels) * n_smooth, *inputs.shape[1:]))
x_tmp = inputs.repeat((1, n_smooth, 1, 1)).view(tmp.shape).to(device)
# generate random Gaussian noise for the duplicated batch
noise = (torch.randn_like(x_tmp)*sigma_smooth).to(device)
# add noise to points
noisy_points = x_tmp + noise
# get classifier predictions on noisy points
model.eval() # put in evaluation mode
with torch.no_grad():
noisy_outputs = model(noisy_points).to(torch.device('cpu'))
# transform the output into probabilities vector
noisy_outputs = softmax(noisy_outputs, dim=1).numpy()
# get smoothed score for each point
if base:
simple_outputs[(j * batch_size):((j + 1) * batch_size), :] = noisy_outputs
else:
for k in range(len(labels)):
# get all the noisy outputs of a specific point
point_outputs = noisy_outputs[(k * n_smooth):((k + 1) * n_smooth)]
# get smoothed classifier output of this point
smooth_outputs[(j * batch_size) + k, :] = np.mean(point_outputs, axis=0)
# get smoothed score of this point
# generate random variable for inverse quantile score
u = np.ones(n_smooth) * uniform_variables[(j * batch_size) + k]
# run over all scores functions and compute smoothed scores
for p, score_func in enumerate(scores_list):
# get smoothed score
tmp_scores = score_func(point_outputs, labels[k], u, all_combinations=True)
smoothed_scores[p, (j * batch_size) + k] = np.mean(tmp_scores)
# run over all scores functions and compute scores of smoothed and base classifier
for p, score_func in enumerate(scores_list):
if base:
scores_simple[p, :] = score_func(simple_outputs, y_calib, uniform_variables, all_combinations=False)
else:
scores_smoothed[p, :] = score_func(smooth_outputs, y_calib, uniform_variables, all_combinations=False)
# Compute thresholds
level_adjusted = (1.0 - alpha) * (1.0 + 1.0 / float(n_calib))
bounds = np.zeros((len(scores_list), 2))
for p in range(len(scores_list)):
if base:
thresholds[p, 0] = mquantiles(scores_simple[p, :], prob=level_adjusted)
else:
thresholds[p, 1] = mquantiles(scores_smoothed[p, :], prob=level_adjusted)
thresholds[p, 2] = mquantiles(smoothed_scores[p, :], prob=level_adjusted)
# calculate lower and upper bounds of correction of smoothed score
upper_thresh = norm.cdf(norm.ppf(thresholds[p, 2], loc=0, scale=1)+correction, loc=0, scale=1)
lower_thresh = norm.cdf(norm.ppf(thresholds[p, 2], loc=0, scale=1)-correction, loc=0, scale=1)
bounds[p, 0] = np.size(smoothed_scores[p, :][smoothed_scores[p, :] <= lower_thresh])/np.size(smoothed_scores[p, :])
bounds[p, 1] = np.size(smoothed_scores[p, :][smoothed_scores[p, :] <= upper_thresh]) / np.size(smoothed_scores[p, :])
return thresholds, bounds
def predict_sets(model, x, noises, num_of_classes, scores_list, thresholds, correction, base=False, device='cpu', GPU_CAPACITY=1024):
# get number of points
n = x.size()[0]
# number of permutations to estimate mean
n_smooth = noises.size()[0] // n
# create container for the scores
if base:
scores_simple = np.zeros((len(scores_list), n, num_of_classes))
else:
smoothed_scores = np.zeros((len(scores_list), n, num_of_classes))
scores_smoothed = np.zeros((len(scores_list), n, num_of_classes))
# calculate maximum batch size according to gpu capacity
batch_size = GPU_CAPACITY // n_smooth
# calculate number of batches
if n % batch_size != 0:
num_of_batches = (n // batch_size) + 1
else:
num_of_batches = (n // batch_size)
# initiate random uniform variables for inverse quantile score
rng =
|
default_rng()
|
numpy.random.default_rng
|
#!/usr/bin/env python
u"""
dealiasing_monthly_mean.py
Written by <NAME> (10/2021)
Reads GRACE/GRACE-FO AOD1B datafiles for a specific product and outputs
the monthly mean for a specific GRACE/GRACE-FO processing center and data release
GAA: atmospheric loading from ECMWF
GAB: oceanic loading from OMCT/MPIOM
GAC: global atmospheric and oceanic loading
GAD: ocean bottom pressure from OMCT/MPIOM
Creates a file for each month (such as making GAA and GAB files for CSR)
CALLING SEQUENCE:
python dealiasing_monthly_mean.py --center CSR --release RL06 --product GAA
COMMAND LINE OPTIONS:
-D X, --directory X: Working Data Directory
-c X, --center X: GRACE/GRACE-FO Processing Center
-r X, --release X: GRACE/GRACE-FO Data Release (RL05 or RL06)
-p X, --product X: GRACE/GRACE-FO dealiasing product (GAA, GAB, GAC, GAD)
-l X, --lmax X: Maximum spherical harmonic degree and order for output
-F X, --format X: Output data format
ascii
netCDF4
HDF5
SHM
-C, --clobber: Overwrite existing data
-M X, --mode X: Permission mode of directories and files
-V, --verbose: Output information for each output file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format
http://www.h5py.org/
PROGRAM DEPENDENCIES:
harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
destripe_harmonics.py: calculates the decorrelation (destriping) filter
and filters the GRACE/GRACE-FO coefficients for striping errors
ncdf_read_stokes.py: reads spherical harmonic netcdf files
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_read_stokes.py: reads spherical harmonic HDF5 files
hdf5_stokes.py: writes output spherical harmonic data to HDF5
time.py: utilities for calculating time operations
UPDATED HISTORY:
Updated 10/2021: using python logging for handling verbose output
Updated 07/2021: can use default argument files to define options
added option to output in spherical harmonic model (SHM) format
remove choices for argparse processing centers
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 02/2021: replaced numpy bool to prevent deprecation warning
Updated 12/2020: using utilities from time module
Updated 10/2020: use argparse to set command line parameters
Updated 08/2020: flake8 compatible regular expression strings
Updated 04/2020: using harmonics class for operations and outputting to file
reduce output date file to only months with AOD data
Updated 10/2019: changing Y/N flags to True/False
Updated 06/2019: using python3 compatible regular expression patterns
Updated 10/2018: using future division for python3 Compatibility
Updated 08/2018: using full release string (RL05 instead of 5)
Updated 03/2018: copy date file from input GSM directory to output directory
Written 03/2018
"""
from __future__ import print_function, division
import sys
import os
import re
import gzip
import time
import logging
import tarfile
import argparse
import numpy as np
import gravity_toolkit.time
import gravity_toolkit.utilities as utilities
from gravity_toolkit.harmonics import harmonics
#-- PURPOSE: calculate the Julian day from the year and the day of the year
#-- http://scienceworld.wolfram.com/astronomy/JulianDate.html
def calc_julian_day(YEAR, DAY_OF_YEAR):
JD = 367.0*YEAR - np.floor(7.0*(YEAR + np.floor(10.0/12.0))/4.0) - \
np.floor(3.0*(np.floor((YEAR + 8.0/7.0)/100.0) + 1.0)/4.0) + \
np.floor(275.0/9.0) + np.float64(DAY_OF_YEAR) + 1721028.5
return JD
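#-- For illustration: calc_julian_day(2020, 1) evaluates to 2458849.5, the
#-- Julian Day of 2020-01-01 at 00:00 UT, which gives a quick sanity check of
#-- the formula above.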
#-- PURPOSE: reads the AOD1B data and outputs a monthly mean
def dealiasing_monthly_mean(base_dir, PROC=None, DREL=None, DSET=None,
LMAX=None, DATAFORM=None, CLOBBER=False, VERBOSE=False, MODE=0o775):
#-- create logger
loglevel = logging.INFO if VERBOSE else logging.CRITICAL
logging.basicConfig(level=loglevel)
#-- output data suffix
suffix = dict(ascii='txt', netCDF4='nc', HDF5='H5')
#-- aod1b data products
aod1b_products = dict(GAA='atm',GAB='ocn',GAC='glo',GAD='oba')
#-- compile regular expressions operator for the clm/slm headers
#-- for the specific AOD1b product
hx = re.compile(r'^DATA.*SET.*{0}'.format(aod1b_products[DSET]),re.VERBOSE)
#-- compile regular expression operator to find numerical instances
#-- will extract the data from the file
regex_pattern = r'[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?'
rx = re.compile(regex_pattern, re.VERBOSE)
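#-- For illustration: the pattern matches plain integers ('123'), signed
#-- decimals ('-0.5') and exponent forms ('1.2E+05'), i.e. the numeric tokens
#-- expected in the ascii records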
#-- set number of hours in a file
#-- set the ocean model for a given release
if DREL in ('RL01','RL02','RL03','RL04','RL05'):
#-- for 00, 06, 12 and 18
n_time = 4
ATMOSPHERE = 'ECMWF'
OCEAN_MODEL = 'OMCT'
default_center = 'EIGEN'
default_lmax = 100
elif DREL in ('RL06',):
#-- for 00, 03, 06, 09, 12, 15, 18 and 21
n_time = 8
ATMOSPHERE = 'ECMWF'
OCEAN_MODEL = 'MPIOM'
default_center = 'GFZOP'
default_lmax = 180
else:
raise ValueError('Invalid data release')
#-- Maximum spherical harmonic degree (LMAX)
LMAX = default_lmax if not LMAX else LMAX
#-- Calculating the number of cos and sin harmonics up to d/o of file
n_harm = (default_lmax**2 + 3*default_lmax)//2 + 1
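#-- (for reference: default_lmax=180 gives n_harm = 16471, since the count of
#-- (degree, order) pairs up to lmax equals (lmax+1)*(lmax+2)/2)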
#-- AOD1B data products
product = {}
product['atm'] = 'Atmospheric loading from {0}'.format(ATMOSPHERE)
product['ocn'] = 'Oceanic loading from {0}'.format(OCEAN_MODEL)
product['glo'] = 'Global atmospheric and oceanic loading'
product['oba'] = 'Ocean bottom pressure from {0}'.format(OCEAN_MODEL)
#-- GRACE AOD1B directory for data release
aod1b_dir = os.path.join(base_dir,'AOD1B',DREL)
#-- GRACE data directory for data release and processing center
grace_dir = os.path.join(base_dir,PROC,DREL)
#-- recursively create output directory if not currently existing
if not os.access(os.path.join(grace_dir,DSET),os.F_OK):
os.makedirs(os.path.join(grace_dir,DSET), MODE)
#-- file formatting string if outputting to SHM format
shm = '{0}-2_{1:4.0f}{2:03.0f}-{3:4.0f}{4:03.0f}_{5}_{6}_{7}_{8}00.gz'
#-- center name if outputting to SHM format
if (PROC == 'CSR'):
CENTER = 'UTCSR'
elif (PROC == 'GFZ'):
CENTER = default_center
elif (PROC == 'JPL'):
CENTER = 'JPLEM'
else:
CENTER = default_center
#-- read input DATE file from GSM data product
grace_datefile = '{0}_{1}_DATES.txt'.format(PROC, DREL)
date_input = np.loadtxt(os.path.join(grace_dir,'GSM',grace_datefile),
skiprows=1)
grace_month = date_input[:,1].astype(np.int64)
start_yr = date_input[:,2]
start_day = date_input[:,3].astype(np.int64)
end_yr = date_input[:,4]
end_day = date_input[:,5].astype(np.int64)
#-- output date file reduced to months with complete AOD
f_out = open(os.path.join(grace_dir,DSET,grace_datefile), 'w')
#-- date file header information
args = ('Mid-date','Month','Start_Day','End_Day','Total_Days')
print('{0} {1:>10} {2:>11} {3:>10} {4:>13}'.format(*args),file=f_out)
#-- for each GRACE/GRACE-FO month
for t,gm in enumerate(grace_month):
#-- check if GRACE/GRACE-FO month crosses years
if (start_yr[t] != end_yr[t]):
#-- check if start_yr is a Leap Year or Standard Year
dpy = gravity_toolkit.time.calendar_days(start_yr[t]).sum()
#-- list of Julian Days to read from both start and end year
julian_days_to_read = []
#-- add days to read from start and end years
julian_days_to_read.extend([calc_julian_day(start_yr[t],D)
for D in range(start_day[t],dpy+1)])
julian_days_to_read.extend([calc_julian_day(end_yr[t],D)
for D in range(1,end_day[t]+1)])
else:
#-- Julian Days to read going from start_day to end_day
julian_days_to_read = [calc_julian_day(start_yr[t],D)
for D in range(start_day[t],end_day[t]+1)]
#-- output filename for GRACE/GRACE-FO month
if (DATAFORM == 'SHM'):
MISSION = 'GRAC' if (gm <= 186) else 'GRFO'
FILE = shm.format(DSET.upper(),start_yr[t],start_day[t],
end_yr[t],end_day[t],MISSION,CENTER,'BC01',DREL[2:])
else:
args = (PROC,DREL,DSET.upper(),LMAX,gm,suffix[DATAFORM])
FILE = '{0}_{1}_{2}_CLM_L{3:d}_{4:03d}.{5}'.format(*args)
#-- calendar dates to read
JD = np.array(julian_days_to_read)
Y,M,D,h,m,s = gravity_toolkit.time.convert_julian(JD,
ASTYPE='i', FORMAT='tuple')
#-- find unique year and month pairs to read
rx1='|'.join(['{0:d}-{1:02d}'.format(*p) for p in set(zip(Y,M))])
rx2='|'.join(['{0:0d}-{1:02d}-{2:02d}'.format(*p) for p in set(zip(Y,M,D))])
#-- compile regular expressions operators for finding tar files
tx = re.compile(r'AOD1B_({0})_\d+.(tar.gz|tgz)$'.format(rx1),re.VERBOSE)
#-- finding all of the tar files in the AOD1b directory
input_tar_files = [tf for tf in os.listdir(aod1b_dir) if tx.match(tf)]
#-- compile regular expressions operators for file dates
#-- will extract year and month and calendar day from the ascii file
fx = re.compile(r'AOD1B_({0})_X_\d+.asc(.gz)?$'.format(rx2),re.VERBOSE)
#-- check the last modified times of the tar file members
input_mtime = np.zeros_like(julian_days_to_read,dtype=np.int64)
input_file_check = np.zeros_like(julian_days_to_read,dtype=bool)
c = 0
#-- for each tar file
for fi in sorted(input_tar_files):
#-- open the AOD1B monthly tar file
tar = tarfile.open(name=os.path.join(aod1b_dir,fi), mode='r:gz')
#-- for each ascii file within the tar file that matches fx
monthly_members = [m for m in tar.getmembers() if fx.match(m.name)]
for member in monthly_members:
#-- check last modification time of input tar file members
input_mtime[c] = member.mtime
input_file_check[c] = True
c += 1
#-- check if all files exist
COMPLETE = input_file_check.all()
#-- if output file exists: check if input tar file is newer
TEST = False
OVERWRITE = 'clobber'
if os.access(os.path.join(grace_dir,DSET,FILE), os.F_OK):
#-- check last modification time of input and output files
output_mtime = os.stat(os.path.join(grace_dir,DSET,FILE)).st_mtime
#-- if input tar file is newer: overwrite the output file
if (input_mtime > output_mtime).any():
TEST = True
OVERWRITE = 'overwrite'
else:
TEST = True
OVERWRITE = 'new'
#-- print GRACE/GRACE-FO dates if there is a complete month of AOD
if COMPLETE:
#-- print GRACE/GRACE-FO dates to file
print(('{0:13.8f} {1:03d} {2:8.0f} {3:03d} {4:8.0f} {5:03d} '
'{6:8.0f}').format(date_input[t,0],gm,start_yr[t],start_day[t],
end_yr[t],end_day[t],date_input[t,6]),file=f_out)
#-- if there are new files, files to be rewritten or clobbered
if COMPLETE and (TEST or CLOBBER):
#-- if verbose: output information about the output file
logging.info('{0} ({1})'.format(FILE,OVERWRITE))
#-- allocate for the mean output harmonics
Ylms = harmonics(lmax=LMAX, mmax=LMAX)
nt = len(julian_days_to_read)*n_time
Ylms.clm = np.zeros((LMAX+1,LMAX+1,nt))
Ylms.slm = np.zeros((LMAX+1,LMAX+1,nt))
Ylms.time = np.zeros((nt))
count = 0
#-- for each tar file
for fi in sorted(input_tar_files):
#-- open the AOD1B monthly tar file
tar = tarfile.open(name=os.path.join(aod1b_dir,fi), mode='r:gz')
#-- for each ascii file within the tar file that matches fx
monthly_members=[m for m in tar.getmembers() if fx.match(m.name)]
for member in monthly_members:
#-- extract member name
YMD,SFX = fx.findall(member.name).pop()
#-- open datafile for day
if (SFX == '.gz'):
fid = gzip.GzipFile(fileobj=tar.extractfile(member))
else:
fid = tar.extractfile(member)
#-- create counters for hour in dataset
hours = np.zeros((n_time))
c = 0
#-- while loop ends when dataset is read
while (c < n_time):
#-- read line
file_contents=fid.readline().decode('ISO-8859-1')
#-- find file header for data product
if bool(hx.search(file_contents)):
#-- extract hour from header and convert to float
HH, = re.findall(r'(\d+):\d+:\d+',file_contents)
hours[c] =
|
np.int64(HH)
|
numpy.int64
|
from typing import Optional
__all__ = ['DisplayCell', 'DisplayColumn', 'DisplayText',
'DisplayDetect', 'DisplayString', 'DisplayTable']
import os
import numpy as np
import re # used for new console display, remove when moved
import warnings
try:
from IPython import get_ipython
except:
pass
from .Utils.display_options import DisplayOptions
from .Utils.terminalsize import get_terminal_size
from .Utils.rt_display_properties import ItemFormat, DisplayConvert, default_item_formats, get_array_formatter
from .rt_enum import DisplayDetectModes, DisplayArrayTypes, DisplayLength, DisplayColumnColors, DisplayJustification, DisplayColorMode, DisplayTextDecoration, NumpyCharTypes, ColHeader, INVALID_DICT, TypeRegister, INVALID_SHORT_NAME, INVALID_LONG_NAME, ColumnStyle
from .rt_misc import build_header_tuples, parse_header_tuples
from .rt_datetime import DateTimeBase
from .rt_numpy import arange, hstack, bool_to_fancy, ismember
from .rt_timers import GetTSC
class DisplayAttributes(object):
MARGIN_COLUMNS = "MarginColumns"
NUMBER_OF_FOOTER_ROWS = "NumberOfFooterRows"
class DisplayDetect(object):
# Detects which environment the data is being displayed in.
# Dataset class flips global mode to DisplayDetectModes.HTML when the first
# _repr_html_ is called.
Mode =0
ForceRepr = False
ColorMode = DisplayColorMode.Dark
@staticmethod
def get_display_mode():
if (DisplayDetect.Mode ==0):
try:
ip = get_ipython()
configdict = ip.config
lenconfig = len(configdict)
# spyder has InteractiveShell
if (lenconfig > 0 and 'InteractiveShell' not in configdict):
#notebook or spyder
DisplayDetect.Mode =DisplayDetectModes.Jupyter
else:
#ipython
DisplayDetect.Mode =DisplayDetectModes.Ipython
# set a color mode for lightbg, darkbg, or no colors
if DisplayOptions.COLOR_MODE is not None:
DisplayDetect.ColorMode = DisplayOptions.COLOR_MODE
else:
color_detected = ip.colors
if color_detected == 'Linux' or color_detected == 'LightBG':
DisplayDetect.ColorMode = DisplayColorMode.Light
elif color_detected == 'Neutral' or (
'PYCHARM_HOSTED' in os.environ and
os.environ['PYCHARM_HOSTED'] == '1'):
DisplayDetect.ColorMode = DisplayColorMode.Dark
else:
DisplayDetect.ColorMode = DisplayColorMode.NoColors
except:
DisplayDetect.Mode =DisplayDetectModes.Console
return DisplayDetect.Mode
class DisplayString(object):
# wrapper for display operations that do not return a dataset or multiset
# ex. transpose: Dataset._T
def __init__(self, string):
self.data = string
def __repr__(self):
TypeRegister.Struct._lastrepr =GetTSC()
return self.data
def _repr_html_(self):
TypeRegister.Struct._lastreprhtml =GetTSC()
if DisplayDetect.Mode == DisplayDetectModes.HTML:
return self.data
else:
return None
def __str__(self):
if DisplayDetect.Mode == DisplayDetectModes.Console:
return self.data
else:
if DisplayDetect.Mode == DisplayDetectModes.HTML:
return self._repr_html_()
return self.__repr__()
class DisplayText(object):
'''
Only uses two colors: green and purple OR cyan and blue
For HTML
ds = rt.Dataset({'test': rt.arange(10)})
schema = {'Description': 'This is a structure', 'Steward': 'Nick'}
ds.apply_schema(schema)
ds.info()
'''
ESC = '\x1b['
RESET = '\x1b[00m'
TITLE_DARK = '1;32m' # green
TITLE_LIGHT = '1;35m' # purple
HEADER_DARK = '1;36m' # cyan
HEADER_LIGHT = '1;34m' # blue
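# Note: these are ANSI SGR escape sequences, e.g. ESC + TITLE_DARK = '\x1b[1;32m'
# switches the terminal to bold green until RESET ('\x1b[00m') is written.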
def __init__(self, text):
'''
Wrapper for display of possibly formatted text (e.g., Dataset.info())
:param text:
'''
self.data = text
@staticmethod
def _as_if_dark():
return DisplayDetect.ColorMode == DisplayColorMode.Dark and\
DisplayDetect.Mode != DisplayDetectModes.Jupyter
@staticmethod
def _title_color():
if DisplayText._as_if_dark():
return DisplayText.TITLE_DARK
else:
return DisplayText.TITLE_LIGHT
@staticmethod
def _header_color():
if DisplayText._as_if_dark():
return DisplayText.HEADER_DARK
else:
return DisplayText.HEADER_LIGHT
@staticmethod
def _format(txt, fmt):
return DisplayText.ESC + fmt + txt + DisplayText.RESET
@staticmethod
def title_format(txt):
return DisplayText._format(txt, DisplayText._title_color())
@staticmethod
def header_format(txt):
return DisplayText._format(txt, DisplayText._header_color())
def __str__(self):
return self.data
def __repr__(self):
return self.data
def _repr_html_(self):
# creates a dependency on ansi2html
from ansi2html import Ansi2HTMLConverter
#preamble='<html>\n<head>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n<title></title>\n<style type="text/css">\n.ansi2html-content { display: inline; white-space: pre-wrap; word-wrap: break-word; }\n.body_foreground { color: #AAAAAA; }\n.body_background { background-color: #000000; }\n.body_foreground > .bold,.bold > .body_foreground, body.body_foreground > pre > .bold { color: #FFFFFF; font-weight: normal; }\n.inv_foreground { color: #000000; }\n.inv_background { background-color: #AAAAAA; }\n</style>\n</head>\n<body class="body_foreground body_background" style="font-size: normal;" >\n<pre class="ansi2html-content">\n'
#postamble = '</pre>\n</body>\n\n</html>\n'
#return preamble + self.data + postamble
return Ansi2HTMLConverter().convert(self.data)
class DisplayTable(object):
TestFooter = False
DebugMode = False
INVALID_DATA = np.nan
options = DisplayOptions()
console_x_offset = 3
FORCE_REPR = False
@classmethod
def console_detect_settings(cls):
'''
For debugging console display.
'''
display_mode = DisplayDetect.Mode
display_color_mode = DisplayDetect.ColorMode
detected_x, detected_y = get_terminal_size()
default_x = cls.options.CONSOLE_X
default_y = cls.options.CONSOLE_Y
settings_string = ["\n"]
settings_string.append(" display mode:"+str(display_mode))
settings_string.append(" color mode:"+str(display_color_mode))
settings_string.append("detected console x:"+str(detected_x))
settings_string.append("detected console y:"+str(detected_y))
settings_string.append(" default console x:"+str(default_x))
settings_string.append(" default console y:"+str(default_y))
print("\n".join(settings_string))
def __init__(self, attribs: Optional[dict] = None):
if attribs is None:
attribs = dict()
self._console_x = self.options.CONSOLE_X
self._console_y = self.options.CONSOLE_Y
if DisplayTable.FORCE_REPR is False:
self._console_x, self._console_y = get_terminal_size()
if self._console_x is None:
if DisplayTable.DebugMode: print("could not detect console size. using defaults.")
DisplayTable.FORCE_REPR = True
self._console_x = self.options.CONSOLE_X
self._console_y = self.options.CONSOLE_Y
            # certain machines do not fail to detect the console width, but instead report it as zero
# default to the minimum console x bound
if self._console_x < self.options._BOUNDS['CONSOLE_X'][0]:
self._console_x = self.options._BOUNDS['CONSOLE_X'][0]
self._display_mode = DisplayDetect.Mode
# check for html
if self._display_mode == DisplayDetectModes.HTML or self._display_mode == DisplayDetectModes.Jupyter:
self._console_x = self.options.CONSOLE_X_HTML
# dict for any display attributes passed by the initializer
self._attribs = attribs
#---------------------------------------------------------------------------
def build_result_table(
self, header_tups, main_data, nrows:int,
footer_tups=None, keys:dict=None, sortkeys=None,
from_str=False, sorted_row_idx=None, transpose_on=False,
row_numbers=None, right_cols=None,
badrows=None,
badcols=None,
styles=None,
callback=None):
"""
Step 1: save all parameters into self namespace, as build_result_table is broken down into several functions.
Step 2: if set_view has been called, only display the specified columns. if sort_values has been called, move those columns to the front.
Step 3: build a row mask. if the table is too large to display, pull the first and last rows for display. if a sorted index is present, apply it.
Step 4: measure the table.
groupby key columns will always be included. fit as many columns as possible into the console. if the display is for html, defaults have been set to a hard-coded console width. other console width is detected upon each display. if there are too many columns to display, a column break will be set.
Step 5: build the table.
the result table is broken down into three parts: headers, left side, and main table.
the headers are column names combined with left headers, or numbers if the table is transposed.
the left side is row numbers, row labels, or groupby keys.
the main table is first and last columns that would fit in the display.
use the DisplayColumn class to organize the data for future styling. If the table is abbreviated, include a row break in each column.
Step 6: style the table. html_on will let DisplayColumn and DisplayCell know how to “paint” the individual cells.
Step 7: if the header has multiple lines and/or needs to be transposed, fix it up now.
Step 8: transpose the table for final display. we build the table by column, but it needs to be displayed by row. if the table should be transposed, don’t rotate it - clean up the headers.
Step 9: pass the table string to our console or html routine for final output.
**TODO: reduce the measuring and building to one pass over the data. currently rendering time is not an issue. ~15ms
"""
return self.build_result_table_new(
header_tups,
main_data,
nrows,
keys=keys,
sortkeys=sortkeys,
from_str=from_str,
sorted_row_idx=sorted_row_idx,
transpose_on=transpose_on,
row_numbers=row_numbers,
right_cols=right_cols,
footer_tups=footer_tups,
badrows=badrows,
badcols=badcols,
styles=styles,
callback=callback)
# -------------------------------------------------------------------------------------
def build_row_mask(self, head, tail, total):
        # r_mask holds the required data indices in one array (whether or not a row/column gets split)
        # if necessary, a break will be inserted in the final DisplayColumn
        # if the number of rows requested >= size of the original data, all rows will be shown
if (head+tail >= total) or self.options.ROW_ALL is True:
r_mask = arange(total, dtype=np.int64)
rowbreak = None
else:
#split mask [0 to head] row_break [end-tail to end]
h = arange(head, dtype=np.int64)
t = arange(total-tail, total, dtype=np.int64)
r_mask = hstack((h,t))
# row break is the row number at which to insert a break
rowbreak = head
# save the row mask, will check for sort later
return r_mask, rowbreak
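    # Hedged usage sketch (illustrative only): with head=3, tail=2, total=10 the mask is
    # hstack((arange(3), arange(8, 10))) == [0, 1, 2, 8, 9] and rowbreak == 3, i.e. the
    # "..." break row goes after the first `head` rows; when head + tail >= total (or
    # ROW_ALL is set) the full arange(total) is returned and rowbreak is None.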
#---------------------------------------------------------------------------
def get_sort_col_idx(self, col_names):
        # returns a list of indices of columns to sort by
        # used to move sort key columns to the front, or to mask the columns from a col_set_view() call
sorted_col_mask = []
for name in col_names:
# extract the index from np where. the header names array has unique values.
current_idx_tup = np.where(self._header_names == name)
current_idx = current_idx_tup[0]
if len(current_idx) < 1:
self._missing_sort_cols = True
# print("Sort column",name,"missing from sorted dataset view. Use ds.col_set_view('*') to reset column view or ds.unsort() to remove sort columns.")
else:
current_idx = current_idx[0]
sorted_col_mask.append(current_idx)
return sorted_col_mask
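    # Illustrative note: with self._header_names == np.array(['a', 'b', 'c']), calling
    # get_sort_col_idx(['c', 'z']) returns [2] and sets self._missing_sort_cols because
    # 'z' is not among the displayed columns.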
#---------------------------------------------------------------------------
def get_bad_color(self):
'''
        Returns the default ColumnStyle (gray italic) to put in the badcols dictionary.
'''
return ColumnStyle(color=DisplayColumnColors.GrayItalic)
#---------------------------------------------------------------------------
def build_result_table_new(self,
header_tups,
main_data,
nrows:int,
keys:dict=None,
sortkeys=None,
from_str=False,
sorted_row_idx=None,
transpose_on=False,
row_numbers=None,
right_cols=None,
footer_tups=None,
badcols=None,
badrows=None,
styles=None,
callback=None):
'''
callback: func, default None
callback to signature
'''
#---------------------------
def trim_data(trimslice, main=True):
# as left, right, center are built, parts of headers need to be trimmed off
# main data, bottom headers need to be trimmed for left and right side
if main:
self._main_data = self._main_data[trimslice]
self._header_tups[-1] = self._header_tups[-1][trimslice]
self._header_names = self._header_names[trimslice]
#if self._footer_tups is not None:
# for i, frow in enumerate(self._footer_tups):
# self._footer_tups[i] = frow[trimslice]
#---------------------------
# MAIN TABLE INFO
self._header_tups = header_tups # list of lists of header tuples (see ColHeader in rt_enum)
self._main_data = main_data # list of all column arrays
self._nrows = nrows # number of rows
self._ncols = len(self._main_data) # number of columns
self._gbkeys = keys # dictionary of groupby keys:data (left hand columns)
# *** changing to list of names, only need to know number of label cols
self._right_cols = right_cols # dictionary of right hand columns -> data
# *** changing to list of names, only need to know number of right cols
self._gb_prefix = TypeRegister.DisplayOptions.GB_PREFIX # add star before groupby names
self._footer_name = ""
self._footer_tups = footer_tups
if self._footer_tups is not None:
self._footer_name = self._footer_tups[0][0].col_name
# FLAGS
self._transpose_on = transpose_on # boolean: will the table transposed
self._from_str = from_str # boolean: should simple text be forced (no HTML)
# SORTS / MASKS
        self._col_sortlist = sortkeys # list of names of columns which are sorted (for rearranging and styling)
self._sorted_row_idx = sorted_row_idx # ndarray of sort indices
self._missing_sort_cols = False # sort column names were provided, but not found in the data
# COLUMN STYLES
self._row_numbers = row_numbers
number_style = ColumnStyle(color=DisplayColumnColors.Rownum, align=DisplayJustification.Right)
label_style = ColumnStyle(color=DisplayColumnColors.Rownum)
gb_style = ColumnStyle(color=DisplayColumnColors.Groupby)
sort_style = ColumnStyle(color=DisplayColumnColors.Sort)
right_style = ColumnStyle(color=DisplayColumnColors.Groupby)
red_style = ColumnStyle(color=DisplayColumnColors.Red)
gray_style = ColumnStyle(color=DisplayColumnColors.GrayItalic)
# initialize width lists for console display
self._left_widths = []
self._main_first_widths = []
self._main_last_widths = []
self._right_widths = []
self._all_widths = []
# used for users requesting all columns to be displayed in console
self._column_sets = None
self._set_widths = None
# used to measure how many columns will fit
self._total_width = 0
# break flags, sorting masks
self._has_row_break = 0
self._has_col_break = 0
self._row_break = None
self._col_break = None
self._c_mask = None
self._r_mask = None
self._has_rownums = False
# extract names from last row in header tuples for a simple list of column names
self._header_names = np.array([h.col_name for h in self._header_tups[-1]])
self._num_header_lines = len(self._header_tups)
# detect HTML display for width calculation and future styling
self._html_on = False
if self._display_mode==DisplayDetectModes.HTML:
if from_str:
# max console display width in browser is smaller for simple table
self._console_x = 120
else:
self._html_on = True
else:
# option to remove colors from ipython display (hard to read in certain console windows)
# HTML for jupyter lab/notebook will always be styled, unless the table is called with a print statement
if self.options.NO_STYLES:
from_str = True
# -------------CALCULATE NUMBER OF ROWS TO BE DISPLAYED--------------------------------
# -------------------------------------------------------------------------------------
# head and tail functions will be taken into consideration here
# transposed tables will change this number significantly
totalrows = 0
# transposed
if self._transpose_on:
if DisplayTable.DebugMode: print("*table transposed")
# use the display option for number of columns to show in a transposed table
# if there aren't enough rows, display all
self._num_transpose_rows = min(self.options.COL_T,nrows)
totalrows = self._num_transpose_rows
head = self._num_transpose_rows
tail = 0
else:
if DisplayTable.DebugMode: print("*table untransposed")
totalrows=nrows
# force all rows to be shown
if self.options.ROW_ALL is True:
if DisplayTable.DebugMode is True: print("*forcing all rows")
head = nrows
tail = 0
# enforce number of rows to show based on display option
else:
head = self.options.HEAD_ROWS
tail = self.options.TAIL_ROWS
# if the table is empty, exit routine here
if totalrows is None:
#return "Table is empty (has no rows)."
totalrows = 0
# -----------BUILD A ROW MASK ---------------------------------------------------------
# -------------------------------------------------------------------------------------
self._r_mask, self._row_break = self.build_row_mask(head, tail, totalrows)
# possibly apply the mask (single arange or stacked arange) to the indirect sort
if sorted_row_idx is not None:
self._r_mask = self._sorted_row_idx[self._r_mask]
self._has_row_break = self._row_break is not None
# ---------------BUILD THE LEFT TABLE--------------------------------------------------
# -------------------------------------------------------------------------------------
# the left frame will ALWAYS be displayed
self._left_table_data = [] # list of arrays
left_header_names = []
# transposed
if self._transpose_on:
# left table will be all groupby key names + all column names in a single column
if self._gbkeys is not None:
nKeys = len(self._gbkeys)
header_column = self._header_names
trimslice = slice(nKeys,None,None)
trim_data(trimslice)
else:
# trim off 1 for row numbers, or leave as-is to prepare for row numbers callback
nKeys = 1 if self._row_numbers is None else 0
trimslice = slice(nKeys, None, None)
trim_data(trimslice, main=False)
header_column = self._header_names
self._left_table_data.append(header_column)
left_header_names.append("Fields:")
# untransposed
else:
# groupby keys take priority
if self._gbkeys is not None:
nKeys = len(self._gbkeys)
for i in range(nKeys):
# only last header line is handled here
left_header_names.append(self._gb_prefix+str( self._header_tups[-1][i][0] ))
self._left_table_data.append( self._main_data[i] )
# after adding, trim off left columns
trimslice = slice(nKeys,None,None)
trim_data(trimslice)
else:
self._has_rownums=True
# these will be used on the left and/or sent to row_label/row_number callback
row_numbers = self._r_mask
# regular row numbers
if self._row_numbers is None:
left_header_names.append(self._header_tups[-1][0][0])
trimslice = slice(1,None,None)
trim_data(trimslice, main=False)
self._left_table_data.append(row_numbers)
# custom row numbers
else:
# the entire left side of the table will be redefined by _row_numbers callback
# returns a single column, same size as row index sent in
func = self._row_numbers
name, numbers, number_style = func(row_numbers, number_style)
left_header_names.append(name)
self._left_table_data.append(numbers)
# display-generated row numbers and/or class-defined row_numbers function
if self._has_rownums or self._transpose_on:
color = DisplayColumnColors.Rownum
masked = True
style = number_style
# untransposed groupby columns
else:
color = None
masked = False
style = gb_style
left_footers = None
# TODO: move this to generic trim_data() util
if self._footer_tups is not None:
left_footers = [ frow[:len(left_header_names)] for frow in self._footer_tups ]
# build DisplayColumns and measure left hand side
# _left_table_columns - a list of DisplayColumn objects
# _left_widths - a list of ints
self._left_table_columns, self._left_widths = self.add_required_columns(left_header_names,
self._left_table_data,
left_footers,
gbkeys=self._gbkeys,
color=color,
masked=masked,
transpose=self._transpose_on,
style=style)
self._total_width += sum(self._left_widths)
# ---------------BUILD THE RIGHT TABLE-------------------------------------------------
# -------------------------------------------------------------------------------------
# if it exists, right frame will ALWAYS be displayed
self._right_table_columns = []
self._right_table_data = [] # raw data
right_header_names = []
if self._right_cols is not None:
right_footers = None
nKeys = len(self._right_cols)
right_header_names = [ t[0] for t in self._header_tups[-1][-nKeys:] ]
self._right_table_data = self._main_data[-nKeys:]
# trim off right columns from main data
# also trim footers
trimslice = slice(None,-nKeys,None)
trim_data(trimslice)
if self._footer_tups is not None:
right_footers = [ frow[-nKeys:] for frow in self._footer_tups ]
# build DisplayColumns and measure right hand side
# _right_table_columns - a list of DisplayColumn objects
# _right_widths - a list of ints
self._right_table_columns, self._right_widths = self.add_required_columns(right_header_names,
self._right_table_data,
right_footers)
self._total_width += sum(self._right_widths)
# ---------------BUILD THE MAIN TABLE--------------------------------------------------
if self._transpose_on:
self._main_table_columns = self.build_transposed_columns(self._main_data)
else:
# all columns in console
if self.options.COL_ALL and ((self._html_on is False) or self._from_str) and self._num_header_lines == 1:
self._column_sets, self._set_widths = self.all_columns_console(
self._console_x, self._total_width, self._header_names, self._main_data)
else:
if self._footer_tups is not None:
# start at beginning of main columns
left_offset = len(self._left_table_columns)
frows = [ f[left_offset:] for f in self._footer_tups ]
footer_arr = []
for i in range(len(frows[0])):
# list of value for each line, for each column
# e.g. if column had sum 6, mean 2.00, its list would be ['6','2.00']
footer_arr.append( [f[i][0] for f in frows])
else:
footer_arr = None
self._main_table_columns, self._main_first_widths, self._main_last_widths = self.fit_max_columns(
self._header_names, self._main_data, self._total_width, self._console_x, footer_arr)
# -------------------------STYLE THE TABLE--------------------------------------------
# ------------------------------------------------------------------------------------
# recolor badrows
if badrows is not None:
# if rmask is set, it is a lookup to the actual row being used
# fixup badrows
rmask=self._r_mask
rbreak=self._row_break
# use the head + tail to calculate how many relevant rows
ldata = head + tail
# build a new badrows with the correct line
newbadrows={}
for k,v in badrows.items():
if rmask is not None:
                    # look for our number in the mask
#print("rmask", rmask, "k",k)
bmask= rmask==k
if isinstance(bmask, np.ndarray):
btf = bool_to_fancy(bmask)
if len(btf) > 0:
# we found the number, now see if its before or after break
loc = btf[0]
if rbreak is not None:
if loc < rbreak:
newbadrows[loc]= v
else:
newbadrows[loc + 1]= v
else:
newbadrows[loc] =v
elif k < ldata:
# as long as there is no mask, just check to see if we are in range
# if rmask does not exist, then neither does rbreak
newbadrows[k]= v
if len(newbadrows) > 0:
badrows = newbadrows
else:
badrows = None
# groupby
if self._gbkeys is not None and self._transpose_on is False:
for c in self._left_table_columns:
c.paint_column(gb_style, badrows=badrows)
# right margin
if self._right_cols is not None and self._transpose_on is False:
for c in self._right_table_columns:
c.paint_column(right_style, badrows=badrows)
# sort
if self._col_sortlist is not None:
if self._column_sets is None:
if not self._missing_sort_cols:
for i, c in enumerate(self._col_sortlist):
self._main_table_columns[i].paint_column(sort_style, badrows=badrows)
else:
for i, c in enumerate(self._col_sortlist):
self._column_sets[0][i].paint_column(sort_style, badrows=badrows)
# custom left columns
if self._row_numbers is not None:
self._left_table_columns[-1].style_column(number_style)
# general purpose styles
# move to main build / measure column routine
if styles is not None:
# put in a callback for now, maybe pass the headers being displayed?
# styles = styles()
for i, col in enumerate(self._main_table_columns):
s = styles.get(col.header,None)
if s is not None:
self._main_table_columns[i].style_column(s)
# color entire column
if badcols is not None or badrows is not None:
for i, col in enumerate(self._main_table_columns):
if badcols is not None:
color_style = badcols.get(col.header,None)
else:
color_style=None
if color_style is not None or badrows is not None:
self._main_table_columns[i].style_column(color_style, badrows=badrows)
# Attribute-based styling
# -----------------------
# Color margin columns and footer rows
if len(self._attribs) > 0 and self._column_sets is None:
main_header_names = [c.header for c in self._main_table_columns]
num_footer_rows = self._attribs.get(
DisplayAttributes.NUMBER_OF_FOOTER_ROWS, 0)
for i, name in enumerate(main_header_names):
is_margin_column = name in self._attribs.get(
DisplayAttributes.MARGIN_COLUMNS, [])
if is_margin_column:
self._main_table_columns[i].paint_column(DisplayColumnColors.Groupby)
if num_footer_rows:
self._main_table_columns[i].paint_column(DisplayColumnColors.Groupby,
slice(-num_footer_rows, None, None))
# Right justify row label(s) of footer rows
for i, name in enumerate(left_header_names):
if num_footer_rows:
self._left_table_columns[i].align_column(DisplayJustification.Right,
slice(-num_footer_rows, None, None))
# -----------------------FIX / TRANSLATE TABLE HEADERS---------------------------------
# -------------------------------------------------------------------------------------
if self._num_header_lines > 1 and self._transpose_on is False and self._col_break is not None:
self.fix_multiline_headers()
final_footers = self.fix_multiline_footers(plain=from_str, badcols=badcols, badrows=badrows)
if self._column_sets is None:
self._all_widths = self._left_widths + self._main_first_widths + self._main_last_widths + self._right_widths
# all columns requested in console
else:
# left / right columns will always be included, so need to prepend/append their widths to the final widths
for idx, main_widths in enumerate(self._set_widths):
self._set_widths[idx] = self._left_widths + main_widths + self._right_widths
final_headers, final_footers = self.build_final_ends(plain=from_str, badcols=badcols, badrows=badrows)
# -------------------------FINAL STYLING----------------------------------------------
# ------------------------------------------------------------------------------------
# this is a good place to style for operations that span the whole table
if self._num_header_lines > 1:
if self._column_sets is None:
for i, cell in enumerate(self._header_tups[-1]):
color = DisplayColumnColors.Multiset_col_a + (cell.color_group % 2)
self._main_table_columns[i].paint_column(color)
else:
for set_index, set in enumerate(self._header_sets):
for i, cell in enumerate(set[-1]):
color = DisplayColumnColors.Multiset_col_a + (cell.color_group % 2)
self._column_sets[set_index][i].paint_column(color)
# -------------SUB FUNCTION WITH CALLBACK FOR CELL STYLING----------------------------
# ------------------------------------------------------------------------------------
def style_cells(listcols, stylefunc, rows=True, callback=None, location=None):
# listcols is a list of DisplayColumn objects
# stylefunc is a string of style function name: plain_string_list or styled_string_list
# turns columns into formatted strings, if rows is true, each row in its own list
# returns the formatted strings
# give user a chance to modify styling
# if the user returns a list of strings, we do not bother to style ourselves
if callback:
# in this callback, the style will indicate plain or not
# the location is either left, right, or main
# if rows is False, the data is rotated 90 (transposed)
# if html is True then expecting html styling vs console styling
result = callback(listcols, style=stylefunc, location=location, rows=rows, html=self._html_on)
if isinstance(result, list):
# the user can take over and return a list of string with html or console color styling
return result
table_strings = [ getattr(c, stylefunc)() for c in listcols ]
if rows:
table_strings = [ list(row) for row in zip(*table_strings) ]
# return a list of string with html or console color styling
return table_strings
# -------------------------BUILD AND ROTATE THE TABLE---------------------------------
# ------------------------------------------------------------------------------------
# plain
if from_str:
stylefunc = 'plain_string_list'
# ipython / html
else:
stylefunc = 'styled_string_list'
# call style function to build final strings from stored styles
# left / right will always be rotated
right_table_strings = []
# allow callback for labels and margins
left_table_strings = style_cells(self._left_table_columns, stylefunc, callback=callback, location='left')
if self._right_cols is not None:
right_table_strings = style_cells(self._right_table_columns, stylefunc, callback=callback, location='right')
# handle single, multiple sets the same way
if self._column_sets is None:
final_column_sets = [self._main_table_columns]
else:
final_column_sets = self._column_sets
# call style function to build final strings from stored styles
main_table_strings = []
as_rows = not self._transpose_on
for col_set in final_column_sets:
col_set = style_cells(col_set, stylefunc, as_rows, callback=callback, location='main')
main_table_strings.append(col_set)
# single table
if self._column_sets is None:
main_table_strings = main_table_strings[0]
if self._footer_tups is None:
final_footers = []
if len(final_footers) == 0:
final_footers = None
if self._html_on:
result_table = DisplayHtmlTable(final_headers, left_table_strings, main_table_strings, right_columns=right_table_strings, footers=final_footers)
else:
result_table = DisplayConsoleTable(self._all_widths, final_headers, left_table_strings, main_table_strings, right_table_strings, final_footers)
return result_table.build_table()
# print a table for each row
else:
result_tables = []
for idx, main_strings in enumerate(main_table_strings):
result_tables.append( DisplayConsoleTable(self._set_widths[idx], final_headers[idx], left_table_strings, main_strings, right_table_strings, None).build_table() )
return "\n\n".join(result_tables)
#-----------------------------------------------------------------------
def fix_repeated_keys(self, columns, repeat_string='.'):
'''
Display a different string when the first column of a multikey groupby is repeated.
TODO: add support for the same behavior with repeated keys in multiple columns.
'''
column_arrays = [c.data for c in columns]
for idx, keylist in enumerate(column_arrays):
if idx == 0:
pkey = column_arrays[0]
lenk = len(pkey)
if lenk > 1:
lastkey = str(pkey[0]) # string inside of DisplayCell object
for i in range(1, lenk):
item1 = str(pkey[i])
if item1 == lastkey:
pkey[i].string=repeat_string
else:
lastkey=item1
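    # Illustrative sketch: for a multikey groupby whose first label column reads
    # ['A', 'A', 'B', 'B'], the repeated strings are rewritten so the column displays
    # as ['A', '.', 'B', '.']; only the first key column is handled for now (see TODO above).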
# ------------------------------------------------------------------------------------
def build_final_headers_html(self, plain=False):
'''
Translates the tables header tuples into HTML tags.
Note: this routine is very similar to build_final_headers_console.
Keeping them separate for readability.
'''
final_headers = []
span = len(self._left_table_columns)
pad_string = "<td colspan='"+str(span)+"' class='lg'></td>"
for i, line in enumerate(self._header_tups):
styled_line = []
# add padding to every row except the last
if i != len(self._header_tups)-1:
styled_line.append(pad_string)
for j, cell in enumerate(line):
# color correction for multiline
color = DisplayColumnColors.Multiset_head_a + (cell.color_group % 2)
new_cell = DisplayCell(cell.col_name, color=color, html=self._html_on, colspan=cell.cell_span)
new_cell.paint_cell()
# match the alignment to main table for the last row
align = DisplayJustification.Center
if i == len(self._header_tups)-1:
pass
# changed default - multiline header cells will always be centered
#align = self._main_table_columns[j]._align
new_cell.prefix += " "+DisplayColumn.align_html(align)
# BUG: justification doesn't stick
# build styled string
new_cell = new_cell.display()
styled_line.append(new_cell)
final_headers.append(styled_line)
# add left table headers to last row
for c in reversed(self._left_table_columns):
final_headers[-1].insert(0,c.build_header())
return final_headers
# ------------------------------------------------------------------------------------
def build_final_headers_console(self, plain=False):
'''
**specifically for multi-line
Translates the tables header tuples into console strings with spaces for padding
Note: this routine is very similar to build_final_headers_html.
Keeping them separate for readability.
'''
final_headers = []
span = len(self._left_table_columns)
pad_cell = ColHeader("",1,0)
column_margin = 3
for i, line in enumerate(self._header_tups[:-1]):
styled_line = []
width_index = 0
# add padding to every row except the last
# number of pad cells needs to be the number of left hand columns
if i != len(self._header_tups)-1:
for j in range(span):
self._header_tups[i].insert(0,pad_cell)
for j,cell in enumerate(line):
# fix multiline cell colors
color = DisplayColumnColors.Multiset_head_a + (cell.color_group % 2)
new_cell = DisplayCell(cell.col_name, color=color, html=self._html_on)
# get the width of bottom cells in same group to fix alignment
combined_width = sum(self._all_widths[width_index:width_index+cell.cell_span])
margin_width = ((cell.cell_span-1) * column_margin)
combined_width += margin_width
new_cell.string = DisplayColumn.align_console_string(new_cell.string, combined_width, align=DisplayJustification.Center)
width_index += cell.cell_span
# apply final styling
new_cell.paint_cell()
new_cell = new_cell.display(plain=plain)
styled_line.append(new_cell)
final_headers.append(styled_line)
# use DisplayColumns to style bottom row
# bug in alignment if sent through the same loop as other headers
bottom_headers = []
bottom_colors = [DisplayColumnColors.Multiset_head_a + (cell.color_group % 2) for cell in self._header_tups[-1]]
for c in self._left_table_columns:
bottom_headers.append(c.build_header(plain=plain))
for i,c in enumerate(self._main_table_columns):
bottom_headers.append(c.build_header(bottom_colors[i], plain=plain, align=DisplayJustification.Center))
final_headers.append(bottom_headers)
return final_headers
# ------------------------------------------------------------------------------------
def build_final_ends(self, plain=False, badcols=None, badrows=None):
'''
'''
final_headers = []
final_footers = []
gray_style = DisplayColumnColors.GrayItalic
# transposed headers
if self._transpose_on:
transposed_headers = []
for idx, width in enumerate(self._all_widths[len(self._left_table_columns):]):
new_cell = DisplayCell(str(idx), color=DisplayColumnColors.Rownum, html=self._html_on, colspan=1)
new_cell.paint_cell()
if self._html_on: # and from_str is False
pass
else:
new_cell.string = DisplayColumn.align_console_string(new_cell.string, width, align=DisplayJustification.Right)
transposed_headers.append(new_cell.display(plain=plain))
for c in reversed(self._left_table_columns):
if self._from_str:
transposed_headers.insert(0, c.build_header(plain=True))
else:
transposed_headers.insert(0, c.build_header(DisplayColumnColors.Rownum))
final_headers.append(transposed_headers)
# untransposed headers
else:
# fix multiline
if self._num_header_lines > 1:
if self._html_on: #and from_str is False:
final_headers = self.build_final_headers_html(plain=plain)
else:
final_headers = self.build_final_headers_console(plain=plain)
final_footers = self.fix_multiline_footers(plain=plain)
# default to the headers constructed in DisplayColumns
else:
if self._column_sets is None:
bottom_headers = []
bottom_footers = []
for c in self._left_table_columns + self._main_table_columns + self._right_table_columns:
# NOTE: you can choose a different final color with the final_color keyword here
# see rt_enum.DisplayColumnColors
bottom_headers.append(c.build_header(plain=plain))
if badcols is not None and badcols.get(c.header, None) is not None:
bottom_footers.append(c.build_footer(final_color = gray_style, plain=plain))
else:
# footers for each column will be in a list
bottom_footers.append(c.build_footer(plain=plain))
final_headers.append(bottom_headers)
final_footers = [ list(frow) for frow in zip(*bottom_footers) ]
# all columns requested in console
                # TODO: collapse these into one function
else:
for set in self._column_sets:
current_row = []
final_headers.append([])
for c in self._left_table_columns:
current_row.append(c.build_header(plain=plain))
for c in set:
current_row.append(c.build_header(plain=plain))
for c in self._right_table_columns:
current_row.append(c.build_header(plain=plain))
final_headers[-1].append(current_row)
return final_headers, final_footers
def footers_to_string(self, footer_row):
'''
Takes row of footer tuples and turns into string list.
For adding/styling multiline footers.
'''
pass
# ------------------------------------------------------------------------------------
def fix_multiline_headers(self):
'''
Fixes multi-line headers if a column break was present.
cell_spans in ColHeader might need to be changed.
Need use cases for more than two lines, but the same loop should work.
'''
top_header_row = self._header_tups[0]
break_tup = ColHeader("",1,0)
# left side
col_idx = 0
num_left_cols = len(self._main_first_widths)
for i, top_header in enumerate(top_header_row):
current_span = top_header.cell_span
# keep walking through
if (col_idx + current_span) < num_left_cols-1:
col_idx += current_span
else:
new_left_head = top_header_row[:i]
# last span fit
if (col_idx + current_span) == num_left_cols-1:
#print("last left tuple fit")
last_tup = top_header
# trim last span
else:
#print("fixing left span")
new_span = num_left_cols - col_idx - 1
last_tup = ColHeader(top_header.col_name, new_span, top_header.color_group)
new_left_head.append(last_tup)
new_left_head.append(break_tup)
break
# right side
col_idx = 0
num_right_cols = len(self._main_last_widths)
for i, top_header in enumerate(reversed(top_header_row)):
current_span = top_header.cell_span
# keep walking through
if (col_idx + current_span) < num_right_cols:
col_idx += current_span
else:
new_right_head = []
if i != 0:
new_right_head = top_header_row[-i:]
# last span needs to get changed, build a new tuple
if (col_idx + current_span) > num_right_cols:
#print("fixing right span")
new_span = num_right_cols - col_idx
last_tup = ColHeader(top_header.col_name, new_span, top_header.color_group)
# last span fits
else:
#print("last right tuple fit")
last_tup = top_header
new_right_head.insert(0,last_tup)
break
self._header_tups[0] = new_left_head + new_right_head
# final row (no span changes)
bottom_left = self._header_tups[-1][:num_left_cols-1]
bottom_left.append(break_tup)
bottom_right = self._header_tups[-1][-num_right_cols:]
self._header_tups[-1] = bottom_left + bottom_right
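    # Illustrative note: when a column break is present, the spanning ColHeader that
    # straddles the break is rebuilt with a reduced cell_span on each side, so the
    # multi-line header still lines up with the surviving left and right column widths.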
# -------------------------------------------------------------------------------------
def fix_multiline_footers(self, plain=False, badcols=None, badrows=None):
'''
'''
final_footers = []
gray_style = DisplayColumnColors.GrayItalic
# transposed
if self._transpose_on:
raise NotImplementedError
# untransposed
else:
if self._column_sets is None:
bottom_footers = []
for c in self._left_table_columns:
bottom_footers.append(c.build_footer(plain=plain))
for c in self._main_table_columns:
if badcols is not None and badcols.get(c.header, None) is not None:
bottom_footers.append(c.build_footer(final_color = gray_style , plain=plain))
else:
bottom_footers.append(c.build_footer(plain=plain))
final_footers.append(bottom_footers)
else:
raise NotImplementedError
return final_footers
# -------------------------------------------------------------------------------------
def build_column(self, header, column, masked=False, footer=None, style=None):
'''
All DisplayColumns built for final display will funnel through this function.
Any row breaks will be added here if necessary.
'''
# ask the data how it would like to be displayed
display_format, func = get_array_formatter(column)
# merge custom styles or callback styles with default array styles
if style is not None:
if style.width is not None:
display_format.maxwidth = style.width
# format the footer with the same function as the column data
# UPDATE: footers should always be string now, might be different type than column above
if footer is not None:
if isinstance(footer, list):
pass
elif not isinstance(footer, str):
footer = func(footer, display_format)
#print('footer was not none')
# transposed data will have the same alignment
if self._transpose_on:
#align = DisplayJustification.Right
display_format.justification = DisplayJustification.Right
# prevents us from creating a massive array of break strings
mask = self._r_mask
if masked is True:
mask = arange(len(column))
# mask is now an array of numbers
# when looping over a column
cell_list = []
# add the row break character to each column
if self._row_break is not None:
# get the head portion of the column
cell_list = [DisplayCell(func(item, display_format), item, html=self._html_on) for item in column[mask[:self._row_break]]]
# put in the break
cell_list += [DisplayCell("...", "...", html=self._html_on)]
# get the tail portion
cell_list += [DisplayCell(func(item, display_format), item, html=self._html_on) for item in column[mask[self._row_break:]]]
else:
cell_list = [DisplayCell(func(item, display_format), item, html=self._html_on) for item in column[mask]]
# TJD NOTE this is a natural location to color code the inner grid
#cell_list[1].string="<td bgcolor=#00FFF>" + cell_list[1].string # + "</td>"
#cell_list[1].color = DisplayColumnColors.GrayItalic
# consider adding the break character here (one full, one for splits)
new_column = DisplayColumn(cell_list, # build a display cell for every value in the masked column
row_break = self._row_break, # a break character will be added for final table if necessary. hang on to index for now.
color = None,
header = header,
#align = display_format.justification,
html = self._html_on,
itemformat = display_format,
footer=footer)
return new_column, footer
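    # Hedged sketch of the row-break behaviour: with self._row_break == 3 and a row mask
    # of [0, 1, 2, 8, 9], build_column emits cells for rows 0-2, a literal "..." cell,
    # then cells for rows 8-9, so the rendered column mirrors the head/tail row mask.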
# -------------------------------------------------------------------------------------
def add_required_columns(self, header_names, table_data, footers, masked=False, gbkeys=None,
transpose=False, color=None, style=None):
'''
header_names : list of string header names (not tuples)
table_data : list of arrays
footers : list of footer rows (lists of ColHeader tuples)
masked : flag to indicate that the column has already been trimmed (build column does not need to apply a row mask)
gbkeys : dictionary of groupby keys - columns need to be painted differently
'''
table_columns = []
widths = []
footerval = None
# TODO: support multikey here
#if footers is not None:
# footers = footers[0]
for i, column in enumerate(table_data):
header = header_names[i]
if footers is not None:
footerval = [ f[i][0] for f in footers ]
#footerval = footers[i][0]
new_column, _ = self.build_column(header, column, masked=masked, style=style, footer=footerval)
new_column._r_mask = self._r_mask
# possibly paint entire column here
if color is not None:
new_column.paint_column(color)
widths.append(new_column._max_width)
table_columns.append(new_column)
if gbkeys is not None and transpose:
new_column.paint_column(DisplayColumnColors.Groupby, col_slice=slice(None, len(gbkeys)))
# don't repeat labels in multikey groupby
if gbkeys is not None and len(gbkeys)>1:
self.fix_repeated_keys(table_columns, repeat_string='.')
return table_columns, widths
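    # Hedged note: the return value is a pair (list of DisplayColumn, list of int widths);
    # callers accumulate sum(widths) into self._total_width before fitting the remaining
    # main columns against the console width.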
# -------------------------------------------------------------------------------------
def fit_max_columns(self, headers, columns, total_width, console_width, footers=None):
'''
The display will attempt to fit as many columns as possible into the console.
HTML display has been assigned a default value for self._console_x (see DisplayTable.__init__)
If the user changes their self.options.COL_ALL to True, all columns will be displayed on the same line.
*note: this will break console display for large tables and should only be used in jupyter lab now.
*in progress
If the user requested all columns to be shown - regardless of width, the display will split them up into
separate views with the maximum columns per line.
'''
# ----------------------------------
def build_break_column(nrows):
#Builds a break column using the number of rows.
#build_column() will add a row_break if necessary.
breakstring = "..."
col = np.full(nrows,breakstring).view(TypeRegister.FastArray)
footer = [breakstring]
if self._footer_tups is not None:
footer = footer*len(self._footer_tups)
return self.build_column(breakstring, col, masked=True, footer=footer)[0]
# ----------------------------------
force_all_columns = self.options.COL_ALL
left_columns = []
right_columns = []
first_widths = []
last_widths = []
# check to see if forcing all columns to be displayed
colbegin = 0
colend = len(columns)-1
# possibly build footers
f_first = None
f_last = None
has_footers = False
if footers is not None:
has_footers = True
#self._console_x -= 80
while ((total_width <= console_width) or force_all_columns is True) and (colbegin <= colend):
# pull from the front
c = columns[colbegin]
h_first = headers[colbegin]
if has_footers: f_first = footers[colbegin]
first_col, f_first = self.build_column(h_first, c, footer=f_first)
d_first_width = first_col.display_width
first_width = first_col._max_width
# pull from the back
c = columns[colend]
h_last = headers[colend]
if has_footers: f_last = footers[colend]
last_col, f_last = self.build_column(h_last, c, footer=f_last)
d_last_width = last_col.display_width
last_width = last_col._max_width
# if adding front column breaks console max
if ((total_width + d_first_width) > console_width) and force_all_columns is False:
self._col_break = colbegin
break
# front column fit
else:
#print("add to front",first_col._header)
first_widths.append(first_width)
left_columns.append(first_col)
colbegin += 1
total_width += d_first_width
# break if the front just added the next back column
# all columns were added
if colbegin > colend:
break
# break early if max has been reached
if ((total_width + d_last_width) > console_width) and force_all_columns is False:
#print("max reached before last checked")
# if not all columns were added, set a column break
if (len(left_columns) + len(right_columns)) < len(columns):
self._col_break = colbegin
break
# add column to the back list
#print("add to back",last_col._header)
last_widths.insert(0, last_width)
right_columns.insert(0, last_col)
colend-=1
total_width += d_last_width
if DisplayTable.DebugMode: print("total_width",total_width)
if DisplayTable.DebugMode: print("console_x",console_width)
if DisplayTable.DebugMode: print("colbegin",colbegin)
if DisplayTable.DebugMode: print("colend",colend)
# add the column break
if self._col_break is not None:
break_col = build_break_column(len(self._r_mask))
first_widths.append(break_col._max_width)
left_columns.append(break_col)
# returns list of display columns (might have center gap), first widths, last widths
# keep first and last widths separate to fix other headers spanning multiple columns
return left_columns + right_columns, first_widths, last_widths
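    # Illustrative note: columns are consumed alternately from the front (colbegin) and
    # the back (colend) until the running display width would exceed the console width;
    # if not every column fits, self._col_break records where the "..." break column is
    # inserted between the kept head and tail columns.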
# -------------------------------------------------------------------------------------
def all_columns_console(self, console_width, left_offset, headers, columns):
current_width = left_offset
column_sets = [[]]
column_widths = [[]]
for col_index, c in enumerate(columns):
h = headers[col_index]
col, _ = self.build_column(h, c)
d_width = col.display_width # width with column padding (for measuring)
width = col._max_width # actual width of widest string
# if too large for console, move to the next line
if (current_width + d_width) > console_width:
column_sets.append([])
column_widths.append([])
current_width = left_offset
column_sets[-1].append(col)
column_widths[-1].append(width)
current_width += d_width
return column_sets, column_widths
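    # Hedged sketch: with console_width=80, left_offset=0 and three columns whose padded
    # display widths are 30 each, the first two columns land in one set and the third
    # starts a new set, so each set fits on its own console line.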
# -------------------------------------------------------------------------------------
def all_columns_console_multiline(self, console_width, left_offset, headers, columns):
'''
** not implemented
only supports two-line headers
'''
current_width = left_offset
column_sets = [[]]
column_widths = [[]]
top_headers = [[]]
bottom_headers = [[]]
# keep each group together. for now this will only work for two line multi-column
bottom_index = 0
for header_tup in headers[0]:
span = header_tup.cell_span
current_cols = []
current_headers = []
current_d_widths = []
current_widths = []
# build column from each cell in bottom headers row
for i in range(span):
bottom_tup = headers[-1][bottom_index+i]
c = columns[bottom_index+i]
h = bottom_tup.col_name
col, _ = self.build_column(h, c)
current_cols.append(col)
current_d_widths.append(col.display_width)
current_widths.append(col._max_width)
current_headers.append(bottom_tup)
# if console width is broken, create a new column set
if (current_width + sum(current_d_widths)) > console_width:
column_sets.append([])
column_widths.append([])
top_headers.append([])
bottom_headers.append([])
current_width = left_offset
# add info for all columns in current group
for idx, col in enumerate(current_cols):
column_sets[-1].append(col)
column_widths[-1].append(current_widths[idx])
bottom_headers[-1].append(current_headers[idx])
top_headers[-1].append(header_tup)
current_width += sum(current_d_widths)
bottom_index += span
header_sets = [ [top_headers[i], bottom_headers[i]] for i in range(len(top_headers)) ]
return column_sets, column_widths, header_sets
# -------------------------------------------------------------------------------------
def build_transposed_columns(self, columns):
'''
Transposed column data needs to be constructed differently. Widths will be
calculated as a maximum items in multiple arrays.
At the end of the table's construction, it will remain as a list of rows.
'''
#t_max = min(self.options.COL_T, self._nrows)
t_max = min(15, self._nrows)
# build column classes for EVERY column in the t table
t_columns = []
# groupby columns appear in the main table instead of the left table
if self._gbkeys is not None:
for gb in self._gbkeys.values():
new_col = gb[:t_max]
new_col, _ = self.build_column("", new_col, masked = True)
new_col.paint_column(DisplayColumnColors.Groupby)
t_columns.append(new_col)
for column in columns:
new_col = column[:t_max]
new_col, _ = self.build_column("", new_col, masked = True)
t_columns.append(new_col)
# find the max width at each index (not very efficient)
t_widths = []
for i in range(t_max):
max_width = len(max([c[i].string for c in t_columns], key=len))
t_widths.append(max_width)
# fit maximum number of columns in the console window
t_display_widths = [w + DisplayConsoleTable.column_spacing for w in t_widths]
total_width = self._total_width
max_t_cols = 0
for w in t_display_widths:
total_width += w
if total_width > self._console_x:
break
else:
max_t_cols += 1
self._main_first_widths = t_widths[:max_t_cols]
# trim columns
for i, col in enumerate(t_columns):
t_columns[i]._data = col._data[:max_t_cols]
self._nrows = self._ncols
self._ncols = max_t_cols
# set widths for individual cells in DisplayColumns
for i,t in enumerate(t_columns):
t_columns[i]._max_t_widths = self._main_first_widths
return t_columns
#---------------------------------------
@staticmethod
def display_detect():
'''
Call to redetect the display mode.
This is useful when launching a qtconsole from jupyter lab.
'''
DisplayDetect.get_display_mode()
@staticmethod
def display_rows(rows=None):
'''
Parameters
----------
rows: defaults to None. How many top and bottom rows to display in a Dataset.
set to None to return the current rows.
            Controlled by Display.options.HEAD_ROWS/TAIL_ROWS
See Also
--------
Display.options.TAIL_ROWS
Display.options.HEAD_ROWS
Examples
--------
rt.display_rows(20)
'''
if rows is None:
return DisplayOptions.HEAD_ROWS, DisplayOptions.TAIL_ROWS
DisplayOptions.TAIL_ROWS = rows
DisplayOptions.HEAD_ROWS = rows
@staticmethod
def display_precision(precision=2):
'''
Parameters
----------
precision: defaults to 2. How many places after the decimal to display.
set to None to return the current precision.
Examples
--------
rt.display_precision(4)
'''
if precision is None:
return DisplayOptions.PRECISION
DisplayOptions.PRECISION=precision
DisplayOptions.P_THRESHOLD = None
DisplayOptions.p_threshold()
@staticmethod
def display_threshold(threshold=6):
'''
Parameters
----------
        threshold: defaults to 6. How many powers of 10 before flipping to scientific notation.
set to None to return the current threshold.
Notes
-----
E_THRESHOLD = 6 # power of 10 at which the float flips to scientific notation 10**+/-
E_PRECISION = 3 # number of digits to display to the right of the decimal (sci notation)
Examples
--------
rt.display_threshold(6)
'''
if threshold is None:
return DisplayOptions.E_THRESHOLD
DisplayOptions.E_THRESHOLD=threshold
DisplayOptions.E_MIN = None
DisplayOptions.E_MAX = None
DisplayOptions.e_min()
DisplayOptions.e_max()
@staticmethod
def display_html(html=None):
'''
Parameters
----------
html: defaults to None. Set to True to force html.
set to None to return the current mode.
'''
if html is None:
return DisplayOptions.HTML_DISPLAY
DisplayOptions.HTML_DISPLAY=html
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
class DisplayHtmlTable:
def __init__(self, headers, left_columns, main_columns, right_columns: Optional[list] = None, footers=None, axis_labels=False):
if right_columns is None:
right_columns = list()
self.headers = headers
self.left_columns = left_columns
self.main_columns = main_columns
self.right_columns = right_columns
self.footers = footers
def build_table(self):
def join_row_section(rowstrings, idx=None):
joined = ""
numrows = len(rowstrings)
if numrows > 0:
if idx is None:
joined = "".join(rowstrings)
elif i < numrows:
joined = "".join(rowstrings[i])
return joined
html_string_list = []
display_id = str(
|
np.random.randint(9999)
|
numpy.random.randint
|
import numpy as np
from numpy.fft import fft, ifft
norm = None # or "orhto"
x_even =
|
np.array([8, 9, 1, 3])
|
numpy.array
|
# -*- coding: utf-8 -*-
""" EvaluatorMultiPlayers class to wrap and run the simulations, for the multi-players case.
Lots of plotting methods, to have various visualizations. See documentation.
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
# Generic imports
import sys
import pickle
USE_PICKLE = False #: Should we save the figure objects to a .pickle file at the end of the simulation?
from copy import deepcopy
from re import search
import random
import time
# Scientific imports
import numpy as np
import matplotlib.pyplot as plt
import inspect
def _nbOfArgs(function):
try:
        return len(inspect.signature(function).parameters)
    except AttributeError:  # Python 2 fallback, where inspect.signature does not exist
return len(inspect.getargspec(function).args)
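# Hedged usage note (illustrative only): with the signature-based branch above,
# _nbOfArgs(lambda t, arms: None) == 2; the helper is presumably used to adapt calls
# to user-supplied callbacks depending on how many arguments they accept.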
# Local imports, libraries
try:
from .usejoblib import USE_JOBLIB, Parallel, delayed
from .usetqdm import USE_TQDM, tqdm
# Local imports, tools and config
from .plotsettings import BBOX_INCHES, signature, maximizeWindow, palette, makemarkers, add_percent_formatter, wraptext, wraplatex, legend, show_and_save, nrows_ncols, addTextForWorstCases, violin_or_box_plot, adjust_xticks_subplots
from .sortedDistance import weightedDistance, manhattan, kendalltau, spearmanr, gestalt, meanDistance, sortedDistance
from .fairnessMeasures import amplitude_fairness, std_fairness, rajjain_fairness, mean_fairness, fairnessMeasure, fairness_mapping
# Local imports, objects and functions
from .CollisionModels import onlyUniqUserGetsReward, noCollision, closerUserGetsReward, rewardIsSharedUniformly, defaultCollisionModel, full_lost_if_collision
from .MAB import MAB, MarkovianMAB, ChangingAtEachRepMAB, NonStationaryMAB, PieceWiseStationaryMAB, IncreasingMAB
from .ResultMultiPlayers import ResultMultiPlayers
from .memory_consumption import getCurrentMemory, sizeof_fmt
except ImportError:
from usejoblib import USE_JOBLIB, Parallel, delayed
from usetqdm import USE_TQDM, tqdm
# Local imports, tools and config
from plotsettings import BBOX_INCHES, signature, maximizeWindow, palette, makemarkers, add_percent_formatter, wraptext, wraplatex, legend, show_and_save, nrows_ncols, addTextForWorstCases, violin_or_box_plot, adjust_xticks_subplots
from sortedDistance import weightedDistance, manhattan, kendalltau, spearmanr, gestalt, meanDistance, sortedDistance
from fairnessMeasures import amplitude_fairness, std_fairness, rajjain_fairness, mean_fairness, fairnessMeasure, fairness_mapping
# Local imports, objects and functions
from CollisionModels import onlyUniqUserGetsReward, noCollision, closerUserGetsReward, rewardIsSharedUniformly, defaultCollisionModel, full_lost_if_collision
from MAB import MAB, MarkovianMAB, ChangingAtEachRepMAB, NonStationaryMAB, PieceWiseStationaryMAB, IncreasingMAB
from ResultMultiPlayers import ResultMultiPlayers
from memory_consumption import getCurrentMemory, sizeof_fmt
REPETITIONS = 1 #: Default nb of repetitions
DELTA_T_PLOT = 50 #: Default sampling rate for plotting
COUNT_RANKS_MARKOV_CHAIN = False #: If true, count and then print a lot of statistics for the Markov Chain of the underlying configurations on ranks
MORE_ACCURATE = False #: Use the count of selections instead of rewards for a more accurate mean/var reward measure.
MORE_ACCURATE = True #: Use the count of selections instead of rewards for a more accurate mean/var reward measure.
plot_lowerbounds = True #: Default is to plot the lower-bounds
USE_BOX_PLOT = True #: True to use boxplot, False to use violinplot (default).
nb_break_points = 0 #: Default nb of random events
FINAL_RANKS_ON_AVERAGE = True #: Default value for ``finalRanksOnAverage``
USE_JOBLIB_FOR_POLICIES = False  #: Default value for ``useJoblibForPolicies``. Using it does not speed anything up (too much overhead from spawning too many threads), so it should really stay disabled.
# --- Class EvaluatorMultiPlayers
class EvaluatorMultiPlayers(object):
""" Evaluator class to run the simulations, for the multi-players case.
"""
def __init__(self, configuration,
moreAccurate=MORE_ACCURATE):
# Configuration
        self.cfg = configuration  #: Configuration dictionary
# Attributes
self.nbPlayers = len(self.cfg['players']) #: Number of players
print("Number of players in the multi-players game:", self.nbPlayers)
self.horizon = self.cfg['horizon'] #: Horizon (number of time steps)
print("Time horizon:", self.horizon)
self.repetitions = self.cfg.get('repetitions', REPETITIONS) #: Number of repetitions
print("Number of repetitions:", self.repetitions)
self.delta_t_plot = 1 if self.horizon <= 10000 else self.cfg.get('delta_t_plot', DELTA_T_PLOT)
print("Sampling rate for plotting, delta_t_plot:", self.delta_t_plot) #: Sampling rate for plotting
self.horizon = int(self.horizon)
print("Number of jobs for parallelization:", self.cfg['n_jobs'])
self.collisionModel = self.cfg.get('collisionModel', defaultCollisionModel) #: Which collision model should be used
self.full_lost_if_collision = full_lost_if_collision.get(self.collisionModel.__name__, True) #: Is there a full loss of rewards if collision ? To compute the correct decomposition of regret
print("Using collision model {} (function {}).\nMore details:\n{}".format(self.collisionModel.__name__, self.collisionModel, self.collisionModel.__doc__))
self.signature = signature
# Flags
self.moreAccurate = moreAccurate #: Use the count of selections instead of rewards for a more accurate mean/var reward measure.
print("Using accurate regrets and last regrets ? {}".format(moreAccurate))
self.finalRanksOnAverage = self.cfg.get('finalRanksOnAverage', FINAL_RANKS_ON_AVERAGE) #: Final display of ranks are done on average rewards?
self.averageOn = self.cfg.get('averageOn', 5e-3) #: How many last steps for final rank average rewards
self.nb_break_points = self.cfg.get('nb_break_points', nb_break_points) #: How many random events?
self.plot_lowerbounds = self.cfg.get('plot_lowerbounds', plot_lowerbounds) #: Should we plot the lower-bounds?
self.useJoblib = USE_JOBLIB and self.cfg['n_jobs'] != 1 #: Use joblib to parallelize for loop on repetitions (useful)
self.showplot = self.cfg.get('showplot', True) #: Show the plot (interactive display or not)
self.use_box_plot = USE_BOX_PLOT or (self.repetitions == 1) #: To use box plot (or violin plot if False). Force to use boxplot if repetitions=1.
self.count_ranks_markov_chain = self.cfg.get('count_ranks_markov_chain', COUNT_RANKS_MARKOV_CHAIN)#: If true, count and then print a lot of statistics for the Markov Chain of the underlying configurations on ranks
self.change_labels = self.cfg.get('change_labels', {}) #: Possibly empty dictionary to map 'playerId' to new labels (overwrite their name).
self.append_labels = self.cfg.get('append_labels', {}) #: Possibly empty dictionary to map 'playerId' to new labels (by appending the result from 'append_labels').
# Internal object memory
self.envs = [] #: List of environments
self.players = [] #: List of players
self.__initEnvironments__()
# Internal vectorial memory
self.rewards = dict() #: For each env, history of rewards
# self.rewardsSquared = dict()
self.pulls = dict() #: For each env, keep the history of arm pulls (mean)
self.lastPulls = dict() #: For each env, keep the distribution of arm pulls
self.allPulls = dict() #: For each env, keep the full history of arm pulls
self.collisions = dict() #: For each env, keep the history of collisions on all arms
self.lastCumCollisions = dict() #: For each env, last count of collisions on all arms
self.nbSwitchs = dict() #: For each env, keep the history of switches (change of configuration of players)
self.bestArmPulls = dict() #: For each env, keep the history of best arm pulls
self.freeTransmissions = dict() #: For each env, keep the history of successful transmission (1 - collisions, basically)
self.lastCumRewards = dict() #: For each env, last accumulated rewards, to compute variance and histogram of whole regret R_T
self.runningTimes = dict() #: For each env, keep the history of running times
        self.memoryConsumption = dict()  #: For each env, keep the history of memory consumption
print("Number of environments to try:", len(self.envs)) # DEBUG
# XXX: WARNING no memorized vectors should have dimension horizon * repetitions, that explodes the RAM consumption!
for envId in range(len(self.envs)): # Zeros everywhere
self.rewards[envId] = np.zeros((self.nbPlayers, self.horizon))
# self.rewardsSquared[envId] = np.zeros((self.nbPlayers, self.horizon))
self.lastCumRewards[envId] = np.zeros(self.repetitions)
self.pulls[envId] = np.zeros((self.nbPlayers, self.envs[envId].nbArms), dtype=np.int32)
self.lastPulls[envId] = np.zeros((self.nbPlayers, self.envs[envId].nbArms, self.repetitions), dtype=np.int32)
self.allPulls[envId] = np.zeros((self.nbPlayers, self.envs[envId].nbArms, self.horizon), dtype=np.int32)
self.collisions[envId] = np.zeros((self.envs[envId].nbArms, self.horizon))
self.lastCumCollisions[envId] = np.zeros((self.envs[envId].nbArms, self.repetitions), dtype=np.int32)
self.nbSwitchs[envId] = np.zeros((self.nbPlayers, self.horizon), dtype=np.int32)
self.bestArmPulls[envId] = np.zeros((self.nbPlayers, self.horizon), dtype=np.int32)
self.freeTransmissions[envId] = np.zeros((self.nbPlayers, self.horizon), dtype=np.int32)
self.runningTimes[envId] = np.zeros((self.nbPlayers, self.repetitions))
self.memoryConsumption[envId] = np.zeros((self.nbPlayers, self.repetitions))
# To speed up plotting
self._times = np.arange(1, 1 + self.horizon)
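    # Hedged note on array shapes (derived from the allocations above): rewards and
    # nbSwitchs are (nbPlayers, horizon), pulls is (nbPlayers, nbArms), lastPulls is
    # (nbPlayers, nbArms, repetitions), allPulls is (nbPlayers, nbArms, horizon) and
    # collisions is (nbArms, horizon), so no stored array grows as horizon * repetitions.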
# --- Init methods
def __initEnvironments__(self):
""" Create environments."""
nbArms = []
for configuration_arms in self.cfg['environment']:
print("Using this dictionary to create a new environment:\n", configuration_arms) # DEBUG
new_mab_problem = None
if isinstance(configuration_arms, dict) \
and "arm_type" in configuration_arms \
and "params" in configuration_arms:
# PieceWiseStationaryMAB or NonStationaryMAB or ChangingAtEachRepMAB
if "listOfMeans" in configuration_arms["params"] \
and "changePoints" in configuration_arms["params"]:
new_mab_problem = PieceWiseStationaryMAB(configuration_arms)
elif "newMeans" in configuration_arms["params"] \
and "args" in configuration_arms["params"]:
if "changePoints" in configuration_arms["params"]:
new_mab_problem = NonStationaryMAB(configuration_arms)
else:
new_mab_problem = ChangingAtEachRepMAB(configuration_arms)
# MarkovianMAB
elif configuration_arms["arm_type"] == "Markovian" \
and "transitions" in configuration_arms["params"]:
new_mab_problem = MarkovianMAB(configuration_arms)
# IncreasingMAB
elif "change_lower_amplitude" in configuration_arms:
new_mab_problem = IncreasingMAB(configuration_arms)
if new_mab_problem is None:
new_mab_problem = MAB(configuration_arms)
self.envs.append(new_mab_problem)
nbArms.append(new_mab_problem.nbArms)
if len(set(nbArms)) != 1: # FIXME add support of multi-environments evaluator for MP policies with different number of arms in the scenarios.
raise ValueError("ERROR: right now, the multi-environments evaluator does not work well for MP policies, if there is a number different of arms in the scenarios!")
def __initPlayers__(self, env):
""" Create or initialize players."""
playersId = self.cfg.get('playersId', '0')
for playerId, player in enumerate(self.cfg['players']):
print("- Adding player #{:>2} = {} ...".format(playerId + 1, player)) # DEBUG
if isinstance(player, dict): # Either the 'player' is a config dict
print(" Creating this player from a dictionnary 'player' = {} ...".format(player)) # DEBUG
self.players.append(player['archtype'](env.nbArms, **player['params']))
else: # Or already a player object
print(" Using this already created player 'player' = {} ...".format(player)) # DEBUG
self.players.append(player)
for playerId in range(len(self.players)):
self.players[playerId].__cachedstr__ = str(self.players[playerId])
if playersId in self.append_labels:
self.players[playerId].__cachedstr__ += self.append_labels[playersId]
if playersId in self.change_labels:
self.players[playerId].__cachedstr__ = self.change_labels[playersId]
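    # Illustrative sketch of one entry of self.cfg['players'], matching the two
    # branches above: either an already-built player object, or a dict whose
    # 'archtype' class is instantiated as archtype(env.nbArms, **params). The
    # policy name below is only an assumption, any policy class would do:
    #
    #   one_player = {
    #       'archtype': UCB,   # policy class
    #       'params':   {},    # keyword arguments for its constructor
    #   }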
# --- Start computation
def startAllEnv(self):
"""Simulate all envs."""
for envId, env in enumerate(self.envs):
self.startOneEnv(envId, env)
def startOneEnv(self, envId, env):
"""Simulate that env."""
print("\n\nEvaluating environment:", repr(env)) # DEBUG
self.players = []
self.__initPlayers__(env)
# Get the position of the best arms
means = env.means
bestarm = env.maxArm
# FIXME for > 1 player, this has no meaning
indexes_bestarm = np.nonzero(np.isclose(means, bestarm))[0]
def store(r, repeatId):
"""Store the result of the experiment r."""
self.rewards[envId] += np.cumsum(r.rewards, axis=1) # cumsum on time
# self.rewardsSquared[envId] += np.cumsum(r.rewards ** 2, axis=1) # cumsum on time
# self.rewardsSquared[envId] += np.cumsum(r.rewardsSquared, axis=1) # cumsum on time
self.lastCumRewards[envId][repeatId] = np.sum(r.rewards) # sum on time and sum on players
self.pulls[envId] += r.pulls
self.lastPulls[envId][:, :, repeatId] = r.pulls
self.allPulls[envId] += r.allPulls
self.collisions[envId] += r.collisions
self.lastCumCollisions[envId][:, repeatId] = np.sum(r.collisions, axis=1) # sum on time
for playerId in range(self.nbPlayers):
self.nbSwitchs[envId][playerId, 1:] += (np.diff(r.choices[playerId, :]) != 0)
self.bestArmPulls[envId][playerId, :] += np.cumsum(np.in1d(r.choices[playerId, :], indexes_bestarm))
# FIXME there is probably a bug in this computation
self.freeTransmissions[envId][playerId, :] += np.array([r.choices[playerId, t] not in r.collisions[:, t] for t in range(self.horizon)])
self.runningTimes[envId][playerId, repeatId] = r.running_time
self.memoryConsumption[envId][playerId, repeatId] = r.memory_consumption
# Start now
if self.useJoblib:
seeds = np.random.randint(low=0, high=100 * self.repetitions, size=self.repetitions)
repeatIdout = 0
for r in Parallel(n_jobs=self.cfg['n_jobs'], verbose=self.cfg['verbosity'])(
delayed(delayed_play)(env, self.players, self.horizon, self.collisionModel, seed=seeds[repeatId], repeatId=repeatId, count_ranks_markov_chain=self.count_ranks_markov_chain, useJoblib=self.useJoblib)
for repeatId in tqdm(range(self.repetitions), desc="Repeat||")
):
store(r, repeatIdout)
repeatIdout += 1
if env.isChangingAtEachRepetition:
env._t += self.repetitions # new self.repetitions draw!
else:
for repeatId in tqdm(range(self.repetitions), desc="Repeat"):
r = delayed_play(env, self.players, self.horizon, self.collisionModel, repeatId=repeatId, count_ranks_markov_chain=self.count_ranks_markov_chain, useJoblib=self.useJoblib)
store(r, repeatId)
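    # Minimal usage sketch (assuming the evaluator is built from a 'configuration'
    # dict exposing the keys read above as self.cfg):
    #
    #   evaluation = EvaluatorMultiPlayers(configuration)
    #   evaluation.startAllEnv()                 # run all repetitions on every environment
    #   evaluation.printFinalRanking(envId=0)    # then inspect, print or plot the results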
# --- Save to disk methods
def saveondisk(self, filepath="saveondisk_EvaluatorMultiPlayers.hdf5"):
""" Save the content of the internal data to into a HDF5 file on the disk.
- See http://docs.h5py.org/en/stable/quick.html if needed.
"""
# 1. create the h5py file
import h5py
h5file = h5py.File(filepath, "w")
# 2. store main attributes and all other attributes, if they exist
for name_of_attr in [
"nbPlayers", "horizon", "repetitions",
"delta_t_plot", "collisionModel", "full_lost_if_collision", "signature", "nb_break_points", "plot_lowerbounds", "moreAccurate", "finalRanksOnAverage", "useJoblib", "showplot", "use_box_plot", "count_ranks_markov_chain", "cache_rewards", "change_labels", "append_labels"
]:
if not hasattr(self, name_of_attr): continue
value = getattr(self, name_of_attr)
if inspect.isfunction(value): value = value.__name__
if isinstance(value, str): value = np.string_(value)
try: h5file.attrs[name_of_attr] = value
except (ValueError, TypeError):
print("Error: when saving the Evaluator object to a HDF5 file, the attribute named {} (value {} of type {}) couldn't be saved. Skipping...".format(name_of_attr, value, type(value))) # DEBUG
# 3. for each environment
h5file.attrs["number_of_envs"] = len(self.envs)
for envId in range(len(self.envs)):
# 3.a. create subgroup for this env
sbgrp = h5file.create_group("env_{}".format(envId))
# 3.b. store attribute of the MAB problem
mab = self.envs[envId]
for name_of_attr in ["isChangingAtEachRepetition", "isMarkovian", "_sparsity", "means", "nbArms", "maxArm", "minArm"]:
if not hasattr(mab, name_of_attr): continue
value = getattr(mab, name_of_attr)
if isinstance(value, str): value = np.string_(value)
try: sbgrp.attrs[name_of_attr] = value
except (ValueError, TypeError):
print("Error: when saving the Evaluator object to a HDF5 file, the attribute named {} (value {} of type {}) couldn't be saved. Skipping...".format(name_of_attr, value, type(value))) # DEBUG
# 3.c. store data for that env
for name_of_dataset in [ "rewards", "lastCumRewards", "pulls", "lastPulls", "allPulls", "collisions", "lastCumCollisions", "nbSwitchs", "bestArmPulls", "freeTransmissions", "runningTimes", "memoryConsumption"]:
if not (hasattr(self, name_of_dataset) and envId in getattr(self, name_of_dataset)): continue
data = getattr(self, name_of_dataset)[envId]
try: sbgrp.create_dataset(name_of_dataset, data=data)
except (ValueError, TypeError) as e:
print("Error: when saving the Evaluator object to a HDF5 file, the dataset named {} (value of type {} and shape {} and dtype {}) couldn't be saved. Skipping...".format(name_of_dataset, type(data), data.shape, data.dtype)) # DEBUG
print("Exception:\n", e) # DEBUG
# 3.d. compute and store data for that env
for methodName in ["getRunningTimes", "getMemoryConsumption", "getPulls", "getNbSwitchs", "getBestArmPulls", "getfreeTransmissions", "getCollisions", "getRewards", "getFirstRegretTerm", "getSecondRegretTerm", "getThirdRegretTerm", "getCentralizedRegret", "getLastRegrets"]:
if not hasattr(self, methodName): continue
name_of_dataset = methodName.replace("get", "")
name_of_dataset = name_of_dataset[0].lower() + name_of_dataset[1:]
if name_of_dataset in sbgrp: name_of_dataset = methodName # XXX be sure to not use twice the same name, e.g., for getRunningTimes and runningTimes
method = getattr(self, methodName)
try:
if _nbOfArgs(method) > 2:
if isinstance(method(0, envId=envId), tuple):
data = np.array([method(playerId, envId=envId)[0] for playerId in range(len(self.players))])
else:
data = np.array([method(playerId, envId=envId) for playerId in range(len(self.players))])
else:
if isinstance(method(envId), tuple):
data = method(envId)[0]
else:
data = method(envId)
except TypeError:
if isinstance(method(envId), tuple):
data = method(envId)[0]
else:
data = method(envId)
try: sbgrp.create_dataset(name_of_dataset, data=data)
except (ValueError, TypeError) as e:
print("Error: when saving the Evaluator object to a HDF5 file, the dataset named {} (value of type {} and shape {} and dtype {}) couldn't be saved. Skipping...".format(name_of_dataset, type(data), data.shape, data.dtype)) # DEBUG
print("Exception:\n", e) # DEBUG
# 4. when done, close the file
h5file.close()
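    # Reading such a file back is not implemented here (see loadfromdisk below), but
    # the layout written above can be inspected directly with h5py, e.g. (sketch,
    # file name is the default used above):
    #
    #   import h5py
    #   with h5py.File("saveondisk_EvaluatorMultiPlayers.hdf5", "r") as f:
    #       print(dict(f.attrs))          # global attributes: horizon, repetitions, ...
    #       env0 = f["env_0"]             # one group per environment
    #       print(dict(env0.attrs))       # means, nbArms, maxArm, ...
    #       pulls = env0["pulls"][...]    # datasets come back as numpy arrays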
def loadfromdisk(self, filepath):
""" Update internal memory of the Evaluator object by loading data the opened HDF5 file.
.. warning:: FIXME this is not YET implemented!
"""
# FIXME I just have to fill all the internal matrices from the HDF5 file ?
raise NotImplementedError
# --- Getter methods
def getPulls(self, playerId, envId=0):
"""Extract mean pulls."""
return self.pulls[envId][playerId, :] / float(self.repetitions)
def getAllPulls(self, playerId, armId, envId=0):
"""Extract mean of all pulls."""
return self.allPulls[envId][playerId, armId, :] / float(self.repetitions)
def getNbSwitchs(self, playerId, envId=0):
"""Extract mean nb of switches."""
return self.nbSwitchs[envId][playerId, :] / float(self.repetitions)
def getCentralizedNbSwitchs(self, envId=0):
"""Extract average of mean nb of switches."""
return np.sum(self.nbSwitchs[envId], axis=0) / (float(self.repetitions) * self.nbPlayers)
def getBestArmPulls(self, playerId, envId=0):
"""Extract mean of best arms pulls."""
# We have to divide by a arange() = cumsum(ones) to get a frequency
return self.bestArmPulls[envId][playerId, :] / (float(self.repetitions) * self._times)
def getfreeTransmissions(self, playerId, envId=0):
"""Extract mean of successful transmission."""
return self.freeTransmissions[envId][playerId, :] / float(self.repetitions)
def getCollisions(self, armId, envId=0):
"""Extract mean of number of collisions."""
return self.collisions[envId][armId, :] / float(self.repetitions)
def getRewards(self, playerId, envId=0):
"""Extract mean of rewards."""
return self.rewards[envId][playerId, :] / float(self.repetitions)
def getRegretMean(self, playerId, envId=0):
"""Extract mean of regret, for one arm for one player (no meaning).
.. warning:: This is the centralized regret, *for one arm*, it does not make much sense in the multi-players setting!
"""
return np.cumsum(self.envs[envId].get_maxArm(self.horizon) - self.getRewards(playerId, envId))
def getCentralizedRegret_LessAccurate(self, envId=0):
"""Compute the empirical centralized regret: cumsum on time of the mean rewards of the M best arms - cumsum on time of the empirical rewards obtained by the players, based on accumulated rewards."""
assert self.nbPlayers <= self.envs[envId].nbArms, "WARNING getCentralizedRegret_LessAccurate is not yet implement in the case when there is more players than arms ?" # DEBUG
# FIXED use self.envs[envId].get_maxArms(M=self.nbPlayers, horizon=self.horizon)
averageBestRewards = np.cumsum(self.envs[envId].get_maxArms(M=self.nbPlayers, horizon=self.horizon))
# And for the actual rewards, the collisions are counted in the rewards logged in self.getRewards
        actualRewards = np.sum([self.getRewards(playerId, envId=envId) for playerId in range(self.nbPlayers)], axis=0)
return averageBestRewards - actualRewards
# --- Three terms in the regret
def getFirstRegretTerm(self, envId=0):
"""Extract and compute the first term :math:`(a)` in the centralized regret: losses due to pulling suboptimal arms."""
losses = np.zeros(self.horizon)
# means = self.envs[envId].means # Shape: (nbArms)
allMeans = self.envs[envId].get_allMeans(self.horizon) # Shape: (nbArms, horizon)
allPulls = self.allPulls[envId] / float(self.repetitions) # Shape: (nbPlayers, nbArms, horizon)
# it's hard to program this in vector operations, so let's do just a loop...
for t in range(self.horizon):
means = allMeans[:, t]
sortingIndex = np.argsort(means)
means = np.sort(means)
deltaMeansWorstArms = means[-self.nbPlayers] - means[:-self.nbPlayers]
allWorstPulls = allPulls[:, sortingIndex[:-self.nbPlayers], t]
worstPulls = np.sum(allWorstPulls, axis=0) # sum for all players
losses[t] = np.dot(deltaMeansWorstArms, worstPulls) # Count and sum on k in Mworst
# Conclusion
firstRegretTerm = np.cumsum(losses) # Accumulate losses
return firstRegretTerm
def getSecondRegretTerm(self, envId=0):
"""Extract and compute the second term :math:`(b)` in the centralized regret: losses due to not pulling optimal arms."""
losses = np.zeros(self.horizon)
# means = self.envs[envId].means # Shape: (nbArms)
allMeans = self.envs[envId].get_allMeans(self.horizon) # Shape: (nbArms, horizon)
allPulls = self.allPulls[envId] / float(self.repetitions) # Shape: (nbPlayers, nbArms, horizon)
# it's hard to program this in vector operations, so let's do just a loop...
for t in range(self.horizon):
means = allMeans[:, t]
sortingIndex = np.argsort(means)
means = np.sort(means)
deltaMeansBestArms = means[-self.nbPlayers:] - means[-self.nbPlayers]
allBestPulls = allPulls[:, sortingIndex[-self.nbPlayers:], t]
bestMisses = 1 - np.sum(allBestPulls, axis=0) # sum for all players
losses[t] = np.dot(deltaMeansBestArms, bestMisses) # Count and sum on k in Mbest
# Conclusion
secondRegretTerm = np.cumsum(losses) # Accumulate losses
return secondRegretTerm
def getThirdRegretTerm(self, envId=0):
"""Extract and compute the third term :math:`(c)` in the centralized regret: losses due to collisions."""
# means = self.envs[envId].means # Shape: (nbArms)
allMeans = self.envs[envId].get_allMeans(self.horizon) # Shape: (nbArms, horizon)
countCollisions = self.collisions[envId] # Shape: (nbArms, horizon)
if not self.full_lost_if_collision:
print("Warning: the collision model ({}) does *not* yield a loss in communication when colliding (one user can communicate, or in average one user can communicate), so countCollisions -= 1 for the 3rd regret term ...".format(self.collisionModel.__name__)) # DEBUG
countCollisions = np.maximum(0, countCollisions - 1)
# losses = np.dot(means, countCollisions / float(self.repetitions)) # Count and sum on k in 1...K
losses = np.sum(allMeans * countCollisions, axis=0) / float(self.repetitions) # Count and sum on k in 1...K
        thirdRegretTerm = np.cumsum(losses)  # Accumulate losses over time, like the two other terms
return thirdRegretTerm
def getCentralizedRegret_MoreAccurate(self, envId=0):
"""Compute the empirical centralized regret, based on counts of selections and not actual rewards."""
return self.getFirstRegretTerm(envId=envId) + self.getSecondRegretTerm(envId=envId) + self.getThirdRegretTerm(envId=envId)
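    # Reminder of the standard decomposition implemented by the three methods above:
    # with M players, K arms, mu_(M)^* the M-th best mean, T_k(t) the number of
    # selections of arm k and C_k(t) its collision count, the centralized regret is
    #
    #   R_t =   sum_{k in M-worst} (mu_(M)^* - mu_k) * E[T_k(t)]         (a) suboptimal pulls
    #         + sum_{k in M-best}  (mu_k - mu_(M)^*) * (t - E[T_k(t)])   (b) missed optimal pulls
    #         + sum_{k=1..K}       mu_k * E[C_k(t)]                      (c) weighted collisions
    #
    # (stated here only as a comment; the code above estimates each expectation by
    # averaging over the repetitions).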
def getCentralizedRegret(self, envId=0, moreAccurate=None):
"""Using either the more accurate or the less accurate regret count."""
moreAccurate = moreAccurate if moreAccurate is not None else self.moreAccurate
# print("Computing the vector of mean cumulated regret with '{}' accurate method...".format("more" if moreAccurate else "less")) # DEBUG
if moreAccurate:
return self.getCentralizedRegret_MoreAccurate(envId=envId)
else:
return self.getCentralizedRegret_LessAccurate(envId=envId)
# --- Last regrets
def getLastRegrets_LessAccurate(self, envId=0):
"""Extract last regrets, based on accumulated rewards."""
# FIXME it depends on the collision model !
assert self.nbPlayers <= self.envs[envId].nbArms, "WARNING getLastRegrets_LessAccurate is not yet implement in the case when there is more players than arms ?" # DEBUG
sumBestMeans = np.sum(self.envs[envId].get_maxArms(M=self.nbPlayers, horizon=self.horizon))
# if self.envs[envId].nbArms < self.nbPlayers:
# # sure to have collisions, then the best strategy is to put all the collisions in the worse arm
# worseArm = np.min(meansArms)
# sumBestMeans -= worseArm # This count the collisions
return sumBestMeans - self.lastCumRewards[envId]
def getAllLastWeightedSelections(self, envId=0):
"""Extract weighted count of selections."""
all_last_weighted_selections = np.zeros(self.repetitions)
lastCumCollisions = self.lastCumCollisions[envId]
means = self.envs[envId].means # Shape: (nbArms)
for armId, mean in enumerate(means):
last_selections = np.sum(self.lastPulls[envId][:, armId, :], axis=0) # sum on players
all_last_weighted_selections += mean * (last_selections - lastCumCollisions[armId, :])
return all_last_weighted_selections
def getLastRegrets_MoreAccurate(self, envId=0):
"""Extract last regrets, based on counts of selections and not actual rewards."""
# FIXME it depends on the collision model !
assert self.nbPlayers <= self.envs[envId].nbArms, "WARNING getLastRegrets_MoreAccurate is not yet implement in the case when there is more players than arms ?" # DEBUG
sumBestMeans = np.sum(self.envs[envId].get_maxArms(M=self.nbPlayers, horizon=self.horizon))
# if self.envs[envId].nbArms < self.nbPlayers:
# # sure to have collisions, then the best strategy is to put all the collisions in the worse arm
# worseArm = np.min(meansArms)
# sumBestMeans -= worseArm # This count the collisions
return sumBestMeans - self.getAllLastWeightedSelections(envId=envId)
def getLastRegrets(self, envId=0, moreAccurate=None):
"""Using either the more accurate or the less accurate regret count."""
moreAccurate = moreAccurate if moreAccurate is not None else self.moreAccurate
# print("Computing the vector of last cumulated regrets (on repetitions) with '{}' accurate method...".format("more" if moreAccurate else "less")) # DEBUG
if moreAccurate:
return self.getLastRegrets_MoreAccurate(envId=envId)
else:
return self.getLastRegrets_LessAccurate(envId=envId)
def getRunningTimes(self, envId=0):
"""Get the means and stds and list of running time of the different players."""
all_times = [ self.runningTimes[envId][playerId, :] for playerId in range(self.nbPlayers) ]
means = [ np.mean(times) for times in all_times ]
stds = [ np.std(times) for times in all_times ]
return means, stds, all_times
def getMemoryConsumption(self, envId=0):
"""Get the means and stds and list of memory consumptions of the different players."""
all_memories = [ self.memoryConsumption[envId][playerId, :] for playerId in range(self.nbPlayers) ]
for playerId in range(self.nbPlayers):
all_memories[playerId] = [ m for m in all_memories[playerId] if m > 0 ]
means = [ np.mean(memories) for memories in all_memories ]
stds = [ np.std(memories) for memories in all_memories ]
return means, stds, all_memories
# --- Plotting methods
def plotRewards(self, envId=0, savefig=None, semilogx=False, moreAccurate=None):
"""Plot the decentralized (vectorial) rewards, for each player."""
moreAccurate = moreAccurate if moreAccurate is not None else self.moreAccurate
fig = plt.figure()
ymin = 0
colors = palette(self.nbPlayers)
markers = makemarkers(self.nbPlayers)
X = self._times - 1
cumRewards = np.zeros((self.nbPlayers, self.horizon))
for playerId, player in enumerate(self.players):
label = 'Player #{:>2}: {}'.format(playerId + 1, _extract(player.__cachedstr__))
Y = self.getRewards(playerId, envId)
cumRewards[playerId, :] = Y
ymin = min(ymin, np.min(Y))
if semilogx:
plt.semilogx(X[::self.delta_t_plot], Y[::self.delta_t_plot], label=label, color=colors[playerId], marker=markers[playerId], markevery=(playerId / 50., 0.1), lw=2)
else:
plt.plot(X[::self.delta_t_plot], Y[::self.delta_t_plot], label=label, color=colors[playerId], marker=markers[playerId], markevery=(playerId / 50., 0.1), lw=2)
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}${}".format(self.horizon, self.signature))
if self.nb_break_points > 0:
# DONE fix math formula in case of non stationary bandits
plt.ylabel("Cumulative personal reward {}".format(r"$\sum_{s=1}^{t} \sum_{k=1}^{%d} \mu_k(t) \mathbb{E}_{%d}[1(I(t)=k)]$" % (self.envs[envId].nbArms, self.repetitions) if moreAccurate else r"$\mathbb{E}_{%d}[r_t]$" % self.repetitions))
else:
plt.ylabel("Cumulative personal reward {}".format(r"$\sum_{k=1}^{%d} \mu_k\mathbb{E}_{%d}[T_k(t)]$" % (self.envs[envId].nbArms, self.repetitions) if moreAccurate else r"$\mathbb{E}_{%d}[r_t]$" % self.repetitions))
plt.title("Multi-players $M = {}$ : Personal reward for each player, averaged ${}$ times\n${}$ arms{}: {}".format(self.nbPlayers, self.repetitions, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def plotFairness(self, envId=0, savefig=None, semilogx=False, fairness="default", evaluators=()):
"""Plot a certain measure of "fairness", from these personal rewards, support more than one environments (use evaluators to give a list of other environments)."""
fig = plt.figure()
X = self._times - 1
evaluators = [self] + list(evaluators) # Default to only [self]
colors = palette(len(evaluators))
markers = makemarkers(len(evaluators))
plot_method = plt.semilogx if semilogx else plt.plot
# Decide which fairness function to use
fairnessFunction = fairness_mapping[fairness] if isinstance(fairness, str) else fairness
fairnessName = fairness if isinstance(fairness, str) else getattr(fairness, '__name__', "std_fairness")
for evaId, eva in enumerate(evaluators):
label = eva.strPlayers(short=True)
cumRewards = np.zeros((eva.nbPlayers, eva.horizon))
for playerId, _ in enumerate(eva.players):
cumRewards[playerId, :] = eva.getRewards(playerId, envId)
# # Print each fairness measure # DEBUG
# for fN, fF in fairness_mapping.items():
# f = fF(cumRewards)
# print(" - {} fairness index is = {} ...".format(fN, f)) # DEBUG
# Plot only one fairness term
fairness = fairnessFunction(cumRewards)
plot_method(X[::self.delta_t_plot][2:], fairness[::self.delta_t_plot][2:], markers[evaId] + '-', label=label, markevery=(evaId / 50., 0.1), color=colors[evaId], lw=2)
if len(evaluators) > 1:
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}$, {}{}".format(self.horizon, self.strPlayers() if len(evaluators) == 1 else "", self.signature))
add_percent_formatter("yaxis", 1.0)
# plt.ylim(0, 1)
plt.ylabel("Centralized measure of fairness for cumulative rewards ({})".format(fairnessName.title()))
plt.title("Multi-players $M = {}$ : Centralized measure of fairness, averaged ${}$ times\n${}$ arms{}: {}".format(self.nbPlayers, self.repetitions, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def plotRegretCentralized(self, envId=0, savefig=None,
semilogx=False, semilogy=False, loglog=False,
normalized=False, evaluators=(),
subTerms=False, sumofthreeterms=False, moreAccurate=None):
"""Plot the centralized cumulated regret, support more than one environments (use evaluators to give a list of other environments).
- The lower bounds are also plotted (Besson & Kaufmann, and Anandkumar et al).
- The three terms of the regret are also plotting if evaluators = () (that's the default).
"""
moreAccurate = moreAccurate if moreAccurate is not None else self.moreAccurate
X0 = X = self._times - 1
fig = plt.figure()
evaluators = [self] + list(evaluators) # Default to only [self]
colors = palette(5 if len(evaluators) == 1 and subTerms else len(evaluators))
markers = makemarkers(5 if len(evaluators) == 1 and subTerms else len(evaluators))
plot_method = plt.loglog if loglog else plt.plot
plot_method = plt.semilogy if semilogy else plot_method
plot_method = plt.semilogx if semilogx else plot_method
# Loop
for evaId, eva in enumerate(evaluators):
if subTerms:
Ys = [None] * 3
labels = [""] * 3
Ys[0] = eva.getFirstRegretTerm(envId)
labels[0] = "$(a)$ term: Pulls of {} suboptimal arms (lower-bounded)".format(max(0, self.envs[envId].nbArms - self.nbPlayers))
Ys[1] = eva.getSecondRegretTerm(envId)
labels[1] = "$(b)$ term: Non-pulls of {} optimal arms".format(min(self.nbPlayers, self.envs[envId].nbArms))
Ys[2] = eva.getThirdRegretTerm(envId)
labels[2] = "$(c)$ term: Weighted count of collisions"
Y = eva.getCentralizedRegret(envId, moreAccurate=moreAccurate)
label = "{}umulated centralized regret".format("Normalized c" if normalized else "C") if len(evaluators) == 1 else eva.strPlayers(short=True)
if semilogx or loglog: # FIXED for semilogx plots, truncate to only show t >= 100
X, Y = X0[X0 >= 100], Y[X0 >= 100]
if subTerms:
for i in range(len(Ys)):
Ys[i] = Ys[i][X0 >= 100]
if normalized:
Y = Y[X >= 1] / np.log(X[X >= 1]) # XXX prevent /0
if subTerms:
for i in range(len(Ys)):
Ys[i] = Ys[i][X >= 1] / np.log(X[X >= 1]) # XXX prevent /0
meanY = np.mean(Y)
# Now plot
plot_method(X[::self.delta_t_plot], Y[::self.delta_t_plot], (markers[evaId] + '-'), markevery=(evaId / 50., 0.1), label=label, color=colors[evaId], lw=2)
if len(evaluators) == 1:
# if not semilogx and not loglog and not semilogy:
# # We plot a horizontal line ----- at the mean regret
# plot_method(X[::self.delta_t_plot], meanY * np.ones_like(X)[::self.delta_t_plot], '--', label="Mean cumulated centralized regret", color=colors[evaId], lw=2)
# " = ${:.3g}$".format(meanY)
if subTerms:
if sumofthreeterms:
Ys.append(Ys[0] + Ys[1] + Ys[2])
labels.append("Sum of 3 terms (= regret)")
# print("Difference between regret and sum of three terms:", Y - np.array(Ys[-1])) # DEBUG
for i, (Y, label) in enumerate(zip(Ys, labels)):
plot_method(X[::self.delta_t_plot], Y[::self.delta_t_plot], (markers[i + 1] + '-'), markevery=((i + 1) / 50., 0.1), label=label, color=colors[i + 1], lw=2)
if semilogx or loglog: # Manual fix for issue https://github.com/SMPyBandits/SMPyBandits/issues/38
plt.xscale('log')
if semilogy or loglog: # Manual fix for issue https://github.com/SMPyBandits/SMPyBandits/issues/38
plt.yscale('log')
# We also plot our lower bound
if not self.envs[envId].isDynamic:
try:
# XXX In fact, the lower-bound is also true for Bayesian policies! Finite means ARE ALWAYS linear! I should write the proof, but I convinced myself that the lower-bound is still correct (in a certain sense) and at least it gives an overview of the (average) complexity of the problem (randomly drawn and) used for the experiments.
lowerbound, anandkumar_lowerbound, centralized_lowerbound = self.envs[envId].lowerbound_multiplayers(self.nbPlayers)
if not (semilogx or semilogy or loglog):
print("\nThis MAB problem has: \n - a [Lai & Robbins] complexity constant C(mu) = {:.3g} for 1-player problem ... \n - a Optimal Arm Identification factor H_OI(mu) = {:.2%} ...".format(self.envs[envId].lowerbound(), self.envs[envId].hoifactor())) # DEBUG
if self.envs[envId].isDynamic:
print("WARNING this env is in fact dynamic, this complexity term and H_OI factor do not have much sense... (they are computed from the average of the complexity for all mean vectors drawn in the repeated experiments...)") # DEBUG
print(" - [Anandtharam et al] centralized lower-bound = {:.3g},\n - [Anandkumar et al] decentralized lower-bound = {:.3g}\n - Our better (larger) decentralized lower-bound = {:.3g},".format(centralized_lowerbound, anandkumar_lowerbound, lowerbound)) # DEBUG
if normalized:
T = np.ones_like(X)
else:
X = X[X >= 1]
T = np.log(X)
if self.plot_lowerbounds:
plot_method(X[::self.delta_t_plot], lowerbound * T[::self.delta_t_plot], 'k-', label="Besson & Kaufmann L-B = ${:.3g} \; \log(t)$".format(lowerbound), lw=3)
plot_method(X[::self.delta_t_plot], anandkumar_lowerbound * T[::self.delta_t_plot], 'k--', label="Anandkumar L-B = ${:.3g} \; \log(t)$".format(anandkumar_lowerbound), lw=2)
plot_method(X[::self.delta_t_plot], centralized_lowerbound * T[::self.delta_t_plot], 'k:', label="Centralized L-B = ${:.3g} \; \log(t)$".format(centralized_lowerbound), lw=2)
except AssertionError:
print("Error: Unable to compute and display the lower-bound...") # DEBUG
# Labels and legends
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}$, {}{}".format(self.horizon, self.strPlayers() if len(evaluators) == 1 else "", self.signature))
if self.nb_break_points > 0:
plt.ylabel("{}umulative non-stationary centralized regret\n{}".format("Normalized c" if normalized else "C", r"$\sum_{s=1}^{t} \sum_{k=1}^{%d} \mu_k^*(s) - \sum_{s=1}^{t} \sum_{k=1}^{%d} \mu_k(s) \mathbb{P}_{%d}[A^j(t)=k,\overline{C}^j(t)]$" % (self.nbPlayers, self.envs[envId].nbArms, self.repetitions) if moreAccurate else r"$\mathbb{E}_{%d}[R_t]$" % self.repetitions))
else:
plt.ylabel("{}umulative centralized regret {}".format("Normalized c" if normalized else "C", r"$t \sum_{k=1}^{%d} \mu_k^* - \sum_{s=1}^{t} \sum_{k=1}^{%d} \mu_k(s) \mathbb{P}_{%d}[A^j(t)=k,\overline{C}^j(t)]$" % (self.nbPlayers, self.envs[envId].nbArms, self.repetitions) if moreAccurate else r"$\mathbb{E}_{%d}[R_t]$" % self.repetitions))
plt.title("Multi-players $M = {}$ : {}umulated centralized regret, averaged ${}$ times\n${}$ arms{}: {}".format(self.nbPlayers, "Normalized c" if normalized else "C", self.repetitions, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def plotNbSwitchs(self, envId=0, savefig=None, semilogx=False, cumulated=False):
"""Plot cumulated number of switchs (to evaluate the switching costs), comparing each player."""
X = self._times - 1
fig = plt.figure()
ymin = 0
colors = palette(self.nbPlayers)
markers = makemarkers(self.nbPlayers)
plot_method = plt.semilogx if semilogx else plt.plot
for playerId, player in enumerate(self.players):
label = 'Player #{:>2}: {}'.format(playerId + 1, _extract(player.__cachedstr__))
Y = self.getNbSwitchs(playerId, envId)
if cumulated:
Y = np.cumsum(Y)
ymin = min(ymin, np.min(Y))
plot_method(X[::self.delta_t_plot], Y[::self.delta_t_plot], label=label, color=colors[playerId], marker=markers[playerId], markevery=(playerId / 50., 0.1), linestyle='-' if cumulated else '', lw=2)
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}${}".format(self.horizon, self.signature))
plt.ylim(ymin, max(plt.ylim()[1], 1))
if not cumulated: add_percent_formatter("yaxis", 1.0)
plt.ylabel("{} of switches by player".format("Cumulated number" if cumulated else "Frequency"))
plt.title("Multi-players $M = {}$ : {}umber of switches for each player, averaged ${}$ times\n{} arm{}s: {}".format(self.nbPlayers, "Cumulated n" if cumulated else "N", self.repetitions, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def plotNbSwitchsCentralized(self, envId=0, savefig=None, semilogx=False, cumulated=False, evaluators=()):
"""Plot the centralized cumulated number of switchs (to evaluate the switching costs), support more than one environments (use evaluators to give a list of other environments)."""
X = self._times - 1
fig = plt.figure()
ymin = 0
evaluators = [self] + list(evaluators) # Default to only [self]
colors = palette(len(evaluators))
markers = makemarkers(len(evaluators))
plot_method = plt.semilogx if semilogx else plt.plot
for evaId, eva in enumerate(evaluators):
label = "" if len(evaluators) == 1 else eva.strPlayers(short=True)
Y = eva.getCentralizedNbSwitchs(envId)
if cumulated:
Y = np.cumsum(Y)
ymin = min(ymin, np.min(Y))
plot_method(X[::self.delta_t_plot], Y[::self.delta_t_plot], label=label, color=colors[evaId], marker=markers[evaId], markevery=(evaId / 50., 0.1), linestyle='-' if cumulated else '', lw=2)
if len(evaluators) > 1:
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}$, {}{}".format(self.horizon, self.strPlayers() if len(evaluators) == 1 else "", self.signature))
if not cumulated: add_percent_formatter("yaxis", 1.0)
plt.ylabel("{} of switches (changes of arms)".format("Cumulated number" if cumulated else "Frequency"))
plt.title("Multi-players $M = {}$ : Total {}number of switches, averaged ${}$ times\n${}$ arms{}: {}".format(self.nbPlayers, "cumulated " if cumulated else "", self.repetitions, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def plotBestArmPulls(self, envId=0, savefig=None):
"""Plot the frequency of pulls of the best channel.
- Warning: does not adapt to dynamic settings!
"""
X = self._times - 1
fig = plt.figure()
colors = palette(self.nbPlayers)
markers = makemarkers(self.nbPlayers)
for playerId, player in enumerate(self.players):
label = 'Player #{:>2}: {}'.format(playerId + 1, _extract(player.__cachedstr__))
Y = self.getBestArmPulls(playerId, envId)
plt.plot(X[::self.delta_t_plot], Y[::self.delta_t_plot], label=label, color=colors[playerId], marker=markers[playerId], markevery=(playerId / 50., 0.1), lw=2)
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}${}".format(self.horizon, self.signature))
add_percent_formatter("yaxis", 1.0)
# FIXME fix computation in case of non stationary bandits
if self.nb_break_points > 0:
print("WARNING the computation of Frequency of pulls of the optimal arm is wrong for non-stationary bandits...") # DEBUG
plt.ylabel("Frequency of pulls of the optimal arm")
plt.title("Multi-players $M = {}$ : Best arm pulls frequency for each players, averaged ${}$ times\n{} arm{}s: {}".format(self.nbPlayers, self.cfg['repetitions'], self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def plotAllPulls(self, envId=0, savefig=None, cumulated=True, normalized=False):
"""Plot the frequency of use of every channels, one figure for each channel. Not so useful."""
X = self._times - 1
mainfig = savefig
colors = palette(self.nbPlayers)
markers = makemarkers(self.nbPlayers)
figs = []
for armId in range(self.envs[envId].nbArms):
figs.append(plt.figure())
for playerId, player in enumerate(self.players):
Y = self.getAllPulls(playerId, armId, envId)
if cumulated:
Y = np.cumsum(Y)
if normalized:
Y /= 1 + X
plt.plot(X[::self.delta_t_plot], Y[::self.delta_t_plot], label=player.__cachedstr__, color=colors[playerId], linestyle='', marker=markers[playerId], markevery=(playerId / 50., 0.1), lw=2)
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}${}".format(self.horizon, self.signature))
s = ("Normalized " if normalized else "") + ("Cumulated number" if cumulated else "Frequency")
plt.ylabel("{} of pulls of the arm #{}".format(s, armId + 1))
plt.title("Multi-players $M = {}$ : {} of pulls of the arm #{} for each players, averaged ${}$ times\n{} arm{}s: {}".format(self.nbPlayers, s.lower(), armId + 1, self.cfg['repetitions'], self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
maximizeWindow()
if savefig is not None:
savefig = mainfig.replace("allPulls", "allPulls_Arm{}".format(armId + 1))
print("Saving to", savefig, "...") # DEBUG
plt.savefig(savefig, bbox_inches=BBOX_INCHES)
plt.show() if self.showplot else plt.close()
return figs
def plotFreeTransmissions(self, envId=0, savefig=None, cumulated=False):
"""Plot the frequency free transmission."""
X = self._times - 1
fig = plt.figure()
colors = palette(self.nbPlayers)
for playerId, player in enumerate(self.players):
Y = self.getfreeTransmissions(playerId, envId)
if cumulated:
Y = np.cumsum(Y)
plt.plot(X[::self.delta_t_plot], Y[::self.delta_t_plot], '.', label=player.__cachedstr__, color=colors[playerId], markersize=1, lw=2)
# should only plot with markers
legend()
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}${}".format(self.horizon, self.signature))
add_percent_formatter("yaxis", 1.0)
plt.ylabel("{}ransmission on a free channel".format("Cumulated T" if cumulated else "T"))
plt.title("Multi-players $M = {}$ : {}free transmission for each players, averaged ${}$ times\n{} arm{}s: {}".format(self.nbPlayers, "Cumulated " if cumulated else "", self.cfg['repetitions'], self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
# TODO I should plot the evolution of the occupation ratio of each channel, as a function of time
# Starting from the average occupation (by primary users), as given by [1 - arm.mean], it should increase occupation[arm] when users chose it
    # The reason/idea is that good arms (low occupation ratio) are pulled a lot, thus becoming not as available as they seemed
def plotNbCollisions(self, envId=0, savefig=None,
semilogx=False, semilogy=False, loglog=False,
cumulated=False, upperbound=False, evaluators=()):
"""Plot the frequency or cum number of collisions, support more than one environments (use evaluators to give a list of other environments)."""
X = self._times - 1
fig = plt.figure()
evaluators = [self] + list(evaluators) # Default to only [self]
colors = palette(len(evaluators))
markers = makemarkers(len(evaluators))
plot_method = plt.loglog if loglog else plt.plot
plot_method = plt.semilogy if semilogy else plot_method
plot_method = plt.semilogx if semilogx else plot_method
for evaId, eva in enumerate(evaluators):
Y = np.zeros(eva.horizon)
for armId in range(eva.envs[envId].nbArms):
Y += eva.getCollisions(armId, envId)
if cumulated:
Y = np.cumsum(Y)
                Y /= eva.nbPlayers # To normalize the count?
plot_method(X[::self.delta_t_plot], Y[::self.delta_t_plot], (markers[evaId] + '-') if cumulated else '.', markevery=((evaId / 50., 0.1) if cumulated else None), label=eva.strPlayers(short=True), color=colors[evaId], alpha=1. if cumulated else 0.7, lw=2)
if not cumulated: add_percent_formatter("yaxis", 1.0)
# We also plot our lower bound
if upperbound and cumulated:
upperboundLog = self.envs[envId].upperbound_collisions(self.nbPlayers, X)
print("Anandkumar et al. upper bound for the non-cumulated number of collisions is {:.3g} * log(t) here ...".format(upperboundLog[-1])) # DEBUG
plot_method(X, upperboundLog, 'k-', label="Anandkumar et al. upper bound", lw=3)
else:
print("No upper bound for the non-cumulated number of collisions...") # DEBUG
# Start the figure
plt.xlabel("Time steps $t = 1...T$, horizon $T = {}${}".format(self.horizon, self.signature))
plt.ylabel("{} of collisions on all arms".format("Cumulated number" if cumulated else "Frequency"))
legend()
plt.title("Multi-players $M = {}$ : {}of collisions, averaged ${}$ times\n{} arm{}s: {}".format(self.nbPlayers, "Cumulated number " if cumulated else "Frequency ", self.cfg['repetitions'], self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def plotFrequencyCollisions(self, envId=0, savefig=None, piechart=True, semilogy=False):
"""Plot the frequency of collision, in a pie chart (histogram not supported yet)."""
nbArms = self.envs[envId].nbArms
Y = np.zeros(1 + nbArms) # One extra arm for "no collision"
labels = [''] * (1 + nbArms) # Empty labels
colors = palette(1 + nbArms) # Get colors
# All the other arms
for armId, arm in enumerate(self.envs[envId].arms):
# Y[armId] = np.sum(self.getCollisions(armId, envId) >= 1) # XXX no, we should not count just the fact that there were collisions, but instead count all collisions
Y[armId] = np.sum(self.getCollisions(armId, envId))
Y /= (self.horizon * self.nbPlayers)
assert 0 <= np.sum(Y) <= 1, "Error: the sum of collisions = {}, averaged by horizon and nbPlayers, cannot be outside of [0, 1] ...".format(np.sum(Y)) # DEBUG
for armId, arm in enumerate(self.envs[envId].arms):
labels[armId] = "#${}$: ${}$ (${:.1%}$$\%$)".format(armId, repr(arm), Y[armId])
print(" - For {},\tfrequency of collisions is {:.5g} ...".format(labels[armId], Y[armId])) # DEBUG
if Y[armId] < 1e-4: # Do not display small slices
labels[armId] = ''
if np.isclose(np.sum(Y), 0):
print("==> No collisions to plot ... Stopping now ...") # DEBUG
return
# Special arm: no collision
Y[-1] = 1 - np.sum(Y) if np.sum(Y) < 1 else 0
labels[-1] = "No collision (${:.1%}$$\%$)".format(Y[-1]) if Y[-1] > 1e-4 else ''
colors[-1] = 'lightgrey'
# Start the figure
fig = plt.figure()
plt.xlabel("{}{}".format(self.strPlayers(), self.signature))
if piechart:
plt.axis('equal')
plt.pie(Y, labels=labels, colors=colors, explode=[0.07] * len(Y), startangle=45)
else:
if semilogy:
Y = np.log10(Y) # use semilogy scale!
Y -= np.min(Y) # project back to [0, oo)
Y /= np.sum(Y) # project back to [0, 1)
for i in range(len(Y)):
plt.axvspan(i - 0.25, i + 0.25, 0, Y[i], label=labels[i], color=colors[i])
plt.xticks(np.arange(len(Y)), ["Arm #$%i$" % i for i in range(nbArms)] + ["No collision"])
plt.ylabel("Frequency of collision, in logarithmic scale" if semilogy else "Frequency of collision")
if not semilogy:
add_percent_formatter("yaxis", 1.0)
legend()
plt.title("Multi-players $M = {}$ : Frequency of collision for each arm, averaged ${}$ times\n{} arm{}s: {}".format(self.nbPlayers, self.cfg['repetitions'], self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=USE_PICKLE)
return fig
def printRunningTimes(self, envId=0, precision=3, evaluators=()):
"""Print the average+-std runnning time of the different players."""
print("\nGiving the mean and std running times ...")
try:
from IPython.core.magics.execution import _format_time
except ImportError:
_format_time = str
evaluators = [self] + list(evaluators) # Default to only [self]
for eva in evaluators:
means, vars, _ = eva.getRunningTimes(envId)
mean_time, std_time = np.sum(means), np.mean(vars)
print("\nFor players called '{}' ...".format(eva.strPlayers(latex=False, short=True)))
if eva.repetitions <= 1:
print(u" {} (mean of 1 run)".format(_format_time(mean_time, precision)))
else:
print(u" {} ± {} per loop (mean ± std. dev. of {} run)".format(_format_time(mean_time, precision), _format_time(std_time, precision), eva.repetitions))
def printMemoryConsumption(self, envId=0, evaluators=()):
"""Print the average+-std memory consumption of the different players."""
print("\nGiving the mean and std memory consumption ...")
evaluators = [self] + list(evaluators) # Default to only [self]
for eva in evaluators:
means, vars, _ = eva.getMemoryConsumption(envId)
print("\nFor players called '{}' ...".format(eva.strPlayers(latex=False, short=True)))
mean_time, std_time = np.sum(means), np.mean(vars)
if eva.repetitions <= 1:
print(u" {} (mean of 1 run)".format(sizeof_fmt(mean_time)))
else:
print(u" {} ± {} (mean ± std. dev. of {} runs)".format(sizeof_fmt(mean_time), sizeof_fmt(std_time), eva.repetitions))
def plotRunningTimes(self, envId=0, savefig=None, base=1, unit="seconds", evaluators=()):
"""Plot the running times of the different players, as a box plot for each evaluators."""
means, all_times, labels = [], [], []
evaluators = [self] + list(evaluators) # Default to only [self]
for eva in evaluators:
_means, _, _all_times = eva.getRunningTimes(envId=envId)
means.append(np.sum(_means))
all_times.append(np.sum(_all_times, axis=0))
labels.append(eva.strPlayers(latex=False, short=True))
# order by increasing mean time
index_of_sorting = np.argsort(means)
labels = [ labels[i] for i in index_of_sorting ]
all_times = [ np.asarray(all_times[i]) / float(base) for i in index_of_sorting ]
fig = plt.figure()
violin_or_box_plot(all_times, labels=labels, boxplot=self.use_box_plot)
plt.xlabel("Policies{}".format(self.signature))
ylabel = "Running times (in {}), for {} repetitions".format(unit, self.repetitions)
plt.ylabel(ylabel)
adjust_xticks_subplots(ylabel=ylabel, labels=labels)
plt.title("Running times for different MP bandit algorithms, horizon $T={}$, averaged ${}$ times\n${}$ arms{}: {}".format(self.horizon, self.repetitions, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=True)
return fig
def plotMemoryConsumption(self, envId=0, savefig=None, base=1024, unit="KiB", evaluators=()):
"""Plot the memory consumption of the different players, as a box plot for each."""
means, all_memories, labels = [], [], []
evaluators = [self] + list(evaluators) # Default to only [self]
for eva in evaluators:
_means, _, _all_memories = eva.getMemoryConsumption(envId=envId)
means.append(np.sum(_means))
all_memories.append(np.sum(_all_memories, axis=0))
labels.append(eva.strPlayers(latex=False, short=True))
# order by increasing mean memory consumption
index_of_sorting = np.argsort(means)
labels = [ labels[i] for i in index_of_sorting ]
all_memories = [ np.asarray(all_memories[i]) / float(base) for i in index_of_sorting ]
fig = plt.figure()
violin_or_box_plot(all_memories, labels=labels, boxplot=self.use_box_plot)
plt.xlabel("Policies{}".format(self.signature))
ylabel = "Memory consumption (in {}), for {} repetitions".format(unit, self.repetitions)
plt.ylabel(ylabel)
adjust_xticks_subplots(ylabel=ylabel, labels=labels)
plt.title("Memory consumption for different MP bandit algorithms, horizon $T={}$, averaged ${}$ times\n${}$ arms{}: {}".format(self.horizon, self.repetitions, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
show_and_save(self.showplot, savefig, fig=fig, pickleit=True)
return fig
def printFinalRanking(self, envId=0, verb=True):
"""Compute and print the ranking of the different players."""
if verb: print("\nGiving the final ranks ...")
assert 0 < self.averageOn < 1, "Error, the parameter averageOn of a EvaluatorMultiPlayers class has to be in (0, 1) strictly, but is = {} here ...".format(self.averageOn) # DEBUG
if verb: print("\nFinal ranking for this environment #{:>2} : {} ...".format(envId, self.strPlayers(latex=False, short=True))) # DEBUG
lastY = np.zeros(self.nbPlayers)
for playerId, player in enumerate(self.players):
Y = self.getRewards(playerId, envId)
if self.finalRanksOnAverage:
                lastY[playerId] = np.mean(Y[-int(self.averageOn * self.horizon):])  # get average value during the last averageOn% of the iterations
else:
lastY[playerId] = Y[-1] # get the last value
# Sort lastY and give ranking
        index_of_sorting = np.argsort(-lastY)  # Sort by decreasing rewards (i.e. increasing regrets), best player first
if verb:
for i, k in enumerate(index_of_sorting):
player = self.players[k]
print("- Player #{:>2} / {}, {}\twas ranked\t{} / {} for this simulation (last rewards = {:.5g}).".format(k + 1, self.nbPlayers, _extract(player.__cachedstr__), i + 1, self.nbPlayers, lastY[k])) # DEBUG
return lastY, index_of_sorting
def printFinalRankingAll(self, envId=0, evaluators=()):
"""Compute and print the ranking of the different players."""
evaluators = [self] + list(evaluators) # Default to only [self]
allLastY = np.zeros(len(evaluators))
for evaId, eva in enumerate(evaluators):
lastY, _ = eva.printFinalRanking(envId=envId, verb=False)
allLastY[evaId] = np.sum(lastY)
# Sort allLastY and give ranking
        index_of_sorting = np.argsort(-allLastY)  # Sort by decreasing rewards (i.e. increasing regrets), best group first
for i, k in enumerate(index_of_sorting):
print("- Group of players #{:>2} / {}, {}\twas ranked\t{} / {} for this simulation (last rewards = {:.5g}).".format(k + 1, len(evaluators), evaluators[k].strPlayers(latex=False, short=True), i + 1, len(evaluators), allLastY[k])) # DEBUG
return allLastY, index_of_sorting
def printLastRegrets(self, envId=0, evaluators=(), moreAccurate=None):
"""Print the last regrets of the different evaluators."""
print("\nGiving the vector of final regrets ...")
evaluators = [self] + list(evaluators) # Default to only [self]
for evaId, eva in enumerate(evaluators):
print("\nFor evaluator #{:>2}/{} : {} (players {}) ...".format(1 + evaId, len(evaluators), eva, eva.strPlayers(latex=False, short=True)))
last_regrets = eva.getLastRegrets(envId=envId, moreAccurate=moreAccurate)
print(" Last regrets vector (for all repetitions) is:")
print("Min of last regrets R_T =", np.min(last_regrets))
print("Mean of last regrets R_T =", np.mean(last_regrets))
print("Median of last regrets R_T =", np.median(last_regrets))
print("Max of last regrets R_T =", np.max(last_regrets))
print("STD var last regrets R_T =", np.std(last_regrets))
def printLastRegretsPM(self, envId=0, evaluators=(), moreAccurate=None):
"""Print the average+-std last regret of the different players."""
print("\nGiving the mean and std last regret ...")
evaluators = [self] + list(evaluators) # Default to only [self]
for eva in evaluators:
last_regrets = eva.getLastRegrets(envId=envId, moreAccurate=moreAccurate)
print("\nFor players called '{}' ...".format(eva.strPlayers(latex=False, short=True)))
mean_regret, std_regret = np.mean(last_regrets), np.std(last_regrets)
# FIXME
mean_regret, std_regret = np.round(mean_regret), np.round(std_regret)
if eva.repetitions <= 1:
print(u" {:g} (mean of 1 run)".format(mean_regret))
else:
print(u" {:g} ± {:g} (mean ± std. dev. of {} runs)".format(mean_regret, std_regret, eva.repetitions))
def plotLastRegrets(self, envId=0,
normed=False, subplots=True, nbbins=15, log=False,
all_on_separate_figures=False, sharex=False, sharey=False,
boxplot=False, normalized_boxplot=True,
savefig=None, moreAccurate=None,
evaluators=()):
"""Plot histogram of the regrets R_T for all evaluators."""
moreAccurate = moreAccurate if moreAccurate is not None else self.moreAccurate
if len(evaluators) == 0: # no need for a subplot
subplots = False
evaluators = [self] + list(evaluators) # Default to only [self]
N = len(evaluators)
colors = palette(N)
if self.repetitions == 1:
boxplot = True
if boxplot:
all_last_regrets = []
labels = []
for evaId, eva in enumerate(evaluators):
last_regret = eva.getLastRegrets(envId=envId, moreAccurate=moreAccurate)
if normalized_boxplot:
last_regret /= np.log(self.horizon)
all_last_regrets.append(last_regret)
labels.append(eva.strPlayers(short=True))
means = [ np.mean(last_regrets) for last_regrets in all_last_regrets ]
# order by increasing mean regret
index_of_sorting = np.argsort(means)
labels = [ labels[i] for i in index_of_sorting ]
all_last_regrets = [ np.asarray(all_last_regrets[i]) for i in index_of_sorting ]
fig = plt.figure()
plt.xlabel("Bandit algorithms{}".format(self.signature))
ylabel = "{}egret value $R_T{}$,\nfor $T = {}$, for {} repetitions".format("Normalized r" if normalized_boxplot else "R", r"/\log(T)" if normalized_boxplot else "", self.horizon, self.repetitions)
plt.ylabel(ylabel, fontsize="x-small")
plt.title("Multi-players $M = {}$ : regrets for different bandit algorithms\n${}$ arms{}: {}".format(self.nbPlayers, self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
violin_or_box_plot(data=all_last_regrets, labels=labels, boxplot=self.use_box_plot)
adjust_xticks_subplots(ylabel=ylabel, labels=labels)
legend()
elif all_on_separate_figures:
figs = []
for evaId, eva in enumerate(evaluators):
fig = plt.figure()
plt.title("Multi-players $M = {}$ : Histogram of regrets for {}\n${}$ arms{}: {}".format(self.nbPlayers, eva.strPlayers(short=True), self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(self.nbPlayers, latex=True)))
plt.xlabel("Regret value $R_T$ at the end of simulation, for $T = {}${}".format(self.horizon, self.signature))
plt.ylabel("{} of observations, ${}$ repetitions".format("Frequency" if normed else "Number", self.repetitions))
last_regrets = eva.getLastRegrets(envId=envId, moreAccurate=moreAccurate)
n, returned_bins, patches = plt.hist(last_regrets, density=normed, color=colors[evaId], bins=nbbins)
addTextForWorstCases(plt, n, returned_bins, patches, normed=normed)
legend()
show_and_save(self.showplot, None if savefig is None else "{}__Algo_{}_{}".format(savefig, 1 + evaId, 1 + N), fig=fig, pickleit=USE_PICKLE)
figs.append(fig)
return figs
elif subplots:
nrows, ncols = nrows_ncols(N)
fig, axes = plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey)
# now for the figure
fig.suptitle("Histogram of regrets for different multi-players bandit algorithms\n${}$ arms{}: {}".format(self.envs[envId].nbArms, self.envs[envId].str_sparsity(), self.envs[envId].reprarms(nbPlayers=self.nbPlayers, latex=True)))
# XXX See https://stackoverflow.com/a/36542971/
ax0 = fig.add_subplot(111, frame_on=False) # add a big axes, hide frame
ax0.grid(False) # hide grid
ax0.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False) # hide tick and tick label of the big axes
# Add only once the ylabel, xlabel, in the middle
ax0.set_ylabel("{} of observations, ${}$ repetitions".format("Frequency" if normed else "Number", self.repetitions))
ax0.set_xlabel("Regret value $R_T$ at the end of simulation, for $T = {}${}".format(self.horizon, self.signature))
# now for the subplots
for evaId, eva in enumerate(evaluators):
i, j = evaId % nrows, evaId // nrows
ax = axes[i, j] if ncols > 1 else axes[i]
# print("evaId = {}, i = {}, j = {}, nrows = {}, ncols = {}, ax = {} ...".format(evaId, i, j, nrows, ncols, ax)) # DEBUG
last_regrets = eva.getLastRegrets(envId=envId, moreAccurate=moreAccurate)
n, returned_bins, patches = ax.hist(last_regrets, density=normed, color=colors[evaId], bins=nbbins, log=log)
addTextForWorstCases(ax, n, returned_bins, patches, normed=normed)
                ax.vlines(np.mean(last_regrets), 0, np.max(n), colors='k', linestyles='dashed')  # vertical line at the mean of the last regrets
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import networkx as nx
import re
import numpy as np
import itertools
_s = re.compile('\s+')
_p = re.compile('(\d+)\s+(\d+)')
def lsqp(atoms):
com = atoms.mean(axis=0)
#u, d, v = np.linalg.svd(atoms-com)
axes = np.zeros((len(atoms), 3))
for i in range(len(atoms)):
p1 = atoms[i]
if i == len(atoms)-1:
p2 = atoms[0]
else:
p2 = atoms[i+1]
a = np.cross(p1, p2)
axes += a
u, d, v = np.linalg.svd(axes)
i = 0
d = -np.dot(v[i], com)
n = -np.array((v[i,0], v[i,1], d))/v[i,2]
return v[i], com, n
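# Usage sketch for lsqp(): given the ring atoms as an (N, 3) array, it returns an
# approximate plane normal (from an SVD of accumulated cross products), the centroid
# of the atoms, and a derived plane parameter that check_ring_penetration() below
# does not use. For a flat square in the z=0 plane the normal comes out along z:
#
#   ring = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
#   axis, com, n = lsqp(ring)   # axis is approximately (0, 0, +/-1), com is (0.5, 0.5, 0)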
def intriangle(triangle, axis, u, p):
# http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm
p1, p2, p3 = triangle
w0 = p - p1
a = -np.dot(axis, w0)
b = np.dot(axis, u)
if (abs(b) < 0.01): return False
r = a / b
if r < 0.0: return False
if r > 1.0: return False
I = p + u * r
u = p2 - p1
v = p3 - p1
uu = np.dot(u, u)
uv = np.dot(u, v)
vv = np.dot(v, v)
w = I - p1
wu = np.dot(w, u)
wv = np.dot(w, v)
D = uv * uv - uu * vv
s = (uv * wv - vv * wu)/D
if (s < 0 or s > 1): return False
t = (uv * wu - uu * wv)/D
if (t < 0 or (s+t) > 1): return False
return True
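# Usage sketch for intriangle(): it tests whether the segment from point p to p + u
# crosses the triangle (p1, p2, p3), where 'axis' is the triangle's plane normal,
# following the parametric intersection + barycentric test referenced above:
#
#   tri = (np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#   axis = np.array([0., 0., 1.])
#   intriangle(tri, axis, np.array([0., 0., -1.]), np.array([0.2, 0.2, 0.5]))  # -> True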
def build_topology(psffile):
g = nx.Graph()
flag = 0
for line in open(psffile).readlines():
if flag == 0 and line.strip().endswith('NATOM'):
natom = int(line.strip().split()[0])
g.natom = natom
flag = 1
continue
if flag == 0 and line.strip().endswith('bonds'):
flag = 2
continue
if flag == 1 and not line.strip(): flag = 0
if flag == 2 and not line.strip(): break
if flag == 1:
num, segid, resid, resname, name = _s.split(line)[1:6]
if resname.startswith('TIP3'): continue
if name.startswith('H'): continue
g.add_node(int(num), {'segid': segid, 'resname': resname, 'name': name, 'resid': resid})
if flag == 2:
for pair in _p.findall(line):
num1, num2 = map(int, pair)
if g.has_node(num1) and g.has_node(num2): g.add_edge(num1, num2)
return g
def build_atomtable(psf, crdfile):
crds = {}
flag = 0
for line in open(crdfile).readlines():
if line.startswith('*'): continue
if flag == 0:
flag = 1
continue
if flag == 1 and not line.strip(): break
if flag == 1:
num, resid, resname, name, x, y, z, segid = _s.split(line.strip())[:8]
if resname.startswith('TIP3'): continue
if name.startswith('H'): continue
if psf.node[int(num)]['name'] != name: raise AtomMismatch("%d %s != %d %s" % (int(num), psf.node[int(num)]['name'], int(num), name))
crds[int(num)] = np.array((float(x), float(y), float(z)))
return crds
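# Typical loading sketch (file names are placeholders): the PSF provides the bond
# graph of the heavy atoms, and the CRD provides their coordinates keyed by the same
# atom numbers; AtomMismatch is raised if the two files disagree on an atom name.
#
#   psf = build_topology('system.psf')
#   crd = build_atomtable(psf, 'system.crd')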
class AtomMismatch(Exception):
pass
def check_ring_penetration(psf, crd, pbc=[], xtl='rect', verbose=0):
# ring penetration test
# 1. find rings
# 2. build least square plane
# 3. project atoms ring constituent atoms onto the plane and build convex
# 4. find two bonded atoms that are at the opposite side of the plane
# 5. determine the point of intersection is enclosed in the ring
#
molecules = nx.connected_component_subgraphs(psf)
allatoms = np.array([crd[num] for num in psf.nodes()])
atoms_map = np.array([num for num in psf.nodes()])
natoms = len(allatoms)
if pbc:
atoms_map_reverse = {}
for i,num in enumerate(psf.nodes()):
atoms_map_reverse[num] = i
a = float(pbc[0])
b = float(pbc[1])
n = len(allatoms)
if xtl == 'rect':
allatoms = np.tile(allatoms, (9,1))
op = ((a,0),(a,b),(0,b),(-a,b),(-a,0),(-a,-b),(0,-b),(a,-b))
for i in range(8):
x,y = op[i]
allatoms[n*(i+1):n*(i+2),0] += x
allatoms[n*(i+1):n*(i+2),1] += y
atoms_map = np.tile(atoms_map, 9)
if xtl =='hexa':
allatoms = np.tile(allatoms, (7,1))
rot = lambda theta: np.matrix(((np.cos(np.radians(theta)), -np.sin(np.radians(theta))),
(np.sin(np.radians(theta)), np.cos(np.radians(theta)))))
op = (rot(15), rot(75), rot(135), rot(195), rot(255), rot(315))
d = np.array((a, 0))
for i in range(6):
xy = np.dot(d, op[i])
allatoms[n*(i+1):n*(i+2),:2] = allatoms[n*(i+1):n*(i+2),:2] + xy
atoms_map = np.tile(atoms_map, 7)
# print out image atoms
#fp = open('image.pdb', 'w')
#for i,atom in enumerate(allatoms):
# x, y, z = atom
# fp.write("HETATM%5d %-3s %3s %4d %8.3f%8.3f%8.3f 0.00 0.00 \n" % (i, 'C', 'DUM', i, x, y, z))
pen_pairs = []
pen_cycles = []
for m in molecules:
cycles = nx.cycle_basis(m)
if not cycles: continue
for cycle in cycles:
flag = False
atoms = np.array([crd[num] for num in cycle])
if len(set([psf.node[num]['resid'] for num in cycle])) > 1: continue
if verbose:
num = cycle[0]
print('found ring:', psf.node[num]['segid'], psf.node[num]['resid'], psf.node[num]['resname'])
# build least square fit plane
axis, com, n = lsqp(atoms)
# project atoms to the least square fit plane
for i,atom in enumerate(atoms):
w = np.dot(axis, atom-com)*axis + com
atoms[i] = com + (atom - w)
maxd = np.max(np.sqrt(np.sum(np.square(atoms - com), axis=1)))
d = np.sqrt(np.sum(np.square(allatoms-com), axis=1))
nums = np.squeeze(np.argwhere(d < 3))
            # find two bonded atoms that sit on opposite sides of the plane
for num in nums:
num1 = atoms_map[num]
for num2 in psf[num1]:
if num1 in cycle or num2 in cycle: continue
if num > natoms:
# image atoms
offset = int(num / natoms)
crd1 = allatoms[num]
crd2 = allatoms[atoms_map_reverse[num2] + offset * natoms]
else:
crd1 = crd[num1]
crd2 = crd[num2]
v1 = np.dot(crd1 - com, axis)
v2 = np.dot(crd2 - com, axis)
if v1 * v2 > 0: continue
# point of intersection of the least square fit plane
s = -np.dot(axis, crd1-com)/np.dot(axis, crd2-crd1)
p = crd1 + s*(crd2-crd1)
d = np.sqrt(np.sum(np.square(p-com)))
if d > maxd: continue
if verbose:
                        print('found potentially penetrating bond:', psf.node[num1]['segid'], psf.node[num1]['resid'], psf.node[num1]['resname'], psf.node[num1]['name'], psf.node[num2]['name'])
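                    # Winding-number test: sum the angles subtended at the
                    # intersection point p by consecutive (projected) ring atoms; a
                    # total of ~2*pi (wn ~ 1) means p lies inside the ring.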
d = 0
for i in range(0, len(atoms)):
p1 = atoms[i] - p
try: p2 = atoms[i+1] - p
except: p2 = atoms[0] - p
d += np.arccos(np.dot(p1, p2)/np.linalg.norm(p1)/np.linalg.norm(p2))
wn = d/2/np.pi
if wn > 0.9 and wn < 1.1:
# we have a case
pen_pairs.append((num1, num2))
pen_cycles.append(cycle)
flag = True
break
if flag: break
return pen_pairs, pen_cycles
def find_alpha_shape(atoms, alpha=-1, verbose=0):
# build alpha-shape of the protein
# requires <NAME>'s Hull program
# http://netlib.sandia.gov/voronoi/hull.html
import subprocess as sp
pid = sp.Popen([hull, '-A', '-oN'], stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, close_fds=True) # Delaunay triangulation
pid.communicate("\n".join(["%12.8f %12.8f %12.8f" % tuple(atom) for atom in atoms]))
flag = False
facets = []
facets_crds = []
vertices = set([])
h = nx.Graph()
for line in open('hout-alf').readlines():
if flag:
i, j, k = map(int, line.strip().split())
facets.append((i, j, k))
facets_crds.append(np.array((atoms[i], atoms[j], atoms[k])))
[vertices.add(x) for x in (i,j,k)]
h.add_edges_from([(i, j), (i, k), (j, k)])
if line.startswith("%") and 'hull' in line: flag = True
facets_crds = np.array(facets_crds)
return h, facets_crds
def check_protein_penetration(psf, crds, verbose=0, output=0):
# protein penetration test
# 1. build alpha-shape of the protein based on the CA and CB atoms within 6A
    # 2. find two atoms that are placed on opposite sides of the hull
#
prot_atoms = np.array([crds[num] for num in psf.nodes() if psf.node[num]['name'] in ['CA', 'CB']])
prot_segids = set([psf.node[num]['segid'] for num in psf.nodes() if psf.node[num]['name'] == 'CA'])
pen_pairs = []
if len(prot_atoms) == 0: return pen_pairs
g, facets_crds = find_alpha_shape(prot_atoms, verbose)
if output:
fp = open(output, 'w')
for node in g.nodes():
x, y, z = prot_atoms[node]
fp.write("HETATM%5d %-3s %3s %4d %8.3f%8.3f%8.3f 0.00 0.00 \n" % (node, 'C', 'DUM', node, x, y, z))
for edge in g.edges():
fp.write("CONECT%5d%5d\n" % edge)
dist = lambda x,y: np.sqrt(np.sum(np.square(x - y)))
flag = False
# find atoms potentially protruding the alpha-surface of the protein
xmax, xmin = np.max(prot_atoms[:,0]), np.min(prot_atoms[:,0])
ymax, ymin = np.max(prot_atoms[:,1]), np.min(prot_atoms[:,1])
visited = []
for node in psf.nodes():
if psf.node[node]['segid'] in prot_segids: continue
if node in visited: continue
crd = crds[node]
if ((crd[0] > (xmax+3) or crd[0] < (xmin-3)) and
(crd[1] > (ymax+3) or crd[1] < (ymin-3))): continue
if np.min(np.sum(np.square(facets_crds[:,0]-crd), axis=1)) > 25: continue
# closest facet
d = np.sum(np.square((np.sum(facets_crds, axis=1)/3) - crd), axis=1)
if
|
np.min(d)
|
numpy.min
|
import tensorflow as tf
import numpy as np
dataset = './point_history.csv'
TIME_STEPS = 16
DIMENSION = 2
NUM_CLASSES = 3
X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (TIME_STEPS * DIMENSION) + 1 + 2)))
model_save_path = './gesture_classifier.hdf5'
model = tf.keras.models.load_model(model_save_path)
tflite_save_path = './gesture_classifier.tflite'
# Convert the model (with quantization)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quantized_model = converter.convert()
open(tflite_save_path, 'wb').write(tflite_quantized_model)
interpreter = tf.lite.Interpreter(model_path=tflite_save_path)
interpreter.allocate_tensors()
# Get the input/output tensor details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Run inference
interpreter.set_tensor(input_details[0]['index'], np.array([X_dataset[0]]))
interpreter.invoke()
tflite_results = interpreter.get_tensor(output_details[0]['index'])
print(
|
np.squeeze(tflite_results)
|
numpy.squeeze
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>-<NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
# Project Imports
from mosqito.functions.shared.load import load
from mosqito.functions.hearing_model.sine_wave_generator import sine_wave_generator
from mosqito.functions.hearing_model.comp_loudness import comp_loudness
from mosqito.functions.hearing_model.mean_loudness_values import mean_loudness_values
from mosqito.functions.hearing_model.reassignment_loudness import reassignment_loudness
from mosqito.functions.hearing_model.sone2phone import sone2phone
import sys
sys.path.append('../../..')
def test_hm_sw():
""" Function that serves as a test and creates a sine wave in order to be processed by the hearing model
"comp_loudness.py".
"""
fs = 48000
duration = 2
db_spl = 60
sin_freq = 1000
# "Peak" value in Pascals (amplitude)
p_ref = 2e-5
sig, time = sine_wave_generator(fs, duration, db_spl, sin_freq)
n = len(sig)
pressure_rms = p_ref * (10.00 ** (db_spl / 20.00))
sensitivity = np.sqrt(2) * pressure_rms
# Calculations for the level from time domain signal
#
rms_time = np.sqrt(np.mean(np.power(sig, 2)))
db_time = 20 * np.log10(abs(np.fft.fft(sig * np.blackman(n))))
window = np.blackman(n)
signal = sig * window
spectrum = np.fft.fftshift(np.fft.fft(signal))
freq = np.fft.fftshift(
|
np.fft.fftfreq(n, 1 / fs)
|
numpy.fft.fftfreq
|
"""Filters patches of a PV module containing sun reflections.
Depending on the camera angle some of the patches of a PV module may contain
severe sun relfections which disturb downstream tasks, such as fault
classification. Sun reflections differ from thermal anomalies in that they are
non-stationary, but instead change position over subsequent patches. This filter
exploits this fact to differentiate modules with reflections from those without.
It works as follows:
1) Compute the maximum temperature and the x, y coordinate of the maximum
temperature in each patch (after slight blurring to reduce impact of noise)
2) Compute discrete element-wise difference between x, y coordinates of max
temp point in subsequent patches
3) Threshold the norm of the two difference signals (threshold_changepoint)
4) Find the segments of the signal from 3 which are zero and contain more than
30 percent of the samples in the sequence (if no segment fulfills this
simply choose the longest segment)
5) Select the segment which has the lowest variance as a reference
6) Compute the median max temperature and median x, y coordinates of the max
temp point in the reference sequence
7) Compute the element-wise difference between the median max temp and the max
temp of each patch (repeat for the x, y coordinates of the max temp point)
8) Label all patches in which the differences from step 7 exceed predefined
thresholds (threshold_temp, threshold_loc) as patches with sun reflection
"""
import os
import glob
import json
import itertools
import operator
import datetime
import numpy as np
import cv2
from PySide6.QtCore import QObject, Signal
from ..utils.common import get_immediate_subdirectories, to_celsius
def get_zero_islands(signal):
"""Get start and stop indices of all zero islands
    in the binary signal, sorted by length (longest first)."""
sig = np.copy(signal)
idxs = []
while len(sig[sig == 0]) > 0:
indices = max((list(y)
for (x, y)
in itertools.groupby((enumerate(sig)), operator.itemgetter(1))
if x == 0), key=len)
start_idx = indices[0][0]
stop_idx = indices[-1][0]
sig[start_idx:stop_idx+1] = 1
idxs.append((start_idx, stop_idx+1))
return idxs
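# Example (binary numpy array input): get_zero_islands(np.array([1, 0, 0, 1, 0]))
# returns [(1, 3), (4, 5)] -- half-open (start, stop) ranges, longest island first.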
def min_temp_var_segment(max_locs_peaks, max_temps,
segment_length_threshold=0.3):
"""Get all sequences which are longer than `segment_length_threshold`*100
percent of the total sequence length, e.g. if `segment_length_threshold`
    is 0.3, up to three segments can be selected. Then select the segment
with lowest variance of max temperature.
"""
idxs = get_zero_islands(max_locs_peaks)
if len(idxs) == 0:
return 0, len(max_temps)-1
idxs_tmp = []
for start_idx, stop_idx in idxs:
if (stop_idx - start_idx) / len(max_locs_peaks) > segment_length_threshold:
idxs_tmp.append((start_idx, stop_idx))
# no sequence is longer than segment_length_threshold, just use the longest one
if len(idxs_tmp) == 0:
idxs_tmp = [idxs[0]]
idxs = idxs_tmp
    # compute temperature variance (or interquartile range)
# of the segments and choose the one with the lowest variance
segment_vars = [np.var(max_temps[start_idx:stop_idx])
for start_idx, stop_idx in idxs]
nominal_segment_idxs = idxs[np.argmin(segment_vars)]
start_idx, stop_idx = nominal_segment_idxs
return start_idx, stop_idx
def predict_sun_reflections(patch_files, to_celsius_gain, to_celsius_offset,
threshold_temp=5.0, threshold_loc=10.0, threshold_changepoint=10.0,
segment_length_threshold=0.3):
if len(patch_files) < 2:
return (
np.array([], dtype=np.int32), np.array([], dtype=np.float64),
np.array([], dtype=np.float64), None, None)
max_locs = []
max_temps = []
for patch_file in patch_files:
patch = cv2.imread(patch_file, cv2.IMREAD_ANYDEPTH)
# average blur image to prevent noise from affecting
# the maximum location
patch = cv2.blur(patch, ksize=(3, 3))
max_temp = np.max(to_celsius(patch, to_celsius_gain, to_celsius_offset))
max_loc = np.unravel_index(np.argmax(patch, axis=None), patch.shape)
max_locs.append(max_loc)
max_temps.append(max_temp)
max_locs =
|
np.vstack(max_locs)
|
numpy.vstack
|
# -*- coding: utf-8 -*-
"""
Create a finite slice sampling of k-space and reconstruct using MLEM
Created on Wed Jul 13 21:07:06 2016
#Lego Sparse: K=0.8 (64 slices, 50% data), s=12, i=301, h=0.8, N=128 NLM
#Lego High Fidelity: K=1.0 (72 slices), s=12, i=1501, h=0.5, N=128 NLM
#Reduction Factors
#0.5:
#Lego: N=256, i=250, s=30, h=1, K=1.2;
#0.25:
#Lego: N=256, i=250, s=12, h=1, K=0.4;
#0.125:
#Lego: N=256, i=580, s=7, h=2, K=0.15;
@author: shakes
"""
from __future__ import print_function # (at top of module)
import _libpath #add custom libs
import finitetransform.mojette as mojette
import finitetransform.imageio as imageio #local module
import finitetransform.farey as farey #local module
import finitetransform.numbertheory as nt #local modules
from skimage.restoration import denoise_tv_chambolle, denoise_nl_means, denoise_bilateral
import scipy.fftpack as fftpack
import pyfftw
import numpy as np
import finite
import time
import math
# Monkey patch in fftn and ifftn from pyfftw.interfaces.scipy_fftpack
fftpack.fft2 = pyfftw.interfaces.scipy_fftpack.fft2
fftpack.ifft2 = pyfftw.interfaces.scipy_fftpack.ifft2
fftpack.fft = pyfftw.interfaces.scipy_fftpack.fft
fftpack.ifft = pyfftw.interfaces.scipy_fftpack.ifft
# Turn on the cache for optimum performance
pyfftw.interfaces.cache.enable()
#parameters
N = 256 #N = 128, 200. N = 256, 400
M = 1*N
K = 0.4
s = 12
iterations = 280
subsetsMode = 1
floatType = np.complex
twoQuads = True
plotColourBar = True
plotSampling = True
smoothReconMode = 2 #0-None,1-TV,2-NL,3-Median, 4-Bilateral
smoothIncrement = 10
smoothMidIteration = iterations-8*smoothIncrement
smoothMaxIteration = iterations-4*smoothIncrement
print("N:", N, "M:", M, "s:", s, "i:", iterations)
angles, subsetsAngles, lengths = mojette.angleSubSets_Symmetric(s,subsetsMode,N,N,1,True,K)
#angles, subsetsAngles, lengths = mojette.angleSubSets_Symmetric(s,subsetsMode,M,M,1,True,K)
perpAngle = farey.farey(1,0)
angles.append(perpAngle)
subsetsAngles[0].append(perpAngle)
print("Number of Angles:", len(angles))
print("angles:", angles) # 132, s=22
p = nt.nearestPrime(M)
print("p:", p)
#bounds
lValue = -3e1
lBound = complex(lValue, lValue)
uValue = 3e1
uBound = complex(uValue, uValue)
BL = np.full((p, p), lBound, dtype=floatType) #p or N?
BU = np.full((p, p), uBound, dtype=floatType)
#check if Katz compliant
if not mojette.isKatzCriterion(M, M, angles):
print("Warning: Katz Criterion not met")
#-------------------------------
#load kspace data
from scipy.io import loadmat
#load Cartesian data
#Attention: You must ensure the kspace data is correctly centered or not centered.
#x = loadmat('data/phantom_water_4.mat')
x = loadmat('data/phantom_lego_4.mat')
data_key = 'Cartesian_kData'
kspace = x[data_key]
#kspace = fftpack.fftshift(kspace)
print("kSpace Shape:", kspace.shape)
kMaxValue = np.max(kspace)
kMinValue = np.min(kspace)
print("k-Space Max Value:", kMaxValue)
print("k-Space Min Value:", kMinValue)
print("k-Space Max Magnitude:", np.abs(kMaxValue))
print("k-Space Min Magnitude:", np.abs(kMinValue))
#-------------------------------
#compute the Cartesian reconstruction for comparison
print("Computing Chaotic Reconstruction...")
dftSpace = kspace
#dftSpace = fftpack.ifftshift(kspace)
#dftSpace = np.fliplr(kspace)
#dftSpace = np.roll(kspace, 2, axis=1) #fix 1 pixel shift
dftSpace = np.roll(kspace, 1, axis=0) #fix 1 pixel shift
image = fftpack.ifft2(dftSpace) #the '2' is important
image = fftpack.ifftshift(image)
image = np.abs(image)
maxValue = np.max(image)
minValue = np.min(image)
print("Image Max Value:", maxValue)
print("Image Min Value:", minValue)
#-------------------------------
#compute lines
centered = True
subsetsLines = []
subsetsMValues = []
mu = 0
for angles in subsetsAngles:
lines, mValues = finite.computeLines(dftSpace, angles, centered, twoQuads)
subsetsLines.append(lines)
subsetsMValues.append(mValues)
mu += len(lines)
print("Number of lines:", mu)
print(subsetsMValues)
#samples used
sampleNumber = (p-1)*mu
print("Samples used:", sampleNumber, ", proportion:", sampleNumber/float(N*N))
print("Lines proportion:", mu/float(N))
#-------------
# Measure finite slice
from scipy import ndimage
print("Measuring slices")
#dftSpace = fftpack.fftshift(dftSpace) #undo centering
powSpectGrid = np.abs(dftSpace)
drtSpace = np.zeros((p+1, p), floatType)
for lines, mValues in zip(subsetsLines, subsetsMValues):
for i, line in enumerate(lines):
u, v = line
sliceReal = ndimage.map_coordinates(np.real(dftSpace), [u,v])
sliceImag = ndimage.map_coordinates(np.imag(dftSpace), [u,v])
slice = sliceReal+1j*sliceImag
# print("slice", i, ":", slice)
# slice = fftpack.fftshift(slice)
# print("Slice DC:", np.abs(slice[0])/p)
finiteProjection = fftpack.ifft(slice) # recover projection using slice theorem
# finiteProjection = fftpack.ifftshift(finiteProjection)
drtSpace[mValues[i],:] = finiteProjection
#print("drtSpace:", drtSpace)
#-------------------------------
#define ABMLEM
def abmlem_expand_complex(iterations, p, g_j, os_mValues, projector, backprojector, epsilon=1e3, dtype=np.int32):
'''
# Shakes' implementation
# From Lalush and Wernick;
# f^\hat <- (f^\hat / |\sum h|) * \sum h * (g_j / g) ... (*)
# where g = \sum (h f^\hat) ... (**)
#
# self.f is the current estimate f^\hat
# The following g from (**) is equivalent to g = \sum (h f^\hat)
'''
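    # Roughly: additive-bound MLEM. The estimate is split into distances from the
    # lower and upper bounds (fL = f - BL, fU = BU - f), each part gets an
    # MLEM-style multiplicative correction in projection space, and the two are
    # recombined as a weighted average so f always stays inside the box [BL, BU].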
norm = False
center = False
fdtype = floatType
f = np.ones((p,p), fdtype)
for i in xrange(0, iterations):
print("Iteration:", i)
for j, mValues in enumerate(os_mValues):
# print("Subset:", j)
muFinite = len(mValues)
fL = f - BL
fU = BU - f
L = projector(BL, p, fdtype, mValues)
U = projector(BU, p, fdtype, mValues)
gL = projector(fL, p, fdtype, mValues)
gU = projector(fU, p, fdtype, mValues)
# form parenthesised term (g_j / g) from (*)
rL = g_j - L
rU = U - g_j
for m in mValues:
rL[m,:] /= gL[m,:]
rU[m,:] /= gU[m,:]
# backproject to form \sum h * (g_j / g)
g_rL = backprojector(rL, p, norm, center, 1, 0, mValues) / muFinite
g_rU = backprojector(rU, p, norm, center, 1, 0, mValues) / muFinite
# Combine the upper/lower bounds and complex parts
IL = fL*g_rL
IU = fU*g_rU
#combine to get f
f = (IL*BU + IU*BL) / (IL + IU)
if smoothReconMode > 0 and i % smoothIncrement == 0 and i > 0: #smooth to stem growth of noise
fCenter = fftpack.ifftshift(f) #avoid padding issues with some smoothing algorithms by ensuring image is centered
fReal = np.real(fCenter)
fImag =
|
np.imag(fCenter)
|
numpy.imag
|
import numpy as np
import os
import sys
import esutil
import time
import scipy.optimize
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.colors as colors
import matplotlib.cm as cmx
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
from .fgcmUtilities import Cheb2dField
class FgcmSuperStarFlat(object):
"""
Class to compute the SuperStarFlat.
parameters
----------
fgcmConfig: FgcmConfig
fgcmPars: FgcmParameters
fgcmStars: FgcmStars
Config variables
----------------
ccdGrayMaxStarErr: float
Maximum error for any star observation to be used to compute superStar
superStarSubCCD: bool, default=False
Compute superStar flats on sub-ccd scale?
"""
def __init__(self,fgcmConfig,fgcmPars,fgcmStars):
self.fgcmLog = fgcmConfig.fgcmLog
self.fgcmLog.debug('Initializing FgcmSuperStarFlat')
self.fgcmPars = fgcmPars
self.fgcmStars = fgcmStars
self.illegalValue = fgcmConfig.illegalValue
self.minStarPerCCD = fgcmConfig.minStarPerCCD
self.plotPath = fgcmConfig.plotPath
self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
self.epochNames = fgcmConfig.epochNames
self.ccdStartIndex = fgcmConfig.ccdStartIndex
self.ccdGrayMaxStarErr = fgcmConfig.ccdGrayMaxStarErr
self.quietMode = fgcmConfig.quietMode
self.superStarSubCCD = fgcmConfig.superStarSubCCD
self.superStarSubCCDChebyshevOrder = fgcmConfig.superStarSubCCDChebyshevOrder
self.superStarSubCCDTriangular = fgcmConfig.superStarSubCCDTriangular
self.superStarSigmaClip = fgcmConfig.superStarSigmaClip
def setDeltaMapperDefault(self, deltaMapperDefault):
"""
Set the deltaMapperDefault array.
Parameters
----------
deltaMapperDefault : `np.recarray`
"""
self.deltaMapperDefault = deltaMapperDefault
def computeSuperStarFlats(self, doPlots=True, doNotUseSubCCD=False, onlyObsErr=False, forceZeroMean=False):
"""
Compute the SuperStar Flats
parameters
----------
doPlots: bool, default=True
doNotUseSubCCD: bool, default=False
Override any setting of superStarSubCCD (used for initial guess)
onlyObsErr: bool, default=False
Only use observation error (used for initial guess)
forceZeroMean: bool, default=False
Force the mean superstar to be zero in each epoch/band
"""
startTime = time.time()
self.fgcmLog.debug('Computing superstarflats')
# New version, use the stars directly
objID = snmm.getArray(self.fgcmStars.objIDHandle)
objMagStdMean = snmm.getArray(self.fgcmStars.objMagStdMeanHandle)
objMagStdMeanErr = snmm.getArray(self.fgcmStars.objMagStdMeanErrHandle)
objNGoodObs = snmm.getArray(self.fgcmStars.objNGoodObsHandle)
obsMagStd = snmm.getArray(self.fgcmStars.obsMagStdHandle)
obsMagErr = snmm.getArray(self.fgcmStars.obsMagADUModelErrHandle)
obsSuperStarApplied = snmm.getArray(self.fgcmStars.obsSuperStarAppliedHandle)
obsBandIndex = snmm.getArray(self.fgcmStars.obsBandIndexHandle)
obsCCDIndex = snmm.getArray(self.fgcmStars.obsCCDHandle) - self.ccdStartIndex
obsIndex = snmm.getArray(self.fgcmStars.obsIndexHandle)
objObsIndex = snmm.getArray(self.fgcmStars.objObsIndexHandle)
obsObjIDIndex = snmm.getArray(self.fgcmStars.obsObjIDIndexHandle)
obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
obsFlag = snmm.getArray(self.fgcmStars.obsFlagHandle)
# Flag bad observations here...
self.fgcmStars.performSuperStarOutlierCuts(self.fgcmPars)
goodStars = self.fgcmStars.getGoodStarIndices(checkMinObs=True)
_, goodObs = self.fgcmStars.getGoodObsIndices(goodStars, expFlag=self.fgcmPars.expFlag, checkBadMag=True)
# we need to compute E_gray == <mstd> - mstd for each observation
# compute EGray, GO for Good Obs
EGrayGO, EGrayErr2GO = self.fgcmStars.computeEGray(goodObs, onlyObsErr=onlyObsErr)
# one more cut on the maximum error
# as well as making sure that it didn't go below zero
gd,=np.where((EGrayErr2GO < self.ccdGrayMaxStarErr) & (EGrayErr2GO > 0.0) &
(np.abs(EGrayGO) < 50.0))
goodObs=goodObs[gd]
# unapply input superstar correction here (note opposite sign)
EGrayGO=EGrayGO[gd] + obsSuperStarApplied[goodObs]
EGrayErr2GO=EGrayErr2GO[gd]
# and record the deltas (per ccd)
prevSuperStarFlatCenter = np.zeros((self.fgcmPars.nEpochs,
self.fgcmPars.nLUTFilter,
self.fgcmPars.nCCD))
superStarFlatCenter = np.zeros_like(prevSuperStarFlatCenter)
superStarNGoodStars = np.zeros_like(prevSuperStarFlatCenter, dtype=np.int32)
# and the mean and sigma over the focal plane for reference
superStarFlatFPMean = np.zeros((self.fgcmPars.nEpochs,
self.fgcmPars.nLUTFilter))
superStarFlatFPSigma = np.zeros_like(superStarFlatFPMean)
deltaSuperStarFlatFPMean = np.zeros_like(superStarFlatFPMean)
deltaSuperStarFlatFPSigma = np.zeros_like(superStarFlatFPMean)
# Note that we use the cheb2dFunc even when the previous numbers
# were just an offset, because the other terms are zeros
prevSuperStarFlatCenter[:, :, :] = self.fgcmPars.superStarFlatCenter
if not np.any(self.superStarSubCCD) or doNotUseSubCCD:
# do not use subCCD x/y information (or x/y not available)
mark = np.ones(goodObs.size, dtype=bool)
# Next, we sort by epoch, band
superStarWt = np.zeros_like(superStarFlatCenter)
superStarOffset =
|
np.zeros_like(superStarWt)
|
numpy.zeros_like
|
# Ciholas, Inc. - www.ciholas.com
# Licensed under: creativecommons.org/licenses/by/4.0
# System libraries
from functools import partial
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
# Local libraries
from cdp import GyroscopeV2
from network_objects import *
from settings import *
class PlotGyroV2(QtGui.QMainWindow):
type = GyroscopeV2.type
def __init__(self, serial):
QtGui.QMainWindow.__init__(self)
self.central = QtGui.QWidget() #This will be our central widget
self.serial = serial
self.setWindowTitle('CUWB Monitor - Gyroscope V2 Devices ID: 0x{:08X}'.format(serial))
self.grid_layout = QtGui.QGridLayout()
self.running = True
self.sub_windows = dict([])
self.id_total = 0
self.from_id_id_labels = dict()
self.from_id_count_labels = dict()
self.from_id_freq_labels = dict()
self.from_id_enable_checks = dict()
self.from_id_frequency_deques = dict()
self.from_id_count = dict()
self.from_ids = np.array([])
self.previous_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type] - len(UwbNetwork.nodes[self.serial].cdp_pkts[self.type])
self.grid_layout.addWidget(QtGui.QLabel("Serial#"), 0, 0)
self.grid_layout.addWidget(QtGui.QLabel("Packet Count"), 0, 1)
self.grid_layout.addWidget(QtGui.QLabel("Frequency"), 0, 2)
self.grid_layout.addWidget(QtGui.QLabel("Print"), 0, 3)
self.update_labels()
self.central.setLayout(self.grid_layout)
self.setCentralWidget(self.central)
self.resize(400, 50)
self.timer = self.startTimer(QPLOT_FREQUENCY)
def timerEvent(self, e):
if not UwbNetwork.running:
self.close()
return
self.update_labels()
def closeEvent(self, e):
self.killTimer(self.timer)
self.running = False
def update_labels(self):
_current_size = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type] - self.previous_count
if _current_size > 1000: _current_size = 1000
self.previous_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type]
for idx in range(_current_size):
_target_id = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].serial_number.as_int
if not (_target_id in self.from_ids):
self.from_id_id_labels.update([(self.id_total, QtGui.QLabel())])
self.from_id_count_labels.update([(self.id_total, QtGui.QLabel())])
self.from_id_freq_labels.update([(self.id_total, QtGui.QLabel())])
self.from_id_enable_checks.update([(self.id_total, QtGui.QCheckBox())])
self.from_id_frequency_deques.update([(_target_id, deque([], FREQUENCY_CALCULATION_DEQUE_LENGTH))])
self.from_id_count.update([(_target_id, 0)])
self.from_ids = np.sort(np.append(self.from_ids, _target_id))
_row = self.id_total
_column = 0
self.grid_layout.addWidget(self.from_id_id_labels[self.id_total], _row+1, _column + 0)
self.grid_layout.addWidget(self.from_id_count_labels[self.id_total], _row+1, _column + 1)
self.grid_layout.addWidget(self.from_id_freq_labels[self.id_total], _row+1, _column + 2)
self.grid_layout.addWidget(self.from_id_enable_checks[self.id_total], _row+1, _column + 3)
if _column > 0:
_row = 2
self.grid_layout.addWidget(QtGui.QLabel("Serial#"), _row, _column + 0)
self.grid_layout.addWidget(QtGui.QLabel("Packet Count"), _row, _column + 1)
self.grid_layout.addWidget(QtGui.QLabel("Frequency"), _row, _column + 2)
self.grid_layout.addWidget(QtGui.QLabel("Print"), _row, _column + 3)
self.id_total += 1
self.from_id_count[_target_id] += 1
if _target_id in self.from_ids:
_row = np.where(self.from_ids==_target_id)[0][0]
if self.from_id_enable_checks[_row].isChecked():
print(UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size])
if _target_id in self.sub_windows.keys():
_scale = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].scale / 2147483647.0
_x = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].x * _scale
_y = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].y * _scale
_z = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].z * _scale
_time = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].network_time * TICK
# _time = UwbNetwork.nodes[self.serial].cdp_pkts_time[self.type][idx - _current_size]
self.sub_windows[_target_id].update_data(_x, _y, _z, _time)
for _target_id in self.from_ids:
self.from_id_frequency_deques[_target_id].append((self.from_id_count[_target_id], time.time()))
for _row in range(self.id_total):
_target_id = int(self.from_ids[_row])
if self.from_id_id_labels[_row].text() != '0x{:08X}'.format(_target_id):
self.from_id_id_labels[_row].setText('0x{:08X}'.format(_target_id))
self.from_id_id_labels[_row].setStyleSheet('color:blue')
self.from_id_id_labels[_row].mouseReleaseEvent = partial(self.labelClickEvent, _target_id)
_freq = UwbNetwork.nodes[self.serial].calculate_frequency(self.from_id_frequency_deques[_target_id])
self.from_id_count_labels[_row].setText('{:5d}'.format(self.from_id_count[_target_id]))
self.from_id_freq_labels[_row].setText('{:5.1f}Hz'.format(_freq))
def labelClickEvent(self, serial, e):
self.sub_windows.update([(serial, PlotGyroV2SubWindow(serial, self))])
def reset(self):
for target_id in self.from_ids:
self.from_id_count[target_id] = 0
self.from_id_frequency_deques[target_id] = deque([], FREQUENCY_CALCULATION_DEQUE_LENGTH)
for target_id in self.sub_windows:
self.sub_windows[target_id].reset()
self.previous_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type]
class PlotGyroV2SubWindow(pg.GraphicsWindow):
def __init__(self, serial, parent):
pg.GraphicsWindow.__init__(self)
self.setWindowTitle('CUWB Monitor - Gyro V2 Plot ID: 0x{:08X}'.format(serial))
self.serial = serial
self.resize(1200, 800)
self.parent = parent
self.x_data = deque([], TRAIL_LENGTH)
self.y_data = deque([], TRAIL_LENGTH)
self.z_data = deque([], TRAIL_LENGTH)
self.t_data = deque([], TRAIL_LENGTH)
self.x_azimuth = 0
self.y_azimuth = 0
self.z_azimuth = 0
self.last_azimuth_update = time.time()
self.graph = self.addPlot(title='GyroScope XYZ', row=0, col=0, colspan=3)
self.graph.setYRange(-15, 15)
self.graph.showGrid(x=True, y=True)
self.legend = self.graph.addLegend()
self.plot_x = self.graph.plot(name='X', pen=pg.mkPen('r', width=2))
self.plot_y = self.graph.plot(name='Y', pen=pg.mkPen('g', width=2))
self.plot_z = self.graph.plot(name='Z', pen=pg.mkPen('b', width=2))
self.x_direction_graph = self.addPlot(title='XY Rotation', row=1, col=0, colspan=1)
self.x_direction_graph.addLine(x=0, pen=0.2)
self.x_direction_graph.addLine(y=0, pen=0.2)
for r in range(2,20,2):
_circle = pg.QtGui.QGraphicsEllipseItem(-r,-r,r * 2,r * 2)
_circle.setPen(pg.mkPen(0.2))
self.x_direction_graph.addItem(_circle)
self.x_direction_arrow = pg.ArrowItem(angle=90, tipAngle=30, headLen=40, tailLen=150, tailWidth=5, brush='r', pen={'color':'r', 'width':1})
self.x_direction_graph.addItem(self.x_direction_arrow)
self.x_direction_arrow.setPos(0,20)
self.x_direction_text = pg.TextItem(text="", color='w', anchor=(0,0))
self.x_direction_graph.addItem(self.x_direction_text)
self.y_direction_graph = self.addPlot(title='XZ Rotation', row=1, col=1, colspan=1)
self.y_direction_graph.addLine(x=0, pen=0.2)
self.y_direction_graph.addLine(y=0, pen=0.2)
for r in range(2,20,2):
_circle = pg.QtGui.QGraphicsEllipseItem(-r,-r,r * 2,r * 2)
_circle.setPen(pg.mkPen(0.2))
self.y_direction_graph.addItem(_circle)
self.y_direction_arrow = pg.ArrowItem(angle=90, tipAngle=30, headLen=40, tailLen=150, tailWidth=5, brush='g', pen={'color':'g', 'width':1})
self.y_direction_graph.addItem(self.y_direction_arrow)
self.y_direction_arrow.setPos(0, 20)
self.y_direction_text = pg.TextItem(text="", color='w', anchor=(0,0))
self.y_direction_graph.addItem(self.y_direction_text)
self.z_direction_graph = self.addPlot(title='YZ Rotation', row=1, col=2, colspan=1)
self.z_direction_graph.addLine(x=0, pen=0.2)
self.z_direction_graph.addLine(y=0, pen=0.2)
for r in range(2, 20, 2):
_circle = pg.QtGui.QGraphicsEllipseItem(-r,-r,r * 2,r * 2)
_circle.setPen(pg.mkPen(0.2))
self.z_direction_graph.addItem(_circle)
self.z_direction_arrow = pg.ArrowItem(angle=90, tipAngle=30, headLen=40, tailLen=150, tailWidth=5, brush='b', pen={'color':'b', 'width':1})
self.z_direction_graph.addItem(self.z_direction_arrow)
self.z_direction_arrow.setPos(0, 20)
self.z_direction_text = pg.TextItem(text="", color='w', anchor=(0,0))
self.z_direction_graph.addItem(self.z_direction_text)
self.timer = self.startTimer(QPLOT_FREQUENCY)
self.running = True
def timerEvent(self, e):
if not UwbNetwork.running or not self.parent.running:
self.running = False
self.close()
return
if len(self.t_data) == 0: return
self.plot_x.setData(self.t_data, self.x_data)
self.plot_y.setData(self.t_data, self.y_data)
self.plot_z.setData(self.t_data, self.z_data)
# _current_time = time.time()
_current_time = self.t_data[-1]
self.x_azimuth -= np.mean(np.array(self.x_data)[-20:]) * (_current_time - self.last_azimuth_update)
x_azimuth_display = (360 - self.x_azimuth + 90) % 360
self.x_direction_arrow.setRotation((90 - x_azimuth_display) % 360)
self.x_direction_arrow.setPos(20.0 * np.cos(np.radians(x_azimuth_display)), 20.0 * np.sin(
|
np.radians(x_azimuth_display)
|
numpy.radians
|
import lasagne
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import MergeLayer, get_output
from cascadenet.network.layers.fourier import FFT2Layer
def roll_and_sum(prior_result, orig):
res = prior_result + orig
res = T.roll(res, 1, axis=-1)
return res
class KspaceFillNeighbourLayer(MergeLayer):
'''
k-space fill layer - The input data is assumed to be in k-space grid.
This layer should be invoked from AverageInKspaceLayer
'''
def __init__(self, incomings, frame_dist=range(5), divide_by_n=False,
**kwargs):
super(KspaceFillNeighbourLayer, self).__init__(incomings, **kwargs)
self.frame_dist = frame_dist
n_samples = [1 + 2*i for i in self.frame_dist]
self.n_samples = n_samples
self.divide_by_n = divide_by_n
def get_output_for(self, inputs, **kwargs):
'''
Parameters
------------------------------
inputs: two 5d tensors, [kspace_data, mask], each of shape (n, 2, nx, ny, nt)
Returns
------------------------------
output: 5d tensor, missing lines of k-space are filled using neighbouring frames.
        shape becomes (n * len(frame_dist), 2, nx, ny, nt)
'''
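        # theano.scan applies roll_and_sum repeatedly: each step adds the k-space
        # data and rolls it along the temporal axis, so successive scan outputs
        # accumulate data from a growing window of neighbouring frames; the parallel
        # scan over the mask counts how many acquired samples land in each window,
        # presumably so the shared data can be normalised (or divided by n) later.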
x = inputs[0]
mask = inputs[1]
result, _ = theano.scan(fn=roll_and_sum,
outputs_info=T.zeros_like(x),
non_sequences=(x),
n_steps=T.constant(np.max(self.n_samples)))
mask_result, _ = theano.scan(fn=roll_and_sum,
outputs_info=T.zeros_like(x),
non_sequences=(mask),
n_steps=T.constant(
|
np.max(self.n_samples)
|
numpy.max
|
import pickle
import os
import sys
import numpy as np
import torch
import torch.utils.data as torch_data
from torch.utils.data import DataLoader
class ScannetDataset(torch_data.Dataset):
def __init__(self,
root= '/data/eva_share_users/zhaotianchen/scannet/raw/scannet_pickles',
npoints=10240,
split='train',
with_dropout=False,
with_norm=True,
with_rgb=True,
with_seg=False,
with_instance=False,
with_pred=False,
sample_rate=None):
super().__init__()
print(' ---- load data from', root)
self.NUM_LABELS = 20
self.NUM_IN_CHANNEL = 3
self.NEED_PRED_POSTPROCESSING = False
self.npoints = npoints
self.with_dropout = with_dropout
self.indices = [0, 1, 2]
if with_norm: self.indices += [3, 4, 5]
if with_rgb: self.indices += [6, 7, 8]
# assert only 1 of the with_instance/pred/seg is True
assert sum([with_instance, with_seg, with_pred is not None]) <= 1
self.with_aux = with_instance or with_seg or with_pred
print('load scannet dataset <{}> with npoint {}, indices: {}.'.format(split, npoints, self.indices))
# deprecated version of pickle load
# data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
# with open(data_filename, 'rb') as fp:
# self.scene_points_list = pickle.load(fp)
# self.semantic_labels_list = pickle.load(fp)
# # scene_points_id = pickle.load(fp)
# num_point_all = pickle.load(fp)
# TEST: newer loading of the pth file
data_filename = os.path.join(root, 'new_{}.pth'.format(split))
data_dict = torch.load(data_filename)
self.scene_points_list = data_dict['data']
self.semantic_labels_list = data_dict['label']
if self.with_aux:
if with_instance:
self.instance_label_list = data_dict['instance']
elif with_seg:
self.instance_label_list = data_dict['label']
elif with_pred:
self.instance_label_list = torch.load(os.path.join(with_pred, "{}_pred.pth".format(split)))['pred']
else:
pass
#scene_points_id = pickle.load(fp)
num_point_all = data_dict['npoints']
if split == 'train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
# self.labelweights = 1/np.log(1.2+labelweights)
self.labelweights = np.power(np.amax(labelweights[1:]) / labelweights, 1 / 3.0)
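            # Cube-root inverse-frequency weighting: each class weight is
            # (max_freq / freq)**(1/3), where max_freq is taken over the 20
            # annotated classes (class 0, unannotated, is excluded from the max).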
elif split == 'eval' or split == 'test' or split == 'debug':
self.labelweights = np.ones(21)
else:
raise ValueError('split must be train or eval.')
# sample & repeat scenes, older version deprecated
if sample_rate is not None:
num_point = npoints
sample_prob = num_point_all / np.sum(num_point_all)
num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
room_idxs = []
for index in range(len(self.scene_points_list)):
repeat_times = round(sample_prob[index] * num_iter)
repeat_times = int(max(repeat_times, 1))
room_idxs.extend([index] * repeat_times)
self.room_idxs = np.array(room_idxs)
np.random.seed(123)
np.random.shuffle(self.room_idxs)
else:
self.room_idxs = np.arange(len(self.scene_points_list))
print("Totally {} samples in {} set.".format(len(self.room_idxs), split))
def __getitem__(self, index):
index = self.room_idxs[index]
data_set = self.scene_points_list[index]
point_set = data_set[:, :3]
if self.with_aux:
instance_set = self.instance_label_list[index]
semantic_seg = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set, axis=0)
coordmin = np.min(point_set, axis=0)
smpmin = np.maximum(coordmax-[2, 2, 3.0], coordmin)
smpmin[2] = coordmin[2]
smpsz = np.minimum(coordmax-smpmin,[2,2,3.0])
smpsz[2] = coordmax[2]-coordmin[2]
isvalid = False
# randomly choose a point as center point and sample <n_points> points in the box area of center-point
for i in range(10):
curcenter = point_set[np.random.choice(len(semantic_seg),1)[0],:]
curmin = curcenter - [1, 1, 1.5]
curmax = curcenter + [1, 1, 1.5]
curmin[2] = coordmin[2]
curmax[2] = coordmax[2]
curchoice = np.sum((point_set >= (curmin - 0.2)) * (point_set <= (curmax + 0.2)), axis=1) == 3
cur_point_set = point_set[curchoice, :]
cur_data_set = data_set[curchoice, :]
if self.with_aux:
try:
cur_instance_set = instance_set[curchoice]
except IndexError:
import ipdb; ipdb.set_trace()
cur_semantic_seg = semantic_seg[curchoice]
if len(cur_semantic_seg) == 0:
continue
mask = np.sum((cur_point_set >= (curmin - 0.01)) * (cur_point_set <= (curmax + 0.01)), axis=1) == 3
vidx = np.ceil((cur_point_set[mask, :] - curmin) / (curmax - curmin) * [31.0, 31.0, 62.0])
vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 + vidx[:, 2])
isvalid = np.sum(cur_semantic_seg > 0) / len(cur_semantic_seg) >= 0.7 and len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
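            # A sampled crop is accepted when >= 70% of its points carry a semantic
            # label and its occupied voxels cover >= 2% of a 31 x 31 x 62 grid.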
if isvalid:
break
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
semantic_seg = cur_semantic_seg[choice]
if self.with_aux:
instance_seg = cur_instance_set[choice]
mask = mask[choice]
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask
selected_points = cur_data_set[choice, :] # np * 6, xyz + rgb
point_set =
|
np.zeros((self.npoints, 9))
|
numpy.zeros
|
"""
Provide functions used in features implementation
Provides: xxxx
Author: <NAME> (Peter) Xue
"""
import numpy as np
import scipy.fftpack as fft
from scipy import signal
import spectrum
def get_num_fft(sample_rate, window_len):
"""
Function get_num_fft calculates optimal number of FFT points based on frame length.
Less number of FFT points than length of frame
will lose precision by droppping many of the samples.
Therefore, we want num_fft as a power of 2, greater than frame length.
@param sample_rate: The sample rate of audio signal we working with.
@param window_len: Time interval we are taking within frames.
@returns: Optimal number of FFT points.
"""
frame_length = sample_rate * window_len
num_fft = 1
while num_fft < frame_length:
num_fft *= 2
return num_fft
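# Example: a 25 ms window at 16 kHz spans 400 samples, so get_num_fft(16000, 0.025)
# returns 512, the next power of two at or above the frame length.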
def powspec(signal, sample_rate, window_len, hop_size, num_fft):
"""
Function powspec produces the power spectrum of the given audio signal
@param signal: Audio signal we are working with.
@param sample_rate: The sample rate of our audio signal.
@param window_len: Time interval we are taking within frames.
@param hop_size: Time step we are taking between frames.
@param num_fft: Number of FFT points.
@returns: A Power spectrum.
"""
# Convert from seconds to samples.
frame_length, frame_stride = window_len * sample_rate, hop_size * sample_rate
frame_length, frame_stride = int(round(frame_length)), int(round(frame_stride))
signal_length = len(signal)
# Make sure that we have at least 1 frame.
num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_stride))
pad_signal_length = num_frames * frame_stride + frame_length
diff = np.zeros((pad_signal_length - signal_length))
# Pad Signal to make sure that all frames have equal number of samples
# without truncating any samples from the original signal.
pad_signal = np.append(signal, diff)
indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_stride, frame_stride), (frame_length, 1)).T
frames = pad_signal[indices.astype(np.int32, copy=False)]
# Apply Hamming window to frames.
frames *= np.hamming(int(round(sample_rate * window_len)))
# Calculate the Power Spectrum of the frames.
magnitude_frames = np.absolute(np.fft.rfft(frames, num_fft))
power_frames = ((1.0 / num_fft) * (magnitude_frames) ** 2)
energy = np.log(sum(power_frames)) # Calculate log energy.
return power_frames, energy
def get_filter(freq_min, freq_max, num_mel_filter, num_fft, sample_rate):
"""
Function get_filter_points calculates where the filters in filter bank locate
@param freq_min: Lowest frequency band edge of Mel filters in Hz.
@param freq_max: Highest frequency band edge of Mel filters in Hz.
@param num_mel_filter: Number of filter points in filter banks on Mel scale.
@param num_fft: Number of FFT points.
@param sample_rate: The sample rate of audio signal we working with.
@returns: Filters used for computing filter bank feature.
"""
# Get filter points.
freq_min_mel = hz2mel(freq_min)
freq_max_mel = hz2mel(freq_max)
mels = np.linspace(freq_min_mel, freq_max_mel, num=num_mel_filter+2)
freqs = mel2hz(mels)
filter_points = np.floor((num_fft + 1) / sample_rate * freqs).astype(int)
# Get filter bank filters.
filters = np.zeros((len(filter_points)-2, int(num_fft/2+1)))
for n in range(len(filter_points)-2):
filters[n, filter_points[n] : filter_points[n+1]] = np.linspace(0, 1, filter_points[n+1] - filter_points[n])
filters[n, filter_points[n + 1] : filter_points[n + 2]] = np.linspace(1, 0, filter_points[n+2] - filter_points[n+1])
return filters
def pre_emphasis(signal, coef):
"""
Function pre-emphasis applies pre-emphasis filter
on the signal to amplify the high frequencies.
@param signal: Audio signal.
@param coef: Coefficient used in pre-empahsis filter.
@returns: Pre-emphasized signal after applying the filter.
"""
return np.append(signal[0], signal[1:] - coef * signal[:-1])
def hz2mel(freq):
"""
Function hz2mel calculates Mel values.
@param freq: Frequency.
@returns: Corresponding Mel value for given frequency.
"""
return 2595.0 * np.log10(1.0 + freq / 700.0)
def mel2hz(mels):
"""
Function mel2hz calculates Hertz values.
@param mel: Mel value.
@returns: Corresponding Hertz value for given Mel value.
"""
hz = 700.0 * (10.0 ** (mels / 2595.0) - 1.0)
return hz
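# Example round trip: hz2mel(700) = 2595 * log10(2) ~= 781.2 mel, and
# mel2hz(781.2) ~= 700.0 Hz, since the two functions are inverses of each other.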
def hz2bark(freq):
"""
Function hz2bark calculates Bark scale.
Use Traunmueller-formula for f > 200 Hz
Linear mapping for f <= 200 Hz
@param freq: Frequency.
@returns: Corresponding Bark scale for given frequency.
"""
z_less_200 = freq / 102.9
z_greater_200 = 26.81 * freq / (1960 + freq) - 0.53
return (freq > 200) * z_greater_200 + (freq <= 200) * z_less_200
def bark2hz(z):
"""
    Function bark2hz uses Hynek's formula to calculate the corresponding Hertz value.
@param z: Bark scale.
@returns: corresponding Hertz to z.
"""
hz = np.multiply(600, np.sinh(np.divide(z, 6)))
return hz
def audspec(powspec,sample_rate=None,num_filters=None,fbtype='bark',freq_min=0,freq_max=None,sum_power=True,bandwidth=1.0):
"""
Function audspec performs critical band analysis.
@param powerspec: Power Spectrum.
@param sample_rate: The sample rate of our audio signal.
@param num_filters: Number of filters.
@param fbtype: The frequency type we are working with.
@param freq_min: Lowest frequency in Bark scale.
@param freq_max: Highest frequency in Bark scale.
    @param sum_power: If True, integrate FFT bins in the power-spectrum domain; otherwise sum magnitudes and square the result.
@param bandwidth: The critical bandwidth.
@returns: Corresponding Mel value for given frequency.
"""
# Handle exceptions.
if not sample_rate: # Check sample_rate input validness.
return('Invalid input for sample_rate')
# Initialize parameters.
freq_max = freq_max or int(sample_rate/2)
num_filters = num_filters or np.ceil(hz2bark(sample_rate/2)) + 1
num_freqs = powspec.shape[0]
num_fft = (int(num_freqs) - 1) * 2
# Consider frequency domains.
if fbtype == 'bark':
weight_matrix = fft2barkmx(num_fft, sample_rate, num_filters, bandwidth, freq_min, freq_max)
elif fbtype == 'mel':
weight_matrix = fft2melmx(num_fft, sample_rate, num_filters, bandwidth, freq_min, freq_max)
elif fbtype == 'htkmel':
weight_matrix = fft2melmx(num_fft, sample_rate, num_filters, bandwidth, freq_min, freq_max, 1, 1)
elif fbtype == 'fcmel':
weight_matrix = fft2melmx(num_fft, sample_rate, num_filters, bandwidth, freq_min, freq_max, 1, 0)
else:
return 'Invalid fbtype input'
weight_matrix = weight_matrix[:, 0:num_freqs]
    # Integrate FFT bins into Mel bins, in the power domain (sum_power=True) or by summing magnitudes and squaring.
if sum_power:
aspectrum = np.matmul(weight_matrix, powspec)
else:
aspectrum = np.power((np.matmul(weight_matrix,np.sqrt(powspec))), 2)
return aspectrum
def fft2barkmx(fft_length, fs, nfilts = 0, band_width = 1, min_freq = 0, max_freq = 0):
"""
    Function fft2barkmx generates a matrix of weights
    to combine FFT bins into Bark bins.
    @param fft_length: Number of FFT points.
    @param fs: The sample rate of our audio signal.
    @param nfilts: Number of filters. Default is 0 (derived from the Bark range).
    @param band_width: Constant width of each band in Bark. Default is 1.
    @param min_freq: Lowest frequency in Hertz. Default is 0.
    @param max_freq: Highest frequency in Hertz. Default is fs / 2.
@returns: A matrix of weights to combine FFT bins into Bark bins.
"""
# Initialize parameters.
if max_freq == 0:
max_freq = fs / 2
min_bark = hz2bark(min_freq)
nyqbark = hz2bark(max_freq) - min_bark
if nfilts == 0 :
nfilts = np.ceil(nyqbark) + 1
wts = np.zeros((int(nfilts), int(fft_length)))
step_barks = nyqbark / (nfilts - 1)
binbarks = hz2bark(np.arange(0, fft_length / 2 + 1) * fs / fft_length)
for i in range (int(nfilts)):
f_bark_mid = min_bark + np.multiply(i, step_barks)
lof = np.subtract(np.subtract(binbarks, f_bark_mid), 0.5)
hif = np.add(np.subtract(binbarks, f_bark_mid), 0.5)
minimum = np.minimum(0, np.minimum(hif, np.multiply(-2.5, lof)) / band_width)
wts[i, 0 : int(fft_length / 2) + 1] = np.power(10, minimum)
return wts
def rasta_filter(x):
"""
    Function rasta_filter applies RASTA filtering to a (critical band by frame) matrix.
Default filter is single pole at 0.94.
@param x: Rows of x = critical bands, cols of x = frmes.
@returns: A (critical band by frame) matrix.
"""
# RASTA filter.
numer = np.arange(-2, 3)
numer = -numer / np.sum(numer ** 2)
denom = np.array([1, -0.94])
# Initialize the state. This avoids a big spike at the beginning
    # resulting from the dc offset level in each band.
zi = signal.lfilter_zi(numer,1)
y = np.zeros((x.shape))
# Dont keep any of these values, just output zero at the beginning.
# Apply the full filter to the rest of the signal, append it.
for i in range(x.shape[0]):
y1, zi = signal.lfilter(numer, 1, x[i, 0:4], axis = 0, zi = zi * x[i, 0])
y1 = y1*0
y2, _ = signal.lfilter(numer, denom, x[i, 4:x.shape[1]], axis = 0, zi = zi)
y[i, :] = np.append(y1, y2)
return y
def postaud(x, freq_max, fbtype='bark', boarden=0):
"""
Function postaud returns the compressed audio.
Does loudness equalization and cube root compression.
@param x: Critical band filters.
@param freq_max: Highest frequency band edge in Hz.
@param fbtype: The frequency domain we are working with. Default is 'bark'.
@param boarden: Number of extra flanking bands. Default is 0.
@returns: The cube root compressed audio.
"""
num_bands, num_frames = x.shape
num_fpts = int(num_bands + 2 * boarden) # Include frequency points at extremes, discard later.
if fbtype == 'bark':
bandcfhz = bark2hz(np.linspace(0, hz2bark(freq_max), num_fpts))
elif fbtype == 'mel':
bandcfhz = mel2hz(np.linspace(0, hz2mel(freq_max), num_fpts))
else:
return 'Invalid fbtype input'
# Remove extremal bands (the ones that will be duplicated)
bandcfhz = bandcfhz[boarden : (num_fpts - boarden)]
# Hynek's magic equal-loudness-curve formula
fsq = np.power(bandcfhz, 2)
ftmp = np.add(fsq, 1.6e5)
eql = np.multiply(np.power(np.divide(fsq, ftmp), 2), np.divide(np.add(fsq, 1.44e6), np.add(fsq, 9.61e6)))
# Weight the critical bands.
z = np.multiply(np.tile(eql, (num_frames, 1)).T, x)
# Cube root compress.
z = np.power(z, 0.33)
# Replicate first and last band (because they are unreliable as calculated).
if boarden:
y = np.zeros((z.shape[0] + 2, z.shape[1]))
y[0, :] = z[0, :]
y[1:num_bands + 1, :] = z
y[num_bands + 1, :] = z[z.shape[0] - 1, :]
else:
y = np.zeros((z.shape[0], z.shape[1]))
y[0, :] = z[1, :]
y[1:num_bands - 1, :] = z[1:z.shape[0] - 1, :]
y[num_bands - 1, :] = z[z.shape[0] - 2, :]
return y, eql
def dolpc(x, model_order=8):
"""
Function dolpc computes the autoregressive model from spectral magnitude samples.
@param x: Critical band filters.
@param model_order: Order of model. Default is 8.
@returns: Autoregressive model from spectral magnitude samples.
"""
num_bands, num_frames = x.shape
# Calculate autocorrelation
R = np.zeros((2 * (num_bands - 1), num_frames))
R[0:num_bands, :] = x
for i in range(num_bands - 1):
R[i + num_bands - 1, :] = x[num_bands - (i + 1), :]
r = fft.ifft(R.T).real.T
r = r[0:num_bands, :]
y = np.ones((num_frames, model_order + 1))
e = np.zeros((num_frames, 1))
# Find LPC coeffs by durbin
if model_order == 0:
for i in range(num_frames):
_ , e_tmp, _ = spectrum.LEVINSON(r[:, i], model_order, allow_singularity = True)
e[i, 0] = e_tmp
else:
for i in range(num_frames):
y_tmp, e_tmp, _ = spectrum.LEVINSON(r[:, i], model_order, allow_singularity = True)
y[i, 1:model_order + 1] = y_tmp
e[i, 0] = e_tmp
# Normalize each poly by gain.
y = np.divide(y.T, np.add(np.tile(e.T, (model_order + 1, 1)), 1e-8))
return y
def lpc2cep(a, nout = None):
"""
Function lpc2cep converts the LPC 'a' coefficients in each column of lpcas
into frames of cepstra.
@param a: LPC.
@param nout: Number of cepstra to produce. Defaults to len(a).
"""
nin, ncol = a.shape
order = nin - 1
if not nout:
nout = order + 1
# First cep is log(Error) from Durbin.
cep = np.zeros((nout, ncol))
cep[0, :] = -np.log(a[0, :])
# Renormalize LPC a coefficients.
norm_a = np.divide(a, np.add(np.tile(a[0, :], (nin, 1)), 1e-8))
for n in range(1, nout):
total = 0
for m in range(1, n):
total = np.add(total, np.multiply(np.multiply((n - m), norm_a[m, :]), cep[(n - m), :]))
cep[n, :] = -np.add(norm_a[n, :], np.divide(total, n))
return cep
def lpc2spec(lpcas, nout=None):
"""
Function lpc2spec converts LPC coefficients back into spectra.
@param lpcas: LPC analysis.
    @param nout: Number of frequency channels. Default is 17 (i.e. for 8 kHz).
@returns: The spectra coefficients.
"""
nout = nout or 17
rows, cols = lpcas.shape
order = rows - 1
gg = lpcas[1,:]
aa = np.divide(lpcas, np.tile(gg, (rows,1)))
# Calculate the actual z-plane polyvals: nout points around unit circle.
tmp_1 = np.array(
|
np.arange(0, nout)
|
numpy.arange
|
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import logging
import numpy as np
from panda3d.core import Vec3, NodePath, BitMask32, TransformState, LVecBase3f
from panda3d.bullet import BulletWorld, BulletTriangleMesh, BulletRigidBodyNode, BulletBoxShape, BulletTriangleMeshShape, \
BulletDebugNode, BulletCapsuleShape, BulletConvexHullShape
from home_platform.core import World
from home_platform.suncg import ObjectVoxelData, ModelCategoryMapping
from home_platform.utils import mat4ToNumpyArray
logger = logging.getLogger(__name__)
def getCollisionShapeFromModel(model, mode='box', defaultCentered=False):
#NOTE: make sure the position is relative to the center of the object
minBounds, maxBounds = model.getTightBounds()
offset = minBounds + (maxBounds - minBounds) / 2.0
transform = TransformState.makeIdentity()
if mode == 'mesh':
# Use exact triangle mesh approximation
mesh = BulletTriangleMesh()
geomNodes = model.findAllMatches('**/+GeomNode')
for nodePath in geomNodes:
geomNode = nodePath.node()
for n in range(geomNode.getNumGeoms()):
geom = geomNode.getGeom(n)
mesh.addGeom(geom)
shape = BulletTriangleMeshShape(mesh, dynamic=False)
transform = model.getTransform()
elif mode == "sphere":
minBounds, maxBounds = model.getTightBounds()
dims = maxBounds - minBounds
radius = np.sqrt(np.square(dims[0]) +
|
np.square(dims[1])
|
numpy.square
|
import cv2
import numpy as np
import sys
def convert_color_space_BGR_to_RGB(img_BGR):
#print(img_BGR)
B = img_BGR[:,:,0]/255
G = img_BGR[:,:,1]/255
R = img_BGR[:,:,2]/255
img_RGB = np.stack([R, G, B], axis=2)
#print(img_RGB)
return img_RGB
def convert_color_space_RGB_to_BGR(img_RGB):
    R = img_RGB[:,:,0]*255
    G = img_RGB[:,:,1]*255
    B = img_RGB[:,:,2]*255
img_BGR = np.stack([B, G, R], axis=2)
return img_BGR
def convert_color_space_RGB_to_Lab(img_RGB):
'''
convert image color space RGB to Lab
'''
#print(img_RGB)
R = img_RGB[:,:,0]
G = img_RGB[:,:,1]
B = img_RGB[:,:,2]
L = 0.3811*R + 0.5783*G + 0.0402*B
M = 0.1967*R + 0.7244*G + 0.0782*B
S = 0.0241*R + 0.1288*G + 0.8444*B
L = np.log10(L)
M = np.log10(M)
S = np.log10(S)
new_l = 1.0 / np.sqrt(3)*L + 1.0 / np.sqrt(3)*M + 1.0 / np.sqrt(3)*S
new_alpha = 1.0 / np.sqrt(6)*L + 1.0 / np.sqrt(6)*M - 2 / np.sqrt(6)*S
new_beta = 1.0 / np.sqrt(2)*L - 1.0 / np.sqrt(2)*M + 0 *S
img_Lab = np.stack([new_l, new_alpha, new_beta], axis=2)
#print(img_Lab)
return img_Lab
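# The 3x3 matrices above implement the RGB -> LMS -> log -> l-alpha-beta chain, a
# decorrelated colour space commonly used for statistics-based colour transfer: the
# l channel carries luminance while alpha/beta carry opponent chromatic axes.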
def convert_color_space_Lab_to_BGR(img_Lab):
'''
    convert image color space Lab to BGR
'''
l_result = img_Lab[:,:,0]
alpha_result = img_Lab[:,:,1]
beta_result = img_Lab[:,:,2]
L = np.sqrt(3.0) / 3.0 * l_result + np.sqrt(6) / 6.0 * alpha_result + np.sqrt(2) / 2.0 * beta_result
M = np.sqrt(3.0) / 3.0 * l_result + np.sqrt(6) / 6.0 * alpha_result - np.sqrt(2) / 2.0 * beta_result
S = np.sqrt(3.0) / 3.0 * l_result - np.sqrt(6) / 3.0 * alpha_result - 0 * beta_result
L = np.power(10.0, L)
M = np.power(10.0, M)
S = np.power(10.0, S)
R = 4.4679*L - 3.5873*M + 0.1193*S
G = -1.2186*L + 2.3809*M - 0.1624*S
B = 0.0497*L - 0.2439*M + 1.2045*S
R = R*255
G = G*255
B = B*255
img_BGR = np.stack([B, G, R], axis=2)
#print(img_BGR)
return img_BGR
def convert_color_space_RGB_to_CIECAM97s(img_RGB):
'''
convert image color space RGB to CIECAM97s
'''
R = img_RGB[:,:,0]
G = img_RGB[:,:,1]
B = img_RGB[:,:,2]
L = 0.3811*R + 0.5783*G + 0.0402*B
M = 0.1967*R + 0.7244*G + 0.0782*B
S = 0.0241*R + 0.1288*G + 0.8444*B
A = 2.00*L + 1.00*M + 0.05*S
C1 = 1.00*L - 1.09*M + 0.09*S
C2 = 0.11*L + 0.11*M - 0.22*S
img_CIECAM97s = np.stack([A, C1, C2], axis=2)
return img_CIECAM97s
def convert_color_space_CIECAM97s_to_RGB(img_CIECAM97s):
'''
convert image color space CIECAM97s to RGB
'''
A_result = img_CIECAM97s[:,:,0]
C1_result = img_CIECAM97s[:,:,1]
C2_result = img_CIECAM97s[:,:,2]
L = 0.32786885*A_result + 0.32159385*C1_result + 0.20607677*C2_result
M = 0.32786885*A_result - 0.63534395*C1_result - 0.18539779*C2_result
S = 0.32786885*A_result - 0.15687505*C1_result - 4.53511505*C2_result
R = 4.4679*L - 3.5873*M + 0.1193*S
G = -1.2186*L + 2.3809*M - 0.1624*S
B = 0.0497*L - 0.2439*M + 1.2045*S
R = R * 255
G = G * 255
B = B * 255
img_RGB = np.stack([B, G, R], axis=2)
return img_RGB
def color_transfer_in_Lab(img_RGB_source, img_RGB_target):
print('===== color_transfer_in_Lab =====')
#print(img_RGB_source)
#print(img_RGB_target)
source_lab = convert_color_space_RGB_to_Lab(img_RGB_source)
target_lab = convert_color_space_RGB_to_Lab(img_RGB_target)
#print(source_lab)
#print(target_lab)
l_source = source_lab[:,:,0]
a_source = source_lab[:,:,1]
b_source = source_lab[:,:,2]
l_target = target_lab[:,:,0]
a_target = target_lab[:,:,1]
b_target = target_lab[:,:,2]
l_source_mean = l_source - np.mean(l_source)
alpha_source_mean = a_source - np.mean(a_source)
beta_source_mean = b_source - np.mean(b_source)
l_source_std = np.std(l_source)
alpha_source_std = np.std(a_source)
beta_source_std = np.std(b_source)
l_target_mean = np.mean(l_target)
alpha_target_mean = np.mean(a_target)
beta_target_mean = np.mean(b_target)
l_target_std = np.std(l_target)
alpha_target_std = np.std(a_target)
beta_target_std = np.std(b_target)
l_result = (l_target_std/l_source_std) * l_source_mean
alpha_result = (alpha_target_std/alpha_source_std) * alpha_source_mean
beta_result = (beta_target_std/beta_source_std) * beta_source_mean
l_result += l_target_mean
alpha_result += alpha_target_mean
beta_result += beta_target_mean
img_trans = np.stack([l_result, alpha_result, beta_result], axis=2)
#print(img_trans)
img_conv_BGR = convert_color_space_Lab_to_BGR(img_trans)
#print("inside",img_conv_BGR)
return img_conv_BGR
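# A minimal end-to-end sketch of how these helpers fit together, assuming the
# placeholder file names below and that cv2.imread returns a uint8 BGR image:
def _demo_lab_transfer(source_path='source.png', target_path='target.png',
                       out_path='result_lab.png'):
    img_RGB_source = convert_color_space_BGR_to_RGB(cv2.imread(source_path))
    img_RGB_target = convert_color_space_BGR_to_RGB(cv2.imread(target_path))
    result_BGR = color_transfer_in_Lab(img_RGB_source, img_RGB_target)
    cv2.imwrite(out_path, np.clip(result_BGR, 0, 255).astype(np.uint8))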
def color_transfer_in_RGB(img_RGB_source, img_RGB_target):
print('===== color_transfer_in_RGB =====')
R = img_RGB_source[:,:,0]
G = img_RGB_source[:,:,1]
B = img_RGB_source[:,:,2]
r_source_mean = R - np.mean(R)
g_source_mean = G - np.mean(G)
b_source_mean = B - np.mean(B)
r_source_std = np.std(R)
g_source_std = np.std(G)
b_source_std = np.std(B)
R_tar = img_RGB_target[:,:,0]
G_tar = img_RGB_target[:,:,1]
B_tar = img_RGB_target[:,:,2]
r_target_mean = np.mean(R_tar)
g_target_mean = np.mean(G_tar)
b_target_mean = np.mean(B_tar)
r_target_std = np.std(R_tar)
g_target_std = np.std(G_tar)
b_target_std = np.std(B_tar)
r_result = (r_target_std/r_source_std) * r_source_mean
g_result = (g_target_std/g_source_std) * g_source_mean
b_result = (b_target_std/b_source_std) * b_source_mean
r_result += r_target_mean
g_result += g_target_mean
b_result += b_target_mean
r_result = r_result*255
g_result = g_result*255
b_result = b_result*255
result_img = np.stack([b_result, g_result, r_result], axis=2)
return result_img
def color_transfer_in_CIECAM97s(img_RGB_source, img_RGB_target):
print('===== color_transfer_in_CIECAM97s =====')
source = convert_color_space_RGB_to_CIECAM97s(img_RGB_source)
target = convert_color_space_RGB_to_CIECAM97s(img_RGB_target)
A_source = source[:,:,0]
C1_source = source[:,:,1]
C2_source = source[:,:,2]
A_target = target[:,:,0]
C1_target = target[:,:,1]
C2_target = target[:,:,2]
A_source_mean = A_source - np.mean(A_source)
C1_source_mean = C1_source - np.mean(C1_source)
C2_source_mean = C2_source - np.mean(C2_source)
A_source_std = np.std(A_source)
C1_source_std = np.std(C1_source)
C2_source_std = np.std(C2_source)
A_target_mean = np.mean(A_target)
C1_target_mean =
|
np.mean(C1_target)
|
numpy.mean
|
# Copyright (c) 2011-2016 by California Institute of Technology
# Copyright (c) 2016 by The Regents of the University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Check Linear Discrete-Time-Invariant System reachability between polytopes
Primary functions:
- L{solve_feasible}
- L{createLM}
- L{get_max_extreme}
See Also
========
L{find_controller}
"""
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
try:
    from collections.abc import Iterable
except ImportError:
    # Python 2 fallback
    from collections import Iterable
import numpy as np
import polytope as pc
def is_feasible(
from_region, to_region, sys, N,
closed_loop=True,
use_all_horizon=False,
trans_set=None
):
"""Return True if to_region is reachable from_region.
For details see solve_feasible.
"""
S0 = solve_feasible(
from_region, to_region, sys, N,
closed_loop, use_all_horizon,
trans_set
)
return from_region <= S0
def solve_feasible(
P1, P2, ssys, N=1, closed_loop=True,
use_all_horizon=False, trans_set=None, max_num_poly=5
):
r"""Compute S0 \subseteq trans_set from which P2 is N-reachable.
N-reachable = reachable in horizon C{N}.
The system dynamics are C{ssys}.
The closed-loop algorithm solves for one step at a time,
which keeps the dimension of the polytopes down.
Time semantics:
- C{use_all_horizon = False}: fixed sampling period of
discrete-valued environment variables.
Reachability in exactly C{N} steps.
- C{use_all_horizon = True}: sampling period that varies and
is chosen by the system, depending on how many steps are
taken during each trajectory from C{P1} to C{P2}.
Reachability in C{1..N} steps, with an under-approximation
of the attractor set.
If the system dynamics do not allow staying at the same state,
then disconnected polytopes can arise, possibly causing issues
in the computation. Consider decreasing the sampling period
used for discretizing the associated continuous-time dynamical system.
@type P1: C{Polytope} or C{Region}
@type P2: C{Polytope} or C{Region}
@type ssys: L{LtiSysDyn}
@param N: horizon length
@param closed_loop: If C{True}, then take 1 step at a time.
This keeps down polytope dimension and
handles disturbances better.
@type closed_loop: bool
@param use_all_horizon: Used for closed loop algorithm.
- If C{True}, then check for reachability in C{< N} steps,
- otherwise, in exactly C{N} steps.
@type use_all_horizon: bool
@param trans_set: If specified,
then force transitions to be in this set.
Otherwise, P1 is used.
@return: states from which P2 is reachable
@rtype: C{Polytope} or C{Region}
"""
if closed_loop:
if use_all_horizon:
return _underapproximate_attractor(
P1, P2, ssys, N, trans_set=trans_set)
else:
return _solve_closed_loop_fixed_horizon(
P1, P2, ssys, N, trans_set=trans_set)
else:
if use_all_horizon:
raise ValueError(
'`use_all_horizon = True` has no effect if '
'`closed_loop = False`')
return solve_open_loop(
P1, P2, ssys, N,
trans_set=trans_set,
max_num_poly=max_num_poly
)
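# A hedged usage sketch: with `ssys` an LtiSysDyn instance (built elsewhere, not
# shown here) and pc.box2poly assumed available in the installed `polytope`
# version, the default closed-loop call and the 1..N-step variant look like:
#
#     P1 = pc.box2poly([[0., 1.], [0., 1.]])
#     P2 = pc.box2poly([[2., 3.], [0., 1.]])
#     S0_exact = solve_feasible(P1, P2, ssys, N=5)
#     S0_attractor = solve_feasible(P1, P2, ssys, N=5, use_all_horizon=True)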
def _solve_closed_loop_fixed_horizon(
P1, P2, ssys, N, trans_set=None):
"""Under-approximate states in P1 that can reach P2 in N > 0 steps.
If intermediate polytopes are convex,
then the result is exact and not an under-approximation.
@type P1: C{Polytope} or C{Region}
@type P2: C{Polytope} or C{Region}
@param ssys: system dynamics
@param N: horizon length
@type N: int > 0
@param trans_set: If provided,
then intermediate steps are allowed
to be in trans_set.
Otherwise, P1 is used.
"""
assert N > 0, N
p1 = P1.copy() # initial set
p2 = P2.copy() # terminal set
if trans_set is None:
pinit = p1
else:
pinit = trans_set
# backwards in time
for i in range(N, 0, -1):
# first step from P1
if i == 1:
pinit = p1
p2 = solve_open_loop(pinit, p2, ssys, 1, trans_set)
p2 = pc.reduce(p2)
if not pc.is_fulldim(p2):
return pc.Polytope()
return p2
def _solve_closed_loop_bounded_horizon(
P1, P2, ssys, N, trans_set=None):
"""Under-approximate states in P1 that can reach P2 in <= N steps.
See docstring of function `_solve_closed_loop_fixed_horizon`
for details.
"""
_print_horizon_warning()
p1 = P1.copy() # initial set
p2 = P2.copy() # terminal set
if trans_set is None:
pinit = p1
else:
pinit = trans_set
# backwards in time
s = pc.Region()
for i in range(N, 0, -1):
# first step from P1
if i == 1:
pinit = p1
p2 = solve_open_loop(pinit, p2, ssys, 1, trans_set)
p2 = pc.reduce(p2)
# running union
s = s.union(p2, check_convex=True)
s = pc.reduce(s)
# empty target polytope ?
if not pc.is_fulldim(p2):
break
if not pc.is_fulldim(s):
return pc.Polytope()
s = pc.reduce(s)
return s
def _underapproximate_attractor(
P1, P2, ssys, N, trans_set=None):
"""Under-approximate N-step attractor of polytope P2, with N > 0.
See docstring of function `_solve_closed_loop_fixed_horizon`
for details.
"""
assert N > 0, N
_print_horizon_warning()
p1 = P1.copy() # initial set
p2 = P2.copy() # terminal set
if trans_set is None:
pinit = p1
else:
pinit = trans_set
# backwards in time
for i in range(N, 0, -1):
# first step from P1
if i == 1:
pinit = p1
r = solve_open_loop(pinit, p2, ssys, 1, trans_set)
p2 = p2.union(r, check_convex=True)
p2 = pc.reduce(p2)
# empty target polytope ?
if not pc.is_fulldim(p2):
return pc.Polytope()
return r
def _print_horizon_warning():
print('WARNING: different timing semantics and assumptions '
'from the case of fixed horizon. '
'Also, depending on dynamics, disconnected polytopes '
'can arise, which may cause issues in '
'the `polytope` package.')
def solve_open_loop(
P1, P2, ssys, N,
trans_set=None, max_num_poly=5
):
r1 = P1.copy() # Initial set
r2 = P2.copy() # Terminal set
# use the max_num_poly largest volumes for reachability
r1 = volumes_for_reachability(r1, max_num_poly)
r2 = volumes_for_reachability(r2, max_num_poly)
if len(r1) > 0:
start_polys = r1
else:
start_polys = [r1]
if len(r2) > 0:
target_polys = r2
else:
target_polys = [r2]
# union of s0 over all polytope combinations
s0 = pc.Polytope()
for p1 in start_polys:
for p2 in target_polys:
cur_s0 = poly_to_poly(p1, p2, ssys, N, trans_set)
s0 = s0.union(cur_s0, check_convex=True)
return s0
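# A small, self-contained illustration of the union/reduce pattern that
# solve_open_loop applies to every start/target polytope pair; it assumes
# pc.box2poly and Polytope.union(check_convex=...) are available in the
# installed `polytope` version.
def _demo_union_reduce():
    p_a = pc.box2poly([[0.0, 1.0], [0.0, 1.0]])
    p_b = pc.box2poly([[0.5, 1.5], [0.0, 1.0]])
    s = p_a.union(p_b, check_convex=True)   # convex union of two overlapping boxes
    s = pc.reduce(s)                        # drop redundant halfspaces
    return pc.is_fulldim(s)                 # True for these boxes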
def poly_to_poly(p1, p2, ssys, N, trans_set=None):
"""Compute s0 for open-loop polytope to polytope N-reachability.
"""
p1 = p1.copy()
p2 = p2.copy()
if trans_set is None:
trans_set = p1
# stack polytope constraints
L, M = createLM(ssys, N, p1, trans_set, p2)
s0 = pc.Polytope(L, M)
s0 = pc.reduce(s0)
# Project polytope s0 onto lower dim
n =
|
np.shape(ssys.A)
|
numpy.shape
|
"""The heart of the *oyb* package. Contains orbital models and conversions.
"""
import datetime
import numpy
from math import pi, cos, sin, acos, asin
from oyb import anomaly, rot, earth
class Orbit(object):
"""Restricted two-body propagation model
"""
def __init__(self, a_m=None, e=None, i_rad=None, O_rad=None, w_rad=None, M_rad=None, tEpoch_dt=None):
"""Initializes a restricted two-body propagation model for a spherical
earth. Defaults to a circular GEO orbit with all angles set to 0.
(Epoch is set to the UTC time when the object is created.)
"""
self.tEpoch_dt = tEpoch_dt if tEpoch_dt is not None else datetime.datetime.utcnow()
self.a_m = a_m if a_m is not None else ((earth.tSidDay_s / (2 * pi))**2 * earth.mu_m3ps2)**(1/3)
        self.e = e if e is not None else 0.0
        self.i_rad = i_rad if i_rad is not None else 0.0
        self.O_rad = O_rad if O_rad is not None else 0.0
        self.w_rad = w_rad if w_rad is not None else 0.0
        self.M_rad = M_rad if M_rad is not None else 0.0
def __str__(self):
"""Converts an Orbit object into a string representation (invoked by the
*str* and *print* functions) that references the shape (altitude at
perigee and apogee) and object location in memory.
"""
hPer_m, hApo_m = self.getShape()
return '<%g x %g [km] %s at 0x%08x>' % (hPer_m * 1e-3, hApo_m * 1e-3, self.__class__.__name__, id(self))
def getPeriod(self):
"""Returns the period of the current orbit, in seconds.
"""
T_s = 2 * pi * (self.a_m**3 / earth.mu_m3ps2)**0.5
return T_s
def getTrue(self, t_dt=None):
"""Returns the true anomaly of the object when propagated to a given
datetime.
"""
if t_dt is None:
t_dt = self.tEpoch_dt
dt_s = (t_dt - self.tEpoch_dt).total_seconds()
dM_rad = dt_s * 2 * pi / self.getPeriod()
M_rad = (self.M_rad + dM_rad) % (2 * pi)
return anomaly.mean2true(M_rad, self.e)
def getRpqw(self, t_dt=None):
"""Returns the 3-component position vector of the object at the given
datetime, as evaluted in the PQW (co-planar) frame.
"""
tht_rad = self.getTrue(t_dt)
d = 1 + self.e * cos(tht_rad)
p_m = self.a_m * (1 - self.e**2) * cos(tht_rad) / d
q_m = self.a_m * (1 - self.e**2) * sin(tht_rad) / d
return numpy.array([p_m, q_m, 0])
def getQpqw2eci(self):
"""Returns the frame transformation matrix from the PQW (co-planar)
frame to the earth-centered inertial (ECI) frame, w.r.t. J2000.
"""
w = rot.Z(self.w_rad)
i = rot.X(self.i_rad)
O = rot.Z(self.O_rad)
return (w.dot(i).dot(O)).transpose()
def getReci(self, t_dt=None):
"""Returns the position of the object at the given point in time, as
evaluated within the earth-centered inertial (ECI) frame, in meters.
"""
rPqw = self.getRpqw(t_dt)
Qpqw2eci = self.getQpqw2eci()
return Qpqw2eci.dot(rPqw)
def getRlla(self, t_dt=None):
"""Computes and returns the position at the given datetime, in latitude,
longitude, and altitude (radians, radians, and meters, respectively).
"""
if t_dt is None:
t_dt = self.tEpoch_dt
rEci_m = self.getReci(t_dt)
rEcf_m = earth.getQeci2ecf(t_dt).dot(rEci_m)
rLla_radm = rot.xyz2sph(rEcf_m)
return numpy.array([rLla_radm[1], rLla_radm[0], rLla_radm[2] - earth.eqRad_m])
def getAngMom(self):
"""Returns the scalar angular momentum of the orbit, in m/s^2.
"""
return (earth.mu_m3ps2 * self.a_m * (1 - self.e**2))**0.5
def getShape(self):
"""Returns a two-element tuple containing the altitude of the object at
perigee and apogee, in meters above spherical sea level.
"""
hPer_m = self.a_m * (1 - self.e) - earth.eqRad_m
hApo_m = self.a_m * (1 + self.e) - earth.eqRad_m
return (hPer_m, hApo_m)
def getShapeVel(self):
"""Returns a two-element tuple containing the scalar velocity of the
object at perigee and apogee, in meters per second.
"""
rPer_m = self.a_m * (1 - self.e)
rApo_m = self.a_m * (1 + self.e)
h_m2ps = self.getAngMom()
return h_m2ps / rPer_m, h_m2ps / rApo_m
def getTaaRad(self):
"""Returns the true-anomaly averaged radius (geometric mean of radius at
perigee and apogee), in meters.
"""
rPer_m = self.a_m * (1 - self.e)
rApo_m = self.a_m * (1 + self.e)
return (rPer_m * rApo_m)**0.5
def setShape(self, hPer_m, hApo_m):
"""Sets the a_m (semi-major axis) and e (eccentricity) values of an
orbit based on the given altitude (meters above spherical sea level)
at perigee and apogee.
"""
rPer_m = hPer_m + earth.eqRad_m
rApo_m = hApo_m + earth.eqRad_m
self.a_m = 0.5 * (rPer_m + rApo_m)
self.e = (rApo_m - rPer_m) / (rApo_m + rPer_m)
def propagate(self, tEpoch_dt=None, T_s=None, nSamples=1000):
"""Computes inertial position over the course of one orbit, beginning
with the given datetime (or, if not provided, the element epoch).
This defaults to 1,000 samples within that time range.
"""
if tEpoch_dt is None:
tEpoch_dt = self.tEpoch_dt
if T_s is None:
T_s = self.getPeriod()
ti_s = numpy.linspace(0, T_s, nSamples)
rEci_m = numpy.zeros((0,3))
for t_s in ti_s:
r = self.getReci(tEpoch_dt + datetime.timedelta(t_s/86400))
rEci_m = numpy.append(rEci_m, r.reshape(1,-1), axis=0)
return rEci_m
def track(self, tEpoch_dt=None, T_s=None, nSamples=1000):
"""Computes lat/lon/alt position over the course of one orbit, beginning
with the given datetime (or, if not provided, the element epoch).
This defaults to 1,000 samples within that time range.
"""
if tEpoch_dt is None:
tEpoch_dt = self.tEpoch_dt
if T_s is None:
T_s = self.getPeriod()
ti_s = numpy.linspace(0, T_s, nSamples)
rLla_radm =
|
numpy.zeros((0,3))
|
numpy.zeros
|
# Parametric Walker continuous environment
#
# Reward is given for moving forward, total 300+ points up to the far end. If the robot falls,
# it gets -100. Applying motor torque costs a small amount of points, more optimal agent
# will get better score.
#
# State consists of hull angle speed, angular velocity, horizontal speed, vertical speed,
# position of joints and joints angular speed, legs contact with ground, and 10 lidar
# rangefinder measurements. There's no coordinates
# in the state vector.
#
# Initially Created by <NAME>. Licensed on the same terms as the rest of OpenAI Gym.
# Modified by <NAME> and licensed under TeachMyAgent/teachers/LICENSES/ALP-GMM
# Modified <NAME>
#region Imports
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from gym.utils import colorize, seeding, EzPickle
import math
from TeachMyAgent.environments.envs.bodies.BodiesEnum import BodiesEnum
from TeachMyAgent.environments.envs.bodies.BodyTypesEnum import BodyTypesEnum
from TeachMyAgent.environments.envs.utils.custom_user_data import CustomUserDataObjectTypes, CustomUserData
#endregion
#region Utils
class ContactDetector(contactListener):
'''
Custom contact detector.
'''
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
'''
Triggered when contact is detected.
Checks userData of each of the two fixtures colliding.
Sets `userData.has_contact` to True on the body if `body.userData.check_contact == True`.
If `userData.is_contact_critical == True`, `env.critical_contact` is set to True, stopping the episode.
'''
for body in [contact.fixtureA.body, contact.fixtureB.body]:
if body.userData.object_type == CustomUserDataObjectTypes.BODY_OBJECT and body.userData.check_contact:
body.userData.has_contact = True
if body.userData.is_contact_critical:
self.env.head_contact = True
def EndContact(self, contact):
'''
Triggered when contact ends.
Sets `userData.has_contact` to False on the body if `body.userData.check_contact == True`.
'''
for body in [contact.fixtureA.body, contact.fixtureB.body]:
if body.userData.object_type == CustomUserDataObjectTypes.BODY_OBJECT and body.userData.check_contact:
body.userData.has_contact = False
def Rotate2D(pts,cnt,ang=np.pi/4):
    '''Rotate points pts (n x 2) about center cnt (2,) by angle ang (radians).'''
m1 = pts-cnt
m2 = np.array([[np.cos(ang),np.sin(ang)],[-np.sin(ang),np.cos(ang)]])
return np.dot(m1,m2)+cnt
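# A tiny check of the row-vector convention above: rotating (1, 0) about the
# origin by pi/2 lands on (0, 1) up to floating-point error.
def _check_rotate2d():
    out = Rotate2D(np.array([[1.0, 0.0]]), np.array([0.0, 0.0]), np.pi / 2)
    assert np.allclose(out, [[0.0, 1.0]])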
class LidarCallback(Box2D.b2.rayCastCallback):
'''
Callback function triggered when lidar detects an object.
'''
def __init__(self, agent_mask_filter):
'''
Args:
agent_mask_filter: Mask filter used to avoid detecting collisions with the agent's body
'''
Box2D.b2.rayCastCallback.__init__(self)
self.agent_mask_filter = agent_mask_filter
self.fixture = None
def ReportFixture(self, fixture, point, normal, fraction):
'''
Triggered when a body is detected by the lidar.
Returns:
Distance to object detected.
'''
if (fixture.filterData.categoryBits & self.agent_mask_filter) == 0:
return -1
self.p2 = point
self.fraction = fraction
return fraction
#endregion
#region Constants
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
VIEWPORT_W = 600 # Careful, this affects training
VIEWPORT_H = 400 # Careful, this affects training
RENDERING_VIEWER_W = VIEWPORT_W # Only affects rendering, not the policy
RENDERING_VIEWER_H = VIEWPORT_H # Only affects rendering, not the policy
NB_LIDAR = 10 # Number of lidars used by the agent
LIDAR_RANGE = 160/SCALE
INITIAL_RANDOM = 5
TERRAIN_STEP = 14/SCALE
TERRAIN_LENGTH = 200 # in steps
TERRAIN_HEIGHT = VIEWPORT_H/SCALE/4
TERRAIN_END = 10 # in steps
INITIAL_TERRAIN_STARTPAD = 20 # in steps
FRICTION = 2.5
#endregion
class ParametricContinuousStumpTracks(gym.Env, EzPickle):
'''
The Stump Tracks: a procedurally generated Gym environment.
'''
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : FPS
}
def __init__(self, walker_type, **walker_args):
'''
Creates a Stump Tracks environment with an embodiment.
walker_type: Embodiment
:type walker_type: BodiesEnum
walker_args: kwargs controlling the agent (e.g. number of body for a millipede)
'''
super(ParametricContinuousStumpTracks, self).__init__()
# Seed env and init Box2D
self.seed()
self.viewer = None
self.world = Box2D.b2World()
self.terrain = []
self.prev_shaping = None
# Create agent
body_type = BodiesEnum.get_body_type(walker_type)
if body_type == BodyTypesEnum.SWIMMER or body_type == BodyTypesEnum.AMPHIBIAN:
self.walker_body = BodiesEnum[walker_type].value(SCALE, density=1.0, **walker_args)
elif body_type == BodyTypesEnum.WALKER:
self.walker_body = BodiesEnum[walker_type].value(SCALE, **walker_args,
reset_on_hull_critical_contact=True)
else:
self.walker_body = BodiesEnum[walker_type].value(SCALE, **walker_args)
# Adapt startpad to walker's width
self.TERRAIN_STARTPAD = INITIAL_TERRAIN_STARTPAD if \
self.walker_body.AGENT_WIDTH / TERRAIN_STEP + 5 <= INITIAL_TERRAIN_STARTPAD else \
self.walker_body.AGENT_WIDTH / TERRAIN_STEP + 5 # in steps
self.create_terrain_fixtures()
# Set observation / action spaces
self._generate_walker() # To get state / action sizes
agent_action_size = self.walker_body.get_action_size()
self.action_space = spaces.Box(np.array([-1] * agent_action_size),
np.array([1] * agent_action_size), dtype=np.float32)
agent_state_size = self.walker_body.get_state_size()
high = np.array([np.inf] * (agent_state_size +
4 + # head infos
NB_LIDAR)) # lidars infos
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def set_environment(self, roughness=None, stump_height=None, stump_width=None, stump_rot=None,
obstacle_spacing=None, poly_shape=None, stump_seq=None):
'''
Set the parameters controlling the PCG algorithm to generate a task.
Call this method before `reset()`.
Args:
roughness: Input vector controlling the CPPN
stump_height: Tuple specifying mean and std of a normal distribution from which the height of each stump is sampled
stump_width: Tuple specifying mean and std of a normal distribution from which the width of each stump is sampled
stump_rot: Tuple specifying mean and std of a normal distribution from which the rotation degree of each stump is sampled
obstacle_spacing: Spacing between stumps
poly_shape: Shape of polygon stumps
'''
self.roughness = roughness if roughness else 0
self.obstacle_spacing = max(0.01, obstacle_spacing) if obstacle_spacing is not None else 8.0
self.stump_height = [stump_height, 0.1] if stump_height is not None else None
self.stump_width = stump_width
self.stump_rot = stump_rot
self.hexa_shape = poly_shape
self.stump_seq = stump_seq
if poly_shape is not None:
self.hexa_shape = np.interp(poly_shape,[0,4],[0,4]).tolist()
assert(len(poly_shape) == 12)
self.hexa_shape = self.hexa_shape[0:12]
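    # A hedged usage sketch: callers are expected to parameterize the PCG first
    # and only then reset, e.g. (walker name illustrative)
    #     env = ParametricContinuousStumpTracks('classic_bipedal')
    #     env.set_environment(stump_height=2.0, obstacle_spacing=6.0)
    #     obs = env.reset()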
def _destroy(self):
# if not self.terrain: return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
self.walker_body.destroy(self.world)
def reset(self):
self._destroy()
self.world.contactListener_bug_workaround = ContactDetector(self)
self.world.contactListener = self.world.contactListener_bug_workaround
self.head_contact = False
self.prev_shaping = None
self.scroll = 0.0
self.lidar_render = 0
self.generate_game()
self.drawlist = self.terrain + self.walker_body.get_elements_to_render()
self.lidar = [LidarCallback(self.walker_body.reference_head_object.fixtures[0].filterData.maskBits)
for _ in range(NB_LIDAR)]
self.episodic_reward = 0
return self.step(np.array([0] * self.action_space.shape[0]))[0]
def step(self, action):
self.walker_body.activate_motors(action)
self.world.Step(1.0/FPS, 6*30, 2*30)
head = self.walker_body.reference_head_object
pos = head.position
vel = head.linearVelocity
for i in range(NB_LIDAR):
self.lidar[i].fraction = 1.0
self.lidar[i].p1 = pos
self.lidar[i].p2 = (
pos[0] + math.sin(1.5*i/NB_LIDAR)*LIDAR_RANGE,
pos[1] - math.cos(1.5*i/NB_LIDAR)*LIDAR_RANGE)
self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)
state = [
            head.angle,        # Normal angles up to 0.5 here, but more is possible.
2.0*head.angularVelocity/FPS,
0.3*vel.x*(VIEWPORT_W/SCALE)/FPS, # Normalized to get -1..1 range
0.3*vel.y*(VIEWPORT_H/SCALE)/FPS]
# add leg-related state
state.extend(self.walker_body.get_motors_state())
if self.walker_body.body_type == BodyTypesEnum.CLIMBER:
state.extend(self.walker_body.get_sensors_state())
state += [l.fraction for l in self.lidar]
self.scroll = pos.x - RENDERING_VIEWER_W/SCALE/5
shaping = 130*pos[0]/SCALE # moving forward is a way to receive reward (normalized to get 300 on completion)
if not (hasattr(self.walker_body, "remove_reward_on_head_angle") and self.walker_body.remove_reward_on_head_angle):
shaping -= 5.0*abs(state[0]) # keep head straight, other than that and falling, any behavior is unpunished
reward = 0
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
for a in action:
reward -= self.walker_body.TORQUE_PENALTY * 80 * np.clip(np.abs(a), 0, 1) # 80 => Original torque
# normalized to about -50.0 using heuristic, more optimal agent should spend less
done = False
if self.head_contact or pos[0] < 0:
reward = -100
done = True
if pos[0] > (TERRAIN_LENGTH-TERRAIN_END)*TERRAIN_STEP:
done = True
self.episodic_reward += reward
return
|
np.array(state)
|
numpy.array
|
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time
from astropy.io.fits import fitsrec
from batman import TransitModel
from ..core import JointLightCurve, CheopsLightCurve
from ..planets import Planet
from .test_core_phase_curve import simulate_roll_angle
true_basis_vector_weights = [-2, 2, 1, 1, 20, 1, 1.5, 0.1, 0.1, 2e3]
def generate_recarrays_WASP189(depth_ppm=80, seed=42, n_outliers=50,
cheops_orbit_min=99.5, obs_efficiency=0.55,
n_visits=4):
p = Planet.from_name("WASP-189 b")
|
np.random.seed(seed)
|
numpy.random.seed
|
import numpy as np
from scipy import misc
import os
import glob
import math
import scipy.io
MIN_AREA_SIZE = 512.0*512.0
crops_list_name = 'crops_LiTS_gt_2.txt'
database_root = '../../LiTS_database/'
utils_path = '../crops_list/'
results_path = '../../results/'
images_path = os.path.join(database_root, 'images_volumes')
labels_path = os.path.join(database_root, 'item_seg/')
labels_liver_path = os.path.join(database_root, 'liver_seg/')
output_images_path_bb = os.path.join(database_root, 'bb_images_volumes_alldatabase3_gt_nozoom_common_bb')
output_labels_path_bb = os.path.join(database_root, 'bb_liver_lesion_seg_alldatabase3_gt_nozoom_common_bb')
output_labels_liver_path_bb = os.path.join(database_root, 'bb_liver_seg_alldatabase3_gt_nozoom_common_bb')
liver_results = os.path.join(database_root, 'seg_liver_ck/')
output_liver_results_path_bb = os.path.join(database_root, 'liver_results/')
# This script computes the bounding boxes around the liver from the ground truth, computing
# a single 3D bb for all the volume.
if not os.path.exists(output_labels_path_bb):
os.makedirs(output_labels_path_bb)
if not os.path.exists(output_images_path_bb):
os.makedirs(output_images_path_bb)
if not os.path.exists(output_labels_liver_path_bb):
os.makedirs(output_labels_liver_path_bb)
if not os.path.exists(output_liver_results_path_bb):
os.makedirs(output_liver_results_path_bb)
def numerical_sort(value):
return int(value)
def numerical_sort_path(value):
return int(value.split('.png')[0].split('/')[-1])
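# A quick sanity check of the sort key above: the slice index is taken from the
# file name, so slices sort numerically rather than lexicographically.
assert numerical_sort_path('liver_seg/105/42.png') == 42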
## If no labels, the masks_folder, should contain the results of liver segmentation
# masks_folders = os.listdir(results_path + 'liver_seg/')
masks_folders = os.listdir(labels_liver_path)
sorted_mask_folder = sorted(masks_folders, key=numerical_sort)
crops_file = open(os.path.join(utils_path, crops_list_name), 'w')
aux = 0
for i in range(len(masks_folders)):
if not masks_folders[i].startswith(('.', '\t')):
dir_name = masks_folders[i]
## If no labels, the masks_folder, should contain the results of liver segmentation
masks_of_volume = glob.glob(labels_liver_path + dir_name + '/*.png')
file_names = (sorted(masks_of_volume, key=numerical_sort_path))
depth_of_volume = len(masks_of_volume)
if not os.path.exists(os.path.join(output_labels_path_bb, dir_name)):
os.makedirs(os.path.join(output_labels_path_bb, dir_name))
if not os.path.exists(os.path.join(output_images_path_bb, dir_name)):
os.makedirs(os.path.join(output_images_path_bb, dir_name))
if not os.path.exists(os.path.join(output_labels_liver_path_bb, dir_name)):
os.makedirs(os.path.join(output_labels_liver_path_bb, dir_name))
if not os.path.exists(os.path.join(output_liver_results_path_bb, dir_name)):
os.makedirs(os.path.join(output_liver_results_path_bb, dir_name))
total_maxa = 0
total_mina = 10000000
total_maxb = 0
total_minb = 10000000
for j in range(0, depth_of_volume):
img = misc.imread(file_names[j])
img = img/255.0
img[np.where(img > 0.5)] = 1
img[np.where(img < 0.5)] = 0
a, b = np.where(img == 1)
if len(a) > 0:
maxa = np.max(a)
maxb = np.max(b)
mina = np.min(a)
minb = np.min(b)
if maxa > total_maxa:
total_maxa = maxa
if maxb > total_maxb:
total_maxb = maxb
if mina < total_mina:
total_mina = mina
if minb < total_minb:
total_minb = minb
for j in range(0, depth_of_volume):
img = misc.imread(file_names[j])
img = img/255.0
img[np.where(img > 0.5)] = 1
img[
|
np.where(img < 0.5)
|
numpy.where
|
import os
import pickle
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
def print_metrics(y, y_pred_argmax, label):
print('{} Set Accuracy = {:.3f}'.format(label, accuracy_score(y, y_pred_argmax)))
print('{} Set F-score = {:.3f}'.format(label, f1_score(y, y_pred_argmax, average='macro')))
print('{} Set Precision = {:.3f}'.format(label, precision_score(y, y_pred_argmax, average='macro')))
print('{} Set Recall = {:.3f}'.format(label, recall_score(y, y_pred_argmax, average='macro')))
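# A minimal illustration of print_metrics on a toy 3-class prediction; the
# label string is arbitrary.
def _demo_print_metrics():
    y_true = np.array([0, 1, 2, 2])
    y_pred = np.array([0, 1, 2, 1])
    print_metrics(y_true, y_pred, 'Demo')   # accuracy 0.750, macro F-score ~0.778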
def main():
df_train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'train_features.csv'), index_col=0)
print(df_train.shape)
print(df_train.columns)
# Convert emotion labels from text to ordinal:
emotion_dict = {emotion: idx for idx, emotion in enumerate(sorted({emotion for _, emotion in df_train['emotion'].items()}))}
print(emotion_dict)
x_train_raw = df_train.drop(columns=['emotion', 'path']).values
y_train_raw = df_train['emotion'].map(emotion_dict).values
print(x_train_raw.shape)
print(y_train_raw.shape)
x_train, x_validation, y_train, y_validation = train_test_split(x_train_raw, y_train_raw, test_size=0.2, random_state=42)
xgb_classifier = xgb.XGBClassifier(
max_depth=7,
learning_rate=1e-3,
objective='multi:softprob',
n_estimators=5000,
        subsample=0.8,
num_class=len(emotion_dict),
booster='gbtree',
n_jobs=4,
)
xgb_classifier.fit(x_train, y_train)
y_train_pred = xgb_classifier.predict_proba(x_train)
y_train_pred_argmax = np.argmax(y_train_pred, axis=-1)
y_validation_pred = xgb_classifier.predict_proba(x_validation)
y_validation_pred_argmax =
|
np.argmax(y_validation_pred, axis=-1)
|
numpy.argmax
|
from __future__ import absolute_import
from __future__ import division
import base64
import sys
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion, convolve
import unittest
import pytest
import centrosome.filter as F
from six.moves import range
"""Perform line-integration per-column of the image"""
VERTICAL = "vertical"
"""Perform line-integration per-row of the image"""
HORIZONTAL = "horizontal"
"""Perform line-integration along diagonals from top left to bottom right"""
DIAGONAL = "diagonal"
"""Perform line-integration along diagonals from top right to bottom left"""
ANTI_DIAGONAL = "anti-diagonal"
class TestStretch(unittest.TestCase):
def test_00_00_empty(self):
result = F.stretch(np.zeros((0,)))
self.assertEqual(len(result), 0)
def test_00_01_empty_plus_mask(self):
result = F.stretch(np.zeros((0,)), np.zeros((0,), bool))
self.assertEqual(len(result), 0)
def test_00_02_zeros(self):
result = F.stretch(np.zeros((10, 10)))
self.assertTrue(np.all(result == 0))
def test_00_03_zeros_plus_mask(self):
result = F.stretch(np.zeros((10, 10)), np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_00_04_half(self):
result = F.stretch(np.ones((10, 10)) * 0.5)
self.assertTrue(np.all(result == 0.5))
def test_00_05_half_plus_mask(self):
result = F.stretch(np.ones((10, 10)) * 0.5, np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0.5))
def test_01_01_rescale(self):
np.random.seed(0)
image = np.random.uniform(-2, 2, size=(10, 10))
image[0, 0] = -2
image[9, 9] = 2
expected = (image + 2.0) / 4.0
result = F.stretch(image)
self.assertTrue(np.all(result == expected))
def test_01_02_rescale_plus_mask(self):
np.random.seed(0)
image = np.random.uniform(-2, 2, size=(10, 10))
mask = np.zeros((10, 10), bool)
mask[1:9, 1:9] = True
image[0, 0] = -4
image[9, 9] = 4
image[1, 1] = -2
image[8, 8] = 2
expected = (image[1:9, 1:9] + 2.0) / 4.0
result = F.stretch(image, mask)
self.assertTrue(np.all(result[1:9, 1:9] == expected))
class TestMedianFilter(unittest.TestCase):
def test_00_00_zeros(self):
"""The median filter on an array of all zeros should be zero"""
result = F.median_filter(np.zeros((10, 10)), np.ones((10, 10), bool), 3)
self.assertTrue(np.all(result == 0))
def test_00_01_all_masked(self):
"""Test a completely masked image
Regression test of IMG-1029"""
result = F.median_filter(np.zeros((10, 10)), np.zeros((10, 10), bool), 3)
self.assertTrue(np.all(result == 0))
def test_00_02_all_but_one_masked(self):
mask = np.zeros((10, 10), bool)
mask[5, 5] = True
result = F.median_filter(np.zeros((10, 10)), mask, 3)
def test_01_01_mask(self):
"""The median filter, masking a single value"""
img = np.zeros((10, 10))
img[5, 5] = 1
mask = np.ones((10, 10), bool)
mask[5, 5] = False
result = F.median_filter(img, mask, 3)
self.assertTrue(np.all(result[mask] == 0))
def test_02_01_median(self):
"""A median filter larger than the image = median of image"""
np.random.seed(0)
img = np.random.uniform(size=(9, 9))
result = F.median_filter(img, np.ones((9, 9), bool), 20)
self.assertEqual(result[0, 0], np.median(img))
self.assertTrue(np.all(result == np.median(img)))
def test_02_02_median_bigger(self):
"""Use an image of more than 255 values to test approximation"""
np.random.seed(0)
img = np.random.uniform(size=(20, 20))
result = F.median_filter(img, np.ones((20, 20), bool), 40)
sorted = np.ravel(img)
sorted.sort()
min_acceptable = sorted[198]
max_acceptable = sorted[202]
self.assertTrue(np.all(result >= min_acceptable))
self.assertTrue(np.all(result <= max_acceptable))
def test_03_01_shape(self):
"""Make sure the median filter is the expected octagonal shape"""
radius = 5
a_2 = int(radius / 2.414213)
i, j = np.mgrid[-10:11, -10:11]
octagon = np.ones((21, 21), bool)
#
# constrain the octagon mask to be the points that are on
# the correct side of the 8 edges
#
octagon[i < -radius] = False
octagon[i > radius] = False
octagon[j < -radius] = False
octagon[j > radius] = False
octagon[i + j < -radius - a_2] = False
octagon[j - i > radius + a_2] = False
octagon[i + j > radius + a_2] = False
octagon[i - j > radius + a_2] = False
np.random.seed(0)
img = np.random.uniform(size=(21, 21))
result = F.median_filter(img, np.ones((21, 21), bool), radius)
sorted = img[octagon]
sorted.sort()
min_acceptable = sorted[len(sorted) // 2 - 1]
max_acceptable = sorted[len(sorted) // 2 + 1]
self.assertTrue(result[10, 10] >= min_acceptable)
self.assertTrue(result[10, 10] <= max_acceptable)
def test_04_01_half_masked(self):
"""Make sure that the median filter can handle large masked areas."""
img = np.ones((20, 20))
mask = np.ones((20, 20), bool)
mask[10:, :] = False
img[~mask] = 2
img[1, 1] = 0 # to prevent short circuit for uniform data.
result = F.median_filter(img, mask, 5)
# in partial coverage areas, the result should be only from the masked pixels
self.assertTrue(np.all(result[:14, :] == 1))
        # in zero coverage areas, the result should be the lowest value in the valid area
self.assertTrue(np.all(result[15:, :] == np.min(img[mask])))
@pytest.mark.skipif(sys.version_info > (3, 0), reason="requires Python 2.7")
class TestBilateralFilter(unittest.TestCase):
def test_00_00_zeros(self):
"""Test the bilateral filter of an array of all zeros"""
result = F.bilateral_filter(
np.zeros((10, 10)), np.ones((10, 10), bool), 5.0, 0.1
)
self.assertTrue(np.all(result == 0))
def test_00_01_all_masked(self):
"""Test the bilateral filter of a completely masked array"""
np.random.seed(0)
image = np.random.uniform(size=(10, 10))
result = F.bilateral_filter(image, np.zeros((10, 10), bool), 5.0, 0.1)
self.assertTrue(np.all(result == image))
class TestLaplacianOfGaussian(unittest.TestCase):
def test_00_00_zeros(self):
result = F.laplacian_of_gaussian(np.zeros((10, 10)), None, 9, 3)
self.assertTrue(np.all(result == 0))
def test_00_01_zeros_mask(self):
result = F.laplacian_of_gaussian(
np.zeros((10, 10)), np.zeros((10, 10), bool), 9, 3
)
self.assertTrue(np.all(result == 0))
def test_01_01_ring(self):
"""The LoG should have its lowest value in the center of the ring"""
i, j = np.mgrid[-20:21, -20:21].astype(float)
# A ring of radius 3, more or less
image = (np.abs(i ** 2 + j ** 2 - 3) < 2).astype(float)
result = F.laplacian_of_gaussian(image, None, 9, 3)
self.assertTrue(
(np.argmin(result) % 41, int(np.argmin(result) / 41)) == (20, 20)
)
class TestCanny(unittest.TestCase):
def test_00_00_zeros(self):
"""Test that the Canny filter finds no points for a blank field"""
result = F.canny(np.zeros((20, 20)), np.ones((20, 20), bool), 4, 0, 0)
self.assertFalse(np.any(result))
def test_00_01_zeros_mask(self):
"""Test that the Canny filter finds no points in a masked image"""
result = F.canny(
np.random.uniform(size=(20, 20)), np.zeros((20, 20), bool), 4, 0, 0
)
self.assertFalse(np.any(result))
def test_01_01_circle(self):
"""Test that the Canny filter finds the outlines of a circle"""
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
c = np.abs(np.sqrt(i * i + j * j) - 0.5) < 0.02
result = F.canny(c.astype(float), np.ones(c.shape, bool), 4, 0, 0)
#
# erode and dilate the circle to get rings that should contain the
# outlines
#
cd = binary_dilation(c, iterations=3)
ce = binary_erosion(c, iterations=3)
cde = np.logical_and(cd, np.logical_not(ce))
self.assertTrue(np.all(cde[result]))
#
# The circle has a radius of 100. There are two rings here, one
# for the inside edge and one for the outside. So that's 100 * 2 * 2 * 3
# for those places where pi is still 3. The edge contains both pixels
# if there's a tie, so we bump the count a little.
#
point_count = np.sum(result)
self.assertTrue(point_count > 1200)
self.assertTrue(point_count < 1600)
def test_01_02_circle_with_noise(self):
"""Test that the Canny filter finds the circle outlines in a noisy image"""
np.random.seed(0)
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
c = np.abs(np.sqrt(i * i + j * j) - 0.5) < 0.02
cf = c.astype(float) * 0.5 + np.random.uniform(size=c.shape) * 0.5
result = F.canny(cf, np.ones(c.shape, bool), 4, 0.1, 0.2)
#
# erode and dilate the circle to get rings that should contain the
# outlines
#
cd = binary_dilation(c, iterations=4)
ce = binary_erosion(c, iterations=4)
cde = np.logical_and(cd, np.logical_not(ce))
self.assertTrue(np.all(cde[result]))
point_count = np.sum(result)
self.assertTrue(point_count > 1200)
self.assertTrue(point_count < 1600)
class TestRoberts(unittest.TestCase):
def test_00_00_zeros(self):
"""Roberts on an array of all zeros"""
result = F.roberts(np.zeros((10, 10)), np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_00_01_mask(self):
"""Roberts on a masked array should be zero"""
np.random.seed(0)
result = F.roberts(np.random.uniform(size=(10, 10)), np.zeros((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_01_01(self):
"""Roberts on a diagonal edge should recreate the diagonal line"""
i, j = np.mgrid[0:10, 0:10]
image = (i >= j).astype(float)
result = F.roberts(image)
#
# Do something a little sketchy to keep from measuring the points
# at 0,0 and -1,-1 which are eroded
#
i[0, 0] = 10000
i[-1, -1] = 10000
self.assertTrue(np.all(result[i == j] == 1))
self.assertTrue(np.all(result[np.abs(i - j) > 1] == 0))
def test_01_02(self):
"""Roberts on an anti-diagonal edge should recreate the line"""
i, j = np.mgrid[-5:6, -5:6]
image = (i > -j).astype(float)
result = F.roberts(image)
i[0, -1] = 10000
i[-1, 0] = 10000
self.assertTrue(np.all(result[i == -j] == 1))
self.assertTrue(np.all(result[np.abs(i + j) > 1] == 0))
class TestSobel(unittest.TestCase):
def test_00_00_zeros(self):
"""Sobel on an array of all zeros"""
result = F.sobel(np.zeros((10, 10)), np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_00_01_mask(self):
"""Sobel on a masked array should be zero"""
np.random.seed(0)
result = F.sobel(np.random.uniform(size=(10, 10)), np.zeros((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_01_01_horizontal(self):
"""Sobel on an edge should be a horizontal line"""
i, j = np.mgrid[-5:6, -5:6]
image = (i >= 0).astype(float)
result = F.sobel(image)
# Fudge the eroded points
i[np.abs(j) == 5] = 10000
self.assertTrue(np.all(result[i == 0] == 1))
self.assertTrue(np.all(result[np.abs(i) > 1] == 0))
def test_01_02_vertical(self):
"""Sobel on a vertical edge should be a vertical line"""
i, j = np.mgrid[-5:6, -5:6]
image = (j >= 0).astype(float)
result = F.sobel(image)
j[np.abs(i) == 5] = 10000
self.assertTrue(np.all(result[j == 0] == 1))
self.assertTrue(np.all(result[np.abs(j) > 1] == 0))
class TestHSobel(unittest.TestCase):
def test_00_00_zeros(self):
"""Horizontal sobel on an array of all zeros"""
result = F.hsobel(np.zeros((10, 10)), np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_00_01_mask(self):
"""Horizontal Sobel on a masked array should be zero"""
np.random.seed(0)
result = F.hsobel(np.random.uniform(size=(10, 10)), np.zeros((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_01_01_horizontal(self):
"""Horizontal Sobel on an edge should be a horizontal line"""
i, j = np.mgrid[-5:6, -5:6]
image = (i >= 0).astype(float)
result = F.hsobel(image)
# Fudge the eroded points
i[np.abs(j) == 5] = 10000
self.assertTrue(np.all(result[i == 0] == 1))
self.assertTrue(np.all(result[np.abs(i) > 1] == 0))
def test_01_02_vertical(self):
"""Horizontal Sobel on a vertical edge should be zero"""
i, j = np.mgrid[-5:6, -5:6]
image = (j >= 0).astype(float)
result = F.hsobel(image)
self.assertTrue(np.all(result == 0))
class TestVSobel(unittest.TestCase):
def test_00_00_zeros(self):
"""Vertical sobel on an array of all zeros"""
result = F.vsobel(np.zeros((10, 10)), np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_00_01_mask(self):
"""Vertical Sobel on a masked array should be zero"""
np.random.seed(0)
result = F.vsobel(np.random.uniform(size=(10, 10)), np.zeros((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_01_01_vertical(self):
"""Vertical Sobel on an edge should be a vertical line"""
i, j = np.mgrid[-5:6, -5:6]
image = (j >= 0).astype(float)
result = F.vsobel(image)
# Fudge the eroded points
j[
|
np.abs(i)
|
numpy.abs
|
#!/usr/bin/env python
# /***************************************************************************
#
# @package: panda_simulator_examples
# @metapackage: panda_simulator
# @author: <NAME> <<EMAIL>>
#
# **************************************************************************/
# /***************************************************************************
# Copyright (c) 2019-2021, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************/
"""
This is a demo showing task-space control on the
simulator robot using the ROS topics and messages directly
from panda_simulator. The task-space force for the desired
pose is computed using a simple PD law, and the corresponding
joint torques are computed and sent to the robot.
Using this file you can set an equilibrium pose with the interactive marker. You can also set the target
by publishing to the topic "panda_simulator/equili_pose".
"""
import copy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point, TransformStamped,PoseStamped
from visualization_msgs.msg import *
from interactive_markers.interactive_marker_server import *
from franka_core_msgs.msg import EndPointState, JointCommand, RobotState
# -- add to pythonpath for finding rviz_markers.py
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# -------------------------------------------------
from multi_rviz_markers import RvizMarkers
# --------- Modify as required ------------
# Task-space controller parameters
# stiffness gains
P_pos = 50
P_ori = 0
# damping gains
D_pos = 1
D_ori = 0
# -----------------------------------------
publish_rate = 100
JACOBIAN = None
CARTESIAN_POSE = None
CARTESIAN_VEL = None
destination_marker = RvizMarkers()
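# A minimal numpy sketch of the PD law described in the module docstring, using
# the gains defined above; argument names are illustrative and the real loop
# below works on live ROS state instead. Orientation terms are omitted since
# P_ori = D_ori = 0 here, and the first three Jacobian rows are assumed to be
# the translational part.
def _pd_task_space_torques(goal_pos, curr_pos, curr_vel, jacobian):
    delta_pos = np.asarray(goal_pos) - np.asarray(curr_pos)   # task-space position error
    F = P_pos * delta_pos - D_pos * np.asarray(curr_vel)      # PD force on the end-effector
    return np.dot(np.asarray(jacobian)[:3, :].T, F)           # tau = J^T F (translational rows)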
def _on_robot_state(msg):
"""
Callback function for updating jacobian and EE velocity from robot state
"""
global JACOBIAN, CARTESIAN_VEL
JACOBIAN =
|
np.asarray(msg.O_Jac_EE)
|
numpy.asarray
|
import numpy as np
import scipy.special
import binom
from common import Models
def _make_lower(phi1, midx):
ret = {
Models.A_B: 0,
Models.B_A: phi1,
Models.diff_branches: 0,
}[midx]
return np.broadcast_to(ret, np.array(phi1).shape)
def _make_upper(phi1, midx):
ret = {
Models.A_B: phi1,
Models.B_A: 1,
Models.diff_branches: 1 - phi1,
}[midx]
return np.broadcast_to(ret, np.array(phi1).shape)
def binom_logpmf_scalar(X, N, P):
return binom.logpmf(
np.array([X]),
np.array([N]),
np.array([P]),
)[0]
def integral_separate_clusters(phi1, V1, V2, sidx, midx, logsub):
logP = binom_logpmf_scalar(
V1.var_reads[sidx],
V1.total_reads[sidx],
V1.omega_v[sidx]*phi1
)
lower = _make_lower(phi1, midx)
upper = _make_upper(phi1, midx)
A = V2.var_reads[sidx] + 1
B = V2.ref_reads[sidx] + 1
betainc_upper = scipy.special.betainc(A, B, V2.omega_v[sidx] * upper)
betainc_lower = scipy.special.betainc(A, B, V2.omega_v[sidx] * lower)
if np.isclose(betainc_upper, betainc_lower):
return 0
logP += np.log(betainc_upper - betainc_lower)
logP -= logsub
return
|
np.exp(logP)
|
numpy.exp
|
import numpy as np
arr =
|
np.array([1, 2, 3])
|
numpy.array
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Testing spatialimages
"""
from ..externals.six import BytesIO
import numpy as np
from ..spatialimages import (Header, SpatialImage, HeaderDataError,
ImageDataError)
from unittest import TestCase
from nose.tools import (assert_true, assert_false, assert_equal,
assert_not_equal, assert_raises)
from numpy.testing import assert_array_equal, assert_array_almost_equal
from .test_helpers import bytesio_round_trip
def test_header_init():
# test the basic header
hdr = Header()
assert_equal(hdr.get_data_dtype(), np.dtype(np.float32))
assert_equal(hdr.get_data_shape(), (0,))
assert_equal(hdr.get_zooms(), (1.0,))
hdr = Header(np.float64)
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (0,))
assert_equal(hdr.get_zooms(), (1.0,))
hdr = Header(np.float64, shape=(1,2,3))
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0))
hdr = Header(np.float64, shape=(1,2,3), zooms=None)
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0))
hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (3.0, 2.0, 1.0))
def test_from_header():
# check from header class method. Note equality checks below,
# equality methods used here too.
empty = Header.from_header()
assert_equal(Header(), empty)
empty = Header.from_header(None)
assert_equal(Header(), empty)
hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
copy = Header.from_header(hdr)
assert_equal(hdr, copy)
assert_false(hdr is copy)
class C(object):
def get_data_dtype(self): return np.dtype('u2')
def get_data_shape(self): return (5,4,3)
def get_zooms(self): return (10.0, 9.0, 8.0)
converted = Header.from_header(C())
assert_true(isinstance(converted, Header))
assert_equal(converted.get_data_dtype(), np.dtype('u2'))
assert_equal(converted.get_data_shape(), (5,4,3))
assert_equal(converted.get_zooms(), (10.0,9.0,8.0))
def test_eq():
hdr = Header()
other = Header()
assert_equal(hdr, other)
other = Header('u2')
assert_not_equal(hdr, other)
other = Header(shape=(1,2,3))
assert_not_equal(hdr, other)
hdr = Header(shape=(1,2))
other = Header(shape=(1,2))
assert_equal(hdr, other)
other = Header(shape=(1,2), zooms=(2.0,3.0))
assert_not_equal(hdr, other)
def test_copy():
# test that copy makes independent copy
hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
hdr_copy = hdr.copy()
hdr.set_data_shape((4,5,6))
assert_equal(hdr.get_data_shape(), (4,5,6))
assert_equal(hdr_copy.get_data_shape(), (1,2,3))
hdr.set_zooms((4,5,6))
assert_equal(hdr.get_zooms(), (4,5,6))
assert_equal(hdr_copy.get_zooms(), (3,2,1))
hdr.set_data_dtype(np.uint8)
assert_equal(hdr.get_data_dtype(), np.dtype(np.uint8))
assert_equal(hdr_copy.get_data_dtype(), np.dtype(np.float64))
def test_shape_zooms():
hdr = Header()
hdr.set_data_shape((1, 2, 3))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (1.0,1.0,1.0))
hdr.set_zooms((4, 3, 2))
assert_equal(hdr.get_zooms(), (4.0,3.0,2.0))
hdr.set_data_shape((1, 2))
assert_equal(hdr.get_data_shape(), (1,2))
assert_equal(hdr.get_zooms(), (4.0,3.0))
hdr.set_data_shape((1, 2, 3))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (4.0,3.0,1.0))
# null shape is (0,)
hdr.set_data_shape(())
assert_equal(hdr.get_data_shape(), (0,))
assert_equal(hdr.get_zooms(), (1.0,))
# zooms of wrong lengths raise error
assert_raises(HeaderDataError, hdr.set_zooms, (4.0, 3.0))
assert_raises(HeaderDataError,
hdr.set_zooms,
(4.0, 3.0, 2.0, 1.0))
# as do negative zooms
assert_raises(HeaderDataError,
hdr.set_zooms,
(4.0, 3.0, -2.0))
def test_data_dtype():
hdr = Header()
assert_equal(hdr.get_data_dtype(),
|
np.dtype(np.float32)
|
numpy.dtype
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.io import fits
from astropy.modeling import models
from astropy.units.quantity import Quantity
import astropy.units as u
from astropy.wcs import WCS
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyUserWarning
from astropy.nddata import StdDevUncertainty, CCDData
import astropy
from numpy.testing import assert_array_equal
import pytest
import skimage
from ccdproc.core import (
ccd_process, cosmicray_median, cosmicray_lacosmic, create_deviation,
flat_correct, gain_correct, subtract_bias, subtract_dark, subtract_overscan,
transform_image, trim_image, wcs_project, Keyword)
from ccdproc.core import _blkavg
from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func
try:
from ..core import block_reduce, block_average, block_replicate
HAS_BLOCK_X_FUNCS = True
except ImportError:
HAS_BLOCK_X_FUNCS = False
# test creating deviation
# success expected if u_image * u_gain = u_readnoise
@pytest.mark.parametrize('u_image,u_gain,u_readnoise,expect_success', [
(u.electron, None, u.electron, True),
(u.electron, u.electron, u.electron, False),
(u.adu, u.electron / u.adu, u.electron, True),
(u.electron, None, u.dimensionless_unscaled, False),
(u.electron, u.dimensionless_unscaled, u.electron, True),
(u.adu, u.dimensionless_unscaled, u.electron, False),
(u.adu, u.photon / u.adu, u.electron, False),
])
def test_create_deviation(u_image, u_gain, u_readnoise,
expect_success):
ccd_data = ccd_data_func(data_size=10, data_mean=100)
ccd_data.unit = u_image
if u_gain is not None:
gain = 2.0 * u_gain
else:
gain = None
readnoise = 5 * u_readnoise
if expect_success:
ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise)
assert ccd_var.uncertainty.array.shape == (10, 10)
assert ccd_var.uncertainty.array.size == 100
assert ccd_var.uncertainty.array.dtype == np.dtype(float)
if gain is not None:
expected_var = np.sqrt(2 * ccd_data.data + 5 ** 2) / 2
else:
expected_var = np.sqrt(ccd_data.data + 5 ** 2)
np.testing.assert_array_equal(ccd_var.uncertainty.array,
expected_var)
assert ccd_var.unit == ccd_data.unit
# uncertainty should *not* have any units -- does it?
with pytest.raises(AttributeError):
ccd_var.uncertainty.array.unit
else:
with pytest.raises(u.UnitsError):
ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise)
def test_create_deviation_from_negative():
ccd_data = ccd_data_func(data_mean=0, data_scale=10)
ccd_data.unit = u.electron
readnoise = 5 * u.electron
ccd_var = create_deviation(ccd_data, gain=None, readnoise=readnoise,
disregard_nan=False)
np.testing.assert_array_equal(ccd_data.data < 0,
np.isnan(ccd_var.uncertainty.array))
def test_create_deviation_from_negative_no_nan():
ccd_data = ccd_data_func(data_mean=0, data_scale=10)
ccd_data.unit = u.electron
readnoise = 5 * u.electron
ccd_var = create_deviation(ccd_data, gain=None, readnoise=readnoise,
disregard_nan=True)
mask = (ccd_data.data < 0)
ccd_data.data[mask] = 0
expected_var =
|
np.sqrt(ccd_data.data + readnoise.value**2)
|
numpy.sqrt
|
import sys
from typing import Collection, Tuple, Optional, Union
import pandas as pd
import numpy as np
from scipy.sparse import issparse
from anndata import AnnData
from . import _simple as pp
from . import _highly_variable_genes as hvg
from ._utils import _get_mean_var
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils import check_array, sparsefuncs
from ..neighbors import compute_neighbors_umap, compute_connectivities_umap
import matplotlib.pyplot as pl
import time
from .. import logging as logg
def scrublet(
adata: AnnData,
sim_doublet_ratio: float = 2.0,
n_neighbors: Optional[int] = None,
expected_doublet_rate: float = 0.05,
normalize_kwds: dict = {},
use_highly_variable: bool = True,
find_highly_variable: bool = True,
highly_variable_kwds: dict = {},
pca_kwds: dict = {},
synthetic_doublet_umi_subsampling: float = 1.0,
log_transform: bool = False,
scale: bool = True,
zero_center: bool = True,
max_value: Optional[float] = None,
knn_method: str = 'umap',
fuzzy_knn: bool = False,
knn_dist_metric: str = 'euclidean',
random_state: int = 0,
copy: bool = False,
return_intermediates: bool = False
) -> Optional[Union[AnnData, Tuple[AnnData, AnnData], Tuple[AnnData, AnnData, AnnData]]]:
"""Predict doublets using Scrublet
Predict cell doublets using a nearest-neighbor classifier of observed transcriptomes and simulated doublets. Works best if the input is a raw (unnormalized) counts matrix from a single sample or a collection of similar samples from the same experiment.
.. note::
More information and bug reports `here <https://github.com/swolock/scrublet>`__.
Parameters
----------
adata
The annotated data matrix of shape ``n_obs`` × ``n_vars``. Rows
correspond to cells and columns to genes. Scrublet uses ``adata.X`` as
the input and works best when ``.X`` represents raw, unnormalized
UMI counts.
sim_doublet_ratio
Number of doublets to simulate relative to the number of observed
transcriptomes ``n_obs``.
n_neighbors
Number of neighbors used to construct the KNN graph of observed
transcriptomes and simulated doublets. If ``None``, this is
automatically set to ``np.round(0.5 * np.sqrt(n_obs))``.
expected_doublet_rate
The estimated doublet rate for the experiment.
synthetic_doublet_umi_subsampling
Rate for sampling UMIs when creating synthetic doublets. If 1.0, each
doublet is created by simply adding the UMI counts from two randomly
sampled observed transcriptomes. For values less than 1, the UMI counts
are added and then randomly sampled at the specified rate.
normalize_kwds
        Keyword arguments for :func:`~scanpy.pp.normalize_per_cell`.
    use_highly_variable
        Whether to use only highly variable genes for PCA, stored in ``.var['highly_variable']`` if ``find_highly_variable=False`` or generated by :func:`~scanpy.pp.highly_variable_genes` if ``find_highly_variable=True``.
    find_highly_variable
        Whether to find highly variable genes to use as the input for PCA. If ``use_highly_variable=False``, this argument is ignored.
    highly_variable_kwds
        Keyword arguments for :func:`~scanpy.pp.highly_variable_genes`.
    log_transform
        Whether to use :func:`~scanpy.pp.log1p` to log-transform the data prior to PCA.
scale
Whether to scale the PCA input data such that all variables have unit variance.
zero_center
Whether to center the PCA input data such that all variables have zero mean.
max_value
Clip (truncate) to this value after scaling and/or zero centering. If ``None``, do not clip.
    pca_kwds
        Keyword arguments for :func:`~scanpy.pp.pca`.
knn_method
Algorithm used to find nearest neighbors for the classifier. Must be ``'umap'`` (default) or ``'annoy'``. To replicate original Scrublet, use ``'annoy'``.
fuzzy_knn
If ``True``, weight neighbors by connectivity. To replicate original Scrublet, set to ``False`` (default).
knn_dist_metric
A known metric for UMAP or Annoy.
random_state
Initial state for doublet simulation, PCA, and nearest neighbors.
copy
If ``True``, return a copy of the input ``adata`` with Scrublet results added. Otherwise, Scrublet results are added in place.
    return_intermediates
        Whether to return :class:`~anndata.AnnData` objects containing the preprocessed data for the observed and simulated transcriptomes.
Returns
-------
adata : anndata.AnnData
        If ``copy=True`` the annotated matrix is returned; otherwise the following fields are added to ``adata``:
        ``.obs['doublet_score']``
            Doublet scores for each observed transcriptome
        ``.obsm['X_pca_scrublet']``
            PCA coordinates of observed transcriptomes
        ``.uns['scrublet']['Xsim_pca']``
            PCA coordinates of simulated doublet transcriptomes
        ``.uns['scrublet']['doublet_scores_sim']``
            Doublet scores for each simulated doublet transcriptome
        ``.uns['scrublet']['doublet_parents']``
            Pairs of ``.obs_names`` used to generate each simulated doublet transcriptome
        ``.uns['scrublet']['parameters']``
            Dictionary of Scrublet parameters
Examples
--------
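    A minimal sketch of typical usage (illustrative only; the data path and
    the parameter value shown are assumptions, not recommendations):

    >>> import scanpy as sc
    >>> adata = sc.read_10x_mtx('filtered_gene_bc_matrices/')  # hypothetical path
    >>> sc.external.pp.scrublet.scrublet(adata, expected_doublet_rate=0.06)
    >>> adata.obs['doublet_score'].head()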
"""
logg.info('Running Scrublet...')
t_start = time.time()
if copy:
adata = adata.copy()
# Check sparse; convert if not
adata_obs = adata.copy()
# Check the input is valid
if use_highly_variable:
if (not find_highly_variable) and ('highly_variable' not in adata_obs.var.keys()):
raise ValueError('Did not find adata.var[\'highly_variable\']. '
'Please set `find_highly_variable=True`, '
'set `use_highly_variable=False`, '
'or add adata.var[\'highly_variable\'].')
if n_neighbors is None:
n_neighbors = int(round(0.5*np.sqrt(adata.shape[0])))
logg.msg('Preprocessing', v=4)
if 'n_counts' not in adata_obs.obs.keys():
adata_obs.obs['n_counts'] = adata_obs.X.sum(1).A.squeeze()
if 'counts_per_cell_after' not in normalize_kwds:
normalize_kwds['counts_per_cell_after'] = 1e4
pp.normalize_per_cell(adata_obs, **normalize_kwds)
if use_highly_variable:
if find_highly_variable:
highly_variable_kwds['inplace'] = False
adata_obs.var['highly_variable'] = hvg.highly_variable_genes(
pp.log1p(adata_obs, copy=True),
**highly_variable_kwds
)['highly_variable']
adata_obs.raw = adata[:, adata_obs.var['highly_variable']]
adata_obs = adata_obs[:, adata_obs.var['highly_variable']]
else:
adata_obs.raw = adata
logg.msg('Simulating doublets', v=4)
adata_sim = _simulate_doublets(adata_obs, sim_doublet_ratio, synthetic_doublet_umi_subsampling, random_seed=random_state)
normalize_kwds['counts_per_cell'] = adata_sim.obs['n_counts'].values
pp.normalize_per_cell(adata_sim, **normalize_kwds)
if log_transform:
pp.log1p(adata_obs)
pp.log1p(adata_sim)
if scale:
mean, var = _get_mean_var(adata_obs.X)
if zero_center:
if issparse(adata_obs.X):
adata_obs.X = adata_obs.X.toarray()
adata_sim.X = adata_sim.X.toarray()
_scale_precomputed(adata_obs.X, mean, var, zero_center)
_scale_precomputed(adata_sim.X, mean, var, zero_center)
elif zero_center:
if issparse(adata_obs.X):
adata_obs.X = adata_obs.X.toarray()
adata_sim.X = adata_sim.X.toarray()
mean = adata_obs.X.mean(0)
adata_obs.X -= mean
adata_sim.X -= mean
if max_value is not None:
adata_obs.X[adata_obs.X > max_value] = max_value
adata_sim.X[adata_sim.X > max_value] = max_value
logg.msg('Running dimensionality reduction', v=4)
pca_kwds['zero_center'] = zero_center
pca_kwds['random_state'] = random_state
pca_kwds['return_info'] = True
pca_obs, pca_components = pp.pca(
adata_obs.X,
**pca_kwds
)[:2]
if issparse(adata_sim.X):
pca_sim = safe_sparse_dot(
check_array(adata_sim.X, accept_sparse='csr'),
pca_components.T)
else:
pca_sim = np.dot(
(adata_sim.X - adata_obs.X.mean(0)[None, :]),
pca_components.T)
adata_obs.obsm['X_pca'] = pca_obs
adata_sim.obsm['X_pca'] = pca_sim
logg.msg('Calculating doublet scores', v=4)
doublet_scores_obs, doublet_scores_sim = _nearest_neighbor_classifier(
pca_obs,
pca_sim,
expected_doublet_rate,
n_neighbors=n_neighbors,
method=knn_method,
knn_dist_metric=knn_dist_metric,
fuzzy=fuzzy_knn,
random_state=random_state
)
adata_obs.obs['doublet_score'] = doublet_scores_obs
adata_sim.obs['doublet_score'] = doublet_scores_sim
adata.obs['doublet_score'] = doublet_scores_obs
adata.uns['scrublet'] = {}
adata.uns['scrublet']['doublet_scores_sim'] = doublet_scores_sim
adata.uns['scrublet']['doublet_parents'] = adata_sim.obsm['doublet_parents']
adata.obsm['X_pca_scrublet'] = pca_obs
adata.uns['scrublet']['Xsim_pca'] = pca_sim
adata.uns['scrublet']['parameters'] = {
'expected_doublet_rate': expected_doublet_rate,
'sim_doublet_ratio': sim_doublet_ratio,
'n_neighbors': n_neighbors,
'log_transform': log_transform,
'scale': scale,
'zero_center': zero_center,
'max_value': max_value,
'fuzzy_knn': fuzzy_knn,
'random_state': random_state,
'variable_genes': adata_obs.var_names.values.astype(str)
}
call_doublets(adata, copy=False)
t_end = time.time()
logg.info(' Scrublet finished ({})'.format(logg._sec_to_str(t_end - t_start)))
if copy:
return (adata, adata_obs, adata_sim) if return_intermediates else adata
else:
return (adata_obs, adata_sim) if return_intermediates else None
def _simulate_doublets(
adata: AnnData,
sim_doublet_ratio: float = 2.0,
synthetic_doublet_umi_subsampling: float = 1.0,
random_seed: int = 0
) -> AnnData:
''' Simulate doublets by adding the counts of random observed transcriptome pairs.
Arguments
---------
    sim_doublet_ratio : float, optional (default: 2.0)
        Number of doublets to simulate relative to the number of observed
        transcriptomes.
    synthetic_doublet_umi_subsampling : float, optional (default: 1.0)
Rate for sampling UMIs when creating synthetic doublets. If 1.0,
each doublet is created by simply adding the UMIs from two randomly
sampled observed transcriptomes. For values less than 1, the
UMI counts are added and then randomly sampled at the specified
rate.
Sets
----
doublet_parents_
'''
n_obs = adata.shape[0]
n_sim = int(n_obs * sim_doublet_ratio)
np.random.seed(random_seed)
doublet_parents = np.random.randint(0, n_obs, size=(n_sim, 2))
X1 = adata.raw.X[doublet_parents[:,0], :]
X2 = adata.raw.X[doublet_parents[:,1], :]
tots1 = adata.obs['n_counts'][doublet_parents[:,0]].values
tots2 = adata.obs['n_counts'][doublet_parents[:,1]].values
if synthetic_doublet_umi_subsampling < 1:
X_sim, total_counts_sim = _subsample_counts(X1 + X2, synthetic_doublet_umi_subsampling, tots1 + tots2)
else:
X_sim = X1 + X2
total_counts_sim = tots1 + tots2
adata_sim = AnnData(X_sim)
adata_sim.obs['n_counts'] = total_counts_sim
adata_sim.obsm['doublet_parents'] = doublet_parents
return adata_sim
def _subsample_counts(X, rate, original_totals):
if rate < 1:
X.data = np.random.binomial(np.round(X.data).astype(int), rate).astype(np.float32)
X.eliminate_zeros()
current_totals = X.sum(1).A.squeeze()
unsampled_orig_totals = original_totals - current_totals
unsampled_downsamp_totals = np.random.binomial(np.round(unsampled_orig_totals).astype(int), rate)
final_downsamp_totals = current_totals + unsampled_downsamp_totals
else:
final_downsamp_totals = original_totals
return X, final_downsamp_totals
def _scale_precomputed(X, column_means, column_vars, zero_center=True):
scale = np.sqrt(column_vars)
if zero_center:
X -= column_means
scale[scale == 0] = 1e-12
X /= scale
else:
if issparse(X):
sparsefuncs.inplace_column_scale(X, 1/scale)
else:
X /= scale
def _nearest_neighbor_classifier(pca_obs, pca_sim, expected_doublet_rate, n_neighbors=20, method='umap', knn_dist_metric='euclidean', fuzzy=False, random_state=0):
pca_merged = np.vstack((pca_obs, pca_sim))
adata_merged = AnnData(np.zeros((pca_merged.shape[0], 1)))
adata_merged.obsm['X_pca'] = pca_merged
n_obs = pca_obs.shape[0]
n_sim = pca_sim.shape[0]
doub_labels = np.concatenate((np.zeros(n_obs, dtype=int),
np.ones(n_sim, dtype=int)))
# Adjust k (number of nearest neighbors) based on the ratio of simulated to observed cells
k_adj = int(round(n_neighbors * (1 + n_sim / float(n_obs))))
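    # (in expectation this leaves roughly n_neighbors observed cells among each
    # point's k_adj neighbors, since observed cells make up a fraction
    # n_obs / (n_obs + n_sim) of the merged set)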
# Find k_adj nearest neighbors
if method == 'annoy':
knn_indices, knn_distances = _get_knn_graph_annoy(
adata_merged.obsm['X_pca'],
n_neighbors=k_adj,
dist_metric=knn_dist_metric,
random_seed=random_state)
elif method == 'umap':
knn_indices, knn_distances = compute_neighbors_umap(
adata_merged.obsm['X_pca'],
k_adj+1,
random_state,
metric=knn_dist_metric)[:2]
knn_indices = knn_indices[:, 1:]
knn_distances = knn_distances[:, 1:]
else:
raise ValueError('Nearest neighbor method must be \'umap\' or \'annoy\'.')
if fuzzy:
distances, connectivities = compute_connectivities_umap(
knn_indices, knn_distances, adata_merged.shape[0], k_adj)
adjacency = connectivities > 0
n_sim_neigh = adjacency[:, n_obs:].sum(1).A.squeeze()
n_obs_neigh = adjacency[:, :n_obs].sum(1).A.squeeze()
else:
n_sim_neigh = (knn_indices >= n_obs).sum(1)
n_obs_neigh = (knn_indices < n_obs).sum(1)
# Calculate doublet score based on ratio of simulated cell neighbors vs. observed cell neighbors
rho = expected_doublet_rate
r = n_sim / float(n_obs)
nd = n_sim_neigh.astype(float)
ns = n_obs_neigh.astype(float)
N = (nd + ns).astype(float)
# Bayesian
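    # A sketch of the reasoning (following the Scrublet approach): q is an
    # add-one smoothed estimate of the fraction of a cell's neighbors that are
    # simulated doublets, and Ld rescales it into a doublet likelihood using
    # the expected doublet rate rho and the simulation ratio r.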
    q = (nd + 1) / (N + 2)
Ld = q * rho / r / (1 - rho - q * (1 - rho - rho / r))
doublet_scores_obs = Ld[doub_labels == 0]
doublet_scores_sim = Ld[doub_labels == 1]
return doublet_scores_obs, doublet_scores_sim
def _get_knn_graph_annoy(X, n_neighbors=5, dist_metric='euclidean', random_seed=0):
'''
Build k-nearest-neighbor graph
Return edge list and nearest neighbor matrix
'''
try:
from annoy import AnnoyIndex
except ImportError:
raise ImportError(
'Please install the package "annoy". '
            'Alternatively, set `knn_method=\'umap\'`.')
npc = X.shape[1]
ncell = X.shape[0]
annoy_index = AnnoyIndex(npc, metric=dist_metric)
annoy_index.set_seed(random_seed)
for i in range(ncell):
annoy_index.add_item(i, list(X[i,:]))
annoy_index.build(10) # 10 trees
knn = []
knn_dists = []
for iCell in range(ncell):
neighbors, dists = annoy_index.get_nns_by_item(iCell, n_neighbors+1, include_distances=True)
knn.append(neighbors[1:])
knn_dists.append(dists[1:])
knn = np.array(knn, dtype=int)
knn_dists = np.array(knn_dists)
return knn, knn_dists
def call_doublets(adata, threshold=None, copy=False):
    ''' Call transcriptomes as doublets or singlets
Arguments
---------
threshold : float, optional (default: None)
Doublet score threshold for calling a transcriptome
a doublet. If `None`, this is set automatically by looking
for the minimum between the two modes of the `doublet_scores_sim_`
histogram. It is best practice to check the threshold visually
using the `doublet_scores_sim_` histogram and/or based on
co-localization of predicted doublets in a 2-D embedding.
Sets
----
predicted_doublets_, z_scores_, threshold_,
detected_doublet_rate_, detectable_doublet_fraction,
overall_doublet_rate_
'''
if copy:
adata = adata.copy()
if 'scrublet' not in adata.uns:
raise ValueError(
'\'scrublet\' not found in `adata.uns`. You must run '
'sc.external.pp.scrublet.scrublet() first.')
Ld_obs = adata.obs['doublet_score'].values
Ld_sim = adata.uns['scrublet']['doublet_scores_sim']
if threshold is None:
# automatic threshold detection
# http://scikit-image.org/docs/dev/api/skimage.filters.html
try:
from skimage.filters import threshold_minimum
except ImportError:
logg.warn('Unable to set doublet score threshold automatically, '
'so it has been set to 1 by default. To enable '
'automatic threshold detection, install the package '
'\'scikit-image\'. Alternatively, manually '
'specify a threshold and call doublets '
'using `sc.external.pp.scrublet.call_doublets(adata, threshold)`.')
adata.obs['predicted_doublet'] = pd.Categorical(
|
np.repeat(False, adata.obs.shape[0])
|
numpy.repeat
|
"""This file contains functions which are used to generate the log-likelihood
for different memory models and other code required to run the experiments in
the manuscript."""
import multiprocessing as MP
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import seaborn as sns
# Constants
TIME_SCALE = 60 * 60 * 24
MODEL_POWER = True
B = [1]
POWER_B = 1
def get_unique_user_lexeme(data_dict):
"""Get all unique (user, lexeme) pairs."""
pairs = set()
for u_id in data_dict.keys():
pairs.update([(u_id, x) for x in data_dict[u_id].keys()])
return sorted(pairs)
def max_unif(N, sum_D):
"""Find maximum value of N * log(x) - x * sum_D"""
x_max = N / sum_D
return N * np.log(x_max) - sum_D * x_max
def max_memorize(n_0, a, b, recalled, Ds,
q_fixed=None, n_max=np.inf, n_min=0, verbose=True):
"""Return max_{over q} memorizeLL."""
# TODO: Currently, these are not true.
# n_max=1440, n_min=1/36500000
# maximum forgetting rate is clipped at 1 minute for exp(-1) forgetting and
# minimum forgetting rate is that exp(-1) chance of forgetting after 100,000 years.
    assert len(recalled) == len(Ds), "recalled and Ds are not of the same length."
N = len(Ds)
n_t = n_0
log_sum = 0
int_sum = 0
n_ts = []
m_dts = []
not_warned_min, not_warned_max = True, True
n_correct, n_wrong = 0, 0
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for D, recall in zip(Ds, recalled):
if MODEL_POWER is False:
m_dt = np.exp(-n_t * D)
else:
m_dt = (1 + POWER_B * D)**(-n_t)
n_ts.append(n_t)
m_dts.append(m_dt)
if n_t < 1e-20:
# log_sum += np.log(n_0) + n_correct * np.log(a) + n_wrong * np.log(b) + np.log(D)
int_sum += n_t * (D ** 2) / 2
else:
if MODEL_POWER is False:
int_sum += D + np.expm1(-n_t * D) / n_t
else:
int_sum += D - ((1 + POWER_B * D) ** (1 - n_t) - 1) / (POWER_B * (1 - n_t))
if m_dt >= 1.0:
log_sum = -np.inf
else:
log_sum += np.log1p(-m_dt)
if recall:
n_t *= (1 - a)
n_correct += 1
else:
n_t *= (1 + b)
n_wrong += 1
n_t = min(n_max, max(n_min, n_t))
if n_t == n_max and not_warned_max:
if verbose:
warnings.warn('Max boundary hit.')
not_warned_max = False
if n_t == n_min and not_warned_min:
if verbose:
warnings.warn('Min boundary hit.')
not_warned_min = False
if int_sum != 0:
q_max = 1 / (4 * ((N / 2) / int_sum) ** 2) if q_fixed is None else q_fixed
else:
# If int_sum == 0, then LL should be -inf, not NaN
q_max = 1.0
LL = log_sum - (N / 2) * np.log(q_max) - (1 / q_max)**(0.5) * int_sum
return {
'q_max' : q_max,
'n_ts' : n_ts,
'm_dts' : m_dts,
'm_mean' : np.mean(m_dts),
'log_sum' : log_sum,
'int_sum' : int_sum,
'LL' : LL,
'max_boundary_hit' : not not_warned_max,
'min_boundary_hit' : not not_warned_min,
'n_max' : n_max,
'n_min' : n_min
}
def get_training_pairs(data_dict, pairs):
"""Returns the subset of pairs which have more than 3 reviews, i.e.
the set for which we will be able to perform training using `n-1` reviews
and testing for the last review."""
training_pairs = []
for u_id, l_id in pairs:
if len(data_dict[u_id][l_id]) >= 3:
training_pairs.append((u_id, l_id))
return training_pairs
def calc_ll_arr(method, data_arr, alpha=None, beta=None,
success_prob=0.49, eps=1e-10,
all_mem_output=False, verbose=True):
"""Calculate LL for a given user_id, lexeme_id pair's data_arr."""
n_0 = data_arr[0]['n_0']
if method == 'uniform':
sum_D = max(sum(x['delta_scaled'] for x in data_arr), eps)
N = len(data_arr)
return max_unif(N, sum_D)
elif method == 'memorize':
recalled = np.asarray([x['p_recall'] > success_prob for x in data_arr])
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
op = max_memorize(n_0, alpha, beta, recalled, deltas, verbose=verbose)
if not all_mem_output:
return op['LL']
else:
return op
else:
raise ValueError("Invalid method: {}".format(method))
def calc_user_LL_dict(data_dict, alpha, beta, lexeme_difficulty, map_lexeme,
success_prob=0.49, n_procs=None, pairs=None, verbose=True,
training=False):
"""Calculate LL while assuming that the LL factors are the same per user
instead of setting them for each (user, lexeme) pair.
If `training` is True, then the LL calculation is done only for the
first n-1 entries instead of for all events in the sequence.
"""
u_l_dict = defaultdict(lambda: defaultdict(lambda: {}))
lexeme_difficulty = np.abs(lexeme_difficulty)
global stat_worker
def stat_worker(params):
u_id = params
data_per_lexeme = data_dict[u_id]
n_0s = []
Ns, sumDs = [], []
log_sums, int_sums = [], []
all_mem_op = []
# The tests for all lexemes.
all_tests = []
lexeme_ids = sorted(data_per_lexeme.keys())
valid_lexeme_ids = []
for l_id in lexeme_ids:
arr = data_per_lexeme[l_id]
if training:
if len(arr) < 3:
# Cannot calculate the LL for sequences shorter than 3
# elements if we are looking to train + test with these
# sequences.
continue
else:
# Ignore the last review, which we will use for testing.
all_tests.append(arr[-1])
# Append the test before truncating arr
arr = arr[:-1]
valid_lexeme_ids.append(l_id)
n_0 = arr[0]['n_0']
n_0s.append(n_0)
Ns.append(len(arr))
sumDs.append(sum(x['delta_scaled'] for x in arr))
mem_res = calc_ll_arr('memorize', arr,
alpha=alpha, beta=beta,
success_prob=success_prob, all_mem_output=True,
verbose=verbose)
log_sums.append(mem_res['log_sum'])
int_sums.append(mem_res['int_sum'])
all_mem_op.append(mem_res)
c_unif = np.sum(Ns) / np.sum(sumDs)
q_max = 1 / (4 * ((np.sum(Ns) / 2) / np.sum(int_sums)) ** 2)
res = {}
for idx, l_id in enumerate(valid_lexeme_ids):
res[l_id] = {
'uniform_LL': Ns[idx] * np.log(c_unif) - sumDs[idx] * c_unif,
'memorize_LL': log_sums[idx] + Ns[idx] * np.log(q_max) / 2 - (1 / q_max)**(0.5) * int_sums[idx],
'mem_op': all_mem_op[idx],
'q_max': q_max,
'c_unif': c_unif
}
if training:
res[l_id]['test'] = all_tests[idx]
return u_id, res
if n_procs is None:
n_procs = MP.cpu_count()
user_ids = sorted(set([u_id for u_id, _ in pairs]))
with MP.Pool(n_procs) as pool:
if n_procs > 1:
map_func = pool.map
else:
# This aids debugging.
map_func = map
for u_id, res in map_func(stat_worker, user_ids):
u_l_dict[u_id] = res
return u_l_dict
def max_threshold(n_0, a, b, recalled, Ds, w, p,
alpha_fixed=None, n_max=np.inf, n_min=0, verbose=True):
"""Return max_{over a} threshold-LL, unless alpha_fixed is provided.
In that case, the LL is calculated for the given alpha.
Note (relationship of the symbols with those used in the paper):
- p is m_{th} in the paper.
- alpha (also alpha_max) is c in the paper
    - w is 1/zeta in the paper.
"""
    assert len(recalled) == len(Ds), "recalled and Ds are not of the same length."
N = len(Ds)
n_t = n_0
log_sum = 0
int_sum = 0
n_ts = []
m_dts = []
tau_dts = []
not_warned_min, not_warned_max = True, True
n_correct, n_wrong = 0, 0
sum_third = 0
sum_second = 0
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for D, recall in zip(Ds, recalled):
if MODEL_POWER is True:
tau = (np.exp(-np.log(p) / n_t) - 1) / B[0]
else:
tau = -np.log(p) / n_t
if n_t < 1e-20 and p != 1.0:
raise Exception("P should be 1 when n_t is not finite")
# When n_t is too small, p should also be 1.
elif n_t < 1e-20 and p == 1.0:
D_ = np.max([D, 0.0001])
else:
D_ = np.max([D - tau, 0.0001])
sum_third += w * np.expm1(-D_ / w)
sum_second += -D_ / w
n_ts.append(n_t)
m_dts.append(np.exp(-n_t * D))
tau_dts.append(tau)
if recall:
n_t *= a
n_correct += 1
else:
n_t *= b
n_wrong += 1
n_t = min(n_max, max(n_min, n_t))
if n_t == n_max and not_warned_max:
if verbose:
warnings.warn('Max boundary hit.')
not_warned_max = False
if n_t == n_min and not_warned_min:
if verbose:
warnings.warn('Min boundary hit.')
not_warned_min = False
if alpha_fixed is None:
alpha_max = -N / sum_third
else:
alpha_max = alpha_fixed
LL = N * np.log(np.max([alpha_max, 0.0001])) + sum_second + alpha_max * sum_third
if np.isfinite(LL).sum() == 0:
raise Exception("LL is not finite")
return {
'alpha_max': alpha_max,
'n_ts': n_ts,
'm_dts': m_dts,
'm_mean': np.mean(m_dts),
'log_sum': log_sum,
'int_sum': int_sum,
'LL': LL,
'max_boundary_hit': not not_warned_max,
'min_boundary_hit': not not_warned_min,
'n_max': n_max,
'n_min': n_min,
'p': p,
'w': w,
'sum_second': sum_second, # sum_i -D_i / w
'sum_third': sum_third, # sum_i w * (exp(-D_i / w) - 1)
'N': N
}
def calc_ll_arr_thres(
method, data_arr, alpha=None, beta=None,
success_prob=0.49, eps=1e-10, w_range=None, p_range=None,
verbose=True, all_thres_output=True, alpha_fixed=None):
assert method == 'threshold', "This function only computes the max_threshold LL."
n_0 = data_arr[0]['n_0']
recalled = np.asarray([x['p_recall'] > success_prob for x in data_arr])
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
best_LL = None
if w_range is None:
w_range = [0.01, 0.1, 1, 10, 100]
n_is = [n_0]
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for x in data_arr:
if x['p_recall'] > success_prob:
n_is.append(n_is[-1] * alpha)
else:
n_is.append(n_is[-1] * beta)
# Remove the last n_t
n_is = np.array(n_is[:-1])
if p_range is None:
# In most cases p_ == 1, the np.unique limits useless iterations.
if (n_is < 1e-20).sum() > 0:
p_range = [1.0]
else:
p_ = np.exp(-deltas * n_is).max()
p_range = np.unique(np.linspace(p_, 1, 4))
for w in w_range:
for p in p_range:
op = max_threshold(n_0, a=alpha, b=beta, recalled=recalled,
Ds=deltas, p=p, w=w, verbose=verbose,
alpha_fixed=alpha_fixed)
if best_LL is None or best_LL['LL'] < op['LL']:
best_LL = op
if all_thres_output:
return best_LL
else:
return best_LL['LL']
def calc_LL_dict_threshold(data_dict, alpha, beta, pairs,
lexeme_difficulty, map_lexeme, success_prob=0.49,
p_range=None, w_range=None,
n_procs=None, verbose=True):
"""Calculate the LL of the threshold model optimized for each (user, item)
pair."""
u_l_dict = defaultdict(lambda: {})
lexeme_difficulty = np.abs(lexeme_difficulty)
global _max_threshold_worker
def _max_threshold_worker(params):
u_id, l_id = params
arr = data_dict[u_id][l_id]
op = calc_ll_arr_thres('threshold', arr, alpha=alpha, beta=beta,
success_prob=success_prob, all_thres_output=True,
verbose=verbose)
return u_id, l_id, {'threshold_op': op, 'threshold_LL': op['LL']}
if n_procs is None:
n_procs = MP.cpu_count()
with MP.Pool() as pool:
for u_id, l_id, res in pool.map(_max_threshold_worker, pairs):
u_l_dict[u_id][l_id] = res
return u_l_dict
def calc_user_ll_arr_thres(
method, user_data_dict, alpha=None, beta=None,
success_prob=0.49, eps=1e-10, w_range_init=None, p_range_init=None,
training=False, verbose=True, all_thres_output=True):
"""Calculates the best-LL for a given user, by computing it across all
items the user has touched.
If `training` is True, then only consider the first 'n - 1' reviews
of the user/lexme pairs, ignoring sequences smaller than 2.
"""
assert method == 'threshold', "This function only computes the max_threshold LL."
total_sum_second = defaultdict(lambda: 0)
total_sum_third = defaultdict(lambda: 0)
total_N = 0
p_ = 0.0
if w_range_init is None:
w_range = [0.01, 0.1, 1, 10, 100]
else:
w_range = w_range_init
if p_range_init is None:
for l_id in user_data_dict.keys():
data_arr = user_data_dict[l_id]
n_0 = data_arr[0]['n_0']
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
n_is = [n_0]
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for x in data_arr:
if x['p_recall'] > success_prob:
n_is.append(n_is[-1] * alpha)
else:
n_is.append(n_is[-1] * beta)
# Remove the last n_t
n_is = np.array(n_is[:-1])
# In most cases p_ == 1, the np.unique limits useless iterations.
if (n_is < 1e-20).sum() > 0:
p_ = 1.0
else:
p_ = max(p_, np.exp(-deltas * n_is).max())
if p_ < 1.0:
p_range = np.linspace(p_, 1, 4)
else:
# if p_ == 1.0, then no point taking linspace.
p_range = [p_]
else:
p_range = p_range_init
for l_id in user_data_dict.keys():
data_arr = user_data_dict[l_id]
if training:
if len(data_arr) < 3:
# Cannot calculate the LL for training and have a test unless
# there are at least 3 reviews.
continue
else:
# Calculate the LL only using the first 'n-1' reviews.
data_arr = data_arr[:-1]
total_N += len(data_arr)
n_0 = data_arr[0]['n_0']
recalled = np.asarray([x['p_recall'] > success_prob for x in data_arr])
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
for w in w_range:
for p in p_range:
op = max_threshold(n_0, a=alpha, b=beta, recalled=recalled,
Ds=deltas, p=p, w=w, verbose=verbose)
total_sum_second[w, p] += op['sum_second']
total_sum_third[w, p] += op['sum_third']
best_LL = None
for w, p in total_sum_second.keys():
alpha_max_user = - total_sum_third[w, p] / total_N
LL = total_N * alpha_max_user + total_sum_second[w, p] + alpha_max_user * total_sum_third[w, p]
if best_LL is None or best_LL['LL'] < LL:
best_LL = {
'LL': LL,
'w': w,
'p': p,
'sum_third': total_sum_third[w, p],
'sum_second': total_sum_second[w, p],
'alpha_max_user': alpha_max_user
}
if all_thres_output:
return best_LL
else:
return best_LL['LL']
def calc_user_LL_dict_threshold(data_dict, alpha, beta, pairs,
lexeme_difficulty, map_lexeme, success_prob=0.49,
p_range=None, w_range=None, training=False,
n_procs=None, verbose=True):
"""Calculate the LL of the threshold model optimized for each user.
if `training` is True, then it computes the likelihood only for the first
`n - 1` entries instead of for all 'n' reviews.
"""
u_l_dict = defaultdict(lambda: {})
lexeme_difficulty = np.abs(lexeme_difficulty)
if n_procs is None:
n_procs = MP.cpu_count()
global _max_user_c_worker
def _max_user_c_worker(params):
u_id = params
best_LL = calc_user_ll_arr_thres('threshold',
user_data_dict=data_dict[u_id],
alpha=alpha, beta=beta,
success_prob=success_prob,
training=training,
all_thres_output=True,
verbose=verbose)
return u_id, best_LL
with MP.Pool() as pool:
u_best_alpha = dict(pool.map(_max_user_c_worker, data_dict.keys()))
global _max_user_threshold_worker
def _max_user_threshold_worker(params):
u_id, l_id = params
alpha_max_user = u_best_alpha[u_id]['alpha_max_user']
w_range = [u_best_alpha[u_id]['w']]
p_range = [u_best_alpha[u_id]['p']]
arr = data_dict[u_id][l_id]
if training:
assert len(arr) >= 3, "Are you using `training_pairs` instead of" \
" all pairs in the call?"
test = arr[-1]
# Append the test before truncating arr
arr = arr[:-1]
op = calc_ll_arr_thres('threshold', arr, alpha=alpha, beta=beta,
success_prob=success_prob,
all_thres_output=True, verbose=verbose,
alpha_fixed=alpha_max_user, w_range=w_range,
p_range=p_range)
res = {'threshold_op': op, 'threshold_LL': op['LL']}
if training:
res['test'] = test
return u_id, l_id, res
with MP.Pool() as pool:
for u_id, l_id, res in pool.map(_max_user_threshold_worker, pairs):
u_l_dict[u_id][l_id] = res
return u_l_dict
def merge_with_thres_LL(other_LL, thres_LL, pairs):
"""Merge the dictionaries containing the threshold-LL and other thresholds.
Other_LL will be modified in place.
"""
for u_id, l_id in pairs:
for key in thres_LL[u_id][l_id]:
other_LL[u_id][l_id][key] = thres_LL[u_id][l_id][key]
return None
def get_all_durations(data_dict, pairs):
"""Generates all durations from the LL_dict or the data_dict."""
def _get_duration(user_id, item_id):
"""Generates test/train/total duration for the given user_id, item_id pair."""
session = data_dict[user_id][item_id]
session_length = len(session)
if session_length > 2:
train_duration = session[-2]['timestamp'] - session[0]['timestamp']
test_duration = session[-1]['timestamp'] - session[-2]['timestamp']
else:
train_duration = None
test_duration = None
if session_length > 1:
total_duration = session[-1]['timestamp'] - session[0]['timestamp']
else:
total_duration = None
return {
'train_duration': train_duration,
'test_duration': test_duration,
'total_duration': total_duration,
'session_length': session_length,
}
dur_dict = defaultdict(lambda: {})
for u_id, i_id in pairs:
dur_dict[u_id][i_id] = _get_duration(u_id, i_id)
return dur_dict
def filter_by_duration(durations_dict, pairs, T, alpha, verbose=False):
"""Filter the (u_id, l_id) by selecting those which have the duration in
[(1 - alpha) * T, (1 + alpha) * T]."""
filtered_pairs = []
for u_id, l_id in pairs:
train_duration = durations_dict[u_id][l_id]['train_duration'] / TIME_SCALE
if (1 - alpha) * T <= train_duration <= (1 + alpha) * T:
filtered_pairs.append((u_id, l_id))
count = len(filtered_pairs)
total = len(pairs)
if verbose:
print('{} / {} = {:.2f}% sequences selected.'
.format(count, total, count / total * 100.))
return filtered_pairs
def filter_by_users(pairs, users_, verbose=False):
"""Filter the (u_id, l_id) by selecting those which have u_id \in users_."""
filtered_pairs = []
for u_id, l_id in pairs:
if u_id in users_:
filtered_pairs.append((u_id, l_id))
count = len(filtered_pairs)
total = len(pairs)
if verbose:
print('{} / {} = {:.2f}% sequences selected.'
.format(count, total, count / total * 100.))
return filtered_pairs
def calc_empirical_forgetting_rate(data_dict, pairs, return_base=False, no_norm=False):
u_l_dict = defaultdict(lambda: defaultdict(lambda: None))
base = {}
base_count = {}
for u_id, l_id in pairs:
first_session = data_dict[u_id][l_id][0]
res = (- np.log(max(0.01, min(0.99, first_session['p_recall'] + 1e-10))) / (first_session['delta_scaled'] + 0.1))
if l_id not in base:
base[l_id] = res
base_count[l_id] = 1
else:
            base[l_id] += res
base_count[l_id] += 1
if return_base:
return dict([(l_id, base[l_id] / base_count[l_id]) for l_id in base.keys()])
for u_id, l_id in pairs:
last_session = data_dict[u_id][l_id][-1]
u_l_dict[u_id][l_id] = - np.log(max(0.01, min(0.99, last_session['p_recall'] + 1e-10))) / (last_session['delta_scaled'] + 0.1)
if not no_norm:
u_l_dict[u_id][l_id] = (u_l_dict[u_id][l_id]) / (base[l_id] / base_count[l_id])
else:
u_l_dict[u_id][l_id] = u_l_dict[u_id][l_id]
return u_l_dict
def calc_top_k_perf(LL_dict, perf, pairs, quantile=0.25, min_reps=0,
max_reps=None, with_overall=False, with_threshold=False,
only_finite=True, with_uniform=True, whis=1.5):
"""Calculates the average and median performance of people in the
top quantile by log-likelihood of following either strategy."""
def check_u_l(u_id, l_id):
return (not only_finite or
|
np.isfinite(perf[u_id][l_id])
|
numpy.isfinite
|
#!/usr/bin/env python
"""
substitution_model.py
Contains classes for defining Markov models of substitution.
These classes depend on an Alphabet class member for defining the set
of motifs that each represent a state in the Markov chain. Examples of
a 'dna' type alphabet motif is 'a', and of a 'codon' type motif is 'atg'.
By default all models include the gap motif ('-' for a 'dna' alphabet or
'---' for a 'codon' alphabet). This differs from software such as PAML,
where gaps are treated as ambiguous states (specifically, as 'n'). The gap
motif state can be excluded from the substitution model using the method
excludeGapMotif(). To ensure that the alignment and the substitution model
are defined with the same alphabet, it is recommended that any modifications
be made to the substitution model's alphabet and that this instance then be
given to the alignment.
The model's substitution rate parameters are represented as a dictionary
with the parameter names as keys, and predicate functions as the values.
These predicate functions compare a pair of motifs, returning True or False.
Many such functions are provided as methods of the class. For instance,
the istransition method is pertinent to dna based models. This method returns
True if an 'a'/'g' or 'c'/'t' pair is passed to it, False otherwise. In this
way the positioning of parameters in the instantaneous rate matrix (commonly
called Q) is determined.
>>> model = Nucleotide(equal_motif_probs=True)
>>> model.setparameterrules({'alpha': model.istransition})
>>> parameter_controller = model.makeParamController(tree)
"""
import numpy
from numpy.linalg import svd
import warnings
import inspect
from cogent.core import moltype
from cogent.evolve import parameter_controller, predicate, motif_prob_model
from cogent.evolve.substitution_calculation import (
SubstitutionParameterDefn as ParamDefn,
RateDefn, LengthDefn, ProductDefn, CallDefn, CalcDefn,
PartitionDefn, NonParamDefn, AlignmentAdaptDefn, ExpDefn,
ConstDefn, GammaDefn, MonotonicDefn, SelectForDimension,
WeightedPartitionDefn)
from cogent.evolve.discrete_markov import PsubMatrixDefn
from cogent.evolve.likelihood_tree import makeLikelihoodTreeLeaf
from cogent.maths.optimisers import ParameterOutOfBoundsError
__author__ = "<NAME>, <NAME> and <NAME>"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__contributors__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>", "<NAME>",
"<NAME>"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def predicate2matrix(alphabet, pred, mask=None):
"""From a test like istransition() produce an MxM boolean matrix"""
M = len(alphabet)
result = numpy.zeros([M,M], int)
for i in range(M):
for j in range(M):
if mask is None or mask[i,j]:
result[i,j] = pred(alphabet[i], alphabet[j])
return result
def redundancyInPredicateMasks(preds):
# Calculate the nullity of the predicates. If non-zero
# there is some redundancy and the model will be overparameterised.
if len(preds) <= 1:
return 0
eqns = 1.0 * numpy.array([list(mask.flat) for mask in preds.values()])
svs = svd(eqns)[1]
    # count non-duplicate, non-zero singular values
matrix_rank = len([sv for sv in svs if abs(sv) > 1e-8])
return len(preds) - matrix_rank
def _maxWidthIfTruncated(pars, delim, each):
# 'pars' is an array of lists of strings, how long would the longest
# list representation be if the strings were truncated at 'each'
# characters and joined together with 'delim'.
return max([
sum([min(len(par), each) for par in par_list])
+ len(delim) * (len(par_list)-1)
for par_list in pars.flat])
def _isSymmetrical(matrix):
return numpy.alltrue(numpy.alltrue(matrix == numpy.transpose(matrix)))
def extend_docstring_from(cls, pre=False):
def docstring_inheriting_decorator(fn):
parts = [getattr(cls,fn.__name__).__doc__, fn.__doc__ or '']
if pre: parts.reverse()
fn.__doc__ = ''.join(parts)
return fn
return docstring_inheriting_decorator
class _SubstitutionModel(object):
# Subclasses must provide
# .makeParamControllerDefns()
def __init__(self, alphabet,
motif_probs=None, optimise_motif_probs=False,
equal_motif_probs=False, motif_probs_from_data=None,
motif_probs_alignment=None, mprob_model=None,
model_gaps=False, recode_gaps=False, motif_length=None,
name="", motifs=None):
# subclasses can extend this incomplete docstring
"""
Alphabet:
- alphabet - An Alphabet object
- motif_length: Use a tuple alphabet based on 'alphabet'.
- motifs: Use a subalphabet that only contains those motifs.
- model_gaps: Whether the gap motif should be included as a state.
- recode_gaps: Whether gaps in an alignment should be treated as an
ambiguous state instead.
Motif Probability:
- motif_probs: Dictionary of probabilities.
- equal_motif_probs: Flag to set alignment motif probs equal.
- motif_probs_alignment: An alignment from which motif probs are set.
If none of these options are set then motif probs will be derived
from the data: ie the particular alignment provided later.
- optimise_motif_probs: Treat like other free parameters. Any values
set by the other motif_prob options will be used as initial values.
- mprob_model: 'tuple', 'conditional' or 'monomer' to specify how
tuple-alphabet (including codon) motif probs are used.
"""
# MISC
assert len(alphabet) < 65, "Alphabet too big. Try explicitly "\
"setting alphabet to PROTEIN or DNA"
self.name = name
self._optimise_motif_probs = optimise_motif_probs
# ALPHABET
if recode_gaps:
if model_gaps:
warnings.warn("Converting gaps to wildcards AND modeling gaps")
else:
model_gaps = False
self.recode_gaps = recode_gaps
self.MolType = alphabet.MolType
if model_gaps:
alphabet = alphabet.withGapMotif()
if motif_length > 1:
alphabet = alphabet.getWordAlphabet(motif_length)
if motifs is not None:
alphabet = alphabet.getSubset(motifs)
self.alphabet = alphabet
self.gapmotif = alphabet.getGapMotif()
self._word_length = alphabet.getMotifLen()
# MOTIF PROB ALPHABET MAPPING
if mprob_model is None:
mprob_model = 'tuple' if self._word_length==1 else 'conditional'
elif mprob_model == 'word':
mprob_model = 'tuple'
if model_gaps and mprob_model != 'tuple':
raise ValueError("mprob_model must be 'tuple' to model gaps")
isinst = self._isInstantaneous
self._instantaneous_mask = predicate2matrix(self.alphabet, isinst)
self._instantaneous_mask_f = self._instantaneous_mask * 1.0
self.mprob_model = motif_prob_model.makeModel(mprob_model, alphabet,
self._instantaneous_mask_f)
# MOTIF PROBS
if equal_motif_probs:
assert not (motif_probs or motif_probs_alignment), \
"Motif probs equal or provided but not both"
motif_probs = self.mprob_model.makeEqualMotifProbs()
elif motif_probs_alignment is not None:
assert not motif_probs, \
"Motif probs from alignment or provided but not both"
motif_probs = self.countMotifs(motif_probs_alignment)
motif_probs = motif_probs.astype(float) / sum(motif_probs)
assert len(alphabet) == len(motif_probs)
motif_probs = dict(zip(alphabet, motif_probs))
if motif_probs:
self.adaptMotifProbs(motif_probs) # to check
self.motif_probs = motif_probs
if motif_probs_from_data is None:
motif_probs_from_data = False
else:
self.motif_probs = None
if motif_probs_from_data is None:
motif_probs_from_data = True
self.motif_probs_from_align = motif_probs_from_data
def getParamList(self):
return []
def __str__(self):
s = ["\n%s (" % self.__class__.__name__ ]
s.append("name = '%s'; type = '%s';" %
(getattr(self, "name", None), getattr(self, "type", None)))
if hasattr(self, "predicate_masks"):
parlist = self.predicate_masks.keys()
s.append("params = %s;" % parlist)
motifs = self.getMotifs()
s.append("number of motifs = %s;" % len(motifs))
s.append("motifs = %s)\n" % motifs)
return " ".join(s)
def getAlphabet(self):
return self.alphabet
def getMprobAlphabet(self):
return self.mprob_model.getInputAlphabet()
def getMotifs(self):
return list(self.getAlphabet())
def getWordLength(self):
return self._word_length
def getMotifProbs(self):
"""Return the dictionary of motif probabilities."""
return self.motif_probs.copy()
def setParamControllerMotifProbs(self, pc, mprobs, **kw):
return self.mprob_model.setParamControllerMotifProbs(pc, mprobs, **kw)
def makeLikelihoodFunction(self, tree, motif_probs_from_align=None,
optimise_motif_probs=None, aligned=True, expm=None, digits=None,
space=None, **kw):
if motif_probs_from_align is None:
motif_probs_from_align = self.motif_probs_from_align
if optimise_motif_probs is None:
optimise_motif_probs = self._optimise_motif_probs
kw['optimise_motif_probs'] = optimise_motif_probs
kw['motif_probs_from_align'] = motif_probs_from_align
if aligned:
klass = parameter_controller.AlignmentLikelihoodFunction
else:
alphabet = self.getAlphabet()
assert alphabet.getGapMotif() not in alphabet
klass = parameter_controller.SequenceLikelihoodFunction
result = klass(self, tree, **kw)
if self.motif_probs is not None:
result.setMotifProbs(self.motif_probs, is_constant=
not optimise_motif_probs, auto=True)
if expm is None:
expm = self._default_expm_setting
if expm is not None:
result.setExpm(expm)
if digits or space:
result.setTablesFormat(digits=digits, space=space)
return result
def makeParamController(self, tree, motif_probs_from_align=None,
optimise_motif_probs=None, **kw):
# deprecate
return self.makeLikelihoodFunction(tree,
motif_probs_from_align = motif_probs_from_align,
optimise_motif_probs = optimise_motif_probs,
**kw)
def convertAlignment(self, alignment):
# this is to support for everything but HMM
result = {}
for seq_name in alignment.getSeqNames():
sequence = alignment.getGappedSeq(seq_name, self.recode_gaps)
result[seq_name] = self.convertSequence(sequence, seq_name)
return result
def convertSequence(self, sequence, name):
# makeLikelihoodTreeLeaf, sort of an indexed profile where duplicate
# columns stored once, so likelihoods only calc'd once
return makeLikelihoodTreeLeaf(sequence, self.getAlphabet(), name)
def countMotifs(self, alignment, include_ambiguity=False):
return self.mprob_model.countMotifs(alignment,
include_ambiguity, self.recode_gaps)
def makeAlignmentDefn(self, model):
align = NonParamDefn('alignment', ('locus',))
# The name of this matters, it's used in likelihood_function.py
# to retrieve the correct (adapted) alignment.
return AlignmentAdaptDefn(model, align)
def adaptMotifProbs(self, motif_probs, auto=False):
return self.mprob_model.adaptMotifProbs(motif_probs, auto=auto)
def calcMonomerProbs(self, word_probs):
# Not presently used, always go monomer->word instead
return self.mprob_model.calcMonomerProbs(word_probs)
def calcWordProbs(self, monomer_probs):
return self.mprob_model.calcWordProbs(monomer_probs)
def calcWordWeightMatrix(self, monomer_probs):
return self.mprob_model.calcWordWeightMatrix(monomer_probs)
def makeParamControllerDefns(self, bin_names, endAtQd=False):
(input_probs, word_probs, mprobs_matrix) = \
self.mprob_model.makeMotifWordProbDefns()
if len(bin_names) > 1:
bprobs = PartitionDefn(
[1.0/len(bin_names) for bin in bin_names], name = "bprobs",
dimensions=['locus'], dimension=('bin', bin_names))
else:
bprobs = None
defns = {
'align': self.makeAlignmentDefn(ConstDefn(self, 'model')),
'bprobs': bprobs,
'word_probs': word_probs,
}
rate_params = self.makeRateParams(bprobs)
if endAtQd:
defns['Qd'] = self.makeQdDefn(word_probs, mprobs_matrix, rate_params)
else:
defns['psubs'] = self.makePsubsDefn(
bprobs, word_probs, mprobs_matrix, rate_params)
return defns
class DiscreteSubstitutionModel(_SubstitutionModel):
_default_expm_setting = None
def _isInstantaneous(self, x, y):
return True
def getParamList(self):
return []
def makeRateParams(self, bprobs):
return []
def makePsubsDefn(self, bprobs, word_probs, mprobs_matrix, rate_params):
assert len(rate_params) == 0
assert word_probs is mprobs_matrix, "Must use simple mprob model"
motifs = tuple(self.getAlphabet())
return PsubMatrixDefn(
name="psubs", dimension = ('motif', motifs), default=None,
dimensions=('locus', 'edge'))
class _ContinuousSubstitutionModel(_SubstitutionModel):
# subclass must provide:
#
# - parameter_order: a list of parameter names corresponding to the
# arguments of:
#
# - calcExchangeabilityMatrix(*params)
# convert len(self.parameter_order) params to a matrix
"""A substitution model for which the rate matrix (P) is derived from an
instantaneous rate matrix (Q). The nature of the parameters used to define
Q is up to the subclasses.
"""
# At some point this can be made variable, and probably
# the default changed to False
long_indels_are_instantaneous = True
_scalableQ = True
_exponentiator = None
_default_expm_setting = 'either'
@extend_docstring_from(_SubstitutionModel)
def __init__(self, alphabet, with_rate=False, ordered_param=None,
distribution=None, partitioned_params=None, do_scaling=None, **kw):
"""
- with_rate: Add a 'rate' parameter which varies by bin.
- ordered_param: name of a single parameter which distinguishes any bins.
- distribution: choices of 'free' or 'gamma' or an instance of some
distribution. Could probably just deprecate free
- partitioned_params: names of params to be partitioned across bins
- do_scaling: Scale branch lengths as the expected number of
substitutions. Reduces the maximum substitution df by 1.
"""
_SubstitutionModel.__init__(self, alphabet, **kw)
alphabet = self.getAlphabet() # as may be altered by recode_gaps etc.
if do_scaling is None:
do_scaling = self._scalableQ
if do_scaling and not self._scalableQ:
raise ValueError("Can't autoscale a %s model" % type(self).__name__)
self._do_scaling = do_scaling
# BINS
if not ordered_param:
if ordered_param is not None:
warnings.warn('ordered_param should be a string or None')
ordered_param = None
if distribution:
if with_rate:
ordered_param = 'rate'
else:
raise ValueError('distribution provided without ordered_param')
elif not isinstance(ordered_param, str):
warnings.warn('ordered_param should be a string or None')
assert len(ordered_param) == 1, 'More than one ordered_param'
ordered_param = ordered_param[0]
assert ordered_param, "False value hidden in list"
self.ordered_param = ordered_param
if distribution == "gamma":
distribution = GammaDefn
elif distribution in [None, "free"]:
distribution = MonotonicDefn
elif isinstance(distribution, basestring):
raise ValueError('Unknown distribution "%s"' % distribution)
self.distrib_class = distribution
if not partitioned_params:
partitioned_params = ()
elif isinstance(partitioned_params, str):
partitioned_params = (partitioned_params,)
else:
partitioned_params = tuple(partitioned_params)
if self.ordered_param:
if self.ordered_param not in partitioned_params:
partitioned_params += (self.ordered_param,)
self.partitioned_params = partitioned_params
if 'rate' in partitioned_params:
with_rate = True
self.with_rate = with_rate
# CACHED SHORTCUTS
self._exponentiator = None
#self._ident = numpy.identity(len(self.alphabet), float)
def checkParamsExist(self):
"""Raise an error if the parameters specified to be partitioned or
ordered don't actually exist."""
for param in self.partitioned_params:
if param not in self.parameter_order and param != 'rate':
desc = ['partitioned', 'ordered'][param==self.ordered_param]
raise ValueError('%s param "%s" unknown' % (desc, param))
def _isInstantaneous(self, x, y):
diffs = sum([X!=Y for (X,Y) in zip(x,y)])
return diffs == 1 or (diffs > 1 and
self.long_indels_are_instantaneous and self._isAnyIndel(x, y))
def _isAnyIndel(self, x, y):
"""An indel of any length"""
        # Things get complicated when a contiguous indel of any length is OK:
if x == y:
return False
gap_start = gap_end = gap_strand = None
for (i, (X,Y)) in enumerate(zip(x,y)):
G = self.gapmotif[i]
if X != Y:
if X != G and Y != G:
return False # non-gap differences had their chance above
elif gap_start is None:
gap_start = i
gap_strand = [X,Y].index(G)
elif gap_end is not None or [X,Y].index(G) != gap_strand:
return False # can't start a second gap
else:
pass # extend open gap
elif gap_start is not None:
gap_end = i
return True
def calcQ(self, word_probs, mprobs_matrix, *params):
Q = self.calcExchangeabilityMatrix(word_probs, *params)
Q *= mprobs_matrix
row_totals = Q.sum(axis=1)
Q -= numpy.diag(row_totals)
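        # Optional rescaling so that branch lengths are measured as the expected
        # number of substitutions (see the do_scaling constructor argument).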
if self._do_scaling:
Q *= 1.0 / (word_probs * row_totals).sum()
return Q
def makeQdDefn(self, word_probs, mprobs_matrix, rate_params):
"""Diagonalized Q, ie: rate matrix prepared for exponentiation"""
Q = CalcDefn(self.calcQ, name='Q')(word_probs, mprobs_matrix, *rate_params)
expm = NonParamDefn('expm')
exp = ExpDefn(expm)
Qd = CallDefn(exp, Q, name='Qd')
return Qd
def _makeBinParamDefn(self, edge_par_name, bin_par_name, bprob_defn):
        # if no ordered param defined, behaves as before: everything indexed by an edge
if edge_par_name not in self.partitioned_params:
return ParamDefn(dimensions=['bin'], name=bin_par_name)
if edge_par_name == self.ordered_param:
whole = self.distrib_class(bprob_defn, bin_par_name)
else:
# this forces them to average to one, but no forced order
# this means you can't force a param value to be shared across bins
# so 1st above approach has to be used
whole = WeightedPartitionDefn(bprob_defn, bin_par_name+'_partn')
whole.bin_names = bprob_defn.bin_names
return SelectForDimension(whole, 'bin', name=bin_par_name)
def makeRateParams(self, bprobs):
params = []
for param_name in self.parameter_order:
if bprobs is None or param_name not in self.partitioned_params:
defn = ParamDefn(param_name)
else:
e_defn = ParamDefn(param_name, dimensions=['edge', 'locus'])
# should be weighted by bprobs*rates not bprobs
b_defn = self._makeBinParamDefn(
param_name, param_name+'_factor', bprobs)
defn = ProductDefn(b_defn, e_defn, name=param_name+'_BE')
params.append(defn)
return params
def makeFundamentalParamControllerDefns(self, bin_names):
"""Everything one step short of the psubs, because cogent.align code
needs to handle Q*t itself."""
defns = self.makeParamControllerDefns(bin_names, endAtQd=True)
assert not 'length' in defns
defns['length'] = LengthDefn()
return defns
def makePsubsDefn(self, bprobs, word_probs, mprobs_matrix, rate_params):
distance = self.makeDistanceDefn(bprobs)
P = self.makeContinuousPsubDefn(word_probs, mprobs_matrix, distance, rate_params)
return P
def makeDistanceDefn(self, bprobs):
length = LengthDefn()
if self.with_rate and bprobs is not None:
b_rate = self._makeBinParamDefn('rate', 'rate', bprobs)
distance = ProductDefn(length, b_rate, name="distance")
else:
distance = length
return distance
def makeContinuousPsubDefn(self, word_probs, mprobs_matrix, distance, rate_params):
Qd = self.makeQdDefn(word_probs, mprobs_matrix, rate_params)
P = CallDefn(Qd, distance, name='psubs')
return P
class General(_ContinuousSubstitutionModel):
"""A continuous substitution model with one free parameter for each and
every possible instantaneous substitution."""
# k = self.param_pick[i,j], 0<=k<=N+1
# k==0: not instantaneous, should be 0.0 in Q
# k<=N: apply Kth exchangeability parameter
# k==N+1: no parameter, should be 1.0 in unscaled Q
#@extend_docstring_from(_ContinuousSubstitutionModel)
def __init__(self, alphabet, **kw):
_ContinuousSubstitutionModel.__init__(self, alphabet, **kw)
alphabet = self.getAlphabet() # as may be altered by recode_gaps etc.
mask = self._instantaneous_mask
N = len(alphabet)
self.param_pick = numpy.zeros([N,N], int)
self.parameter_order = []
for (i,x) in enumerate(alphabet):
for j in numpy.flatnonzero(mask[i]):
y = alphabet[j]
self.parameter_order.append('%s/%s'%(x,y))
self.param_pick[i,j] = len(self.parameter_order)
if self._do_scaling:
const_param = self.parameter_order.pop()
self.symmetric = False
self.checkParamsExist()
def calcExchangeabilityMatrix(self, mprobs, *params):
return numpy.array((0.0,)+params+(1.0,)).take(self.param_pick)
class GeneralStationary(_ContinuousSubstitutionModel):
"""A continuous substitution model with one free parameter for each and
every possible instantaneous substitution, except the last in each column.
As general as can be while still having stationary motif probabilities"""
#@extend_docstring_from(_ContinuousSubstitutionModel)
def __init__(self, alphabet, **kw):
_ContinuousSubstitutionModel.__init__(self, alphabet, **kw)
alphabet = self.getAlphabet() # as may be altered by recode_gaps etc.
mask = self._instantaneous_mask
N = len(alphabet)
self.param_pick =
|
numpy.zeros([N,N], int)
|
numpy.zeros
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The :mod:`samplesizelib.linear.bayesian` contains classes:
- :class:`samplesizelib.linear.bayesian.APVCEstimator`
- :class:`samplesizelib.linear.bayesian.ACCEstimator`
- :class:`samplesizelib.linear.bayesian.ALCEstimator`
- :class:`samplesizelib.linear.bayesian.MaxUtilityEstimator`
- :class:`samplesizelib.linear.bayesian.KLEstimator`
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
from multiprocessing import Pool
import numpy as np
import scipy.stats as sps
from scipy.optimize import minimize_scalar
from ..shared.estimator import SampleSizeEstimator
from ..shared.utils import Dataset
class APVCEstimator(SampleSizeEstimator):
r"""
Description of APVC Method
:param statmodel: the machine learning algorithm
:type statmodel: RegressionModel or LogisticModel
:param averaging: to do
:type averaging: float
:param epsilon: to do
:type epsilon: float
:param begin: to do
:type begin: int
:param end: to do
:type end: int
:param num: to do
:type num: int
:param multiprocess: to do
:type multiprocess: bool
:param progressbar: to do
:type progressbar: bool
"""
def __init__(self, statmodel, **kwards):
r"""Constructor method
"""
super().__init__()
self.statmodel = statmodel
self.averaging = int(kwards.pop('averaging', 100))
if self.averaging <= 0:
raise ValueError(
"The averaging should be positive but get {}".format(
self.averaging))
self.epsilon = kwards.pop('epsilon', 0.5)
if self.epsilon <= 0:
raise ValueError(
"The epsilon must be positive value but get {}".format(
self.epsilon))
self.begin = kwards.pop('begin', None)
if self.begin is not None and self.begin < 0:
raise ValueError(
"The begin must be positive value but get {}".format(
self.begin))
self.end = kwards.pop('end', None)
if self.end is not None and self.end < 0:
raise ValueError(
"The end must be positive value but get {}".format(
self.end))
if self.end is not None and self.begin is not None and self.end <= self.begin:
raise ValueError(
"The end value must be greater than the begin value but {}<={}".format(
self.end, self.begin))
self.num = kwards.pop('num', 5)
if self.num <=0:
raise ValueError(
"The num must be positive value but get {}".format(
self.num))
if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
raise ValueError(
"The num value must be smaler than (end - begin) but {}>={}".format(
self.num, self.end - self.begin))
self.multiprocess = kwards.pop('multiprocess', False)
if not isinstance(self.multiprocess, bool):
raise ValueError(
"The multiprocess must be bool value but get {}".format(
self.multiprocess))
self.progressbar = kwards.pop('progressbar', False)
if not isinstance(self.progressbar, bool):
raise ValueError(
"The progressbar must be bool value but get {}".format(
self.progressbar))
if kwards:
raise ValueError("Invalid parameters: %s" % str(kwards))
self.dataset = None
def _hDispersion(self, dataset):
r"""
Return ...
"""
X, y = dataset.sample()
w_hat = self.statmodel(y, X).fit()
cov = np.linalg.inv(
0.01*np.eye(w_hat.shape[0]) - self.statmodel(y, X).hessian(w_hat))
return np.sqrt(np.sum((np.linalg.eigvals(cov)/2)**2))
def _score_subsample(self, m):
r"""
Return ...
"""
X_m, y_m = self.dataset.sample(m)
dataset_m = Dataset(X_m, y_m)
return self._hDispersion(dataset_m)
def forward(self, features, target):
r"""
Returns sample size prediction for the given dataset.
:param features: The tensor of shape
`num_elements` :math:`\times` `num_feature`.
:type features: array.
:param target: The tensor of shape `num_elements`.
:type target: array.
:return: sample size estimation for the given dataset.
:rtype: dict
"""
self.dataset = Dataset(features, target)
if self.end is None:
end = len(self.dataset) - 1
else:
end = self.end
if self.begin is None:
begin = 2*self.dataset.n
else:
begin = self.begin
if end <= begin:
raise ValueError(
"The end value must be greater than the begin value but {}<={}".format(
end, begin))
if self.num >= end - begin:
raise ValueError(
"The num value must be smaler than (end - begin) but {}>={}".format(
self.num, end - begin))
subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
list_of_answers = []
points_one = np.ones(self.averaging, dtype=np.int64)
if self.multiprocess:
pool = Pool()
mapping = pool.map
else:
mapping = map
if self.progressbar:
iterator = self._progressbar(subset_sizes)
else:
iterator = subset_sizes
for i, m in enumerate(iterator):
list_of_answers.append(
np.asarray(
list(mapping(self._score_subsample, m*points_one))))
self._set_status(100.*(i+1)/len(subset_sizes))
if self.multiprocess:
pool.close()
pool.join()
list_of_answers = np.asarray(list_of_answers)
list_of_E = np.mean(list_of_answers, axis = 1)
list_of_S = np.std(list_of_answers, axis = 1)
m_size = end
for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
if mean < self.epsilon:
m_size = m
return {'m*': m_size,
'E': np.array(list_of_E),
'S':
|
np.array(list_of_S)
|
numpy.array
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
from matplotlib import rc
from matplotlib.patches import Ellipse
from scipy.interpolate import interp1d
from matplotlib.gridspec import GridSpec
from matplotlib import cm,ticker
from numpy import sin, cos, tan, pi
#matplotlib.use('Agg') #so that it does ok with graphics in batch mode
#choose Computer Modern Roman fonts by default
mpl.rcParams['font.serif'] = 'cmr10'
mpl.rcParams['font.sans-serif'] = 'cmr10'
#font = { 'size' : 20}
#rc('font', **font)
rc('xtick', labelsize=20)
rc('ytick', labelsize=20)
#rc('xlabel', **font)
#rc('ylabel', **font)
# legend = {'fontsize': 20}
# rc('legend',**legend)
axes = {'labelsize': 20}
rc('axes', **axes)
#the below fixes the minus signs in axes labels
mpl.rcParams['axes.unicode_minus'] = 'False'
#the below fixes the cross symbol in e.g. $2 \times 10^1$ in axes labels
fix_cross_in_labels = True
if(fix_cross_in_labels):
rc('font', family='stix')
else:
#choose Computer Modern Roman fonts by default
mpl.rcParams['font.serif'] = 'cmr10'
mpl.rcParams['font.sans-serif'] = 'cmr10'
rc('mathtext',fontset='cm')
#use this, but at the expense of slowdown of rendering
#rc('text', usetex=True)
# #add amsmath to the preamble
#matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amssymb,amsmath}"]
import pdb
import numpy as np
import glob
import os
from scipy.interpolate import griddata
from scipy.interpolate import interp1d
from numpy import ma
import matplotlib.colors as colors
#use_math_text = True
def mathify_axes_ticks(ax,fontsize=20,xticks=None,yticks=None):
if xticks is None:
xticks = ax.get_xticks()
if yticks is None:
yticks = ax.get_yticks()
if ax.get_xscale() != 'log': ax.set_xticklabels([(r'$%g$' % lab) for lab in xticks])
if ax.get_yscale() != 'log': ax.set_yticklabels([(r'$%g$' % lab) for lab in yticks])
if fontsize is not None:
if ax.get_xscale() != 'log':
for label in ax.get_xticklabels():
label.set_fontsize(fontsize)
if ax.get_yscale() != 'log':
for label in ax.get_yticklabels():
label.set_fontsize(fontsize)
def convert_to_single_file(startn=0,endn=-1,ln=10,whichi=0,whichn=1,**kwargs):
which = kwargs.pop("which","convert_file")
rg("gdump")
flist1 = np.sort(glob.glob( os.path.join("dumps/", "dump[0-9][0-9][0-9]_0000") ) )
flist2 = np.sort(glob.glob( os.path.join("dumps/", "dump[0-9][0-9][0-9][0-9]_0000") ) )
flist1.sort()
flist2.sort()
flist = np.concatenate((flist1,flist2))
firsttime = 1
for fldname in flist:
#find the index of the file
        fldindex = int(fldname.split("_")[0].split("p")[-1])
if fldindex < startn:
continue
if endn>=0 and fldindex >= endn:
break
if fldindex % whichn != whichi:
#do every whichn'th snapshot starting with whichi'th snapshot
continue
#print( "Reading " + fldname + " ..." )
fname = "dump%03d" % fldindex
if os.path.isfile( fname ):
print("File %s exists, skipping..." % fname)
continue
if not os.path.isfile( fname ):
rd(fname)
def ellk(a,r):
ekval = ek(a,r)
lkval = lk(a,r)
return(lkval/ekval)
def ek(a,r):
#-u_t, I suspect
ek = (r**2-2*r+a*r**0.5)/(r*(r**2-3*r+2*a*r**0.5)**0.5)
return(ek)
def lk(a,r):
udphi = r**0.5*(r**2-2*a*r**0.5+a**2)/(r*(r**2-3*r+2*a*r**0.5)**0.5)
return( udphi )
def Risco(ain):
eps = np.finfo(np.float64).eps
a = np.minimum(ain,1.)
Z1 = 1 + (1. - a**2)**(1./3.) * ((1. + a)**(1./3.) + (1. - a)**(1./3.))
Z2 = (3*a**2 + Z1**2)**(1./2.)
risco = 3 + Z2 - np.sign(a)* ( (3 - Z1)*(3 + Z1 + 2*Z2) )**(1./2.)
return(risco)
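# Sanity checks for the ISCO formula above: Risco(0) = 6 (Schwarzschild), Risco(1) -> 1
# (extremal prograde Kerr), and retrograde spins (a < 0) give radii up to 9.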
def Ebind(r,a):
#1+u_t, I suspect
Eb = 1 - (r**2-2*r+a*r**0.5)/(r*(r**2-3*r+2*a*r**0.5)**0.5)
return( Eb )
def etaNT(a):
return( Ebindisco(a) )
def Ebindisco(a):
eps = np.finfo(np.float64).eps
a0 = 0.99999 #1.-1e8*eps
if a > a0:
a = a0
Eb = Ebind( Risco(a), a )
return((a-a0)/(1.-a0)*(1.-3.**(-0.5)) + (1.-a)/(1.-a0)*Eb)
Eb = Ebind( Risco(a), a)
#Eb = (1.-3.**(-0.5))*a**2
return( Eb )
def mkmov_simple(starti=0,endi=400,length=10):
for i in np.arange(starti,endi+1):
rd("dump%03d" % i);
aphi=psicalc()
if i == starti: amax = aphi.max()
cs, cb = plco(np.log10(rho),levels=np.linspace(-8,0,100),isfilled=1,k=0,xy=1,xmax=10,ymax=5,dobh=1,cb=1,extend="both",pretty=1)
ax = plt.gca()
ax.set_xlabel(r"$R\ [r_g]$",fontsize=20,labelpad=-5)
ax.set_ylabel(r"$z\ [r_g]$",fontsize=20,labelpad=-5)
cb.ax.set_xlabel(r"$\log\rho$",fontsize=20,ha="left")
plc(aphi,levels=np.linspace(-amax,amax,10)[1:-1],colors="white",linewidths=2,xy=-1)
print(i);
plt.title("t=%.4g"%np.round(t));
plt.xlim(0,length);plt.ylim(-0.5*length,0.5*length)
plt.draw();
plt.savefig("frame%03d.png"%i)
def convert_wrapper(**kwargs):
if len(sys.argv[2:])==2 and sys.argv[2].isdigit() and sys.argv[3].isdigit():
whichi = int(sys.argv[2])
whichn = int(sys.argv[3])
else:
print( "Usage: %s %s <whichi> <whichn>" % (sys.argv[0], sys.argv[1]) )
return
convert_to_single_file(whichi = whichi, whichn = whichn, **kwargs)
def mkmov_wrapper(**kwargs):
if len(sys.argv[2:])==2 and sys.argv[2].isdigit() and sys.argv[3].isdigit():
whichi = int(sys.argv[2])
whichn = int(sys.argv[3])
else:
print( "Usage: %s %s <whichi> <whichn>" % (sys.argv[0], sys.argv[1]) )
return
mkmov(whichi = whichi, whichn = whichn, **kwargs)
def mkmov(startn=0,endn=-1,ln=10,whichi=0,whichn=1,**kwargs):
which = kwargs.pop("which","mkfrm8panel")
dosavefig = kwargs.pop("dosavefig",1)
print("Doing %s movie" % which)
rg("gdump")
#compute the total magnetic flux at t = 0
rd("dump000")
aphi=psicalc()
aphimax = aphi.max()
#construct file list
flist1 = np.sort(glob.glob( os.path.join("dumps/", "dump[0-9][0-9][0-9]") ) )
flist2 = np.sort(glob.glob( os.path.join("dumps/", "dump[0-9][0-9][0-9][0-9]") ) )
flist1.sort()
flist2.sort()
flist = np.concatenate((flist1,flist2))
if len(flist) == 0:
flist1 = np.sort(glob.glob( os.path.join("dumps/", "dump[0-9][0-9][0-9]_0000") ) )
flist2 = np.sort(glob.glob( os.path.join("dumps/", "dump[0-9][0-9][0-9][0-9]_0000") ) )
flist1.sort()
flist2.sort()
flist = np.concatenate((flist1,flist2))
firsttime = 1
dpi = 135
for fldname in flist:
#find the index of the file
fldindex = np.int(fldname.split("_")[0].split("p")[-1])
if fldindex < startn:
continue
if endn>=0 and fldindex >= endn:
break
if fldindex % whichn != whichi:
#do every whichn'th snapshot starting with whichi'th snapshot
continue
if dosavefig:
fname = "%s%04d.png" % (which,fldindex)
if os.path.isfile( fname ):
print("File %s exists, skipping..." % fname)
continue
#print( "Reading " + fldname + " ..." )
rd("dump%03d" % fldindex);
if which == "mkfrmsimple":
if firsttime:
firsttime = 0
fig = plt.figure(figsize=(12,8))
plt.clf()
mkfrmsimple(fig=fig,aphimax = aphimax)
else:
print("Unknown movie type: %s" % which)
return
print(fldindex)
plt.draw()
if dosavefig:
plt.savefig(fname,dpi = dpi)
#############
def mkfrmsimple(fig=None,aphimax=None,lnx=100,lny=100,vmin=-10,vmax=1,fntsize=20,asp=1.):
if fig is None: fig = plt.gcf();
aphi = psicalc() #vpot[3].mean(-1)
if aphimax is None: aphimax = aphi.max()
#ax.set_aspect(asp)
res,cb=plco(lrho,xy=1,xmax=lnx,ymax=lny,symmx=1,
isfilled=1,cb=1,pretty=1,
levels=np.linspace(vmin,vmax,100),
extend="both",cbxla=r"$\ \ \ \ \ \ \ \ \log_{10}\rho$")
plt.xlabel(r"$x\ [r_g]$",fontsize=fntsize)
plt.ylabel(r"$z\ [r_g]$",fontsize=fntsize,labelpad=-30)
ax = plt.gca()
#cmap = cm.jet
#label = r"$\log_{10}\rho$"
#cx1,cb1 = mkvertcolorbar(ax,fig,gap=0.02,width=0.05,vmin=vmin,vmax=vmax,loc="right",
# label=label,ticks=tcks,fntsize=fntsize,cmap=cmap,extend="both")
plc(aphi/aphimax,symmx=1,xy=-1,levels=np.linspace(0.,1.,20)[1:],colors="black",linewidths=1.)
plt.title(r"$t=%g$" % int(t+0.5), fontsize=fntsize)
plt.xlim(-lnx,lnx)
plt.ylim(-lny,lny)
mathify_axes_ticks(ax)
def mkvertcolorbar(ax,fig,vmin=0,vmax=1,label=None,ylabel=None,ticks=None,fntsize=20,cmap=mpl.cm.jet,gap=0.03,width=0.02,extend="neither",loc="right"):
box = ax.get_position()
#pdb.set_trace()
# cpos = [box.x0,box.y0+box.height+0.05,box.width,0.03]
locs = loc.split()
loc0 = locs[0]
if len(locs)>1:
loc1 = locs[1]
else:
loc1 = None
if loc0 == "left":
cpos = box.x0-gap-width,box.y0,width,box.height
elif loc0 == "right":
cpos = box.x0+box.width+gap,box.y0,width,box.height
elif loc0 == "top":
if loc1 == "right":
cpos = box.x0+box.width*0.55,box.y0+box.height+gap,box.width*0.45,width
elif loc1 == "left":
cpos = box.x0+box.width*0.0,box.y0+box.height+gap,box.width*0.45,width
else:
cpos = box.x0,box.y0+box.height+gap,box.width,width
ax1 = fig.add_axes(cpos)
#cmap = mpl.cm.jet
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
if loc0 == "left" or loc0 == "right":
ori = "vertical"
else:
ori = "horizontal"
if ticks is not None:
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation=ori,
ticks=ticks,
extend=extend)
else:
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation=ori,
extend=extend)
if loc0 == "top" or loc0 == "bottom":
cb1.ax.xaxis.set_ticks_position(loc0)
mathify_axes_ticks(cb1.ax,fontsize=fntsize,xticks=ticks)
elif loc0 == "left" or loc0 == "right":
cb1.ax.yaxis.set_ticks_position(loc0)
mathify_axes_ticks(cb1.ax,fontsize=fntsize,yticks=ticks)
if label is not None:
ax1.set_xlabel(label,fontsize=fntsize)
if ylabel is not None:
ax1.set_ylabel(ylabel,fontsize=fntsize)
for label in ax1.get_xticklabels() + ax1.get_yticklabels():
label.set_fontsize(fntsize)
return ax1,cb1
def Qmri(dir=2):
"""
APPROXIMATELY Computes number of theta cells resolving one MRI wavelength
"""
global bu,rho,uu,_dx2,_dx3
#cvel()
#corrected this expression to include both 2pi and dxdxp[3][3]
    #also corrected definition of va^2 to contain bsq+gam*ug term
#need to figure out how to properly measure this in fluid frame
vaudir = np.abs(bu[dir])/np.sqrt(rho+bsq+gam*ug)
omega = dxdxp[3][3]*uu[3]/uu[0]+1e-15
lambdamriudir = 2*np.pi * vaudir / omega
if dir == 2:
res=lambdamriudir/_dx2
elif dir == 3:
res=lambdamriudir/_dx3
return(res)
def goodlabs(fntsize=20):
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(fntsize)
def iofr(rval):
rval = np.array(rval)
if np.max(rval) < r[0,0,0]:
return 0
res = interp1d(r[:,0,0], ti[:,0,0], kind='linear', bounds_error = False, fill_value = 0)(rval)
if len(res.shape)>0 and len(res)>0:
res[rval<r[0,0,0]]*=0
res[rval>r[nx-1,0,0]]=res[rval>r[nx-1,0,0]]*0+nx-1
else:
res = np.float64(res)
return(np.floor(res+0.5).astype(int))
#read in a dump file
def rd(dump):
read_file(dump,type="dump")
#read in a grid file
def rg(dump):
read_file(dump,type="gdump")
#read in a grid file
def rg2(dump):
read_file(dump,type="gdump2",noround=True)
#high-level function that reads either MPI or serial gdump's
def read_file(dump,type=None,savedump=True,saverdump=False,noround=False):
if type is None:
if dump.startswith("dump"):
type = "dump"
print("Reading a dump file %s ..." % dump)
elif dump.startswith("gdump2"):
type = "gdump2"
print("Reading a gdump2 file %s ..." % dump)
elif dump.startswith("gdump"):
type = "gdump"
print("Reading a gdump file %s ..." % dump)
elif dump.startswith("rdump"):
type = "rdump"
print("Reading a rdump file %s ..." % dump)
elif dump.startswith("fdump"):
type = "fdump"
print("Reading a fdump file %s ..." % dump)
else:
print("Couldn't guess dump type; assuming it is a data dump")
type = "dump"
#normal dump
if os.path.isfile( "dumps/" + dump ):
headerline = read_header("dumps/" + dump, returnheaderline = True)
gd = read_body("dumps/" + dump,nx=N1+2*N1G,ny=N2+2*N2G,nz=N3+2*N3G,noround=1)
if noround:
res = data_assign( gd,type=type,nx=N1+2*N1G,ny=N2+2*N2G,nz=N3+2*N3G)
else:
res = data_assign(myfloat(gd),type=type,nx=N1+2*N1G,ny=N2+2*N2G,nz=N3+2*N3G)
return res
#MPI-type dump that is spread over many files
else:
flist = np.sort(glob.glob( "dumps/" + dump + "_[0-9][0-9][0-9][0-9]" ))
if len(flist) == 0:
print( "Could not find %s or its MPI counterpart" % dump )
return
sys.stdout.write( "Reading %s (%d files)" % (dump, len(flist)) )
sys.stdout.flush()
ndots = 10
dndot = len(flist)/ndots
if dndot == 0: dndot = 1
for i,fname in enumerate(flist):
#print( "Reading file %d out of %d..." % (i,len(flist)) )
#header for each file might be different, so read each
header = read_header(fname,issilent=1)
if header is None:
print( "Error reading header of %s, aborting..." % fname )
return
lgd = read_body(fname,nx=N1+2*N1G,ny=N2+2*N2G,nz=N3+2*N3G)
#this gives an array of dimensions (-1,N1,N2,N3)+potentially ghost cells
if 0 == i:
#create full array: of dimensions, (-1,nx,ny,nz)
fgd = np.zeros( (lgd.shape[0], nx+2*N1G, ny+2*N2G, nz+2*N3G), dtype=np.float32)
if not type == "rdump":
#construct full indices: ti, tj, tk
#fti,ftj,ftk = mgrid[0:nx,0:ny,0:nz]
lti,ltj,ltk = lgd[0:3,:,:].view();
lti = np.int64(lti)
ltj = np.int64(ltj)
ltk = np.int64(ltk)
fgd[:,lti+N1G,ltj+N2G,ltk+N3G] = lgd[:,:,:,:]
else:
print(starti,startj,startk)
fgd[:,starti:starti+N1+2*N1G,startj:startj+N2+2*N2G,startk:startk+N3+2*N3G] = lgd[:,:,:,:]
del lgd
if i%dndot == 0:
sys.stdout.write(".")
sys.stdout.flush()
res = data_assign(fgd,type=type,nx=nx+2*N1G,ny=ny+2*N2G,nz=nz+2*N3G)
if savedump:
#if the full dump file does not exist, create it
dumpfullname = "dumps/" + dump
if (type == "dump" or type == "gdump") and not os.path.isfile(dumpfullname):
sys.stdout.write("Saving full dump to %s..." % dumpfullname)
sys.stdout.flush()
header[1] = header[4] #N1 = nx
header[2] = header[5] #N2 = ny
header[3] = header[6] #N3 = nz
fout = open( dumpfullname, "wb" )
#join header items with " " (space) as a glue
#see http://stackoverflow.com/questions/12377473/python-write-versus-writelines-and-concatenated-strings
#write it out with a new line char at the end
fout.write(" ".join(header) + "\n")
fout.flush()
os.fsync(fout.fileno())
#reshape the dump content
gd1 = fgd.transpose(1,2,3,0)
gd1.tofile(fout)
fout.close()
print( " done!" )
if res is not None:
return res
return res
#read in a header
def read_header(dump,issilent=True,returnheaderline=False):
global t,nx,ny,nz,N1,N2,N3,N1G,N2G,N3G,starti,startj,startk,_dx1,_dx2,_dx3,a,gam,Rin,Rout,hslope,R0,ti,tj,tk,x1,x2,x3,r,h,ph,gcov,gcon,gdet,drdx,gn3,gv3,guu,gdd,dxdxp, games, startx1, startx2, startx3, x10, x20, tf, NPR, DOKTOT, BL
global fractheta
global fracphi
global rbr
global npow2
global cpow2
#read image
fin = open( dump, "rb" )
headerline = fin.readline()
header = headerline.split()
nheadertot = len(header)
fin.close()
if not dump.startswith("dumps/rdump"):
if not issilent: print( "dump header: len(header) = %d" % len(header) )
nheader = 57
n = 0
t = myfloat(np.float64(header[n])); n+=1
#per tile resolution
N1 = int(header[n]); n+=1
N2 = int(header[n]); n+=1
N3 = int(header[n]); n+=1
#total resolution
nx = int(header[n]); n+=1
ny = int(header[n]); n+=1
nz = int(header[n]); n+=1
#numbers of ghost cells
N1G = int(header[n]); n+=1
N2G = int(header[n]); n+=1
N3G = int(header[n]); n+=1
startx1 = myfloat(float(header[n])); n+=1
startx2 = myfloat(float(header[n])); n+=1
startx3 = myfloat(float(header[n])); n+=1
_dx1=myfloat(float(header[n])); n+=1
_dx2=myfloat(float(header[n])); n+=1
_dx3=myfloat(float(header[n])); n+=1
tf=myfloat(float(header[n])); n+=1
nstep=myfloat(float(header[n])); n+=1
a=myfloat(float(header[n])); n+=1
gam=myfloat(float(header[n])); n+=1
cour=myfloat(float(header[n])); n+=1
DTd=myfloat(float(header[n])); n+=1
DTl=myfloat(float(header[n])); n+=1
DTi=myfloat(float(header[n])); n+=1
DTr=myfloat(float(header[n])); n+=1
DTr01=myfloat(float(header[n])); n+=1
dump_cnt=myfloat(float(header[n])); n+=1
image_cnt=myfloat(float(header[n])); n+=1
rdump_cnt=myfloat(float(header[n])); n+=1
rdump01_cnt=myfloat(float(header[n])); n+=1
dt=myfloat(float(header[n])); n+=1
lim=myfloat(float(header[n])); n+=1
failed=myfloat(float(header[n])); n+=1
Rin=myfloat(float(header[n])); n+=1
Rout=myfloat(float(header[n])); n+=1
hslope=myfloat(float(header[n])); n+=1
R0=myfloat(float(header[n])); n+=1
NPR=int(header[n]); n+=1
DOKTOT=int(header[n]); n+=1
DOCYLINDRIFYCOORDS=int(header[n]); n+=1
fractheta = myfloat(header[n]); n+=1
fracphi = myfloat(header[n]); n+=1
rbr = myfloat(header[n]); n+=1
npow2 = myfloat(header[n]); n+=1
cpow2 = myfloat(header[n]); n+=1
x10 = myfloat(header[n]); n+=1
x20 = myfloat(header[n]); n+=1
fracdisk = myfloat(header[n]); n+=1
fracjet = myfloat(header[n]); n+=1
r0disk = myfloat(header[n]); n+=1
rdiskend = myfloat(header[n]); n+=1
r0jet = myfloat(header[n]); n+=1
rjetend = myfloat(header[n]); n+=1
jetnu = myfloat(header[n]); n+=1
rsjet = myfloat(header[n]); n+=1
r0grid = myfloat(header[n]); n+=1
BL = myfloat(header[n]); n+=1
else:
print("rdump header")
nheader = 48
n = 0
#per tile resolution
N1 = int(header[n]); n+=1
N2 = int(header[n]); n+=1
N3 = int(header[n]); n+=1
#total resolution
nx = int(header[n]); n+=1
ny = int(header[n]); n+=1
nz = int(header[n]); n+=1
#numbers of ghost cells
N1G = int(header[n]); n+=1
N2G = int(header[n]); n+=1
N3G = int(header[n]); n+=1
#starting indices
starti = int(header[n]); n+=1
startj = int(header[n]); n+=1
startk = int(header[n]); n+=1
t = myfloat(header[n]); n+=1
tf = myfloat(header[n]); n+=1
nstep = int(header[n]); n+=1
a = myfloat(header[n]); n+=1
gam = myfloat(header[n]); n+=1
game = myfloat(header[n]); n+=1
game4 = myfloat(header[n]); n+=1
game5 = myfloat(header[n]); n+=1
cour = myfloat(header[n]); n+=1
DTd = myfloat(header[n]); n+=1
DTl = myfloat(header[n]); n+=1
DTi = myfloat(header[n]); n+=1
DTr = myfloat(header[n]); n+=1
DTr01 = myfloat(header[n]); n+=1
dump_cnt = myfloat(header[n]); n+=1
image_cnt = myfloat(header[n]); n+=1
rdump_cnt = myfloat(header[n]); n+=1
rdump01_cnt=myfloat(float(header[n])); n+=1
dt = myfloat(header[n]); n+=1
lim = myfloat(header[n]); n+=1
failed = myfloat(header[n]); n+=1
Rin = myfloat(header[n]); n+=1
Rout = myfloat(header[n]); n+=1
hslope = myfloat(header[n]); n+=1
R0 = myfloat(header[n]); n+=1
fractheta = myfloat(header[n]); n+=1
fracphi = myfloat(header[n]); n+=1
rbr = myfloat(header[n]); n+=1
npow2 = myfloat(header[n]); n+=1
cpow2 = myfloat(header[n]); n+=1
x10 = myfloat(header[n]); n+=1
x20 = myfloat(header[n]); n+=1
mrat = myfloat(header[n]); n+=1
fel0 = myfloat(header[n]); n+=1
felfloor = myfloat(header[n]); n+=1
tdump = myfloat(header[n]); n+=1
trdump = myfloat(header[n]); n+=1
timage = myfloat(header[n]); n+=1
tlog = myfloat(header[n]); n+=1
if n < len(header):
nheader = 60
global_fracdisk = myfloat(header[n]); n+=1
global_fracjet = myfloat(header[n]); n+=1
global_r0disk = myfloat(header[n]); n+=1
global_rdiskend = myfloat(header[n]); n+=1
global_r0jet = myfloat(header[n]); n+=1
global_rjetend = myfloat(header[n]); n+=1
global_jetnu = myfloat(header[n]); n+=1
global_rsjet = myfloat(header[n]); n+=1
global_r0grid = myfloat(header[n]); n+=1
if n != nheader or n != nheadertot:
print("Wrong number of elements in header: nread = %d, nexpected = %d, nototal = %d: incorrect format?"
% (n, nheader, nheadertot) )
return headerline
if returnheaderline:
return headerline
else:
return header
def read_body(dump,nx=None,ny=None,nz=None,noround=False):
fin = open( dump, "rb" )
header = fin.readline()
if dump.startswith("dumps/rdump"):
dtype = np.float64
body = np.fromfile(fin,dtype=dtype,count=-1)
gd = body.view().reshape((nx,ny,nz,-1), order='C')
if noround:
gd=gd.transpose(3,0,1,2)
else:
gd=myfloat(gd.transpose(3,0,1,2))
elif dump.startswith("dumps/gdump2"):
dtype = np.float64
body = np.fromfile(fin,dtype=dtype,count=-1)
gd = body.view().reshape((nx,ny,nz,-1), order='C')
if noround:
gd=gd.transpose(3,0,1,2)
else:
gd=myfloat(gd.transpose(3,0,1,2))
elif dump.startswith("dumps/fdump"):
dtype = np.int64
body = np.fromfile(fin,dtype=dtype,count=-1)
gd = body.view().reshape((-1,nz,ny,nx), order='F')
gd=myfloat(gd.transpose(0,3,2,1))
else:
dtype = np.float32
body = np.fromfile(fin,dtype=dtype,count=-1)
gd = body.view().reshape((-1,nz,ny,nx), order='F')
gd=myfloat(gd.transpose(0,3,2,1))
return gd
def data_assign(gd,type=None,**kwargs):
if type is None:
print("Please specify data type")
return
if type == "gdump":
gdump_assign(gd,**kwargs)
return None
elif type == "gdump2":
gdump2_assign(gd,**kwargs)
return None
elif type == "dump":
dump_assign(gd,**kwargs)
return None
elif type == "rdump":
gd = rdump_assign(gd,**kwargs)
return gd
elif type == "fdump":
gd = fdump_assign(gd,**kwargs)
return gd
else:
print("Unknown data type: %s" % type)
return gd
def gdump_assign(gd,**kwargs):
global t,nx,ny,nz,N1,N2,N3,_dx1,_dx2,_dx3,a,gam,Rin,Rout,hslope,R0,ti,tj,tk,x1,x2,x3,r,h,ph,gcov,gcon,gdet,drdx,gn3,gv3,guu,gdd,dxdxp, games
nx = kwargs.pop("nx",nx)
ny = kwargs.pop("ny",ny)
nz = kwargs.pop("nz",nz)
ti,tj,tk,x1,x2,x3,r,h,ph = gd[0:9,:,:].view(); n = 9
gv3 = gd[n:n+16].view().reshape((4,4,nx,ny,nz),order='F').transpose(1,0,2,3,4); n+=16
gn3 = gd[n:n+16].view().reshape((4,4,nx,ny,nz),order='F').transpose(1,0,2,3,4); n+=16
gcov = gv3
gcon = gn3
guu = gn3
gdd = gv3
gdet = gd[n]; n+=1
drdx = gd[n:n+16].view().reshape((4,4,nx,ny,nz),order='F').transpose(1,0,2,3,4); n+=16
dxdxp = drdx
if n != gd.shape[0]:
print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" % (n, gd.shape[0]) )
return 1
return 0
def gdump2_assign(gd,**kwargs):
global t,nx,ny,nz,N1,N2,N3,_dx1,_dx2,_dx3,a,gam,Rin,Rout,hslope,R0,ti,tj,tk,x1,x2,x3,gdet,games,rf1,hf1,phf1,rf2,hf2,phf2,rf3,hf3,phf3,rcorn,hcord,phcorn,re1,he1,phe1,re2,he2,phe2,re3,he3,phe3
nx = kwargs.pop("nx",nx)
ny = kwargs.pop("ny",ny)
nz = kwargs.pop("nz",nz)
ti,tj,tk,x1,x2,x3 = gd[0:6,:,:].view(); n = 6
rf1,hf1,phf1,rf2,hf2,phf2,rf3,hf3,phf3 = gd[0:9,:,:].view(); n += 9
rcorn,hcord,phcorn,rcent,hcent,phcen = gd[0:6,:,:].view(); n += 6
re1,he1,phe1,re2,he2,phe2,re3,he3,phe3 = gd[0:9,:,:].view(); n += 9
gdet = gd[n]; n+=1
if n != gd.shape[0]:
print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" % (n, gd.shape[0]) )
return 1
return 0
#read in a dump file
def dump_assign(gd,**kwargs):
global t,nx,ny,nz,_dx1,_dx2,_dx3,gam,hslope,a,R0,Rin,Rout,ti,tj,tk,x1,x2,x3,r,h,ph,rho,ug,vu,B,pg,cs2,Sden,U,gdetB,divb,uu,ud,bu,bd,v1m,v1p,v2m,v2p,gdet,bsq,gdet,alpha,rhor, ktot, pg
nx = kwargs.pop("nx",nx)
ny = kwargs.pop("ny",ny)
nz = kwargs.pop("nz",nz)
ti,tj,tk,x1,x2,x3,r,h,ph,rho,ug = gd[0:11,:,:].view(); n = 11
pg = (gam-1)*ug
lrho=np.log10(rho)
vu=np.zeros_like(gd[0:4])
B=np.zeros_like(gd[0:4])
vu[1:4] = gd[n:n+3]; n+=3
B[1:4] = gd[n:n+3]; n+=3
#if total entropy equation is evolved (on by default)
if DOKTOT == 1:
ktot = gd[n]; n+=1
divb = gd[n]; n+=1
uu = gd[n:n+4]; n+=4
ud = gd[n:n+4]; n+=4
bu = gd[n:n+4]; n+=4
bd = gd[n:n+4]; n+=4
bsq = mdot(bu,bd)
v1m,v1p,v2m,v2p,v3m,v3p=gd[n:n+6]; n+=6
gdet=gd[n]; n+=1
rhor = 1+(1-a**2)**0.5
if "guu" in globals():
#lapse
alpha = (-guu[0,0])**(-0.5)
if n != gd.shape[0]:
print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" % (n, gd.shape[0]) )
return 1
return 0
def rdump_assign(gd,**kwargs):
global t,nx,ny,nz,_dx1,_dx2,_dx3,gam,hslope,a,R0,Rin,Rout,ti,tj,tk,x1,x2,x3,r,h,ph,rho,ug,vu,B,pg,cs2,Sden,U,gdetB,divb,uu,ud,bu,bd,v1m,v1p,v2m,v2p,gdet,bsq,gdet,alpha,rhor, ktot, Ttot, game, qisosq, pflag, qisodotb, kel, uelvar, Tel4, Tel5,Teldis, Tels, kel4, kel5,ugel,ugeldis, ugcon, sel, ugscon, ugel4, ugel5,stot, uelvar, Telvar, Tsel, sel, ugels, games, phi, keldis, phihat,csphib,lrho
nx = kwargs.pop("nx",nx)
ny = kwargs.pop("ny",ny)
nz = kwargs.pop("nz",nz)
n = 0
rho = gd[n]; n+=1
ug = gd[n]; n+=1
vu=np.zeros_like(gd[0:4])
B=np.zeros_like(gd[0:4])
vu[1:4] = gd[n:n+3]; n+=3
B[1:4] = gd[n:n+3]; n+=3
# if n != gd.shape[0]:
# print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" % (n, gd.shape[0]) )
# return 1
return gd
def fdump_assign(gd,**kwargs):
global t,nx,ny,nz,_dx1,_dx2,_dx3,gam,hslope,a,R0,Rin,Rout,ti,tj,tk,x1,x2,x3,r,h,ph,rho,ug,vu,B,pg,cs2,Sden,U,gdetB,divb,uu,ud,bu,bd,v1m,v1p,v2m,v2p,gdet,bsq,gdet,alpha,rhor, ktot, Ttot, game, qisosq, pflag, qisodotb, kel, uelvar, Tel4, Tel5,Teldis, Tels, kel4, kel5,ugel,ugeldis, ugcon, sel, ugscon, ugel4, ugel5,stot, uelvar, Telvar, Tsel, sel, ugels, games, phi, keldis, phihat,csphib,lrho,fail
nx = kwargs.pop("nx",nx)
ny = kwargs.pop("ny",ny)
nz = kwargs.pop("nz",nz)
fail = gd
return gd
def mdot(a,b):
"""
Computes a contraction of two tensors/vectors. Assumes
the following structure: tensor[m,n,i,j,k] OR vector[m,i,j,k],
where i,j,k are spatial indices and m,n are variable indices.
"""
if (a.ndim == 3 and b.ndim == 3) or (a.ndim == 4 and b.ndim == 4):
c = (a*b).sum(0)
elif a.ndim == 5 and b.ndim == 4:
c = np.empty(np.maximum(a[:,0,:,:,:].shape,b.shape),dtype=b.dtype)
for i in range(a.shape[0]):
c[i,:,:,:] = (a[i,:,:,:,:]*b).sum(0)
elif a.ndim == 4 and b.ndim == 5:
c = np.empty(np.maximum(b[0,:,:,:,:].shape,a.shape),dtype=a.dtype)
for i in range(b.shape[1]):
c[i,:,:,:] = (a*b[:,i,:,:,:]).sum(0)
elif a.ndim == 5 and b.ndim == 5:
c = np.empty((a.shape[0],b.shape[1],a.shape[2],a.shape[3],max(a.shape[4],b.shape[4])),dtype=a.dtype)
for i in range(c.shape[0]):
for j in range(c.shape[1]):
c[i,j,:,:,:] = (a[i,:,:,:,:]*b[:,j,:,:,:]).sum(0)
elif a.ndim == 5 and b.ndim == 6:
c = np.empty((a.shape[0],b.shape[1],b.shape[2],max(a.shape[2],b.shape[3]),max(a.shape[3],b.shape[4]),max(a.shape[4],b.shape[5])),dtype=a.dtype)
for mu in range(c.shape[0]):
for k in range(c.shape[1]):
for l in range(c.shape[2]):
c[mu,k,l,:,:,:] = (a[mu,:,:,:,:]*b[:,k,l,:,:,:]).sum(0)
else:
raise Exception('mdot', 'wrong dimensions')
return c
def psicalc(B1=None):
"""
Computes the field vector potential
"""
global B
if B1 is None: B1 = B[1]
daphi = -(gdet*B1).mean(-1)*_dx2
aphi=daphi[:,::-1].cumsum(axis=1)[:,::-1]
aphi-=0.5*daphi #correction for half-cell shift between face and center in theta
return(aphi)
def myfloat(f,acc=1):
""" acc=1 means np.float32, acc=2 means np.float64 """
if acc==1:
return( np.float32(f) )
else:
return( np.float64(f) )
def get_fracphi():
fracphi = dxdxp[3,3,0,0,0]*_dx3*nz/(2*np.pi)
return( fracphi )
def plco(myvar,**kwargs):
global r,h,ph
plt.clf()
return plc(myvar,**kwargs)
def plc(myvar,**kwargs): #plc
global r,h,ph
#xcoord = kwargs.pop('x1', None)
#ycoord = kwargs.pop('x2', None)
if(np.min(myvar)==np.max(myvar)):
print("The quantity you are trying to plot is a constant = %g." % np.min(myvar))
return
cb = kwargs.pop('cb', False)
nc = kwargs.pop('nc', 15)
k = kwargs.pop('k',0)
mirrorx = kwargs.pop('mirrorx',0)
mirrory = kwargs.pop('mirrory',0)
symmx = kwargs.pop('symmx',0)
#cmap = kwargs.pop('cmap',cm.jet)
isfilled = kwargs.pop('isfilled',False)
xy = kwargs.pop('xy',0)
xcoord = kwargs.pop("xcoord",None)
ycoord = kwargs.pop("ycoord",None)
lin = kwargs.pop('lin',0)
xmax = kwargs.pop('xmax',10)
ymax = kwargs.pop('ymax',5)
cbxlabel = kwargs.pop('cbxla',None)
cbylabel = kwargs.pop('cbyla',None)
fntsize = kwargs.pop("fntsize",20)
cbgoodticks = kwargs.pop("cbgoodticks",1)
xlabel = kwargs.pop("xla",None)
ylabel = kwargs.pop("yla",None)
dobh = kwargs.pop("dobh",1)
pretty = kwargs.pop("pretty",0)
ax = kwargs.pop("ax",None)
cbticks = kwargs.pop("cbticks",None)
domathify = kwargs.pop("domathify",0)
if np.abs(xy)==1:
if xcoord is None: xcoord = r * np.sin(h)
if ycoord is None: ycoord = r * np.cos(h)
if mirrory: ycoord *= -1
if mirrorx: xcoord *= -1
if xcoord is not None and ycoord is not None:
xcoord = xcoord[:,:,None] if xcoord.ndim == 2 else xcoord[:,:,k:k+1]
ycoord = ycoord[:,:,None] if ycoord.ndim == 2 else ycoord[:,:,k:k+1]
if np.abs(xy)==1 and symmx:
if myvar.ndim == 2:
myvar = myvar[:,:,None] if myvar.ndim == 2 else myvar[:,:,k:k+1]
myvar=np.concatenate((myvar[:,::-1],myvar),axis=1)
xcoord=np.concatenate((-xcoord[:,::-1],xcoord),axis=1)
ycoord=
|
np.concatenate((ycoord[:,::-1],ycoord),axis=1)
|
numpy.concatenate
|
# noqa: D205,D400
"""
==============================
Fire Weather Indices Submodule
==============================
This submodule defines the :py:func:`xclim.indices.fire_season`, :py:func:`xclim.indices.drought_code`
and :py:func:`xclim.indices.fire_weather_indexes` indices, which are used by the eponymous indicators.
Users should read this module's documentation and that of `fire_weather_ufunc`.
First adapted from Matlab code `CalcFWITimeSeriesWithStartup.m` from GFWED made for using
MERRA2 data, which was a translation of FWI.vba of the Canadian Fire Weather Index system.
Then, updated and synchronized with the R code of the cffdrs package. When given the correct parameters,
the current code has an error below 3% when compared with the [GFWED]_ data.
Parts of the code and of the documentation in this submodule are taken directly from [cffdrs]_, which was published under the GPL-2 license.
Fire season
-----------
Fire weather indexes are computed iteratively, each day's value depending on the previous day's indexes.
Additionally and optionally, the codes are "shut down" (set to NaN) in winter. There are a few ways of computing this
shut down and the subsequent spring start up. The `fire_season` function allows for full control of that,
replicating the `fireSeason` method in the R package. It produces a mask to be given as `season_mask` to the
indicators. However, `fire_weather_ufunc` and the indicators also accept a `season_method` parameter so the
fire season can be computed inside the iterator. Passing `season_method=None` switches to an "always on" mode
replicating the `fwi` method of the R package.
The fire season determination is based on three consecutive daily maximum temperature thresholds ([WF93]_ , [LA08]_).
A "GFWED" method is also implemented. There, the 12h LST temperature is used instead of the daily maximum.
The current implementation is slightly different from the description in [GFWED]_, but it replicates the Matlab code
when `temp_start_thresh` and `temp_end_thresh` are both set to 6 degC.
In xclim, the number of consecutive days, the start and end temperature thresholds and the snow depth threshold
can all be modified.
Overwintering
-------------
Additionally, overwintering of the drought code is also directly implemented in :py:func:`fire_weather_ufunc`.
The last drought_code of the season is kept in "winter" (where the fire season mask is False) and the precipitation
is accumulated until the start of the next season. The first drought code is computed as a function of these instead
of using the default DCStart value. Parameters to :py:func:`_overwintering_drought_code` are listed below.
The code for the overwintering is based on [ME19]_.
Finally, a mechanism for dry spring starts is implemented. For now, it is slightly different from what GFWED uses, but
it seems to agree with the current state of the science of the CFS. When activated, the drought code and Duff-moisture code are started
in spring with a value that is a function of the number of days since the last significant precipitation event:
the conventional start value is increased by that number of days times a "dry start" factor. Parameters are controlled in
the call of the indices and of :py:func:`fire_weather_ufunc`. Overwintering of the drought code overrides this mechanism if both are activated.
GFWED uses a more complex approach, with an added check on the previous day's snow cover for determining "dry" points. Moreover,
there, the start values are simply a factor multiplied by the number of dry days, without adding the conventional start value.
Examples
--------
The current literature seems to agree that climate-oriented series of the fire weather indexes should be computed
using only the longest fire season of each year and activating the overwintering of the drought code and
the "dry start" for the duff-moisture code. The following example uses reasonable parameters when computing over all of Canada.
**Note:** here the example snippets use the _indices_ defined in this very module, but we always recommend using the _indicators_
defined in the `xc.atmos` module.
>>> ds = open_dataset("ERA5/daily_surface_cancities_1990-1993.nc")
>>> ds = ds.assign(
hurs=xclim.atmos.relative_humidity_from_dewpoint(ds=ds),
tas=xclim.core.units.convert_units_to(ds.tas, "degC"),
pr=xclim.core.units.convert_units_to(ds.pr, "mm/d"),
sfcWind=xclim.atmos.wind_speed_from_vector(ds=ds)[0]
)
>>> season_mask = fire_season(
tas=ds.tas,
method="WF93",
freq="YS",
# Parameters below are at their default values, but listed here for explicitness.
temp_start_thresh="12 degC",
temp_end_thresh="5 degC",
temp_condition_days=3,
)
>>> out_fwi = fire_weather_indexes(
tas=ds.tas,
pr=ds.pr,
hurs=ds.hurs,
sfcWind=ds.sfcWind,
lat=ds.lat,
season_mask=season_mask,
overwintering=True,
dry_start="CFS",
prec_thresh="1.5 mm/d",
dmc_dry_factor=1.2,
# Parameters below are at their default values, but listed here for explicitness.
carry_over_fraction=0.75,
wetting_efficiency_fraction=0.75,
dc_start=15,
dmc_start=6,
ffmc_start=85,
)
Similarly, the next lines calculate the fire weather indexes, but according to the parameters and options
used in NASA's GFWED datasets. Here, no need to split the fire season mask from the rest of the computation
as _all_ seasons are used, even the very short shoulder seasons.
>>> ds = open_dataset("FWI/GFWED_sample_2017.nc")
>>> out_fwi = fire_weather_indexes(
tas=ds.tas,
pr=ds.prbc,
snd=ds.snow_depth,
hurs=ds.rh,
sfcWind=ds.sfcwind,
lat=ds.lat,
season_method="GFWED",
overwintering=False,
dry_start="GFWED",
temp_start_thresh="6 degC",
temp_end_thresh="6 degC",
# Parameters below are at their default values, but listed here for explicitness.
temp_condition_days=3,
snow_condition_days=3,
dc_start=15,
dmc_start=6,
ffmc_start=85,
dmc_dry_factor=2,
)
References
----------
Codes:
Updated source code for calculating fire danger indexes in the Canadian Forest Fire Weather Index System, <NAME>, <NAME>, and <NAME>, INFORMATION REPORT NOR-X-424, 2015.
.. [cffdrs] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Canadian Forest Fire Danger Rating System, R package, CRAN, https://cran.r-project.org/package=cffdrs
https://cwfis.cfs.nrcan.gc.ca/background/dsm/fwi
Matlab code of the GFWED obtained through personal communication.
Fire season determination methods:
.. [WF93] <NAME>. and <NAME>. (1993). Length of the fire season in a changing climate. Forestry Chronicle, 69, 187-192.
.. [LA08] <NAME>. and <NAME>.B. 2008. Weather Guide for the Canadian Forest Fire Danger Rating System. Natural Resources Canada, Canadian Forest Service, Northern Forestry Centre, Edmonton, Alberta. 84 p. http://cfs.nrcan.gc.ca/pubwarehouse/pdfs/29152.pdf
.. [GFWED] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2015) Development of a Global Fire Weather Database, Nat. Hazards Earth Syst. Sci., 15, 1407–1423, https://doi.org/10.5194/nhess-15-1407-2015
Drought Code overwintering:
.. [VW85] <NAME>. 1985. Drought, timelag and fire danger rating. Pages 178-185 in L.R. Donoghue and <NAME>, eds. Proc. 8th Conf. Fire For. Meteorol., 29 Apr.-3 May 1985, Detroit, MI. Soc. Am. For., Bethesda, MD. http://cfs.nrcan.gc.ca/pubwarehouse/pdfs/23550.pd
.. [ME19] <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>.: A high-resolution reanalysis of global fire weather from 1979 to 2018 – overwintering the Drought Code, Earth Syst. Sci. Data, 12, 1823–1833, https://doi.org/10.5194/essd-12-1823-2020, 2020.
"""
# This file is structured in the following way:
# Section 1: individual codes, numba-accelerated and vectorized functions.
# Section 2: Larger computing functions (the FWI iterator and the fire_season iterator)
# Section 3: Exposed methods and indices.
#
# Methods starting with a "_" are not usable with xarray objects, whereas the others are.
from collections import OrderedDict
from typing import Mapping, Optional, Sequence, Union
import numpy as np
import xarray as xr
from numba import jit, vectorize
from xclim.core.units import convert_units_to, declare_units
from . import run_length as rl
default_params = dict(
temp_start_thresh=(12, "degC"),
temp_end_thresh=(5, "degC"),
snow_thresh=(0.01, "m"),
temp_condition_days=3,
snow_condition_days=3,
carry_over_fraction=0.75,
wetting_efficiency_fraction=0.75,
dc_start=15,
dmc_start=6,
ffmc_start=85,
prec_thresh=(1.0, "mm/d"),
dc_dry_factor=5,
dmc_dry_factor=2,
snow_cover_days=60,
snow_min_cover_frac=0.75,
snow_min_mean_depth=(0.1, "m"),
)
"""
Default values for numerical parameters of fire_weather_ufunc.
Parameters with units are given as a tuple of default value and units.
A more complete explanation of these parameters is given in the doc of :py:func:`fire_weather_ufunc`.
"""
# SECTION 1 - Codes - Numba accelerated and vectorized functions
# Values taken from GFWED code
DAY_LENGTHS = np.array(
[
[11.5, 10.5, 9.2, 7.9, 6.8, 6.2, 6.5, 7.4, 8.7, 10, 11.2, 11.8],
[10.1, 9.6, 9.1, 8.5, 8.1, 7.8, 7.9, 8.3, 8.9, 9.4, 9.9, 10.2],
12 * [9],
[7.9, 8.4, 8.9, 9.5, 9.9, 10.2, 10.1, 9.7, 9.1, 8.6, 8.1, 7.8],
[6.5, 7.5, 9, 12.8, 13.9, 13.9, 12.4, 10.9, 9.4, 8, 7, 6],
]
)
DAY_LENGTH_FACTORS = np.array(
[
[6.4, 5.0, 2.4, 0.4, -1.6, -1.6, -1.6, -1.6, -1.6, 0.9, 3.8, 5.8],
12 * [1.39],
[-1.6, -1.6, -1.6, 0.9, 3.8, 5.8, 6.4, 5.0, 2.4, 0.4, -1.6, -1.6],
]
)
@jit
def _day_length(lat: Union[int, float], mth: int): # pragma: no cover
"""Return the average day length for a month within latitudinal bounds."""
if -30 > lat >= -90:
dl = DAY_LENGTHS[0, :]
elif -15 > lat >= -30:
dl = DAY_LENGTHS[1, :]
elif 15 > lat >= -15:
return 9
elif 30 > lat >= 15:
dl = DAY_LENGTHS[3, :]
elif 90 >= lat >= 30:
dl = DAY_LENGTHS[4, :]
elif lat > 90 or lat < -90:
raise ValueError("Invalid lat specified.")
else:
raise ValueError
return dl[mth - 1]
@jit
def _day_length_factor(lat: float, mth: int): # pragma: no cover
"""Return the day length factor."""
if -15 > lat >= -90:
dlf = DAY_LENGTH_FACTORS[0, :]
elif 15 > lat >= -15:
return 1.39
elif 90 >= lat >= 15:
dlf = DAY_LENGTH_FACTORS[2, :]
elif lat > 90 or lat < -90:
raise ValueError("Invalid lat specified.")
else:
raise ValueError
return dlf[mth - 1]
@vectorize
def _fine_fuel_moisture_code(t, p, w, h, ffmc0): # pragma: no cover
"""Compute the fine fuel moisture code over one time step.
Parameters
----------
t: array
Noon temperature [C].
p : array
Rain fall in open over previous 24 hours, at noon [mm].
w : array
Noon wind speed [km/h].
h : array
Noon relative humidity [%].
ffmc0 : array
Previous value of the fine fuel moisture code.
Returns
-------
array
Fine fuel moisture code at the current timestep.
"""
mo = (147.2 * (101.0 - ffmc0)) / (59.5 + ffmc0) # *Eq.1*#
if p > 0.5:
rf = p - 0.5 # *Eq.2*#
if mo > 150.0:
mo = (
mo
+ 42.5 * rf * np.exp(-100.0 / (251.0 - mo)) * (1.0 -
|
np.exp(-6.93 / rf)
|
numpy.exp
|
#!/usr/bin/env python
"""
basictest2
Created on Thu Jan 29 13:10:33 2015
@author: <NAME>
"""
import numpy as np
import time
import ISRSpectrum.ISRSpectrum as ISSnew
from isrutilities.physConstants import v_Boltz, v_C_0, v_epsilon0, v_elemcharge, v_me, v_amu
import matplotlib.pylab as plt
if __name__== '__main__':
ISS2 = ISSnew.ISRSpectrum(centerFrequency = 440.2*1e6, bMag = 0.4e-4, nspec=256, sampfreq=50e3,dFlag=True)
ti = 1.0e3
te = 2.0e3
Ne = 1e11
mi = 16
Ce = np.sqrt(v_Boltz*te/v_me)
Ci =
|
np.sqrt(v_Boltz*ti/(v_amu*mi))
|
numpy.sqrt
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as pyplot
from matplotlib.pyplot import cm
import yoda
import os
import glob
from matplotlib import gridspec
from scipy.special import factorial
from scipy.special import gamma as gammafunc
from scipy.optimize import fmin
from scipy.optimize import minimize
import scipy.integrate as integrate
import ROOT
import sys
#################################### Set Up ####################################
SBNames=['Backgrounds','Signal']
SBcolors=['red','blue']
Processes = ['VBF', 'W Higgs-Strahlung', 'Z Higgs-Strahlung']
Channels = ['0Lep','1Lep','2Lep']
Lep0Cuts=[r'Sherpa',
r'$p_{T} < 30 GeV$', #1
r'$N_{isolep} = 0$', #2
r'$N_{fj} \geq 1$', #3
r'$M_{fj} \approx M_{higgs}$',#4
r'$N{jets \in\not fj} = 2$', #5
r'$N_{DV} > 0$', #6
r'$N{jets \in fj} = 2$', #7
r'vertex', #8
r'ML'] #9
Lep1Cuts=[r'Sherpa',
r'$p_{T} > 30 GeV$', #1
r'$N_{isolep} = 1$', #2
r'$N_{fj} \geq 1$', #3
r'$M_{fj} \approx M_{higgs}$',#4
r'Mom balance', #5
r'$N_{DV} > 0$', #6
r'$N{jets \in fj} = 2$', #7
r'vertex', #8
r'ML'] #9
Lep2Cuts=[r'Sherpa',
r'$p_{T} < 30 GeV$', #1
r'$N_{isolep} = 2$', #2
r'$N_{fj} \geq 1$', #3
r'$M_{fj} \approx M_{higgs}$',#4
r'Mom balance', #5
r'$N_{DV} > 0$', #6
r'$N{jets \in fj} = 2$', #7
r'vertex', #8
r'ML'] #9
CutsLabels = [Lep0Cuts,Lep1Cuts,Lep2Cuts]
MLCutID = len(CutsLabels[0])-1 #After ML cuts
FinalCutID = MLCutID - 1 #After Vertex cuts
colors = cm.rainbow(np.linspace(0, 1, len(Channels)))
#https://twiki.cern.ch/twiki/bin/view/LHCPhysics/CERNYellowReportPageAt1314TeV2014
Sigmas = np.zeros((len(Channels),2)) #[Channel, crosssections, errors]
Sigmas[0] = np.array([3.748*1000,3.748*1000*(3.2*1e-2)]) #VBF 3.2% error
Sigmas[1] = np.array([1.380*1000,1.380*1000*(2.2*1e-2)]) #WH 2.2% error
Sigmas[2] = np.array([(0.8696+0.1057)*1000,(0.8696+0.1057)*1000*(6.4*1e-2)]) #ZH 6.4% error
SherpaIntError = 0.1
HTotWidth = np.array([ 4.07e-3*1000, 4.07e-3*(4*0.01)*1000]) #MeV
Hmass = np.array([125.1,0.24]) #Mass, Error
Cmass = np.array([1.27, 0.02]) #Mass, Error
Bmass = np.array([4.18, 0.04]) #Mass, Error
yccsqSM = np.array([(Cmass[0] / 246)**2,0]) #SM Value
yccsqSM[1] = yccsqSM[0]*(Cmass[1]/Cmass[0])*2
ybbsqSM = np.array([(Bmass[0] / 246)**2,0])
ybbsqSM[1] = ybbsqSM[0]*(Bmass[1]/Bmass[0])*2
#https://twiki.cern.ch/twiki/bin/view/LHCPhysics/CERNYellowReportPageBR
BrCC_SM = np.array([2.891e-2, 2.891e-2*(5*1e-2)])
BrBB_SM = np.array([5.824e-1, 5.824e-1*(0.8*1e-2)])
#################################### Misc useful ####################################
def Gaussian(Mean, SD, x):
return np.exp(-0.5*((x-Mean)/SD)**2.0)/(SD*np.sqrt(2*np.pi))
def Poisson(Lambda, N):
return np.exp(-Lambda)*(Lambda)**(N) / factorial(N)
def ChiSq(x,k):
return ( x**(k*0.5 - 1)*np.exp(-x*0.5) ) / ( 2**(0.5*k)*gammafunc(0.5*k))
def PValue(Mean,SD,Xmin,Xmax):
return integrate.quad(lambda x: Gaussian(Mean,SD,x), Xmin,Xmax)
def Confidence(Mean,SD,PVal):
import scipy.integrate as integrate
Xs = np.linspace(Mean+5*SD,Mean,10000)
for i in range(len(Xs)):
P = integrate.quad(lambda x: Gaussian(Mean,SD,x), Xs[i],np.inf)[0]
if P > PVal:
XCrit = (Xs[i]+Xs[i-1])/2
break
return XCrit
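# Note: up to the resolution of the Xs grid, this scan is equivalent to the closed form
# XCrit = Mean + SD*norm.isf(PVal), i.e. the upper-tail Gaussian quantile.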
def TransformMuKa(CvsB, mu):
if CvsB in ['CB','CB_Enrich']: #Get y_c by feeding in knowledge of y_b from dat
Br = BrCC_SM[0]+BrBB_SM[0]
BrDivide = BrCC_SM[0]
elif CvsB in ['C','C_Enrich']:
Br = BrCC_SM[0]
BrDivide = BrCC_SM[0]
elif CvsB in ['B','B_Enrich']:
Br = BrBB_SM[0]
BrDivide = BrBB_SM[0]
eps_b = np.mean([0.153, 0.191, 0.138])
eps_c = np.mean([0.582, 0.527, 0.433])
if CvsB in ["CB_Enrich"]:
Be = eps_c*BrCC_SM[0]+eps_b*BrBB_SM[0]
elif CvsB in ["C_Enrich"]:
Be = eps_c*BrCC_SM[0]+0*BrBB_SM[0]
else:
eps_b = 1
eps_c = 1
Be = Br
mu_prime = (mu*Be*(1-Br))/(eps_c-Be*mu)
if CvsB in ['CB','CB_Enrich']: #Get y_c by feeding in knowledge of y_b from dat
T = -BrBB_SM[0]*((eps_b-Be*mu)/(eps_c-Be*mu))
elif CvsB in ['C','C_Enrich']:
T = 0
elif CvsB in ['B','B_Enrich']:
T = 0
return np.sqrt((mu_prime+T)/BrDivide ), (mu_prime+T)/BrDivide
def PlotMuKs():
Mus =
|
np.logspace(-1,2.5,10000)
|
numpy.logspace
|
from sklearn.model_selection import train_test_split
import torch
from transformers import *
import numpy as np
import torch.optim as optim
from tqdm import tqdm
from torch.utils.data import DataLoader, TensorDataset
from torch import tensor
from twitter_analyzer.data import MultiLabelClassificationProcessor
def accuracy_thresh(y_pred, y_true, thresh: float = 0.5, sigmoid: bool = True):
"Compute accuracy when `y_pred` and `y_true` are the same size."
if sigmoid: y_pred = y_pred.sigmoid()
return np.mean(np.mean(((y_pred > thresh) == y_true.byte()).float().cpu().numpy(), axis=1))
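# Usage sketch for accuracy_thresh (assumes raw logits and 0/1 targets of the same shape):
#   acc = accuracy_thresh(torch.randn(8, 6), torch.randint(0, 2, (8, 6)))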
label_map = {0: 'toxic',
1: 'severe_toxic',
2: 'obscene',
3: 'threat',
4: 'insult',
5: 'identity_hate'}
class MultiLabelClassifier:
def __init__(self,
num_classes,
tokenizer=BertTokenizer.from_pretrained("bert-base-uncased"),
transf_model=BertForSequenceClassification.from_pretrained("bert-base-uncased")
):
self.num_classes = num_classes
self.tokenizer = tokenizer
self.model = transf_model
def get_features(self,
x,
y):
processor = MultiLabelClassificationProcessor(mode="multilabel_classification")
processor.add_examples(texts_or_text_and_labels=x,
labels=y)
features = processor.get_features(tokenizer=self.tokenizer,
return_tensors="pt")
return features
def fit(self,
x,
y,
epochs=10,
lr=3e-5,
batch_size=8,
val_split=0.7,
model_save_path="weights_imdb.{epoch:02d}.hdf5"
):
x_train, x_valid, y_train, y_valid = train_test_split(x,
y,
train_size=val_split)
train_features = self.get_features(x=x_train, y=y_train)
valid_features = self.get_features(x=x_valid, y=y_valid)
train_input_ids = tensor(
|
np.array(train_features[:][0])
|
numpy.array
|
import torch
import numpy as np
import torch_utils
from Models import base_model
import torch_utils as my_utils
import time
import json
import interactions
from handlers.tensorboard_writer import TensorboardWrapper
from setting_keywords import KeyWordSettings
from Fitting.FittingFC.multi_level_attention_composite_fitter import MultiLevelAttentionCompositeFitter
from typing import List
from sklearn.metrics import f1_score, precision_score, recall_score
import sklearn
class CharManFitterQueryRepr1(MultiLevelAttentionCompositeFitter):
"""
    This class tests whether padding with all-zero sequences is the root cause of the performance reduction.
"""
def __init__(self, net: base_model.BaseModel,
loss="bpr",
n_iter=100,
testing_epochs=5,
batch_size=16,
reg_l2=1e-3,
learning_rate=1e-4,
early_stopping=0, # means no early stopping
decay_step=None,
decay_weight=None,
optimizer_func=None,
use_cuda=False,
num_negative_samples=4,
logfolder=None,
curr_date=None,
seed=None,
**kargs):
super(CharManFitterQueryRepr1, self).__init__(net, loss, n_iter, testing_epochs, batch_size, reg_l2, learning_rate,
early_stopping, decay_step, decay_weight, optimizer_func,
use_cuda, num_negative_samples, logfolder, curr_date, seed, **kargs)
self.output_size = kargs["output_size"]
def fit(self,
train_iteractions: interactions.ClassificationInteractions,
verbose=True, # for printing out evaluation during training
topN=10,
val_interactions: interactions.ClassificationInteractions = None,
test_interactions: interactions.ClassificationInteractions = None):
"""
Fit the model.
Parameters
----------
train_iteractions: :class:`interactions.ClassificationInteractions` The input sequence dataset.
val_interactions: :class:`interactions.ClassificationInteractions`
test_interactions: :class:`interactions.ClassificationInteractions`
"""
self._initialize(train_iteractions)
best_val_f1_macro, best_epoch = 0, 0
test_results_dict = None
iteration_counter = 0
count_patience_epochs = 0
for epoch_num in range(self._n_iter):
# ------ Move to here ----------------------------------- #
self._net.train(True)
query_ids, left_contents, left_lengths, query_sources, query_char_sources, query_adj, \
evd_docs_ids, evd_docs_contents, evd_docs_lens, evd_sources, evd_cnt_each_query, evd_char_sources, \
pair_labels, evd_docs_adj = self._sampler.get_train_instances_char_man(train_iteractions,
self.fixed_num_evidences)
queries, query_content, query_lengths, query_sources, query_char_sources, query_adj, \
evd_docs, evd_docs_contents, evd_docs_lens, evd_sources, evd_cnt_each_query, evd_char_sources, \
pair_labels, evd_docs_adj = my_utils.shuffle(query_ids, left_contents, left_lengths, query_sources,
query_char_sources, query_adj, evd_docs_ids, evd_docs_contents,
evd_docs_lens, evd_sources, evd_cnt_each_query,
evd_char_sources, pair_labels, evd_docs_adj)
epoch_loss, total_pairs = 0.0, 0
t1 = time.time()
for (minibatch_num,
(batch_query, batch_query_content, batch_query_len, batch_query_sources, batch_query_chr_src,
batch_query_adj, batch_evd_docs, batch_evd_contents, batch_evd_lens, batch_evd_sources,
# i.e. claim source
batch_evd_cnt_each_query, batch_evd_chr_src, batch_labels, batch_evd_docs_adj)) \
in enumerate(my_utils.minibatch(queries, query_content, query_lengths, query_sources,
query_char_sources, query_adj,
evd_docs, evd_docs_contents, evd_docs_lens, evd_sources,
evd_cnt_each_query, evd_char_sources, pair_labels, evd_docs_adj,
batch_size=self._batch_size)):
batch_query = my_utils.gpu(torch.from_numpy(batch_query), self._use_cuda)
batch_query_content = my_utils.gpu(torch.from_numpy(batch_query_content), self._use_cuda)
# batch_query_len = my_utils.gpu(torch.from_numpy(batch_query_len), self._use_cuda)
batch_query_sources = my_utils.gpu(torch.from_numpy(batch_query_sources), self._use_cuda)
batch_query_chr_src = my_utils.gpu(torch.from_numpy(batch_query_chr_src), self._use_cuda)
batch_query_adj = my_utils.gpu(torch.from_numpy(batch_query_adj), self._use_cuda)
batch_evd_docs = my_utils.gpu(torch.from_numpy(batch_evd_docs), self._use_cuda)
batch_evd_contents = my_utils.gpu(torch.from_numpy(batch_evd_contents), self._use_cuda)
# batch_evd_lens = my_utils.gpu(torch.from_numpy(batch_evd_lens), self._use_cuda)
batch_evd_sources = my_utils.gpu(torch.from_numpy(batch_evd_sources), self._use_cuda)
batch_evd_cnt_each_query = my_utils.gpu(torch.from_numpy(batch_evd_cnt_each_query), self._use_cuda)
batch_evd_chr_src = my_utils.gpu(torch.from_numpy(batch_evd_chr_src), self._use_cuda)
batch_labels = my_utils.gpu(torch.from_numpy(batch_labels), self._use_cuda)
batch_evd_docs_adj = my_utils.gpu(torch.from_numpy(batch_evd_docs_adj), self._use_cuda)
# total_pairs += self._batch_size * self.
additional_data = {KeyWordSettings.EvidenceCountPerQuery: batch_evd_cnt_each_query,
KeyWordSettings.FCClass.QueryCharSource: batch_query_chr_src,
KeyWordSettings.FCClass.DocCharSource: batch_evd_chr_src,
KeyWordSettings.Query_Adj: batch_query_adj,
KeyWordSettings.Evd_Docs_Adj: batch_evd_docs_adj}
self._optimizer.zero_grad()
if self._loss in ["bpr", "hinge", "pce", "bce", "cross_entropy",
"vanilla_cross_entropy", "regression_loss", "masked_cross_entropy"]:
loss = self._get_multiple_evidences_predictions_normal(
batch_query, batch_query_content, batch_query_len, batch_query_sources,
batch_evd_docs, batch_evd_contents, batch_evd_lens, batch_evd_sources,
batch_labels, self.fixed_num_evidences, **additional_data)
# print("Loss: ", loss)
epoch_loss += loss.item()
iteration_counter += 1
# if iteration_counter % 2 == 0: break
TensorboardWrapper.mywriter().add_scalar("loss/minibatch_loss", loss.item(), iteration_counter)
loss.backward()
self._optimizer.step()
# for name, param in self._net.named_parameters():
# self.tensorboard_writer.add_histogram(name + "/grad", param.grad, iteration_counter)
# self.tensorboard_writer.add_histogram(name + "/value", param, iteration_counter)
# epoch_loss /= float(total_pairs)
TensorboardWrapper.mywriter().add_scalar("loss/epoch_loss_avg", epoch_loss, epoch_num)
# print("Number of Minibatches: ", minibatch_num, "Avg. loss of epoch: ", epoch_loss)
t2 = time.time()
epoch_train_time = t2 - t1
if verbose: # validation after each epoch
f1_macro_val = self._output_results_every_epoch(topN, val_interactions, test_interactions,
epoch_num, epoch_train_time, epoch_loss)
if f1_macro_val > best_val_f1_macro :
# if (hits + ndcg) > (best_hit + best_ndcg):
count_patience_epochs = 0
with open(self.saved_model, "wb") as f:
torch.save(self._net.state_dict(), f)
# test_results_dict = result_test
best_val_f1_macro, best_epoch = f1_macro_val, epoch_num
# test_hit, test_ndcg = hits_test, ndcg_test
else:
count_patience_epochs += 1
if self._early_stopping_patience and count_patience_epochs > self._early_stopping_patience:
self.output_handler.myprint(
"Early Stopped due to no better performance in %s epochs" % count_patience_epochs)
break
if np.isnan(epoch_loss) or epoch_loss == 0.0:
raise ValueError('Degenerate epoch loss: {}'.format(epoch_loss))
self._flush_training_results(best_val_f1_macro, best_epoch)
def _flush_training_results(self, best_val_f1_macro: float, best_epoch: int):
self.output_handler.myprint("Closing tensorboard")
TensorboardWrapper.mywriter().close()
self.output_handler.myprint('Best result: | vad F1_macro = %.5f | epoch = %d' % (best_val_f1_macro, best_epoch))
def _get_multiple_evidences_predictions_normal(self, query_ids: torch.Tensor,
query_contents: torch.Tensor,
query_lens: np.ndarray,
query_sources: torch.Tensor,
evd_doc_ids: torch.Tensor,
evd_doc_contents: torch.Tensor,
evd_docs_lens: np.ndarray,
evd_sources: torch.Tensor,
labels: np.ndarray,
n: int, **kargs) -> torch.Tensor:
"""
compute cross entropy loss
Parameters
----------
query_ids: (B, )
query_contents: (B, L)
query_lens: (B, )
evd_doc_ids: (B, n)
evd_doc_contents: (B, n, R)
evd_docs_lens: (B, n)
evd_sources: (B, n)
labels: (B, ) labels of pair
n: `int` is the number of evidences for each claim/query
kargs: `dict` include: query_adj: (B,L,L), evd_docs_adj: (B, n, R, R)
Returns
-------
loss value based on a loss function
"""
evd_count_per_query = kargs[KeyWordSettings.EvidenceCountPerQuery] # (B, )
query_char_source = kargs[KeyWordSettings.FCClass.QueryCharSource]
doc_char_source = kargs[KeyWordSettings.FCClass.DocCharSource]
query_adj = kargs[KeyWordSettings.Query_Adj]
evd_docs_adj = kargs[KeyWordSettings.Evd_Docs_Adj]
assert evd_doc_ids.size() == evd_docs_lens.shape
assert query_ids.size(0) == evd_doc_ids.size(0)
assert query_lens.shape == labels.size()
assert query_contents.size(0) == evd_doc_contents.size(0) # = batch_size
_, L = query_contents.size()
batch_size = query_ids.size(0)
        # pruning at this step to remove padding
e_lens, e_conts, q_conts, q_lens, e_adj = [], [], [], [], []
e_chr_src_conts = []
expaned_labels = []
for evd_cnt, q_cont, q_len, evd_lens, evd_doc_cont, evd_chr_src, label, evd_adj in \
zip(evd_count_per_query, query_contents, query_lens,
evd_docs_lens, evd_doc_contents, doc_char_source, labels, evd_docs_adj):
evd_cnt = int(torch_utils.cpu(evd_cnt).detach().numpy())
e_lens.extend(list(evd_lens[:evd_cnt]))
e_conts.append(evd_doc_cont[:evd_cnt, :]) # stacking later
e_adj.append(evd_adj[:evd_cnt])
e_chr_src_conts.append(evd_chr_src[:evd_cnt, :])
q_lens.extend([q_len] * evd_cnt)
q_conts.append(q_cont.unsqueeze(0).expand(evd_cnt, L))
expaned_labels.extend([int(torch_utils.cpu(label).detach().numpy())] * evd_cnt)
# concat
e_conts = torch.cat(e_conts, dim=0) # (n1 + n2 + ..., R)
e_chr_src_conts = torch.cat(e_chr_src_conts, dim=0) # (n1 + n2 + ... , R)
e_adj = torch.cat(e_adj, dim=0) # (n1 + n2 + ..., R, R)
e_lens = np.array(e_lens) # (n1 + n2 + ..., )
q_conts = torch.cat(q_conts, dim=0) # (n1 + n2 + ..., R)
q_lens = np.array(q_lens)
assert q_conts.size(0) == q_lens.shape[0] == e_conts.size(0) == e_lens.shape[0]
d_new_indices, d_old_indices = torch_utils.get_sorted_index_and_reverse_index(e_lens)
e_lens = my_utils.gpu(torch.from_numpy(e_lens), self._use_cuda)
x = query_lens
q_new_indices, q_restoring_indices = torch_utils.get_sorted_index_and_reverse_index(x)
x = my_utils.gpu(torch.from_numpy(x), self._use_cuda)
# query_lens = my_utils.gpu(torch.from_numpy(query_lens), self._use_cuda)
additional_paramters = {
            KeyWordSettings.Query_lens: x,  # length of each query
KeyWordSettings.Doc_lens: evd_docs_lens,
KeyWordSettings.DocLensIndices: (d_new_indices, d_old_indices, e_lens),
KeyWordSettings.QueryLensIndices: (q_new_indices, q_restoring_indices, x),
KeyWordSettings.QuerySources: query_sources,
KeyWordSettings.DocSources: evd_sources,
KeyWordSettings.TempLabel: labels,
KeyWordSettings.DocContentNoPaddingEvidence: e_conts,
KeyWordSettings.QueryContentNoPaddingEvidence: q_conts,
KeyWordSettings.EvidenceCountPerQuery: evd_count_per_query,
KeyWordSettings.FCClass.QueryCharSource: query_char_source, # (B, 1, L)
KeyWordSettings.FCClass.DocCharSource: e_chr_src_conts,
KeyWordSettings.FIXED_NUM_EVIDENCES: n,
KeyWordSettings.Query_Adj: query_adj,
KeyWordSettings.Evd_Docs_Adj: e_adj # flatten->(n1 + n2 ..., R, R)
}
# (B,)
predictions = self._net(query_contents, evd_doc_contents, **additional_paramters)
# labels.unsqueeze(-1).expand(batch_size, n).reshape(batch_size * n)
# labels = torch_utils.gpu(torch.from_numpy(np.array(expaned_labels)), self._use_cuda)
# print("Labels: ", labels)
# mask = (evd_doc_ids >= 0).view(batch_size * n).float()
return self._loss_func(predictions, labels.float())
def evaluate(self, testRatings: interactions.ClassificationInteractions, K: int, output_ranking=False, **kargs):
"""
        Compute evaluation metrics.
Parameters
----------
testRatings
K
output_ranking: whether we should output predictions
kargs
Returns
-------
"""
all_labels = []
all_final_preds = []
all_final_probs = []
list_error_analysis = []
for query, evidences_info in testRatings.dict_claims_and_evidences_test.items():
evd_ids, labels, evd_contents, evd_lengths, evd_adj = evidences_info
assert len(set(labels)) == 1, "Must have only one label due to same claim"
all_labels.append(labels[0])
claim_content = testRatings.dict_claim_contents[query]
claim_source = np.array([testRatings.dict_claim_source[query]]) # (1, )
claim_char_src = np.array([testRatings.dict_char_left_src[query]])
evd_sources = np.array([testRatings.dict_evd_source[e] for e in evd_ids]) # (len(labels), 1)
evd_sources = self._pad_article_sources(evd_sources) # (1, 30)
evd_char_src = np.array([testRatings.dict_char_right_src[e] for e in evd_ids]) # (len(labels), 1)
query_len = np.array([testRatings.dict_claim_lengths[query]]) # shape = (1, ) where B =1
# doc_lens = [testRatings.dict_doc_lengths[d] for d in docs]
claim_content = np.tile(claim_content, (1, 1)) # (1, L)
L = claim_content.shape[1]
evd_contents =
|
np.array(evd_contents)
|
numpy.array
|
##
## results summary
##
import numpy as np
import pandas as pd
from scipy.stats.distributions import norm
from .tools import maybe_diag
##
## constants
##
z95 = norm.ppf(0.975)
##
## param summary
##
def param_table(beta, y_name, x_names, sigma=None):
# basic frame
frame = pd.DataFrame({
'coeff': beta,
}, index=x_names)
frame = frame.rename_axis(y_name, axis=1)
# handle sigma cases
if sigma is None:
return frame
elif type(sigma) is tuple:
sigr, sigc = sigma
stderr = np.sqrt(np.hstack([maybe_diag(sigr), sigc]))
else:
stderr = np.sqrt(maybe_diag(sigma))
# confidence interval
low95 = beta - z95*stderr
high95 = beta + z95*stderr
# p-value
zscore = beta/stderr
pvalue = 2*(1-norm.cdf(
|
np.abs(zscore)
|
numpy.abs
|
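# A small hedged sketch of the interval/p-value arithmetic param_table performs above
# (illustrative numbers only; the final DataFrame assembly is cut off in the snippet):
import numpy as np
from scipy.stats.distributions import norm

beta = np.array([1.2, -0.4])
stderr = np.array([0.2, 0.1])
z95 = norm.ppf(0.975)                                # ~1.96
low95, high95 = beta - z95 * stderr, beta + z95 * stderr
zscore = beta / stderr
pvalue = 2 * (1 - norm.cdf(np.abs(zscore)))          # two-sided normal p-value
print(low95, high95, pvalue)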
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 14:42:29 2021
@author: emilygordon
"""
# LRP of the top transitions from the top-accuracy models
# no significance testing, just contours
from keras.layers import Dense, Activation, Dropout
from keras import regularizers,optimizers,metrics,initializers
from keras.utils import to_categorical
from keras.models import Sequential
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import numpy.random as random
from matplotlib.colors import ListedColormap
import tensorflow as tf
import innvestigate
import cartopy.crs as ccrs
from scipy.cluster.vq import kmeans,vq
import seaborn as sns
import cmasher as cmr
from scipy import interpolate
sampleweight=1.2 # zone marker
topseeds = [44,75,89]# random seeds for best models
run=6 # running mean used
gap=30 # cutoff
month1 = 12 # transition range we focus on
month2 = 27
def loadmodel(random_seed): # function to load in model wanted
gap = 30
n_units = 8
num_classes = 2
ridgepen = 12
run=6
# load in the model and its validation data
modelstr1 = '../models/PDOtransition_fromOHC_better/SingleLayer_%dunits_seed%d_ridgepen%d_run%d_samedirection_leadmonth%d.h5' %(
n_units, random_seed, ridgepen, run, gap)
model1 = Sequential()
model1.add(Dense(n_units, activation='relu',input_shape=(3*4050,)))
model1.add(Dense(num_classes,activation='softmax'))
model1.load_weights(modelstr1)
trainingdatastr = modelstr1[:-3] + '_validation.nc'
ohcPDOweight_dataset = xr.open_dataset(trainingdatastr)
return model1, ohcPDOweight_dataset, modelstr1 #output the model, validation data, and string
#%%
# import ALL data, nn input, output, and deseasoned ohc
ohcstr = "../data/ohc_nninput_3xmaps_4monthsapart_run%d_samedirection.nc" %(run)
PDOstr = "../data/PDO_nninput_run%d_samedirection.nc" %(run)
ohcstr2 = "../data/ohc-deseasoned_0-24000.nc"
PDO_dataarray = xr.open_dataset(PDOstr,decode_times=False)
ohc_dataarray = xr.open_dataset(ohcstr,decode_times=False)
ohc2_dataarray = xr.open_dataset(ohcstr2,decode_times=False)
lat = np.asarray(ohc2_dataarray.lat)
lon = np.asarray(ohc2_dataarray.lon)
PDO = PDO_dataarray.PDO
ohc = ohc_dataarray.ohc
PDO = np.asarray(PDO)
ohc = np.asarray(ohc)
ohc[np.isnan(ohc)] = 0
#make phase lengths vector
samplesize = np.shape(PDO)[0]
PDO_now = np.copy(PDO)
PDO_now[PDO_now>0] = 1
PDO_now[PDO_now<=0] = 0
phaselength = []
jj=0
while jj<samplesize:
kk=jj
check = PDO_now[jj]
while check == PDO_now[kk]:
kk+=1
if kk == samplesize:
kk = np.nan
break
looplength = kk-jj
phaselength.append(looplength)
jj+=1
phaselength = np.asarray(phaselength)
phaselength = phaselength[~np.isnan(phaselength)]
# cut down to phase lengths available
samplescut = np.shape(phaselength)[0]
PDO = PDO[:samplescut]
ohc = ohc[:samplescut,:]
PDO_now = PDO_now[:samplescut]
#%% using ohc2, make ohc maps for plotting
ohcmaps = np.asarray(ohc2_dataarray.ohc) # deseasoned, monthly ohc
arrdims = np.shape(ohcmaps)
ohc_run = []
for ii in range(arrdims[0]-run): # apply 'run' running mean
ohcint = np.mean(ohcmaps[ii:ii+run,:,:],axis=0)
ohc_run.append(ohcint)
ohc_run = np.asarray(ohc_run)
ohc_run = np.divide(ohc_run,np.nanstd(ohc_run,axis=0)) # standardize
ohc0 = ohc_run[(gap+8):,:,:] # ohc at month0 output month
ohcnowlong = ohc_run[8:]
ohctrans = []
# ohctrans at any point is the 1st ohc grid after the next transition
for ii in range(samplescut):
ind=phaselength[ii]
if ~np.isnan(ind):
ohctransint = ohcnowlong[ii+int(phaselength[ii]),:,:]
else:
ohctransint = np.empty((45,90))
ohctransint[:] = np.nan
ohctrans.append(ohctransint)
ohctrans = np.asarray(ohctrans)
#%% ohc0 is shorter than the other arrays because it is the only one offset by "gap",
# so chop everything down to the same length
ohc0 = ohc0[:samplescut,:]
ohctrans = ohctrans[:samplescut,:,:]
ohc0size = np.shape(ohc0)[0]
phaselength = phaselength[:ohc0size]
ohc = ohc[:ohc0size,:]
ohctrans=ohctrans[:ohc0size,:,:]
PDO_now = PDO_now[:ohc0size]
#%% analyze model
PDO_pred = 1*(phaselength<=gap)
nummodels = np.shape(topseeds)[0]
acctotal = np.empty(nummodels) # vector of total accuracies
acctranstotal = np.empty(nummodels)
accalltranstotal = np.empty(nummodels)
condactposneg = np.empty(nummodels)
condactnegpos = np.empty(nummodels)
allPDOtrue = np.copy(PDO_pred) #truth output copy (useful)
allLRPmapsposneg = [] #initialise arrays
allohcmapsposneg = []
allohc0posneg = []
allohctransposneg = []
allLRPmapsnegpos = []
allohcmapsnegpos = []
allohc0negpos = []
allohctransnegpos = []
allposneggrabfrom = []
allnegposgrabfrom = []
allposnegsig = np.empty((nummodels,4050))
allnegpossig = np.empty((nummodels,4050))
for loopind, seeds in enumerate(topseeds):
print(loopind)
model1, ohcPDOweight_dataset, modelstr1 = loadmodel(seeds) #load model
valdata = np.asarray(ohcPDOweight_dataset.ohc) #load validation data
valsize = np.shape(valdata)[0]
ohc_val = valdata[:,:12150] #separate into input
PDO_val = valdata[:,-3] #validation truth
inzone_val = valdata[:,-2] #validation 12-24 zone
phaselength_val = valdata[:,-1] #phaselength
PDOguess = model1.predict(ohc_val) #make prediction of validation
argPDOguess = np.argmax(PDOguess,axis=1)
argPDOtrue = np.copy(PDO_val)
modelcorr = argPDOtrue == argPDOguess #where model correct about validation data
nummodelcorr = np.shape(PDO_val[modelcorr])[0] #num correct of validation
accint = nummodelcorr/valsize
acctotal[loopind] = accint # save total accuracy
transinzone = inzone_val==sampleweight #calculate 12-24 month accuracy
numtranszone = np.shape(inzone_val[transinzone])[0]
numcorrtranszone = np.shape(inzone_val[transinzone & modelcorr])[0]
acctransint = numcorrtranszone/numtranszone
acctranstotal[loopind] = acctransint #save 12-24 month accuracy
alltrans = np.shape(inzone_val[PDO_val==1])[0]
corrtrans = np.shape(inzone_val[(PDO_val==1) & modelcorr])[0]
accalltranstotal[loopind] = (corrtrans/alltrans)
#now testing on ALLLLL data (training, validation and whatever is left over)
PDOguessall = model1.predict(ohc) #predict all data
argPDOguessall = np.argmax(PDOguessall,axis=1)
modelcorrall = (argPDOguessall == allPDOtrue) # boo where model is correct
modelconfall = np.max(PDOguessall,axis=1) # absolute confidence
numtrue = np.shape(PDO_pred[modelcorrall])[0] # total correct predictions
truepersistence = allPDOtrue==0 # boolean True for persistence
truetrans = allPDOtrue==1 # boolean True for transition
posnow = PDO_now==1 # boo True where PDO phase is positive at input
negnow = PDO_now==0 # boo True where PDO phase is negative at input
numposneg = np.shape(posnow[posnow & truetrans])[0]
numnegpos = np.shape(negnow[negnow & truetrans])[0]
numtrueposneg = np.shape(posnow[posnow & truetrans & modelcorrall])[0]
numtruenegpos = np.shape(negnow[negnow & truetrans & modelcorrall])[0]
condactposneg[loopind] = numtrueposneg/numposneg
condactnegpos[loopind] = numtruenegpos/numnegpos
inzone = (phaselength>=month1) & (phaselength<=month2) # boo true where phaselength inzone
# strip softmax and make LRP-Z analyzer
model_nosoftmax = innvestigate.utils.model_wo_softmax(model1)
analyzer1 = innvestigate.analyzer.relevance_based.relevance_analyzer.LRPZ(model_nosoftmax)
# find cutoff of confidence for each transition direction
confpercent = 50
transconfposneg = modelconfall[modelcorrall & truetrans & inzone & posnow]
confcutoffposneg = np.percentile(transconfposneg,confpercent)
cutoffbooposneg = modelconfall>confcutoffposneg # boo for cutting lowest 50% posneg predictions
transconfnegpos = modelconfall[modelcorrall & truetrans & inzone & negnow]
confcutoff = np.percentile(transconfnegpos,confpercent)
cutoffboonegpos = modelconfall>confcutoff # boo for cutting lowest 50% negpos predictions
#grab ohc at input, transition and output for the intersection of: model is correct, transition occurs,
# transition was in 12-24 month zone, confidence above threshold, and pos to neg transition
ohcLRPposneg = ohc[modelcorrall & truetrans & inzone & cutoffbooposneg & posnow,:]
ohc0LRPposneg = ohc0[modelcorrall & truetrans & inzone & cutoffbooposneg & posnow,:,:]
ohctransposneg = ohctrans[modelcorrall & truetrans & inzone & cutoffbooposneg & posnow,:,:]
numposnegzone = np.shape(inzone[truetrans & inzone & posnow])[0]
numnegposzone = np.shape(inzone[truetrans & inzone & negnow])[0]
numallposnegtruezone = np.shape(inzone[truetrans & inzone & posnow & modelcorrall])[0]
numallnegpostruezone = np.shape(inzone[truetrans & inzone & negnow & modelcorrall])[0]
#grab ohc at input, transition and output for the intersection of: model is correct, transition occurs,
# transition was in 12-24 month zone, confidence above threshold, and neg to pos transition
ohcLRPnegpos = ohc[modelcorrall & truetrans & inzone & cutoffboonegpos & negnow,:]
ohc0LRPnegpos = ohc0[modelcorrall & truetrans & inzone & cutoffboonegpos & negnow,:,:]
ohctransnegpos = ohctrans[modelcorrall & truetrans & inzone & cutoffboonegpos & negnow,:,:]
#maps for LRP output
LRPmaps = []
predcheck1 = []
for ii, mapsamp in enumerate(ohc): # LRP all maps
sample = np.transpose(np.expand_dims(mapsamp,axis=1)) # sample in correct dimensions
pred = np.max(model1.predict(sample),axis=1) #predict sample
LRPsample = analyzer1.analyze(sample) # calculate LRP map
LRPsample = np.divide(LRPsample,pred) # divide by confidence
LRPmaps.append(np.squeeze(LRPsample)) # append to vector
predcheck1.append(pred)
LRPmaps = np.asarray(LRPmaps)
predcheck1 = np.asarray(predcheck1)
#grab LRP at input for the intersection of: model is correct, transition occurs,
# transition was in 12-24 month zone, confidence above threshold, and pos to neg (neg to pos) transition
LRPmapsposneg = LRPmaps[modelcorrall & truetrans & inzone & cutoffbooposneg & posnow,8100:]
LRPmapsnegpos = LRPmaps[modelcorrall & truetrans & inzone & cutoffboonegpos & negnow,8100:]
allLRPmapsposneg.append(LRPmapsposneg) # LRP and OHC maps for each model
allohcmapsposneg.append(ohcLRPposneg)
allohc0posneg.append(ohc0LRPposneg)
allohctransposneg.append(ohctransposneg)
allLRPmapsnegpos.append(LRPmapsnegpos)
allohcmapsnegpos.append(ohcLRPnegpos)
allohc0negpos.append(ohc0LRPnegpos)
allohctransnegpos.append(ohctransnegpos)
#%% make map stacks from the per-model lists (a clunky but workable approach)
allLRPmapsposneg = np.asarray(allLRPmapsposneg)
allLRPstackedposneg = allLRPmapsposneg[0]
allohcmapsposneg = np.asarray(allohcmapsposneg)
allohcstackedposneg = allohcmapsposneg[0]
allohc0posneg = np.asarray(allohc0posneg)
allohc0stackedposneg = allohc0posneg[0]
allohctransposneg = np.asarray(allohctransposneg)
allohctransstackedposneg = allohctransposneg[0]
allLRPmapsnegpos = np.asarray(allLRPmapsnegpos)
allLRPstackednegpos = allLRPmapsnegpos[0]
allohcmapsnegpos = np.asarray(allohcmapsnegpos)
allohcstackednegpos = allohcmapsnegpos[0]
allohc0negpos = np.asarray(allohc0negpos)
allohc0stackednegpos = allohc0negpos[0]
allohctransnegpos =
|
np.asarray(allohctransnegpos)
|
numpy.asarray
|
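# A compact illustrative sketch (not the loop above) of the phase-length idea: for each time
# step, count how many steps remain until the binary PDO phase next flips (NaN if it never
# flips within the record).
import numpy as np

def steps_to_next_flip(phase):
    phase = np.asarray(phase)
    out = np.full(len(phase), np.nan)
    flips = np.flatnonzero(np.diff(phase) != 0) + 1   # indices where the phase changes
    for start in range(len(phase)):
        later = flips[flips > start]
        if later.size:
            out[start] = later[0] - start
    return out

print(steps_to_next_flip([1, 1, 0, 0, 0, 1]))  # [ 2.  1.  3.  2.  1. nan]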
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
import numpy as np
import subprocess
from mcu.utils import misc
from mcu.cell import utils as cell_utils
from mcu.cell import spg_wrapper
##################### EXPORT CIF, XSF, POSCAR ###########################
def write_poscar(cell, filename=None):
if filename is None: filename = 'POSCAR_mcu'
comment = misc.date()
lattice = np.asarray(cell[0])
positions = np.asarray(cell[1])
atoms = np.asarray(cell[2])
idx = np.argsort(atoms)
atoms = atoms[idx]
symbol = cell_utils.convert_atomtype(atoms)
irred_symbol, count = misc.unique(symbol)
positions = positions[idx]
with open(filename, 'w') as f:
f.write('Generated by mcu on ' + comment + '\n')
f.write('1.0\n')
for i in range(3):
f.write(' %15.10f %15.10f %15.10f\n' % (lattice[i,0],lattice[i,1],lattice[i,2]))
for symb in irred_symbol:
f.write(' %s ' % (symb))
f.write('\n')
for num_atom in count:
f.write(' %d ' % (num_atom))
f.write('\n')
f.write('Direct\n')
for atom in positions:
f.write(' %15.10f %15.10f %15.10f\n' % (atom[0],atom[1],atom[2]))
def write_xsf(cell, filename=None):
if filename is None: filename = 'mcu'
comment = misc.date()
lattice = np.asarray(cell[0])
positions = np.asarray(cell[1])
abs_positions = positions.dot(lattice)
atoms = np.asarray(cell[2])
natom = len(atoms)
symbol = cell_utils.convert_atomtype(atoms)
with open(filename + '.xsf', 'w') as f:
f.write('Generated by mcu on ' + comment + '\n')
f.write('CRYSTAL\n')
f.write('PRIMVEC\n')
for i in range(3):
f.write(' %15.10f %15.10f %15.10f\n' % (lattice[i,0],lattice[i,1],lattice[i,2]))
f.write('CONVVEC\n')
for i in range(3):
f.write(' %15.10f %15.10f %15.10f\n' % (lattice[i,0],lattice[i,1],lattice[i,2]))
f.write('PRIMCOORD\n')
f.write('%3d %3d\n' % (natom, 1))
for atom in range(natom):
f.write(' %s %15.10f %15.10f %15.10f\n' % (symbol[atom], abs_positions[atom][0], abs_positions[atom][1], abs_positions[atom][2]))
def write_cif(cell, spacegroup, equi_atoms, symopt, filename=None):
if filename is None: filename = 'mcu'
comment = misc.date()
lattice = np.asarray(cell[0])
lattice = cell_utils.convert_lattice(lattice)
positions =
|
np.asarray(cell[1])
|
numpy.asarray
|
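# A hedged usage sketch for write_poscar above with a toy rock-salt-like cell; it assumes the
# mcu helpers (misc, cell_utils) are importable and that atoms are given as atomic numbers
# (assumed input convention).
import numpy as np

lattice = 3.0 * np.eye(3)                        # 3x3 lattice vectors (angstrom)
positions = np.array([[0.0, 0.0, 0.0],
                      [0.5, 0.5, 0.5]])          # fractional coordinates
atoms = np.array([11, 17])                       # Na, Cl
write_poscar((lattice, positions, atoms), filename='POSCAR_example')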
"""
Test prior module
"""
import pytest
import numpy as np
from dimsm.prior import GaussianPrior, UniformPrior, extend_info
def ad_jacobian(fun, x, out_shape=(), eps=1e-10):
c = x + 0j
g = np.zeros((*out_shape, *x.shape))
if len(out_shape) == 0:
for i in np.ndindex(x.shape):
c[i] += eps*1j
g[i] = fun(c).imag/eps
c[i] -= eps*1j
else:
for j in np.ndindex(out_shape):
for i in np.ndindex(x.shape):
c[i] += eps*1j
g[j][i] = fun(c)[j].imag/eps
c[i] -= eps*1j
return g
@pytest.mark.parametrize(("info", "size"),
[(np.ones((2, 2, 2)), 2),
(np.ones(3), 4),
(np.ones(3), 1)])
def test_extend_info_error(info, size):
with pytest.raises(ValueError):
extend_info(info, size)
@pytest.mark.parametrize("info", [np.ones(1), np.ones((1, 1))])
@pytest.mark.parametrize("size", [1, 2, 3])
def test_extend_info(info, size):
extended_info = extend_info(info, size)
assert extended_info.shape[0] == size
if info.ndim == 1:
assert
|
np.allclose(extended_info, info[0])
|
numpy.allclose
|
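# A brief sketch of the complex-step trick ad_jacobian above relies on:
# Im[f(x + i*eps)]/eps approximates f'(x) without subtractive cancellation.
import numpy as np

def complex_step_derivative(fun, x, eps=1e-10):
    return fun(x + 1j * eps).imag / eps

x = 0.7
assert np.isclose(complex_step_derivative(np.sin, x), np.cos(x))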
if __name__ == '__main__':
import lightkurve as lk
import astropy.table as astab
import pandas as pd
import numpy as np
import astropy
from astropy.coordinates import SkyCoord
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from tqdm import tqdm
import warnings
import astropy.table as astab
from astropy.io import fits
warnings.filterwarnings('ignore',
message="WARNING (theano.tensor.opt): Cannot construct a scalar test value from a test value with no size:"
)
import pickle as pk
import pymc3 as pm
import pymc3_ext as pmx
import aesara_theano_fallback.tensor as tt
from celerite2.theano import terms, GaussianProcess
from pymc3.util import get_default_varnames, get_untransformed_name, is_transformed_name
import theano
import exoplanet as xo
import arviz as az
from corner import corner
from scipy.signal import savgol_filter
import wquantiles
dd = "/Users/kjaehnig/CCA_work/GAT/"
def docs_setup():
"""Set some environment variables and ignore some warnings for the docs"""
import logging
import warnings
# Remove when Theano is updated
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
# Remove when arviz is updated
warnings.filterwarnings("ignore", category=UserWarning)
logger = logging.getLogger("theano.gof.compilelock")
logger.setLevel(logging.ERROR)
logger = logging.getLogger("theano.tensor.opt")
logger.setLevel(logging.ERROR)
logger = logging.getLogger("exoplanet")
logger.setLevel(logging.DEBUG)
docs_setup()
from helper_functions import *
# TIC_TARGET = "TIC 28159019"
# COVARIANCE_USE_TYPE = 'isochrones'
def load_construct_run_pymc3_model(TIC_TARGET='TIC 20215452', COVARIANCE_USE_TYPE='diagonal',
mult_factor=1,
Ntune=1000, Ndraw=500, chains=4, sparse_factor=5, nsig=5):
pymc3_model_dict = load_all_data_for_pymc3_model(TIC_TARGET,
sparse_factor=sparse_factor, nsig=nsig)
file = open(f"/Users/kjaehnig/CCA_work/GAT/pymc3_data_dicts/{TIC_TARGET.replace(' ','_')}_sf{int(sparse_factor)}_pymc3_data_dict",'wb')
pk.dump(pymc3_model_dict,file)
file.close()
return
tic_dest, fig_dest = check_for_system_directory(TIC_TARGET, return_directories=True)
texp = pymc3_model_dict['texp']
x_rv, y_rv, yerr_rv = pymc3_model_dict['x_rv'], pymc3_model_dict['y_rv'], pymc3_model_dict['yerr_rv']
x, y, yerr = pymc3_model_dict['x'], pymc3_model_dict['y'], pymc3_model_dict['yerr']
lk_sigma = pymc3_model_dict['lk_sigma']
lit_period, lit_t0, lit_tn = pymc3_model_dict['lit_period'], pymc3_model_dict['lit_t0'], pymc3_model_dict['lit_tn']
Ntrans, ecosw_tv = pymc3_model_dict['Ntrans'], pymc3_model_dict['ecosw_tv']
print('ecosw_tv: ', ecosw_tv)
if abs(ecosw_tv) > 0.05:
ecosw_tv = np.sign(ecosw_tv) * 0.05
MV_mu, MV_cov = pymc3_model_dict['isores']['logM1Q']
print(MV_cov)
print(f"positive semi definiteness: {is_pos_def(MV_cov)}")
DIM = MV_cov.shape[0]
if COVARIANCE_USE_TYPE == 'diagonal':
diag_cov = np.zeros_like(MV_cov)
diag_cov[np.diag_indices(MV_cov.shape[0])] = 1.0
pymc3_mu, pymc3_cov = MV_mu, diag_cov
suffix = 'diagonal_MV_prior'
if COVARIANCE_USE_TYPE == 'isochrones':
pymc3_mu, pymc3_cov = MV_mu, MV_cov
suffix = 'isochrones_MV_prior'
if COVARIANCE_USE_TYPE == 'isotropized':
eVa, eVe = np.linalg.eig(MV_cov)
R,S = eVe, np.diag(np.sqrt(eVa))
T = R.dot(S).T
Z = pymc3_model_dict['isores']['MVdat'].T.dot(np.linalg.inv(T))
sphr_cov = np.cov(Z.T)
sphr_mu = np.mean(Z.T, axis=-1)
pymc3_mu, pymc3_cov = sphr_mu, sphr_cov
suffix = 'isotropized_isochrones_MV_prior'
if COVARIANCE_USE_TYPE == 'diagonalized':
diagONLYcov = np.zeros_like(MV_cov)
diagONLYcov[np.diag_indices(MV_cov.shape[0])] = MV_cov[np.diag_indices(MV_cov.shape[0])]
pymc3_mu, pymc3_cov = MV_mu, diagONLYcov * mult_factor
if mult_factor > 1:
suffix = f'diagonalized{int(mult_factor)}_isochrones_MV_prior'
else:
suffix = 'diagonalized_isochrones_MV_prior'
print(pymc3_cov)
print(f"positive semi definiteness: {is_pos_def(pymc3_cov)}")
logr1_mu, logr1_sig = pymc3_model_dict['isores']['logR1']
logk_mu, logk_sig = pymc3_model_dict['isores']['logk']
logs_mu, logs_sig = pymc3_model_dict['isores']['logs']
trv = np.linspace(x_rv.min(), x_rv.max(), 5000)
tlc = np.linspace(x.min(), x.max(), 5000)
# rvK = xo.estimate_semi_amplitude(bls_period, x_rv, y_rv*u.km/u.s, yerr_rv*u.km/u.s, t0s=bls_t0)[0]
# print(rvK)
# mask = x < 400
def build_model(mask=None, start=None, plot_MAP_diagnostic_rv_curves=False, suffix=None,
pymc3_model_dict=None):
if mask is None:
mask = np.ones(len(x), dtype='bool')
with pm.Model() as model:
# Systemic parameters
mean_lc = pm.Normal("mean_lc", mu=0.0, sd=10.0)
mean_rv = pm.Normal("mean_rv", mu=0.0, sd=50.0)
u1 = xo.QuadLimbDark("u1")
u2 = xo.QuadLimbDark("u2")
# # Parameters describing the primary
# log_M1 = pm.Normal("log_M1",
# mu=np.log(isoM1), sigma=3.0,
# testval=np.log(isoM1))
# # log_R1 = pm.Uniform('log_R1', lower=np.log(1e-5), upper=np.log(1000))
# log_R1 = pm.Normal("log_R1",
# mu=np.log(isoR1), sigma=3.0,
# testval=np.log(isoR1))
BigPrior = pm.MvNormal("BigPrior",
mu = pymc3_mu,
cov = pymc3_cov,
shape=pymc3_cov.shape[0],
testval=pymc3_mu
)
# M1R1_prior = pm.MvNormal('M1R1_prior',
# mu = M1R1_mu,
# cov = M1R1_cov,
# shape = (2),
# testval = M1R1_mu
# )
if COVARIANCE_USE_TYPE=='isotropized':
descaled_BigPrior = tt.dot(BigPrior,T)
M1 = pm.Deterministic("M1", tt.exp(descaled_BigPrior[0].squeeze()))
R1 = pm.Deterministic("R1", tt.exp(descaled_BigPrior[1].squeeze()))
q = pm.Deterministic("q", tt.exp(descaled_BigPrior[2].squeeze()))
s = pm.Deterministic("s", tt.exp(descaled_BigPrior[3].squeeze()))
else:
M1 = pm.Deterministic("M1", tt.exp(BigPrior.T[0].squeeze()))
R1 = pm.Deterministic("R1", tt.exp(BigPrior.T[1].squeeze()))
q = pm.Deterministic("q", tt.exp(BigPrior.T[2].squeeze()))
s = pm.Deterministic("s", tt.exp(BigPrior.T[3].squeeze()))
# Secondary ratios
log_k = pm.Normal("log_k", mu=logk_mu, sigma=1, testval=logk_mu) # radius ratio
# log_q = pm.Normal("log_q", mu=logq_mu, sigma=logq_sig, testval=logq_mu) # mass ratio
# log_s = pm.Normal("log_s", mu=logs_mu, sigma=1, testval = logs_mu) # surface brightness ratio
# log_R1 = pm.Normal("log_R1", mu=logr1_mu, sigma=logr1_sig, testval = logr1_mu)
# ratio_prior = pm.MvNormal("ratio_prior",
# mu = ratio_mu,
# cov = ratio_cov,
# shape = (2),
# testval = ratio_mu
# )
# M1 = pm.Deterministic("M1", tt.exp(BigPrior.T[0].squeeze()))
# R1 = pm.Deterministic("R1", tt.exp(BigPrior.T[1].squeeze()))
# # k = pm.Deterministic("k", tt.exp(ratio_prior.T[0].squeeze()))
# q = pm.Deterministic("q", tt.exp(BigPrior.T[2].squeeze()))
# s = pm.Deterministic("s", tt.exp(BigPrior.T[3].squeeze()))
# R1 = pm.Deterministic("R1", tt.exp(log_R1))
# s = pm.Deterministic("s", tt.exp(log_s))
k = pm.Deterministic("k", tt.exp(log_k))
# Prior on flux ratio
# pm.Normal(
# "flux_prior",
# mu=0.5,
# sigma=0.25,
# observed=tt.exp(2 * log_k + log_s),
# )
# Parameters describing the orbit
b = xo.ImpactParameter("b", ror=tt.exp(log_k))
# log_period = pm.Uniform(
# "log_period",
# lower=np.log(0.1),
# upper=np.log(3*lit_period),
# testval=np.log(lit_period)
# )
# log_period = pm.Normal("log_period", mu=np.log(lit_period), sigma=5.0)
# period = pm.Deterministic("period", tt.exp(log_period))
t0 = pm.Normal("t0", mu=lit_t0, sigma=1.0)
tn = pm.Normal("tn", mu=lit_tn, sigma=1.0)
period = pm.Deterministic("period", (tn - t0) / Ntrans)
# Parameters describing the eccentricity: ecs = [e * cos(w), e * sin(w)]
# ecosw_tv=0.01
sqrt_ecosw = np.sign(ecosw_tv) * np.sqrt(abs(ecosw_tv))
# ecs holds sqrt(e) * [cos(w), sin(w)] even though the variable name is still ecs
ecs = pmx.UnitDisk("ecs", testval=np.array([sqrt_ecosw, 0.0]))
# remove sqrt from ecc, rewrite as ecosW and esinW
ecc = pm.Deterministic("ecc", tt.sum(ecs ** 2))
omega = pm.Deterministic("omega", tt.arctan2(ecs[1], ecs[0]))
# Build the orbit
# R2 = pm.Deterministic("R2", tt.exp(log_k) * R1)
# M2 = pm.Deterministic("M2", tt.exp(log_q) * M1)
R2 = pm.Deterministic("R2", tt.exp(log_k) * R1)
M2 = pm.Deterministic("M2", q * M1)
orbit = xo.orbits.KeplerianOrbit(
period=period,
t0=t0,
ecc=ecc,
omega=omega,
b=b,
r_star=R1,
m_star=M1,
m_planet=M2,
)
# Track some other orbital elements
pm.Deterministic("incl", orbit.incl)
pm.Deterministic("a", orbit.a)
# Noise model for the light curve
sigma_lc = pm.InverseGamma(
"sigma_lc",
testval= np.mean(yerr),
**pmx.estimate_inverse_gamma_parameters(0.1,5.0)
)
sigma_gp = pm.InverseGamma(
"sigma_gp",
testval= lk_sigma,
**pmx.estimate_inverse_gamma_parameters(0.1,10.0),
)
rho_gp = pm.InverseGamma(
"rho_gp",
testval= 2.0 * lit_period,
**pmx.estimate_inverse_gamma_parameters(0.1,10.0)
)
# sigma_lc = np.mean(yerr)
# sigma_gp = lk_sigma
# rho_gp = 0.25*lit_period
print(sigma_lc, sigma_gp, rho_gp)
kernel_lc = terms.SHOTerm(sigma=sigma_gp, rho=rho_gp, Q=1.0 / 3.)
# # Noise model for the radial velocities
# sigma_rv = pm.InverseGamma(
# "sigma_rv",
# testval=1.0,
# **pmx.estimate_inverse_gamma_parameters(0.1, 15.0)
# )
# sigma_rv_gp = pm.InverseGamma(
# "sigma_rv_gp",
# testval=1.0,
# **pmx.estimate_inverse_gamma_parameters(0.1, 15.0)
# )
# rho_rv_gp = pm.InverseGamma(
# "rho_rv_gp",
# testval=2.0,
# **pmx.estimate_inverse_gamma_parameters(0.1, 25.0)
# )
# kernel_rv = terms.SHOTerm(sigma=sigma_rv_gp, w0=rho_rv_gp, Q=1.0 / 3.)
# Set up the light curve model
lc = xo.SecondaryEclipseLightCurve(u1, u2, s)
def model_lc(t):
return (
mean_lc
+ 1e3
* lc.get_light_curve(orbit=orbit, r=R2, t=t, texp=texp)[:,0]
)
# pm.Deterministic(
# "lc_pred",
# model_lc(tlc)
# )
# Condition the light curve model on the data
gp_lc = GaussianProcess(kernel_lc, t=x[mask], yerr=sigma_lc)
gp_lc.marginal("obs_lc", observed=y[mask] - model_lc(x[mask]))
# gp_pred = pm.Deterministic("gp_pred",gp_lc.predict(y[mask] - model_lc(x[mask])))
# # Set up the radial velocity model
log_sigma_rv = pm.Normal(
"log_sigma_rv", mu=np.log(np.median(yerr_rv)), sd=10., testval=np.log(np.median(yerr_rv))
)
def model_rv(t):
return orbit.get_radial_velocity(t, output_units=u.km/u.s) + mean_rv
rv_model = model_rv(x_rv)
# def model_K(t, period, t0):
# rvs = model_rv(t)
# modK = xo.estimate_semi_amplitude(period, t, rvs, yerr_rv, t0).to(u.km/u.s)
# return modK
# rv_pred = pm.Deterministic('rv_pred', model_rv(trv))
err = tt.sqrt(yerr_rv**2. + tt.exp(2*log_sigma_rv))
pm.Normal("obs",mu=rv_model, sd=err, observed=y_rv)
# ## compute phased RV signal
# n = 2.*np.pi * (1./period)
# phi = (t0 * n) - omega
# phase = np.linspace(0, 1, 500)
# M_pred = 2 * np.pi * phase - (phi)
# f_pred = xo.orbits.get_true_anomaly(M_pred, ecc + tt.zeros_like(M_pred))
# # K = xo.estimate_semi_amplitude(period, t, rv_model, yerr_rv, t0).to(u.km/u.s)
# K = (tt.max(rv_model) - tt.min(rv_model)) / 2.
# rvphase = pm.Deterministic(
# "rvphase", K * (tt.cos(omega) * (tt.cos(f_pred) + ecc) - tt.sin(omega) * tt.sin(f_pred))
# )
# Optimize the logp
if start is None:
start = model.test_point
extras = dict(
x=x[mask],
y=y[mask],
x_rv = x_rv,
y_rv = y_rv,
yerr_rv = yerr_rv,
model_lc=model_lc,
model_rv=model_rv,
gp_lc_pred=gp_lc.predict(y[mask] - model_lc(x[mask])),
)
# First the RV parameters
print(model.check_test_point())
opti_logp = []
filename_list = []
filename_base = f"{fig_dest}{TIC_TARGET.replace(' ','_')}_{suffix}"
plot = plot_MAP_rv_curve_diagnostic_plot(model, start, extras, mask,
title=' after start point opt step',
filename=filename_base + ' after start point opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(start, log_k, return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title='after log_k opt step',
filename=filename_base + 'after log_k opt step.png'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, b, return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after b opt step',
filename = filename_base + ' after b opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, ecs, return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title='model after [ecs] opt step',
filename=filename_base + ' model after [ecs] opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
ecs_logp = -info_['fun']
# map_soln, info_ = pmx.optimize(map_soln, log_s, return_info=True)
# plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask, title=f'RVs after log_s opt step',RETURN_FILENAME=True)
# filename_list.append(plot)
# map_soln, info_ = pmx.optimize(map_soln, log_R1, return_info=True)
# plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask, title=f'RVs after log_R1 opt step',RETURN_FILENAME=True)
# filename_list.append(plot)
# map_soln, info_ = pmx.optimize(map_soln, ratio_prior, return_info=True)
# plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask, title=f'RVs after ratio prior opt step', RETURN_FILENAME=True)
# filename_list.append(plot)
# map_soln, info_ = pmx.optimize(map_soln, log_q, return_info=True)
# plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask, title=f'RVs after log_q opt step', RETURN_FILENAME=True)
# filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, [t0,tn], return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title='after [tn,t0] opt step',
filename = filename_base + ' after [tn,t0] opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, [u1,u2], return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after [u1, u2] opt step',
filename=filename_base + ' after [u1, u2] opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, [sigma_lc, sigma_gp, rho_gp], return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after GP params opt step',
filename=filename_base + ' after GP params opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, [mean_rv,mean_lc], return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after [mean_lc, mean_rv] opt step',
filename=filename_base+' after [mean_lc, mean_rv] opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
# map_soln, info_ = pmx.optimize(map_soln, M1R1_prior, return_info=True)
# plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask, title=f'RVs after M1R1prior opt step', RETURN_FILENAME=True)
# filename_list.append(plot)
if ~np.isfinite(ecs_logp):
map_soln, info_ = pmx.optimize(map_soln, ecs, return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after [ecs] opt step',
filename=filename_base + ' after [ecs] opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, log_sigma_rv, return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after [log_sigma_rv] opt step',
filename=filename_base + ' after [log_sigma_rv] opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln, BigPrior, return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after BigPriors opt step',
filename=filename_base + ' after BigPriors opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
map_soln, info_ = pmx.optimize(map_soln,
# [log_sigma_rv, rho_gp, sigma_gp, sigma_lc, ecs, tn, t0, b, log_k, log_s, BigPrior, u2, u1, mean_rv, mean_lc],
return_info=True)
plot = plot_MAP_rv_curve_diagnostic_plot(model, map_soln, extras, mask,
title=' after final opt step',
filename=filename_base+' after final opt step'.replace(' ','_'),
RETURN_FILENAME=True, pymc3_model_dict=pymc3_model_dict)
filename_list.append(plot)
return model, map_soln, extras, start, opti_logp, filename_list
model, map_soln, extras, start, opti_logp, filename_list = build_model(
plot_MAP_diagnostic_rv_curves=True, suffix=suffix,
pymc3_model_dict=pymc3_model_dict)
import imageio
images = []
filename_list.append(filename_list[-1])
filename_list.append(filename_list[-1])
filename_list.append(filename_list[-1])
filename_list.append(filename_list[-1])
for filename in filename_list:
images.append(imageio.imread(filename))
imageio.mimsave(tic_dest+f"/{TIC_TARGET.replace(' ','_')}_{suffix}__diagnostic_movie_test.gif", images, fps=0.75)
print("#" * 50)
print("#"*19 +" FINISHED " + "#"*19)
print("#"*50)
with model:
mod = pmx.eval_in_model(
extras['model_lc'](extras['x']) + extras['gp_lc_pred'],
map_soln,
)
resid = y - mod
rms = np.sqrt(np.median(resid ** 2))
mask = np.abs(resid) < 5 * rms
plt.figure(figsize=(10, 5))
plt.plot(x, resid, "k", label="data")
plt.plot(x[~mask], resid[~mask], "xr", label="outliers")
plt.axhline(0, color="#aaaaaa", lw=1)
plt.ylabel("residuals [ppt]")
plt.xlabel("time [days]")
plt.legend(fontsize=12, loc=3)
_ = plt.xlim(x.min(), x.max())
plt.savefig(fig_dest + f"{TIC_TARGET}_{suffix}_sigma_cliped_lightcurve_plot.png", dpi=150, bbox_inches='tight')
plt.close()
print("#" * 50)
print("Starting 2nd round of MAP optimizations.")
print("#" * 50)
model, map_soln, extras, start, opti_logp,_ = build_model(
mask, map_soln, plot_MAP_diagnostic_rv_curves=False,
suffix=suffix+"_2nd_rnd",
pymc3_model_dict=None)
# ###### quick fix to save x and y to the main file dump #######
# file = open(f"/Users/kjaehnig/CCA_work/GAT/pymc3_models/{TIC_TARGET}_pymc3_Nt{Ntune}_Nd{Ndraw}_Nc{chains}_{suffix}.pickle",'rb')
# indres = pk.load(file)
# file.close()
# indres['lcdat'] = {'x':x[mask],'y':y[mask],'yerr':yerr[mask]}
# indres['rvdat'] = {'x_rv':x_rv,'y_rv':y_rv,'yerr_rv':yerr_rv}
# with model:
# gp_pred = (
# pmx.eval_in_model(extras["gp_lc_pred"], map_soln)
# )
# indres['gp_pred'] = gp_pred
# file = open(f"/Users/kjaehnig/CCA_work/GAT/pymc3_models/{TIC_TARGET}_pymc3_Nt{Ntune}_Nd{Ndraw}_Nc{chains}_{suffix}.pickle",'wb')
# pk.dump(indres, file)
# file.close()
# print("DONE")
# return
########
# Ntune = 1000
# Ndraw = 500
# chains = 4
random_seeds = [int(f'26113668{ii+1}') for ii in range(chains)]
print(random_seeds)
with model:
trace = pm.sample(
tune=Ntune,
draws=Ndraw,
start=map_soln,
# Parallel sampling runs poorly or crashes on macos
cores=chains,
chains=chains,
target_accept=0.99,
return_inferencedata=True,
random_seed=random_seeds,##[261136681, 261136682,261136683,261136684],#261136685, 261136686,261136687,261136688],
init='jitter+adapt_full'
)
def compute_value_in_post(model, idata, target, size=None):
# Get the names of the untransformed variables
vars = get_default_varnames(model.unobserved_RVs, True)
names = list(sorted(set([
get_untransformed_name(v.name)
if is_transformed_name(v.name)
else v.name
for v in vars
])))
# Compile a function to compute the target
func = theano.function([model[n] for n in names], target, on_unused_input="ignore")
# Call this function for a bunch of values
flat_samps = idata.posterior.stack(sample=("chain", "draw"))
if size is None:
indices = np.arange(len(flat_samps.sample))
else:
indices = np.random.randint(len(flat_samps.sample), size=size)
return [func(*(flat_samps[n].values[..., i] for n in names)) for i in indices]
flat_samps = trace.posterior.stack(sample=('chain', 'draw'))
rvvals = compute_value_in_post(model, trace, extras['model_rv'](trv), size=512)
lcvals = compute_value_in_post(model, trace, extras['model_lc'](tlc), size=512)
rvact = compute_value_in_post(model, trace, extras['model_rv'](x_rv), size=512)
lcact = compute_value_in_post(model, trace, extras['model_lc'](x), size=512)
# print(map_soln)
with model:
gp_pred = (
pmx.eval_in_model(extras["gp_lc_pred"], map_soln)
)
file = open(f"/Users/kjaehnig/CCA_work/GAT/pymc3_models/{TIC_TARGET}_pymc3_Nt{Ntune}_Nd{Ndraw}_Nc{chains}_{suffix}.pickle",'wb')
pk.dump({
'trace':trace,
'mask':mask,
'map_soln':map_soln,
'model':model,
'trv':trv,
'tlc':tlc,
'lcvals': lcvals,
'rvvals': rvvals,
'lcact': lcact,
'rvact': rvact,
'gp_pred': gp_pred,
'lcdat': {'x':x[mask],'y':y[mask],'yerr':yerr[mask]},
'rvdat': {'x_rv':x_rv,'y_rv':y_rv,'yerr_rv':yerr_rv}
},
file)
file.close()
p_med = flat_samps['period'].median().values
t0_med = flat_samps['t0'].median().values
mean_rv = flat_samps['mean_rv'].median().values
mean_lc = flat_samps['mean_lc'].median().values
# gp_pred = flat_samps['gp_pred'].median().values
fig, axes = plt.subplots(figsize=(10,10), ncols=1, nrows=2)
# print(flat_samps['ecc'].median())
axes[0].errorbar(fold(x_rv, p_med, t0_med),
y_rv, yerr=yerr_rv, fmt=".k")
# rvvals = indres['rvvals']
# lcvals = indres['lcvals']
t_fold = fold(trv, p_med, t0_med)
inds = np.argsort(t_fold)
pred = np.percentile(rvvals, [16, 50, 84], axis=0)
axes[0].plot(t_fold[inds], pred[1][inds], color='C1', zorder=2)
pred = np.percentile(rvvals, [16, 84], axis=0)
art = axes[0].fill_between(t_fold[inds], pred[0][inds], pred[1][inds], color='C1', alpha=0.5, zorder=1)
art.set_edgecolor("none")
pred = np.percentile(rvvals, [5, 95], axis=0)
art = axes[0].fill_between(t_fold[inds], pred[0][inds], pred[1][inds], color='C1', alpha=0.25, zorder=0)
art.set_edgecolor("none")
# pred = np.percentile(rvvals, [1, 99], axis=0)
# art = axes[0].fill_between(t_fold, pred[0], pred[1], color='C1', alpha=0.10, zorder=0)
# art.set_edgecolor("none")
# axes[0].set_ylim(-40, 40)
# axes[1].set_ylim(-40, 40)
axes[0].set_ylabel("RV [kms]")
x,y = extras['x'],extras['y']
# with model:
# gp_pred = (
# pmx.eval_in_model(extras["gp_lc_pred"], post_map_soln)
# )
axes[1].errorbar(fold(x, p_med, t0_med),
y-gp_pred, fmt=".k", ms=1, zorder=-1)
t_fold = fold(tlc, p_med, t0_med)
inds = np.argsort(t_fold)
pred = np.percentile(lcvals, [16, 50, 84], axis=0)
axes[1].plot(t_fold[inds], pred[1][inds], color='C1', zorder=2)
pred =
|
np.percentile(lcvals, [16, 84], axis=0)
|
numpy.percentile
|
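# `fold` above comes from helper_functions (not shown); a minimal sketch of a typical
# phase-fold, returning time relative to the nearest epoch (assumed convention, in days):
import numpy as np

def fold(t, period, t0):
    return (t - t0 + 0.5 * period) % period - 0.5 * period

print(fold(np.array([0.0, 1.0, 2.4]), period=2.0, t0=0.0))  # [ 0.  -1.   0.4]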
"""Functions to visualize outputs at different stages of GTSFM.
Authors: <NAME>
"""
from gtsfm.common.gtsfm_data import GtsfmData
from typing import List, Optional, Tuple
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from gtsam import Pose3
from matplotlib.axes._axes import Axes
import gtsfm.utils.images as image_utils
import gtsfm.utils.geometry_comparisons as comp_utils
from gtsfm.common.image import Image
from gtsfm.common.keypoints import Keypoints
COLOR_RED = (255, 0, 0)
COLOR_GREEN = (0, 255, 0)
def set_axes_equal(ax: Axes):
"""
Make axes of a 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc. This is one
possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Ref: https://github.com/borglab/gtsam/blob/develop/python/gtsam/utils/plot.py#L13
Args:
ax: axis for the plot.
"""
# get the min and max value for each of (x, y, z) axes as 3x2 matrix.
# This gives us the bounds of the minimum volume cuboid encapsulating all
# data.
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
# find the centroid of the cuboid
centroid = np.mean(limits, axis=1)
# pick the largest edge length for this cuboid
largest_edge_length = np.max(
|
np.abs(limits[:, 1] - limits[:, 0])
|
numpy.abs
|
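# The snippet above is cut off mid-function; a typical completion (assumed, in the spirit of
# the gtsam reference it cites) sets every axis to centroid +/- half of the largest edge:
import numpy as np

def set_axes_equal_sketch(ax):
    limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
    centroid = np.mean(limits, axis=1)
    radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
    ax.set_xlim3d([centroid[0] - radius, centroid[0] + radius])
    ax.set_ylim3d([centroid[1] - radius, centroid[1] + radius])
    ax.set_zlim3d([centroid[2] - radius, centroid[2] + radius])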
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 16:41:20 2020
@author: petrapoklukar
"""
import numpy as np
import h5py
import os
import gin.tf
@gin.configurable("split_train_and_validation_per_model",
blacklist=["dataset_name", "model_name"])
def create_split_train_and_validation_per_model(dataset_name,
model_name,
random_seed=gin.REQUIRED,
unit_labels=False):
""" Randomly splits the model split into smaller datasets of different
sizes.
Args:
filename: name of the file to split further
"""
if model_name:
model_name = '_{0}_{1}'.format(model_name, str(random_seed))
random_state = np.random.RandomState(random_seed)
SHAPES3D_PATH = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "3dshapes",
dataset_name + ".h5")
dataset_split = h5py.File(SHAPES3D_PATH, 'r')
print(dataset_split.keys())
images_split = dataset_split['images'][()]
labels_split = dataset_split['labels'][()]
indices_split = dataset_split['indices'][()]
dataset_size = len(images_split)
ims = np.array(images_split)
labs = np.array(labels_split)
inds = np.array(indices_split)
if unit_labels:
labels_min = np.array([0., 0., 0., 0.75, 0., -30.])
labels_max = np.array([0.9, 0.9, 0.9, 1.25, 3., 30.])
labels_split = (labels_split - labels_min)/(labels_max - labels_min)
labs = labels_split  # keep the array written to disk below in sync with the rescaled labels
assert(np.min(labels_split) == 0 and np.max(labels_split) == 1)
all_local_indices = random_state.choice(dataset_size, dataset_size, replace=False)
random_state.shuffle(all_local_indices)
splitratio = int(dataset_size * 0.85)
train_local_indices = all_local_indices[:splitratio]
test_local_indices = all_local_indices[splitratio:]
print('Writing files')
for indices, split in list(zip([train_local_indices, test_local_indices],
['_train', '_valid'])):
SPLIT_SHAPES3D_PATH = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "3dshapes",
dataset_name + model_name + split + ".h5")
assert(ims[indices].shape[0] == indices.shape[0])
assert(labs[indices].shape[0] == indices.shape[0])
assert(inds[indices].shape[0] == indices.shape[0])
hf = h5py.File(SPLIT_SHAPES3D_PATH, 'w')
hf.create_dataset('images', data=ims[indices])
hf.create_dataset('labels', data=labs[indices])
hf.create_dataset('indices', data=inds[indices])
hf.close()
dataset_split.close()
@gin.configurable("split_train_and_validation",
blacklist=["dataset_name", "model_name"])
def create_split_train_and_validation(dataset_name,
model_name,
random_seed=gin.REQUIRED,
unit_labels=False):
""" Randomly splits the dataset split into train and validation
splits.
Args:
filename: name of the file to split further
"""
del model_name
random_state = np.random.RandomState(random_seed)
SHAPES3D_PATH = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "3dshapes",
dataset_name + ".h5")
dataset_split = h5py.File(SHAPES3D_PATH, 'r')
print(dataset_split.keys())
images_split = dataset_split['images'][()]
labels_split = dataset_split['labels'][()]
indices_split = dataset_split['indices'][()]
dataset_size = len(images_split)
ims = np.array(images_split)
labs = np.array(labels_split)
inds = np.array(indices_split)
if unit_labels:
labels_min = np.array([0., 0., 0., 0.75, 0., -30.])
labels_max =
|
np.array([0.9, 0.9, 0.9, 1.25, 3., 30.])
|
numpy.array
|
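# A tiny sketch of the 85/15 split logic used above; a seeded permutation is equivalent to
# choice(..., replace=False) followed by shuffle (sizes and seed here are illustrative):
import numpy as np

rng = np.random.RandomState(0)
dataset_size = 100
perm = rng.permutation(dataset_size)
split = int(dataset_size * 0.85)
train_idx, valid_idx = perm[:split], perm[split:]
assert np.intersect1d(train_idx, valid_idx).size == 0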
from stl import mesh
import numpy as np
import numpy.linalg as la
from scipy.spatial import Delaunay
from scipy.spatial import ConvexHull
import timeit
import copy
import math
import random
np.seterr(divide='ignore', invalid='ignore')
def direction_to_bone(cog,bv): # cog is a 2-dim vector (x, y)
center=cog
dist_list=[]
for i in range(len(bv)):
dist = la.norm(center-bv[i])
dist_list.append([dist,i])
dist_list.sort()
p1,p2=np.array(bv[dist_list[0][1]]),np.array(bv[dist_list[1][1]])
d=abs(np.cross(p2-p1,center-p1)/la.norm(p2-p1))
original=p1-p2
dir_vector1=np.array([original[1],-original[0]])
dir_vector1=dir_vector1/la.norm(dir_vector1)
dir_vector2=np.array([-original[1],original[0]])
dir_vector2=dir_vector2/la.norm(dir_vector2)
if abs(np.cross(p2-p1,center+dir_vector1*d-p1)/la.norm(p2-p1)) < abs(np.cross(p2-p1,center+dir_vector2*d-p1)/la.norm(p2-p1)):
dir_vector = dir_vector1
center=center+dir_vector1*d
else:
dir_vector = dir_vector2
center=center+dir_vector2*d
dir_vector=dir_vector.tolist()
dir_vector.append(0.0)
dir_vector=np.array(dir_vector)
dir_list=[dir_vector,np.array([dir_vector[1],-dir_vector[0],0.0]),np.array([-dir_vector[1],dir_vector[0],0.0])]
return d,dir_list
def get_tooth_list():
tooth_list=[]
for _ in range(16):
_+=1
temp = mesh.Mesh.from_file('./new teeth/case3/st'+str(_)+'.stl')
tooth_list.append(temp)
return tooth_list
def get_2d_hull(bound):
vertex=[]
for i in bound.vectors:
temp = np.matrix(i).tolist()
v1 = temp[0]
v2 = temp[1]
v3 = temp[2]
for j in [v1,v2,v3]:
tj = [j[0],j[1]]
if tj in vertex:
pass
else:
vertex.append(tj)
vertex=np.array(vertex)
hull = Delaunay(vertex)
return hull
def get_2d_vertices(tooth):
vertices=[]
for i in tooth.vectors:
temp =
|
np.matrix(i)
|
numpy.matrix
|
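# A short sketch of the 2-D point-to-line distance used in direction_to_bone above:
# |cross(p2 - p1, c - p1)| / |p2 - p1| is the perpendicular distance from c to line p1-p2.
import numpy as np
import numpy.linalg as la

p1, p2 = np.array([0.0, 0.0]), np.array([2.0, 0.0])
c = np.array([1.0, 3.0])
d = abs(np.cross(p2 - p1, c - p1)) / la.norm(p2 - p1)
print(d)  # 3.0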