''' CONFIDENTIAL
Copyright (c) 2021 <NAME>,
Department of Remote Sensing and Photogrammetry,
Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS)
PERMISSION IS HEREBY LIMITED TO FGI'S INTERNAL USE ONLY. THE CODE
MAY BE RE-LICENSED, SHARED, OR TAKEN INTO OTHER USE ONLY WITH
A WRITTEN CONSENT FROM THE HEAD OF THE DEPARTMENT.
The software is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. In no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
'''
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
try:
import pcl
from pyquaternion import Quaternion
except ImportError:
print('cannot import pcl -> change python version (python-pcl is needed for plane segmentation and ICP)')
import matplotlib.cm as cmx
from scipy.spatial import distance_matrix
from scipy.optimize import leastsq
import matplotlib
import matplotlib.animation as animation
import open3d as o3d
import glob
import cv2
import cv2.aruco as aruco
import os
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.text import Annotation
import pickle
from matplotlib.lines import Line2D
import pandas as pd
import random
from scipy.spatial import ConvexHull
from math import sqrt
from math import atan2, cos, sin, pi
from collections import namedtuple
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
from pyquaternion import Quaternion
np.set_printoptions(suppress=True)
def eulerAnglesToRotationMatrix2(theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
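# Hedged sanity check (illustrative only, not part of the original pipeline): a +90 degree
# rotation about Z should map the X axis onto the Y axis, and the result should be a proper
# rotation matrix (orthonormal, determinant +1).
def _example_rotation_sanity_check():
    R = eulerAnglesToRotationMatrix2([0, 0, np.deg2rad(90)])
    assert np.allclose(R.dot([1, 0, 0]), [0, 1, 0])
    assert np.allclose(R.dot(R.T), np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.0)
    return R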
# euler_matrix is referenced later when building self.R; assuming it refers to this local
# Euler-angles-to-rotation-matrix helper (3x3), not an external library function.
euler_matrix = eulerAnglesToRotationMatrix2
Rot_matrix = eulerAnglesToRotationMatrix2([0, 0, np.deg2rad(-90)])
InitLidar = False  # set to True to pre-rotate the LiDAR cloud by Rot_matrix
globalTrigger = True
stereoRectify = False  # set to True to rectify the stereo pair before corner detection
class Annotation3D(Annotation):
def __init__(self, s, xyz, *args, **kwargs):
Annotation.__init__(self, s, xy=(0, 0), *args, **kwargs)
self._verts3d = xyz
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.xy = (xs, ys)
Annotation.draw(self, renderer)
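# Hedged usage sketch (not called anywhere): Annotation3D pins a 2D text label to a 3D point
# by projecting the stored xyz at draw time; annotate3D() inside the class below attaches it
# to the axes in the same way.
def _example_annotation3d():
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    tag = Annotation3D('corner 0', xyz=(1.0, 2.0, 0.5), xytext=(-3, 3),
                       textcoords='offset points')
    ax.add_artist(tag)
    return fig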
def save_obj(obj, name):
with open('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
print('{}.pkl Object saved'.format(name))
def load_obj(name):
with open('/home/eugeniu/Desktop/my_data/CameraCalibration/data/saved_files/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def showErros(_3DErros, IMageNames):
print('_3DErros shape -> {}'.format(np.shape(_3DErros)))
if len(_3DErros)>1:
_3DErros = np.array(_3DErros).squeeze()
# norm_total = np.array(_3DErros[:,0]).squeeze()
norm_axis = np.array(_3DErros).squeeze() * 1000
index, bar_width = np.arange(len(IMageNames)), 0.24
fig, ax = plt.subplots()
X = ax.bar(index, norm_axis[:, 0], bar_width, label="X")
Y = ax.bar(index + bar_width, norm_axis[:, 1], bar_width, label="Y")
Z = ax.bar(index + bar_width + bar_width, norm_axis[:, 2], bar_width, label="Z")
ax.set_xlabel('images')
ax.set_ylabel('errors in mm')
ax.set_title('3D error')
ax.set_xticks(index + bar_width / 3)
ax.set_xticklabels(IMageNames)
ax.legend()
plt.show()
def triangulation(kp1, kp2, T_1w, T_2w):
"""Triangulation to get 3D points
Args:
kp1 (Nx2): keypoint in view 1 (normalized)
kp2 (Nx2): keypoints in view 2 (normalized)
T_1w (4x4): pose of view 1 w.r.t. world, i.e. T_1w (from w to 1)
T_2w (4x4): pose of view 2 w.r.t world, i.e. T_2w (from w to 2)
Returns:
X (3xN): 3D coordinates of the keypoints w.r.t world coordinate
X1 (3xN): 3D coordinates of the keypoints w.r.t view1 coordinate
X2 (3xN): 3D coordinates of the keypoints w.r.t view2 coordinate
"""
kp1_3D = np.ones((3, kp1.shape[0]))
kp2_3D = np.ones((3, kp2.shape[0]))
kp1_3D[0], kp1_3D[1] = kp1[:, 0].copy(), kp1[:, 1].copy()
kp2_3D[0], kp2_3D[1] = kp2[:, 0].copy(), kp2[:, 1].copy()
X = cv2.triangulatePoints(T_1w[:3], T_2w[:3], kp1_3D[:2], kp2_3D[:2])
X /= X[3]
X1 = T_1w[:3].dot(X)
X2 = T_2w[:3].dot(X)
return X[:3].T, X1.T, X2.T
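# Hedged usage sketch (illustrative, not called): triangulation() expects normalized keypoints
# and 4x4 world-to-camera poses. With an identity left pose and a pure 1 m baseline along X for
# the right camera, a point straight ahead at Z = 2 m should come back at roughly [0, 0, 2].
def _example_triangulation():
    T_1w = np.eye(4)
    T_2w = np.eye(4)
    T_2w[0, 3] = -1.0                                # right camera shifted 1 m along +X
    X_gt = np.array([[0.0, 0.0, 2.0]])               # ground-truth world point
    kp1 = X_gt[:, :2] / X_gt[:, 2:]                  # normalized keypoint in view 1
    kp2 = (X_gt[:, :2] + T_2w[:2, 3]) / X_gt[:, 2:]  # normalized keypoint in view 2
    X, X1, X2 = triangulation(kp1, kp2, T_1w, T_2w)
    assert np.allclose(X, X_gt, atol=1e-6)
    return X, X1, X2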
def triangulate(R1,R2,t1,t2,K1,K2,D1,D2, pts1, pts2):
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
_3d_points = []
for i,point in enumerate(pts1):
point3D = cv2.triangulatePoints(P1, P2, pts1[i], pts2[i]).T
point3D = point3D[:, :3] / point3D[:, 3:4]
_3d_points.append(point3D)
print('Triangulate _3d_points -> {}'.format(np.shape(_3d_points)))
return np.array(_3d_points).squeeze()
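# Hedged sketch (illustrative): triangulate() above builds each projection matrix as
# P = K [R.T | -R.T t], i.e. it treats (R, t) as the camera pose expressed in world
# coordinates and inverts it. A standalone helper with the same convention:
def _example_projection_matrix(K, R, t):
    Rt = np.hstack([R.T, -R.T.dot(np.asarray(t).reshape(3, 1))])  # world -> camera, 3 x 4
    return K.dot(Rt)                                              # 3 x 4 projection matrix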
def mai(R1,R2,t1,t2,imagePoint1,imagePoint2, K2=None,K1=None, D2=None,D1=None):
# Set up two cameras near each other
if K1 is None:
K1 = K2 = np.array([  # shared default intrinsics for both views when none are given
[718.856, 0., 607.1928],
[0., 718.856, 185.2157],
[0., 0., 1.],
])
R1 = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
])
R2 = np.array([
[0.99999183, -0.00280829, -0.00290702],
[0.0028008, 0.99999276, -0.00257697],
[0.00291424, 0.00256881, 0.99999245]
])
t1 = np.array([[0.], [0.], [0.]])
t2 = np.array([[-0.02182627], [0.00733316], [0.99973488]])
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
point3D = cv2.triangulatePoints(P1, P2, imagePoint1, imagePoint2).T
point3D = point3D[:, :3] / point3D[:, 3:4]
print('Triangulate point3D -> {}'.format(point3D))
# Reproject back into the two cameras
rvec1, _ = cv2.Rodrigues(R1.T) # Change
rvec2, _ = cv2.Rodrigues(R2.T) # Change
p1, _ = cv2.projectPoints(point3D, rvec1, -t1, K1, distCoeffs=D1) # Change
p2, _ = cv2.projectPoints(point3D, rvec2, -t2, K2, distCoeffs=D2) # Change
# measure the difference between the original image points and the reprojected image points
reprojection_error1 = np.linalg.norm(imagePoint1 - p1[0, :])
reprojection_error2 = np.linalg.norm(imagePoint2 - p2[0, :])
print('difference between original image point and reprojected image point')
print(reprojection_error1, reprojection_error2)
return p1,p2
class PointCloud_filter(object):
def __init__(self, file, img_file=None, img_file2=None, debug=True):
self.debug = debug
self.img_file = img_file
self.img_file2 = img_file2
self.name = os.path.basename(file).split('.')[0]
self.file = file
self.useVoxel, self.voxel_size = False, 0.15
self.lowerTemplate, self.showImage = False, True
self.showError = False
self.points_correspondences = None
self.OK = False
self.useInitialPointCloud = False  # use all points for the fit, or only the detected margins
self.chessBoard = False
self.applyICP_directly = False
self.s = .1 # scale
self.plotInit, self.axis_on, self.colour, self.Annotate = False, True, False, False
self.chess, self.corn, self.p1, self.p2, self.p3, self.ICP_finetune_plot = None, None, None, None, None, None
if self.showImage:
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
self.ImageNames = []
self._3DErros = []
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]]).reshape(-1, 3)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * self.s
self.fig = plt.figure(figsize=plt.figaspect(0.5))
self.fig.suptitle('Data collection', fontsize=16)
self.ax = self.fig.add_subplot(1, 2, 1, projection='3d')
#self.ax = self.fig.add_subplot(1, 2, 2, projection='3d')
self.readCameraIntrin()
self.QueryImg = cv2.imread(img_file)
self.ImageNames.append(os.path.basename(img_file))
if self.img_file2: # use stereo case
self.QueryImg2 = cv2.imread(img_file2)
if stereoRectify:
self.QueryImg = cv2.remap(src=self.QueryImg, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
self.QueryImg2 = cv2.remap(src=self.QueryImg2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(self.QueryImg2, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_right and ret_left:
print('Found chessboard in both images')
self.chessBoard = True
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
self.corners2 = corners2_left
cv2.drawChessboardCorners(self.QueryImg, (10, 7), self.corners2, ret_left)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K_left, self.D_left)
imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.QueryImg = self.draw(self.QueryImg, corners=corners2_left, imgpts=imgpts)
self.pixelsPoints = np.asarray(corners2_left).squeeze()
self.pixels_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg2, (10, 7), corners2_right, ret_right)
self.pixels_right = np.asarray(corners2_right).squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
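# Worked example of the depth formula above (values are illustrative, not from this dataset):
# with baseline = 1.07 m, focal length = 718.9 px and a disparity of 38.5 px,
# depth = 1.07 * 718.9 / 38.5 ~ 20.0 m; a larger disparity means a closer point.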
'''print('TRIANGULATE HERE==========================================')
P_1 = np.vstack((np.hstack((np.eye(3), np.zeros(3)[:, np.newaxis])), [0, 0, 0, 1])) # left camera
P_2 = np.vstack((np.hstack((self.R, self.T)), [0, 0, 0, 1])) # right camera
print('P1_{}, P_2{}, x_left:{}, x_right:{}'.format(np.shape(P_1), np.shape(P_2),
np.shape(self.x_left), np.shape(self.x_right)))
X_w, X1, X2 = triangulation(self.x_left,self.x_right,P_1,P_2)
print('X_w:{}, X1:{}, X2:{}, '.format(np.shape(X_w), np.shape(X1), np.shape(X2)))
print(X_w[0])
print(X1[0])
print(X2[0])'''
'''R1 = np.eye(3)
R2 = self.R
t1 = np.array([[0.], [0.], [0.]])
t2 = self.T
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
imagePoint1 = self.x_left[0]
imagePoint2 = self.x_right[0]
print('imagePoint1:{}, imagePoint2:{}'.format(np.shape(imagePoint1), np.shape(imagePoint2)))
print('self.K_left ')
print(self.K_left)
print('self.K_right ')
print(self.K_right)
p1,p2 = test(R1,R2,t1,t2,imagePoint1,imagePoint2,K1=self.K_left,K2=self.K_right, D1=self.D_left,D2=self.D_right)
p1 = np.array(p1).squeeze().astype(int)
p2 = np.array(p2).squeeze().astype(int)
print('p1:{}, p2:{}'.format(np.shape(p1), np.shape(p2)))
#d2 = distance_matrix(X_w, X_w)
#print('d2:{}'.format(d2))
cv2.circle(self.QueryImg, (p1[0],p1[1]), 7, (255, 0, 0), 7)
cv2.circle(self.QueryImg2, (p2[0], p2[1]), 7, (255, 0, 0), 7)
cv2.imshow('QueryImg', cv2.resize(self.QueryImg,None,fx=.5,fy=.5))
cv2.imshow('QueryImg2', cv2.resize(self.QueryImg2, None, fx=.5, fy=.5))
cv2.waitKey(0)
cv2.destroyAllWindows()'''
else:
self.chessBoard = False
self.useVoxel = False
print('No chessboard ')
corners2_left, ids_left, rejectedImgPoints = aruco.detectMarkers(gray_left, self.ARUCO_DICT)
corners2_left, ids_left, _, _ = aruco.refineDetectedMarkers(image=gray_left,
board=self.calibation_board,
detectedCorners=corners2_left,
detectedIds=ids_left,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_left,
distCoeffs=self.D_left)
corners2_right, ids_right, rejectedImgPoints = aruco.detectMarkers(gray_right, self.ARUCO_DICT)
corners2_right, ids_right, _, _ = aruco.refineDetectedMarkers(image=gray_right,
board=self.calibation_board,
detectedCorners=corners2_right,
detectedIds=ids_right,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_right,
distCoeffs=self.D_right)
if ids_left is not None and ids_right is not None:
print('found charuco board in both images')
retval_left, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners2_left, ids_left,
self.calibation_board,
self.K_left, self.D_left, None,
None)
retval_right, self.rvecs_right, self.tvecs_right = aruco.estimatePoseBoard(corners2_right,
ids_right,
self.calibation_board,
self.K_right,
self.D_right, None,
None)
if retval_left and retval_right:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
self.tvecs, 0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners2_left, ids_left,
borderColor=(0, 0, 255))
b = 1
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs_right, self.tvecs_right, self.K_right,
self.D_right)
self.corners2_right = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
circle_tvec, 0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('Cannot estimate the board pose in both charuco images')
self.pixelsPoints = self.corners2.squeeze()
self.pixels_left = self.pixelsPoints
self.pixels_right = self.corners2_right.squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
# self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
print('disparity:{}'.format(np.shape(disparity)))
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
else:
print('No board found in either image!')
else:
# Undistortion
h, w = self.QueryImg.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.K, self.D, (w, h), 1, (w, h))
dst = cv2.undistort(self.QueryImg, self.K, self.D, None, newcameramtx)
x, y, w, h = roi
self.QueryImg = dst[y:y + h, x:x + w]
gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
if ret: # found chessboard
print('Found chessboard')
self.chessBoard = True
self.corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg, (10, 7), corners, ret)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K, self.D)
# ret, self.rvecs, self.tvecs, inliers = cv2.solvePnPRansac(self.objp, self.corners2, self.K, self.D)
self.imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K, self.D)
self.QueryImg = self.draw(self.QueryImg, self.corners2, self.imgpts)
self.pixelsPoints = np.asarray(self.corners2).squeeze()
else: # check for charuco
self.chessBoard = False
self.useVoxel = False
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
if ids is not None:
print('found charuco board, ids:{}'.format(np.shape(ids)))
self.chessBoard = False
if len(ids) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
if retval:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, self.tvecs,
0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners, ids,
borderColor=(0, 0, 255))
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, circle_tvec,
0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('No board Found')
self.image_ax = self.fig.add_subplot(1, 2, 2)
#self.image_ax = self.fig.add_subplot(1, 2, 1)
self.image_ax.imshow(self.QueryImg)
self.image_ax.set_axis_off()
self.image_ax.set_xlabel('Y')
self.image_ax.set_ylabel('Z')
else:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111, projection="3d")
self.ax.set_xlabel('X', fontsize=10)
self.ax.set_ylabel('Y', fontsize=10)
self.ax.set_zlabel('Z', fontsize=10)
self.fig.tight_layout()
plt.subplots_adjust(left=.15, bottom=0.2)
#plt.subplots_adjust( bottom=0.2)
self.Rx, self.Ry, self.Rz = [np.deg2rad(-90), 0, np.deg2rad(-40)] if self.chessBoard else [0, 0, 0]
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.savePoints = Button(plt.axes([0.03, 0.45, 0.15, 0.04], ), 'filter points', color='white')
self.savePoints.on_clicked(self.getClosestPoints)
self.resetBtn = Button(plt.axes([0.03, 0.25, 0.15, 0.04], ), 'reset', color='white')
self.resetBtn.on_clicked(self.reset)
self.X_btn = Button(plt.axes([0.03, 0.9, 0.024, 0.04], ), 'X', color='red')
self.X_btn.on_clicked(self.Close)
self.OK_btn = Button(plt.axes([0.03, 0.83, 0.074, 0.04], ), 'OK', color='green')
self.OK_btn.on_clicked(self.OK_btnClick)
self.not_OK_btn = Button(plt.axes([0.105, 0.83, 0.074, 0.04], ), 'not OK', color='red')
self.not_OK_btn.on_clicked(self.not_OK_btnClick)
self.saveCorrespondences = Button(plt.axes([0.03, 0.76, 0.15, 0.04], ), 'Save points', color='white')
self.saveCorrespondences.on_clicked(self.savePointsCorrespondences)
self.fitChessboard = Button(plt.axes([0.03, 0.66, 0.15, 0.04], ), 'auto fit', color='white')
self.fitChessboard.on_clicked(self.auto_fitBoard)
# set up sliders
self.Rx_Slider = Slider(plt.axes([0.25, 0.15, 0.65, 0.03]), 'Rx', -180, 180.0, valinit=np.degrees(self.Rx))
self.Ry_Slider = Slider(plt.axes([0.25, 0.1, 0.65, 0.03]), 'Ry', -180, 180.0, valinit=np.degrees(self.Ry))
self.Rz_Slider = Slider(plt.axes([0.25, 0.05, 0.65, 0.03]), 'Rz', -180, 180.0, valinit=np.degrees(self.Rz))
self.Rx_Slider.on_changed(self.update_R)
self.Ry_Slider.on_changed(self.update_R)
self.Rz_Slider.on_changed(self.update_R)
self.check = CheckButtons(plt.axes([0.03, 0.3, 0.15, 0.12]), ('Axes', 'Black', 'Annotate'),
(self.axis_on, self.colour, self.Annotate))
self.check.on_clicked(self.func_CheckButtons)
# set up translation buttons
self.step = .1 # m
self.trigger = True
self.Tx_btn_plus = Button(plt.axes([0.05, 0.15, 0.04, 0.045]), '+Tx', color='white')
self.Tx_btn_plus.on_clicked(self.Tx_plus)
self.Tx_btn_minus = Button(plt.axes([0.12, 0.15, 0.04, 0.045]), '-Tx', color='white')
self.Tx_btn_minus.on_clicked(self.Tx_minus)
self.Ty_btn_plus = Button(plt.axes([0.05, 0.1, 0.04, 0.045]), '+Ty', color='white')
self.Ty_btn_plus.on_clicked(self.Ty_plus)
self.Ty_btn_minus = Button(plt.axes([0.12, 0.1, 0.04, 0.045]), '-Ty', color='white')
self.Ty_btn_minus.on_clicked(self.Ty_minus)
self.Tz_btn_plus = Button(plt.axes([0.05, 0.05, 0.04, 0.045]), '+Tz', color='white')
self.Tz_btn_plus.on_clicked(self.Tz_plus)
self.Tz_btn_minus = Button(plt.axes([0.12, 0.05, 0.04, 0.045]), '-Tz', color='white')
self.Tz_btn_minus.on_clicked(self.Tz_minus)
self.Tx_flip = Button(plt.axes([0.17, 0.15, 0.04, 0.045]), 'FlipX', color='white')
self.Tx_flip.on_clicked(self.flipX)
self.Ty_flip = Button(plt.axes([0.17, 0.1, 0.04, 0.045]), 'FlipY', color='white')
self.Ty_flip.on_clicked(self.flipY)
self.Tz_flip = Button(plt.axes([0.17, 0.05, 0.04, 0.045]), 'FlipZ', color='white')
self.Tz_flip.on_clicked(self.flipZ)
self.radio = RadioButtons(plt.axes([0.03, 0.5, 0.15, 0.15], ), ('Final', 'Init'), active=0)
self.radio.on_clicked(self.colorfunc)
self.tag = None
self.circle_center = None
self.errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d.",
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible.",
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
self.legend_elements = [
Line2D([0], [0], marker='o', color='w', label='Original pointcloud', markerfacecolor='g', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Corners', markerfacecolor='k', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Margins', markerfacecolor='r', markersize=4),
]
def setUp(self):
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.board()
self.ax.legend(handles=self.legend_elements, loc='best')
if self.showImage:
self.getDepth_Inside_Outside()
self.fitNewPlan()
def auto_fitBoard(self, args):
# estimate 3D-R and 3D-t between chess and PointCloud
# Initial guess of the transformation
x0 = np.array([np.degrees(self.Rx), np.degrees(self.Ry), np.degrees(self.Rz), self.Tx, self.Ty, self.Tz])
report = {"error": [], "template": []}
def f_min(x):
self.Rx, self.Ry, self.Rz = np.deg2rad(x[0]), np.deg2rad(x[1]), np.deg2rad(x[2])
self.Tx, self.Ty, self.Tz = x[3], x[4], x[5]
template = self.board(plot=False)
if self.useInitialPointCloud:
dist_mat = distance_matrix(template, self.point_cloud)
else:
dist_mat = distance_matrix(template, self.corners_)
err_func = dist_mat.sum(axis=1) # N x 1
# err_func = dist_mat.sum(axis=0) # N x 1
if self.debug:
print('errors = {}, dist_mat:{}, err_func:{}'.format(round(np.sum(err_func), 2), np.shape(dist_mat),
np.shape(err_func)))
report["error"].append(np.sum(err_func))
report["template"].append(template)
return err_func
maxIters = 700
sol, status = leastsq(f_min, x0, ftol=1.49012e-07, xtol=1.49012e-07, maxfev=maxIters)
print('sol:{}, status:{}'.format(sol, status))
print(self.errors[status])
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.ICP_finetune_plot:
self.ICP_finetune_plot.remove()
self.lowerTemplate = False
self.board()
point_cloud = np.asarray(self.point_cloud, dtype=np.float32)
template = np.asarray(report["template"][0], dtype=np.float32) if self.applyICP_directly else np.asarray(
self.template_cloud, dtype=np.float32)
converged, self.transf, estimate, fitness = self.ICP_finetune(template, point_cloud)
# converged, self.transf, estimate, fitness = self.ICP_finetune(point_cloud,template)
self.estimate = np.array(estimate)
if self.chessBoard:
self.ICP_finetune_plot = self.ax.scatter(self.estimate[:, 0], self.estimate[:, 1], self.estimate[:, 2],
c='k', marker='o', alpha=0.8, s=4)
else:
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = self.estimate[idx, :]
self.ICP_finetune_plot = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2],
c='k', marker='o', alpha=0.8, s=4)
self.trigger = False
# set values of sol to Sliders
self.Rx_Slider.set_val(np.rad2deg(self.Rx))
self.Ry_Slider.set_val(np.rad2deg(self.Ry))
self.Rz_Slider.set_val(np.rad2deg(self.Rz))
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.trigger = True
self.board()
self.AnnotateEdges()
self.fig.canvas.draw_idle()
if self.showError:
print('min error:{} , at index:{}'.format(np.min(report["error"]), np.argmin(report["error"])))
rep = plt.figure(figsize=(15, 8))
plt.xlim(0, len(report["error"]) + 1)
plt.xlabel('Iteration')
plt.ylabel('RMSE')
plt.yticks(color='w')
plt.plot(np.arange(len(report["error"])) + 1, report["error"])
print('Start animation gif')
def update_graph(num):
data = np.asarray(report["template"][num])
graph._offsets3d = (data[:, 0], data[:, 1], data[:, 2])
title.set_text('Iteration {}'.format(num))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
title = ax.set_title('3D Test')
data = report["template"][0]
graph = ax.scatter(data[:, 0], data[:, 1], data[:, 2])
ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2])
ani = animation.FuncAnimation(fig, update_graph, 101, interval=2, blit=False, repeat=False)
ani.save('myAnimation.gif', writer='imagemagick', fps=30)
print('Animation done')
plt.show()
def flipX(self, event):
self.Rx_Slider.set_val(np.rad2deg(self.Rx + np.pi))
self.update_R(0)
def flipY(self, event):
self.Ry_Slider.set_val(np.rad2deg(self.Ry + np.pi))
self.update_R(0)
def flipZ(self, event):
self.Rz_Slider.set_val(np.rad2deg(self.Rz + np.pi))
self.update_R(0)
def update_R(self, val):
if self.trigger:
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.Rx = np.deg2rad(self.Rx_Slider.val)
self.Ry = np.deg2rad(self.Ry_Slider.val)
self.Rz = np.deg2rad(self.Rz_Slider.val)
self.board()
self.fig.canvas.draw_idle()
def board(self, plot=True, given_origin=None, angle=None):
self.board_origin = [self.Tx, self.Ty, self.Tz] if given_origin is None else given_origin
if self.chessBoard:
self.nCols, self.nRows, org = 7 + 2, 10 + 2, np.asarray(self.board_origin)
#org[0] -= self.nCols / 2
#org[1] -= self.nRows / 2
org[0] -= 4
org[1] -= 6
#org = np.zeros(3)
if self.lowerTemplate:
nrCols, nrRows = 2, 3
else:
nrCols, nrRows = self.nCols, self.nRows
#nrCols, nrRows = self.nCols+1, self.nRows+1 #remove later
print('org:{}, self.nCols - >{}, nrCols:{}'.format(org,self.nCols,nrCols))
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,num=nrRows)
X, Y = np.linspace(org[0], org[0] + self.nCols-1, num=nrCols), np.linspace(org[1], org[1] + self.nRows-1,
num=nrRows)
print('X:{}'.format(X))
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
colors, colortuple = np.empty(X.shape, dtype=str), ('k', 'w')
for y in range(nrCols):
for x in range(nrRows):
colors[x, y] = colortuple[(x + y) % len(colortuple)]
colors[0, 0] = 'r'
alpha = 0.65
else:
self.nCols, self.nRows, org = 10, 10, np.asarray(self.board_origin)
org[0] -= self.nCols / 2
org[1] -= self.nRows / 2
# nrCols, nrRows = 4, 4
nrCols, nrRows = self.nCols, self.nRows
# nrCols, nrRows = 20, 20
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,
num=nrRows)
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
alpha = 0.25
angles = np.array([self.Rx, self.Ry, self.Rz]) if angle is None else np.array(angle)
Rot_matrix = self.eulerAnglesToRotationMatrix(angles)
X, Y, Z = X * self.s, Y * self.s, Z * self.s
corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0))
init = corners.reshape(-1, 3)
print('corners-----------------------------------------------------')
#print(init)
print('corners -> {}'.format(np.shape(init)))
dist_Lidar = distance_matrix(init, init)
print('dist_Lidar corners---------------------------------------------------------')
print(dist_Lidar[0, :11])
translation = np.mean(init, axis=0) # get the mean point
corners = np.subtract(corners, translation)  # subtract it from all the other points
X, Y, Z = np.transpose(np.add(np.dot(corners, Rot_matrix), translation), (2, 0, 1))
# corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0)).reshape(-1, 3)
corners = np.transpose(np.array([X, Y, Z]), (2, 1, 0)).reshape(-1, 3)
if plot:
if self.chessBoard:
self.chess = self.ax.plot_surface(X, Y, Z, facecolors=colors, linewidth=0.2, cmap='gray', alpha=alpha)
else:
self.chess = self.ax.plot_surface(X, Y, Z, linewidth=0.2, cmap='gray', alpha=alpha)
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = corners[idx, :]
self.corn = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2], c='tab:blue',
marker='o', s=5)
self.template_cloud = corners
return np.array(corners)
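# Hedged standalone sketch (not used by the class): the geometric core of board() above is
# "rotate a planar template about its centroid, then put it back". For an N x 3 array of
# row-vector points and a 3 x 3 rotation matrix R this reduces to:
def _example_rotate_about_centroid(points, R):
    points = np.asarray(points, dtype=float)
    centroid = points.mean(axis=0)
    return (points - centroid).dot(R.T) + centroid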
def getPointCoud(self, colorsMap='jet', skip=1, useRing = True):
# X, Y, Z, intensity, ring
if useRing:
originalCloud = np.array(np.load(self.file, mmap_mode='r'))[:,:5]
if InitLidar:
xyz = originalCloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
originalCloud[:, 0:3] = new_xyz
#mean_x = np.mean(originalCloud[:, 0])
#originalCloud[:, 0] = mean_x
df = pd.DataFrame(data=originalCloud, columns=["X", "Y", "Z","intens","ring"])
gp = df.groupby('ring')
keys = gp.groups.keys()
#groups = gp.groups
coolPoints, circlePoints = [],[]
for i in keys:
line = np.array(gp.get_group(i), dtype=float)
first, last = np.array(line[0], dtype=float)[:3], np.array(line[-1], dtype=float)[:3]
coolPoints.append(first)
coolPoints.append(last)
if self.chessBoard == False:
if len(line) > 50:
l = line[:,:3]
for i in range(2,len(l)-2,1):
d = np.linalg.norm(l[i]-l[i+1])
if d > 0.08: #half of the circle
circlePoints.append(l[i])
circlePoints.append(l[i+1])
self.coolPoints = np.array(coolPoints).squeeze()
self.ax.scatter(*self.coolPoints.T, color='r', marker='o', alpha=1, s=2)
print('coolPoints:{}, circlePoints:{}'.format(np.shape(self.coolPoints), np.shape(circlePoints)))
circlePoints = np.array(circlePoints)
if len(circlePoints)>0:
self.ax.scatter(*circlePoints.T, color='r', marker='o', alpha=1, s=5)
self.fitCircle(circlePoints)
#self.point_cloud = np.array(self.coolPoints, dtype=np.float32)
self.point_cloud = np.array(np.load(self.file, mmap_mode='r')[::skip, :3], dtype=np.float32)
if InitLidar:
xyz = self.point_cloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
self.point_cloud[:, 0:3] = new_xyz
# center the point_cloud
#mean_x = np.mean(self.point_cloud[:, 0])
#self.point_cloud[:, 0] = mean_x
self.point_cloud_mean = np.mean(self.point_cloud, axis=0)
self.Tx, self.Ty, self.Tz = self.point_cloud_mean
# self.point_cloud = self.point_cloud - self.point_cloud_mean
self.point_cloud_colors = np.array(np.load(self.file, mmap_mode='r'))[::skip, 3]
if self.plotInit:
cm = plt.get_cmap(colorsMap)
cNorm = matplotlib.colors.Normalize(vmin=min(self.point_cloud_colors), vmax=max(self.point_cloud_colors))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
self.p1 = self.ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2],
color=scalarMap.to_rgba(self.point_cloud_colors), s=0.2)
else:
self.p = pcl.PointCloud(self.point_cloud)
inlier, outliner, coefficients = self.do_ransac_plane_segmentation(self.p, pcl.SACMODEL_PLANE,
pcl.SAC_RANSAC, 0.01)
#self.planeEquation(coef=np.array(coefficients).squeeze())
self.point_cloud_init = self.point_cloud.copy()
if self.useVoxel:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(self.point_cloud)
self.point_cloud = np.array(pcd.voxel_down_sample(voxel_size=self.voxel_size).points)
# self.p1 = self.ax.scatter(outliner[:, 0], outliner[:, 1], outliner[:, 2], c='y', s=0.2)
self.p2 = self.ax.scatter(inlier[:, 0], inlier[:, 1], inlier[:, 2], c='g', s=0.2)
w, v = self.PCA(inlier)
point = np.mean(inlier, axis=0)
if self.chessBoard == False and self.circle_center:
#point[1:] = self.circle_center
point[[0,2]]= self.circle_center
w *= 2
if self.chessBoard==False and self.circle_center:
p = Circle(self.circle_center, self.circle_radius, alpha = .3, color='tab:blue')
self.ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=point[1], zdir="y")
self.p3 = self.ax.quiver([point[0]], [point[1]], [point[2]], [v[0, :] * np.sqrt(w[0])],
[v[1, :] * np.sqrt(w[0])],
[v[2, :] * np.sqrt(w[0])], linewidths=(1.8,))
def axisEqual3D(self, centers=None):
extents = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
if centers is None:
    centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(self.ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def planeEquation(self, coef):
a, b, c, d = coef
mean = np.mean(self.point_cloud, axis=0)
normal = [a, b, c]
d2 = -mean.dot(normal)
# print('d2:{}'.format(d2))
# print('mean:{}'.format(mean))
# print('The equation is {0}x + {1}y + {2}z = {3}'.format(a, b, c, d))
# plot the normal vector
startX, startY, startZ = mean[0], mean[1], mean[2]
startZ = (-normal[0] * startX - normal[1] * startY - d) * 1. / normal[2]
self.ax.quiver([startX], [startY], [startZ], [normal[0]], [normal[1]], [normal[2]], linewidths=(3,),edgecolor="red")
def PCA(self, data, correlation=False, sort=True):
# data = nx3
mean = np.mean(data, axis=0)
data_adjust = data - mean
#: the data is transposed due to np.cov/corrcoef syntax
if correlation:
matrix = np.corrcoef(data_adjust.T)
else:
matrix = np.cov(data_adjust.T)
eigenvalues, eigenvectors = np.linalg.eig(matrix)
if sort:
#: sort eigenvalues and eigenvectors
sort = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[sort]
eigenvectors = eigenvectors[:, sort]
return eigenvalues, eigenvectors
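# Hedged sketch (illustrative only): for a roughly planar cloud, the eigenvector paired with
# the smallest eigenvalue returned by PCA() above (last column after the descending sort)
# estimates the plane normal, i.e. the direction of least variance.
def _example_plane_normal(points):
    centred = np.asarray(points, dtype=float) - np.mean(points, axis=0)
    eigenvalues, eigenvectors = np.linalg.eig(np.cov(centred.T))
    order = eigenvalues.argsort()[::-1]
    normal = eigenvectors[:, order][:, -1]
    return normal / np.linalg.norm(normal)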
def eulerAnglesToRotationMatrix(self, theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
def do_ransac_plane_segmentation(self, pcl_data, pcl_sac_model_plane, pcl_sac_ransac, max_distance):
"""
Create the segmentation object
:param pcl_data: point cloud data (python-pcl PointCloud)
:param pcl_sac_model_plane: use to determine plane models
:param pcl_sac_ransac: RANdom SAmple Consensus
:param max_distance: max distance for a point to be considered as fitting the model
:return: segmentation object
"""
seg = pcl_data.make_segmenter()
seg.set_model_type(pcl_sac_model_plane)
seg.set_method_type(pcl_sac_ransac)
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
inlier_object = pcl_data.extract(inliers, negative=False)
outlier_object = pcl_data.extract(inliers, negative=True)
if len(inliers) <= 1:
outlier_object = [0, 0, 0]
inlier_object, outlier_object = np.array(inlier_object), np.array(outlier_object)
return inlier_object, outlier_object, coefficients
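# Hedged usage sketch (assumes the optional python-pcl import at the top succeeded): an
# N x 3 float32 cloud goes in; the dominant plane (inliers), the remaining points (outliers)
# and the plane coefficients [a, b, c, d] come back.
def _example_plane_segmentation(filter_obj, xyz):
    cloud = pcl.PointCloud(np.asarray(xyz, dtype=np.float32))
    return filter_obj.do_ransac_plane_segmentation(cloud, pcl.SACMODEL_PLANE,
                                                   pcl.SAC_RANSAC, max_distance=0.01)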
def func_CheckButtons(self, label):
if label == 'Axes':
if self.axis_on:
self.ax.set_axis_off()
self.axis_on = False
else:
self.ax.set_axis_on()
self.axis_on = True
elif label == 'Black':
if self.colour:
self.colour = False
self.ax.set_facecolor((1, 1, 1))
else:
self.colour = True
self.ax.set_facecolor((0, 0, 0))
elif label == 'Annotate':
self.Annotate = not self.Annotate
self.AnnotateEdges()
self.fig.canvas.draw_idle()
def ICP_finetune(self, points_in, points_out):
cloud_in = pcl.PointCloud()
cloud_out = pcl.PointCloud()
cloud_in.from_array(points_in)
cloud_out.from_array(points_out)
# icp = cloud_in.make_IterativeClosestPoint()
icp = cloud_out.make_IterativeClosestPoint()
converged, transf, estimate, fitness = icp.icp(cloud_in, cloud_out)
print('fitness:{}, converged:{}, transf:{}, estimate:{}'.format(fitness, converged, np.shape(transf),
np.shape(estimate)))
return converged, transf, estimate, fitness
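# Hedged follow-up sketch: the 4 x 4 `transf` returned above can be applied to the template by
# hand in homogeneous coordinates; up to the library's source/target convention this should
# reproduce `estimate`.
def _example_apply_icp_transform(points, transf):
    pts_h = np.hstack([np.asarray(points, dtype=np.float64),
                       np.ones((len(points), 1))])   # N x 4 homogeneous points
    return transf.dot(pts_h.T).T[:, :3]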
def colorfunc(self, label):
if label == 'Init':
self.plotInit = True
else:
self.plotInit = False
self.reset(0)
def OK_btnClick(self, args):
self.OK = True
plt.close()
def not_OK_btnClick(self, args):
self.OK = False
plt.close()
def Close(self, args):
global globalTrigger
globalTrigger = False
plt.close()
def reset(self, args):
self.ax.cla()
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.Rx, self.Ry, self.Rz = 0, 0, 0
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.board()
self.fig.canvas.draw_idle()
def getClosestPoints(self, arg):
dist_mat = distance_matrix(self.template_cloud, self.point_cloud_init)
self.neighbours = np.argsort(dist_mat, axis=1)[:, 0]
self.finaPoints = np.asarray(self.point_cloud_init[self.neighbours, :]).squeeze()
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.p3:
self.p3.remove()
if self.p2:
self.p2.remove()
if self.p1:
self.p1.remove()
self.scatter_finalPoints = self.ax.scatter(self.finaPoints[:, 0], self.finaPoints[:, 1], self.finaPoints[:, 2],
c='k', marker='x', s=1)
self.corn = self.ax.scatter(self.template_cloud[:, 0], self.template_cloud[:, 1], self.template_cloud[:, 2],
c='blue', marker='o', s=5)
self.fig.canvas.draw_idle()
def Tz_plus(self, event):
self.Tz += self.step
self.update_R(0)
def Tz_minus(self, event):
self.Tz -= self.step
self.update_R(0)
def Ty_plus(self, event):
self.Ty += self.step
self.update_R(0)
def Ty_minus(self, event):
self.Ty -= self.step
self.update_R(0)
def Tx_plus(self, event):
self.Tx += self.step
self.update_R(0)
def Tx_minus(self, event):
self.Tx -= self.step
self.update_R(0)
def readCameraIntrin(self):
name = 'outside'  # camera-model set to load: 'inside' or 'outside'; the outside set is used here
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_left = self.camera_model['K_left']
self.K_right = self.camera_model['K_right']
self.D_left = self.camera_model['D_left']
self.D_right = self.camera_model['D_right']
# self.K_left = self.camera_model['K_right']
# self.K_right = self.camera_model['K_left']
# self.D_left = self.camera_model['D_right']
# self.D_right = self.camera_model['D_left']
# print('K_left')
# print(self.K_left)
# print('K_right')
# print(self.K_right)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.T = np.array([-0.98, 0., 0.12])[:, np.newaxis]
#self.T = np.array([-.75, 0., 0.])[:, np.newaxis]
#print('self T after {}'.format(np.shape(self.T)))
#angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
#self.R = euler_matrix(angles)
#Q = self.camera_model_rectify['Q']
#roi_left, roi_right = self.camera_model_rectify['roi_left'], self.camera_model_rectify['roi_right']
self.leftMapX, self.leftMapY = self.camera_model_rectify['leftMapX'], self.camera_model_rectify['leftMapY']
self.rightMapX, self.rightMapY = self.camera_model_rectify['rightMapX'], self.camera_model_rectify['rightMapY']
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
self.K = self.K_right
self.D = self.D_right
try:
N = 5
aruco_dict = aruco.custom_dictionary(0, N, 1)
aruco_dict.bytesList = np.empty(shape=(4, N - 1, N - 1), dtype=np.uint8)
A = np.array([[0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[0] = aruco.Dictionary_getByteListFromBits(A)
R = np.array([[1, 1, 1, 1, 0], [1, 0, 0, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 1]],
dtype=np.uint8)
aruco_dict.bytesList[1] = aruco.Dictionary_getByteListFromBits(R)
V = np.array([[1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0]],
dtype=np.uint8)
O = np.array([[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[2] = aruco.Dictionary_getByteListFromBits(O)
aruco_dict.bytesList[3] = aruco.Dictionary_getByteListFromBits(V)
self.ARUCO_DICT = aruco_dict
self.calibation_board = aruco.GridBoard_create(
markersX=2, markersY=2,
markerLength=0.126, markerSeparation=0.74,
dictionary=self.ARUCO_DICT)
except Exception:
print('Custom ArUco dictionary could not be built -> install opencv-contrib (cv2.aruco)')
def draw(self, img, corners, imgpts):
corner = tuple(corners[0].ravel())
cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
return img
def annotate3D(self, ax, s, *args, **kwargs):
self.tag = Annotation3D(s, *args, **kwargs)
ax.add_artist(self.tag)
def AnnotateEdges(self, giveAX=None, givenPoints=None):
if self.Annotate:
# add vertices annotation.
if giveAX is None:
if self.lowerTemplate or self.chessBoard == False:
if self.chessBoard == False:
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
self.templatePoints = [pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]
self.templatePoints = np.array(self.templatePoints).reshape(-1, 3)
cornersToPLot = self.estimate[idx, :]
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=12, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(self.template_cloud):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
try:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
except:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols+1, self.nRows+1, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
# templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nRows,self.nCols, 3)[1:self.nRows-1,1:self.nCols-1,:]
self.templatePoints = np.array(templatePoints).reshape(-1, 3)
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(givenPoints):
self.annotate3D(giveAX, s=str(j), xyz=xyz_, fontsize=10, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
if self.showImage:
# annotate image
points = np.asarray(self.corners2).squeeze()
font, lineType = cv2.FONT_HERSHEY_SIMPLEX, 2 if self.chessBoard else 10
for i, point in enumerate(points):
point = tuple(point.ravel())
cv2.putText(self.QueryImg, '{}'.format(i), point, font, 1 if self.chessBoard else 3, (0, 0, 0)
if self.chessBoard else (255, 0, 0), lineType)
self.image_ax.imshow(self.QueryImg)
def getCamera_XYZ_Stereo(self):
#cam_rot, jac = cv2.Rodrigues(self.rvecs)
#mR = np.matrix(cam_rot)
#mT = np.matrix(self.tvecs)
#cam_trans = -mR * mT
_3DPoints = []
for i, pixel in enumerate(self.x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = self.depth[i]
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - self.fxypxy[2]) / self.fxypxy[0]
pt[1] = pt[2] * (pt[1] - self.fxypxy[3]) / self.fxypxy[1]
# pt = pt.dot(cam_rot.T) + self.tvecs
_3DPoints.append(pt)
print('_3DPoints {}'.format(np.shape(_3DPoints)))
print('tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ_Stereo mean {}'.format(np.mean(_3DPoints, axis=0)))
_3DPoints = np.array(_3DPoints).squeeze()
print('from disparity getCamera_XYZ_Stereo ')
d = distance_matrix(_3DPoints,_3DPoints)
print(d)
return _3DPoints
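# Hedged standalone sketch of the pinhole back-projection used above: pixel (u, v) with metric
# depth Z and intrinsics (fx, fy, cx, cy) maps to camera coordinates as
# X = Z * (u - cx) / fx, Y = Z * (v - cy) / fy, Z = Z.
def _example_backproject_pixel(u, v, depth, fx, fy, cx, cy):
    X = depth * (u - cx) / fx
    Y = depth * (v - cy) / fy
    return np.array([X, Y, depth])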
def getCamera_XYZ(self):
R_mtx, jac = cv2.Rodrigues(self.rvecs)
inv_R_mtx = np.linalg.inv(R_mtx)
inv_K = np.linalg.inv(self.K)
def compute_XYZ(u, v): # from 2D pixels to 3D world
uv_ = np.array([[u, v, 1]], dtype=np.float32).T
suv_ = uv_
xyz_ = inv_K.dot(suv_) - self.tvecs
XYZ = inv_R_mtx.dot(xyz_)
pred = XYZ.T[0]
return pred
Camera_XYZ = []
for i, point in enumerate(self.pixelsPoints):
xyz = compute_XYZ(u=point[0], v=point[1])
# print 'xyz:{}'.format(xyz)
Camera_XYZ.append(xyz)
Camera_XYZ = np.array(Camera_XYZ)
print('init tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ mean {}'.format(np.mean(Camera_XYZ, axis=0)))
if self.img_file2 is None:
for i, point in enumerate(Camera_XYZ):
imgpts, jac = cv2.projectPoints(point, self.rvecs, self.tvecs, self.K, self.D)
imgpts = np.asarray(imgpts).squeeze()
cv2.circle(self.QueryImg, (int(imgpts[0]), int(imgpts[1])), 7, (255, 0, 0), 7)
self.image_ax.imshow(self.QueryImg)
return Camera_XYZ
def getImagePixels(self):
img = cv2.imread(self.img_file) #left image
img2 = cv2.imread(self.img_file2)  # right image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
pixelsPoints,pixelsPoints2, _3DreconstructedBoard = [],[],[]
if self.chessBoard:
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
ret2, corners2 = cv2.findChessboardCorners(gray2, (10, 7), None)
if ret and ret2: # found chessboard
print('Found chessboard')
corners_2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
corners2_2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), self.criteria)
pixelsPoints = np.asarray(corners_2).squeeze()
pixelsPoints2 = np.asarray(corners2_2).squeeze()
cv2.drawChessboardCorners(img, (10, 7), corners_2, ret)
cv2.drawChessboardCorners(img2, (10, 7), corners2_2, ret2)
# Find the rotation and translation vectors.
success, rvecs, tvecs, inliers = cv2.solvePnPRansac(self.objp, corners_2, self.K, self.D)
rvecs, _ = cv2.Rodrigues(rvecs)
_3Dpoints = self.objp
# project 3D points to image plane
_2Dpoints, jac = cv2.projectPoints(_3Dpoints, rvecs, tvecs, self.K, self.D)
_2Dpoints = np.array(_2Dpoints, dtype=np.float32).squeeze()
print('_2Dpoints -> {}'.format(np.shape(_2Dpoints)))
for i in range(len(_2Dpoints)):
cv2.circle(img, tuple(_2Dpoints[i]), 5, (0, 255, 0), 3)
_3Dpoints = rvecs.dot(_3Dpoints.T) + tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat[0, :11])
_3DreconstructedBoard = _3Dpoints
else:
return None, None, None
else:
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
corners2, ids2, rejectedImgPoints2 = aruco.detectMarkers(gray2, self.ARUCO_DICT)
corners2, ids2, rejectedImgPoints2, recoveredIds2 = aruco.refineDetectedMarkers(
image=gray2, board=self.calibation_board, detectedCorners=corners2, detectedIds=ids2,
rejectedCorners=rejectedImgPoints2, cameraMatrix=self.K, distCoeffs=self.D)
if ids is not None and ids2 is not None:
print('found charuco board, ids:{}'.format(np.shape(ids)))
if len(ids) > 0 and len(ids2) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
retval2, self.rvecs2, self.tvecs2 = aruco.estimatePoseBoard(corners2, ids2,
self.calibation_board, self.K,
self.D, None, None)
img = aruco.drawDetectedMarkers(img, corners, ids,borderColor=(0, 0, 255))
img2 = aruco.drawDetectedMarkers(img2, corners2, ids2, borderColor=(0, 0, 255))
if retval and retval2:
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
self.dst2, jacobian = cv2.Rodrigues(self.rvecs2)
#self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0],[.5,.5,0]])
_3Dpoints = self.dst.T.dot(np.array(self.pts).squeeze().T) + self.tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat)
_3DreconstructedBoard = _3Dpoints
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img = cv2.line(img, top_right, bot_right, (0, 255, 0), 4)
img = cv2.line(img, bot_right, bot_left, (0, 255, 0), 4)
img = cv2.line(img, bot_left, top_left, (0, 255, 0), 4)
img = cv2.line(img, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img, tuple(corners2[-1]), 5, (0, 255, 0), 3)
cv2.circle(img, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints = np.asarray(corners2).squeeze()
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs2, self.tvecs2, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img2 = cv2.line(img2, top_right, bot_right, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_right, bot_left, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_left, top_left, (0, 255, 0), 4)
img2 = cv2.line(img2, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img2, tuple(corners2[-1]), 5, (0, 255, 0), 3)
#cv2.circle(img2, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints2 = np.asarray(corners2).squeeze()
else:
return None, None, None
else:
return None, None, None
else:
return None, None, None
scale = .4
_horizontal = np.hstack(
(cv2.resize(img, None, fx=scale, fy=scale), cv2.resize(img2, None, fx=scale, fy=scale)))
cv2.imshow('_horizontal', _horizontal)
cv2.waitKey(0)
cv2.destroyAllWindows()
return pixelsPoints,pixelsPoints2, _3DreconstructedBoard
def savePointsCorrespondences(self, args):
display = True
fig = plt.figure(figsize=plt.figaspect(1))
ax = plt.axes(projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if self.chessBoard:
legend_elements = [
Line2D([0], [0], marker='o', label='board template', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='green', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='k', markersize=6),
Line2D([0], [0], marker='o', label='Camera_XYZ', markerfacecolor='red', markersize=6),
]
board_template = self.template_cloud
board_template_ICP_finetuned = self.estimate
closest_lidar_points = self.finaPoints
try:
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
except:
print('Reshape with (nCols, nRows) failed, retrying with (nCols+1, nRows+1)')
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
icp_finetuned_inside = np.array(icp_finetuned_inside).reshape(-1, 3)
board_template_inside = np.array(board_template_inside).reshape(-1, 3)
print('board_template_inside-----------------------------------------------------')
print(board_template_inside)
print('board_template_inside -> {}'.format(np.shape(board_template_inside)))
dist_Lidar = distance_matrix(board_template_inside, board_template_inside)
print('dist_Lidar---------------------------------------------------------')
print(dist_Lidar[0, :11])
closest_lidar_points_inside = np.array(closest_lidar_points_inside).reshape(-1, 3)
Camera_XYZ = self.getCamera_XYZ()
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
display = True
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('icp_finetuned_inside:{}'.format(np.shape(icp_finetuned_inside)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('closest_lidar_points_inside:{}'.format(np.shape(closest_lidar_points_inside)))
print('Camera_XYZ:{}'.format(np.shape(Camera_XYZ)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
#dist = distance_matrix(Camera_XYZ_Stereo, Camera_XYZ_Stereo)
#print('distance matrix Camera_XYZ_Stereo:{}'.format(dist))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*icp_finetuned_inside.T, color='g', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*closest_lidar_points_inside.T, color='k', marker='x', alpha=1, s=20)
ax.scatter(*Camera_XYZ.T, color='k', marker='x', alpha=1, s=30)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=1, s=3)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ_Stereo, axis=0) if self.img_file2 is not None else np.mean(board_template,axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if self.pixelsPointsLeft is None or len(self.pixelsPointsLeft) <= 0:
print('Cannot get pixel correspondences from the images!')
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('icp_finetuned_inside', icp_finetuned_inside),
('closest_lidar_points', closest_lidar_points),
('closest_lidar_points_inside', closest_lidar_points_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ', Camera_XYZ)])
# save_obj(self.points_correspondences, self.name)
else:
legend_elements = [
Line2D([0], [0], marker='o', label='board template all', markerfacecolor='b', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='red', markersize=6),
Line2D([0], [0], marker='o', label='board template inside', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='red', markersize=6),
]
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
board_template = np.array([pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
pts = np.asarray(self.estimate.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.estimate[idx], axis=0)
board_template_ICP_finetuned = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
board_template_inside = self.templatePoints
pts = np.asarray(self.finaPoints.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.finaPoints[idx], axis=0)
closest_lidar_points = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=.8, s=20)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ, axis=0) if self.img_file2 is not None else np.mean(board_template, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
_3DreconstructedBoard = np.array(_3DreconstructedBoard).squeeze()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if len(self.pixelsPointsLeft) <= 0:
print('Cannot get pixels points !!! ')
ax.scatter(*_3DreconstructedBoard.T, color='b', marker='x', alpha=1, s=20)
print('pixelsPointsLeft:{}'.format(np.shape(self.pixelsPointsLeft)))
print('pixelsPointsRight:{}'.format(np.shape(self.pixelsPointsRight)))
print('_3DreconstructedBoard:{}'.format(np.shape(_3DreconstructedBoard)))
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('closest_lidar_points', closest_lidar_points)])
# save_obj(self.points_correspondences, self.name)
ax.legend(handles=legend_elements, loc='best')
plt.show()
def getDepth_Inside_Outside(self):
calibrations = ['inside', 'outside']
output = []
for calib in calibrations:
camera_model = load_obj('{}_combined_camera_model'.format(calib))
camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(calib))
K_left = camera_model['K_right']
D_left = camera_model['D_right']
T = camera_model['T']
leftMapX, leftMapY = camera_model_rectify['leftMapX'], camera_model_rectify['leftMapY']
rightMapX, rightMapY = camera_model_rectify['rightMapX'], camera_model_rectify['rightMapY']
imgleft = cv2.imread(self.img_file)
imgright = cv2.imread(self.img_file2)
if stereoRectify:
imgleft = cv2.remap(src=imgleft, map1=leftMapX, map2=leftMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
imgright = cv2.remap(src=imgright, map1=rightMapX, map2=rightMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(imgleft, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(imgright, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_left and ret_right: # found chessboard
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
x_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
x_right = np.asarray(corners2_right).squeeze()
baseline = abs(T[0])
focal_length, cx, cy = K_left[0, 0], K_left[0, 2], K_left[1, 2]
disparity = np.sum(np.sqrt((x_left - x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
depth = (baseline * focal_length / disparity) # .reshape(10,7)
fxypxy = [K_left[0, 0], K_left[1, 1], cx, cy]
print('{} fx:{}, fy:{}'.format(calib, round(K_left[0, 0],2), round(K_left[1, 1],2)))
_3DPoints = []
for i, pixel in enumerate(x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = depth[i]
# print('u:{},v:{},distance:{}'.format(u,v, distance))
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - fxypxy[2]) / fxypxy[0]
pt[1] = pt[2] * (pt[1] - fxypxy[3]) / fxypxy[1]
_3DPoints.append(pt)
_3DPoints = np.array(_3DPoints)
output.append(_3DPoints)
else:
print('cannot detect board in both images')
if len(output)>1:
inside_3D = np.array(output[0]).squeeze()
outside_3D = np.array(output[1]).squeeze()
# get the per-point error between the two calibrations, averaged over the 70 (10x7) corners
a_min_b = inside_3D - outside_3D
norm_total = np.linalg.norm(a_min_b) / 70
norm_axis = np.linalg.norm(a_min_b, axis=0) / 70
print('norm_total:{}, norm_axis:{}'.format(norm_total,norm_axis))
self._3DErros.append(norm_axis)
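# Note on the depth model used above (illustrative numbers, not from the data):
# for a rectified stereo pair, depth Z = baseline * focal_length / disparity, so
# e.g. a 0.5 m baseline, a 1400 px focal length and a 35 px disparity give
# Z = 0.5 * 1400 / 35 = 20 m. Each pixel (u, v) is then back-projected with the
# pinhole model: X = Z * (u - cx) / fx, Y = Z * (v - cy) / fy.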
def fitNewPlan(self):
coolPoints = self.coolPoints
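# The nested helper below estimates a minimum-area bounding rectangle of the
# board points projected onto the x-z plane: it builds the convex hull, rotates
# the hull points by each hull-edge orientation (the rotating-calipers idea),
# keeps the rotation giving the smallest axis-aligned area, and maps the four
# rectangle corners back to the original frame.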
def minimum_bounding_rectangle(points):
pi2 = np.pi / 2.
# get the convex hull for the points
hull = ConvexHull(points)
hull_points = points[hull.vertices]
y_saved = []
for simplex in hull.simplices:
y = coolPoints[simplex,1]
x = points[simplex, 0]
z = points[simplex, 1]
self.ax.plot(x, y, z, 'k-', alpha = .5)
y_saved.append(y)
y_saved = np.array(y_saved)
# calculate edge angles
edges = hull_points[1:] - hull_points[:-1]
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
rotations = np.vstack([
np.cos(angles),np.cos(angles - pi2),
np.cos(angles + pi2),np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
rval = np.array(rval)
d_matrix = distance_matrix(rval, points)
neighbours = np.argsort(d_matrix, axis=1)[:, 0]
rval2 = np.asarray(coolPoints[neighbours, 1]).squeeze()
return rval, rval2
points = list(self.coolPoints[:, [0, -1]])
y = np.mean(self.coolPoints[:, 1])
c, c2 = minimum_bounding_rectangle(np.array(points))
self.corners_ = []
for i,point in enumerate(c):
#self.corners_.append([point[0],y, point[1]])
self.corners_.append([point[0],c2[i], point[1]])
if self.chessBoard==False and self.circle_center:
self.corners_.append([self.circle_center[0],y,self.circle_center[1]])
self.corners_ = np.array(self.corners_)
self.ax.scatter(*self.corners_.T, color='k', marker='x', alpha=1, s=50)
def fitCircle(self, points):
if len(points)>0:
def calc_R(x, y, xc, yc):
"""calculate the distance of each 2D points from the center (xc, yc)"""
return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
def f(c, x, y):
"""calculate the algebraic distance between the data points
and the mean circle centered at c=(xc, yc)"""
Ri = calc_R(x, y, *c)
return Ri - Ri.mean()
def sigma(coords, x, y, r):
"""Computes Sigma for circle fit."""
dx, dy, sum_ = 0., 0., 0.
for i in range(len(coords)):
dx = coords[i][1] - x
dy = coords[i][0] - y
sum_ += (sqrt(dx * dx + dy * dy) - r) ** 2
return sqrt(sum_ / len(coords))
def hyper_fit(coords, IterMax=99, verbose=False):
"""
Fits coords to circle using hyperfit algorithm.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : s, sigma - variance of data wrt solution (float)
"""
X, Y = None, None
if isinstance(coords, np.ndarray):
X = coords[:, 0]
Y = coords[:, 1]
elif isinstance(coords, list):
X = np.array([x[0] for x in coords])
Y = np.array([x[1] for x in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
n = X.shape[0]
Xi = X - X.mean()
Yi = Y - Y.mean()
Zi = Xi * Xi + Yi * Yi
# compute moments
Mxy = (Xi * Yi).sum() / n
Mxx = (Xi * Xi).sum() / n
Myy = (Yi * Yi).sum() / n
Mxz = (Xi * Zi).sum() / n
Myz = (Yi * Zi).sum() / n
Mzz = (Zi * Zi).sum() / n
# computing the coefficients of characteristic polynomial
Mz = Mxx + Myy
Cov_xy = Mxx * Myy - Mxy * Mxy
Var_z = Mzz - Mz * Mz
A2 = 4 * Cov_xy - 3 * Mz * Mz - Mzz
A1 = Var_z * Mz + 4. * Cov_xy * Mz - Mxz * Mxz - Myz * Myz
A0 = Mxz * (Mxz * Myy - Myz * Mxy) + Myz * (Myz * Mxx - Mxz * Mxy) - Var_z * Cov_xy
A22 = A2 + A2
# finding the root of the characteristic polynomial
y = A0
x = 0.
for i in range(IterMax):
Dy = A1 + x * (A22 + 16. * x * x)
xnew = x - y / Dy
if xnew == x or not np.isfinite(xnew):
break
ynew = A0 + xnew * (A1 + xnew * (A2 + 4. * xnew * xnew))
if abs(ynew) >= abs(y):
break
x, y = xnew, ynew
det = x * x - x * Mz + Cov_xy
Xcenter = (Mxz * (Myy - x) - Myz * Mxy) / det / 2.
Ycenter = (Myz * (Mxx - x) - Mxz * Mxy) / det / 2.
x = Xcenter + X.mean()
y = Ycenter + Y.mean()
r = sqrt(abs(Xcenter ** 2 + Ycenter ** 2 + Mz))
s = sigma(coords, x, y, r)
iter_ = i
if verbose:
print('Regression complete in {} iterations.'.format(iter_))
print('Sigma computed: ', s)
return x, y, r, s
def least_squares_circle(coords):
"""Circle fit using least-squares solver.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : MSE of solution against training data (float)
"""
x, y = None, None
if isinstance(coords, np.ndarray):
x = coords[:, 0]
y = coords[:, 1]
elif isinstance(coords, list):
x = np.array([point[0] for point in coords])
y = np.array([point[1] for point in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, _ = leastsq(f, center_estimate, args=(x, y))
xc, yc = center
Ri = calc_R(x, y, *center)
R = Ri.mean()
residu = np.sum((Ri - R) ** 2)
return xc, yc, R, residu
def plot_data_circle(x, y, xc, yc, R):
"""
Plot data and a fitted circle.
Inputs:
x : data, x values (array)
y : data, y values (array)
xc : fit circle center (x-value) (float)
yc : fit circle center (y-value) (float)
R : fir circle radius (float)
Output:
None (generates matplotlib plot).
"""
f = plt.figure(facecolor='white')
plt.axis('equal')
theta_fit = np.linspace(-pi, pi, 180)
x_fit = xc + R * np.cos(theta_fit)
y_fit = yc + R * np.sin(theta_fit)
plt.plot(x_fit, y_fit, 'b-', label="fitted circle", lw=2)
plt.plot([xc], [yc], 'bD', mec='y', mew=1)
plt.xlabel('x')
plt.ylabel('y')
# plot data
plt.scatter(x, y, c='red', label='data')
plt.legend(loc='best', labelspacing=0.1)
plt.grid()
plt.title('Fit Circle')
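# Two circle fits are computed below and the one reporting the smaller residual
# is kept. Note the residuals are on different scales: hyper_fit returns sigma
# (an RMS point-to-circle distance) while least_squares_circle returns a sum of
# squared residuals.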
x1, y1, r1, resid1 = hyper_fit(points[:,[0,2]])
x2, y2, r2, resid2 = least_squares_circle(points[:,[0,2]])
#plot_data_circle(points[:,1], points[:,2],x,y,r)
if resid1>resid2:
x, y, r = x2, y2, r2
else:
x, y, r = x1, y1, r1
self.circle_center = (x, y)
self.circle_radius = r
def getData(chess=True):
pcl_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/*.npy'.format('chess' if chess else 'charuco'))
imgleft_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/left/*.png'.format('chess' if chess else 'charuco'))
imgright_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/right/*.png'.format('chess' if chess else 'charuco'))
pcl_files.sort()
imgleft_files.sort()
imgright_files.sort()
GoodPoints,_3DErros, IMageNames = [],[],[]
for i, file in enumerate(pcl_files):
if globalTrigger:
print('work with {}'.format(file))
image_left = imgleft_files[i]
image_right = imgright_files[i]
filt = PointCloud_filter(file=file, img_file=image_left, img_file2=image_right, debug=False)
filt.setUp()
plt.show()
plt.close()
print('\n OK:{}, Save points_correspondences : {}'.format(filt.OK, np.shape(filt.points_correspondences)))
if filt.OK:
GoodPoints.append(filt.points_correspondences)
print('save data {} '.format(np.shape(GoodPoints)))
_3DErros.append(filt._3DErros)
IMageNames.append(os.path.basename(image_left))
else:
print('Close')
break
#save_obj(GoodPoints, 'GoodPoints2_{}'.format('chess' if chess else 'charuco'))
print('Data saved in GoodPoints')
showErros(_3DErros, IMageNames)
def euler_from_matrix(R):
beta = -np.arcsin(R[2, 0])
alpha = np.arctan2(R[2, 1] / np.cos(beta), R[2, 2] / np.cos(beta))
gamma = np.arctan2(R[1, 0] / np.cos(beta), R[0, 0] / np.cos(beta))
return np.array((alpha, beta, gamma))
def euler_matrix(theta):
R = np.array([[np.cos(theta[1]) * np.cos(theta[2]),
np.sin(theta[0]) * np.sin(theta[1]) * np.cos(theta[2]) - np.sin(theta[2]) * np.cos(theta[0]),
np.sin(theta[1]) * np.cos(theta[0]) * np.cos(theta[2]) + np.sin(theta[0]) * np.sin(
theta[2])],
[np.sin(theta[2]) * np.cos(theta[1]),
np.sin(theta[0]) * np.sin(theta[1]) * np.sin(theta[2]) + np.cos(theta[0]) * np.cos(theta[2]),
np.sin(theta[1]) * np.sin(theta[2]) * np.cos(theta[0]) - np.sin(theta[0]) * np.cos(
theta[2])],
[-np.sin(theta[1]), np.sin(theta[0]) * np.cos(theta[1]),
np.cos(theta[0]) * np.cos(theta[1])]])
return R
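# Small self-check (not part of the original pipeline): euler_matrix and
# euler_from_matrix above use the R = Rz(yaw) * Ry(pitch) * Rx(roll) convention,
# so for pitch angles away from +/-90 degrees the round trip recovers the input.
def _check_euler_round_trip():
    angles = np.deg2rad([1.0, 22.6, -0.9])  # roll, pitch, yaw in radians
    R = euler_matrix(angles)
    recovered = euler_from_matrix(R)
    assert np.allclose(angles, recovered), (angles, recovered)
    return np.rad2deg(recovered)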
class LiDAR_Camera_Calibration(object):
def __init__(self, file, chess = True, debug=True):
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * .1
self.debug = debug
self.file = file
self.chess = chess
if chess:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside',
'icp_finetuned_inside','closest_lidar_points','closest_lidar_points_inside',
'pixelsPoints','Camera_XYZ_Stereo','Camera_XYZ']
else:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside','pixelsPoints',
'Camera_XYZ_Stereo','closest_lidar_points']
self.readIntrinsics()
try:
self.load_points()
except:
print('cannot load data points')
'''self.Rotation = np.array([[ 0.94901505, 0.01681284, 0.3147821 ],
[-0.01003801, 0.99968204, -0.02313113],
[-0.31507091, 0.018792, 0.94888207]]).squeeze()
self.Translation = np.array([[-0.98078971],
[ 0.00600202],
[ 0.19497569]]).squeeze()
#self.Translation[0] = -.64
euler = euler_from_matrix(self.Rotation)
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.Rotation)
print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler[1] = np.deg2rad(22.598)
self.Rotation = euler_matrix(euler)'''
def rmse(self, objp, imgp, K, D, rvec, tvec):
print('objp:{}, imgp:{}'.format(np.shape(objp), np.shape(imgp)))
predicted, _ = cv2.projectPoints(objp, rvec, tvec, K, D)
print('rmse=====================================================')
print('predicted -> {}, type - >{}'.format(np.shape(predicted), type(predicted)))
predicted = cv2.undistortPoints(predicted, K, D, P=K)
predicted = predicted.squeeze()
pix_serr = []
for i in range(len(predicted)):
xp = predicted[i, 0]
yp = predicted[i, 1]
xo = imgp[i, 0]
yo = imgp[i, 1]
pix_serr.append((xp - xo) ** 2 + (yp - yo) ** 2)
ssum = sum(pix_serr)
return math.sqrt(ssum / len(pix_serr))
def readIntrinsics(self):
name = 'inside'
name = 'outside'
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_right = self.camera_model['K_left']
self.K_left = self.camera_model['K_right']
self.D_right = self.camera_model['D_left']
self.D_left = self.camera_model['D_right']
print(' self.K_right')
print( self.K_right)
print(' self.K_left')
print(self.K_left)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.K = self.K_right
self.D = self.D_right
print('self T before {}'.format(np.shape(self.T)))
self.T = np.array([-0.96, 0., 0.12])[:, np.newaxis]
print('self T after {}'.format(np.shape(self.T)))
angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
self.R = euler_matrix(angles)
#-----------------------------------------------------
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#print(self.R)
print('translation is {}-----------------------------'.format(self.T))
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
#print('R1:{}'.format(R1))
#print('R2:{}'.format(R2))
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.R)
print('self.R: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R1)
#print('euler1->{}'.format(euler))
angles = euler_from_matrix(R1)
#print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R2)
#print('euler2->{}'.format(euler))
angles = euler_from_matrix(R2)
#print('rotation2: ', [(180.0 / math.pi) * i for i in angles])
self.R1 = R1
self.R2 = R2
self.P1 = P1
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
print('Got camera intrinsic')
print('Got camera-lidar extrinsics')
def load_points(self):
self.Lidar_3D, self.Image_2D,self.Image_2D2, self.Image_3D,self.Camera_XYZ = [],[],[],[],[]
with open(self.file, 'rb') as f:
self.dataPoinst = pickle.load(f, encoding='latin1')
#with open(self.file,'rb') as f:
#self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
#self.N = 1
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] #N x 3
#pixelsPoints = dictionary_data['pixelsPoints'] #N x 2
#StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] #N x 3
pixelsPointsLeft = dictionary_data['pixelsPointsLeft']
pixelsPointsRight = dictionary_data['pixelsPointsRight']
StereoCam_3D_points = dictionary_data['_3DreconstructedBoard'] #N x 3
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPointsLeft)
self.Image_2D2.append(pixelsPointsRight)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
#print('Cannot read data')
pass
#self.Lidar_3D = np.array(self.Lidar_3D).reshape(-1,3)
#self.Image_2D = np.array(self.Image_2D).reshape(-1,2)
#self.Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_2D:{}, Image_2D2:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),np.shape(self.Image_2D2),
np.shape(self.Image_3D)))
def plotData(self):
self.fig = plt.figure(figsize=plt.figaspect(0.33))
self.fig.tight_layout()
for i in range(self.N):
print('{}/{}'.format(i+1,self.N))
ax1 = self.fig.add_subplot(1, 3, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax2 = self.fig.add_subplot(1, 3, 2, projection='3d')
ax2.set_title('3D Stereo cameras')
ax2.set_xlabel('X', fontsize=8)
ax2.set_ylabel('Y', fontsize=8)
ax2.set_zlabel('Z', fontsize=8)
ax3 = self.fig.add_subplot(1, 3, 3, projection='3d')
ax3.set_title('2D pixels')
ax3.set_xlabel('X', fontsize=8)
ax3.set_ylabel('Y', fontsize=8)
ax3.set_zlabel('Z', fontsize=8)
_3d_LIDAR = np.array(self.Lidar_3D[i])
ax1.scatter(*_3d_LIDAR.T)
self.axisEqual3D(ax1, _3d_LIDAR)
_3d_cam = np.array(self.Image_3D[i])
ax2.scatter(*_3d_cam.T, c='r')
self.axisEqual3D(ax2,_3d_cam)
_2d_cam = np.array(self.Image_2D[i])
ax3.scatter(*_2d_cam.T, c='g')
self.axisEqual3D(ax3, _2d_cam)
plt.show()
def axisEqual3D(self,ax,data):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(data, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def get3D_3D_homography(self, src, dst):  # both are Nx3 matrices
    src_mean = np.mean(src, axis=0)
    dst_mean = np.mean(dst, axis=0)
    # Compute the covariance of the centred point sets (Python 3 port of the
    # original reduce/lambda formulation that only ran on Python 2)
    H = np.zeros((3, 3))
    for a, b in zip(src - src_mean, dst - dst_mean):
        H += np.outer(a, b)
    u, s, v = np.linalg.svd(H)
    R = v.T.dot(u.T)  # Rotation
    T = - R.dot(src_mean) + dst_mean  # Translation
    H = np.hstack((R, T[:, np.newaxis]))  # 3x4 rigid transform
    return H, R.T, T
def calibrate_3D_3D_old(self):
print('3D-3D ========================================================================================')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_3D3D_{}.pkl'.format('chess')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format('chess')
self.Lidar_3D, self.Image_2D, self.Image_3D, self.Camera_XYZ = [], [], [], []
with open(file, 'rb') as f:
self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] # N x 3
pixelsPoints = dictionary_data['pixelsPoints'] # N x 2
StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] # N x 3
#StereoCam_3D_points = dictionary_data['point3D_trianguate']
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPoints)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
print('Cannot read data===================================================')
break
print('Lidar_3D:{}, Image_2D:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),
np.shape(self.Image_3D)))
Lidar_3D = np.array(self.Lidar_3D).reshape(-1, 3)
Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_3D:{}'.format(np.shape(Lidar_3D),np.shape(Image_3D)))
#-------------------------------------#-------------------------------------
c_, R_, t_ = self.estimate(Lidar_3D,Image_3D)
#import superpose3d as super
#(RMSD, R_, t_, c_) = super.Superpose3D(Lidar_3D, Image_3D)
#print('RMSD -> {}, t_{}, c_->{}'.format(RMSD, t_, c_))
# -------------------------------------#-------------------------------------
def similarity_transform(from_points, to_points):
assert len(from_points.shape) == 2, \
"from_points must be a m x n array"
assert from_points.shape == to_points.shape, \
"from_points and to_points must have the same shape"
N, m = from_points.shape
mean_from = from_points.mean(axis=0)
mean_to = to_points.mean(axis=0)
delta_from = from_points - mean_from # N x m
delta_to = to_points - mean_to # N x m
sigma_from = (delta_from * delta_from).sum(axis=1).mean()
sigma_to = (delta_to * delta_to).sum(axis=1).mean()
cov_matrix = delta_to.T.dot(delta_from) / N
U, d, V_t = np.linalg.svd(cov_matrix, full_matrices=True)
cov_rank = np.linalg.matrix_rank(cov_matrix)
S = np.eye(m)
if cov_rank >= m - 1 and np.linalg.det(cov_matrix) < 0:
S[m - 1, m - 1] = -1
elif cov_rank < m - 1:
raise ValueError("colinearility detected in covariance matrix:\n{}".format(cov_matrix))
R = U.dot(S).dot(V_t)
c = (d * S.diagonal()).sum() / sigma_from
t = mean_to - c * R.dot(mean_from)
print('R:{},t:{},c:{}'.format(R,t,c))
return c * R, t
print('similarity_transform===============================')
from_points = Lidar_3D
to_points = Image_3D
M_ans, t_ans = similarity_transform(from_points, to_points)
H, R, T = self.get3D_3D_homography(src = Lidar_3D, dst=Image_3D)
print('H:{}, R:{}, T:{}'.format(np.shape(H), np.shape(R), np.shape(T)))
print(H)
self.fig = plt.figure(figsize=plt.figaspect(1.))
ax1 = self.fig.add_subplot(1, 1, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax1.set_axis_off()
_3d_LIDAR = self.Lidar_3D[0]
ax1.scatter(*_3d_LIDAR.T, label = 'LiDAR')
_3d_Image = self.Image_3D[0]
ax1.scatter(*_3d_Image.T, s=25, label = 'Stereo Cam')
T = _3d_LIDAR.dot(c_ * R_) + t_
print('T -> {}'.format(np.shape(T)))
ax1.scatter(*T.T, marker='x', label='T')
d2 = distance_matrix(_3d_Image,_3d_Image)
print('d2:{}'.format(d2))
print('d2 shape :{}'.format(np.shape(d2)))
ones = np.ones(len(_3d_LIDAR))[:, np.newaxis]
transformed_ = np.hstack((_3d_LIDAR,ones))
transformed = np.dot(H, transformed_.T).T #transformation estimated with SVD
print(np.shape(transformed))
ax1.scatter(*transformed.T, s=25, label = 'ICP sol')
#ax1.set_axis_off()
primary = Lidar_3D# _3d_LIDAR
secondary = Image_3D# _3d_Image
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:, :-1]
X = pad(primary)
Y = pad(secondary)
# Solve the least squares problem X * A = Y # to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y, rcond=None)
transform = lambda x: unpad(np.dot(pad(x), A))
#print transform(primary)
print("Max error:", np.abs(secondary - transform(primary)).max())
trns2 = transform(_3d_LIDAR) #transformation estimated with LS
ax1.scatter(*trns2.T, label = 'least square sol')
to_points = M_ans.dot(_3d_LIDAR.T).T + t_ans
print('to_points ->{}'.format(np.shape(to_points)))
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
from scipy.misc import factorial
from numpy.polynomial.polynomial import polyval as npp_polyval
import math
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute

# Numerical threshold used by group_delay below to detect (near-)singular denominators.
EPSILON = 2e-16
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + .... + b[M]e
H(e ) = ---- = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises the warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] <NAME>, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
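# Illustrative check (not part of the original module): a pure three-sample
# delay H(z) = z**-3 has a constant group delay of 3 samples at all frequencies.
def _demo_group_delay_pure_delay():
    b = [0.0, 0.0, 0.0, 1.0]  # numerator of z**-3
    a = [1.0]
    w, gd = group_delay((b, a), w=8)
    assert np.allclose(gd, 3.0)
    return w, gd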
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
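# Illustrative round trip (not from the original module; it assumes the module's
# `normalize` helper, listed in __all__ but not shown in this excerpt): factor a
# transfer function with tf2zpk and rebuild it with zpk2tf.
def _demo_tf_zpk_round_trip():
    b = [2.0, 1.0]           # numerator coefficients
    a = [1.0, -0.8, 0.15]    # denominator with poles at 0.5 and 0.3
    z, p, k = tf2zpk(b, a)   # z = [-0.5], p = [0.5, 0.3], k = 2.0
    b2, a2 = zpk2tf(z, p, k)
    assert np.allclose(b2, b) and np.allclose(a2, a)
    return z, p, k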
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
import warnings
import numpy as np
from numba import njit
from joblib import Memory
from .utils import loss, compute_covariances
location = './cachedir'
memory = Memory(location, verbose=0)
EPS = 1e-12
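# one_update below implements the closed-form scalar update
#   delta = a^T C^-1 (C_hat - C) C^-1 a / (a^T C^-1 a)^2
# where Rna = C^-1 a is obtained with a linear solve instead of an explicit inverse.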
def one_update(C_hat, C, a):
'''
exact update for the noise/powers
'''
Rna = np.linalg.solve(C, a)
diff = C_hat - C
return np.dot(np.dot(diff, Rna), Rna) / (np.dot(a, Rna)) ** 2
def invert(weighted_cys, sigmas, c_ss, n_jobs=1):
'''
Used to compute A in the m step
'''
M = np.einsum('it, ijk->tkj', sigmas, c_ss)
inv = np.linalg.solve(M, weighted_cys)
return inv
@njit
def compute_w_cys(sigmas, cov_signal_source):
n, p, q = cov_signal_source.shape
w_s = np.zeros((q, p))
for j in range(n):
w_s += cov_signal_source[j].T / sigmas[j, :]
return w_s.T
@njit
def compute_sigmas(A, cov_signal_source, cov_signal_signal,
cov_source_source_s):
n_epochs, p, q = cov_signal_source.shape
sigmas_square = np.zeros((n_epochs, p))
AR_d = np.zeros(p)
AcA_d = np.zeros(p)
# update sigma
for j in range(n_epochs):
c_si_so = cov_signal_source[j]
Acss = A.dot(cov_source_source_s[j].T)
for i in range(p):
AR_d[i] = np.dot(A[i], c_si_so[i])
AcA_d[i] = np.dot(A[i], Acss[i])
sigmas_square[j] = np.diag(cov_signal_signal[j]) - 2 * AR_d
sigmas_square[j] += AcA_d
return sigmas_square
def m_step(cov_source_source_s, cov_signal_source, cov_signal_signal, corr,
avg_noise, A=None):
if avg_noise:
cov_source_source = np.mean(cov_source_source_s, axis=0)
cov_source_source_inv = np.linalg.inv(cov_source_source)
A = cov_signal_source.dot(cov_source_source_inv)
sigmas_square = np.diag(cov_signal_signal - A.dot(cov_signal_source.T))
else:
sigmas_square = compute_sigmas(A, cov_signal_source, cov_signal_signal,
cov_source_source_s)
# update A
weighted_cys = compute_w_cys(sigmas_square, cov_signal_source)
A = invert(weighted_cys, 1. / (sigmas_square + EPS),
cov_source_source_s)
if corr:
source_powers = cov_source_source_s
else:
source_powers = np.diagonal(cov_source_source_s, axis1=1, axis2=2)
return A, sigmas_square, source_powers
@njit
def pairwise_dots1(A, B, op):
n, a, _ = A.shape
_, b, _ = B.shape
for i in range(n):
op[i] = np.dot(A[i], B[i].T)
return op
@njit
def pairwise_dots2(A, B, op):
n, a, _ = A.shape
_, _, b = B.shape
for i in range(n):
op[i] = np.dot(A[i], B[i])
return op
@njit
def one_dots(A, B, op):
n, a, _ = A.shape
_, b = B.shape
for i in range(n):
op[i] = np.dot(A[i], B)
return op
@njit
def compute_matrices_uncorr(A, sigmas, source_powers):
p, q = A.shape
n, _ = sigmas.shape
op = np.zeros((n, q, q))
for i in range(n):
op[i] = np.dot(A.T / sigmas[i, :], A)
for j in range(q):
op[i, j, j] += 1 / source_powers[i, j]
return op
@njit
def compute_matrices_corr(A, sigmas, source_powers):
p, q = A.shape
n, _ = sigmas.shape
op = np.zeros((n, q, q))
for i in range(n):
op[i] = np.dot(A.T / sigmas[i, :], A)
op[i] += np.linalg.pinv(source_powers[i])
return op
def compute_matrices(A, sigmas, source_powers, corr):
if corr:
return compute_matrices_corr(A, sigmas, source_powers)
else:
return compute_matrices_uncorr(A, sigmas, source_powers)
# @profile
def e_step(covs, covs_inv, A, sigmas_square, source_powers, corr, avg_noise):
n_epochs, p, _ = covs.shape
p, q = A.shape
cov_source_source_s = np.zeros((n_epochs, q, q))
cov_signal_source = np.zeros((n_epochs, p, q))
wiener = np.zeros((n_epochs, q, p))
if avg_noise:
cov_signal_signal = np.mean(covs, axis=0)
else:
cov_signal_signal = covs
if avg_noise:
At_sig_A = np.zeros((n_epochs, q, q))
At_sig_A_ = A.T.dot(A / (sigmas_square[:, None] + EPS))
proj = A.T / (sigmas_square[None, :] + EPS)
if not avg_noise:
At_sig_A = compute_matrices(A, sigmas_square, source_powers, corr)
proj = A.T / sigmas_square[:, None, :]
else:
for epoch, source_power in enumerate(source_powers):
if avg_noise:
if corr:
At_sig_A[epoch] = At_sig_A_ + np.linalg.pinv(source_power)
else:
At_sig_A[epoch] = At_sig_A_ + np.diag(1. / source_power)
expected_cov = np.linalg.inv(At_sig_A)
if avg_noise:
wiener = one_dots(expected_cov, proj, wiener)
else:
pairwise_dots2(expected_cov, proj, wiener)
pairwise_dots1(covs, wiener, cov_signal_source)
pairwise_dots2(wiener, cov_signal_source, cov_source_source_s)
cov_source_source_s += expected_cov
if avg_noise:
cov_signal_source = np.mean(cov_signal_source, axis=0)
return cov_source_source_s, cov_signal_source, cov_signal_signal
# @profile
@memory.cache(ignore=['verbose'])
def em_algo(covs, A, sigmas_square, source_powers, corr, avg_noise,
max_iter=10000, verbose=False, tol=1e-7, n_it_min=10,
cd_every=0, n_jobs=1):
'''
    EM algorithm to fit the SMICA model to the covariance matrices covs
'''
n_sensors, n_sources = A.shape
n_mat, _, _ = covs.shape
covs_inv = np.array([np.linalg.inv(cov) for cov in covs])
loss_init = loss(covs, A, sigmas_square, source_powers, avg_noise, corr)
loss_old = loss_init
criterion = 0
do_cd = cd_every != 0
for it in range(max_iter):
cov_source_source_s, cov_signal_source, cov_signal_signal =\
e_step(covs, covs_inv, A, sigmas_square, source_powers, corr,
avg_noise)
A, sigmas_square, source_powers =\
m_step(cov_source_source_s, cov_signal_source, cov_signal_signal,
corr, avg_noise, A)
# CD updates
if do_cd:
if it % cd_every == 0 and not avg_noise:
covs_estimates = compute_covariances(A, source_powers,
sigmas_square)
# Noise updates
for mat in range(n_mat):
for sensor in range(n_sensors):
e_i = np.zeros(n_sensors)
e_i[sensor] = 1.
update = one_update(covs[mat], covs_estimates[mat],
e_i)
new_coef = np.maximum(update + sigmas_square[mat,
sensor],
EPS)
diff = new_coef - sigmas_square[mat, sensor]
sigmas_square[mat, sensor] = new_coef
covs_estimates[mat, sensor, sensor] += diff
# Powers updates
source_powers.setflags(write=1)
for source in range(n_sources):
a = A[:, source]
                    aaT = np.outer(a, a)
import dataclasses
from collections import defaultdict
from itertools import combinations
from typing import List, Tuple
import cv2
import numpy as np
import tensorflow as tf
from distinctipy import distinctipy
from matplotlib import pyplot as plt
from scipy.optimize import minimize
from scipy.signal import find_peaks, peak_widths
from skimage.draw import circle_perimeter, disk, line
from skimage.filters import gaussian
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import KernelDensity
from tensorflow.python.keras.utils.np_utils import to_categorical
from watch_recognition.data_preprocessing import binarize, keypoints_to_angle
from watch_recognition.utilities import Line, Point
def set_shapes(img, target, img_shape=(224, 224, 3), target_shape=(28, 28, 4)):
img.set_shape(img_shape)
target.set_shape(target_shape)
return img, target
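# Hedged usage sketch (not from the original pipeline): after tf.numpy_function the
# static shapes are lost, so `set_shapes` is typically mapped over a tf.data.Dataset
# to restore them. The `_demo_*` helper below is hypothetical.
def _demo_set_shapes():
    images = tf.zeros((4, 224, 224, 3))
    targets = tf.zeros((4, 28, 28, 4))
    dataset = tf.data.Dataset.from_tensor_slices((images, targets)).map(set_shapes)
    return dataset.element_spec  # fully defined TensorSpecs for image and target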
def set_shapes_with_sample_weight(
img, target, weights, img_shape=(224, 224, 3), target_shape=(28, 28, 4)
):
img.set_shape(img_shape)
target.set_shape(target_shape)
weights.set_shape((*target_shape[:-1], 1))
return img, target, weights
def encode_keypoints_to_mask_np(
keypoints,
image_size,
mask_size,
radius=1,
include_background=False,
separate_hour_and_minute_hands: bool = False,
add_perimeter: bool = False,
sparse: bool = False,
with_perimeter_for_hands: bool = False,
blur: bool = False,
hands_as_lines: bool = False,
):
downsample_factor = image_size[0] / mask_size[0]
all_masks = []
points = keypoints[:, :2]
fm_point = points / downsample_factor
int_points = np.floor(fm_point).astype(int)
# center and top
for int_point in int_points[:2]:
mask = _encode_point_to_mask(radius, int_point, mask_size, add_perimeter)
if blur:
mask = _blur_mask(mask)
all_masks.append(mask)
# hour and minute hands
if separate_hour_and_minute_hands:
for int_point in int_points[2:]:
mask = _encode_point_to_mask(
radius, int_point, mask_size, with_perimeter_for_hands
)
if blur:
mask = _blur_mask(mask)
all_masks.append(mask)
else:
if hands_as_lines:
mask = _encode_multiple_points_to_lines(
int_points[2:], int_points[0], mask_size, blur
)
else:
mask = _encode_multiple_points_to_mask(
radius, int_points[2:], mask_size, with_perimeter_for_hands
)
if blur:
mask = _blur_mask(mask)
all_masks.append(mask)
masks = np.array(all_masks).transpose((1, 2, 0))
if include_background:
background_mask = ((np.ones(mask_size) - masks.sum(axis=-1)) > 0).astype(
"float32"
)
background_mask = np.expand_dims(background_mask, axis=-1)
masks = np.concatenate((masks, background_mask), axis=-1)
if sparse:
masks = np.expand_dims(np.argmax(masks, axis=-1), axis=-1)
return masks.astype("float32")
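# Minimal sketch with hypothetical keypoint values: four keypoints in a 224x224 image
# are encoded into a 28x28 target with three channels (center, top, combined hands)
# when `separate_hour_and_minute_hands` and `include_background` keep their defaults.
def _demo_encode_keypoints_to_mask_np():
    keypoints = np.array([
        [112.0, 112.0],  # center
        [112.0, 20.0],   # top of the dial
        [80.0, 112.0],   # hour hand tip
        [112.0, 60.0],   # minute hand tip
    ])
    masks = encode_keypoints_to_mask_np(
        keypoints, image_size=(224, 224), mask_size=(28, 28), radius=2
    )
    return masks.shape  # -> (28, 28, 3)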
def _blur_mask(mask, sigma=3):
mask = gaussian(
mask,
sigma=sigma,
)
mask = mask / (np.max(mask) + 1e-8)
mask = (mask > 0.3).astype(float)
return mask
def _encode_multiple_points_to_lines(int_points, center, mask_size, blur):
masks = []
for int_point in int_points:
mask = np.zeros(mask_size, dtype=np.float32)
# TODO make lines thicker, maybe stronger blur? maybe line_aa?
rr, cc = line(*int_point, *center)
cc, rr = select_rows_and_columns_inside_mask(cc, mask_size, rr)
mask[cc, rr] = 1
if blur:
mask = _blur_mask(mask)
masks.append(mask)
masks = np.stack(masks, axis=-1)
mask = np.max(masks, axis=-1)
return mask
def _encode_multiple_points_to_mask(extent, int_points, mask_size, with_perimeter):
mask = np.zeros(mask_size, dtype=np.float32)
for int_point in int_points:
mask += _encode_point_to_mask(extent, int_point, mask_size, with_perimeter)
masks_clipped = np.clip(mask, 0, 1)
return masks_clipped
def _encode_point_to_mask(radius, int_point, mask_size, with_perimeter: bool = False):
mask = np.zeros(mask_size, dtype=np.float32)
coords = tuple(int_point)
rr, cc = disk(coords, radius)
cc, rr = select_rows_and_columns_inside_mask(cc, mask_size, rr)
mask[cc, rr] = 1
if with_perimeter:
rr, cc = circle_perimeter(*coords, radius)
cc, rr = select_rows_and_columns_inside_mask(cc, mask_size, rr)
mask[cc, rr] = 1
return mask
def encode_keypoints_to_mask(
image,
keypoints,
image_size,
mask_size,
radius,
include_background=True,
separate_hour_and_minute_hands=False,
add_perimeter=False,
sparse=False,
with_perimeter_for_hands: bool = False,
blur: bool = False,
hands_as_lines: bool = False,
):
mask = tf.numpy_function(
func=encode_keypoints_to_mask_np,
inp=[
keypoints,
image_size,
mask_size,
radius,
include_background,
separate_hour_and_minute_hands,
add_perimeter,
sparse,
with_perimeter_for_hands,
blur,
hands_as_lines,
],
Tout=tf.float32,
)
return image, mask
def add_sample_weights(image, label, class_weights: List[float]):
# The weights for each class, with the constraint that:
# sum(class_weights) == 1.0
class_weights_tf = tf.constant(class_weights)
class_weights_tf = class_weights_tf / tf.reduce_sum(class_weights_tf)
# Create an image of `sample_weights` by using the label at each pixel as an
# index into the `class weights` .
sample_weights = tf.gather(class_weights_tf, indices=tf.cast(label, tf.int32))
return image, label, sample_weights
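# Hedged sketch (hypothetical tensors): with class weights [1, 2, 3] each pixel gets
# the normalized weight of its class id.
def _demo_add_sample_weights():
    image = tf.zeros((28, 28, 3))
    label = tf.constant([[0], [2]], dtype=tf.int64)
    _, _, weights = add_sample_weights(image, label, [1.0, 2.0, 3.0])
    return weights  # -> [[1/6], [3/6]]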
def encode_keypoints_to_angle(image, keypoints, bin_size=90):
angle = tf.numpy_function(
func=encode_keypoints_to_angle_np,
inp=[
keypoints,
bin_size,
],
Tout=tf.float32,
)
return image, angle
def encode_keypoints_to_angle_np(keypoints, bin_size=90):
center = keypoints[0, :2]
top = keypoints[1, :2]
angle = keypoints_to_angle(center, top)
angle = binarize(angle, bin_size)
return to_categorical(angle, num_classes=360 // bin_size)
def decode_single_point(mask, threshold=0.1) -> Point:
    # A possibly faster implementation, which also supports batches of outputs:
# https://github.com/OlgaChernytska/2D-Hand-Pose-Estimation-RGB/blob/c9f201ca114129fa750f4bac2adf0f87c08533eb/utils/prep_utils.py#L114
mask = np.where(mask < threshold, np.zeros_like(mask), mask)
if mask.sum() == 0:
mask = np.ones_like(mask)
y_idx, x_idx = np.indices(mask.shape)
x_mask = np.average(x_idx.flatten(), weights=mask.flatten())
y_mask = np.average(y_idx.flatten(), weights=mask.flatten())
return Point(x_mask, y_mask, score=float(mask.flatten().mean()))
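# Minimal sketch: a mask with a single active pixel decodes to that pixel's (x, y)
# location, since the decoder returns the mask-weighted centroid.
def _demo_decode_single_point():
    mask = np.zeros((5, 5), dtype="float32")
    mask[2, 3] = 1.0
    point = decode_single_point(mask)
    return point.x, point.y  # -> (3.0, 2.0)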
def extract_points_from_map(
predicted_map,
detection_threshold=0.5,
text_threshold=0.5,
size_threshold=2,
) -> List[Point]:
"""
Inspired by keras-ocr segmentation to bboxes code
https://github.com/faustomorales/keras-ocr/blob/6473e146dc3fc2c386c595efccb55abe558b2529/keras_ocr/detection.py#L207
Args:
predicted_map:
detection_threshold:
text_threshold:
size_threshold:
Returns:
"""
_, text_score = cv2.threshold(
predicted_map, thresh=text_threshold, maxval=1, type=cv2.THRESH_BINARY
)
n_components, labels, stats, _ = cv2.connectedComponentsWithStats(
np.clip(text_score, 0, 1).astype("uint8"), connectivity=4
)
points = []
for component_id in range(1, n_components):
# Filter by size
size = stats[component_id, cv2.CC_STAT_AREA]
if size < size_threshold:
continue
score = np.max(predicted_map[labels == component_id])
if score < detection_threshold:
continue
segmap = np.where(
labels == component_id, predicted_map, np.zeros_like(predicted_map)
)
box_center = np.array(decode_single_point(segmap).as_coordinates_tuple)
points.append(Point(*box_center, score=float(score)))
return points
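# Hedged sketch (hypothetical blob positions): two bright 2x2 blobs in an otherwise
# empty heatmap are extracted as two Point instances at the blob centroids.
def _demo_extract_points_from_map():
    predicted_map = np.zeros((28, 28), dtype=np.float32)
    predicted_map[5:7, 5:7] = 0.9
    predicted_map[20:22, 10:12] = 0.8
    points = extract_points_from_map(predicted_map)
    return [(p.x, p.y, p.score) for p in points]  # two detections expected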
def convert_mask_outputs_to_keypoints(
predicted: np.ndarray,
return_all_hand_points: bool = False,
experimental_hands_decoding: bool = False,
decode_hands_from_lines: bool = False,
) -> Tuple[Point, ...]:
masks = predicted.transpose((2, 0, 1))
center = decode_single_point(masks[0])
center = dataclasses.replace(center, name="Center")
# Top
top_points = extract_points_from_map(
masks[1],
)
if not top_points:
top_points = [decode_single_point(masks[1])]
top = sorted(top_points, key=lambda x: x.score)[-1]
top = dataclasses.replace(top, name="Top")
# Hands
hands_map = masks[2]
hands_points = extract_points_from_map(
predicted_map=hands_map,
size_threshold=4,
detection_threshold=0.15,
text_threshold=0.15,
)
if return_all_hand_points:
points = (center, top, *hands_points)
return points
if experimental_hands_decoding:
hands = select_hand_points_with_line_fits(center, hands_points)
hour, minute = get_minute_and_hour_points(center, tuple(hands))
points = (center, top, hour, minute)
return points
if decode_hands_from_lines:
hands_points = decode_keypoints_via_line_fits(hands_map, center)
if not hands_points:
hands_points = [Point.none(), Point.none()]
if len(hands_points) == 1:
hands_points = (hands_points[0], hands_points[0])
hands_points = sorted(hands_points, key=lambda x: x.score)[-2:]
hour, minute = get_minute_and_hour_points(center, tuple(hands_points))
hour = dataclasses.replace(hour, name="Hour")
minute = dataclasses.replace(minute, name="Minute")
return center, top, hour, minute
def select_hand_points_with_line_fits(center, hands_points, max_distance=1):
"""
    Finds points that are collinear with the center point to get the hand line lengths,
    then selects the 2 shortest hand lines (to discard the seconds hand).
Args:
center:
hands_points:
max_distance:
Returns:
"""
lines = []
used_points = set()
for a, b in combinations(hands_points, 2):
if a.distance(b) < a.distance(center):
continue
line = Line(a, b)
proj_point = line.projection_point(center)
d = proj_point.distance(center)
if d < max_distance:
lines.append(line)
used_points.add(a)
used_points.add(b)
unused_points = [p for p in hands_points if p not in used_points]
for point in unused_points:
lines.append(Line(point, center))
best_lines = sorted(lines, key=lambda l: l.length)[:2]
hands = []
for line in best_lines:
if line.start.distance(center) > line.end.distance(center):
hands.append(line.start)
else:
hands.append(line.end)
return hands
def poly_area(x, y):
"""https://stackoverflow.com/a/30408825/8814045"""
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
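# Quick check of the shoelace formula above: the unit square has area 1.0.
def _demo_poly_area():
    x = np.array([0.0, 1.0, 1.0, 0.0])
    y = np.array([0.0, 0.0, 1.0, 1.0])
    return poly_area(x, y)  # -> 1.0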
def select_minute_and_hour_points(
center: Point, hand_points: List[Point]
) -> Tuple[Point, Point]:
point_combinations = list(combinations(hand_points, 2))
areas = [
poly_area(np.array([center.x, a.x, b.x]), np.array([center.y, a.y, b.y]))
for a, b in point_combinations
]
sort = np.argsort(areas)
idx = sort[-1]
return point_combinations[idx]
def get_minute_and_hour_points(
center: Point, hand_points: Tuple[Point, Point]
) -> Tuple[Point, Point]:
assert len(hand_points) < 3, "expected max 2 points for hands"
hand_points_np = np.array([p.as_coordinates_tuple for p in hand_points]).reshape(
-1, 2
)
center = np.array(center.as_coordinates_tuple).reshape(1, -1)
distances = euclidean_distances(hand_points_np, center)
hour = hand_points[int(np.argmin(distances))]
minute = hand_points[int(np.argmax(distances))]
return hour, minute
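# Hedged sketch, assuming Point can be built from bare x/y coordinates: the hand tip
# closer to the center is labelled the hour hand, the farther one the minute hand.
def _demo_get_minute_and_hour_points():
    center = Point(0.0, 0.0)
    near, far = Point(1.0, 0.0), Point(3.0, 0.0)
    hour, minute = get_minute_and_hour_points(center, (near, far))
    return hour is near and minute is far  # -> True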
def select_rows_and_columns_inside_mask(cc, mask_size, rr):
row_filter = np.where(
(0 <= rr) & (rr < mask_size[0]),
        np.ones_like(rr),
        np.zeros_like(rr),
    )
    col_filter = np.where(
        (0 <= cc) & (cc < mask_size[1]),
        np.ones_like(cc),
        np.zeros_like(cc),
    )
    # keep only coordinates that fall inside the mask bounds
    inside = (row_filter * col_filter).astype(bool)
    return cc[inside], rr[inside]
import solvers as sol
from AS1_class import Asym_slab
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import pickle
def save():
with open('pickles/wavenumber_c0={}_R1={}_R2={}_K={}_M_A={}.p'.format(
slab.c0, slab.R1, slab.R2, slab.K, slab.M_A), 'wb') as f:
pickle.dump(root_array, f)
slab = Asym_slab(c0=0.6, R1=1.4, R2=1.6, K=None, M_A=1)
x_range = np.linspace(0, 2, 101)
from __future__ import division
import warnings
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_warns, assert_no_warnings)
from ..lomb_scargle_fast import (extirpolate, bitceil, trig_sum,
lomb_scargle_fast)
from .. import LombScargle, LombScargleFast
def _generate_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 20 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
def test_extirpolate():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
def check_result(N, M=5):
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))
for N in [100, None]:
yield check_result, N
def test_extirpolate_with_integers():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
x[:25] = x[:25].astype(int)
y = np.sin(x)
    f = lambda x: np.sin(x / 10)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------#
"""
- Module that should take care of collocation of points or swaths
- Needs input from modules that retrieve from observational platforms
and models
"""
# --- import libraries ------------------------------------------------#
# standard library imports
import numpy as np
import netCDF4
from datetime import datetime, timedelta
import os
import time
from dateutil.relativedelta import relativedelta
import pyresample
from tqdm import tqdm
from copy import deepcopy
# own imports
from wavy.utils import collocate_times
from wavy.utils import make_fc_dates
from wavy.utils import make_pathtofile
from wavy.utils import hour_rounder
from wavy.utils import NoStdStreams
from wavy.utils import make_subdict
from wavy.wconfig import load_or_default
from wavy.modelmod import model_class, make_model_filename_wrapper
from wavy.modelmod import get_model_filedate
from wavy.modelmod import model_class,get_model
from wavy.ncmod import dumptonc_ts_collocation
from wavy.satmod import satellite_class
from wavy.insitumod import insitu_class
# ---------------------------------------------------------------------#
# read yaml config files:
model_dict = load_or_default('model_specs.yaml')
insitu_dict = load_or_default('insitu_specs.yaml')
collocation_dict = load_or_default('collocation_specs.yaml')
variable_info = load_or_default('variable_info.yaml')
flatten = lambda l: [item for sublist in l for item in sublist]
def collocation_fct(obs_lons,obs_lats,model_lons,model_lats):
grid = pyresample.geometry.GridDefinition(\
lats=model_lats, \
lons=model_lons)
# Define some sample points
swath = pyresample.geometry.SwathDefinition(lons=obs_lons,
lats=obs_lats)
# Determine nearest (great circle distance) neighbour in the grid.
valid_input_index, valid_output_index, index_array, distance_array = \
pyresample.kd_tree.get_neighbour_info(
source_geo_def=grid,
target_geo_def=swath,
radius_of_influence=1000000000,
neighbours=1)
# get_neighbour_info() returns indices in the
# flattened lat/lon grid. Compute the 2D grid indices:
index_array_2d = np.unravel_index(index_array, grid.shape)
return index_array_2d, distance_array, valid_output_index
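# Hedged sketch (hypothetical coordinates): two observation points are matched to
# their nearest cells of a small regular model grid; the function returns the 2D
# grid indices and the great-circle distances.
def _demo_collocation_fct():
    model_lons, model_lats = np.meshgrid(np.arange(0., 5.), np.arange(60., 65.))
    obs_lons = np.array([1.2, 3.8])
    obs_lats = np.array([61.1, 63.9])
    idx_2d, distances, _ = collocation_fct(obs_lons, obs_lats,
                                           model_lons, model_lats)
    return idx_2d, distances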
def find_valid_fc_dates_for_model_and_leadtime(fc_dates,model,leadtime):
'''
Finds valid dates that are close to desired dates at a precision
of complete hours
'''
if (leadtime is None or leadtime == 'best'):
fc_dates_new = [hour_rounder(d) for d in fc_dates]
else:
fc_dates_new = [hour_rounder(d) for d in fc_dates \
if get_model_filedate(model,d,leadtime) != False]
return fc_dates_new
def check_if_file_is_valid(fc_date,model,leadtime):
fname = make_model_filename_wrapper(model,fc_date,leadtime)
print('Check if requested file:\n',fname,'\nis available and valid')
try:
nc = netCDF4.Dataset(fname,mode='r')
time = nc.variables['time']
dt = netCDF4.num2date(time[:],time.units)
if fc_date in list(dt):
print('File is available')
return True
else:
print('Desired date ' + str(fc_date) + ' is not in', fname)
return False
except (FileNotFoundError, OSError) as e:
print('File is not available')
print(e)
return False
def get_closest_date(overdetermined_lst,target_lst):
idx = []
for i in range(len(target_lst)):
diffs=np.abs( [ ( target_lst[i]
- overdetermined_lst[j] ).total_seconds()
for j in range(len(overdetermined_lst)) ] )
mindiff= np.min(diffs)
idx.append(list(diffs).index(mindiff))
return idx
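# Minimal sketch: for each target time the index of the closest candidate time is
# returned, here 07:00 -> 06:00 (index 1) and 20:00 -> 18:00 (index 3).
def _demo_get_closest_date():
    candidates = [datetime(2021, 1, 1, h) for h in (0, 6, 12, 18)]
    targets = [datetime(2021, 1, 1, 7), datetime(2021, 1, 1, 20)]
    return get_closest_date(candidates, targets)  # -> [1, 3]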
def collocate_station_ts(obs_obj=None,model=None,distlim=None,\
leadtime=None,date_incr=None):
"""
    Collocate model output with an in-situ station time series for the given
    lead time and date increment.
"""
fc_date = make_fc_dates(obs_obj.sdate,obs_obj.edate,date_incr)
# get coinciding date between fc_date and dates in obs_obj
idx1 = collocate_times( unfiltered_t = obs_obj.vars['datetime'],
target_t = fc_date, twin = obs_obj.twin )
# find valid/coinciding fc_dates
if len(idx1) > len(fc_date):
print('Muliple assignments within given time window')
print('--> only closest to time stamp is chosen')
idx_closest = get_closest_date(\
list(np.array(obs_obj.vars['datetime'])[idx1]),\
fc_date)
idx1 = list(np.array(idx1)[idx_closest])
# adjust obs_obj according to valid dates
for key in obs_obj.vars.keys():
if (key != 'time_unit' and key !='meta'):
obs_obj.vars[key] = list(np.array(obs_obj.vars[key])[idx1])
# adjust again assumed fc_dates by filtered obs dates
fc_date = obs_obj.vars['datetime']
# find valid dates for given leadtime and model
fc_date = find_valid_fc_dates_for_model_and_leadtime(\
fc_date,model,leadtime)
# check if file exists and if it includes desired time
# if not check next possible file
check = False
for d in range(len(fc_date)):
check = check_if_file_is_valid(fc_date[d],model,leadtime)
if check == True:
break
if check == True:
mc_obj = model_class( model=model,
fc_date=fc_date[d],
leadtime=leadtime,
varalias=obs_obj.varalias)
col_obj = collocation_class( mc_obj_in=mc_obj,
obs_obj_in=obs_obj,
distlim=distlim )
model_vals = [col_obj.vars['model_values'][0]]
tmpdate = hour_rounder(col_obj.vars['datetime'][0])
model_datetime = [ datetime(tmpdate.year,
tmpdate.month,
tmpdate.day,
tmpdate.hour) ]
model_time = [netCDF4.date2num(model_datetime[0],
units=col_obj.vars['time_unit'])]
if check == False:
print('No valid model file available!')
else:
print('Collocating and appending values ...')
for i in tqdm(range(d+1,len(fc_date))):
with NoStdStreams():
try:
check = check_if_file_is_valid(fc_date[i],model,leadtime)
if check == False:
raise FileNotFoundError
mc_obj = model_class( model=model,
fc_date=fc_date[i],
leadtime=leadtime,
varalias=obs_obj.varalias )
model_vals.append(
mc_obj.vars[\
mc_obj.stdvarname][ \
col_obj.vars['collocation_idx_x'],\
col_obj.vars['collocation_idx_y']\
][0] )
model_time.append(mc_obj.vars['time'][0])
model_datetime.append( datetime(\
mc_obj.vars['datetime'][0].year,
mc_obj.vars['datetime'][0].month,
mc_obj.vars['datetime'][0].day,
mc_obj.vars['datetime'][0].hour ) )
except FileNotFoundError as e:
print(e)
# potentially there are different number of values
# for obs and model
# double check and use only coherent datetimes
idx2 = collocate_times( model_datetime,
target_t = obs_obj.vars['datetime'],
twin = obs_obj.twin)
col_obj.vars['model_values'] = list(np.array(\
model_vals)[idx2])
col_obj.vars['time'] = list(np.array(model_time)\
[idx2])
col_obj.vars['datetime'] = list(np.array(\
model_datetime)[idx2])
idx3 = collocate_times( \
unfiltered_t = obs_obj.vars['datetime'],
target_t = col_obj.vars['datetime'],
twin = obs_obj.twin)
col_obj.vars['obs_values'] = list(np.array(
obs_obj.vars[
obs_obj.stdvarname
])[idx3])
# valid_date is meaningless for ts application and set to None
col_obj.vars['valid_date'] = None
# inflate length of constant sized variables
col_obj.vars['distance'] = col_obj.vars['distance']*\
len(col_obj.vars['datetime'])
col_obj.vars['obs_lats'] = col_obj.vars['obs_lats']*\
len(col_obj.vars['datetime'])
col_obj.vars['obs_lons'] = col_obj.vars['obs_lons']*\
len(col_obj.vars['datetime'])
col_obj.vars['collocation_idx_x'] = col_obj.vars['collocation_idx_x']*\
len(col_obj.vars['datetime'])
col_obj.vars['collocation_idx_y'] = col_obj.vars['collocation_idx_y']*\
len(col_obj.vars['datetime'])
col_obj.vars['model_lats'] = col_obj.vars['model_lats']*\
len(col_obj.vars['datetime'])
col_obj.vars['model_lons'] = col_obj.vars['model_lons']*\
len(col_obj.vars['datetime'])
results_dict = col_obj.vars
return results_dict
def collocate_satellite_ts(obs_obj=None,model=None,distlim=None,\
leadtime=None,date_incr=None):
"""
    Collocate model output with a satellite altimeter time series for the given
    lead time and date increment.
"""
fc_date = make_fc_dates(obs_obj.sdate,obs_obj.edate,date_incr)
fc_date = find_valid_fc_dates_for_model_and_leadtime(\
fc_date,model,leadtime)
results_dict = {
'valid_date':[],
'time':[],
'time_unit':obs_obj.vars['time_unit'],
'datetime':[],
'distance':[],
'model_values':[],
'model_lons':[],
'model_lats':[],
'obs_values':[],
'obs_lons':[],
'obs_lats':[],
'collocation_idx_x':[],
'collocation_idx_y':[],
}
for i in tqdm(range(len(fc_date))):
# for i in range(len(fc_date)):
# for f in range(1):
with NoStdStreams():
# for t in range(1):
try:
# filter needed obs within time period
idx = collocate_times( obs_obj.vars['datetime'],
target_t = [fc_date[i]],
twin = obs_obj.twin )
# make tmp obs_obj with filtered data
obs_obj_tmp = deepcopy(obs_obj)
obs_obj_tmp.vars['time'] = list(\
np.array(obs_obj.vars['time'])[idx] )
obs_obj_tmp.vars['latitude'] = list(\
np.array(obs_obj.vars['latitude'])[idx] )
obs_obj_tmp.vars['longitude'] = list(\
np.array(obs_obj.vars['longitude'])[idx] )
obs_obj_tmp.vars[obs_obj.stdvarname] = \
list(np.array(\
obs_obj.vars[obs_obj.stdvarname])[idx] )
vardict,_,_,_,_ = get_model(model=model,
fc_date=fc_date[i],
varalias=obs_obj.varalias,
leadtime=leadtime)
results_dict_tmp = collocate_field(\
datein=fc_date[i],\
model_lats=vardict['latitude'],\
model_lons=vardict['longitude'],\
model_vals=vardict[obs_obj.stdvarname],\
obs_obj=obs_obj_tmp,\
distlim=distlim )
# append to dict
results_dict['valid_date'].append(fc_date[i])
results_dict['time'].append(results_dict_tmp['time'])
results_dict['datetime'].append(results_dict_tmp['datetime'])
results_dict['distance'].append(results_dict_tmp['distance'])
results_dict['model_values'].append(results_dict_tmp['model_values'])
results_dict['model_lons'].append(results_dict_tmp['model_lons'])
results_dict['model_lats'].append(results_dict_tmp['model_lats'])
results_dict['obs_values'].append(results_dict_tmp['obs_values'])
results_dict['obs_lats'].append(results_dict_tmp['obs_lats'])
results_dict['obs_lons'].append(results_dict_tmp['obs_lons'])
results_dict['collocation_idx_x'].append(\
results_dict_tmp['collocation_idx_x'])
results_dict['collocation_idx_y'].append(\
results_dict_tmp['collocation_idx_y'])
if 'results_dict_tmp' in locals():
del results_dict_tmp
except (ValueError,FileNotFoundError,OSError) as e:
# ValueError, pass if no collocation
# FileNotFoundError, pass if file not accessible
# OSError, pass if file not accessible from thredds
print(e)
# flatten all aggregated entries
results_dict['time'] = flatten(results_dict['time'])
results_dict['datetime'] = flatten(results_dict['datetime'])
results_dict['distance'] = flatten(results_dict['distance'])
results_dict['model_values'] = flatten(results_dict['model_values'])
results_dict['model_lons'] = flatten(results_dict['model_lons'])
results_dict['model_lats'] = flatten(results_dict['model_lats'])
results_dict['obs_values'] = flatten(results_dict['obs_values'])
results_dict['obs_lats'] = flatten(results_dict['obs_lats'])
results_dict['obs_lons'] = flatten(results_dict['obs_lons'])
results_dict['collocation_idx_x'] = flatten(\
results_dict['collocation_idx_x'])
results_dict['collocation_idx_y'] = flatten(\
results_dict['collocation_idx_y'])
return results_dict
def collocate_field(mc_obj=None,obs_obj=None,col_obj=None,distlim=None,
datein=None,model_lats=None,model_lons=None,
model_vals=None):
"""
    Collocate observations with a single model field, taken either from a
    model_class object or from explicitly passed model lats/lons/values.
"""
if mc_obj is not None:
datein = netCDF4.num2date(mc_obj.vars['time'],mc_obj.vars['time_unit'])
model_lats = mc_obj.vars['latitude']
model_lons = mc_obj.vars['longitude']
model_vals = mc_obj.vars[mc_obj.stdvarname]
dtime = netCDF4.num2date(obs_obj.vars['time'],
obs_obj.vars['time_unit'])
if isinstance(dtime,np.ndarray):
dtime = list(dtime)
if isinstance(datein,np.ndarray):
datein = list(datein)
if isinstance(datein,datetime):
datein = [datein]
cidx = collocate_times(dtime,target_t=datein,twin=obs_obj.twin)
obs_time_dt = np.array(dtime)[cidx]
obs_time_dt = [datetime(t.year,t.month,t.day,
t.hour,t.minute,t.second)
for t in obs_time_dt]
datein = [datetime(t.year,t.month,t.day,
t.hour,t.minute,t.second)
for t in datein]
obs_time = np.array(obs_obj.vars['time'])[cidx]
obs_time_unit = obs_obj.vars['time_unit']
# Compare wave heights of satellite with model with
# constraint on distance and time frame
# 1. time constraint
obs_lats = np.array(obs_obj.vars['latitude'])[cidx]
obs_lons = np.array(obs_obj.vars['longitude'])[cidx]
obs_vals = np.array(obs_obj.vars[obs_obj.stdvarname])[cidx]
if distlim == None:
distlim = 6
if (col_obj is None):
print ("No collocation idx available")
print (len(obs_time_dt),"footprints to be collocated")
print ("Perform collocation with distance limit\n",\
"distlim:",distlim)
index_array_2d, distance_array, _ =\
collocation_fct(
obs_lons, obs_lats,
model_lons, model_lats)
# caution: index_array_2d is tuple
# impose distlim
dist_idx = np.where( (distance_array<distlim*1000)&\
(~np.isnan(\
model_vals[index_array_2d[0],\
index_array_2d[1]])) )[0]
idx_x = index_array_2d[0][dist_idx]
idx_y = index_array_2d[1][dist_idx]
results_dict = {
'valid_date':datein,
'time':list(obs_time[dist_idx]),
'time_unit':obs_time_unit,
'datetime':list(np.array(obs_time_dt)[dist_idx]),
'distance':list(distance_array[dist_idx]),
'model_values':list(model_vals[idx_x,\
idx_y]),
'model_lons':list(model_lons[idx_x,\
idx_y]),
'model_lats':list(model_lats[idx_x,\
idx_y]),
'obs_values':list(obs_vals[dist_idx]),
'obs_lons':list(obs_lons[dist_idx]),
'obs_lats':list(obs_lats[dist_idx]),
'collocation_idx_x':list(idx_x),
'collocation_idx_y':list(idx_y),
}
elif (col_obj is not None and \
len(col_obj.vars['collocation_idx'][0]) > 0):
print("Collocation idx given through collocation_class object")
results_dict = col_obj.vars
results_dict['model_values'] = list(\
model_vals[\
col_obj.vars['collocation_idx_x'],
col_obj.vars['collocation_idx_y'] ])
return results_dict
def collocate(mc_obj=None,obs_obj=None,col_obj=None,
model=None,distlim=None,leadtime=None,date_incr=None):
"""
get obs value for model value for given
temporal and spatial constraints
"""
if (len(obs_obj.vars[obs_obj.stdvarname]) < 1):
raise Exception ( '\n###\n'
+ 'Collocation not possible, '
+ 'no observation values for collocation!'
+ '\n###'
)
if ((mc_obj is None or len(mc_obj.vars[mc_obj.stdvarname]) < 1)
and model is None):
raise Exception ( '\n###\n'
+ 'Collocation not possible, '
+ 'no model values available for collocation!'
+ '\n###'
)
if (mc_obj is None and model is not None and obs_obj is not None\
and isinstance(obs_obj,insitu_class)):
results_dict = collocate_station_ts(obs_obj=obs_obj,
model=model,\
distlim=distlim,\
leadtime=leadtime,\
date_incr=date_incr)
elif (mc_obj is None and model is not None and obs_obj is not None\
and isinstance(obs_obj,satellite_class)):
results_dict = collocate_satellite_ts(obs_obj=obs_obj,
model=model,\
distlim=distlim,\
leadtime=leadtime,\
date_incr=date_incr)
else:
results_dict = collocate_field( mc_obj=mc_obj,\
obs_obj=obs_obj,\
col_obj=col_obj,\
distlim=distlim )
return results_dict
class collocation_class():
'''
draft of envisioned collocation class object
'''
def __init__(self,mc_obj_in=None,obs_obj_in=None,
col_obj_in=None,model=None,distlim=None,leadtime=None,
date_incr=1):
print('# ----- ')
print(" ### Initializing collocation_class object ###")
print(" ")
# make clones to prevent overwriting
mc_obj = deepcopy(mc_obj_in)
obs_obj = deepcopy(obs_obj_in)
col_obj = deepcopy(col_obj_in)
if isinstance(obs_obj,satellite_class):
self.obsname = obs_obj.mission
self.mission = obs_obj.mission
self.obstype = "satellite_altimeter"
self.region = obs_obj.region
if isinstance(obs_obj,insitu_class):
obs_obj.twin = insitu_dict[obs_obj.nID].get('twin',None)
self.obsname = obs_obj.nID + '_' + obs_obj.sensor
self.obstype = 'insitu'
self.nID = obs_obj.nID
self.sensor = obs_obj.sensor
if mc_obj is not None:
model = mc_obj.model
# define class variables
self.sdate = obs_obj.sdate
self.edate = obs_obj.edate
self.model = model
self.varalias = obs_obj.varalias
self.stdvarname = obs_obj.stdvarname
self.units = variable_info[self.varalias].get('units')
self.leadtime = leadtime
if leadtime is None:
self.leadtime = 'best'
self.leadtimestr = 'best'
elif isinstance(self.leadtime,str):
self.leadtime = leadtime
leadtimestr = leadtime
self.leadtimestr = leadtime
else:
leadtimestr="{:0>3d}".format(self.leadtime)
self.leadtime = leadtime
self.leadtimestr = leadtimestr
# get vars dictionary
print(" ")
print(" ## Collocate ... ")
# for t in range(1):
try:
t0=time.time()
results_dict = collocate(mc_obj=mc_obj,
obs_obj=obs_obj,
col_obj=col_obj,
model=model,
distlim=distlim,
leadtime=self.leadtime,
date_incr=date_incr)
self.vars = results_dict
self.fc_date = results_dict['datetime']
t1=time.time()
print(" ")
print(" ## Summary:")
print(len(self.vars['time'])," values collocated.")
print("Time used for collocation:",round(t1-t0,2),"seconds")
print(" ")
print (" ### Collocation_class object initialized ###")
except Exception as e:
print(e)
self.error = e
print ("! No collocation_class object initialized !")
# add class variables
print ('# ----- ')
def quicklook(self,m=True,ts=True,projection=None):
if m:
import cartopy.crs as ccrs
import cmocean
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
lons = self.vars['obs_lons']
lats = self.vars['obs_lats']
var = self.vars['obs_values']
if projection is None:
projection = ccrs.PlateCarree()
lonmax,lonmin = np.max(lons),np.min(lons)
latmax,latmin = np.max(lats),np.min(lats)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=projection)
ax.set_extent( [lonmin, lonmax,latmin, latmax],
crs = projection )
sc = ax.scatter(lons,lats,s=10,
c = var,
marker='o', edgecolor = 'face',
cmap=cmocean.cm.amp,
transform=ccrs.PlateCarree())
axins = inset_axes(ax,
width="5%", # width = 5% of parent_bbox width
height="100%", # height : 50%
loc='lower left',
bbox_to_anchor=(1.01, 0., 1, 1),
bbox_transform=ax.transAxes,
borderpad=0,
)
fig.colorbar(sc, cax=axins, label=self.varalias
+ ' [' + self.units + ']')
ax.coastlines()
gl = ax.gridlines(draw_labels=True,crs=projection,
linewidth=1, color='grey', alpha=0.4,
linestyle='-')
gl.top_labels = False
gl.right_labels = False
plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
ax.set_title(self.obsname + ' (' + self.obstype + ')\n'
+ 'from ' + str(self.vars['datetime'][0])
+ ' to ' + str(self.vars['datetime'][-1]))
#fig.suptitle('', fontsize=16) # unused
plt.show()
if ts:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
fig = plt.figure(figsize=(9,3.5))
ax = fig.add_subplot(111)
colors = ['k','orange']
if self.obstype == 'insitu':
ax.plot(self.vars['datetime'],self.vars['obs_values'],
linestyle='None',color=colors[0],
label='obs ( ' + self.nID + ' - '
+ self.sensor + ' )',
marker='o',alpha=.5,ms=2)
elif self.obstype == 'satellite_altimeter':
ax.plot(self.vars['datetime'],self.vars['obs_values'],
linestyle='None',color=colors[0],
label='obs (' + self.mission + ')',
marker='o',alpha=.5,ms=2)
ax.plot(self.vars['datetime'],self.vars['model_values'],
linestyle='None',color=colors[1],
label='model (' + self.model + ')',
marker='o',alpha=.8,ms=2)
plt.ylabel(self.varalias + '[' + self.units + ']')
plt.legend(loc='best')
plt.tight_layout()
#ax.set_title()
plt.show()
def write_to_nc(self,pathtofile=None,file_date_incr=None):
if 'error' in vars(self):
print('Erroneous collocation_class file detected')
print('--> dump to netCDF not possible !')
else:
tmpdate = self.sdate
edate = self.edate
while tmpdate <= edate:
if pathtofile is None:
path_template = collocation_dict[self.obstype]\
['dst']\
['path_template'][0]
file_template = collocation_dict[self.obstype]\
['dst']\
['file_template']
strsublst = collocation_dict[self.obstype]\
['dst']['strsub']
subdict = make_subdict(strsublst,
class_object_dict=vars(self))
if 'filterData' in vars(self).keys():
file_template = 'filtered_' + file_template
tmppath = os.path.join(path_template,file_template)
if isinstance(self.leadtime,str):
leadtimestr=self.leadtime
else:
leadtimestr="{:0>3d}h".format(self.leadtime)
if self.obstype=='insitu':
pathtofile = make_pathtofile(tmppath,strsublst,
subdict,date=tmpdate)
elif self.obstype=='satellite_altimeter':
pathtofile = make_pathtofile(tmppath,strsublst,
subdict,date=tmpdate)
if self.obstype=='insitu':
title = ( 'Collocation of ' + self.stdvarname
+ ' observations from '
+ self.nID + ' ' + self.sensor
+ ' vs ' + self.model)
elif self.obstype=='satellite_altimeter':
title = ( 'Collocation of ' + self.stdvarname
+ ' observations from ' + self.mission
+ ' vs ' + self.model)
dumptonc_ts_collocation(self,pathtofile,title)
# determine date increment
if file_date_incr is None:
file_date_incr = collocation_dict[self.obstype]\
['dst'].get('file_date_incr','m')
if file_date_incr == 'm':
tmpdate += relativedelta(months = +1)
elif file_date_incr == 'Y':
tmpdate += relativedelta(years = +1)
elif file_date_incr == 'd':
tmpdate += timedelta(days = +1)
return
def validate_collocated_values(self,**kwargs):
dtime = self.vars['datetime']
mods = self.vars['model_values']
obs = self.vars['obs_values']
sdate = self.vars['datetime'][0]
edate = self.vars['datetime'][-1]
validation_dict = validate_collocated_values(
dtime,obs,mods,\
sdate=sdate,edate=edate,\
**kwargs)
return validation_dict
def validate_collocated_values(dtime,obs,mods,**kwargs):
target_t, sdate, edate, twin = None, None, None, None
    if ('col_obj' in kwargs.keys() and kwargs['col_obj'] is not None):
        col_obj = kwargs['col_obj']
        mods = col_obj.vars['model_values']
        obs = col_obj.vars['obs_values']
        dtime = col_obj.vars['datetime']
# get idx for date and twin
if 'target_t' in kwargs.keys():
target_t = kwargs['target_t']
if 'sdate' in kwargs.keys():
sdate = kwargs['sdate']
if 'edate' in kwargs.keys():
edate = kwargs['edate']
if 'twin' in kwargs.keys():
twin = kwargs['twin']
idx = collocate_times(dtime,
target_t=target_t,
sdate=sdate,
edate=edate,
twin=twin)
mods = np.array(mods)[idx]
    obs = np.array(obs)[idx]
# -*- coding: utf-8 -*-
def get_colors(f, do_shuffle=True):
from numpy import array
try:
import Image
except Exception:
from PIL import Image
im = Image.open(f)
data = array(list(im.convert('RGB').getdata()),'float')/255.0
res = []
for rgb in data:
res.append(list(rgb))
if do_shuffle:
from numpy.random import shuffle
shuffle(res)
return res
def get_img_as_rgb_array(f):
from PIL import Image
from numpy import array
from numpy import reshape
im = Image.open(f)
w,h = im.size
data = array(list(im.convert('RGB').getdata()), 'float')/255.0
    return reshape(data, (w, h, 3))
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Orthogonal IV for Heterogeneous Treatment Effects.
A Double/Orthogonal machine learning approach to estimation of heterogeneous
treatment effect with an endogenous treatment and an instrument. It
implements the DMLIV and related algorithms from the paper:
Machine Learning Estimation of Heterogeneous Treatment Effects with Instruments
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
https://arxiv.org/abs/1905.10176
"""
import numpy as np
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from ..._ortho_learner import _OrthoLearner
from ..._cate_estimator import LinearModelFinalCateEstimatorMixin, StatsModelsCateEstimatorMixin
from ...inference import StatsModelsInference
from ...sklearn_extensions.linear_model import StatsModelsLinearRegression
from ...utilities import (_deprecate_positional, add_intercept, filter_none_kwargs,
inverse_onehot, get_feature_names_or_default)
from .._nuisance_wrappers import _FirstStageWrapper, _FinalWrapper
class _BaseDRIVModelFinal:
"""
Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
that depends on X, i.e.
.. math ::
Y - \\E[Y | X] = \\theta(X) \\cdot (\\E[T | X, Z] - \\E[T | X]) + \\epsilon
and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
residual on residual regression.
"""
def __init__(self, model_final, featurizer,
discrete_treatment, discrete_instrument,
fit_cate_intercept, cov_clip, opt_reweighted):
self._model_final = clone(model_final, safe=False)
self._fit_cate_intercept = fit_cate_intercept
self._original_featurizer = clone(featurizer, safe=False)
self._discrete_treatment = discrete_treatment
self._discrete_instrument = discrete_instrument
if self._fit_cate_intercept:
add_intercept_trans = FunctionTransformer(add_intercept,
validate=True)
if featurizer:
self._featurizer = Pipeline([('featurize', self._original_featurizer),
('add_intercept', add_intercept_trans)])
else:
self._featurizer = add_intercept_trans
else:
self._featurizer = self._original_featurizer
self._cov_clip = cov_clip
self._opt_reweighted = opt_reweighted
def _effect_estimate(self, nuisances):
prel_theta, res_t, res_y, res_z, cov = [nuisance.reshape(nuisances[0].shape) for nuisance in nuisances]
# Estimate final model of theta(X) by minimizing the square loss:
# (prel_theta(X) + (Y_res - prel_theta(X) * T_res) * Z_res / cov[T,Z | X] - theta(X))^2
# We clip the covariance so that it is bounded away from zero, so as to reduce variance
# at the expense of some small bias. For points with very small covariance we revert
# to the model-based preliminary estimate and do not add the correction term.
cov_sign = np.sign(cov)
cov_sign[cov_sign == 0] = 1
        clipped_cov = cov_sign * np.clip(np.abs(cov), self._cov_clip, np.inf)
import matplotlib
matplotlib.use('Agg')
import pickle
import os
#import ipdb
import statsmodels.stats.power as smp
from rectify_vars_and_wald_functions import *
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.insert(1, '../../../le_experiments/')
# print(data)
import numpy as np
import os
from scipy import stats
from matplotlib.pyplot import figure
import glob
import numpy as np
import read_config
from output_format import H_ALGO_ACTION_FAILURE, H_ALGO_ACTION_SUCCESS, H_ALGO_ACTION, H_ALGO_OBSERVED_REWARD
from output_format import H_ALGO_ESTIMATED_MU, H_ALGO_ESTIMATED_V, H_ALGO_ESTIMATED_ALPHA, H_ALGO_ESTIMATED_BETA
from output_format import H_ALGO_PROB_BEST_ACTION, H_ALGO_NUM_TRIALS
import beta_bernoulli
#import thompson_policy
from pathlib import Path
EPSILON_PROB = .000001
DESIRED_POWER = 0.8
DESIRED_ALPHA = 0.05
SMALL_SIZE = 10
MEDIUM_SIZE = 10
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=8.5) # fontsize of the tick labels
plt.rc('ytick', labelsize=10) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def hist_pval(df = None, to_check = None, to_check_unif = None, to_check_ts = None, n = None, num_sims = None, load_df = True, \
title = None, plot = True):
'''
TODO rename to_check_ipw to to_check_ipw_wald_stat
'''
if load_df == True:
with open(to_check, 'rb') as f:
df = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
with open(to_check_ts, 'rb') as f:
df_ts = pickle.load(f)
#print(data)
step_sizes = df['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
if plot == True:
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
percenticle_dict_left = {}
percentile_dict_right = {}
for num_steps in step_sizes:
df_unif_for_num_steps = df_unif[df_unif['num_steps'] == num_steps]
df_for_num_steps = df[df['num_steps'] == num_steps]
df_ts_for_num_steps = df_ts[df_ts['num_steps'] == num_steps]
mle_mean1 = np.mean(df_for_num_steps['mean_1'])
mle_mean2 = np.mean(df_for_num_steps['mean_2'])
unif_mean1 = np.mean(df_unif_for_num_steps['mean_1'])
unif_mean2 = np.mean(df_unif_for_num_steps['mean_2'])
df_for_num_steps_pval = df_for_num_steps['pvalue']
df_unif_for_num_steps_pval = df_unif_for_num_steps['pvalue']
df_ts_for_num_steps_pval = df_ts_for_num_steps['pvalue']
# df_unif_for_num_steps = np.ma.masked_invalid(df_unif_for_num_steps)
#print(np.mean(df_unif_for_num_steps))
if plot == True:
#ax[i].hist(df_unif_for_num_steps, density = True)
ax[i].hist(df_unif_for_num_steps_pval, normed = False, alpha = 0.5, \
label = "Uniform")
ax[i].hist(df_for_num_steps_pval, \
normed = False, alpha = 0.5, \
label = "Epsilon Greedy")
ax[i].hist(df_ts_for_num_steps_pval, \
normed = False, alpha = 0.5, \
label = "Thompson Sampling")
ax[i].set_xlabel("Pvalue for number of participants = {} = {}".format(size_vars[i], num_steps))
# mu = 0
# variance = 1
# sigma = np.sqrt(variance)
# x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
# ax[i].plot(x, stats.norm.pdf(x, mu, sigma))
ax[i].legend()
i+=1
if plot == True:
fig.suptitle(title)
fig.tight_layout(rect=[0, 0.03, 1, 0.90])
# if not os.path.isdir("plots"):
# os.path.mkdir("plots")
print("saving to ", "pval_hist/{}.png".format(title))
fig.savefig("pval_hist/{}.png".format(title))
#plt.show()
plt.clf()
plt.close()
def create_models_binary(actions_df, prior, num_actions):
assert num_actions == 2
all_models = []
cache_keys = [[] for _ in range(actions_df.shape[0])]
action = 0
# print(actions_df.loc[:,H_ALGO_ACTION_SUCCESS.format(action + 1)])
# print('Failures------------')
#print(actions_df.loc[:,H_ALGO_ACTION_FAILURE.format(action + 1)])
for action in range(num_actions):
[cache_keys[i].extend((successes,failures)) for (i,successes,failures) in zip(range(actions_df.shape[0]),actions_df.loc[:,H_ALGO_ACTION_SUCCESS.format(action + 1)],actions_df.loc[:,H_ALGO_ACTION_FAILURE.format(action + 1)])]
# print((successes, failures)\
# for (successes,failures) in\
# zip(actions_df.loc[:,H_ALGO_ACTION_SUCCESS.format(action + 1)],\
# actions_df.loc[:,H_ALGO_ACTION_FAILURE.format(action + 1)]))
cur_models = [beta_bernoulli.BetaBern(successes, failures)\
for (successes,failures) in\
zip(actions_df.loc[:,H_ALGO_ACTION_SUCCESS.format(action + 1)],\
actions_df.loc[:,H_ALGO_ACTION_FAILURE.format(action + 1)])]
# add in the one for the prior
cur_models.insert(0, beta_bernoulli.BetaBern(prior[0], prior[1]))
all_models.append(cur_models)
# Add in a cache key for the prior
cache_keys.insert(0, prior*num_actions)
return all_models,cache_keys
def plot_hist_and_table(df_for_num_steps_eg0pt1, df_for_num_steps_eg0pt3, df_for_num_steps_ts, df_for_num_steps_unif, num_steps, epsilon, n):
fig_h, ax_h = plt.subplots()
proportions_unif = df_for_num_steps_unif['sample_size_1'] / num_steps
proportions_eg0pt1 = df_for_num_steps_eg0pt1['sample_size_1'] / num_steps
proportions_eg0pt3 = df_for_num_steps_eg0pt3['sample_size_1'] / num_steps
proportions_ts = df_for_num_steps_ts['sample_size_1'] / num_steps
ax_h.hist(proportions_eg0pt1, alpha = 0.5, label = "Epsilon Greedy 0.1")
ax_h.hist(proportions_eg0pt3, alpha = 0.5, label = "Epsilon Greedy 0.3")
ax_h.hist(proportions_unif, alpha = 0.5, label = "Uniform Random")
ax_h.hist(proportions_ts, alpha = 0.5, label = "Thompson Sampling")
ax_h.legend()
fig_h.suptitle("Histogram of Proportion of {} Participants Assigned to Condition 1 Across 500 Simulations".format(num_steps))
# rows = ["Areferg"]
# columns = ["Berger"]
# cell_text = ["ergerg"]
# the_table = ax_h.table(cellText=cell_text,
# rowLabels=rows,
# colLabels=columns,
# loc='right')
# fig_h.subplots_adjust(left=0.2, wspace=0.4)
data = np.random.uniform(0, 1, 80).reshape(20, 4)
mean_ts = np.mean(proportions_ts)
var_ts = np.var(proportions_ts)
mean_eg0pt1 = np.mean(proportions_eg0pt1)
mean_eg0pt3 = np.mean(proportions_eg0pt3)
var_eg0pt1 = np.var(proportions_eg0pt1)
var_eg0pt3 = np.var(proportions_eg0pt3)
prop_lt_25_eg0pt1 = np.sum(proportions_eg0pt1 < 0.25) / len(proportions_eg0pt1)
prop_lt_25_eg0pt3 = np.sum(proportions_eg0pt3 < 0.25) / len(proportions_eg0pt3)
prop_lt_25_ts = np.sum(proportions_ts < 0.25) / len(proportions_ts)
# prop_gt_25_lt_5_eg = np.sum(> proportions > 0.25) / len(proportions)
# prop_gt_25_lt_5_ts = np.sum(> proportions_ts > 0.25) / len(proportions_ts)
data = [[mean_ts, var_ts, prop_lt_25_ts],\
[mean_eg0pt1, var_eg0pt1, prop_lt_25_eg0pt1],\
[mean_eg0pt3, var_eg0pt3, prop_lt_25_eg0pt3]]
final_data = [['%.3f' % j for j in i] for i in data] #<0.25, 0.25< & <0.5, <0.5 & <0.75, <0.75 & <1.0
#table.auto_set_font_size(False)
# table.set_fontsize(7)
# table.auto_set_column_width((-1, 0, 1, 2, 3))
table = ax_h.table(cellText=final_data, colLabels=['Mean', 'Variance', 'prop < 0.25'], rowLabels = ["Thompson Sampling", "Epsilon Greedy 0.1", "Epsilon Greedy 0.3"], loc='bottom', cellLoc='center', bbox=[0.25, -0.5, 0.5, 0.3])
table.auto_set_font_size(False)
table.set_fontsize(7)
table.auto_set_column_width((-1, 0, 1, 2, 3))
# Adjust layout to make room for the table:
#ax_h.tick_params(axis='x', pad=20)
#fig_h.subplots_adjust(left=0.2, bottom=0.5)
#fig_h.tight_layout()
# save_dir = "../simulation_analysis_saves/Tables/{}/{}/{}/num_sims={}/".format(outcome, iseffect, include_stderr, num_sims)
save_dir = "../simulation_analysis_saves/histograms/ExploreAndExploit/N={}".format(n)
Path(save_dir).mkdir(parents=True, exist_ok=True)
fig_h.savefig(save_dir + "/condition_prop_n={}.png".format(num_steps), bbox_inches = 'tight')
fig_h.clf()
def hist_means_bias(df = None, to_check_eg0pt1 = None, to_check_eg0pt3 = None, to_check_unif = None, to_check_ipw = None, n = None, num_sims = None, load_df = True, \
title = None,\
to_check_ts = None, mean_key = "mean_1"):
'''
Not using bias
'''
if load_df == True:
with open(to_check_eg0pt1, 'rb') as f:
df_eg0pt1 = pickle.load(f)
with open(to_check_eg0pt3, 'rb') as f:
df_eg0pt3 = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
if to_check_ipw != None:
ipw_t1_list = np.load(to_check_ipw)
if to_check_ts != None:
with open(to_check_ts, 'rb') as t:
df_ts = pickle.load(t)
#print(data)
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps]
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps]
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps]
# bins = np.arange(0, 1.01, .025)
num_replications = len(df_for_num_steps_eg0pt1)
df_for_num_steps_mean1_eg0pt1 = df_for_num_steps_eg0pt1[mean_key]
df_for_num_steps_mean1_eg0pt3 = df_for_num_steps_eg0pt3[mean_key]
df_for_num_steps_mean1_unif = df_for_num_steps_unif[mean_key]
df_for_num_steps_mean1_ts = df_for_num_steps_ts[mean_key]
ax[i].hist(df_for_num_steps_mean1_eg0pt1, normed = False, alpha = 0.5, label = "Epsilon Greedy 0.1: mean = {} var = {}".format(round(np.mean(df_for_num_steps_mean1_eg0pt1),2), round(np.var(df_for_num_steps_mean1_eg0pt1), 3)))
ax[i].hist(df_for_num_steps_mean1_eg0pt3, normed = False, alpha = 0.5, label = "Epsilon Greedy 0.3: mean = {} var = {}".format(round(np.mean(df_for_num_steps_mean1_eg0pt3),2), round(np.var(df_for_num_steps_mean1_eg0pt3), 3)))
ax[i].hist(df_for_num_steps_mean1_unif, normed = False, alpha = 0.5, label = "Uniform: mean = {} var = {}".format(round(np.mean(df_for_num_steps_mean1_unif),2), round(np.var(df_for_num_steps_mean1_unif), 3)))
ax[i].hist(df_for_num_steps_mean1_ts, normed = False, alpha = 0.5, label = "Thompson Sampling: mean = {} var = {}".format(round(np.mean(df_for_num_steps_mean1_ts),2), round(np.var(df_for_num_steps_mean1_ts), 3)))
# ax[i].hist(df_for_num_steps_mean1_eg0pt3, normed = False, alpha = 0.5, label = "Epsilon Greedy 0.3")
# ax[i].hist(df_for_num_steps_mean1_unif, normed = False, alpha = 0.5, label = "Uniform")
# ax[i].hist(df_for_num_steps_mean1_ts, normed = False, alpha = 0.5, label = "Thompson Sampling")
mean_num = int(mean_key.split("_")[-1])
ax[i].set_xlabel("Mean {} ($\hatp_{}$ with MLE) for number of participants = {} = {}".format(mean_num, mean_num, size_vars[i], num_steps))
ax[i].legend()
ax[i].set_ylim(0,num_sims)
i +=1
fig.suptitle(title)
#fig.tight_layout(rect=[0, 0.03, 1, 0.90])
# if not os.path.isdir("plots"):
# os.path.mkdir("plots")
# save_dir = "../simulation_analysis_saves/Tables/{}/{}/{}/num_sims={}/".format(outcome, iseffect, include_stderr, num_sims)
save_dir_ne = "../simulation_analysis_saves/{}_hist/NoEffect/".format(mean_key)
save_dir_e = "../simulation_analysis_saves/{}_hist/Effect/".format(mean_key)
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = save_dir_ne + "/{}.png".format(title)
save_str_e = save_dir_e + "/{}.png".format(title)
# save_str_e = "../simulation_analysis_saves/{}_hist/Effect/{}.png".format(mean_key, title)
if "No Effect" in title:
print("saving to ", save_str_ne)
fig.savefig(save_str_ne)
elif "With Effect" in title:
print("saving to ", save_str_e)
fig.savefig(save_str_e)
#plt.show()
plt.clf()
plt.close()
def hist_means_diff(df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, n = None, num_sims = None, \
title = None,\
df_ts = None):
'''
Not using bias
'''
#print(data)
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps]
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps]
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps]
# bins = np.arange(0, 1.01, .025)
num_replications = len(df_for_num_steps_eg0pt1)
df_for_num_steps_diff_eg0pt1 = np.abs(df_for_num_steps_eg0pt1["mean_1"] - df_for_num_steps_eg0pt1["mean_2"])
df_for_num_steps_diff_eg0pt3 = np.abs(df_for_num_steps_eg0pt3["mean_1"] - df_for_num_steps_eg0pt3["mean_2"])
df_for_num_steps_diff_unif = np.abs(df_for_num_steps_unif["mean_1"] - df_for_num_steps_unif["mean_2"])
df_for_num_steps_diff_ts = np.abs(df_for_num_steps_ts["mean_1"] - df_for_num_steps_ts["mean_2"])
ax[i].hist(df_for_num_steps_diff_eg0pt1, normed = False, alpha = 0.5, label = "Epsilon Greedy 0.1: mean = {} var = {}".format(round(np.mean(df_for_num_steps_diff_eg0pt1),2), round(np.var(df_for_num_steps_diff_eg0pt1), 3)), color = "yellow")
        ax[i].hist(df_for_num_steps_diff_eg0pt3, normed = False, alpha = 0.5, label = "Epsilon Greedy 0.3: mean = {} var = {}".format(round(np.mean(df_for_num_steps_diff_eg0pt3),2), round(np.var(df_for_num_steps_diff_eg0pt3), 3)))
# -*- coding: utf-8 -*-
"""
Copyright 2018 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import tensorflow as tf
import numpy as np
import time
import math
from kin_kor_char_parser import decompose_str_as_one_hot
LOCAL_DATASET_PATH = '../sample_data/kin/'
from soy.soy.nlp.tokenizer import CohesionTokenizer, RegexTokenizer
from gensim.models import Word2Vec
class KinQueryDataset:
"""
    A Python object that reads the Kin (지식인) data and returns it as (data, label) tuples.
"""
def __init__(self, dataset_path: str, max_length: int):
"""
        :param dataset_path: dataset root path
        :param max_length: maximum string length (400)
"""
        # Paths to the data and the labels
queries_path = os.path.join(dataset_path, 'train', 'train_data')
labels_path = os.path.join(dataset_path, 'train', 'train_label')
        # Read the Kin data and run preprocessing
self.test_idx = -1
with open(queries_path, 'rt', encoding='utf8') as f:
#self.queries1, self.queries2,self.queries1_test,self.queries2_test,self.test_idx = preprocess2(f.readlines(), max_length,test_data=True)
self.queries1, self.queries2= preprocess2(f.readlines(), max_length,test_data=False)
#self.queries,self.queries_test,self.test_idx = preprocess_origin(f.readlines(),max_length,test_data=True)
        # Read the Kin labels and run preprocessing.
with open(labels_path) as f:
self.labels = np.array([[np.float32(x)] for x in f.readlines()])
if self.test_idx != -1:
self.labels_test = self.labels[self.test_idx:]
self.labels = self.labels[:self.test_idx]
print("test data splited size %d" % self.test_idx)
def __len__(self):
"""
:return: 전체 데이터의 수를 리턴합니다
"""
return len(self.labels)
def __getitem__(self, idx):
"""
:param idx: 필요한 데이터의 인덱스
:return: 인덱스에 맞는 데이터, 레이블 pair를 리턴합니다
"""
return self.queries1[idx], self.queries2[idx] ,self.labels[idx]
def add_noise(query):
query = query + (query * (0.001) * ((np.random.rand(1) - 0.5)))
query = np.rint(query)
query = query.astype(np.int32)
return query
def data_augmentation(queries1,queries2,labels):
# Add noise in query data
def get_noised_queries(queries):
# Expand query numpy array size
q_expand = np.zeros((len(queries) * 2, len(queries[0])), dtype=np.int32)
np.random.seed(int(time.time()))
for i in range(len(q_expand)):
if i < len(queries):
q_expand[i] = queries[i]
else:
noised_val = add_noise(queries[i - len(queries)])
q_expand[i] = noised_val
return q_expand
def get_double_labels(labels):
l_expand = np.zeros((len(labels) * 2,1), dtype=np.int32)
for i in range(len(l_expand)):
if i < len(labels):
l_expand[i] = labels[i]
else:
l_expand[i] = labels[i - len(labels)]
return l_expand
q1_expand = get_noised_queries(queries1)
q2_expand = get_noised_queries(queries2)
l_expand = get_double_labels(labels)
return q1_expand, q2_expand, l_expand
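def _augmentation_smoke_test():
    # Hypothetical sanity check (assumption, not part of the original source):
    # verifies that data_augmentation doubles the number of rows on dummy data.
    q1 = np.arange(12, dtype=np.int32).reshape(3, 4)
    q2 = np.arange(12, 24, dtype=np.int32).reshape(3, 4)
    labels = np.array([[0.0], [1.0], [1.0]], dtype=np.float32)
    q1_aug, q2_aug, l_aug = data_augmentation(q1, q2, labels)
    assert q1_aug.shape == (6, 4)
    assert q2_aug.shape == (6, 4)
    assert l_aug.shape == (6, 1)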
def preprocess2(data: list, max_length: int, test_data: bool):
"""
입력을 받아서 딥러닝 모델이 학습 가능한 포맷으로 변경하는 함수입니다.
기본 제공 알고리즘은 char2vec이며, 기본 모델이 MLP이기 때문에, 입력 값의 크기를 모두 고정한 벡터를 리턴합니다.
문자열의 길이가 고정값보다 길면 긴 부분을 제거하고, 짧으면 0으로 채웁니다.
:param data: 문자열 리스트 ([문자열1, 문자열2, ...])
:param max_length: 문자열의 최대 길이
:return: 벡터 리스트 ([[0, 1, 5, 6], [5, 4, 10, 200], ...]) max_length가 4일 때
"""
query1 =[]
query2 =[]
for d in data:
q1,q2 = d.split('\t')
query1.append(q1)
query2.append(q2.replace('\n',''))
vectorized_data1 = [decompose_str_as_one_hot(datum, warning=False) for datum in query1]
vectorized_data2 = [decompose_str_as_one_hot(datum, warning=False) for datum in query2]
if test_data :
data_size = (len(data))
test_size = (int)(data_size * 0.03)
train_size = data_size - test_size
zero_padding1 = np.zeros((train_size, max_length), dtype=np.int32)
        zero_padding2 = np.zeros((train_size, max_length), dtype=np.int32)
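        # Hypothetical continuation (assumption, not in the original source):
        # pad/truncate every one-hot sequence to max_length; the last
        # `test_size` rows are held out, matching the commented-out call in
        # KinQueryDataset.__init__ that expects five return values.
        zero_padding1_test = np.zeros((test_size, max_length), dtype=np.int32)
        zero_padding2_test = np.zeros((test_size, max_length), dtype=np.int32)
        for idx, seq in enumerate(vectorized_data1):
            length = min(len(seq), max_length)
            if idx < train_size:
                zero_padding1[idx, :length] = np.array(seq)[:length]
            else:
                zero_padding1_test[idx - train_size, :length] = np.array(seq)[:length]
        for idx, seq in enumerate(vectorized_data2):
            length = min(len(seq), max_length)
            if idx < train_size:
                zero_padding2[idx, :length] = np.array(seq)[:length]
            else:
                zero_padding2_test[idx - train_size, :length] = np.array(seq)[:length]
        return zero_padding1, zero_padding2, zero_padding1_test, zero_padding2_test, train_size
    else:
        zero_padding1 = np.zeros((len(data), max_length), dtype=np.int32)
        zero_padding2 = np.zeros((len(data), max_length), dtype=np.int32)
        for idx, seq in enumerate(vectorized_data1):
            length = min(len(seq), max_length)
            zero_padding1[idx, :length] = np.array(seq)[:length]
        for idx, seq in enumerate(vectorized_data2):
            length = min(len(seq), max_length)
            zero_padding2[idx, :length] = np.array(seq)[:length]
        return zero_padding1, zero_padding2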
#MIT License
#
#Copyright (c) 2020 standupmaths
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
def xmaslight():
# This is the code from my
#NOTE THE LEDS ARE GRB COLOUR (NOT RGB)
# Here are the libraries I am currently using:
import time
import board
import neopixel
import re
import math
# FOR DEBUGGING PURPOSE
#import matplotlib.pyplot as plt
#import matplotlib.animation as animation
# You are welcome to add any of these:
# import random
import numpy
# import scipy
import sys
# If you want to have user changable values, they need to be entered from the command line
    # so import sys and use sys.argv[0] etc
# some_value = int(sys.argv[0])
# IMPORT THE COORDINATES (please don't break this bit)
coordfilename = "Python/coords.txt"
# FOR DEBUGGING PURPOSE
#coordfilename = "xmastree2020/coords.txt"
fin = open(coordfilename,'r')
coords_raw = fin.readlines()
coords_bits = [i.split(",") for i in coords_raw]
coords = []
for slab in coords_bits:
new_coord = []
for i in slab:
new_coord.append(int(re.sub(r'[^-\d]','', i)))
coords.append(new_coord)
#set up the pixels (AKA 'LEDs')
PIXEL_COUNT = len(coords) # this should be 500
pixels = neopixel.NeoPixel(board.D18, PIXEL_COUNT, auto_write=False)
# FOR DEBUGGING PURPOSE
#pixels = [ 0 for i in range(PIXEL_COUNT) ]
# YOU CAN EDIT FROM HERE DOWN
# This program is intended to make a neuronal network out of the tree's LEDs.
#
# By neuronal network I mean:
# the light of each LED will be set according to a dynamic variable V
# that stands for a model of the electric potential in the membrane of a real neuron.
# And these 'neurons' (i.e., LEDs) will have connections between them
# that obey the dynamics of chemical synapses in the brain represented by the variable S.
# The network is built according to a 'cubic-like' lattice: i.e.,
# a given LED receives input from the closest LEDs in each of the 6 spatial directions.
# Thus, the 'synapse' is represented by a 'virtual' connection, and not a physical one (i.e., the LED wire)
# I implemented other 2 types of networks:
# a surface networks (only LEDs in the surface of the tree cone 'talk' to each other)
# a proximity network (only LEDs within a radius R of each other are connected)
#
# to visualize the network generated by this algorithm, please run
# python view_tree_network.py
# first we need to define (a lot of) functions
def memb_potential_to_01(V):
# V -> dynamic variable (defined in [-1,1])
# the formula below is just a smart way to map
# [-1,1] to [0,1], emphasizing bright colors (i.e., colors close to 1)
# [0,1] is then mapped on the color_arr below
if type(V) is numpy.ndarray:
return ((V[:,0]+1.0)*0.5)**4 # raising to 4 is just to emphasize bright colors
else:
return ((V+1.0)*0.5)**4 # raising to 4 is just to emphasize bright colors
def memb_potential_to_coloridx(V,n_colors):
# V -> dynamic variable
# n_colors -> total number of colors
return numpy.floor(n_colors*memb_potential_to_01(V)).astype(int)
def create_input_lists(neigh):
# given a list of neighbors, where neigh[i] is a list of inputs to node i
# generate the list of inputs to be used in the simulation
presyn_neuron_list = [n for sublist in neigh for n in sublist]
cs = numpy.insert(numpy.cumsum([ n.size for n in neigh ]),0,0)
input_list = [ numpy.arange(a,b) for a,b in zip(cs[:-1],cs[1:]) ]
return input_list,presyn_neuron_list
def generate_list_of_neighbors(r,R=0.0,on_conic_surface_only=False):
# generates a network of "pixels"
# each pixel in position r[i,:] identifies its 6 closest neighbors and should receive a connection from it
# if R is given, includes all pixels within a radius R of r[i,:] as a neighbor
# the 6 neighbors are chosen such that each one is positioned to the left, right, top, bottom, front or back of each pixel (i.e., a simple attempt of a cubic lattice)
#
# r -> position vector (each line is the position of each pixel)
# R -> neighborhood ball around each pixel
# on_conic_surface_only -> if true, only links pixels that are on the conic shell of the tree
#
# returns:
# list of neighbors
# neigh[i] -> list of 6 "pixels" closest to i
def is_left_neigh(u,v):
# u and v are two vectors on the x,y plane
# u may be a list of vectors (one vector per row)
return numpy.dot(u,[-v[1],v[0]])>0.0 # # the vector [-v[1],v[0]] is the 90-deg CCW rotated version of v
def get_first_val_not_in_list(v,l): # auxiliary function
# returns first value in v that is not in l
if v.size == 0:
return None
n = len(v)
i = 0
while i < n:
if not (v[i] in l):
return v[i]
i+=1
if on_conic_surface_only:
# only adds 4 neighbors (top, bottom, left, right) that are outside of the cone defined by the estimated tree cone parameters
# cone equation (x**2 + y**2)/c**2 = (z-z0)**2
z0 = numpy.max(r[:,2]) # cone height above the z=0 plane
h = z0 + numpy.abs(numpy.min(r[:,2])) # cone total height
base_r = (numpy.max( (numpy.max(r[:,1]),numpy.max(r[:,0])) ) + numpy.abs(numpy.min( ( numpy.min(r[:,1]),numpy.min(r[:,0]) ) )))/2.0 # cone base radius
c = base_r / h # cone opening radius (defined by wolfram https://mathworld.wolfram.com/Cone.html )
#z_cone = lambda x,y,z0,c,s: z0+s*numpy.sqrt((x**2+y**2)/(c**2)) # s is the concavity of the cone: -1 turned down, +1 turned up
cone_r_sqr = lambda z,z0,c: (c*(z-z0))**2
outside_cone = (r[:,0]**2+r[:,1]**2) > cone_r_sqr(r[:,2],z0,c)
pixel_list = numpy.nonzero(outside_cone)[0]
r_out = r[outside_cone,:]
neigh = [ numpy.array([],dtype=int) for i in range(r.shape[0]) ]
for i,r0 in enumerate(r_out):
# a radius is not given, hence returns a crystalline-like cubic-like structure :P
pixel_list_sorted = numpy.argsort(numpy.linalg.norm(r_out-r0,axis=1)) # sorted by Euler distance to r0
rs = r_out[pixel_list_sorted,:] # list of positions from the closest to the farthest one to r0
local_neigh_list = [] # local neighbor list
x1_neigh = get_first_val_not_in_list(numpy.nonzero( is_left_neigh(rs[:,:2],r0[:2]) )[0],local_neigh_list) # gets first neighbor to the left that is not added yet
if x1_neigh:
local_neigh_list.append(x1_neigh)
x2_neigh = get_first_val_not_in_list(numpy.nonzero( numpy.logical_not(is_left_neigh(rs[:,:2],r0[:2])) )[0],local_neigh_list) # gets first neighbor to the right that is not added yet
if x2_neigh:
local_neigh_list.append(x2_neigh)
z1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]<r0[2])[0],local_neigh_list) # gets first neighbor to the top that is not added yet
if z1_neigh:
local_neigh_list.append(z1_neigh)
z2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]>r0[2])[0],local_neigh_list) # gets first neighbor to the bottom that is not added yet
if z2_neigh:
local_neigh_list.append(z2_neigh)
neigh[pixel_list[i]] = pixel_list[pixel_list_sorted[local_neigh_list]] # adds neighbors
return neigh
neigh = []
for r0 in r:
if (R>0.0): # a neighborhood radius is given
neigh.append(numpy.nonzero(numpy.linalg.norm(r-r0,axis=1)<R)[0])
else:
# a radius is not given, hence returns a crystalline-like cubic-like structure :P
pixel_list_sorted = numpy.argsort(numpy.linalg.norm(r-r0,axis=1)) # sorted by Euler distance to r0
rs = r[pixel_list_sorted,:] # list of positions from the closest to the farthest one to r0
local_neigh_list = [] # local neighbor list
x1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,0]<r0[0])[0],local_neigh_list) # gets first neighbor to the left that is not added yet
if x1_neigh:
local_neigh_list.append(x1_neigh)
x2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,0]>r0[0])[0],local_neigh_list) # gets first neighbor to the right that is not added yet
if x2_neigh:
local_neigh_list.append(x2_neigh)
y1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,1]<r0[1])[0],local_neigh_list) # gets first neighbor to the back that is not added yet
if y1_neigh:
local_neigh_list.append(y1_neigh)
y2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,1]>r0[1])[0],local_neigh_list) # gets first neighbor to the front that is not added yet
if y2_neigh:
local_neigh_list.append(y2_neigh)
z1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]<r0[2])[0],local_neigh_list) # gets first neighbor to the top that is not added yet
if z1_neigh:
local_neigh_list.append(z1_neigh)
z2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]>r0[2])[0],local_neigh_list) # gets first neighbor to the bottom that is not added yet
if z2_neigh:
local_neigh_list.append(z2_neigh)
neigh.append(pixel_list_sorted[local_neigh_list]) # adds neighbors
return neigh
def build_network(r_nodes,R=0.0,conic_surface_only=False):
# r_nodes vector of coordinates of each pixel
# R connection radius
# if R is zero, generates an attempt of a cubic-like lattice, otherwise connects all pixels within a radius R of each other
neigh = generate_list_of_neighbors(r_nodes,R,on_conic_surface_only=conic_surface_only)
# creates the interaction lists between dynamic variables
input_list,presyn_neuron_list = create_input_lists(neigh)
# creates dynamic variables
N = len(neigh) # number of neurons (or pixels)
Nsyn = len(presyn_neuron_list)
V = numpy.zeros((N,3)) # membrane potential (dynamic variables) of each neuron (pixel)
S = numpy.zeros((Nsyn,2)) # synaptic current input generated by each pixel towards each of its postsynaptic pixels
return V,S,input_list,presyn_neuron_list
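    # Hypothetical usage sketch (assumption, not part of the original source):
    #   r_nodes = numpy.array(coords, dtype=float)
    #   V, S, input_list, presyn_neuron_list = build_network(r_nodes, R=0.0, conic_surface_only=False)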
def get_neuron_resting_state(neuron_map_iter,par,T=20000):
        V = -0.9*numpy.ones((1,3))
import localmodule
import datetime
import h5py
import math
import music21 as m21
import numpy as np
import os
import scipy
import scipy.linalg
import sys
import time
# Parse arguments
args = sys.argv[1:]
composer_str = args[0]
track_str = args[1]
# Define constants.
J_tm = 8
N = 2**10
n_octaves = 8
midi_octave_offset = 2
quantization = 2.0
xi = 0.25
sigma = 0.1
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Eigenprogression transform.")
print("Composer: " + composer_str + ".")
print("Piece: " + track_str + ".")
print("")
print("h5py version: {:s}".format(h5py.__version__))
print("music21 version: {:s}".format(m21.__version__))
print("numpy version: {:s}".format(np.__version__))
print("scipy version: {:s}".format(scipy.__version__))
print("")
############################# (1) PARSING ##################################
# Start clock.
parsing_start_time = int(time.time())
# Parse Kern score with music21.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
kern_name = "_".join([dataset_name, "kern"])
kern_dir = os.path.join(data_dir, kern_name)
composer_dir = os.path.join(kern_dir, composer_str)
track_name = track_str + ".krn"
track_path = os.path.join(composer_dir, track_name)
score = m21.converter.parse(track_path)
pianoroll_parts = []
n_parts = len(score.parts)
n_semitones = 12 * n_octaves
# Loop over parts to extract piano rolls.
for part_id in range(n_parts):
part = score.parts[part_id]
pianoroll_part = np.zeros((n_semitones, N), dtype=np.float32)
# Get the measure offsets
measure_offset = {}
for el in part.recurse(classFilter=('Measure')):
measure_offset[el.measureNumber] = el.offset
# Loop over notes
for note in part.recurse(classFilter=('Note')):
note_start = int(math.ceil(
(measure_offset[note.measureNumber] +\
note.offset) *\
quantization))
note_end = int(math.ceil((
measure_offset[note.measureNumber] +\
note.offset +\
note.duration.quarterLength) *\
quantization))
pianoroll_part[
note.midi - midi_octave_offset * 12,
note_start:note_end] = 1
pianoroll_parts.append(pianoroll_part)
# Stack parts into piano roll.
mtrack_pianoroll = np.stack(pianoroll_parts, 2)
pianoroll = mtrack_pianoroll.max(axis=2)
# Print elapsed time.
elapsed_time = time.time() - int(parsing_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Parsing took " + elapsed_str + " seconds.")
####################### (2) WAVELET TRANSFORM ##############################
# Start clock.
wavelet_start_time = int(time.time())
# Setup wavelet filter bank over time.
wavelet_filterbank_ft = np.zeros((1, N, J_tm), dtype=np.float32)
for j in range(J_tm-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi_ft = localmodule.morlet(center, den, N, n_periods=4)
wavelet_filterbank_ft[0, :, -1 - j] = psi_ft
# Append scaling function phi (average).
wavelet_filterbank_ft[0, 0, 0] = 1
# Convolve pianoroll with filterbank.
pianoroll_ft = scipy.fftpack.fft(pianoroll, axis=1)
pianoroll_ft = np.expand_dims(pianoroll_ft, axis=2)
wavelet_transform_ft = pianoroll_ft * wavelet_filterbank_ft
wavelet_transform = scipy.fftpack.ifft(wavelet_transform_ft, axis=1)
# Print elapsed time.
elapsed_time = time.time() - int(wavelet_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Wavelet transform took " + elapsed_str + " seconds.")
####################### (3) EIGENTRIAD TRANSFORM ###########################
# Start clock.
eigentriad_start_time = int(time.time())
# Reshape MIDI axis to chromagram
chromagram = np.reshape(wavelet_transform,
(12, -1, wavelet_transform.shape[1], wavelet_transform.shape[2]), 'F')
# Construct eigentriads
cosine_basis = np.array([[np.cos(2*np.pi*omega*t/3)
for omega in range(3)] for t in range(3)]).T
sine_basis = np.array([[np.sin(2*np.pi*omega*t/3)
for omega in range(3)] for t in range(3)]).T
fourier_basis = cosine_basis + 1.0j * sine_basis
major_template = [0, 4, 7]
minor_template = [0, 3, 7]
major_eigentriads = np.zeros((12, 3), dtype=np.complex64)
minor_eigentriads = np.zeros((12, 3), dtype=np.complex64)
for omega in range(3):
for t, p in enumerate(major_template):
major_eigentriads[p, omega] = fourier_basis[t, omega]
for t, p in enumerate(minor_template):
minor_eigentriads[p, omega] = fourier_basis[t, omega]
eigentriads = np.stack(
(major_eigentriads, minor_eigentriads), axis=1)
eigentriads = eigentriads.astype(np.complex64)
# Convolve chromagram with eigentriads
chromagram_ft = scipy.fftpack.fft(chromagram, axis=0)
chromagram_ft = chromagram_ft[:, np.newaxis, :, :, :, np.newaxis]
eigentriads_ft = scipy.fftpack.fft(eigentriads, axis=0)
eigentriads_ft = eigentriads_ft[:, :, np.newaxis,
np.newaxis, np.newaxis, :]
eigentriad_transform_ft = chromagram_ft * eigentriads_ft
eigentriad_transform = scipy.fftpack.fft(
eigentriad_transform_ft, axis=0)
# Apply modulus nonlinearity
eigentriad_transform_modulus = np.abs(eigentriad_transform)
# Print elapsed time.
elapsed_time = time.time() - int(eigentriad_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Eigentriad transform took " + elapsed_str + " seconds.")
####################### (4) SCATTERING TRANSFORM ###########################
# Start clock.
scattering_start_time = int(time.time())
# Setup scattering filter bank over time.
scattering_filterbank_ft = np.zeros((1, N, 2*J_tm-1), dtype=np.float32)
for j in range(J_tm-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi_ft = localmodule.morlet(center, den, N, n_periods=4)
conj_psi_ft = np.roll(psi_ft, -1)[::-1]
scattering_filterbank_ft[0, :, -1 - 2*j] = psi_ft
scattering_filterbank_ft[0, :, -1 - (2*j+1)] = conj_psi_ft
scattering_filterbank_ft[0, 0, 0] = 1
# Convolve eigentriad transform with filterbank again.
# This is akin to a scattering transform.
# We remove the finest scale (last two coefficients).
eigentriad_transform_modulus_ft =\
scipy.fftpack.fft(eigentriad_transform_modulus, axis=3)
eigentriad_transform_modulus_ft =\
eigentriad_transform_modulus_ft[:, :, :, :, :, :, np.newaxis]
scattering_filterbank_ft =\
wavelet_filterbank_ft[:, np.newaxis, np.newaxis, :,
np.newaxis, np.newaxis, :-2]
scattering_transform_ft =\
eigentriad_transform_modulus_ft * scattering_filterbank_ft
scattering_transform = scipy.fftpack.ifft(scattering_transform_ft, axis=3)
# Print elapsed time.
elapsed_time = time.time() - int(scattering_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Scattering transform took " + elapsed_str + " seconds.")
###################### (5) EIGENPROGRESSION TRANSFORM ######################
# Start clock.
eigenprogression_start_time = int(time.time())
# Reshape chroma and quality into a chord axis
sc_shape = scattering_transform.shape
tonnetz_shape = (
sc_shape[0]*sc_shape[1], sc_shape[2],
sc_shape[3], sc_shape[4], sc_shape[5],
sc_shape[6])
tonnetz = np.reshape(scattering_transform,
tonnetz_shape, 'F')
# Build adjacency matrix for Tonnetz graph
# (1/3) Major to minor transitions.
major_edges = np.zeros((12,), dtype=np.float32)
# Parallel minor (C major to C minor)
major_edges[0] = 1
# Relative minor (C major to A minor)
major_edges[9] = 1
# Leading tone minor (C major to E minor)
major_edges[4] = 1
# (2/3) Minor to major transitions
minor_edges = np.zeros((12,))
# Parallel major (C minor to C major)
minor_edges[0] = 1
# Relative major (C minor to Eb major)
minor_edges[3] = 1
# Leading tone major (C major to Ab minor)
minor_edges[8] = 1
# (2/3) Build full adjacency matrix by 4 blocks.
major_adjacency = scipy.linalg.toeplitz(major_edges, minor_edges)
minor_adjacency = scipy.linalg.toeplitz(minor_edges, major_edges)
tonnetz_adjacency = np.zeros((24, 24), dtype=np.float32)
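# Hypothetical continuation (assumption, not in the original source): place the
# major->minor and minor->major Toeplitz blocks into the 24x24 Tonnetz matrix;
# the major->major and minor->minor blocks are left at zero.
tonnetz_adjacency[:12, 12:] = major_adjacency
tonnetz_adjacency[12:, :12] = minor_adjacency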
import pcp_utils
import os
import yaml
import trimesh
import numpy as np
import xml.etree.ElementTree as ET
# add gym and baselines to the dir
gym_path = pcp_utils.utils.get_gym_dir()
baseline_path = pcp_utils.utils.get_baseline_dir()
os.sys.path.append(gym_path)
os.sys.path.append(baseline_path)
def maybe_check_obj_mean(mesh_path):
mesh = trimesh.load(mesh_path)
if not isinstance(mesh, trimesh.Trimesh):
return None
mean_vertex = mesh.vertices.mean(axis=0)
return mean_vertex
def get_mesh_paths(xml_path):
tree = ET.parse(xml_path)
root = tree.getroot()
asset_tag = None
for root_child in iter(root):
if root_child.tag == 'asset':
asset_tag = root_child
if asset_tag is None:
raise ValueError('given xml does not contain asset element')
mesh_paths = list()
for asset_child in iter(asset_tag):
if asset_child.tag == 'mesh':
mesh_paths.append(asset_child.attrib['file'])
return mesh_paths
def maybe_refine_mesh_paths(mesh_paths, source_mesh_dir):
refined_mesh_paths = list()
for m in mesh_paths:
if os.path.exists(m):
refined_mesh_paths.append(m)
else:
remove_str = '../meshes/'
new_path = os.path.join(source_mesh_dir,
m[len(remove_str):])
refined_mesh_paths.append(new_path)
return refined_mesh_paths
def compute_correction_factor(mesh_paths, combined=False):
meshes = [trimesh.load(m) for m in mesh_paths]
if combined:
combined_mesh = np.sum(meshes)
# compute the mean
mean_position = combined_mesh.vertices.mean(axis=0)
else:
# for each mesh I will compute the geometric center
mean_position = [m.vertices.mean(axis=0) for m in meshes]
return mean_position
def compute_correction_factor_bbox(mesh_paths):
meshes = [trimesh.load(m) for m in mesh_paths]
combined_mesh = np.sum(meshes)
# compute the bounding box bounds
bbox_bounds = combined_mesh.bounding_box.bounds
# compute the center from the bounds
bbox_center = bbox_bounds[0, :] + ((bbox_bounds[1, :] - bbox_bounds[0, :])/2.0)
return bbox_center
def correct_meshes_and_save(correction_factor, mesh_paths):
# load all the meshes
meshes = [trimesh.load(m) for m in mesh_paths]
if isinstance(correction_factor, list):
for i, (mesh, corr_factor) in enumerate(zip(meshes, correction_factor)):
mesh.vertices -= corr_factor
mesh.export(mesh_paths[i])
    else:  # a single combined-mesh correction array (not a per-mesh list)
# from all meshes.vertices subtract the correction factor
for mp, m in zip(mesh_paths, meshes):
m.vertices -= correction_factor
m.export(mp)
def check_meshes(mesh_paths, combined=False):
meshes = [trimesh.load(m) for m in mesh_paths]
eps = np.zeros(3,)
if combined:
combined_mesh = np.sum(meshes)
mean_pos = combined_mesh.vertices.mean(axis=0)
        truth_val = np.allclose(mean_pos, eps)
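        # Hypothetical continuation (assumption, not in the original source):
        # report whether the combined mesh is centred at the origin.
        return truth_val
    else:
        # Check each mesh individually when `combined` is False.
        mean_positions = [m.vertices.mean(axis=0) for m in meshes]
        return all(np.allclose(mp, eps) for mp in mean_positions)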
## Copyright 2020 UT-Battelle, LLC. See LICENSE.txt for more information.
###
# @author <NAME>, <NAME>, <NAME>, <NAME>
# <EMAIL>
#
# Modification:
# Baseline code
# Date: Apr, 2020
# **************************************************************************
###
import os
from multi_thread_run import *
from deffe_utils import *
import numpy as np
import argparse
import shlex
import pathlib
from read_config import *
""" DeffeExtract class to extract cost metrics for the batch of samples with
through multi-thread execution environment either with/without
the help of slurm
"""
class DeffeExtract:
def __init__(self, framework):
self.framework = framework
self.config = framework.config.GetExtract()
self.slurm_flag = self.config.slurm
self.slurm = LoadPyModule(self.config.GetSlurm().pyscript,
self, self.config.GetSlurm())
if self.framework.args.no_slurm:
self.slurm_flag = False
self.sample_extract_script = self.config.sample_extract_script
if not os.path.exists(self.sample_extract_script):
self.sample_extract_script = os.path.join(
self.framework.config_dir, self.sample_extract_script
)
self.batch_size = self.config.batch_size
self.fr_config = self.framework.fr_config
if self.framework.args.batch_size!= -1:
self.batch_size = self.framework.args.batch_size
if self.framework.args.extract_batch_size!= -1:
self.batch_size = self.framework.args.extract_batch_size
self.output_flow = self.batch_size
if self.config.output_flow != -1:
self.output_flow = self.config.output_flow
if self.framework.args.extract_out_flow != -1:
self.output_flow = self.framework.args.extract_out_flow
self.parameters = self.framework.parameters
self.parser = self.AddArgumentsToParser()
self.args = self.ReadArguments()
def GetBatchSize(self):
return self.batch_size
def GetOutputFlow(self):
return self.output_flow
# Read arguments provided in JSON configuration file
def ReadArguments(self):
arg_string = self.config.arguments
args = self.parser.parse_args(shlex.split(arg_string))
return args
# Add command line arguments to parser
def AddArgumentsToParser(self):
parser = argparse.ArgumentParser()
return parser
# Initialize the class with parameters list and to be extracted cost metrics
def Initialize(self, param_list, cost_list, param_data):
self.param_list = param_list
self.cost_list = cost_list
self.param_data = param_data
def GetExtractCommand(self, output,
param_pattern,
param_val_with_escapechar_hash,
bash_param_val_with_escapechar_hash):
(run_dir, counter, evaluate_script) = output
extract_script = self.sample_extract_script
if not os.path.isfile(extract_script):
return None
extract_script = self.parameters.CreateRunScript(
extract_script,
self.config.sample_extract_arguments,
self.config.sample_extract_excludes,
run_dir,
param_pattern,
param_val_with_escapechar_hash,
bash_param_val_with_escapechar_hash, "extract_"
)
cmd = (
"cd "
+ run_dir
+ " ; sh "
+ os.path.basename(extract_script)
+ " > "
+ self.config.output_log
+ " 2>&1 3>&1 ; cd "
+ os.getcwd()
)
return cmd
def GetResult(self, flag, param_val, eval_output):
(run_dir, counter, evaluate_script) = eval_output
file_path = os.path.join(run_dir, self.config.cost_output)
if os.path.exists(file_path):
if pathlib.Path(file_path).suffix == '.json':
data = {}
try:
results_json = DeffeConfig(file_path)
data = results_json.data
except (ValueError, TypeError) as e:
Log(f"Unable to read results json file due to Type/Value errors! file:{file_path} ", 'Error')
param_hash_key = self.framework.config.GetCosts()
if data != None:
result = []
for p in param_hash_key:
if p in data:
result.append(str(data[p]))
else:
result.append("NULL")
result = np.array(result).astype("str")
if self.framework.args.hold_evaluated_data or \
self.config.hold_evaluated_data:
self.param_data.PushEvaluatedData(param_val, result)
return (
self.framework.valid_flag, flag,
result,
)
else:
with open(file_path, "r") as fh:
lines = fh.readlines()
if len(lines) == 0:
return (self.framework.not_valid_flag,
flag, np.array([0,]).astype("str"))
flines = [RemoveWhiteSpaces(lines[index])
if index < len(lines) else 0
for index in range(len(self.cost_list)) ]
                    result = np.array(flines)
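                    # Hypothetical continuation (assumption, not in the original
                    # source): mirror the JSON branch above -- cache the evaluated
                    # sample if requested, then return the parsed cost metrics.
                    result = result.astype("str")
                    if self.framework.args.hold_evaluated_data or \
                        self.config.hold_evaluated_data:
                        self.param_data.PushEvaluatedData(param_val, result)
                    return (
                        self.framework.valid_flag, flag,
                        result,
                    )
        return (self.framework.not_valid_flag, flag, np.array([0, ]).astype("str"))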
import numpy as np
from mot.common import Gaussian
from mot.configs import Object
COMMON_BIRTH_TIME = 10
COMMON_DEATH_TIME = 80
# TODO single static object (no motion)
single_static_object = [
Object(
initial=Gaussian(x=np.array([10.0, 10.0, 0.0, 0.0]), P=np.eye(4)),
t_birth=COMMON_BIRTH_TIME,
t_death=COMMON_DEATH_TIME,
)
]
# TODO two static objects (no motion)
two_static_objects = [
Object(
initial=Gaussian(x=np.array([-100.0, 100.0, 0.0, 0.0]), P=np.eye(4)),
t_birth=COMMON_BIRTH_TIME,
t_death=COMMON_DEATH_TIME,
),
Object(
initial=Gaussian(x=np.array([100.0, 100.0, 0.0, 0.0]), P=np.eye(4)),
t_birth=COMMON_BIRTH_TIME,
t_death=COMMON_DEATH_TIME,
),
]
# TODO three static objects (no motion)
three_static_objects = [
Object(
        initial=Gaussian(x=np.array([-250.0, 250.0, 0.0, 0.0]), P=np.eye(4)),
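        # Hypothetical continuation (assumption, not part of the original
        # source): finish this object and close the list; the remaining
        # coordinates are illustrative placeholders only.
        t_birth=COMMON_BIRTH_TIME,
        t_death=COMMON_DEATH_TIME,
    ),
    Object(
        initial=Gaussian(x=np.array([250.0, 250.0, 0.0, 0.0]), P=np.eye(4)),
        t_birth=COMMON_BIRTH_TIME,
        t_death=COMMON_DEATH_TIME,
    ),
    Object(
        initial=Gaussian(x=np.array([0.0, -250.0, 0.0, 0.0]), P=np.eye(4)),
        t_birth=COMMON_BIRTH_TIME,
        t_death=COMMON_DEATH_TIME,
    ),
]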
import pytest
import numpy
import scipy
from numpy.testing import assert_allclose
from jadapy import jdqr
from jadapy import Target
from jadapy.utils import norm
try:
from jadapy import schur
n = 20
a = numpy.random.rand(n, n)
t, q = schur.schur(a)
schur.schur_sort(t, q, Target.LargestMagnitude)
except ValueError:
pytest.skip('SciPy too old', allow_module_level=True)
REAL_DTYPES = [numpy.float32, numpy.float64]
COMPLEX_DTYPES = [numpy.complex64, numpy.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
if dtype in COMPLEX_DTYPES:
return (numpy.random.rand(*shape) + numpy.random.rand(*shape) * 1.0j).astype(dtype)
return numpy.random.rand(*shape).astype(dtype)
def generate_mass_matrix(shape, dtype):
x = numpy.zeros(shape, dtype)
numpy.fill_diagonal(x, numpy.random.rand(shape[0]))
return x
def generate_test_matrix(shape, dtype):
a = generate_random_dtype_array(shape, dtype)
a += 3 * numpy.diag(numpy.ones([shape[0]], dtype))
return a
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_smallest_magnitude(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, num=k, tol=tol)
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: abs(x)))
eigs = scipy.linalg.eigvals(a)
eigs = numpy.array(sorted(eigs, key=lambda x: abs(x)))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_smallest_magnitude_with_mass(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
m = generate_mass_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, num=k, tol=tol, M=m)
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: abs(x)))
eigs = scipy.linalg.eigvals(a, m)
eigs = numpy.array(sorted(eigs, key=lambda x: abs(x)))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_largest_magnitude(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, k, Target.LargestMagnitude, tol=tol)
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: -abs(x)))
eigs = scipy.linalg.eigvals(a)
eigs = numpy.array(sorted(eigs, key=lambda x: -abs(x)))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_largest_magnitude_with_mass(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
m = generate_mass_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, k, Target.LargestMagnitude, tol=tol, M=m)
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: -abs(x)))
eigs = scipy.linalg.eigvals(a, m)
eigs = numpy.array(sorted(eigs, key=lambda x: -abs(x)))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_smallest_real(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, k, Target.SmallestRealPart, tol=tol)
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: x.real))
eigs = scipy.linalg.eigvals(a)
eigs = numpy.array(sorted(eigs, key=lambda x: x.real))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_largest_real(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, k, Target.LargestRealPart, tol=tol)
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: -x.real))
eigs = scipy.linalg.eigvals(a)
eigs = numpy.array(sorted(eigs, key=lambda x: -x.real))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_smallest_imag(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, k, Target.SmallestImaginaryPart, tol=tol, arithmetic='complex')
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: x.imag))
eigs = scipy.linalg.eigvals(a)
eigs = numpy.array(sorted(eigs, key=lambda x: x.imag))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_smallest_imag_real(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 6
a = generate_test_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, k, Target.SmallestImaginaryPart, tol=tol)
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: x.imag))
eigs = scipy.linalg.eigvals(a)
eigs = numpy.array(sorted(eigs, key=lambda x: x.imag))
eigs = eigs[:k]
# In the real case, we store complex conjugate eigenpairs, so only
# at least half of the eigenvalues are correct
eigs = eigs[:k // 2]
jdqr_eigs = jdqr_eigs[:k // 2]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_largest_imag(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
alpha = jdqr.jdqr(a, k, Target.LargestImaginaryPart, tol=tol, arithmetic='complex')
jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: -x.imag))
eigs = scipy.linalg.eigvals(a)
eigs = numpy.array(sorted(eigs, key=lambda x: -x.imag))
eigs = eigs[:k]
assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqr_largest_imag_real(dtype):
numpy.random.seed(1234)
    tol = numpy.finfo(dtype).eps * 1e3
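    # Hypothetical continuation (assumption, not in the original source),
    # mirroring test_jdqr_smallest_imag_real above with the sort direction
    # reversed for the largest imaginary parts.
    atol = tol * 10
    n = 20
    k = 6
    a = generate_test_matrix([n, n], dtype)
    alpha = jdqr.jdqr(a, k, Target.LargestImaginaryPart, tol=tol)
    jdqr_eigs = numpy.array(sorted(alpha, key=lambda x: -x.imag))
    eigs = scipy.linalg.eigvals(a)
    eigs = numpy.array(sorted(eigs, key=lambda x: -x.imag))
    eigs = eigs[:k]
    # In the real case, complex conjugate eigenpairs are stored, so only
    # about half of the eigenvalues are expected to match exactly.
    eigs = eigs[:k // 2]
    jdqr_eigs = jdqr_eigs[:k // 2]
    assert_allclose(jdqr_eigs.real, eigs.real, rtol=0, atol=atol)
    assert_allclose(abs(jdqr_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)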
import gc
import numpy as np
import pandas as pd
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import LabelEncoder
from datetime import datetime
from tqdm import tqdm
import lightgbm as lgb
# Load Data
dtype = {
'id': str,
'teacher_id': str,
'teacher_prefix': str,
'school_state': str,
'project_submitted_datetime': str,
'project_grade_category': str,
'project_subject_categories': str,
'project_subject_subcategories': str,
'project_title': str,
'project_essay_1': str,
'project_essay_2': str,
'project_essay_3': str,
'project_essay_4': str,
'project_resource_summary': str,
'teacher_number_of_previously_posted_projects': int,
'project_is_approved': np.uint8,
}
# Write code that limits the rows until I've sorted out the kinks
data_dir = "F:/Nerdy Stuff/Kaggle/DonorsChoose"
sub_path = "F:/Nerdy Stuff/Kaggle submissions/DonorChoose"
train = pd.read_csv(os.path.join(data_dir, 'data/train_stem.csv'),
low_memory=True)
test = pd.read_csv(os.path.join(data_dir, 'data/test_stem.csv'),
low_memory=True)
id_test = test['id'].values
# Extract features
def extract_features(df):
df['project_title_len'] = df['project_title'].apply(lambda x: len(str(x)))
df['project_essay_1_len'] = df['project_essay_1'].apply(lambda x: len(str(x)))
df['project_essay_2_len'] = df['project_essay_2'].apply(lambda x: len(str(x)))
df['project_essay_3_len'] = df['project_essay_3'].apply(lambda x: len(str(x)))
df['project_essay_4_len'] = df['project_essay_4'].apply(lambda x: len(str(x)))
df['project_resource_summary_len'] = df['project_resource_summary'].apply(lambda x: len(str(x)))
df['project_title_wc'] = df['project_title'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_1_wc'] = df['project_essay_1'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_2_wc'] = df['project_essay_2'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_3_wc'] = df['project_essay_3'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_4_wc'] = df['project_essay_4'].apply(lambda x: len(str(x).split(' ')))
df['project_resource_summary_wc'] = df['project_resource_summary'].apply(lambda x: len(str(x).split(' ')))
extract_features(train)
extract_features(test)
train.drop([
'project_essay_1',
'project_essay_2',
'project_essay_3',
'project_essay_4'], axis=1, inplace=True)
test.drop([
'project_essay_1',
'project_essay_2',
'project_essay_3',
'project_essay_4'], axis=1, inplace=True)
# Recode missing values: after stopword removal some titles/essays end up empty
print("Recoding missing values once NLP preprocessing done. Might want to check that")
train.loc[train['project_title'].isnull() == True, 'project_title'] = 'No values once NLP preprocessing is done'
test.loc[test['project_title'].isnull() == True, 'project_title'] = 'No values once NLP preprocessing is done'
train.loc[train['project_essay'].isnull() == True, 'project_essay'] = 'No values once NLP preprocessing is done'
test.loc[test['project_essay'].isnull() == True, 'project_essay'] = 'No values once NLP preprocessing is done'
train.loc[train['project_resource_summary'].isnull() == True, 'project_resource_summary'] = 'No values once NLP preprocessing is done'
test.loc[test['project_resource_summary'].isnull() == True, 'project_resource_summary'] = 'No values once NLP preprocessing is done'
train.loc[train['description_ttl'].isnull() == True, 'description_ttl'] = 'No values once NLP preprocessing is done'
test.loc[test['description_ttl'].isnull() == True, 'description_ttl'] = 'No values once NLP preprocessing is done'
gc.collect()
# Preprocess columns with label encoder
print('Label Encoder...')
cols = [
'teacher_id',
'teacher_prefix',
'school_state',
'project_grade_category',
'project_subject_categories',
'project_subject_subcategories'
]
df_all = pd.concat([train, test], axis=0)
for c in tqdm(cols):
le = LabelEncoder()
le.fit(df_all[c].astype(str))
train[c] = le.transform(train[c].astype(str))
test[c] = le.transform(test[c].astype(str))
del le
gc.collect()
print('Done.')
# Preprocess timestamp
print('Preprocessing timestamp...')
def process_timestamp(df):
df['project_submitted_datetime'] = pd.to_datetime(df['project_submitted_datetime'])
df['year'] = df['project_submitted_datetime'].apply(lambda x: x.year)
df['month'] = df['project_submitted_datetime'].apply(lambda x: x.month)
df['day'] = df['project_submitted_datetime'].apply(lambda x: x.day)
df['day_of_week'] = df['project_submitted_datetime'].apply(lambda x: x.dayofweek)
df['hour'] = df['project_submitted_datetime'].apply(lambda x: x.hour)
df['minute'] = df['project_submitted_datetime'].apply(lambda x: x.minute)
df['project_submitted_datetime'] = df['project_submitted_datetime'].values.astype(np.int64)
process_timestamp(train)
process_timestamp(test)
print('Done.')
# Preprocess text
print('Preprocessing text...')
cols = [
'project_title',
'project_essay',
'project_resource_summary',
'description_ttl'
]
n_features = [
400,
4040,
400,
400
]
for c_i, c in tqdm(enumerate(cols)):
print("TFIDF for %s" % (c))
tfidf = TfidfVectorizer(
max_features=n_features[c_i],
norm='l2',
)
tfidf.fit(df_all[c])
tfidf_train = np.array(tfidf.transform(train[c]).toarray(), dtype=np.float16)
tfidf_test = np.array(tfidf.transform(test[c]).toarray(), dtype=np.float16)
for i in range(n_features[c_i]):
train[c + '_tfidf_' + str(i)] = tfidf_train[:, i]
test[c + '_tfidf_' + str(i)] = tfidf_test[:, i]
del tfidf, tfidf_train, tfidf_test
gc.collect()
print('Done.')
gc.collect()
# Prepare data
cols_to_drop = [
'Unnamed: 0'
, 'id'
, 'teacher_id'
, 'project_title'
, 'project_essay'
, 'project_resource_summary'
, 'project_is_approved'
, 'description_ttl'
]
X = train.drop(cols_to_drop, axis=1, errors='ignore')
y = train['project_is_approved']
X_test = test.drop(cols_to_drop, axis=1, errors='ignore')
id_test = test['id'].values
feature_names = list(X.columns)
print(X.shape, X_test.shape)
# del train, test
gc.collect()
# Build the model
cnt = 0
p_buf = []
n_splits = 5
n_repeats = 1
kf = RepeatedKFold(
n_splits=n_splits,
n_repeats=n_repeats,
random_state=0)
auc_buf = []
num_rows = 60000
X_train_test = X.iloc[0:num_rows, :]
y_train_test = y.iloc[0:num_rows]
prob_ests = []
y_test = []
prb = np.array(prob_ests[0])
y_tst = np.asarray(y_test[0], np.int32)
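# Hypothetical continuation (assumption, not in the original source): prb and
# y_tst above index prob_ests[0] / y_test[0], so a cross-validation loop along
# these lines is assumed to populate them first. LightGBM parameters here are
# illustrative only, not taken from the original script.
def run_cv_folds(X_cv, y_cv):
    params = {'objective': 'binary', 'metric': 'auc', 'num_leaves': 32,
              'learning_rate': 0.05, 'verbose': -1}
    for train_index, valid_index in kf.split(X_cv):
        d_train = lgb.Dataset(X_cv.iloc[train_index], y_cv.iloc[train_index])
        d_valid = lgb.Dataset(X_cv.iloc[valid_index], y_cv.iloc[valid_index])
        model = lgb.train(params, d_train, num_boost_round=1000,
                          valid_sets=[d_valid])
        prob_ests.append(model.predict(X_cv.iloc[valid_index]))
        y_test.append(y_cv.iloc[valid_index].values)
        auc_buf.append(roc_auc_score(y_test[-1], prob_ests[-1]))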
# Copyright 2016 Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event segmentation using a Hidden Markov Model
Given an ROI timeseries, this class uses an annealed fitting procedure to
segment the timeseries into events with stable activity patterns. After
learning the signature activity pattern of each event, the model can then be
applied to other datasets to identify a corresponding sequence of events.
Full details are available in the bioRxiv preprint:
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>
Discovering event structure in continuous narrative perception and memory
Neuron, Volume 95, Issue 3, 709 - 721.e5
http://www.cell.com/neuron/abstract/S0896-6273(17)30593-7
"""
# Authors: <NAME> and <NAME> (Princeton University)
import numpy as np
from scipy import stats
import logging
import copy
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.exceptions import NotFittedError
from . import _utils as utils # type: ignore
logger = logging.getLogger(__name__)
__all__ = [
"EventSegment",
]
class EventSegment(BaseEstimator):
"""Class for event segmentation of continuous fMRI data
Parameters
----------
n_events: int
Number of segments to learn
step_var: Callable[[int], float] : default 4 * (0.98 ** (step - 1))
The Gaussian variance to use during fitting, as a function of the
number of steps. Should decrease slowly over time.
n_iter: int : default 500
Maximum number of steps to run during fitting
Attributes
----------
p_start, p_end: length n_events+1 ndarray
initial and final prior distributions over events
P: n_events+1 by n_events+1 ndarray
HMM transition matrix
ll_ : ndarray with length = number of training datasets
Log-likelihood for training datasets over the course of training
segments_: list of (time by event) ndarrays
Learned (soft) segmentation for training datasets
event_var_ : float
Gaussian variance at the end of learning
event_pat_ : voxel by event ndarray
Learned mean patterns for each event
"""
def _default_var_schedule(step):
return 4 * (0.98 ** (step - 1))
def __init__(self, n_events=2,
step_var=_default_var_schedule,
n_iter=500):
self.n_events = n_events
self.step_var = step_var
self.n_iter = n_iter
def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = copy.deepcopy(X)
if type(X) is not list:
X = check_array(X)
X = [X]
n_train = len(X)
for i in range(n_train):
X[i] = X[i].T
self.classes_ = np.arange(self.n_events)
n_dim = X[0].shape[0]
for i in range(n_train):
assert (X[i].shape[0] == n_dim)
# Double-check that data is z-scored in time
for i in range(n_train):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1],
self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
            mean_pat = np.mean(mean_pat, axis=0)
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Nipype translation of ANTs' workflows."""
# general purpose
from pkg_resources import resource_filename as pkgr_fn
# nipype
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype.interfaces.ants import (
AI,
ImageMath,
N4BiasFieldCorrection,
)
# niworkflows
from niworkflows.interfaces.bids import DerivativesDataSink as _DDS
from niworkflows.interfaces.images import RegridToZooms
from niworkflows.interfaces.nibabel import ApplyMask, Binarize, IntensityClip
from niworkflows.interfaces.fixes import (
FixHeaderRegistration as Registration,
FixHeaderApplyTransforms as ApplyTransforms,
)
from niworkflows.interfaces.reportlets.registration import (
SimpleBeforeAfterRPT as SimpleBeforeAfter,
)
from templateflow.api import get as get_template
from ..interfaces import DenoiseImage
from .. import __version__
class DerivativesDataSink(_DDS):
"""Generate a BIDS-Derivatives-compatible output folder."""
out_path_base = f"nirodents-{__version__}"
LOWRES_ZOOMS = (0.4, 0.4, 0.4)
HIRES_ZOOMS = (0.1, 0.1, 0.1)
def init_rodent_brain_extraction_wf(
ants_affine_init=False,
factor=20,
arc=0.12,
step=4,
grid=(0, 4, 4),
debug=False,
interim_checkpoints=True,
mem_gb=3.0,
mri_scheme="T2w",
name="rodent_brain_extraction_wf",
omp_nthreads=None,
output_dir=None,
template_id="Fischer344",
template_specs=None,
use_float=True,
):
"""
Build an atlas-based brain extraction pipeline for rodent T1w and T2w MRI data.
Parameters
----------
ants_affine_init : :obj:`bool`, optional
Set-up a pre-initialization step with ``antsAI`` to account for mis-oriented images.
"""
inputnode = pe.Node(
niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode"
)
outputnode = pe.Node(
niu.IdentityInterface(fields=["out_corrected", "out_brain", "out_mask"]),
name="outputnode",
)
template_specs = template_specs or {}
if template_id == "WHS" and "resolution" not in template_specs:
template_specs["resolution"] = 2
# Find a suitable target template in TemplateFlow
tpl_target_path = get_template(template_id, suffix=mri_scheme, **template_specs,)
if not tpl_target_path:
raise RuntimeError(
f"An instance of template <tpl-{template_id}> with MR scheme '{mri_scheme}'"
" could not be found."
)
tpl_brainmask_path = get_template(
template_id,
atlas=None,
hemi=None,
desc="brain",
suffix="probseg",
**template_specs,
) or get_template(
template_id,
atlas=None,
hemi=None,
desc="brain",
suffix="mask",
**template_specs,
)
tpl_regmask_path = get_template(
template_id,
atlas=None,
desc="BrainCerebellumExtraction",
suffix="mask",
**template_specs,
)
denoise = pe.Node(DenoiseImage(dimension=3, copy_header=True),
name="denoise", n_procs=omp_nthreads)
# Resample template to a controlled, isotropic resolution
res_tmpl = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS, smooth=True), name="res_tmpl")
# Create Laplacian images
lap_tmpl = pe.Node(ImageMath(operation="Laplacian", copy_header=True), name="lap_tmpl")
tmpl_sigma = pe.Node(niu.Function(function=_lap_sigma),
name="tmpl_sigma", run_without_submitting=True)
norm_lap_tmpl = pe.Node(niu.Function(function=_norm_lap), name="norm_lap_tmpl")
lap_target = pe.Node(ImageMath(operation="Laplacian", copy_header=True), name="lap_target")
target_sigma = pe.Node(niu.Function(function=_lap_sigma),
name="target_sigma", run_without_submitting=True)
norm_lap_target = pe.Node(niu.Function(function=_norm_lap), name="norm_lap_target")
# Set up initial spatial normalization
ants_params = "testing" if debug else "precise"
norm = pe.Node(
Registration(
from_file=pkgr_fn(
"nirodents", f"data/artsBrainExtraction_{ants_params}_{mri_scheme}.json"
)
),
name="norm",
n_procs=omp_nthreads,
mem_gb=mem_gb,
)
norm.inputs.float = use_float
# main workflow
wf = pe.Workflow(name)
# truncate target intensity for N4 correction
clip_target = pe.Node(IntensityClip(p_min=15, p_max=99.9), name="clip_target")
# truncate template intensity to match target
clip_tmpl = pe.Node(IntensityClip(p_min=5, p_max=98), name="clip_tmpl")
clip_tmpl.inputs.in_file = _pop(tpl_target_path)
# set INU bspline grid based on voxel size
bspline_grid = pe.Node(niu.Function(function=_bspline_grid), name="bspline_grid")
# INU correction of the target image
init_n4 = pe.Node(
N4BiasFieldCorrection(
dimension=3,
save_bias=False,
copy_header=True,
n_iterations=[50] * (4 - debug),
convergence_threshold=1e-7,
shrink_factor=4,
rescale_intensities=True,
),
n_procs=omp_nthreads,
name="init_n4",
)
clip_inu = pe.Node(IntensityClip(p_min=1, p_max=99.8), name="clip_inu")
# Create a buffer interface as a cache for the actual inputs to registration
buffernode = pe.Node(
niu.IdentityInterface(fields=["hires_target"]), name="buffernode"
)
# Merge image nodes
mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
# fmt: off
wf.connect([
# Target image massaging
(inputnode, denoise, [(("in_files", _pop), "input_image")]),
(inputnode, bspline_grid, [(("in_files", _pop), "in_file")]),
(bspline_grid, init_n4, [("out", "args")]),
(denoise, clip_target, [("output_image", "in_file")]),
(clip_target, init_n4, [("out_file", "input_image")]),
(init_n4, clip_inu, [("output_image", "in_file")]),
(clip_inu, target_sigma, [("out_file", "in_file")]),
(clip_inu, buffernode, [("out_file", "hires_target")]),
(buffernode, lap_target, [("hires_target", "op1")]),
(target_sigma, lap_target, [("out", "op2")]),
(lap_target, norm_lap_target, [("output_image", "in_file")]),
(buffernode, mrg_target, [("hires_target", "in1")]),
(norm_lap_target, mrg_target, [("out", "in2")]),
# Template massaging
(clip_tmpl, res_tmpl, [("out_file", "in_file")]),
(res_tmpl, tmpl_sigma, [("out_file", "in_file")]),
(res_tmpl, lap_tmpl, [("out_file", "op1")]),
(tmpl_sigma, lap_tmpl, [("out", "op2")]),
(lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
(res_tmpl, mrg_tmpl, [("out_file", "in1")]),
(norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
# Setup inputs to spatial normalization
(mrg_target, norm, [("out", "moving_image")]),
(mrg_tmpl, norm, [("out", "fixed_image")]),
])
# fmt: on
# Graft a template registration-mask if present
if tpl_regmask_path:
hires_mask = pe.Node(
ApplyTransforms(
input_image=_pop(tpl_regmask_path),
transforms="identity",
interpolation="Gaussian",
float=True,
),
name="hires_mask",
mem_gb=1,
)
# fmt: off
wf.connect([
(res_tmpl, hires_mask, [("out_file", "reference_image")]),
(hires_mask, norm, [("output_image", "fixed_image_masks")]),
])
# fmt: on
# Finally project brain mask and refine INU correction
map_brainmask = pe.Node(
ApplyTransforms(interpolation="Gaussian", float=True),
name="map_brainmask",
mem_gb=1,
)
map_brainmask.inputs.input_image = str(tpl_brainmask_path)
thr_brainmask = pe.Node(Binarize(thresh_low=0.50), name="thr_brainmask")
final_n4 = pe.Node(
N4BiasFieldCorrection(
dimension=3,
save_bias=True,
copy_header=True,
n_iterations=[50] * 4,
convergence_threshold=1e-7,
rescale_intensities=True,
shrink_factor=4,
),
n_procs=omp_nthreads,
name="final_n4",
)
final_mask = pe.Node(ApplyMask(), name="final_mask")
# fmt: off
wf.connect([
(inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
(bspline_grid, final_n4, [("out", "args")]),
(denoise, final_n4, [("output_image", "input_image")]),
# Project template's brainmask into subject space
(norm, map_brainmask, [("reverse_transforms", "transforms"),
("reverse_invert_flags", "invert_transform_flags")]),
(map_brainmask, thr_brainmask, [("output_image", "in_file")]),
# take a second pass of N4
(map_brainmask, final_n4, [("output_image", "mask_image")]),
(final_n4, final_mask, [("output_image", "in_file")]),
(thr_brainmask, final_mask, [("out_mask", "in_mask")]),
(final_n4, outputnode, [("output_image", "out_corrected")]),
(thr_brainmask, outputnode, [("out_mask", "out_mask")]),
(final_mask, outputnode, [("out_file", "out_brain")]),
])
# fmt: on
if interim_checkpoints:
final_apply = pe.Node(
ApplyTransforms(interpolation="BSpline", float=True),
name="final_apply",
mem_gb=1,
)
final_report = pe.Node(
SimpleBeforeAfter(after_label="target", before_label=f"tpl-{template_id}"),
name="final_report",
)
# fmt: off
wf.connect([
(inputnode, final_apply, [(("in_files", _pop), "reference_image")]),
(res_tmpl, final_apply, [("out_file", "input_image")]),
(norm, final_apply, [("reverse_transforms", "transforms"),
("reverse_invert_flags", "invert_transform_flags")]),
(final_apply, final_report, [("output_image", "before")]),
(outputnode, final_report, [("out_corrected", "after"), ("out_mask", "wm_seg")]),
])
# fmt: on
if ants_affine_init:
# Initialize transforms with antsAI
lowres_tmpl = pe.Node(
RegridToZooms(zooms=LOWRES_ZOOMS, smooth=True), name="lowres_tmpl"
)
lowres_trgt = pe.Node(
RegridToZooms(zooms=LOWRES_ZOOMS, smooth=True), name="lowres_trgt"
)
init_aff = pe.Node(
AI(
convergence=(100, 1e-6, 10),
metric=("Mattes", 32, "Random", 0.25),
principal_axes=False,
search_factor=(factor, arc),
search_grid=(step, grid),
transform=("Affine", 0.1),
verbose=True,
),
name="init_aff",
n_procs=omp_nthreads,
)
# fmt: off
wf.connect([
(clip_inu, lowres_trgt, [("out_file", "in_file")]),
(lowres_trgt, init_aff, [("out_file", "moving_image")]),
(clip_tmpl, lowres_tmpl, [("out_file", "in_file")]),
(lowres_tmpl, init_aff, [("out_file", "fixed_image")]),
(init_aff, norm, [("output_transform", "initial_moving_transform")]),
])
# fmt: on
if tpl_regmask_path:
lowres_mask = pe.Node(
ApplyTransforms(
input_image=_pop(tpl_regmask_path),
transforms="identity",
interpolation="MultiLabel",
),
name="lowres_mask",
mem_gb=1,
)
# fmt: off
wf.connect([
(lowres_tmpl, lowres_mask, [("out_file", "reference_image")]),
(lowres_mask, init_aff, [("output_image", "fixed_image_mask")]),
])
# fmt: on
if interim_checkpoints:
init_apply = pe.Node(
ApplyTransforms(interpolation="BSpline", invert_transform_flags=[True]),
name="init_apply",
mem_gb=1,
)
init_mask = pe.Node(
ApplyTransforms(interpolation="Gaussian", invert_transform_flags=[True]),
name="init_mask",
mem_gb=1,
)
init_mask.inputs.input_image = str(tpl_brainmask_path)
init_report = pe.Node(
SimpleBeforeAfter(
out_report="init_report.svg",
before_label="target",
after_label="template",
),
name="init_report",
)
# fmt: off
wf.connect([
(lowres_trgt, init_apply, [("out_file", "reference_image")]),
(lowres_tmpl, init_apply, [("out_file", "input_image")]),
(init_aff, init_apply, [("output_transform", "transforms")]),
(lowres_trgt, init_report, [("out_file", "before")]),
(init_apply, init_report, [("output_image", "after")]),
(lowres_trgt, init_mask, [("out_file", "reference_image")]),
(init_aff, init_mask, [("output_transform", "transforms")]),
(init_mask, init_report, [("output_image", "wm_seg")]),
])
# fmt: on
else:
norm.inputs.initial_moving_transform_com = 1
if output_dir:
ds_final_inu = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir), desc="preproc", compress=True,
), name="ds_final_inu", run_without_submitting=True
)
ds_final_msk = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir), desc="brain", suffix="mask", compress=True,
), name="ds_final_msk", run_without_submitting=True
)
# fmt: off
wf.connect([
(inputnode, ds_final_inu, [("in_files", "source_file")]),
(inputnode, ds_final_msk, [("in_files", "source_file")]),
(outputnode, ds_final_inu, [("out_corrected", "in_file")]),
(outputnode, ds_final_msk, [("out_mask", "in_file")]),
])
# fmt: on
if interim_checkpoints:
ds_report = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir), desc="brain",
suffix="mask", datatype="figures"
), name="ds_report", run_without_submitting=True
)
# fmt: off
wf.connect([
(inputnode, ds_report, [("in_files", "source_file")]),
(final_report, ds_report, [("out_report", "in_file")]),
])
# fmt: on
if ants_affine_init and interim_checkpoints:
ds_report_init = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir), desc="init",
suffix="mask", datatype="figures"
), name="ds_report_init", run_without_submitting=True
)
# fmt: off
wf.connect([
(inputnode, ds_report_init, [("in_files", "source_file")]),
(init_report, ds_report_init, [("out_report", "in_file")]),
])
# fmt: on
return wf
def _pop(in_files):
if isinstance(in_files, (list, tuple)):
return in_files[0]
return in_files
def _bspline_grid(in_file):
import nibabel as nb
import numpy as np
import math
img = nb.load(in_file)
zooms = img.header.get_zooms()[:3]
extent = (np.array(img.shape[:3]) - 1) * zooms
# get mesh resolution ratio
retval = [f"{math.ceil(i / extent[np.argmin(extent)])}" for i in extent]
return f"-b [{'x'.join(retval)}]"
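# Worked example (hypothetical input): for a 2 mm isotropic image of shape
# (96, 96, 64), extent = (190, 190, 126) mm and the shortest extent is 126 mm,
# so the per-axis ratios are ceil(190/126) = 2, 2 and ceil(126/126) = 1,
# yielding the N4 argument string "-b [2x2x1]".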
def _lap_sigma(in_file):
import numpy as np
import nibabel as nb
img = nb.load(in_file)
min_vox = np.amin(img.header.get_zooms())
return str(1.5 * min_vox ** 0.75)
def _norm_lap(in_file):
from pathlib import Path
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
img = nb.load(in_file)
data = img.get_fdata()
    data -= np.median(data)
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class implementation of the per-arm MovieLens Bandit environment."""
from __future__ import absolute_import
# Using Type Annotations.
import random
from typing import Optional, Text
import gin
import numpy as np
from tf_agents.bandits.environments import bandit_py_environment
from tf_agents.bandits.environments import dataset_utilities
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step as ts
GLOBAL_KEY = bandit_spec_utils.GLOBAL_FEATURE_KEY
PER_ARM_KEY = bandit_spec_utils.PER_ARM_FEATURE_KEY
@gin.configurable
class MovieLensPerArmPyEnvironment(bandit_py_environment.BanditPyEnvironment):
"""Implements the per-arm version of the MovieLens Bandit environment.
This environment implements the MovieLens 100K dataset, available at:
https://www.kaggle.com/prajitdatta/movielens-100k-dataset
This dataset contains 100K ratings from 943 users on 1682 items.
This csv list of:
user id | item id | rating | timestamp.
This environment computes a low-rank matrix factorization (using SVD) of the
data matrix `A`, such that: `A ~= U * Sigma * V^T`.
The environment uses the rows of `U` as global (or user) features, and the
rows of `V` as per-arm (or movie) features.
The reward of recommending movie `v` to user `u` is `u * Sigma * v^T`.
"""
def __init__(self,
data_dir: Text,
rank_k: int,
batch_size: int = 1,
num_actions: int = 50,
name: Optional[Text] = 'movielens_per_arm'):
"""Initializes the Per-arm MovieLens Bandit environment.
Args:
data_dir: (string) Directory where the data lies (in text form).
rank_k : (int) Which rank to use in the matrix factorization. This will
also be the feature dimension of both the user and the movie features.
batch_size: (int) Number of observations generated per call.
num_actions: (int) How many movies to choose from per round.
name: (string) The name of this environment instance.
"""
self._batch_size = batch_size
self._context_dim = rank_k
self._num_actions = num_actions
# Compute the matrix factorization.
self._data_matrix = dataset_utilities.load_movielens_data(data_dir)
self._num_users, self._num_movies = self._data_matrix.shape
# Compute the SVD.
u, s, vh = np.linalg.svd(self._data_matrix, full_matrices=False)
# Keep only the largest singular values.
self._u_hat = u[:, :rank_k].astype(np.float32)
self._s_hat = s[:rank_k].astype(np.float32)
self._v_hat = np.transpose(vh[:rank_k]).astype(np.float32)
self._approx_ratings_matrix = np.matmul(self._u_hat * self._s_hat,
np.transpose(self._v_hat))
self._action_spec = array_spec.BoundedArraySpec(
shape=(),
dtype=np.int32,
minimum=0,
maximum=num_actions - 1,
name='action')
observation_spec = {
GLOBAL_KEY:
array_spec.ArraySpec(shape=[rank_k], dtype=np.float32),
PER_ARM_KEY:
array_spec.ArraySpec(
shape=[num_actions, rank_k], dtype=np.float32),
}
self._time_step_spec = ts.time_step_spec(observation_spec)
self._current_user_indices = np.zeros(batch_size, dtype=np.int32)
self._previous_user_indices = np.zeros(batch_size, dtype=np.int32)
self._current_movie_indices = np.zeros([batch_size, num_actions],
dtype=np.int32)
self._previous_movie_indices = np.zeros([batch_size, num_actions],
dtype=np.int32)
self._observation = {
GLOBAL_KEY:
np.zeros([batch_size, rank_k]),
PER_ARM_KEY:
            np.zeros([batch_size, num_actions, rank_k]),
    }
"""
Robust MLR via iteratively reweighted least squares.
"""
import numpy as np
from utide.utilities import Bunch
# Weighting functions:
def andrews(r):
r = np.abs(r)
    r = np.maximum(np.sqrt(np.spacing(1)), r)
w = (r < np.pi) * np.sin(r) / r
return w
def bisquare(r):
r = np.abs(r)
w = (r < 1) * (1 - r ** 2) ** 2
return w
def cauchy(r):
r = np.abs(r)
w = 1 / (1 + r ** 2)
return w
def fair(r):
w = 1 / (1 + np.abs(r))
return w
def huber(r):
    w = 1 / np.maximum(1, np.abs(r))
return w
def logistic(r):
r = np.abs(r)
    r = np.maximum(np.sqrt(np.spacing(1)), r)
w = np.tanh(r) / r
return w
def ols(r):
w = np.ones(len(r))
return w
def talwar(r):
w = (np.abs(r) < 1).astype(float)
return w
def welsch(r):
r = np.abs(r)
w = np.exp(-(r ** 2))
return w
wfuncdict = dict(
andrews=andrews,
bisquare=bisquare,
cauchy=cauchy,
fair=fair,
huber=huber,
logistic=logistic,
ols=ols,
talwar=talwar,
welsch=welsch,
)
tune_defaults = {
"andrews": 1.339,
"bisquare": 4.685,
"cauchy": 2.385,
"fair": 1.400,
"huber": 1.345,
"logistic": 1.205,
"ols": 1,
"talwar": 2.795,
"welsch": 2.985,
}
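# Typical IRLS usage of the two tables above (a sketch, not code from this
# module): residuals are scaled by the tuning constant times a robust sigma
# estimate before being passed to the chosen weight function, e.g.
#
#     r = resid / (tune_defaults["huber"] * sigma_hat(resid))
#     w = wfuncdict["huber"](r)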
def sigma_hat(x):
"""
Robust estimate of standard deviation based on medians.
"""
# The center could be based on the mean or some other function.
return np.median(np.abs(x - np.median(x))) / 0.6745
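# The 0.6745 factor makes the median absolute deviation consistent with the
# standard deviation of a Gaussian, so for x ~ N(0, 1) this returns ~1.0:
#
#     rng = np.random.default_rng(0)
#     print(sigma_hat(rng.standard_normal(100_000)))  # close to 1.0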
def leverage(x):
"""
Calculate leverage as the diagonal of the "Hat" matrix of the
model matrix, x.
"""
# The Hat is x times its pseudo-inverse.
    # In einsum, the diagonal is calculated for each row of x
# and column of pinv as the dot product of column j of x.T
# and column j of pinv; hence the 'j' in the output means
# *don't* sum over j.
    hdiag = np.einsum("ij, ij -> j", x.T, np.linalg.pinv(x))
    return hdiag
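# Sanity check (illustrative): the leverages of a full-rank model matrix sum
# to its number of columns, because trace(H) = rank(X).
#
#     x = np.c_[np.ones(5), np.arange(5.0)]
#     assert np.isclose(leverage(x).sum(), 2.0)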
from os.path import join
import pandas as pd
import numpy as np
import random
import cv2
from src import config
def get_correct_train_ids(train_csv_path, train_dir):
train_df = pd.read_csv(train_csv_path, index_col='id')
train_df.fillna('', inplace=True)
correct_ids = []
for index, row in train_df.iterrows():
image_path = join(train_dir, 'images', index + '.png')
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
pixel_sum = np.sum(image)
if pixel_sum:
correct_ids.append(index)
return correct_ids
def make_train_folds(train_csv_path, train_dir, depths_path, n_folds):
depths_df = pd.read_csv(depths_path, index_col='id')
train_ids = get_correct_train_ids(train_csv_path, train_dir)
depths_df = depths_df.loc[train_ids]
depths_df['image_path'] = depths_df.index.map(
lambda x: join(train_dir, 'images', x + '.png'))
depths_df['mask_path'] = depths_df.index.map(
lambda x: join(train_dir, 'masks', x + '.png'))
depths_df.sort_values('z', inplace=True)
depths_df['fold'] = [i % n_folds for i in range(depths_df.shape[0])]
depths_df.sort_index(inplace=True)
return depths_df
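# The returned dataframe is indexed by sample id and holds, per row, the image
# and mask paths plus a depth-stratified fold number in [0, n_folds). Example
# call (the paths are placeholders for illustration):
#
#     folds_df = make_train_folds('train.csv', 'train', 'depths.csv', n_folds=5)
#     print(folds_df[['z', 'fold']].head())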
if __name__ == '__main__':
random.seed(42)
    np.random.seed(42)
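    # A hypothetical continuation (the attribute names on `config` are
    # assumptions, not part of the original module):
    #
    #     folds_df = make_train_folds(config.TRAIN_CSV_PATH, config.TRAIN_DIR,
    #                                 config.DEPTHS_PATH, config.N_FOLDS)
    #     folds_df.to_csv(config.TRAIN_FOLDS_PATH)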
import os
import shutil
import subprocess
import sparsechem as sc
import numpy as np
import pandas as pd  # used by create_weights below
import string
import glob
import scipy.sparse
from urllib.request import urlretrieve
def download_chembl23(data_dir="test_chembl23", remove_previous=False):
if remove_previous and os.path.isdir(data_dir):
os.rmdir(data_dir)
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
files = ["chembl_23mini_x.npy",
"chembl_23mini_y.npy",
"chembl_23mini_folds.npy",
"chembl_23mini_class_weights.csv",
"chembl_23mini_regr_weights.csv",
"chembl_23mini_y_censored.npy"]
url = "https://www.esat.kuleuven.be/~jsimm/"
for f in files:
if not os.path.isfile(os.path.join(data_dir, f)):
print(f"Downloading '{f}' into '{data_dir}'.")
urlretrieve(f"{url}{f}", os.path.join(data_dir, f))
def create_weights(data_dir="test_chembl23"):
df = pd.DataFrame({
"task_id": np.arange(100),
"training_weight": np.clip(np.random.randn(100), 0, 1),
"task_type": np.random.choice(["adme", "panel", "other"], size=100),
})
df["aggregation_weight"] = np.sqrt(df.training_weight)
df.to_csv(f"{data_dir}/chembl_23mini_class_weights.csv", index=False)
## censored weights for regression
df["censored_weight"] = np.clip(np.random.randn(100), 0, 1)
df.to_csv(f"{data_dir}/chembl_23mini_regr_weights.csv", index=False)
def random_str(size):
    return "".join([string.ascii_lowercase[i] for i in np.random.randint(0, 26, size=size)])
def test_classification(dev, data_dir="test_chembl23", rm_output=True):
rstr = random_str(12)
output_dir = f"./{data_dir}/models-{rstr}/"
cmd = (
f"python train.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_class ./{data_dir}/chembl_23mini_y.npy" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --batch_ratio 0.1" +
f" --output_dir {output_dir}" +
f" --hidden_sizes 20" +
f" --epochs 2" +
f" --lr 1e-3" +
f" --lr_steps 1" +
f" --dev {dev}" +
f" --verbose 1"
)
download_chembl23(data_dir)
res = subprocess.run(cmd.split())
assert res.returncode == 0
conf_file = glob.glob(f"{output_dir}/*.json")[0]
model_file = glob.glob(f"{output_dir}/*.pt")[0]
results = sc.load_results(conf_file)
assert os.path.isdir(os.path.join(output_dir, "boards"))
assert "conf" in results
assert "validation" in results
assert results["validation"]["classification"].shape[0] > 0
cmd_pred = (
f"python predict.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --outprefix {output_dir}/yhat" +
f" --conf {conf_file}" +
f" --model {model_file}" +
f" --dev {dev}"
)
res_pred = subprocess.run(cmd_pred.split())
assert res_pred.returncode == 0
yhat = np.load(f"{output_dir}/yhat-class.npy")
assert results["conf"].class_output_size == yhat.shape[1]
assert (yhat >= 0).all()
assert (yhat <= 1).all()
## checking --last_hidden 1
cmd_hidden = (
f"python predict.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --outprefix {output_dir}/yhat" +
f" --conf {conf_file}" +
f" --model {model_file}" +
f" --last_hidden 1" +
f" --dev {dev}"
)
res_hidden = subprocess.run(cmd_hidden.split())
assert res_hidden.returncode == 0
    hidden = np.load(f"{output_dir}/yhat-hidden.npy")
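    # A natural follow-up check (an assumption, not original code): the hidden
    # representation should have one row per compound and width equal to the
    # --hidden_sizes value used above (20), e.g.
    #     assert hidden.shape == (yhat.shape[0], 20)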
#!/local/anaconda/bin/python
# IMPORTANT: leave the above line as is.
import sys
import numpy as np
DIMENSION = 400 # Dimension of the original data.
DIMENSION_T = 401 # Dimension of the original data.
CLASSES = (-1, +1) # The classes that we are trying to predict.
def transform(x_orig):
return np.append(x_orig, 1)
def print_vector(v):
for x_i in np.nditer(v):
print(x_i),
print('')
def step(t, lam_sqrt, w_prev, x, y):
w_next = w_prev
eta = 1/np.sqrt(t)
if y*x.dot(w_prev) < 1:
w_prim = w_prev+eta*y*x
        w_next = w_prim * min(1, 1/(lam_sqrt*np.linalg.norm(w_prim)))
    return w_next
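# The step above follows the Pegasos-style update for the hinge loss: with
# learning rate eta = 1/sqrt(t), a subgradient step w' = w + eta*y*x is taken
# only when the margin y*<w, x> is below 1, and the result is rescaled by
# min(1, 1/(lam_sqrt*||w'||)), i.e. projected onto the ball of radius
# 1/lam_sqrt (lam_sqrt is presumably sqrt(lambda)).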
import tensorflow as tf
import pickle
import time
import os
from tensorflow.python.keras.layers import Dense, Embedding, Conv2D, Dropout, Masking
from tensorflow.python.keras.regularizers import l1, l2
import numpy as np
# Original version
class ADMN():
def __init__(self,args):
tf.set_random_seed(0)
np.random.seed(2019)
        # Basic model hyperparameters
self.review_max_word = args["review_max_word"]
        self.review_max_num = args["review_max_num"]  # review window size (number of reviews kept per user/item)
self.vocabulary_num = args["vocabulary_num"]
self.user_num = args["user_num"]
self.item_num = args["item_num"]
self.regularizers = args["regularizers"]
        self.rating_weight = args["rating_weight"]  # scheme used to weight reviews by their ratings
        # Embedding dimensions for user ids, review text, and item ids; user and item dimensions should normally match
self.word_embedding_dimension = args["word_embedding_dimension"]
self.user_embedding_dimension = args["user_embedding_dimension"]
self.item_embedding_dimension = args["item_embedding_dimension"]
        # CNN convolution layer parameters
self.cnn_filters = args["cnn_filters"]
self.cnn_padding = args["cnn_padding"]
self.cnn_activation = args["cnn_activation"]
self.cnn_kernel_regularizer = args["cnn_kernel_regularizer"]
self.cnn_kernel_size = args["cnn_kernel_size"]
self.cnn_strides = args["cnn_strides"]
self.dropout_size = args["dropout_size"]
        # FM (factorization machine) layer parameters
self.fm_size = args["fm_size"]
self.fm_K = args["fm_K"]
        # Training parameters
self.learning_rate = args["learning_rate"]
self.beta1 = args["beta1"]
self.beta2 = args["beta2"]
self.epsilon = args["epsilon"]
#self.word_embedding_path = os.path.join(args["root_path"],args["input_data_type"],"word_emb.pkl")
self.batch_size = args["batch_size"]
self.train_time = args["train_time"]
self.sess = args["sess"]
self.is_sample = args["is_sample"]
self.sample_ratio = args["sample_ratio"]
with tf.name_scope("creat_placeholder"):
            # shape (None, ...) corresponds to the batch size
            self.user_id = tf.placeholder(dtype="int32", shape=(None, 1), name="user_id")  # user_id
            self.item_id = tf.placeholder(dtype="int32", shape=(None, 1), name="item_id")  # item_id
            self.user_review = tf.placeholder(tf.float32, [None, self.review_max_num, self.review_max_word],
                                              name="user_review")  # user reviews
            self.item_review = tf.placeholder(tf.float32, [None, self.review_max_num, self.review_max_word], name="item_review")  # item reviews
            self.user_commented_items_id = tf.placeholder(dtype="int32", shape=(None, self.review_max_num),
                                                          name="user_commented_items_id")  # ids of the items this user has reviewed
            self.user_commented_items_rate = tf.placeholder(dtype="float32", shape=(None, self.review_max_num), name="user_commented_items_rate")  # ratings aligned with user_commented_items_id
            self.item_commented_users_id = tf.placeholder(dtype="int32", shape=(None, self.review_max_num), name="item_commented_users_id")  # ids of the users who reviewed this item
            self.item_commented_users_rate = tf.placeholder(dtype="float32", shape=(None, self.review_max_num), name="item_commented_users_rate")  # ratings those users gave to this item
            self.input_y = tf.placeholder(tf.float32, [None, 1], name="input_y")  # target rating
with tf.name_scope("build_review_embedding"):
self.user_review_flat = tf.reshape(self.user_review,[-1,self.review_max_num*self.review_max_word])
print("user_review_flat:{}".format(self.user_review_flat.shape))
self.item_review_flat = tf.reshape(self.item_review,[-1,self.review_max_num*self.review_max_word])
print("item_review_flat:{}".format(self.item_review_flat.shape))
            self.user_review_mask = Masking(mask_value=0, input_shape=(self.review_max_num, self.review_max_word))(self.user_review_flat)  # mask zero values so padding is ignored
            self.item_review_mask = Masking(mask_value=0, input_shape=(self.review_max_num, self.review_max_word))(self.item_review_flat)  # ignore zero values in item reviews
self.review_embedding_layer = Embedding(input_dim=self.vocabulary_num,output_dim=self.word_embedding_dimension,input_length=self.review_max_num*self.review_max_num)
self.user_review_embedding = self.review_embedding_layer(self.user_review_mask)
self.user_review_embedding = tf.reshape(self.user_review_embedding,shape=[-1, self.review_max_num, self.review_max_word, self.word_embedding_dimension])
print("user_review_embedding:{}".format(self.user_review_embedding.shape))
self.item_review_embedding = self.review_embedding_layer(self.item_review_mask)
self.item_review_embedding = tf.reshape(self.item_review_embedding,shape=[-1, self.review_max_num, self.review_max_word, self.word_embedding_dimension])
print("item_review_embedding:{}".format(self.item_review_embedding.shape))
self.user_review_embedding_sentence = tf.reduce_sum(self.user_review_embedding,axis=2)
print("user_review_embedding_sentence:{}".format(self.user_review_embedding_sentence.shape))
self.item_review_embedding_sentence = tf.reduce_sum(self.item_review_embedding,axis=2)
print("item_review_embedding_sentence:{}".format(self.item_review_embedding_sentence.shape))
        # User and item id embeddings
with tf.name_scope("build_user_item_id_embedding"):
self.user_embedding_layer = Embedding(input_dim=self.user_num,output_dim=self.user_embedding_dimension)
self.user_id_embedding = self.user_embedding_layer(self.user_id)
self.item_embedding_layer = Embedding(input_dim=self.item_num,output_dim=self.item_embedding_dimension)
self.item_id_embedding = self.item_embedding_layer(self.item_id)
self.user_commented_items_id_mask = Masking(mask_value=0)(self.user_commented_items_id)
self.item_commented_users_id_mask = Masking(mask_value=0)(self.item_commented_users_id)
self.user_commented_items_id_mask_embedding = self.item_embedding_layer(self.user_commented_items_id_mask)
self.item_commented_users_id_mask_embedding = self.user_embedding_layer(self.item_commented_users_id_mask)
print("user_commented_items_id_mask_embedding:{}".format(self.user_commented_items_id_mask_embedding.shape))
print("item_commented_users_id_mask_embedding:{}".format(self.item_commented_users_id_mask_embedding.shape))
with tf.name_scope("build_user_item_extra_embedding"):
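            # The branches below build per-review attention weights over the
            # review window, selected by self.rating_weight: "base" normalizes
            # raw ratings to sum to 1, "softmax" applies a softmax over the
            # ratings, "unbias_softmax" / "abs_unbias" / "abs_unbias_softmax"
            # first remove the per-user (or per-item) mean rating, and
            # "no_rating" learns the weights from id-embedding interactions
            # instead of using ratings at all.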
if (self.rating_weight == "base"): # 1
self.user_commented_items_rate_sum = tf.reduce_sum(self.user_commented_items_rate, axis=1, keepdims=True)
self.user_commented_items_rate_base = self.user_commented_items_rate / self.user_commented_items_rate_sum
self.user_commented_items_rate_base_weight = tf.reshape(self.user_commented_items_rate_base,
shape=(-1, self.review_max_num, 1))
self.user_commented_items_weight = self.user_commented_items_rate_base_weight
self.item_commented_users_rate_sum = tf.reduce_sum(self.item_commented_users_rate, axis=1, keepdims=True)
self.item_commented_users_rate_base = self.item_commented_users_rate / self.item_commented_users_rate_sum
self.item_commented_users_rate_base_weight = tf.reshape(self.item_commented_users_rate_base,
shape=(-1, self.review_max_num, 1))
self.item_commented_users_weight = self.item_commented_users_rate_base_weight
if(self.rating_weight=="softmax"): #2
self.user_commented_items_rate_softmax = tf.reshape(tf.nn.softmax(self.user_commented_items_rate,axis=1,name="user_commented_item_rate_softmax"),shape=(-1,self.review_max_num,1))
self.user_commented_items_weight = self.user_commented_items_rate_softmax
print("user_commented_items_rate_softmax:{}".format(self.user_commented_items_rate_softmax.shape))
self.item_commented_users_rate_softmax = tf.reshape(tf.nn.softmax(self.item_commented_users_rate,axis=1,name="item_commented_item_rate_softmax"),shape=(-1,self.review_max_num,1))
print("item_commented_users_rate_softmax:{}".format(self.item_commented_users_rate_softmax.shape))
self.item_commented_users_weight = self.item_commented_users_rate_softmax
if(self.rating_weight == "unbias_softmax"): #3
self.user_commented_items_rate_mean = tf.reduce_mean(self.user_commented_items_rate,axis=1,keepdims=True)
self.user_commented_items_rate_unbias = self.user_commented_items_rate - self.user_commented_items_rate_mean
self.user_commented_items_rate_unbias_softmax = tf.reshape(tf.nn.softmax(self.user_commented_items_rate_unbias,axis=1,name="user_commented_items_rate_unbias_softmax"),shape=(-1,self.review_max_num,1))
self.user_commented_items_weight = self.user_commented_items_rate_unbias_softmax
self.item_commented_users_rate_mean = tf.reduce_mean(self.item_commented_users_rate,axis=1,keepdims=True)
self.item_commented_users_rate_unbias = self.item_commented_users_rate - self.item_commented_users_rate_mean
self.item_commented_users_rate_unbias_softmax = tf.reshape(tf.nn.softmax(self.item_commented_users_rate_unbias,axis=1,name="item_commented_user_rate_unbias_softmax"),shape=(-1,self.review_max_num,1))
self.item_commented_users_weight = self.item_commented_users_rate_unbias_softmax
if (self.rating_weight == "abs_unbias"): # 4
self.user_commented_items_rate_mean = tf.reduce_mean(self.user_commented_items_rate, axis=1,
keepdims=True)
self.user_commented_items_rate_abs_unbias = tf.abs(
self.user_commented_items_rate - self.user_commented_items_rate_mean)
self.user_commented_items_rate_abs_unbias_sum = tf.reduce_sum(self.user_commented_items_rate, axis=1,
keepdims=True)
self.user_commented_items_rate_abs_unbias_weight = self.user_commented_items_rate / self.user_commented_items_rate_abs_unbias_sum
self.user_commented_items_weight = tf.reshape(self.user_commented_items_rate_abs_unbias_weight,
shape=(-1, self.review_max_num, 1))
self.item_commented_users_rate_mean = tf.reduce_mean(self.item_commented_users_rate, axis=1,
keepdims=True)
self.item_commented_users_rate_abs_unbias = tf.abs(
self.item_commented_users_rate - self.item_commented_users_rate_mean)
self.item_commented_users_rate_abs_unbias_sum = tf.reduce_sum(self.item_commented_users_rate_abs_unbias,
axis=1, keepdims=True)
self.item_commented_users_rate_abs_unbias_weight = self.item_commented_users_rate / self.item_commented_users_rate_abs_unbias_sum
self.item_commented_users_weight = tf.reshape(self.item_commented_users_rate_abs_unbias_weight,
shape=(-1, self.review_max_num, 1))
if(self.rating_weight == "abs_unbias_softmax"): #5
self.user_commented_items_rate_mean = tf.reduce_mean(self.user_commented_items_rate, axis=1, keepdims=True)
self.user_commented_items_rate_abs_unbias = tf.abs(self.user_commented_items_rate - self.user_commented_items_rate_mean)
self.user_commented_items_rate_abs_unbias_softmax = tf.reshape(
tf.nn.softmax(self.user_commented_items_rate_abs_unbias, axis=1,
name="user_commented_items_rate_abs_unbias_softmax"), shape=(-1, self.review_max_num, 1))
self.user_commented_items_weight = self.user_commented_items_rate_abs_unbias_softmax
self.item_commented_users_rate_mean = tf.reduce_mean(self.item_commented_users_rate, axis=1, keepdims=True)
self.item_commented_users_rate_abs_unbias = tf.abs(self.item_commented_users_rate - self.item_commented_users_rate_mean)
self.item_commented_users_rate_abs_unbias_softmax = tf.reshape(
tf.nn.softmax(self.item_commented_users_rate_abs_unbias, axis=1,
name="item_commented_user_rate_abs_unbias_softmax"), shape=(-1, self.review_max_num, 1))
self.item_commented_users_weight = self.item_commented_users_rate_abs_unbias_softmax
if(self.rating_weight == "no_rating"): #6
self.user_review_to_itemId = tf.reshape(tf.multiply(self.user_commented_items_id_mask_embedding,self.item_id_embedding),shape=(-1,self.user_embedding_dimension))
self.user_review_to_itemId_dense = Dense(1,activation="relu")(self.user_review_to_itemId)
self.user_review_to_itemId_dense = tf.reshape(self.user_review_to_itemId_dense,shape=(-1,self.review_max_num,1))
print("user_review_to_itemId_dense:{}".format(self.user_review_to_itemId_dense.shape))
self.user_review_to_itemId_dense_softmax =tf.nn.softmax(self.user_review_to_itemId_dense, axis=1,name="user_review_to_itemId_dense_softmax")
print("user_review_to_itemId_dense_softmax:{}".format(self.user_review_to_itemId_dense_softmax.shape))
self.user_review_to_itemId_dense_softmax = tf.reshape( self.user_review_to_itemId_dense_softmax,shape=[-1,self.review_max_num,1])
self.user_commented_items_weight = self.user_review_to_itemId_dense_softmax
self.item_review_to_userId = tf.reshape(tf.multiply(self.item_commented_users_id_mask_embedding,self.user_id_embedding),shape=(-1,self.user_embedding_dimension))
self.item_review_to_userId_dense = Dense(1,activation="relu")(self.item_review_to_userId)
self.item_review_to_userId_dense = tf.reshape(self.item_review_to_userId_dense,shape=(-1,self.review_max_num,1))
self.item_review_to_userId_dense_softmax = tf.nn.softmax(self.item_review_to_userId_dense,axis=1,
name="item_review_to_userId_dense_softmax")
self.item_review_to_userId_dense_softmax = tf.reshape(self.item_review_to_userId_dense_softmax,shape=[-1,self.review_max_num,1])
self.item_commented_users_weight = self.item_review_to_userId_dense_softmax
self.user_review_weight = self.user_commented_items_weight * self.user_review_embedding_sentence
self.item_review_weight = self.item_commented_users_weight * self.item_review_embedding_sentence
self.user_review_feature = tf.reduce_sum(tf.multiply(self.user_review_weight,self.item_id_embedding),axis=1,keepdims=True)
self.item_review_feature = tf.reduce_sum(tf.multiply(self.item_review_weight,self.user_id_embedding),axis=1,keepdims=True)
print("user_review_feature:{}".format(self.user_review_feature.shape))
print("item_review_feature:{}".format(self.item_review_feature))
with tf.name_scope("build_item_attention"):
self.item_attention = tf.matmul(self.user_id_embedding,tf.transpose(self.item_review_embedding_sentence,[0,2,1]))
self.item_attention = tf.reshape(tf.nn.softmax(self.item_attention),shape=[-1,self.review_max_num,1])
print("item_attention:{}".format(self.item_attention.shape))
self.item_feature = self.item_attention * self.item_review_embedding_sentence
self.item_feature = tf.reduce_sum(self.item_feature,axis=1,keepdims=True)
with tf.name_scope("build_user_attention"):
self.user_attention = tf.matmul(self.item_id_embedding,tf.transpose(self.user_review_embedding_sentence,[0,2,1]))
self.user_attention = tf.reshape(tf.nn.softmax(self.user_attention),shape=[-1,self.review_max_num,1])
print("user_attention:{}".format(self.user_attention.shape))
self.user_feature = self.user_attention * self.user_review_embedding_sentence
self.user_feature = tf.reduce_sum(self.user_feature,axis=1,keepdims=True)
with tf.name_scope("build_concat_layer"):
self.user_feature_concat = tf.concat([self.user_id_embedding,self.user_feature,self.user_review_feature],axis=2,name="user_concat")
self.item_feature_concat = tf.concat([self.item_id_embedding,self.item_feature,self.item_review_feature],axis=2,name="item_concat")
print("user_feature_concat:{}".format(self.user_feature_concat.shape))
print("item_feature_concat:{}".format(self.item_feature_concat.shape))
self.user_feature_dense = Dense(self.user_embedding_dimension,activation="relu")(self.user_feature_concat)
self.item_feature_dense = Dense(self.item_embedding_dimension,activation="relu")(self.item_feature_concat)
print("user_feature_dense:{}".format(self.user_feature_dense.shape))
print("item_feature_dense:{}".format(self.item_feature_dense.shape))
with tf.name_scope("build_outer_product"):
self.user_item_matrix = tf.matmul(tf.transpose(self.user_feature_dense,perm=[0,2,1]),self.item_feature_dense)
self.user_item_matrix = tf.expand_dims(self.user_item_matrix,-1,name="tran3D")
with tf.name_scope("build_convolution_layer"):
self.first_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.user_item_matrix)
self.second_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.first_layer)
self.third_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.second_layer)
self.fourth_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.third_layer)
self.fifth_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.fourth_layer)
self.sixth_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.fifth_layer)
self.dropout_layer = Dropout(self.dropout_size)(self.sixth_layer)
with tf.name_scope("build_prediction"):
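            # Factorization-machine style prediction head: the flattened CNN
            # feature vector is scored with a bias (fm_w0), a linear term
            # (fm_W), and second-order interactions built from the factor
            # matrix fm_V via the "square of sums minus sum of squares"
            # identity.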
self.final_vector = tf.reshape(self.dropout_layer,shape=[-1,self.cnn_filters])
self.fm_w0 = tf.Variable(tf.zeros([1]))
self.fm_W = tf.Variable(tf.truncated_normal([self.cnn_filters]))
self.fm_V = tf.Variable(tf.random_normal([self.fm_K,self.cnn_filters],stddev=0.01))
self.linear_terms = tf.add(self.fm_w0,
tf.reduce_sum(
tf.multiply(self.fm_W,self.final_vector),axis=1,keepdims=True
))
self.interactions = tf.add(self.fm_w0,tf.reduce_sum(
tf.subtract(
tf.pow(tf.matmul(self.final_vector,tf.transpose(self.fm_V)),2),
tf.matmul(tf.pow(self.final_vector,2),tf.transpose(tf.pow(self.fm_V,2)))),
axis=1,keepdims=True
)
)
self.output = tf.add(self.linear_terms,self.interactions)
print("output:{}".format(self.output.shape))
self.error = tf.subtract(self.output, self.input_y)
with tf.name_scope("train_loss"):
self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.output, self.input_y))))
        with tf.name_scope("test_loss"):  # the test set cannot be fed in a single batch
self.test_loss = tf.square(tf.subtract(self.output,self.input_y))
def model_init(self):
self.init = tf.global_variables_initializer()
#sess.run(self.word_embedding_matrix.initializer, feed_dict={self.emb_initializer: self.emb})
self.sess.run(self.init)
def load_data(self,train_data,test_data,para_file):
        # train_data is e.g. music.train
para_data = pickle.load(open(para_file,"rb"))
self.test_data = np.array(pickle.load(open(test_data,"rb")))
        self.train_data = np.array(pickle.load(open(train_data,"rb")))  # this is only the user-item review/rating data
self.users_review = para_data['user_review']
self.items_review = para_data['item_review']
self.user_r_rating = para_data["user_r_rating"]
self.item_r_rating = para_data["item_r_rating"]
self.user_r_id = para_data["user_r_id"]
self.item_r_id = para_data["item_r_id"]
def search_train_data(self,uid, iid, user_r_id, item_r_id, user_r_rating, item_r_rating):
data_num = len(uid)
user_r_id_batch = np.zeros(shape=(data_num, self.review_max_num))
item_r_id_batch = np.zeros(shape=(data_num, self.review_max_num))
user_r_rating_batch = np.zeros(shape=(data_num, self.review_max_num))
item_r_rating_batch = np.zeros(shape=(data_num, self.review_max_num))
# user_r_id = list(user_r_id)
# print (user_r_id[2])
for i, item in enumerate(uid):
user_r_id_batch[i, :] = user_r_id[int(item)]
# print (user_r_id)
user_r_rating_batch[i, :] = user_r_rating[int(item)]
for i, item in enumerate(iid):
item_r_id_batch[i, :] = item_r_id[int(item)]
item_r_rating_batch[i, :] = item_r_rating[int(item)]
# print ()
return user_r_id_batch, item_r_id_batch, user_r_rating_batch, item_r_rating_batch
def model_train(self):
#self.model_init()
print("model_train")
#self.load_test_data()
self.test_loss_list = []
#self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.total_optimizer = tf.train.AdamOptimizer(learning_rate =self.learning_rate,beta1=self.beta1,beta2=self.beta2,epsilon=self.epsilon).minimize(self.loss)
self.train_data_size = len(self.train_data)
self.model_init()
print("data_size_train:{}".format(self.train_data_size))
self.ll = int(self.train_data_size / self.batch_size) + 1
print("train_time:{}".format(self.ll))
for epoch in range(self.train_time):
print("epoch_i:{}".format(epoch))
train_rmse = []
self.shuffle_index = np.random.permutation(np.arange(self.train_data_size))
self.shuffle_data = self.train_data[self.shuffle_index]
#print("shuffle_data:",self.shuffle_data.shape)
for batch_num in range(self.ll):
start_index = batch_num * self.batch_size
end_index = min((batch_num+1)*self.batch_size,self.train_data_size-1)
#print("end_index:",end_index)
data_train = self.shuffle_data[start_index:end_index]
batch_user_id,batch_item_id,batch_y = list(zip(*data_train))
batch_user_review = []
batch_item_review = []
for i in range(len(data_train)):
batch_user_review.append(self.users_review[batch_user_id[i][0]])
batch_item_review.append(self.items_review[batch_item_id[i][0]])
batch_user_review = np.array(batch_user_review)
batch_item_review = np.array(batch_item_review)
batch_user_r_id,batch_item_r_id,batch_user_r_rate,batch_item_r_rate =self.search_train_data(batch_user_id,
batch_item_id,
self.user_r_id,
self.item_r_id,
self.user_r_rating,
self.item_r_rating)
feed_dict = {
self.user_id: batch_user_id,
self.item_id: batch_item_id,
self.input_y: batch_y,
self.user_review: batch_user_review,
self.item_review: batch_item_review,
self.user_commented_items_id: batch_user_r_id,
self.user_commented_items_rate: batch_user_r_rate,
self.item_commented_users_id: batch_item_r_id,
self.item_commented_users_rate: batch_item_r_rate
}
_,t_rmse,error = self.sess.run([self.total_optimizer,self.loss,self.error],feed_dict)
if self.is_sample==True:
self.random_sample(batch_user_id,batch_item_id,batch_y,batch_user_r_id,batch_item_r_id,batch_user_r_rate,batch_item_r_rate,error)
#current_step = tf.train.global_step(self.sess, self.global_step)
train_rmse.append(t_rmse)
print("t_rmse:{}".format(t_rmse))
                if batch_num ==(self.ll-1):  # evaluate on the test set
print("\nEvaluation:")
print(batch_num)
self.model_test()
def show_test_result(self):
print((" test_loss_list:{}".format(self.test_loss_list)))
        self.best_test_mse = min(self.test_loss_list)
        print("best test_mse:{}".format(self.best_test_mse))
print('end')
def model_test(self):
self.test_data_size = len(self.test_data)
self.ll_test = int(self.test_data_size / self.batch_size) + 1
test_cost = []
for batch_num in range(self.ll_test):
start_index = batch_num * self.batch_size
end_index = min((batch_num+1)*self.batch_size,self.test_data_size-1)
data_test = self.test_data[start_index:end_index]
user_id_test,item_id_test,y_test = list(zip(*data_test))
user_valid = []
item_valid = []
for i in range(len(data_test)):
user_valid.append(self.users_review[user_id_test[i][0]])
item_valid.append(self.items_review[item_id_test[i][0]])
user_valid = np.array(user_valid)
item_valid = np.array(item_valid)
user_r_id_batch, item_r_id_batch, user_r_rate_batch, item_r_rate_batch = self.search_train_data(
user_id_test, item_id_test, self.user_r_id, self.item_r_id, self.user_r_rating, self.item_r_rating)
feed_dict = {
self.user_id: user_id_test,
self.item_id: item_id_test,
self.input_y: y_test,
self.user_review: user_valid,
self.item_review: item_valid,
self.user_commented_items_id: user_r_id_batch,
self.user_commented_items_rate: user_r_rate_batch,
self.item_commented_users_id: item_r_id_batch,
self.item_commented_users_rate: item_r_rate_batch
}
test_loss = self.sess.run([self.test_loss],feed_dict)
test_cost.append(test_loss)
total_mse = 0
for i in test_cost:
for j in i:
for k in j:
total_mse += k
final_mse = total_mse/self.test_data_size
print("test_final_mse:{}".format(final_mse))
self.test_loss_list.append(final_mse)
def random_sample(self,user_id,item_id,y,user_r_id, item_r_id, user_r_rate, item_r_rate,loss):
num = len(user_id)
np.random.seed(2019)
loss =np.array(loss).flatten()
probability = np.exp(loss)/sum(np.exp(loss))
#print("probability.shape:{}".format(probability.shape))
#print("probability length:{}".format(len(probability)))
#print(probability)
#print("num:{}".format(num))
sample_ratio = self.sample_ratio
#print("sample:{}".format(int(num * sample_ratio)))
index = np.random.choice(num,size=int(num*sample_ratio),replace=False,p = probability)
s_user_id = np.array(user_id)[index]
s_item_id = np.array(item_id)[index]
s_y = np.array(y)[index]
        s_user_r_id = np.array(user_r_id)[index]
import cv2
import numpy as np
import math
from PIL import Image
import random
class DIP:
def __init__(self):
pass
def read(self, file):
return np.array(Image.open(file))
def save(self, file, image):
return cv2.imwrite(file, image )
def resize(self, image, size):
return cv2.resize(image, (size[0], size[1]))
def cvtGreyscale(self, image):
grey = np.dot(image[...,:3], [0.2989, 0.5870, 0.114])
grey /= np.max(grey)
return grey
def gaussianKernel(self, kernelSize, sigma, flag=True, BilSpatial=None):
normal = 1 / (2.0 * np.pi * sigma * sigma)
if flag:
center = kernelSize // 2
x, y = np.mgrid[-center:center + 1, -center:center + 1]
kernel = np.exp(-((x * x + y * y) / (2.0 * sigma * sigma))) * normal
else:
kernel = np.exp(-(kernelSize*kernelSize / (2.0 * sigma * sigma)))
kernel = np.multiply(kernel, BilSpatial)
return kernel
def gaussianFilter(self, image, kernelSize, sigma):
gKernel = self.gaussianKernel(kernelSize, sigma)
output = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
output[col, row] = np.sum(gKernel * padded_image[col:col + kernelSize, row:row + kernelSize])
output /= np.max(output)
return output
def gabf(self, image, kernelSize, sigmaS, sigmaR):
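        # Guided bilateral-style filter: the image is first smoothed with a
        # spatial Gaussian to obtain a low-pass guide (LP_guide); range weights
        # are then computed from the absolute difference between the original
        # pixel and the guide's neighborhood, multiplied by the spatial
        # kernel, and used for normalized weighted averaging.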
spatialKernel = self.gaussianKernel(kernelSize, sigmaS)
LP_guide = np.zeros(image.shape, np.float)
output = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
LP_guide[col, row] = np.sum(spatialKernel * padded_image[col:col + kernelSize, row:row + kernelSize])
LP_guide /= np.max(LP_guide)
padded_image = np.pad(LP_guide, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
neighb_win = padded_image[col:col + kernelSize, row:row + kernelSize]
intensity_diff = np.absolute(image[col, row] - neighb_win)
weights = self.gaussianKernel(intensity_diff, sigmaR, flag=False, BilSpatial=spatialKernel)
vals = np.sum(np.multiply(weights, neighb_win))
norm = np.sum(weights)
output[col, row] = np.divide(vals, norm, out=np.zeros_like(vals), where=norm != 0)
output /= np.max(output)
return output
def median(self, image, kernelSize):
output = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
neighb_win = padded_image[col:col + kernelSize, row:row + kernelSize]
output[col, row] = np.median(neighb_win)
output /= np.max(output)
return output
def gradient2x2(self, image):
kernelSize = 2
gX = np.array([
[-1, 1],
[-1, 1]
])
gY = np.array([
[1, 1],
[-1, -1]
])
G_x = np.zeros(image.shape, np.float)
G_y = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]): # loop through row
for col in range(image.shape[0]): # loop through col
pix = padded_image[col:col + kernelSize, row:row + kernelSize] # get pixel value
G_x[col, row] = np.sum(np.multiply(gX, pix))
                G_y[col, row] = np.sum(np.multiply(gY, pix))
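        # After the loops above, a typical continuation (an assumption, not
        # original code) would derive edge magnitude and orientation:
        #
        #     magnitude = np.hypot(G_x, G_y)
        #     magnitude /= np.max(magnitude)
        #     theta = np.arctan2(G_y, G_x)
        #     return magnitude, theta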
"""Script contain the ReplayBuffer object.
This script is adapted largely from: https://github.com/hill-a/stable-baselines
"""
import random
import numpy as np
class ReplayBuffer(object):
"""Experience replay buffer."""
def __init__(self, size):
"""Instantiate a ring buffer (FIFO).
Parameters
----------
size : int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
"""Return the number of elements stored."""
return len(self._storage)
@property
def storage(self):
"""Return the content of the replay buffer.
Of the form: [(np.ndarray, float, float, np.ndarray, bool)]
"""
return self._storage
@property
def buffer_size(self):
"""Return the (float) max capacity of the buffer."""
return self._maxsize
def can_sample(self, n_samples):
"""Check if n_samples samples can be sampled from the buffer.
Parameters
----------
n_samples : int
number of requested samples
Returns
-------
bool
True if enough sample exist, False otherwise
"""
return len(self) >= n_samples
def is_full(self):
"""Check whether the replay buffer is full or not.
Returns
-------
bool
True if it is full, False otherwise
"""
return len(self) == self.buffer_size
def add(self, obs_t, action, reward, obs_tp1, done):
"""Add a new transition to the buffer.
Parameters
----------
obs_t : Any
the last observation
action : array_like
the action
reward : float
the reward of the transition
obs_tp1 : Any
the current observation
done : float
is the episode done
"""
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), \
np.array(obses_tp1), np.array(dones)
def sample(self, batch_size, **_kwargs):
"""Sample a batch of experiences.
Parameters
----------
batch_size : int
How many transitions to sample.
Returns
-------
np.ndarray
batch of observations
numpy float
batch of actions executed given obs_batch
numpy float
rewards received as results of executing act_batch
np.ndarray
next set of observations seen after executing act_batch
numpy bool
done_mask[i] = 1 if executing act_batch[i] resulted in the end of
an episode and 0 otherwise.
"""
indices = [random.randint(0, len(self._storage) - 1)
for _ in range(batch_size)]
return self._encode_sample(indices)
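    # Example usage of ReplayBuffer (illustrative):
    #
    #     buf = ReplayBuffer(size=10000)
    #     buf.add(obs, action, reward, next_obs, done=False)
    #     if buf.can_sample(32):
    #         obs_b, act_b, rew_b, next_b, done_b = buf.sample(32)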
class HierReplayBuffer(ReplayBuffer):
"""Hierarchical variant of ReplayBuffer."""
def add(self, obs_t, goal_t, action_t, reward_t, done, **kwargs):
"""Add a new transition to the buffer.
Parameters
----------
obs_t : array_like
the last observation
action_t : array_like
the action
goal_t : array_like
the goal
reward_t : list of float
the worker reward at every time step
done : list of float or list of bool
is the episode done
"""
data = (obs_t, goal_t, action_t, reward_t, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
"""Return a sample from the replay buffer based on indices.
Parameters
----------
idxes : list of int
list of random indices
Returns
-------
sample from replay buffer (s_t, g_t, a_t, r, s_t+1, done, h(s,g,s'))
"""
obses_t, goals, actions, rewards = [], [], [], []
done, h_t, g_up, obs_tp1 = [], [], [], []
for i in idxes:
# never use the last element, as it has no next state
if i == (self._next_idx - 1) % self._maxsize:
i = (i - 1) % self._maxsize
data = self._storage[i]
obstp1 = self._storage[(i+1) % self._maxsize][0]
obs_t, goals_t, actions_t, rewards_t, d, h, g_uptd = data
obses_t.append(np.array(obs_t, copy=False))
goals.append(np.array(goals_t, copy=False))
actions.append(np.array(actions_t, copy=False))
rewards.append(np.array(rewards_t, copy=False))
done.append(d)
h_t.append(np.array(h, copy=False))
g_up.append(np.array(g_uptd, copy=False))
            obs_tp1.append(np.array(obstp1, copy=False))
"""Tests functionality of the ImageAugmenter class."""
from __future__ import print_function
# make sure that ImageAugmenter can be imported from parent directory
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import unittest
import numpy as np
from ImageAugmenter import ImageAugmenter
import random
from skimage import data
random.seed(123456789)
np.random.seed(123456789)
class TestImageAugmenter(unittest.TestCase):
"""Tests functionality of the ImageAugmenter class."""
def test_rotation(self):
"""Test rotation of 90 degrees on an image that should change
upon rotation."""
image_before = [[0, 255, 0],
[0, 255, 0],
[0, 255, 0]]
image_target = [[ 0, 0, 0],
[1.0, 1.0, 1.0],
[ 0, 0, 0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(3, 3, rotation_deg=(90, 90))
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
def test_rotation_invariant(self):
"""Test rotation of -90 to 90 degrees on an rotation invariant image."""
image_before = [[0, 0, 0],
[0, 255, 0],
[0, 0, 0]]
image_target = [[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]]
images = np.array([image_before]).astype(np.uint8)
# random rotation of up to 180 degress
augmenter = ImageAugmenter(3, 3, rotation_deg=180)
# all must be similar to target
nb_similar = 0
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
# some tolerance here - interpolation problems can let the image
# change a bit, even though it should be invariant to rotations
if np.allclose(image_target, image_after, atol=0.1):
nb_similar += 1
self.assertEquals(nb_similar, 100)
def test_scaling(self):
"""Rough test for zooming/scaling (only zoom in / scaling >1.0).
The test is rough, because interpolation problems make the result
of scaling on synthetic images rather hard to predict (and unintuitive).
"""
size_x = 4
size_y = 4
# a 4x4 image of which the center 3x3 pixels are bright white,
# everything else black
image_before = np.zeros((size_y, size_x))
image_before[1:size_y-1, 1:size_x-1] = 255
images = np.array([image_before]).astype(np.uint8)
# about 200% zoom in
augmenter = ImageAugmenter(size_x, size_y, scale_to_percent=(1.99, 1.99),
scale_axis_equally=True)
image_after = augmenter.augment_batch(images)[0]
# we scale positively (zoom in), therefor we expect the center bright
# spot to grow, resulting in a higher total brightness
self.assertTrue(np.sum(image_after) > np.sum(image_before)/255)
def test_shear(self):
"""Very rough test of shear: It simply measures whether image tend
to be significantly different after shear (any change)."""
image_before = [[0, 255, 0],
[0, 255, 0],
[0, 255, 0]]
image_target = [[0, 1.0, 0],
[0, 1.0, 0],
[0, 1.0, 0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(3, 3, shear_deg=50)
# the majority should be different from the source image
nb_different = 0
nb_augment = 1000
for _ in range(nb_augment):
image_after = augmenter.augment_batch(images)[0]
if not np.allclose(image_target, image_after):
nb_different += 1
self.assertTrue(nb_different > nb_augment*0.9)
def test_translation_x(self):
"""Testing translation on the x-axis."""
#image_before = np.zeros((2, 2), dtype=np.uint8)
image_before = [[255, 0],
[255, 0]]
#image_after = np.zeros((2, 2), dtype=np.float32)
image_target = [[0, 1.0],
[0, 1.0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(2, 2, translation_x_px=(1,1))
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
def test_translation_y(self):
"""Testing translation on the y-axis."""
image_before = [[ 0, 0],
[255, 255]]
image_target = [[1.0, 1.0],
[ 0, 0]]
images = np.array([image_before]).astype(np.uint8)
# translate always by -1px on y-axis
augmenter = ImageAugmenter(2, 2, translation_y_px=(-1,-1))
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
def test_single_channel(self):
"""Tests images with channels (e.g. RGB channels)."""
# One single channel
# channel is last axis
# test by translating an image with one channel on the x-axis (1 px)
image_before = np.zeros((2, 2, 1), dtype=np.uint8)
image_before[0, 0, 0] = 255
image_before[1, 0, 0] = 255
image_target = np.zeros((2, 2, 1), dtype=np.float32)
image_target[0, 1, 0] = 1.0
image_target[1, 1, 0] = 1.0
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(2, 2, translation_x_px=(1,1))
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
# One single channel
# channel is first axis
# test by translating an image with one channel on the x-axis (1 px)
image_before = np.zeros((1, 2, 2), dtype=np.uint8)
image_before[0] = [[255, 0],
[255, 0]]
image_target = np.zeros((1, 2, 2), dtype=np.float32)
image_target[0] = [[0, 1.0],
[0, 1.0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(2, 2, translation_x_px=(1,1),
channel_is_first_axis=True)
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
def test_two_channels(self):
"""Tests augmentation of images with two channels (either first or last
axis of each image). Tested using x-translation."""
# -----------------------------------------------
# two channels,
# channel is the FIRST axis of each image
# -----------------------------------------------
augmenter = ImageAugmenter(2, 2, translation_y_px=(0,1),
channel_is_first_axis=True)
image_before = np.zeros((2, 2, 2)).astype(np.uint8)
# 1st channel: top row white, bottom row black
image_before[0][0][0] = 255
image_before[0][0][1] = 255
image_before[0][1][0] = 0
image_before[0][1][1] = 0
# 2nd channel: top right corner white, everything else black
image_before[1][0][0] = 0
image_before[1][0][1] = 255
image_before[1][1][0] = 0
image_before[1][1][1] = 0
# ^ channel
# ^ y (row)
# ^ x (column)
image_target = np.zeros((2, 2, 2)).astype(np.float32)
# 1st channel: bottom row white, bottom row black
image_target[0][0][0] = 0
image_target[0][0][1] = 0
image_target[0][1][0] = 1.0
image_target[0][1][1] = 1.0
# 2nd channel: bottom right corner white, everything else black
image_target[1][0][0] = 0
image_target[1][0][1] = 0
image_target[1][1][0] = 0
image_target[1][1][1] = 1.0
nb_augment = 1000
image = np.array([image_before]).astype(np.uint8)
images = np.resize(image, (nb_augment, 2, 2, 2))
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_target, image_after):
nb_similar += 1
self.assertTrue(nb_similar > (nb_augment*0.4) and nb_similar < (nb_augment*0.6))
# -----------------------------------------------
# two channels,
# channel is the LAST axis of each image
# -----------------------------------------------
augmenter = ImageAugmenter(2, 2, translation_y_px=(0,1),
channel_is_first_axis=False)
image_before = np.zeros((2, 2, 2)).astype(np.uint8)
# 1st channel: top row white, bottom row black
image_before[0][0][0] = 255
image_before[0][1][0] = 255
image_before[1][0][0] = 0
image_before[1][1][0] = 0
# 2nd channel: top right corner white, everything else black
image_before[0][0][1] = 0
image_before[0][1][1] = 255
image_before[1][0][1] = 0
image_before[1][1][1] = 0
# ^ y
# ^ x
# ^ channel
image_target = np.zeros((2, 2, 2)).astype(np.float32)
# 1st channel: bottom row white, bottom row black
image_target[0][0][0] = 0
image_target[0][1][0] = 0
image_target[1][0][0] = 1.0
image_target[1][1][0] = 1.0
# 2nd channel: bottom right corner white, everything else black
image_target[0][0][1] = 0
image_target[0][1][1] = 0
image_target[1][0][1] = 0
image_target[1][1][1] = 1.0
nb_augment = 1000
image = np.array([image_before]).astype(np.uint8)
images = np.resize(image, (nb_augment, 2, 2, 2))
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_target, image_after):
nb_similar += 1
self.assertTrue(nb_similar > (nb_augment*0.4) and nb_similar < (nb_augment*0.6))
def test_transform_channels_unequally(self):
"""Tests whether 2 or more channels can be augmented non-identically
at the same time.
E.g. channel 0 is rotated by 20 degress, channel 1 (of the same image)
is rotated by 5 degrees.
"""
# two channels, channel is first axis of each image
augmenter = ImageAugmenter(3, 3, translation_x_px=(0,1),
transform_channels_equally=False,
channel_is_first_axis=True)
image_before = np.zeros((2, 3, 3)).astype(np.uint8)
image_before[0] = [[255, 0, 0],
[ 0, 0, 0],
[ 0, 0, 0]]
image_before[1] = [[ 0, 0, 0],
[ 0, 0, 0],
[ 0, 255, 0]]
# ^ channel
image_target = np.zeros((2, 3, 3)).astype(np.float32)
image_target[0] = [[ 0, 1.0, 0],
[ 0, 0, 0],
[ 0, 0, 0]]
image_target[1] = [[ 0, 0, 0],
[ 0, 0, 0],
[ 0, 0, 1.0]]
nb_similar_channel_0 = 0
nb_similar_channel_1 = 0
nb_equally_transformed = 0
#nb_unequally_transformed = 0
nb_augment = 1000
image = np.array([image_before]).astype(np.uint8)
images = np.resize(image, (nb_augment, 2, 3, 3))
images_augmented = augmenter.augment_batch(images)
# augment 1000 times and count how often the channels were transformed
# in equal or unequal ways.
for image_after in images_augmented:
similar_channel_0 = np.allclose(image_target[0], image_after[0])
similar_channel_1 = np.allclose(image_target[1], image_after[1])
if similar_channel_0:
nb_similar_channel_0 += 1
if similar_channel_1:
nb_similar_channel_1 += 1
if similar_channel_0 == similar_channel_1:
nb_equally_transformed += 1
#else:
# nb_unequally_transformed += 1
# each one should be around 50%
self.assertTrue(nb_similar_channel_0 > 0.40*nb_augment
and nb_similar_channel_0 < 0.60*nb_augment)
self.assertTrue(nb_similar_channel_1 > 0.40*nb_augment
and nb_similar_channel_1 < 0.60*nb_augment)
self.assertTrue(nb_equally_transformed > 0.40*nb_augment
and nb_equally_transformed < 0.60*nb_augment)
def test_no_blacks(self):
"""Test whether random augmentations can cause an image to turn
completely black (cval=0.0), which should never happen."""
image_before = data.camera()
y_size, x_size = image_before.shape
augmenter = ImageAugmenter(x_size, y_size,
scale_to_percent=1.5,
scale_axis_equally=False,
rotation_deg=90,
shear_deg=20,
translation_x_px=10,
translation_y_px=10)
image_black = np.zeros(image_before.shape, dtype=np.float32)
nb_augment = 100
images = np.resize([image_before], (nb_augment, y_size, x_size))
images_augmented = augmenter.augment_batch(images)
nb_black = 0
for image_after in images_augmented:
if np.allclose(image_after, image_black):
nb_black += 1
self.assertEqual(nb_black, 0)
def test_non_square_images(self):
"""Test whether transformation of images with unequal x and y axis sizes
works as expected."""
y_size = 11
x_size = 4
image_before = np.zeros((y_size, x_size), dtype=np.uint8)
image_target = np.zeros((y_size, x_size), dtype=np.float32)
# place a bright white line in the center (of the y-axis, so left to right)
# Augmenter will move it up by 2 (translation on y by -2)
y_line_pos = int(y_size/2) + 1
for x_pos in range(x_size):
image_before[y_line_pos][x_pos] = 255
image_target[y_line_pos - 2][x_pos] = 1.0
augmenter = ImageAugmenter(x_size, y_size, translation_y_px=(-2,-2))
nb_augment = 100
        images = np.resize([image_before], (nb_augment, y_size, x_size))
# Copyright (c) Facebook, Inc. and its affiliates.
#Visualization Function
import cv2
import numpy as np
import PIL
from PIL.Image import Image
def __ValidateNumpyImg(inputImg):
if isinstance(inputImg, Image):
# inputImg = cv2.cvtColor(np.array(inputImg), cv2.COLOR_RGB2BGR)
inputImg = np.array(inputImg)
return inputImg #Q? is this copying someting (wasting memory or time?)?
veryFirstImShow = True
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
inputImg = __ValidateNumpyImg(inputImg)
if scale!=1.0:
inputImg = cv2.resize(inputImg, (inputImg.shape[0]*int(scale), inputImg.shape[1]*int(scale)))
if bConvRGB2BGR:
inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
cv2.imshow(name,inputImg)
global veryFirstImShow
if False:#veryFirstImShow:
print(">> Press any key to move on")
cv2.waitKey(0) #the initial one is always blank... why?
veryFirstImShow = 0
else:
cv2.waitKey(waitTime)
def ImgSC(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
inputImg = __ValidateNumpyImg(inputImg)
minVal = np.min(inputImg)
maxVal = np.max(inputImg)
#rescale
inputImg = (inputImg-minVal)/ (maxVal-minVal)*255
if scale!=1.0:
inputImg = cv2.resize(inputImg, (inputImg.shape[0]*int(scale), inputImg.shape[1]*int(scale)))
if bConvRGB2BGR:
inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
cv2.imshow(name,inputImg)
global veryFirstImShow
if veryFirstImShow:
print(">> Press any key to move on")
cv2.waitKey(0) #the initial one is always blank... why?
veryFirstImShow = 0
else:
cv2.waitKey(waitTime)
# import matplotlib.pyplot as plt
# def Plot(values, title=None):
# plt.plot(values)
# if title is not None:
# plt.title(title)#, loc='left', fontsize=12, fontweight=0, color='orange')
# plt.show()
#bbe: min_pt, max_pt
def Vis_Bbox_minmaxPt(inputImg, min_pt, max_pt, color=None):
bbr = [min_pt[0],min_pt[1], max_pt[0]- min_pt[0], max_pt[1]- min_pt[1]]
return Vis_Bbox(inputImg, bbr, color)
def Vis_Bbox_XYXY(inputImg, bbox_xyxy, color=None):
#draw biggest bbox
pt1 = ( int(bbox_xyxy[0]),int(bbox_xyxy[1]) )
pt2 = (int(bbox_xyxy[2]),int(bbox_xyxy[3]) )
if color is None:
color = (0,0,255)
cv2.rectangle(inputImg, pt1, pt2,color, 3)
return inputImg
def Vis_Bbox(inputImg, bbox_xyhw, color= None):
return Vis_Bbox_XYWH(inputImg, bbox_xyhw, color)
#bbox: [leftTop_x,leftTop_y,width,height]
def Vis_Bbox_XYWH(inputImg, bbox_xyhw, color= None):
inputImg = __ValidateNumpyImg(inputImg)
#draw biggest bbox
pt1 = ( int(bbox_xyhw[0]),int(bbox_xyhw[1]) )
pt2 = (int(bbox_xyhw[0] + bbox_xyhw[2]),int(bbox_xyhw[1] + bbox_xyhw[3]) )
if color is None:
color = (0,0,255)
cv2.rectangle(inputImg, pt1, pt2,color, 3)
return inputImg
def Vis_CocoBbox(inputImg, coco_annot):
inputImg = __ValidateNumpyImg(inputImg)
bbr = np.round(coco_annot['bbox']) #[leftTop_x,leftTop_y,width,height]
#draw biggest bbox
pt1 = ( int(bbr[0]),int(bbr[1]) )
pt2 = (int(bbr[0] + bbr[2]),int(bbr[1] + bbr[3]) )
cv2.rectangle(inputImg, pt1, pt2,(255,255,255), 3)
return inputImg
# connections_right = [
# {0, 2}, {2, 4}, {0, 6} //nect, rightEye, rightEar
# , {6, 8}, {8, 10}, {6,12}, {12,14} , {14, 16}
# };
#]
def Vis_CocoSkeleton(keypoints, image=None):
# def Vis_CocoSkeleton(inputImg, coco_annot):
if not isinstance(image, np.ndarray):#not image: #If no image is given, generate Blank image
image = np.ones((1000,1000,3),np.uint8) *255
image = __ValidateNumpyImg(image)
#COCO17 original annotation ordering
link2D = [ [0, 1], [1,3], #nose(0), leftEye(1), leftEar(3)
[0,5], [5, 7], [7, 9], #leftShoulder(5), leftArm(7), leftWrist(9)
[0, 11], [11, 13], [13, 15], #leftHip(11), leftKnee(13), leftAnkle(15)
[0,2], [2,4], #nose(0), rightEye(2), rightEar(4)
[0,6], [6, 8], [8, 10], #rightShoulder(6), rightArm(8), rightWrist(10)
[0, 12], [12, 14], [14, 16] #rightHip(12), rightKnee(14), rightAnkle(16)
]
bLeft = [ 1,1,
1, 1, 1,
1,1,1,
0,0,
0,0,0,
0,0,0]
# keypoints = np.round(coco_annot['keypoints']) #coco_annot['keypoints']: list with length 51
if keypoints.shape[0] == 51:
keypoints = np.reshape(keypoints, (-1,3)) #(17,3): (X, Y, Label)
else:
        keypoints = np.reshape(keypoints, (-1,2)) #(17,2): (X, Y)
radius = 4
for k in np.arange( len(keypoints) ):
cv2.circle(image, (int(keypoints[k][0]), int(keypoints[k][1]) ), radius,(0,0,255),-1)
for k in np.arange( len(link2D) ):
parent = link2D[k][0]
child = link2D[k][1]
if bLeft[k]:
c = (0,0,255)#BGR, RED
else:
c = (200,200,200)
if keypoints[parent][0] ==0 or keypoints[child][0]==0: # //not annotated one
continue
cv2.line(image, (int(keypoints[parent][0]), int(keypoints[parent][1])), (int(keypoints[child][0]), int(keypoints[child][1])), c, radius - 2)
return image
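# Hedged usage sketch (not part of the original library; keypoint values are
# made up): builds a flat (51,) array of [x, y, v] triplets and draws the
# skeleton on a blank canvas via Vis_CocoSkeleton above.
def _demo_vis_coco_skeleton():
    kps = np.zeros(51)
    kps[0:3] = [500, 200, 2]    # nose (COCO index 0)
    kps[15:18] = [450, 300, 2]  # left shoulder (COCO index 5)
    kps[18:21] = [550, 300, 2]  # right shoulder (COCO index 6)
    return Vis_CocoSkeleton(kps)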
DP_partIdx ={
'Torso_Back': 1,
'Torso_Front': 2,
'RHand': 3,
'LHand': 4,
'LFoot': 5,
'RFoot': 6,
'R_upperLeg_back': 7,
'L_upperLeg_back': 8,
'R_upperLeg_front': 9,
'L_upperLeg_front': 10,
'R_lowerLeg_back': 11,
'L_lowerLeg_back': 12,
'R_lowerLeg_front': 13,
'L_lowerLeg_front': 14,
'L_upperArm_front': 15,
'R_upperArm_front': 16,
'L_upperArm_back': 17,
'R_upperArm_back': 18,
'L_lowerArm_back': 19,
'R_lowerArm_back': 20,
'L_lowerArm_front': 21,
'R_lowerArm_front': 22,
'RFace': 23,
'LFace': 24
}
def Vis_Densepose(inputImg, coco_annot):
inputImg = __ValidateNumpyImg(inputImg)
import sys
sys.path.append('/home/hjoo/data/DensePose/detectron/utils/')
import densepose_methods as dp_utils
DP = dp_utils.DensePoseMethods()
if('dp_x' not in coco_annot.keys()):
print("## Warning: No Densepose coco_annotation")
return inputImg
bbr = np.round(coco_annot['bbox']) #[leftTop_x,leftTop_y,width,height]
Point_x = np.array(coco_annot['dp_x'])/ 255. * bbr[2] + bbr[0] # Strech the points to current box. from 255x255 -> [bboxWidth,bboxheight]
    Point_y = np.array(coco_annot['dp_y'])
import argparse
import os, sys
import json
from copy import deepcopy
import numpy as np
from scipy.spatial import cKDTree
from tqdm import tqdm
import trimesh
from scipy.ndimage.morphology import binary_erosion, binary_dilation
from ..utils.scannet_utils import get_global_part_labels_description, load_pickle, get_scannet
from ..utils.vox import load_sample
from ..utils.transforms import apply_transform, apply_inverse_transform
import main_directories as md
mag_factors = {'chair': 1.25,
'table': 1.35,
'storagefurniture': 1.25,
'bed': 1.15,
'trashcan': 1.4}
struct_cats_to_scannet_cats = {'chair': ['chair'],
'table': ['table'],
'storagefurniture': ['cabinet', 'bookshelf'],
'bed': ['bed', 'sofa'],
'trashcan': ['garbagebin']}
def perform_correction(mask, voxel_transform, min_1, max_1, max_2, min_1_pd, max_1_pd, max_2_pd, mag_factor, size):
mask_coords = np.where(mask > 0)
mask_coords = np.stack([mask_coords[2], mask_coords[1], mask_coords[0]]).T
mask_coords = apply_transform(mask_coords, voxel_transform)
mask_coords += max_2
mask_coords *= max_1
mask_coords += min_1
mask_coords -= min_1_pd
mask_coords /= max_1_pd
mask_coords -= max_2_pd
mask_coords *= mag_factor
mask_coords = apply_inverse_transform(mask_coords, voxel_transform)
mask_tensor = np.zeros((32, 32, 32))
mask_coords = mask_coords.astype(int)
mask_coords = np.maximum(0, np.minimum(size - 1, mask_coords))
mask_tensor[mask_coords[:, 2], mask_coords[:, 1], mask_coords[:, 0]] = 1
mask_tensor = binary_dilation(mask_tensor, structure=np.ones((2, 2, 2)), iterations=2)
mask_tensor = binary_erosion(mask_tensor, structure=np.ones((2, 2, 2)), iterations=2)
mask_tensor = mask_tensor.astype(int)
return mask_tensor
def perform_correction_on_parts(mask, voxel_transform, min_1, max_1, max_2, min_1_pd, max_1_pd, max_2_pd, mag_factor, size):
mask_tensors = []
all_ratios = []
for part_mask in mask:
part_mask = np.squeeze(part_mask)
mask_coords = np.where(part_mask > 0)
mask_coords = np.stack([mask_coords[2], mask_coords[1], mask_coords[0]]).T
mask_coords = apply_transform(mask_coords, voxel_transform)
mask_coords += max_2
mask_coords *= max_1
mask_coords += min_1
mask_coords -= min_1_pd
mask_coords /= max_1_pd
mask_coords -= max_2_pd
mask_coords *= mag_factor
mask_coords = apply_inverse_transform(mask_coords, voxel_transform)
mask_tensor = np.zeros((32, 32, 32))
mask_coords = mask_coords.astype(int)
mask_coords_above_limits = [x[0] < 0 or x[0] >= 32 or x[1] < 0 or x[1] >= 32 or x[2] < 0 or x[2] >= 32 for x in
mask_coords]
above_limits_ratio = sum(mask_coords_above_limits) / mask_coords.shape[0]
all_ratios += [above_limits_ratio]
mask_coords = np.maximum(0, np.minimum(size - 1, mask_coords))
mask_tensor[mask_coords[:, 2], mask_coords[:, 1], mask_coords[:, 0]] = 1
mask_tensor = binary_dilation(mask_tensor, structure=np.ones((2, 2, 2)), iterations=2)
mask_tensor = binary_erosion(mask_tensor, structure=np.ones((2, 2, 2)), iterations=2)
mask_tensor = mask_tensor.astype(int)
mask_tensors += [mask_tensor[None, ...]]
mask_tensors = np.stack(mask_tensors)
return mask_tensors, max(all_ratios)
def iou(mask_1, mask_2):
smooth = 1e-4
intersection = mask_1 * mask_2
union = mask_1 + mask_2 - intersection
metric = intersection.sum() / (union.sum() + smooth)
return metric
def load_metadata(args):
global_labels = get_global_part_labels_description(md.DICTIONARIES)
partnet_to_shapenet_transforms_path = '../dictionaries/partnet_to_shapenet_transforms.pkl'
partnet_to_shapenet_transforms = load_pickle(partnet_to_shapenet_transforms_path)
with open(os.path.join(md.DICTIONARIES, 'full_annotations.json'), 'rb') as f:
scan2cad_anno = json.load(f)
VALID_PARTNET_IDS = os.path.join(args.trees_dir, f'{args.category}_hier/full.txt')
valid_partnet_ids = []
with open(VALID_PARTNET_IDS, 'r') as f:
lines = f.readlines()
for line in lines:
valid_partnet_ids += [line[:-1]]
with open(os.path.join(md.DICTIONARIES, 'scannetv2_train.txt'), 'r') as fin:
lines = fin.readlines()
scannet_train_scenes = [x[:-1] for x in lines]
with open(os.path.join(md.DICTIONARIES, 'scannetv2_val.txt'), 'r') as fin:
lines = fin.readlines()
scannet_val_scenes = [x[:-1] for x in lines]
mlcvnet_bboxes_filenames = []
for file in os.listdir(args.mlcvnet_output):
if file.endswith('.ply'):
mlcvnet_bboxes_filenames += [file]
global_id_to_semantic_class = load_pickle(os.path.join(md.DICTIONARIES, 'global_id_to_semantic_class.pkl'))
return scan2cad_anno, global_labels, partnet_to_shapenet_transforms, \
scannet_train_scenes, scannet_val_scenes, valid_partnet_ids, \
mlcvnet_bboxes_filenames, global_id_to_semantic_class
def process_mlcvnet(args, scannet_train_scenes, scannet_val_scenes,
scan2cad_anno, global_labels, partnet_to_shapenet_transforms, valid_partnet_ids,
global_id_to_semantic_class, save_dir):
size = args.voxel_dim
scannet_split = scannet_train_scenes if (args.split == 'train') else scannet_val_scenes
split_ids = []
type2class = {'cabinet': 0, 'bed': 1, 'chair': 2, 'sofa': 3, 'table': 4, 'door': 5,
'window': 6, 'bookshelf': 7, 'picture': 8, 'counter': 9, 'desk': 10, 'curtain': 11,
'refrigerator': 12, 'showercurtrain': 13, 'toilet': 14, 'sink': 15, 'bathtub': 16, 'garbagebin': 17}
class2type = {u: v for v, u in type2class.items()}
# for each scene at Scan2CAD
for i, anno_item in enumerate(tqdm(scan2cad_anno)):
all_scene_correspondences = []
# get Scan2CAD info
scan_id = anno_item['id_scan']
if scan_id not in scannet_split:
continue
scan_transform = anno_item["trs"]
aligned_models = anno_item['aligned_models']
# load raw scannet data
scan_path = os.path.join(args.scannet_dir, scan_id, scan_id + '_vh_clean_2.ply')
scan_data = trimesh.load_mesh(scan_path).metadata['ply_raw']['vertex']['data']
# get scannet point cloud (mesh vertices) and their colors
scan_points_origin = np.array([list(x) for x in scan_data[['x', 'y', 'z']]])
scan_color = np.array([list(x) for x in scan_data[['red', 'green', 'blue']]]) / 255
# transform scan to Scan2CAD coordinate system
scan_points = apply_transform(scan_points_origin, scan_transform)
meta_file = os.path.join(args.scannet_dir, scan_id, scan_id + '.txt')
lines = open(meta_file).readlines()
axis_align_matrix = None
for line in lines:
if 'axisAlignment' in line:
axis_align_matrix = [float(x) for x in line.rstrip().strip('axisAlignment = ').split(' ')]
assert axis_align_matrix is not None
axis_align_matrix = np.array(axis_align_matrix).reshape((4, 4))
all_instances = []
all_instances_world = []
all_instances_partnet = []
all_scans_aligned = []
all_partnet_labels = []
instance_ids = []
shape_plys = []
partnet_ids = []
category_ids = []
shape_ids = []
voxel_transforms = []
shape_transforms = []
partnet_transforms = []
all_buf = []
# for each aligned shape
object_id = 0
for j, anno_shape in enumerate(aligned_models):
# init_scan_mask
semantic_mask = -np.ones(len(scan_points))
instance_mask = -np.ones(len(scan_points))
# get Scan2CAD info about shape
category_id = anno_shape['catid_cad']
shape_id = anno_shape['id_cad']
# get_global_info
df_parts = global_labels[
(global_labels['category_id'] == category_id) & (global_labels['object_id'] == shape_id)
]
from_part_id_2_global_id = dict(df_parts.reset_index()[['part_id', 'global_id']].values)
if len(df_parts) == 0:
continue
# load shape pointcloud
partnet_id = df_parts.object_partnet_id.values[0]
partnet_path = os.path.join(args.partnet_dir, partnet_id, 'point_sample')
if partnet_id not in valid_partnet_ids:
continue
# load shape part labels
shape_label = np.loadtxt(os.path.join(partnet_path, 'label-10000.txt'), delimiter='\n')
shape_label = np.array([from_part_id_2_global_id[p_id] for p_id in shape_label])
# LOAD partnet modalities
shape_ply = np.loadtxt(os.path.join(partnet_path, 'pts-10000.pts'), delimiter=' ')[:, :3]
shape_vox = load_sample(os.path.join(md.PARTNET_VOXELIZED, partnet_id, 'full_vox.df'))
# LOAD WORLD -> SHAPENET -> PARTNET -> PARTNET_VOX transform
shape_transform = anno_shape["trs"]
if partnet_id not in partnet_to_shapenet_transforms:
print('Transform for Partnet shape ', partnet_id, ' not found')
continue
partnet_transform = partnet_to_shapenet_transforms[partnet_id]
voxel_transform = shape_vox.grid2world
# MAP scan points: WORLD -> SHAPENET -> PARTNET
scan_points_aligned = apply_inverse_transform(scan_points, shape_transform, partnet_transform)
# get instance points from the scan
# calculate distance
tree = cKDTree(shape_ply)
min_dist, min_idx = tree.query(scan_points_aligned)
# Color
for is_near, i_nn, i_point in zip(min_dist <= 0.07, min_idx, range(len(scan_points_aligned))):
if is_near:
instance_mask[i_point] = object_id
instance_points = scan_points_aligned[np.where(instance_mask == object_id)[0]]
instance_points_world = apply_transform(instance_points, partnet_transform, shape_transform)
if len(instance_points) > 0:
# Normalization
instance_points -= instance_points.min(0)
instance_points /= instance_points.max() / 0.95
instance_points -= instance_points.max(0) / 2
# MAP instance points: PARTNET -> PARTNET_VOX
instance_grid = apply_inverse_transform(instance_points, voxel_transform).astype('int')
instance_grid = np.maximum(0, np.minimum(size - 1, instance_grid))
# store data for further processing
all_instances += [instance_grid]
all_instances_world += [instance_points_world]
all_instances_partnet += [instance_points]
all_scans_aligned += [scan_points_aligned]
all_partnet_labels += [shape_label]
shape_plys += [shape_ply]
instance_ids += [j]
partnet_ids += [partnet_id]
category_ids += [category_id]
shape_ids += [shape_id]
voxel_transforms += [voxel_transform]
shape_transforms += [shape_transform]
partnet_transforms += [partnet_transform]
mask_instances = [x for x in os.listdir(os.path.join(args.mlcvnet_output, scan_id + '_vh_clean_2')) if
x.endswith('.ply')]
mask_classes = [int(x.split('_')[0]) for x in mask_instances]
mask_class_names = [class2type[x] for x in mask_classes]
mask_instances = [mask_instances[i] for i in range(len(mask_instances)) if mask_class_names[i] in struct_cats_to_scannet_cats[args.category]]
box_vertices = []
box_vertices_fixed = []
for k in range(len(mask_instances)):
box_meshes = trimesh.load(os.path.join(args.mlcvnet_output, scan_id + '_vh_clean_2', mask_instances[k])).split()
for mesh in box_meshes:
one_box_vertices = deepcopy(np.array(mesh.vertices))
z_vec = one_box_vertices[1] - one_box_vertices[0]
offset = np.array([0, 0, 0.5])
if z_vec[2] < 0:
buf = deepcopy(one_box_vertices[1])
one_box_vertices[1] = deepcopy(one_box_vertices[0])
one_box_vertices[0] = deepcopy(buf)
z_vec_fixed = one_box_vertices[1] - one_box_vertices[0] + offset
z_vec = one_box_vertices[1] - one_box_vertices[0]
y_vec = one_box_vertices[2] - one_box_vertices[0]
x_vec = one_box_vertices[4] - one_box_vertices[0]
z_points_fixed = np.linspace(one_box_vertices[0] - offset, one_box_vertices[1], int(round(abs(z_vec_fixed[2]) / 0.05)))[:, 2]
z_points = np.linspace(one_box_vertices[0], one_box_vertices[1], int(round(abs(z_vec[2]) / 0.05)))[:, 2]
y_points = np.linspace(one_box_vertices[0], one_box_vertices[2], int(round(abs(y_vec[1]) / 0.05)))[:, 1]
x_points = np.linspace(one_box_vertices[0], one_box_vertices[4], int(round(abs(x_vec[0]) / 0.05)))[:, 0]
box_grid = deepcopy(np.meshgrid(x_points, y_points, z_points))
box_grid_fixed = deepcopy(np.meshgrid(x_points, y_points, z_points_fixed))
                box_vertices += [np.stack(box_grid)]
#!/usr/bin/env python
#title :main.py
#description :Tensorflow implementation of CapsNet.
#author :<NAME>
#date :2019/04/30
#version :1.0
#usage :python3 main.py
#python_version :3.6.7
#==============================================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from capsnet import CapsNet
from tensorflow.examples.tutorials.mnist import input_data
import functools
mnist = input_data.read_data_sets('MNIST_data/')
batch_size = 10
tf.reset_default_graph()
tf.random.set_random_seed(0)
np.random.seed(0)
checkpoint_file = './tmp/model.ckpt'
def train(model, restore = False, n_epochs = 50):
init = tf.global_variables_initializer()
n_iter_train_per_epoch = mnist.train.num_examples // batch_size
n_iter_valid_per_epoch = mnist.validation.num_examples // batch_size
best_loss_val = np.infty
saver = tf.train.Saver()
with tf.Session() as sess:
writer = tf.summary.FileWriter("output", sess.graph)
        if restore and tf.train.checkpoint_exists(checkpoint_file):
saver.restore(sess, checkpoint_file)
else:
init.run()
print('\n\nRunning CapsNet ...\n')
count_params()
for epoch in range(n_epochs):
margin_loss_train_ep = []
recnst_loss_train_ep = []
loss_train_ep = []
acc_train_ep = []
for it in range(1, n_iter_train_per_epoch + 1):
X_batch, y_batch = mnist.train.next_batch(batch_size)
_, loss_batch_train, margin_loss_train, recnst_loss_train,acc_batch_train = sess.run(
[model.train_op,
model.margn_loss,
model.recnst_loss_scale,
model.batch_loss,
model.accuracy],
feed_dict = {model.X: X_batch.reshape([-1, 28, 28, 1]),
model.y: y_batch,
model.reconstruction: True})
print("\rIter: {}/{} [{:.1f}%] loss : {:.5f}".format(
it, n_iter_train_per_epoch, 100.0 * it / n_iter_train_per_epoch, loss_batch_train), end="")
plot_imgs = sess.run(model.X_cropped, feed_dict = {model.X: X_batch.reshape([-1, 28, 28, 1])})
#print(plot_imgs.shape)
#print(X_batch[0])
#plt.imshow(X_batch[0].reshape((28,28)), cmap='gray')
#plt.show()
#plt.imshow(plot_imgs[0].reshape((28,28)), cmap='gray')
#plt.show()
loss_train_ep.append(loss_batch_train)
acc_train_ep.append(acc_batch_train)
margin_loss_train_ep.append(margin_loss_train)
recnst_loss_train_ep.append(recnst_loss_train)
loss_train = np.mean(loss_train_ep)
margin_loss_train = np.mean(margin_loss_train_ep)
recnst_loss_train = np.mean(recnst_loss_train_ep)
acc_train = np.mean(acc_train_ep)
loss_val_ep = []
acc_val_ep = []
for it in range(1, n_iter_valid_per_epoch + 1):
X_batch, y_batch = mnist.validation.next_batch(batch_size)
loss_batch_val, acc_batch_val = sess.run(
[model.batch_loss, model.accuracy],
feed_dict = {model.X_cropped: X_batch.reshape([-1, 28, 28, 1]),
model.y: y_batch})
loss_val_ep.append(loss_batch_val)
acc_val_ep.append(acc_batch_val)
print("\rValidation {}/{} {:.1f}%".format(it,
n_iter_valid_per_epoch,
100.0 * it / n_iter_valid_per_epoch),
end=" "*30)
loss_val = np.mean(loss_val_ep)
acc_val = np.mean(acc_val_ep)
print("\repoch: {} loss_train: {:.5f}, loss_val: {:.5f}, margin_loss: {:.5f}, recnst_loss: {:.5f}, train_acc: {:.4f}%, valid_acc: {:.4f}% {}".format(
epoch + 1,
loss_train,
margin_loss_train,
recnst_loss_train,
loss_val,
acc_train * 100.0,
acc_val * 100.0,
"(improved)" if loss_val < best_loss_val else ""))
if loss_val < best_loss_val:
saver.save(sess, checkpoint_file)
best_loss_val = loss_val
writer.close()
def test(model):
n_iter_test_per_epoch = mnist.test.num_examples // batch_size
loss_test_ep = []
acc_test_ep = []
#init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
#init.run()
#saver = tf.train.import_meta_graph(checkpoint_file +'.meta')
saver.restore(sess, tf.train.latest_checkpoint('tmp/'))
#init.run()
print('\n\nTest\n')
for it in range(1, n_iter_test_per_epoch + 1):
X_batch, y_batch = mnist.test.next_batch(batch_size)
loss_batch_test, acc_batch_test = sess.run(
[model.batch_loss, model.accuracy],
feed_dict = { model.X_cropped: X_batch.reshape([-1, 28, 28, 1]),
model.y: y_batch,
model.reconstruction: False})
loss_test_ep.append(loss_batch_test)
acc_test_ep.append(acc_batch_test)
print("\rTesting {}/{} {:.1f}%".format(it,
n_iter_test_per_epoch,
100.0 * it / n_iter_test_per_epoch),
end=" "*30)
        loss_test = np.mean(loss_test_ep)
import pickle
import re
import time
import numpy as np
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
from tensorflow.python.keras.utils import to_categorical
from lib.plot_curves import learning_curves
from lib.helper_functions import validation_split
from models.models_segmentation import select_model, IoU
from scripts.run_BUvsTD import setup, preprocessing, finalize
'''
Script for running a toy segmentation experiment. We extract a segmentation dataset
based on Fashion-MNIST classification task. We propagate the global label based on
thresholding the pixel values. The threshold is empirically set.
This leads to a dataset of 12 classes (original 10 + background and ignore class).
The ignore class contributes neither to loss nor to IoU computation.
To increase the complexity of the task we extract 2x2 object meshes, leading to a total
of 150000 training and 25000 testing samples, respectively. Class weights are extracted
for dealing with class-imbalance.
'''
'''
Commandline inputs:
-d FMNIST_S (Fashion-MNIST)
-m Unet -l 0.01 -w 0 -e 20 -r 4 -b 128 -v 0.1 -s NA
-m FCN -l 0.01 -w 0 -e 20 -r 4 -b 128 -v 0.1 -s NA
-m TD -l 0.01 -w 0 -e 20 -r 4 -b 128 -v 0.1 -s NA
'''
def extract_segmentation_mask(X, y, thres, n_classes=12):
labels = np.zeros((X.shape[0], X.shape[1], X.shape[2], n_classes), dtype=int)
labels[..., -1] = np.logical_and(X[..., 0] > 0, X[..., 0] <= thres)
labels[..., -2] = X[..., 0] == 0
labels[..., :-2] = (X > thres) * to_categorical(y)[:, np.newaxis, np.newaxis, :]
class_priors = np.sum(labels, axis=(0, 1, 2))
class_weights = np.sum(class_priors) / (n_classes * class_priors)
class_weights = class_weights[y]
return labels, class_weights
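# Hedged usage sketch (not part of the original experiment): builds a tiny
# hypothetical batch to illustrate the mask layout produced above, i.e. 10
# object channels plus background (index -2) and ignore (index -1).
def _demo_extract_segmentation_mask():
    demo_x = np.zeros((10, 28, 28, 1), dtype=np.float32)
    demo_x[:, 10:18, 10:18, 0] = 0.9  # bright "object" pixels above the threshold
    demo_x[:, 5, 5, 0] = 0.2          # faint pixels fall into the ignore class
    demo_y = np.arange(10)            # one sample per class, so all 10 channels are used
    labels, class_weights = extract_segmentation_mask(demo_x, demo_y, thres=0.5)
    assert labels.shape == (10, 28, 28, 12)  # 10 classes + background + ignore
    assert class_weights.shape == (10,)      # one weight per sample, indexed by its label
    return labels, class_weights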
def extract_segmentation_mesh(X, y, grid_ext, shuffle=True, n_classes=12):
train_indexes = np.arange(len(y), dtype=int)
if shuffle:
np.random.shuffle(train_indexes)
train_indexes = np.reshape(train_indexes, [len(train_indexes) // grid_ext ** 2, grid_ext ** 2])
train_data = []
train_labels = []
for i in train_indexes:
data_app = []
labels_app = []
for j in range(grid_ext):
data_app.append(np.concatenate(X[i[j * grid_ext:(j + 1) * grid_ext]], axis=0))
labels_app.append(np.concatenate(y[i[j * grid_ext:(j + 1) * grid_ext]], axis=0))
data_app = np.concatenate(data_app, axis=1)
labels_app = np.concatenate(labels_app, axis=1)
train_data.append(data_app)
train_labels.append(labels_app)
# class_weights = np.sum(train_labels) / (n_classes * np.sum(train_labels, axis=(0, 1, 2)))
class_weights = 1 / np.sum(train_labels, axis=(0, 1, 2))
class_weights /= class_weights[-2]
class_weights[-1] = 0
return np.array(train_data), np.array(train_labels), class_weights
def train(args, filepath, f_output, x_train, y_train, x_test, y_test, class_weights=None, method=None):
base_model_name = args.model_name
if args.extension is not None:
base_model_name = re.sub('_' + args.extension, '', base_model_name)
# Extracting statistics for every model-set combination and history for learning curves
n_classes = 12
history = []
test_acc = np.zeros(args.repetitions)
test_loss = np.zeros_like(test_acc)
test_IoU = np.zeros((args.repetitions, n_classes))
training_time = []
inference_time = np.zeros_like(test_acc)
callbacks = []
print(class_weights)
for i in range(args.repetitions):
if args.scheduler != 'NA':
sched = globals()[args.scheduler]
if 'stage' in args.scheduler:
print(args.scheduler)
cb_decayLR = tf.keras.callbacks.LearningRateScheduler(sched(args.learning_rate, args.num_epochs),
verbose=0)
else:
cb_decayLR = tf.keras.callbacks.LearningRateScheduler(sched, verbose=0)
else:
cb_decayLR = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, verbose=1,
mode='auto', min_delta=0.0001, cooldown=0,
min_lr=args.learning_rate/10)
if not callbacks:
callbacks.append(cb_decayLR)
else:
callbacks[0] = cb_decayLR
input_shape = x_train.shape[1:]
print('Loading model: ', base_model_name)
optimizer = tf.keras.optimizers.SGD(args.learning_rate, momentum=0.9, nesterov=True)
# optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
if method is not None:
model = select_model(input_shape, base_model_name, optimizer, args.weight_decay, method)
else:
model = select_model(input_shape, base_model_name, optimizer, args.weight_decay, class_weights=class_weights)
x_train, y_train = shuffle(x_train, y_train)
# Extract tranining and validation split indices
if args.val_split != 0:
train_ind, val_ind = validation_split(x_train, y_train, args.val_split, args.dataset == 'TOY')
# Timing training
start_train = time.time()
if args.augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False,
samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0,
width_shift_range=0.1, height_shift_range=0.1, brightness_range=None, shear_range=0.0, zoom_range=0,
channel_shift_range=0., fill_mode='nearest', cval=0., horizontal_flip=True, vertical_flip=False,
rescale=None, preprocessing_function=None, data_format='channels_last', validation_split=0,
dtype='float32')
datagen.fit(x_train[train_ind])
hist = model.fit_generator(datagen.flow(x_train[train_ind], y_train[train_ind], batch_size=args.batch_size),
epochs=args.num_epochs,
validation_data=(x_train[val_ind], y_train[val_ind]),
callbacks=callbacks, verbose=2)
else:
hist = model.fit(x_train[train_ind], y=y_train[train_ind], batch_size=args.batch_size,
epochs=args.num_epochs,
verbose=2, validation_data=(x_train[val_ind], y_train[val_ind]), callbacks=callbacks)
training_time.append(time.time() - start_train)
history.append(hist.history)
# Evaluate
test_loss[i], test_acc[i], _ = model.evaluate(x_test, y_test, batch_size=args.batch_size, verbose=0)
start_inference = time.time()
y_pred = model.predict(x_test, batch_size=args.batch_size, verbose=0)
inference_time[i] = time.time() - start_inference
test_IoU[i] = IoU(y_test, y_pred)
if i == args.repetitions - 1:
model.save(filepath['models'] + filepath['dataset'] + args.model_name + '.h5')
# Store history
with open(filepath['history'] + filepath['dataset'] + 'history_' + args.model_name + '.txt', 'wb') as f_history:
pickle.dump(history, f_history)
# Extract and output metrics
mean_test_loss = np.mean(test_loss)
std_test_loss = np.std(test_loss, ddof=1)
mean_test_acc = np.mean(test_acc)
std_test_acc = np.std(test_acc, ddof=1)
    mean_inference_time = np.mean(inference_time)
import numpy as np
import tvm
from layers import autodiff, tvm_op
tgt_host="llvm"
tgt="llvm"
dtype = "float32"
ctx = tvm.context(tgt, 0)
def test_matrix_elementwise_add():
shape = (500, 200)
x = np.random.uniform(0, 10, size=shape).astype(dtype)
y = np.random.uniform(0, 10, size=shape).astype(dtype)
z = np.zeros(shape).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
arr_z = tvm.nd.array(z, ctx=ctx)
elemwise_add = tvm_op.make_elemwise_add(shape, tgt, tgt_host, "elem_add")
elemwise_add(arr_x, arr_y, arr_z)
z = arr_z.asnumpy()
np.testing.assert_allclose(x + y, z, rtol=1e-5)
def test_matrix_elementwise_add_by_const():
shape = (2000, 3000)
x = np.random.uniform(0, 10, size=shape).astype(dtype)
const_val = np.random.uniform(0, 10)
y = np.zeros(shape).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
elemwise_add_by_const = tvm_op.make_elemwise_add_by_const(shape, const_val, tgt, tgt_host, "elem_add_by_const")
elemwise_add_by_const(arr_x, arr_y)
y = arr_y.asnumpy()
np.testing.assert_allclose(x + const_val, y, rtol=1e-5)
def test_matrix_elementwise_mul():
shape = (500, 200)
x = np.random.uniform(0, 10, size=shape).astype(dtype)
y = np.random.uniform(0, 10, size=shape).astype(dtype)
z = np.zeros(shape).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
arr_z = tvm.nd.array(z, ctx=ctx)
elemwise_mul = tvm_op.make_elemwise_mul(shape, tgt, tgt_host, "elem_add")
elemwise_mul(arr_x, arr_y, arr_z)
z = arr_z.asnumpy()
np.testing.assert_allclose(x * y, z, rtol=1e-5)
def test_matrix_elementwise_mul_by_const():
shape = (2000, 3000)
x = np.random.uniform(0, 10, size=shape).astype(dtype)
const_val = np.random.uniform(0, 10)
y = np.zeros(shape).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
elemwise_mul_by_const = tvm_op.make_elemwise_mul_by_const(shape, const_val, tgt, tgt_host, "elem_mul_by_const")
elemwise_mul_by_const(arr_x, arr_y)
y = arr_y.asnumpy()
np.testing.assert_allclose(x * const_val, y, rtol=1e-5)
def test_matrix_multiply():
shapeX = (500, 700)
shapeY = (700, 1000)
shapeZ = (500, 1000)
x = np.random.uniform(0, 10, size=shapeX).astype(dtype)
y = np.random.uniform(0, 10, size=shapeY).astype(dtype)
z = np.zeros(shapeZ).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
arr_z = tvm.nd.array(z, ctx=ctx)
matrix_mul = tvm_op.make_matrix_mul(shapeX, False, shapeY, False, tgt, tgt_host, "matrix_mul")
matrix_mul(arr_x, arr_y, arr_z)
z = arr_z.asnumpy()
np.testing.assert_allclose(np.dot(x, y), z, rtol=1e-5)
shapeX = (1000, 500)
shapeY = (2000, 500)
shapeZ = (1000, 2000)
x = np.random.uniform(0, 10, size=shapeX).astype(dtype)
y = np.random.uniform(0, 10, size=shapeY).astype(dtype)
z = np.zeros(shapeZ).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
arr_z = tvm.nd.array(z, ctx=ctx)
matrix_mul = tvm_op.make_matrix_mul(shapeX, False, shapeY, True, tgt, tgt_host, "matrix_mul")
matrix_mul(arr_x, arr_y, arr_z)
z = arr_z.asnumpy()
np.testing.assert_allclose(np.dot(x, np.transpose(y)), z, rtol=1e-5)
shapeX = (500, 1000)
shapeY = (500, 2000)
shapeZ = (1000, 2000)
x = np.random.uniform(0, 10, size=shapeX).astype(dtype)
y = np.random.uniform(0, 10, size=shapeY).astype(dtype)
z = np.zeros(shapeZ).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
arr_z = tvm.nd.array(z, ctx=ctx)
matrix_mul = tvm_op.make_matrix_mul(shapeX, True, shapeY, False, tgt, tgt_host, "matrix_mul")
matrix_mul(arr_x, arr_y, arr_z)
z = arr_z.asnumpy()
np.testing.assert_allclose(np.dot(np.transpose(x), y), z, rtol=1e-5)
shapeX = (500, 1000)
shapeY = (2000, 500)
shapeZ = (1000, 2000)
x = np.random.uniform(0, 10, size=shapeX).astype(dtype)
y = np.random.uniform(0, 10, size=shapeY).astype(dtype)
z = np.zeros(shapeZ).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
arr_z = tvm.nd.array(z, ctx=ctx)
matrix_mul = tvm_op.make_matrix_mul(shapeX, True, shapeY, True, tgt, tgt_host, "matrix_mul")
matrix_mul(arr_x, arr_y, arr_z)
z = arr_z.asnumpy()
np.testing.assert_allclose(np.dot(np.transpose(x), np.transpose(y)), z, rtol=1e-5)
def test_conv2d():
# im2col and np_conv2d are helper functions
def im2col(X, filter_H, filter_W, padding, stride):
N, C, H, W = X.shape
assert (H + 2 * padding - filter_H) % stride == 0
assert (W + 2 * padding - filter_W) % stride == 0
out_H = int((H + 2 * padding - filter_H) / stride + 1)
out_W = int((W + 2 * padding - filter_W) / stride + 1)
y_row_size = C * filter_H * filter_W
y_col_size = out_H * out_W
y_shape = (N, y_row_size, y_col_size)
Y = np.empty(y_shape, dtype = X.dtype)
for batch_index in range(N):
for col_index in range(y_col_size):
out_y = int(col_index / out_W)
out_x = int(col_index % out_W)
in_y = out_y * stride - padding
in_x = out_x * stride - padding
row_idx = 0
for c in range(0, C):
for y in range(in_y, in_y + filter_H):
for x in range(in_x, in_x + filter_W):
if (x < 0 or x >= W or y < 0 or y >= H):
Y[batch_index, row_idx, col_index] = 0
else:
Y[batch_index, row_idx, col_index] = X[batch_index, c, y, x]
row_idx += 1
return Y
def np_conv2d(X, Filter, padding=0, stride=1):
"""Implement a conv2d as a matrix multiply after im2col."""
filter_outChannel, filter_inChannel, filter_H, filter_W = Filter.shape
N, C, H, W = X.shape
assert (H + 2 * padding - filter_H) % stride == 0
assert (W + 2 * padding - filter_W) % stride == 0
out_H = int((H + 2 * padding - filter_H) / stride + 1)
out_W = int((W + 2 * padding - filter_W) / stride + 1)
im2col_matrix = im2col(X, int(filter_H), int(filter_W), int(padding), int(stride))
filter_matrix = Filter.reshape(filter_outChannel, -1)
return np.matmul(filter_matrix, im2col_matrix).reshape(N, filter_outChannel, out_H, out_W)
shapeX = (100, 3, 28, 28)
shapeF = (10, 3, 5, 5)
shapeY = (100, 10, 24, 24)
x = np.random.uniform(0, 10, size=shapeX).astype(dtype)
f = np.random.uniform(0, 10, size=shapeF).astype(dtype)
y = np.zeros(shapeY).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_f = tvm.nd.array(f, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
conv2d = tvm_op.make_conv2d(shapeX, shapeF, tgt, tgt_host, "conv2d")
conv2d(arr_x, arr_f, arr_y)
y = arr_y.asnumpy()
np.testing.assert_allclose(np_conv2d(x, f), y, rtol=1e-5)
def test_relu():
shape = (2000, 2500)
x = np.random.uniform(-1, 1, shape).astype(dtype)
y = np.zeros(shape).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
relu = tvm_op.make_relu(shape, tgt, tgt_host, "relu")
relu(arr_x, arr_y)
y = arr_y.asnumpy()
np.testing.assert_allclose(np.maximum(x, 0).astype(dtype), y)
def test_relu_gradient():
shape = (2000, 2500)
x = np.random.uniform(-1, 1, shape).astype(dtype)
grad_x = np.random.uniform(-5, 5, shape).astype(dtype)
y = np.zeros(shape).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_grad_x = tvm.nd.array(grad_x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
relu_gradient = tvm_op.make_relu_gradient(shape, tgt, tgt_host, "relu_gradient")
relu_gradient(arr_x, arr_grad_x, arr_y)
y = arr_y.asnumpy()
np.testing.assert_allclose(((x > 0) * grad_x).astype(dtype), y)
def test_softmax():
shape = (400, 1000)
x = np.random.uniform(-5, 5, shape).astype(dtype)
y = np.zeros(shape).astype(dtype)
arr_x = tvm.nd.array(x, ctx=ctx)
arr_y = tvm.nd.array(y, ctx=ctx)
matrix_softmax = tvm_op.make_matrix_softmax(shape, tgt, tgt_host, "matrix_softmax")
matrix_softmax(arr_x, arr_y)
y = arr_y.asnumpy()
np.testing.assert_allclose(autodiff.softmax_func(x), y, rtol=1e-5)
def test_softmax_cross_entropy():
shape = (400, 1000)
y = np.random.uniform(-5, 5, shape).astype(dtype)
y_ = np.random.uniform(-5, 5, shape).astype(dtype)
out = np.zeros((1,)).astype(dtype)
arr_y = tvm.nd.array(y, ctx=ctx)
arr_y_ = tvm.nd.array(y_, ctx=ctx)
arr_out = tvm.nd.array(out, ctx=ctx)
matrix_softmax_cross_entropy = tvm_op.make_matrix_softmax_cross_entropy(shape, tgt, tgt_host, "softmax_cross_entropy")
matrix_softmax_cross_entropy(arr_y, arr_y_, arr_out)
out = arr_out.asnumpy()
# numpy calculation
cross_entropy = np.mean(
-np.sum(y_ * np.log(autodiff.softmax_func(y)), axis=1), keepdims=True)
    np.testing.assert_allclose(cross_entropy, out, rtol=1e-5)
import os, sys, time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import Colorbar
import matplotlib.colors as mcolors
from matplotlib.patches import Ellipse
from astropy.io import fits
from astropy.visualization import (AsinhStretch, LinearStretch, ImageNormalize)
from frank.radial_fitters import FrankFitter
from frank.geometry import FixedGeometry
from frank.io import save_fit
# controls
target = 'dx5_incl2'
#target = 'dx5_PA5'
#target = 'incl2_PA5'
#target = 'incl2_PA5_dx5'
#target = 'zr3_dx5'
#target = 'zr3_dy5'
#target = 'zr3_incl2'
#target = 'zr3_PA5'
frank = True
im_dat = False
im_res = True
im_mdl = False
annotate_res = False
# constants
c_ = 2.99792e10
k_ = 1.38057e-16
# residuals color map
c2 = plt.cm.Reds(np.linspace(0, 1, 32))
c1 = plt.cm.Blues_r(np.linspace(0, 1, 32))
c1 = np.vstack([c1, np.ones((12, 4))])
colors = np.vstack((c1, c2))
mymap = mcolors.LinearSegmentedColormap.from_list('eddymap', colors)
# crude passing mechanism
f = open('whichdisk.txt', 'w')
f.write(target)
f.close()
### - IMAGE THE DATA
if im_dat:
print('....')
print('Imaging the data')
print('....')
os.system('casa --nogui --nologger --nologfile -c data_imaging.py')
print('....')
print('Finished imaging the data')
print('....')
### - PLOT THE ANNOTATED IMAGE
# load data
dhdu = fits.open('data/dx1mas_data.JvMcorr.fits')
dimg, hd = np.squeeze(dhdu[0].data), dhdu[0].header
# parse coordinate frame indices into physical numbers
RA = 3600 * hd['CDELT1'] * (np.arange(hd['NAXIS1']) - (hd['CRPIX1'] - 1))
DEC = 3600 * hd['CDELT2'] * (np.arange(hd['NAXIS2']) - (hd['CRPIX2'] - 1))
dRA, dDEC = np.meshgrid(RA, DEC)
freq = hd['CRVAL3']
# disk-frame polar coordinates
inclr = np.radians(35.)
PAr = np.radians(110.)
xd = (dRA * np.cos(PAr) - dDEC * np.sin(PAr)) / np.cos(inclr)
yd = (dRA * np.sin(PAr) + dDEC * np.cos(PAr))
r, theta = np.sqrt(xd**2 + yd**2), np.degrees(np.arctan2(yd, xd))
# beam parameters
bmaj, bmin, bPA = 3600 * hd['BMAJ'], 3600 * hd['BMIN'], hd['BPA']
beam_area = (np.pi * bmaj * bmin / (4 * np.log(2))) / (3600 * 180 / np.pi)**2
# image setups
rout = 1.1
im_bounds = (dRA.max(), dRA.min(), dDEC.min(), dDEC.max())
dRA_lims, dDEC_lims = [1.5*rout, -1.5*rout], [-1.5*rout, 1.5*rout]
# intensity limits, and stretch
norm = ImageNormalize(vmin=0, vmax=50., stretch=AsinhStretch())
cmap = 'inferno'
### Plot the data image
plt.style.use('default')
fig = plt.figure(figsize=(7.0, 5.9))
gs = gridspec.GridSpec(1, 2, width_ratios=(1, 0.04))
# image (sky-plane)
ax = fig.add_subplot(gs[0,0])
Tb = (1e-23 * dimg / beam_area) * c_**2 / (2 * k_ * freq**2)
im = ax.imshow(Tb, origin='lower', cmap=cmap, extent=im_bounds,
norm=norm, aspect='equal')
# annotations
tbins = np.linspace(-np.pi, np.pi, 181)
rgapi = [0.15, 0.70]
rgapo = [0.18, 0.82]
for ir in range(len(rgapi)):
rgi = rgapi[ir]
rgo = rgapo[ir]
xgi, ygi = rgi * np.cos(tbins) * np.cos(inclr), rgi * np.sin(tbins)
ax.plot( xgi * np.cos(PAr) + ygi * np.sin(PAr),
-xgi * np.sin(PAr) + ygi * np.cos(PAr), ':w')
xgo, ygo = rgo * np.cos(tbins) * np.cos(inclr), rgo * np.sin(tbins)
ax.plot( xgo * np.cos(PAr) + ygo * np.sin(PAr),
-xgo * np.sin(PAr) + ygo * np.cos(PAr), ':w')
xout, yout = rout * np.cos(tbins) * np.cos(inclr), rout * np.sin(tbins)
ax.plot( xout * np.cos(PAr) + yout * np.sin(PAr),
-xout * np.sin(PAr) + yout * np.cos(PAr), '--w')
# beam
beam = Ellipse((dRA_lims[0] + 0.1*np.diff(dRA_lims),
dDEC_lims[0] + 0.1*np.diff(dDEC_lims)), bmaj, bmin, 90-bPA)
beam.set_facecolor('w')
ax.add_artist(beam)
# limits, labeling
ax.set_xlim(dRA_lims)
ax.set_ylim(dDEC_lims)
ax.set_xlabel('RA offset ($^{\prime\prime}$)')
ax.set_ylabel('DEC offset ($^{\prime\prime}$)')
# add a colorbar
cbax = fig.add_subplot(gs[:,1])
cb = Colorbar(ax=cbax, mappable=im, orientation='vertical',
ticklocation='right')
cb.set_label('brightness temperature (K)', rotation=270, labelpad=22)
# adjust layout
fig.subplots_adjust(wspace=0.02)
fig.subplots_adjust(left=0.11, right=0.89, bottom=0.1, top=0.98)
fig.savefig('../../figs/'+target+'_dataimage.pdf')
### - FRANK VISIBILITY MODELING
if frank:
print('....')
print('Performing visibility modeling')
print('....')
# load the visibility data
dat = np.load('data/'+target+'.vis.npz')
u, v, vis, wgt = dat['u'], dat['v'], dat['Vis'], dat['Wgt']
# set the disk viewing geometry
geom = FixedGeometry(35., 110., 0.0, 0.0)
# configure the fitting code setup
FF = FrankFitter(Rmax=2*rout, geometry=geom,
N=300, alpha=1.3, weights_smooth=0.1)
# fit the visibilities
sol = FF.fit(u, v, vis, wgt)
# save the fit
save_fit(u, v, vis, wgt, sol, prefix='fits/'+target)
print('....')
print('Finished visibility modeling')
print('....')
### Imaging
if im_res:
print('....')
print('Imaging residuals')
print('....')
os.system('casa --nogui --nologger --nologfile -c resid_imaging.py')
print('....')
print('Finished imaging residuals')
print('....')
if im_mdl:
print('....')
print('Imaging model')
print('....')
os.system('casa --nogui --nologerr --nologfile -c model_imaging.py')
print('....')
print('Finished imaging model')
print('....')
### +/- Residual plot
if os.path.exists('data/'+target+'_resid.JvMcorr.fits'):
print('....')
print('Making residual +/- plot')
print('using file created on: %s' % \
time.ctime(os.path.getctime('data/'+target+'_resid.JvMcorr.fits')))
print('....')
# load residual image
rhdu = fits.open('data/'+target+'_resid.JvMcorr.fits')
rimg = np.squeeze(rhdu[0].data)
# set up plot
plt.style.use('classic')
fig = plt.figure(figsize=(7.0, 5.9))
gs = gridspec.GridSpec(1, 2, width_ratios=(1, 0.04))
# image (sky-plane)
ax = fig.add_subplot(gs[0,0])
vmin, vmax = -50, 50 # these are in microJy/beam units
norm = ImageNormalize(vmin=vmin, vmax=vmax, stretch=LinearStretch())
im = ax.imshow(1e6*rimg, origin='lower', cmap=mymap, extent=im_bounds,
norm=norm, aspect='equal')
# gap markers
gcols = ['k', 'darkgray']
for ir in range(len(rgapi)):
rgi = rgapi[ir]
rgo = rgapo[ir]
xgi, ygi = rgi * np.cos(tbins) * np.cos(inclr), rgi * np.sin(tbins)
ax.plot( xgi * np.cos(PAr) + ygi * np.sin(PAr),
-xgi * np.sin(PAr) + ygi * np.cos(PAr), gcols[ir])
        xgo, ygo = rgo * np.cos(tbins) * np.cos(inclr), rgo * np.sin(tbins)
# This file is part of the
# Bartolina Project (https://github.com/exiliadadelsur/Bartolina).
# Copyright (c) 2020 <NAME> and <NAME>
# License: MIT
# Full Text: https://github.com/exiliadadelsur/Bartolina/blob/master/LICENSE
"""Bartolina : real space reconstruction algorithm for redshift."""
import astropy.constants as const
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.cosmology import LambdaCDM, z_at_value
import attr
import camb
import cluster_toolkit as ctoolkit
from halotools.empirical_models import NFWProfile
import numpy as np
import pmesh
import pyfof
from sklearn.base import BaseEstimator, ClusterMixin, clone as clone_estimator
from sklearn.cluster import DBSCAN
# ============================================================================
# CONSTANTS
# ============================================================================
N_MONTE_CARLO = 300000
# ============================================================================
# AUXILIARY CLASS
# ============================================================================
@attr.s(frozen=True)
class Halo(object):
"""Store properties of dark matter halos.
Attributes
----------
    xyz_centers : ndarray
Cartesian coordinates in Mpc to center of each halo.
dc_centers : array_like
Comoving distance to center of each halo.
radius : array_like
Radius of each halo in Mpc.
mass : array_like
Mass of each halo in solar mass.
    labels_h_massive : array_like
        Indices of halos with mass greater than the threshold mass.
"""
xyz_centers = attr.ib()
dc_centers = attr.ib()
z_centers = attr.ib()
radius = attr.ib()
mass = attr.ib()
labels_h_massive = attr.ib()
@attr.s(frozen=True)
class GalInHalo(object):
"""Store clustering results.
Attributes
----------
groups : array_like
Cluster labels for each galaxy. Noisy samples are given the label -1.
id_group : array_like
List of ids used in groups attribute.
"""
groups = attr.ib()
id_groups = attr.ib()
# =============================================================================
# FOF WRAPPER
# =============================================================================
class FoF(ClusterMixin, BaseEstimator):
"""SK-Learn like implementation of the Friend-of-Friends algorithm.
Internally uses the pyfof library.
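    Example
    -------
    A minimal sketch; ``xyz`` stands for any (n_samples, n_features)
    array of positions.
    >>> fof = FoF(linking_length=0.5)
    >>> fof = fof.fit(xyz)
    >>> labels = fof.labels_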
"""
def __init__(self, linking_length: float = 0.5):
"""Write description.
Parameters
----------
linking_length : float, optional
DESCRIPTION. The default is 0.5.
Raises
------
TypeError
DESCRIPTION.
ValueError
DESCRIPTION.
Returns
-------
None.
"""
if not isinstance(linking_length, (int, float)):
raise TypeError("linking_length must be a float instance")
elif linking_length <= 0:
raise ValueError("linking_length must be > 0")
self.linking_length = linking_length
def fit(self, x, y=None, sample_weight=None):
"""Perform FoF clustering from features, or distance matrix.
Parameters
----------
x : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
            Training instances to cluster, or distances between instances.
y : Ignored
Not used, present here for API consistency by convention.
        sample_weight : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
# pyfof returns a list of N elements, where N is the number of groups
# found. Each list contains the indices of x that belongs to that group
groups = pyfof.friends_of_friends(x, self.linking_length)
# to turn the groups into sklearn-like labels, we create an array of
# "labels" with the same size as data is in x
x_len = len(x)
labels = np.empty(x_len, dtype=int)
# then we iterate over the groups, and assign the position of the group
# as a label in the group indexes
for idx, group in enumerate(groups):
labels[group] = idx
        # Finally we assign the labels as an attribute of the object.
self.labels_ = labels
return self
# ============================================================================
# MAIN CLASS
# ============================================================================
@attr.s
class ReZSpace(object):
"""Real space reconstruction algorithm.
This class have methods for corrects galaxy positions affected by Kaiser
and Finger of God (FoG) effects.
Parameters
----------
ra : array_like
Right ascension astronomy coordinate in decimal degrees.
dec : array_like
Declination astronomy coordinate in decimal degrees.
z : array_like
Observational redshift.
cosmo : object, optional
Instance of an astropy cosmology. Default cosmology is
LambdaCDM with H0=100, Om0=0.27, Ode0=0.73.
Mth : float, optional
The threshold mass that determines massive halos in solar mass.
Default is 10 ** 12.5.
delta_c : string, optional
Overdensity constant. Default is "200m".
halo_clustering : sklearn.base.BaseEstimator
Algorithm to identify the halos. Default ``sklearn.cluster.DBSCAN``.
Notes
-----
    The corrections require the center, radius and mass of
    each dark matter halo. For this, we consider the geometric center, and the
    mass is calculated following an NFW profile [navarro97]_ .
The radius is estimated as in Merchán & Zandivarez (2005) [merchan2005]_ .
The threshold mass is used to determine which of the halos are massive.
    That is, massive halos are those whose mass is higher than the threshold
    mass.
For the identification of halos by default we use the DBSCAN method of
the scikit-learn package, selecting the eps and min_samples parameters to
obtained the same galaxy groups of Zapata et al. (2009) [zapata2009]_ that
have more of 150 members.
Any other estimator from sklearn can be used.
References
----------
.. [navarro97] <NAME>., <NAME>., & <NAME>. (1997).
A universal density profile from hierarchical clustering.
The Astrophysical Journal, 490(2), 493.
.. [merchan2005] <NAME>., & <NAME>. (2005).
Galaxy groups in the third data release of the sloan
digital sky survey. The Astrophysical Journal, 630(2), 759.
.. [zapata2009] <NAME>., <NAME>., <NAME>., & <NAME>. (2009).
The influence of halo assembly on galaxies and galaxy groups.
Monthly Notices of the Royal Astronomical Society, 394(4), 2229-2237.
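    Example
    -------
    A minimal sketch; ``ra``, ``dec`` and ``z`` stand for the user's arrays of
    galaxy coordinates (decimal degrees) and observed redshifts.
    >>> import bartolina as bt
    >>> barto = bt.ReZSpace(ra, dec, z)
    >>> halos, galinhalo = barto.dark_matter_halos()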
"""
# User input params
ra = attr.ib()
dec = attr.ib()
z = attr.ib()
cosmo = attr.ib()
Mth = attr.ib(default=(10 ** 12.5))
delta_c = attr.ib(default="200m")
_halo_clustering = attr.ib(
validator=attr.validators.instance_of(BaseEstimator)
)
@cosmo.default
def _cosmo_default(self):
return LambdaCDM(H0=100, Om0=0.27, Ode0=0.73)
@_halo_clustering.default
def __halo_clustering_default(self):
return DBSCAN(eps=1.2, min_samples=24)
# ========================================================================
# Public methods
# ========================================================================
def dark_matter_halos(self):
"""Find properties of massive dark matter halos.
        Find massive dark matter halos and the Cartesian coordinates of their
        centers. Necessary for all the other methods.
Properties of halos: geometric center, comoving distance to center,
redshift to center, radius of halo and mass of halo.
Returns
-------
halos : object
            This class stores properties of dark matter halos, i.e. mass,
radius, centers.
        galinhalo : object
            This class stores clustering results.
Example
-------
>>> import bartolina as bt
>>> barto = bt.ReZSpace(ra, dec, z)
>>> halos, galinhalo = barto.dark_matter_halos()
Notes
-----
This method is separated into 3 small methods that perform each step
separately (xyzcoordinates, groups and group_prop).
"""
# cartesian coordinates for galaxies
xyz = self.xyzcoordinates()
# finding group of galaxies
groups, id_groups = self._groups(xyz)
# distance and redshifts to halo center radius and mass of halo
xyz_center, dc_center, z_center, rad, mass = self._group_prop(
id_groups, groups, xyz
)
# selec massive halos
labels_h_massive = np.where(mass > self.Mth)
# store results of clustering
galinhalo = GalInHalo(groups, id_groups)
# store properties of halos
halos = Halo(
xyz_center, dc_center, z_center, rad, mass, labels_h_massive
)
return halos, galinhalo
def xyzcoordinates(self):
"""Convert galaxies coordinates to Cartesian coordinates xyz.
Returns
-------
xyz : ndarray
Array containing Cartesian galaxies coordinates. Array has 3
columns and the same length as the number of galaxies.
Example
-------
>>> import bartolina as bt
>>> barto = bt.ReZSpace(ra, dec, z)
>>> xyz = barto.xyzcoordinates()
"""
# comoving distance to galaxies
dc = self.cosmo.comoving_distance(self.z)
# set Ra and Dec in degrees. Comoving distance in Mpc
c = SkyCoord(
ra=np.array(self.ra) * u.degree,
dec=np.array(self.dec) * u.degree,
            distance=np.array(dc) * u.Mpc,
)
# create an array with the results
xyz = np.array([c.cartesian.x, c.cartesian.y, c.cartesian.z]).T
return xyz
def _groups(self, xyz):
"""Galaxies clustering.
Finds groups of galaxies.
"""
# set weights for clustering
weights = self.z * 100
# clustering of galaxies
# we never use the instance level cluster, we always clone
# and use it internally
clustering = clone_estimator(self._halo_clustering)
clustering.fit(xyz, sample_weight=weights)
# select only galaxies in groups
unique_elements, counts_elements = np.unique(
clustering.labels_, return_counts=True
)
return clustering.labels_, unique_elements
def _radius(self, ra, dec, z):
"""Dark matter halos radius.
Calculate the radius of the halos.
"""
# number of galaxies
galnum = len(ra)
# comoving distance to galaxies
dc = self.cosmo.comoving_distance(z)
# prepare the coordinates for distance calculation
c1 = SkyCoord(np.array(ra) * u.deg, np.array(dec) * u.deg)
c2 = SkyCoord(np.array(ra) * u.deg, np.array(dec) * u.deg)
# equation 6 of Merchán & Zandivarez (2005) [merchan2005]_
sum_rij = 0
indi = np.arange(galnum)
for i in indi:
            sep = c1[i].separation(c2[np.where(indi > i)])
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ReplicaExchangeMC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
def effective_sample_size(x, **kwargs):
"""tfp.mcmc.effective_sample_size, with a maximum appropriate for HMC."""
# Since ESS is an estimate, it can go wrong... E.g. we can have negatively
# correlated samples, which *do* have ESS > N, but this ESS is only applicable
# for variance reduction power for estimation of the mean. We want to
# (blindly) use ESS everywhere (e.g. variance estimates)....and so...
ess = tfp.mcmc.effective_sample_size(x, **kwargs)
n = tf.cast(prefer_static.size0(x), x.dtype)
return tf.minimum(ess, n)
def _set_seed():
"""Helper which uses graph seed if using TFE."""
# TODO(b/68017812): Deprecate once TFE supports seed.
seed_stream = test_util.test_seed_stream()
if tf.executing_eagerly():
tf.random.set_seed(seed_stream())
return None
return seed_stream()
@test_util.test_graph_and_eager_modes
class DefaultSwapProposedFnTest(test_util.TestCase):
@parameterized.named_parameters(
('prob1p0_n1', 1.0, 1),
('prob1p0_n2', 1.0, 2),
('prob1p0_n4', 1.0, 4),
('prob1p0_n5', 1.0, 5),
('prob0p5_n1', 0.5, 1),
('prob0p5_n4', 0.5, 4),
('prob0p5_n7', 0.5, 7),
('prob0p0_n1', 0.0, 1),
('prob0p0_n2', 0.0, 2),
('prob0p0_n5', 0.0, 5),
)
def testProbSwapNumReplicaNoBatch(self, prob_swap, num_replica):
fn = tfp.mcmc.default_swap_proposal_fn(prob_swap)
num_results = 100
swaps = tf.stack(
[fn(num_replica, seed=i) for i in range(num_results)],
axis=0)
self.assertAllEqual((num_results, num_replica), swaps.shape)
self.check_swaps_with_no_batch_shape(self.evaluate(swaps), prob_swap)
@parameterized.named_parameters(
('prob1p0_n1', 1.0, 1),
('prob1p0_n2', 1.0, 2),
('prob1p0_n5', 1.0, 5),
('prob0p5_n1', 0.5, 1),
('prob0p5_n2', 0.5, 2),
('prob0p5_n3', 0.5, 3),
('prob0p0_n1', 0.0, 1),
('prob0p0_n2', 0.0, 2),
('prob0p0_n5', 0.0, 5),
)
def testProbSwapNumReplicaWithBatch(self, prob_swap, num_replica):
fn = tfp.mcmc.default_swap_proposal_fn(prob_swap)
num_results = 100
swaps = tf.stack(
[fn(num_replica, batch_shape=[2], seed=i) for i in range(num_results)],
axis=0)
self.assertAllEqual((num_results, num_replica, 2), swaps.shape)
swaps_ = self.evaluate(swaps)
# Batch members should have distinct swaps in most cases.
frac_same = np.mean(swaps_[..., 0] == swaps_[..., 1])
# If prob_swap == 0, swap is the null_swap always.
if (prob_swap == 0 or
# If num_replica == 1, swap = [0] always.
num_replica == 1 or
# In this case, we always swap and it's always [1, 0].
(num_replica == 2 and prob_swap == 1)):
self.assertEqual(1.0, frac_same)
else:
self.assertLess(frac_same, 0.9)
# Check that each batch member has proper statistics.
for i in range(swaps_.shape[-1]):
self.check_swaps_with_no_batch_shape(swaps_[..., i], prob_swap)
def check_swaps_with_no_batch_shape(self, swaps_, prob_swap):
assert swaps_.ndim == 2, 'Expected shape [num_results, num_replica]'
num_results, num_replica = swaps_.shape
null_swaps = np.arange(num_replica)
# Check that we propose at least one swap, prob_swap fraction of the
# time.
# An exception is made for when num_replica == 1, since in this case the
# only swap is the null swap.
expected_prob_swap = prob_swap * np.float32(num_replica > 1)
observed_prob_swap = np.mean(np.any(swaps_ != null_swaps, axis=1))
self.assertAllClose(
expected_prob_swap,
observed_prob_swap,
rtol=0,
# Accurate to 4 standard errors.
atol=4 * np.sqrt(prob_swap * (1 - prob_swap) / num_results))
# Verify the swap is "once only."
for n in range(20):
self.assertAllEqual(null_swaps, np.take(swaps_[n], swaps_[n]))
@test_util.test_graph_and_eager_modes
class REMCTest(test_util.TestCase):
def setUp(self):
tf.random.set_seed(123)
super(REMCTest, self).setUp()
def _checkNormalREMCSampling(self,
inverse_temperatures,
num_results=1000,
prob_swap=1.0,
dtype=np.float32):
"""Sampling from standard normal with REMC."""
target = tfd.Normal(dtype(0.), dtype(1.))
inverse_temperatures = dtype(inverse_temperatures)
num_replica = len(inverse_temperatures)
step_size = 0.51234 / np.sqrt(inverse_temperatures)
num_leapfrog_steps = 3
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=step_size,
store_parameters_in_results=True,
num_leapfrog_steps=num_leapfrog_steps)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
swap_proposal_fn=tfp.mcmc.default_swap_proposal_fn(
prob_swap),
seed=_set_seed())
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=target.sample(seed=_set_seed()),
kernel=remc,
num_burnin_steps=50,
trace_fn=lambda _, results: results,
parallel_iterations=1) # For determinism.
self.assertAllEqual((num_results,), states.shape)
states_, kr_, replica_ess_ = self.evaluate([
states,
kernel_results,
# Get the first (and only) state part for all replicas.
effective_sample_size(kernel_results.post_swap_replica_states[0]),
])
logging.vlog(
2, '---- execution:{} mean:{} stddev:{}'.format(
'eager' if tf.executing_eagerly() else 'graph',
states_.mean(), states_.std()))
# Some shortened names.
replica_log_accept_ratio = (
kr_.post_swap_replica_results.log_accept_ratio)
replica_states_ = kr_.post_swap_replica_states[0] # Get rid of "parts"
# Target state is at index 0.
self.assertAllClose(states_, replica_states_[:, 0])
# Check that *each* replica has correct marginal.
def _check_sample_stats(replica_idx):
x = replica_states_[:, replica_idx]
ess = replica_ess_[replica_idx]
err_msg = 'replica_idx={}'.format(replica_idx)
mean_atol = 5 * 1.0 / np.sqrt(ess)
self.assertAllClose(x.mean(), 0.0, atol=mean_atol, msg=err_msg)
# For a tempered Normal, Variance = T.
expected_var = 1 / inverse_temperatures[replica_idx]
var_atol = 5 * expected_var * np.sqrt(2) / np.sqrt(ess)
self.assertAllClose(np.var(x), expected_var, atol=var_atol, msg=err_msg)
for replica_idx in range(num_replica):
_check_sample_stats(replica_idx)
# Test log_accept_ratio and replica_log_accept_ratio.
self.assertAllEqual((num_results, num_replica),
replica_log_accept_ratio.shape)
replica_mean_accept_ratio = np.mean(
np.exp(np.minimum(0, replica_log_accept_ratio)), axis=0)
for accept_ratio in replica_mean_accept_ratio:
# Every single replica should have a decent P[Accept]
self.assertBetween(accept_ratio, 0.2, 0.99)
# Check swap probabilities for adjacent swaps.
self.assertAllEqual((num_results, num_replica - 1),
kr_.is_swap_accepted_adjacent.shape)
conditional_swap_prob = (
np.sum(kr_.is_swap_accepted_adjacent, axis=0) /
| np.sum(kr_.is_swap_proposed_adjacent, axis=0) | numpy.sum |
import pytest
from unittest.mock import Mock
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from figpptx.slide_editor import SlideEditor, SlideTransformer
@pytest.mark.parametrize(
"instance, expected",
[
((1, 2), (11, 58)),
([3, 5], [13, 55]),
([1, 2, 3, 5], [11, 58, 13, 55]),
(np.array([[1, 2], [3, 5]]), np.array([[11, 58], [13, 55]])),
],
)
def test_transform(instance, expected):
slide = Mock()
left = 10
top = 20
size = (30, 40)
transformer = SlideTransformer(left=left, top=top, size=size)
editor = SlideEditor(slide, transformer)
target = editor.transform(instance)
assert np.allclose(np.array(target), | np.array(expected) | numpy.array |
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
from scipy.stats import multivariate_normal
| np.random.seed(104) | numpy.random.seed |
import transform
import unittest
import numpy as np
import sys
sys.path.insert(0, '../')
import pdb
import rotateCorrection as rc
# another crude transformation computer
def _angle(x1, y1, x2, y2):
    # inputs are two points (x1, y1) and (x2, y2); returns the tilt angle in radians
dx = x2 - x1
dy = y2 - y1
if dy < 0:
return np.arctan(abs(dy)/dx)
else:
return -np.arctan(abs(dy)/dx)
def simple_angle_converter(pointpx, top_right, top_left, bottom_left, bottom_right, imagesize):
"""
All coordinate tuples are in (x, y) i.e. (lon, lat) convention.
pointpx (x, y): pixel coordinates counted from top left corner.
top_right, top_left, bottom_left, bottom_right: (lon, lat) pairs
imagesize: (width, height) tuple
"""
# first wrangle inputs
image_width, image_height = imagesize
px, py = pointpx
tr, tl, bl, br = top_right, top_left, bottom_left, bottom_right
# now start converting
image_width_in_lon = (tr[0] - tl[0] + br[0] - bl[0])/2
image_height_in_lat = (tl[1] - bl[1] + tr[1] - br[1])/2
top_left_lon, top_left_lat = tl
# 2. now convert (px, py) -> (dlon, dlat)
dlon = px*image_width_in_lon/image_width
dlat = py*image_height_in_lat/image_height
# compute the angle via simple trig.
angle_est1 = _angle(tl[0], tl[1], tr[0], tr[1])
angle_est2 = _angle(bl[0], bl[1], br[0], br[1])
angle = (angle_est1+angle_est2)/2
print(f"angle: {angle}")
rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
# apply reverse rotation: x2, y2 (unit: meter)
x2, y2 = np.dot(rot_matrix, np.array([dlon, dlat]))
# convert x2, y2 to lon, lat
actual_lon = top_left_lon + x2
actual_lat = top_left_lat - y2
return actual_lon, actual_lat
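# Worked example (illustrative only): for the unrotated 10 x 60 px image used in the
# tests below, with top_left = (20, 70) and bottom_right = (30, 10), the pixel (5, 30)
# gives dlon = 5 * 10 / 10 = 5 and dlat = 30 * 60 / 60 = 30; the angle is 0, so the
# converter returns (20 + 5, 70 - 30) = (25, 40).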
class TestTransformLukas(unittest.TestCase):
def test_unrotated_picture(self):
width = 10
height = 60
lon_min = 20
lon_max = 30
lat_min = 10
lat_max = 70
top_left = np.array([lon_min, lat_max])
top_right = np.array([lon_max, lat_max])
bottom_left = np.array([lon_min, lat_min])
bottom_right = np.array([lon_max, lat_min])
# check that top left is sane
res = transform.transform(np.array([0, 0]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, top_left))
# check that top right is sane
res = transform.transform(np.array([width,0]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, top_right))
# check that bottom left is sane
res = transform.transform(np.array([0,height]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, bottom_left))
# check that bottom right is sane
res = transform.transform(np.array([width,height]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, bottom_right))
class TestBasicTransform(unittest.TestCase):
def test_unrotated_picture(self):
width = 10
height = 60
lon_min = 20
lon_max = 30
lat_min = 10
lat_max = 70
top_left = np.array([lon_min, lat_max])
top_right = np.array([lon_max, lat_max])
bottom_left = np.array([lon_min, lat_min])
bottom_right = np.array([lon_max, lat_min])
# check that top left is sane
res = simple_angle_converter(np.array([0, 0]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, top_left))
# check that top right is sane
res = simple_angle_converter(np.array([width,0]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue(np.allclose(res, top_right))
# check that bottom left is sane
res = simple_angle_converter(np.array([0,height]), top_right, top_left,
bottom_left, bottom_right, np.array([width, height]))
self.assertTrue( | np.allclose(res, bottom_left) | numpy.allclose |
import numpy as np
from .method.utils import sim_ranks, compute_sim
from tqdm import tqdm
def expand_query(query, database, db_indices):
"""
Expands a query with descriptors from database
:param query: a single query vector
:param database: database vectors
:param db_indices: indices of database vectors to expand query
:return:
An expanded query vector.
"""
renewed_query = np.copy(query)
for db_id in db_indices:
renewed_query += database[db_id, :]
renewed_query = renewed_query / np.linalg.norm(renewed_query)
return renewed_query
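# Illustrative sketch (not part of the original module): expanding a query with its
# top-ranked neighbours, assuming L2-normalised descriptor rows.
def _expand_query_example():
    rng = np.random.default_rng(0)
    database = rng.normal(size=(8, 4))
    database /= np.linalg.norm(database, axis=1, keepdims=True)
    query = database[0]
    # average the query with the descriptors at indices 1 and 2, then re-normalise
    return expand_query(query, database, db_indices=[1, 2])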
def apply_qe(query, database, k):
"""
Performs query expansion.
:param query: a single query vector
:param database: database vectors
:param k: Top-k ranks to employ.
:return:
Re-ranked retrieval results.
"""
ranks = | np.zeros(shape=(query.shape[0], database.shape[0])) | numpy.zeros |
"""
Reaction Wheel discipline for CADRE: Reaction Wheel Dynamics component.
"""
from __future__ import print_function, division, absolute_import
from six.moves import range
import numpy as np
from openmdao.api import ExplicitComponent
class ReactionWheelDynamics(ExplicitComponent):
"""
Compute the angular velocity vector of reaction wheel.
"""
def initialize(self):
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
self.options.declare('J_RW', 2.8e-5,
desc="Mass moment of inertia of the reaction wheel.")
def setup(self):
nn = self.options['num_nodes']
J_RW = self.options['J_RW']
# Inputs
self.add_input('w_B', np.zeros((nn, 3)), units='1/s',
desc='Angular velocity vector in body-fixed frame over time')
self.add_input('T_RW', | np.zeros((nn, 3)) | numpy.zeros |
import numpy as np
import logging
import time
class TrainingEnvironment:
""" Class to handle the processing of the game loop."""
def __init__(self,
env,
model,
replay_memory,
input_processor,
phi_length,
frame_skip,
image_size,
training,
epochs,
batches_per_epoch,
reward_clip,
null_op_max,
play_epsilon,
minibatch_size,
save_path,
consecutive_max):
self._env = env
self.model = model
self.replay_memory = replay_memory
self._process_image = input_processor
self.logger = logging.getLogger(__name__)
self.phi_length = phi_length
self.frame_skip = frame_skip
self.image_size = image_size
self.training = training
self.epochs = epochs
self.batches_per_epoch = batches_per_epoch
self.reward_clip = reward_clip
self.null_op_max = null_op_max
self.play_epsilon = play_epsilon
self.minibatch_size = minibatch_size
self.save_path = save_path
self.consecutive_max = consecutive_max
self.reset_recent_frames()
def run(self):
for epoch in range(self.epochs):
start = time.time()
reward, episodes = self.run_epoch(num_steps=self.batches_per_epoch, training=self.training)
# Get model info for reporting
avg_loss, average_q = self.model.get_training_info()
eps = self.model.get_epsilon()
batches = self.model._batch_updates
self.logger.info \
('Epoch: {epoch} | # Episodes: {episodes} | # Batches: {batches} | Average Reward: {reward} | Average Q Value: {avgq} | Average Loss: {loss} | Epsilon: {epsilon} | Elapsed Time: {time} mins'.format(
batches = batches, epoch=epoch, reward=reward, loss=avg_loss, epsilon=eps, episodes=episodes, time=(time.time() - start ) /60., avgq = average_q))
if self.training:
self.save_checkpoint(epoch=epoch)
def reset_recent_frames(self):
self._recent_frames = np.zeros(shape=(self.phi_length, self.image_size[0], self.image_size[1]), dtype=np.int8)
def save_checkpoint(self, epoch):
self.model.save(checkpoint=epoch, model_path = self.save_path)
def render(self):
self._env.render()
def run_episode(self, training=True):
state = self.reset_environment()
is_terminal = False
total_reward = 0
steps = 0
while not is_terminal:
if not training:
self.render()
action = self.get_action(state, training)
next_state, reward, is_terminal = self.step(action)
if training:
clipped_reward = | np.clip(reward ,-self.reward_clip ,self.reward_clip) | numpy.clip |
from __future__ import annotations
from copy import deepcopy
import numpy as np
import torch
from torch import Tensor, nn
from torch.utils import data
from torch.ao.quantization import disable_observer
from torch.ao.quantization.quantize import convert, prepare_qat
from torch.ao.quantization.qconfig import get_default_qat_qconfig
from torchvision import transforms
import torchvision
from .runner import Timer, Accumulator, Animator, show_images
class Fx:
ones = torch.ones
zeros = torch.zeros
tensor = torch.tensor
arange = torch.arange
meshgrid = torch.meshgrid
sin = torch.sin
sinh = torch.sinh
cos = torch.cos
cosh = torch.cosh
tanh = torch.tanh
linspace = torch.linspace
exp = torch.exp
log = torch.log
normal = torch.normal
rand = torch.rand
matmul = torch.matmul
int32 = torch.int32
float32 = torch.float32
concat = torch.cat
stack = torch.stack
abs = torch.abs
eye = torch.eye
numpy = lambda x, *args, **kwargs: x.detach().numpy(*args, **kwargs)
size = lambda x, *args, **kwargs: x.numel(*args, **kwargs)
reshape = lambda x, *args, **kwargs: x.reshape(*args, **kwargs)
to = lambda x, *args, **kwargs: x.to(*args, **kwargs)
reduce_sum = lambda x, *args, **kwargs: x.sum(*args, **kwargs)
argmax = lambda x, *args, **kwargs: x.argmax(*args, **kwargs)
astype = lambda x, *args, **kwargs: x.type(*args, **kwargs)
transpose = lambda x, *args, **kwargs: x.t(*args, **kwargs)
def try_gpu(i=0):
"""Return gpu(i) if exists, otherwise return cpu().
"""
if torch.cuda.device_count() >= i + 1:
return torch.device(f'cuda:{i}')
return torch.device('cpu')
def try_all_gpus():
"""Return all available GPUs, or [cpu(),] if no GPU exists.
"""
devices = [torch.device(f'cuda:{i}')
for i in range(torch.cuda.device_count())]
return devices if devices else [torch.device('cpu')]
class ModuleTool:
    '''Convert inputs to NumPy image format.
    Args:
        inputs: batch of input images
        mean: defaults to the ImageNet mean
        std: defaults to the ImageNet std
        channel: one of ['first', 'last']
'''
def __init__(self,
inputs: Tensor,
channel: str = 'first',
mean: list[float] = [0.485, 0.456, 0.406],
std: list[float] = [0.229, 0.224, 0.225]):
self.inputs = inputs
self.channel = channel
self.mean, self.std = [mean, std]
@property
def images(self):
inputs = self.inputs.cpu().numpy()
if self.channel == 'first':
inputs = inputs.transpose(0, 2, 3, 1)
mean, std = (np.array(x) for x in [self.mean, self.std])
inputs = std * inputs + mean
inputs = | np.clip(inputs, 0, 1) | numpy.clip |
"""
Helper functions for PASTIS.
"""
import glob
import os
import datetime
import importlib
import itertools
import time
from shutil import copy
import sys
from astropy.io import fits
import astropy.units as u
import fpdf
import logging
import logging.handlers
import numpy as np
from PyPDF2 import PdfFileMerger
from pastis.config import CONFIG_PASTIS
log = logging.getLogger()
def write_fits(data, filepath, header=None, metadata=None):
"""
Writes a fits file and adds header and metadata when necessary.
:param data: numpy data (aka image)
:param filepath: path to save the file, include filename.
:param header: astropy hdu.header.
:param metadata: list of MetaDataEntry objects that will get added to header.
:return: filepath
"""
# Make sure file ends with fit or fits.
#if not (filepath.endswith(".fit") or filepath.endswith(".fits")):
# filepath += ".fits"
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
# Create a PrimaryHDU object to encapsulate the data.
hdu = fits.PrimaryHDU(data)
if header is not None:
hdu.header = header
# Add metadata to header.
if metadata is not None:
for entry in metadata:
if len(entry.name_8chars) > 8:
print('Fits Header Keyword: ' + entry.name_8chars +
' is greater than 8 characters and will be truncated.')
if len(entry.comment) > 47:
print('Fits Header comment for ' + entry.name_8chars +
' is greater than 47 characters and will be truncated.')
hdu.header[entry.name_8chars[:8]] = (entry.value, entry.comment)
# Create a HDUList to contain the newly created primary HDU, and write to a new file.
fits.HDUList([hdu])
hdu.writeto(filepath, overwrite=True)
#print('Wrote ' + filepath)
return filepath
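# Minimal usage sketch (illustrative, not part of PASTIS): write a synthetic array to
# a hypothetical path; header and metadata are optional and omitted here.
def _example_write_fits(outpath='/tmp/pastis_example/example.fits'):
    data = np.zeros((16, 16))
    data[8, 8] = 1.0
    return write_fits(data, outpath)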
def write_all_fits_to_cube(path):
"""
Write all fits files in a directory to an image cube.
Directory can *only* contain fits files, and only files that you want in the cube. Subdirectories will be ignored.
:param path: string, path to directory that contains all fits files that should be put into cube; cube gets saved
into that same directory
"""
# Collect all filenames
all_file_names = [fname for fname in os.listdir(path) if os.path.isfile(os.path.join(path, fname))]
# Read all files into list
allfiles = []
for fname in all_file_names:
allfiles.append(fits.getdata(os.path.join(path, fname)))
cube = np.array(allfiles)
write_fits(cube, os.path.join(path, 'psf_cube.fits'))
def circle_mask(im, xc, yc, rcirc):
""" Create a circle on array im centered on xc, yc with radius rcirc; inside circle equals 1."""
x, y = | np.shape(im) | numpy.shape |
import unittest
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from clusterz.algs.kzmedian import (
DistributedKZMedian, BELDistributedKMedian, k_median_my, kz_median, KZMedian, KMedianWrapped
)
class MyTestCase(unittest.TestCase):
def setUp(self):
cluster = np.random.uniform(-1, 1, size=(40, 2))
self.centers_ = np.array([
[0, 30], [0, -30]
])
self.outliers_ = np.array([
[80, 0], [-80, 0]
])
# data set on a single machine
self.X_without_outliers_ = np.vstack(
[self.centers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[0] + np.array([-5, 0]),
cluster + self.centers_[1] + np.array([5, 0]),
cluster + self.centers_[1] + np.array([-5, 0])])
self.X_with_outliers_ = np.vstack(
[self.centers_,
self.outliers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[0] + np.array([-5, 0]),
cluster + self.centers_[1] + | np.array([5, 0]) | numpy.array |
import numpy as np
from Constants import BETS, HOUR, HOUSES, LINK, TEAMS, DECIMALS
def analize_bet(bet):
"""
    Analyzes the value of a bet and the percentage to invest in each option
    in order to achieve that value.
:param bet: List of floats (or something convertible to float). List of possible bets
:return:
List of floats. Percentage to invest in each bet
Float. Cost of winning a unit of money (if positive is a sure bet).
"""
# Transform it to numpy for easier calculation
bet = np.array(bet, dtype=np.float)
# Check the cost of each odd (The amount of money you would need to invest to win an unit of money)
odds_cost = 1/bet
# Transform this cost to a percentage (The percentage that should be invested at each option)
odd_percentage = odds_cost/np.sum(odds_cost)
# Calculate the cost of the bet (The money that would cost to win an euro) [If positive it is a sure bet]
total_euro_cost = 1-np.sum(odds_cost)
return odd_percentage, total_euro_cost
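# Worked example (illustrative only): for a two-outcome market with odds 2.1 and 2.1,
# each option costs 1/2.1 ~= 0.476, the total cost is ~0.952 and the value is
# 1 - 0.952 ~= 0.048 > 0, i.e. a sure bet split 50/50 between the two options.
def _example_sure_bet():
    percentages, value = analize_bet([2.1, 2.1])
    return percentages, value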
def process_information(information):
"""
Takes all the information extracted and prints all the sure bets and the general statistics
    :param information: Dict mapping each sport to a dict of days, each holding the list of bets extracted by the MainScraper
"""
sure_bets, bad_bets = [], []
# For each sport
for sport, days in information.items():
# For each day
for day, bets in days.items():
#For each bet
for bet in bets:
# Get the value of the bet
percentage, value = analize_bet(bet=bet[BETS])
# If is sure
if value > 0:
# Save it as sure and print the alert
sure_bets.append((value, percentage, sport, day, bet))
#print(get_surebet_text(sport=sport, day=day, bet=bet, percentage=percentage, value=value))
# If not
else:
# Add it to the bad bets list
bad_bets.append((value, percentage, sport, day, bet))
# Print the alerts about surebets
alert_about_all_surebets(sure_bets=sure_bets)
# Print the summary of the analysis
print(get_txt_summary(sure_bets=sure_bets, bad_bets=bad_bets))
def alert_about_all_surebets(sure_bets):
"""
    Sorts the detected sure bets by value and prints an alert for each one.
    :param sure_bets: List of (value, percentage, sport, day, bet) tuples for the detected sure bets.
"""
sure_bets = np.array(sure_bets, dtype=[('value', np.float), ('percentage', np.object), ('sport', '<U64'), ('day', '<U64'), ('dict', np.object)])
sure_bets.sort(order='value')
for sure_bet in sure_bets:
print(get_surebet_text(sport=sure_bet['sport'], day=sure_bet['day'], bet=sure_bet['dict'],
percentage=sure_bet['percentage'], value=sure_bet['value']))
def get_best_credible_values(values, min_bet=5, max_bet=80):
all_posible_bets = np.arange(start=min_bet, stop=max_bet, step=0.01, dtype=values.dtype)
all_posible_bets = np.repeat(all_posible_bets, len(values)).reshape(-1, len(values))
values = np.repeat(a=values, repeats=len(all_posible_bets)).reshape(len(values),-1).T
possible_invests = values*all_posible_bets
possible_rounded_invests = np.round(possible_invests)
losses = np.sum( | np.abs(possible_rounded_invests-possible_invests) | numpy.abs |
# ========================
# Stress Tensor Estimation
# ========================
'''
Contributions
-------------
fractoolbox was initiated by <NAME> https://github.com/ICWallis/fractoolbox
as part of Doctoral Research at the University of Auckland that is
supervised by <NAME> https://github.com/ddempsey and
Julie (JR) Rowland, with math/code contributions from <NAME>
https://github.com/edur409.
Licence
-------
fractoolbox is distributed under an Apache 2.0 licence
https://choosealicense.com/licenses/apache-2.0/
'''
import numpy as np
from scipy import integrate
def linear_Sv(maxdepth,obsdepth,density):
"""Magnitude of overburden stress [Sv in MPa] at a given observation depth
Simple integration model that uses single average density and
returns Sv for the observation depth or list of depths.
Args:
maxdepth (float): The maximum depth of the stress model [m]
obsdepth (float or list of floats): Depth(s) where Sv will be returned [m]
density (float): average rock density [kg/m3] which is typically 2200 - 2800
    All args accept float or integer values
Returns:
Sv at obsdepth [MPa] as float or list of floats
"""
depth_model = np.array([0,maxdepth])
density_model = np.array([density,density])
gravity = 9.8
# trapezoid integration with unit conversion from Pa to MPa
Sv_model = (integrate.cumtrapz(density_model * gravity, depth_model, initial=0)) * 1.e-6
# linear interpolation from the Sv model
Sv_obsdepth = np.around(( | np.interp(obsdepth, depth_model, Sv_model) | numpy.interp |
class EmptyError(Exception):
print(Exception)
#==========================================================================================
#
#
#==========================================================================================
def read_proxy_metadata_S1csv(datadir_proxy, datafile_proxy, proxy_region, proxy_resolution, \
proxy_definition):
#==========================================================================================
#
# ... reads metadata worksheet from PAGES2K_DatabaseS1 dataset ...
#
#==========================================================================================
import sys
import numpy as np
from random import sample
# Library needed to read CSV file format
xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
sys.path.append(xlrd_dir)
import xlrd
# Parsing dictionary of proxy definitions
proxy_list = {}; # dict list containing proxy types and associated proxy id's (sites)
sites_assim = {}
sites_eval = {}
proxy_types = list(proxy_definition.keys())
    # check if dealing with "order" digits or not in the definition of proxies
try:
proxy_types_unordered = [i.split(':', 1)[1] for i in list(proxy_definition.keys())]
except:
proxy_types_unordered = proxy_types
for t in proxy_types:
proxy_list[t] = []
sites_assim[t] = []
sites_eval[t] = []
proxy_category = [item.split('_')[0] for item in proxy_types_unordered]
# Define name of file & open
proxy_file = datadir_proxy + '/'+datafile_proxy;
print('Reading metadata file: ', proxy_file)
workbook = xlrd.open_workbook(proxy_file);
# ====================
# Read in the metadata
# ====================
metadata = workbook.sheet_by_name('Metadata');
# Get columns headers
meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
proxy_metadata = []; # dict list containing proxy metadata
for row_index in range(1,metadata.nrows):
d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
for col_index in range(metadata.ncols)};
proxy_metadata.append(d)
# =================================================================
# Restrict to proxy_region and proxy_assim items listed in NAMELIST
# =================================================================
for row_index in range(0,metadata.nrows-1):
if proxy_metadata[row_index]['PAGES 2k Region'] in proxy_region:
if proxy_metadata[row_index]['Archive type'] in proxy_category:
if proxy_metadata[row_index]['Resolution (yr)'] in proxy_resolution:
indt = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
proxy_measurement = [proxy_definition[proxy_types[indt[k]]] for k in range(len(indt))]
indm = [i for i, s in enumerate(proxy_measurement) if proxy_metadata[row_index]['Proxy measurement'] in s]
if indm:
indtype = indt[indm[0]]
# Add chronology ID to appropriate list in dictionary
proxy_list[proxy_types[indtype]].append(str(proxy_metadata[row_index]['PAGES ID']))
return proxy_list
def create_proxy_lists_from_metadata_S1csv(datadir_proxy, datafile_proxy, proxy_region, proxy_resolution, \
proxy_definition, proxy_frac, psm_data, psm_r_crit):
#==========================================================================================
#
# ... reads metadata worksheet from PAGES2K_DatabaseS1 dataset ...
#
#==========================================================================================
import sys
import numpy as np
from random import sample
# Library needed to read CSV file format
xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
sys.path.append(xlrd_dir)
import xlrd
# Parsing dictionary of proxy definitions
proxy_list = {}; # dict list containing proxy types and associated proxy id's (sites)
sites_assim = {}
sites_eval = {}
proxy_types = list(proxy_definition.keys())
proxy_types_unordered = [i.split(':', 1)[1] for i in list(proxy_definition.keys())]
for t in proxy_types:
proxy_list[t] = []
sites_assim[t] = []
sites_eval[t] = []
proxy_category = [item.split('_')[0] for item in proxy_types_unordered]
# Define name of file & open
proxy_file = datadir_proxy + '/'+datafile_proxy;
print('Reading metadata file: ', proxy_file)
workbook = xlrd.open_workbook(proxy_file);
# ====================
# Read in the metadata
# ====================
metadata = workbook.sheet_by_name('Metadata');
# Get columns headers
meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
proxy_metadata = []; # dict list containing proxy metadata
for row_index in range(1,metadata.nrows):
d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
for col_index in range(metadata.ncols)};
proxy_metadata.append(d)
# Restrict to proxy_region and proxy_assim items listed in NAMELIST
for row_index in range(0,metadata.nrows-1):
if proxy_metadata[row_index]['PAGES 2k Region'] in proxy_region:
if proxy_metadata[row_index]['Archive type'] in proxy_category:
if proxy_metadata[row_index]['Resolution (yr)'] in proxy_resolution:
indt = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
proxy_measurement = [proxy_definition[proxy_types[indt[k]]] for k in range(len(indt))]
indm = [i for i, s in enumerate(proxy_measurement) if proxy_metadata[row_index]['Proxy measurement'] in s]
if indm:
indtype = indt[indm[0]]
# Add chronology ID to appropriate list in dictionary
proxy_list[proxy_types[indtype]].append(str(proxy_metadata[row_index]['PAGES ID']))
# =========================================================================
# Filter list to retain sites with PSM calibration correlation > PSM_r_crit
# =========================================================================
if psm_data is not None:
proxy_TypesSites_psm = list(psm_data.keys())
proxy_TypesSites_psm_ok = [t for t in proxy_TypesSites_psm if abs(psm_data[t]['PSMcorrel']) > psm_r_crit]
proxy_list_ok = {}
for t in list(proxy_list.keys()):
proxy = t.split(':', 1)[1]
list_ok = [proxy_TypesSites_psm_ok[k][1] for k in range(len(proxy_TypesSites_psm_ok)) if proxy_TypesSites_psm_ok[k][0] == proxy]
proxy_list_ok[t] = list_ok
else:
proxy_list_ok = proxy_list
# ================================================================
# Create lists of sites to assimilate / keep for recon. evaluation
# ================================================================
if proxy_frac < 1.0:
# List all sites, regardless of proxy type
mergedlist = []
tmp = [proxy_list_ok[x] for x in proxy_list_ok]
nbtype = len(tmp)
for k in range(nbtype):
mergedlist.extend(tmp[k])
nbsites = len(mergedlist)
nbsites_assim = int(nbsites*proxy_frac)
# random selection over entire site list
ind_assim = sample(list(range(0, nbsites)), nbsites_assim)
ind_eval = set(range(0,nbsites)) - set(ind_assim) # list indices of sites not chosen
p_assim = [mergedlist[p] for p in ind_assim]
p_eval = [mergedlist[p] for p in ind_eval]
#ind = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
# Re-populate lists by proxy type
for t in proxy_types:
inda = [i for i, s in enumerate(p_assim) if s in proxy_list_ok[t]]
sites_assim[t] = [p_assim[k] for k in inda]
inde = [i for i, s in enumerate(p_eval) if s in proxy_list_ok[t]]
sites_eval[t] = [p_eval[k] for k in inde]
else:
sites_assim = proxy_list_ok
# leave sites_eval list empty
return sites_assim, sites_eval
def read_proxy_metadata_S1csv_old(datadir_proxy, datafile_proxy, proxy_region, proxy_resolution, \
proxy_type, proxy_measurement):
#==========================================================================================
#
# ... reads metadata worksheet from PAGES2K_DatabaseS1 dataset ...
#
#==========================================================================================
import sys
import numpy as np
# Library needed to read CSV file format
xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
sys.path.append(xlrd_dir)
import xlrd
# Uploading proxy data
proxy_file = datadir_proxy + '/'+datafile_proxy;
print('Reading metadata file: ', proxy_file)
workbook = xlrd.open_workbook(proxy_file);
# Read in the metadata
metadata = workbook.sheet_by_name('Metadata');
# Get columns headers
meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
proxy_metadata = []; # dict list containing proxy metadata
for row_index in range(1,metadata.nrows):
d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
for col_index in range(metadata.ncols)};
proxy_metadata.append(d)
# Restrict to proxy_region and proxy_assim items listed in NAMELIST
proxy_type_to_assim = [];
proxy_id_to_assim = [];
proxy_lat_to_assim = [];
proxy_lon_to_assim = [];
for row_index in range(0,metadata.nrows-1):
if proxy_metadata[row_index]['PAGES 2k Region'] in proxy_region:
if proxy_metadata[row_index]['Archive type'] in proxy_type:
if proxy_metadata[row_index]['Proxy measurement'] in proxy_measurement:
if proxy_metadata[row_index]['Resolution (yr)'] in proxy_resolution:
proxy_id_to_assim.append(proxy_metadata[row_index]['PAGES ID'])
proxy_type_to_assim.append(proxy_metadata[row_index]['Archive type'])
proxy_lat_to_assim.append(proxy_metadata[row_index]['Lat (N)'])
proxy_lon_to_assim.append(proxy_metadata[row_index]['Lon (E)'])
site_list = [str(item) for item in proxy_id_to_assim]; # getting rid of unicode
site_lat = proxy_lat_to_assim
site_lon = proxy_lon_to_assim
return site_list, site_lat, site_lon
def read_proxy_data_S1csv_site(datadir_proxy, datafile_proxy, proxy_site):
#==========================================================================================
#
# ... reads data from a selected site (chronology) in PAGES2K_DatabaseS1 ...
# ... site is passed as argument ...
#
#==========================================================================================
import sys
import numpy as np
# Library needed to read CSV file format
xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
sys.path.append(xlrd_dir)
import xlrd
# Uploading proxy data
proxy_file = datadir_proxy + '/'+datafile_proxy;
#print 'Reading file: ', proxy_file
workbook = xlrd.open_workbook(proxy_file);
# Getting general (number & names of worksheets) info on file content
nb_worksheets = workbook.nsheets;
#worksheet_list = workbook.sheet_names();
worksheet_list = [str(item) for item in workbook.sheet_names()]; # getting rid of unicode
# Create list of worksheet names containing data
worksheet_list_data = worksheet_list;
del worksheet_list[worksheet_list_data.index('ReadMe')]
del worksheet_list[worksheet_list_data.index('Metadata')]
# Read in the metadata
metadata = workbook.sheet_by_name('Metadata');
# Get columns headers
meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
proxy_metadata = []; # dict list containing proxy metadata
for row_index in range(1,metadata.nrows):
d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
for col_index in range(metadata.ncols)};
proxy_metadata.append(d)
# Restrict to proxy_region and proxy_assim items listed in NAMELIST
proxy_type_to_assim = [];
proxy_id_to_assim = [];
proxy_lat_to_assim = [];
proxy_lon_to_assim = [];
for row_index in range(0,metadata.nrows-1):
if proxy_metadata[row_index]['PAGES ID'] in proxy_site:
proxy_id_to_assim.append(proxy_metadata[row_index]['PAGES ID'])
proxy_type_to_assim.append(proxy_metadata[row_index]['Archive type'])
proxy_lat_to_assim.append(proxy_metadata[row_index]['Lat (N)'])
proxy_lon_to_assim.append(proxy_metadata[row_index]['Lon (E)'])
proxy_id_to_assim = [str(item) for item in proxy_id_to_assim]; # getting rid of unicode encoding
proxy_type_to_assim = [str(item) for item in proxy_type_to_assim]; # getting rid of unicode encoding
# ------------------------------------------
# Loop over worksheets containing proxy data
# ------------------------------------------
# Dictionary containing proxy metadata & data
proxy_data = {}
nb_ob = -1
for worksheet in worksheet_list_data:
data = workbook.sheet_by_name(worksheet)
num_cols = data.ncols - 1
# Get columns headers
tmp_headers = [data.cell(0,col_index).value for col_index in range(data.ncols)]
data_headers = [str(item) for item in tmp_headers]; # getting rid of unicode encoding
tmp_refs = [data.cell(1,col_index).value for col_index in range(data.ncols)]
data_refs = [str(item) for item in tmp_refs] # getting rid of unicode encoding
data_headers[0] = data_refs[0]; # correct tag for years
# Column indices of proxy id's in proxy_id_to_assim list
col_assim = [i for i, item in enumerate(data_headers) if item in proxy_id_to_assim]
if col_assim: # if non-empty list
for row_index in range(2,data.nrows):
for col_index in col_assim:
found = False
# associate metadata to data record
for meta_row_index in range(1,len(proxy_metadata)):
if proxy_metadata[meta_row_index]['PAGES ID'] == data_headers[col_index]:
found = True
typedat = proxy_metadata[meta_row_index]['Archive type']
measure = proxy_metadata[meta_row_index]['Proxy measurement']
resolution = proxy_metadata[meta_row_index]['Resolution (yr)']
lat = proxy_metadata[meta_row_index]['Lat (N)']
lon = proxy_metadata[meta_row_index]['Lon (E)']
alt = 0.0 # no altitude info in data file
if lon < 0:
lon = 360 + lon
if found:
if data.cell(row_index, col_index).value: # only keep those with non-empty values
nb_ob = nb_ob + 1
proxy_data[nb_ob] = {}
proxy_data[nb_ob]['id'] = data_headers[col_index]
proxy_data[nb_ob]['type'] = str(typedat)
proxy_data[nb_ob]['meas'] = str(measure)
proxy_data[nb_ob]['resol'] = resolution
proxy_data[nb_ob]['lat'] = lat
proxy_data[nb_ob]['lon'] = lon
proxy_data[nb_ob]['alt'] = alt
proxy_data[nb_ob]['time'] = data.cell(row_index, 0).value
proxy_data[nb_ob]['value'] = data.cell(row_index, col_index).value
id = proxy_data[0]['id']
lat = proxy_data[0]['lat']
lon = proxy_data[0]['lon']
alt = proxy_data[0]['alt']
# proxy time series
time = [proxy_data[k]['time'] for k in range(0,len(proxy_data))]
value = [proxy_data[k]['value'] for k in range(0,len(proxy_data))]
return id, lat, lon, alt, time, value # could add more output here as we develop further
#return proxy_data
def read_proxy_data_S1csv(self, datadir_proxy, datafile_proxy, proxy_region, proxy_type, proxy_measurement):
#==========================================================================================
#
# ... reads data from all sites (chronologies) in PAGES2K_DatabaseS1 dataset meeting
# selection criteria from NAMELIST ...
#
#==========================================================================================
import sys
import numpy as np
# Library needed to read CSV file format
xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
sys.path.append(xlrd_dir)
import xlrd
# Uploading proxy data
proxy_file = datadir_proxy + '/'+datafile_proxy;
print('Reading file: ', proxy_file)
workbook = xlrd.open_workbook(proxy_file);
# Getting general (number & names of worksheets) info on file content
nb_worksheets = workbook.nsheets;
#worksheet_list = workbook.sheet_names();
worksheet_list = [str(item) for item in workbook.sheet_names()]; # getting rid of unicode
# Create list of worksheet names containing the data
worksheet_list_data = worksheet_list;
del worksheet_list[worksheet_list_data.index('ReadMe')]
del worksheet_list[worksheet_list_data.index('Metadata')]
# Read in the metadata
metadata = workbook.sheet_by_name('Metadata');
# Get columns headers
meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
proxy_metadata = []; # dict list containing proxy metadata
for row_index in range(1,metadata.nrows):
d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
for col_index in range(metadata.ncols)};
proxy_metadata.append(d)
# Restrict to proxy_region and proxy_assim items listed in NAMELIST
proxy_type_to_assim = [];
proxy_id_to_assim = [];
proxy_lat_to_assim = [];
proxy_lon_to_assim = [];
for row_index in range(0,metadata.nrows-1):
if proxy_metadata[row_index]['PAGES 2k Region'] in proxy_region:
if proxy_metadata[row_index]['Archive type'] in proxy_type:
if proxy_metadata[row_index]['Proxy measurement'] in proxy_measurement:
proxy_id_to_assim.append(proxy_metadata[row_index]['PAGES ID'])
proxy_type_to_assim.append(proxy_metadata[row_index]['Archive type'])
proxy_lat_to_assim.append(proxy_metadata[row_index]['Lat (N)'])
proxy_lon_to_assim.append(proxy_metadata[row_index]['Lon (E)'])
# Loop over worksheets containing proxy data
# dictionary containing proxy metadata & data
proxy_data = {}
nb_ob = -1
for worksheet in worksheet_list_data:
#print 'worksheet: ', worksheet
data = workbook.sheet_by_name(worksheet)
num_cols = data.ncols - 1
# Get columns headers
tmp_headers = [data.cell(0,col_index).value for col_index in range(data.ncols)]
data_headers = [str(item) for item in tmp_headers]; # getting rid of unicode encoding
tmp_refs = [data.cell(1,col_index).value for col_index in range(data.ncols)]
data_refs = [str(item) for item in tmp_refs] # getting rid of unicode encoding
data_headers[0] = data_refs[0]; # correct tag for years
# Column indices of proxy id's in proxy_id_to_assim list
col_assim = [i for i, item in enumerate(data_headers) if item in proxy_id_to_assim]
if col_assim: # if non-empty list
for row_index in range(2,data.nrows):
for col_index in col_assim:
found = False
# associate metadata to data record
for meta_row_index in range(1,len(proxy_metadata)):
if proxy_metadata[meta_row_index]['PAGES ID'] == data_headers[col_index]:
found = True
typedat = proxy_metadata[meta_row_index]['Archive type']
measure = proxy_metadata[meta_row_index]['Proxy measurement']
resolution = proxy_metadata[meta_row_index]['Resolution (yr)']
lat = proxy_metadata[meta_row_index]['Lat (N)']
lon = proxy_metadata[meta_row_index]['Lon (E)']
alt = 0.0 # no altitude info in data file
if found:
if data.cell(row_index, col_index).value: # only keep those with non-empty values
nb_ob = nb_ob + 1
proxy_data[nb_ob] = {}
proxy_data[nb_ob]['id'] = data_headers[col_index]
proxy_data[nb_ob]['type'] = str(typedat)
proxy_data[nb_ob]['meas'] = str(measure)
proxy_data[nb_ob]['resol'] = resolution
proxy_data[nb_ob]['lat'] = lat
proxy_data[nb_ob]['lon'] = lon
proxy_data[nb_ob]['alt'] = alt
proxy_data[nb_ob]['time'] = data.cell(row_index, 0).value
proxy_data[nb_ob]['value'] = data.cell(row_index, col_index).value
id = [proxy_data[k]['id'] for k in range(0,len(proxy_data))]
lat = [proxy_data[k]['lat'] for k in range(0,len(proxy_data))]
lon = [proxy_data[k]['lon'] for k in range(0,len(proxy_data))]
alt = [proxy_data[k]['alt'] for k in range(0,len(proxy_data))]
time = [proxy_data[k]['time'] for k in range(0,len(proxy_data))]
value = [proxy_data[k]['value'] for k in range(0,len(proxy_data))]
return id, lat, lon, alt, time, value # should add more output here as we develop further
#return proxy_data
#==========================================================================================
#
#
#==========================================================================================
# =========================================================================================
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
# =========================================================================================
def create_proxy_lists_from_metadata_NCDC(datadir_proxy, datafile_proxy, proxy_resolution, \
proxy_definition, proxy_frac):
#==========================================================================================
#
# ... reads metadata worksheet for NCDC formatted proxy dataset ...
#
#==========================================================================================
import sys
import numpy as np
from random import sample
# NEED TO THINK OF SOMETHING ELSE HERE... ... ... ... ... ... ... ... ...
# ... provide this library as part of LMR distribution?
# Library needed to read CSV file format
xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
sys.path.append(xlrd_dir)
import xlrd
# Parsing dictionary of proxy definitions
proxy_list = {}; # dict list containing proxy types and associated proxy id's (sites)
sites_assim = {}
sites_eval = {}
proxy_types = list(proxy_definition.keys())
proxy_types_unordered = [i.split(':', 1)[1] for i in list(proxy_definition.keys())]
for t in proxy_types:
proxy_list[t] = []
sites_assim[t] = []
sites_eval[t] = []
proxy_category = [item.split('_')[0] for item in proxy_types_unordered]
# Define name of file & open
proxy_file = datadir_proxy + '/'+datafile_proxy;
print('Reading metadata file: ', proxy_file)
workbook = xlrd.open_workbook(proxy_file);
# ====================
# Read in the metadata
# ====================
metadata = workbook.sheet_by_name('Master Metadata File');
# Get columns headers
meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
proxy_metadata = []; # dict list containing proxy metadata
for row_index in range(1,metadata.nrows):
d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
for col_index in range(metadata.ncols)};
proxy_metadata.append(d)
# =================================================================
# Restrict to proxy_assim items listed in NAMELIST
# =================================================================
for row_index in range(0,metadata.nrows-1):
if proxy_metadata[row_index]['Archive'] in proxy_category:
if proxy_metadata[row_index]['Resolution'] in proxy_resolution:
indt = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive'] in s]
proxy_measurement = [proxy_definition[proxy_types[indt[k]]] for k in range(len(indt))]
l1 = proxy_metadata[row_index]['Variable Short Names'].split(",")
l2 = [item.strip("[").strip("]").strip("'").strip().strip("'") for item in l1] # clean the crud...
l3 = [str(l2[k]) for k in range(len(l2))]
# Common elements in lists?
for indm in range(len(proxy_measurement)):
common_set = set(l3)&set(proxy_measurement[indm])
if common_set: # if common element has been found
indtype = indt[indm]
# Add chronology ID to appropriate list in dictionary
# Do a check on consistency between 'Unique Identifier' & 'Filename.txt' ... sometimes doesn't match!
siteid_from_filename = proxy_metadata[row_index]['Filename.txt'][:-4] # strip the '.txt'
if str(proxy_metadata[row_index]['Unique Identifier']) != siteid_from_filename:
print('Filename & Unique Identifier DO NOT MATCH: using filename instead ...', siteid_from_filename, \
'vs', str(proxy_metadata[row_index]['Unique Identifier']))
proxy_list[proxy_types[indtype]].append(str(siteid_from_filename))
else:
proxy_list[proxy_types[indtype]].append(str(proxy_metadata[row_index]['Unique Identifier']))
# Create lists of sites to assimilate / keep for recon. evaluation
if proxy_frac < 1.0:
# List all sites, regardless of proxy type
mergedlist = []
tmp = [proxy_list[x] for x in proxy_list]
nbtype = len(tmp)
for k in range(nbtype):
mergedlist.extend(tmp[k])
nbsites = len(mergedlist)
nbsites_assim = int(nbsites*proxy_frac)
# random selection over merged site list
ind_assim = sample(list(range(0, nbsites)), nbsites_assim)
ind_eval = set(range(0,nbsites)) - set(ind_assim) # list indices of sites not chosen
p_assim = [mergedlist[p] for p in ind_assim]
p_eval = [mergedlist[p] for p in ind_eval]
#ind = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
# Re-populate lists by proxy type
for t in proxy_types:
inda = [i for i, s in enumerate(p_assim) if s in proxy_list[t]]
sites_assim[t] = [p_assim[k] for k in inda]
inde = [i for i, s in enumerate(p_eval) if s in proxy_list[t]]
sites_eval[t] = [p_eval[k] for k in inde]
else:
sites_assim = proxy_list
# leave sites_eval list empty
# print ' '
# for t in proxy_types:
# print t, proxy_list[t]
# print ' '
print('Assim:', sites_assim)
print(' ')
print('Eval:', sites_eval)
return sites_assim, sites_eval
# =========================================================================================
def colonReader(string, fCon, fCon_low, end):
'''This function seeks a specified string (or list of strings) within
the transcribed file fCon (lowercase version fCon_low) until a specified
character (typically end of the line) is found.x
If a list of strings is provided, make sure they encompass all possibilities
From <NAME> (Univ. of Southern California)
'''
if isinstance(string, str):
lstr = string + ': ' # append the annoying stuff
Index = fCon_low.find(lstr)
Len = len(lstr)
if Index != -1:
endlIndex = fCon_low[Index:].find(end)
rstring = fCon[Index+Len:Index+endlIndex] # returned string
if rstring[-1:] == '\r': # strip the '\r' character if it appears
rstring = rstring[:-1]
return rstring.strip()
else:
#print "Error: property " + string + " not found"
return ""
else:
num_str = len(string)
rstring = "" # initialize returned string
for k in range(0,num_str): # loop over possible strings
lstr = string[k] + ': ' # append the annoying stuff
Index = fCon_low.find(lstr)
Len = len(lstr)
if Index != -1:
endlIndex = fCon_low[Index:].find(end)
rstring = fCon[Index+Len:Index+endlIndex]
if rstring[-1:] == '\r': # strip the '\r' character if it appears
rstring = rstring[:-1]
if rstring == "":
#print "Error: property " + string[0] + " not found"
return ""
else:
return rstring.strip()
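# Illustrative sketch (not part of the original parser): colonReader applied to a toy
# header block; the field text below is hypothetical.
def _example_colonReader():
    fcon = '# Archive: Tree Rings\n# Site_Name: Example Site\n'
    # returns 'Tree Rings'
    return colonReader('archive', fcon, fcon.lower(), '\n')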
# =========================================================================================
def read_proxy_data_NCDCtxt_site(datadir, site, measurement):
#==========================================================================================
# Purpose: Reads data from a selected site (chronology) in NCDC proxy dataset
#
# Input :
# - datadir : Directory where proxy data files are located.
# - site : Site ID (ex. 00aust01a)
# - measurement : List of possible proxy measurement labels for specific proxy type
# (ex. ['d18O','d18o','d18o_stk','d18o_int','d18o_norm'] for delta 18 oxygen isotope
# measurements)
#
# Returns :
# - id : Site id read from the data file
# - lat/lon : latitude & longitude of the site
# - alt : Elevation of the site
# - time : Array containing the time of uploaded data
# - value : Array of uploaded proxy data
#
# Author(s): <NAME>, Univ. of Washington, Dept. of Atmospheric Sciences
# based on "ncdc_file_parser.py" code from <NAME>
# (Univ. of Southern California)
#
# Date : March 2015
#
# Revision : None
#
#==========================================================================================
import os
import numpy as np
# Possible header definitions of time in data files ...
time_defs = ['age','Age_AD','age_AD','age_AD_ass','age_AD_int','Midpt_year',\
'age_yb1950','yb_1950','yrb_1950',\
'yb_1989','age_yb1989',\
'yr_b2k','yb_2k','ky_b2k','kyb_2k','kab2k','ka_b2k','ky_BP','kyr_BP','ka_BP','age_kaBP',\
'yr_BP','calyr_BP','Age(yrBP)','age_calBP']
filename = datadir+'/'+site+'.txt'
if os.path.isfile(filename):
print('File:', filename)
# Define root string for filename
file_s = filename.replace(" ", '_') # strip all whitespaces if present
fileroot = '_'.join(file_s.split('.')[:-1])
# Open the file and port content to a string object
filein = open(filename,'U') # use the "universal newline mode" (U) to handle DOS formatted files
fileContent = filein.read()
fileContent_low = fileContent.lower()
# Initialize empty dictionary
d = {}
# Assign default values to some metadata
d['ElevationUnit'] = 'm'
d['TimeUnit'] = 'y_ad'
# note: 8240/2030 ASCII code for "permil"
# ===========================================================================
# Extract metadata from file
# ===========================================================================
try:
# 'Archive' is the proxy type
d['Archive'] = colonReader('archive', fileContent, fileContent_low, '\n')
# Other info
d['Title'] = colonReader('study_name', fileContent, fileContent_low, '\n')
investigators = colonReader('investigators', fileContent, fileContent_low, '\n')
d['Investigators'] = investigators.replace(';',' and') # take out the ; so that turtle doesn't freak out.
d['PubDOI'] = colonReader('doi', fileContent, fileContent_low, '\n')
d['SiteName'] = colonReader('site_name', fileContent, fileContent_low, '\n')
str_lst = ['northernmost_latitude', 'northernmost latitude'] # documented instances of this field property
d['NorthernmostLatitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
str_lst = ['southernmost_latitude', 'southernmost latitude'] # documented instances of this field property
d['SouthernmostLatitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
str_lst = ['easternmost_longitude', 'easternmost longitude'] # documented instances of this field property
d['EasternmostLongitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
str_lst = ['westernmost_longitude', 'westernmost longitude'] # documented instances of this field property
d['WesternmostLongitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
elev = colonReader('elevation', fileContent, fileContent_low, '\n')
if elev != 'nan' and len(elev)>0:
elev_s = elev.split(' ')
d['Elevation'] = float(''.join(c for c in elev_s[0] if c.isdigit())) # to only keep digits ...
else:
d['Elevation'] = float('NaN')
d['CollectionName'] = colonReader('collection_name', fileContent, fileContent_low, '\n')
d['EarliestYear'] = float(colonReader('earliest_year', fileContent, fileContent_low, '\n'))
d['MostRecentYear'] = float(colonReader('most_recent_year', fileContent, fileContent_low, '\n'))
d['TimeUnit'] = colonReader('time_unit', fileContent, fileContent_low, '\n')
if not d['TimeUnit']:
d['TimeUnit'] = colonReader('time unit', fileContent, fileContent_low, '\n')
except EmptyError as e:
print(e)
# ===========================================================================
# Extract information from the "Variables" section of the file
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Variables:')
if sline_begin == -1:
sline_begin = fileContent.find('# Variables')
# Find end of block
sline_end = fileContent.find('# Data:')
if sline_end == -1:
sline_end = fileContent.find('# Data\n')
VarDesc = fileContent[sline_begin:sline_end].splitlines()
nvar = 0 # counter for variable number
for line in VarDesc: # handle all the NCDC convention changes
# (TODO: more clever/general exception handling)
if line and line[0] != '' and line[0] != ' ' and line[0:2] != '#-' and line[0:2] != '# ' and line != '#':
#print line
nvar = nvar + 1
line2 = line.replace('\t',',') # clean up
sp_line = line2.split(',') # split line along commas
if len(sp_line) < 9:
continue
else:
d['DataColumn' + format(nvar, '02') + '_ShortName'] = sp_line[0].strip('#').strip(' ')
d['DataColumn' + format(nvar, '02') + '_LongName'] = sp_line[1]
d['DataColumn' + format(nvar, '02') + '_Material'] = sp_line[2]
d['DataColumn' + format(nvar, '02') + '_Uncertainty'] = sp_line[3]
d['DataColumn' + format(nvar, '02') + '_Units'] = sp_line[4]
d['DataColumn' + format(nvar, '02') + '_Seasonality'] = sp_line[5]
d['DataColumn' + format(nvar, '02') + '_Archive'] = sp_line[6]
d['DataColumn' + format(nvar, '02') + '_Detail'] = sp_line[7]
d['DataColumn' + format(nvar, '02') + '_Method'] = sp_line[8]
d['DataColumn' + format(nvar, '02') + '_CharOrNum'] = sp_line[9].strip(' ')
# ===========================================================================
# Extract the data from the "Data" section of the file
# ===========================================================================
# Find line number at beginning of data block
sline = fileContent.find('# Data:')
if sline == -1:
sline = fileContent.find('# Data\n')
fileContent_datalines = fileContent[sline:].splitlines()
start_line_index = 0
line_nb = 0
for line in fileContent_datalines: # skip lines without actual data
#print line
if not line or line[0]=='#' or line[0] == ' ':
start_line_index += 1
else:
start_line_index2 = line_nb
break
line_nb +=1
#print start_line_index, start_line_index2
# Extract column descriptions (headers) of the data matrix
DataColumn_headers = fileContent_datalines[start_line_index].splitlines()[0].split('\t')
# Strip possible blanks in column headers
DataColumn_headers = [item.strip() for item in DataColumn_headers]
nc = len(DataColumn_headers)
#print '-:' + str(nvar) + ' variables identified in metadata'
#print '-:' + str(nc) + ' columns in data matrix'
# Which column contains the important data (time & proxy values) to be extracted?
time_list = []
data_list = []
# Time
TimeColumn_ided = False
TimeColumn_tag = list(set(DataColumn_headers).intersection(time_defs))
if len(TimeColumn_tag) > 0:
if len(TimeColumn_tag) == 1: # single match -> ok
time_col_index = DataColumn_headers.index(', '.join(TimeColumn_tag))
TimeColumn_ided = True
else:
print('TimeColumn: More than one match ...do what then?')
# Proxy data
DataColumn_ided = False
DataColumn_tag = list(set(DataColumn_headers).intersection(measurement))
if len(DataColumn_tag) > 0:
if len(DataColumn_tag) == 1: # single match -> ok
data_col_index = DataColumn_headers.index(', '.join(DataColumn_tag))
DataColumn_ided = True
else:
print('DataColumn: More than one match ...do what then?')
print('Taking first one...')
DataColumn_tag.remove(DataColumn_tag[1])
data_col_index = DataColumn_headers.index(', '.join(DataColumn_tag))
DataColumn_ided = True
# If both columns identified, then load arrays with the data
if TimeColumn_ided and DataColumn_ided:
datalines = fileContent_datalines[start_line_index+1:] # +1 to skip 1st line (header line)
for line in datalines:
datalist = line.split()
# if line not empty
if datalist:
try:
# If data not empty, not NaN & only digits -> OK then fill lists
if datalist and datalist[time_col_index] and datalist[data_col_index] and \
is_number(datalist[data_col_index]) and datalist[data_col_index].lower() != 'nan':
time_list.append(datalist[time_col_index])
data_list.append(datalist[data_col_index])
except:
continue
# transform to numpy arrays => proxy time series
time = np.asarray(time_list,dtype=np.float64)
value = np.asarray(data_list,dtype=np.float64)
# proxy identifier and geo location
id = d['CollectionName']
alt = d['Elevation']
            # Something crude in assignment of lat/lon:
if d['NorthernmostLatitude'] != d['SouthernmostLatitude']:
lat = (d['NorthernmostLatitude'] + d['SouthernmostLatitude'])/2.0
else:
lat = d['NorthernmostLatitude']
if d['EasternmostLongitude'] != d['WesternmostLongitude']:
lon = (d['EasternmostLongitude'] + d['WesternmostLongitude'])/2.0
else:
lon = d['EasternmostLongitude']
# Modify "time" array into "years AD" if not already
#print 'TimeUnit:', d['TimeUnit']
tdef = d['TimeUnit']
tdef_parsed = tdef.split('_')
if len(tdef_parsed) == 2 and tdef_parsed[0] and tdef_parsed[1]:
# tdef has expected structure ...
if tdef_parsed[0] == 'yb' and is_number(tdef_parsed[1]):
time = float(tdef_parsed[1]) - time
elif tdef_parsed[0] == 'kyb' and is_number(tdef_parsed[1]):
time = float(tdef_parsed[1]) - 1000.0*time
elif tdef_parsed[0] == 'y' and tdef_parsed[1] == 'ad':
pass # do nothing, time already in years_AD
else:
print('Unrecognized time definition. Returning empty arrays!')
time = np.asarray([],dtype=np.float64)
value = np.asarray([],dtype=np.float64)
else:
print('*** WARNING *** Unexpected time definition: string has more elements than expected. Returning empty arrays!')
time = np.asarray([],dtype=np.float64)
value = | np.asarray([],dtype=np.float64) | numpy.asarray |
import math
import warnings
import numpy as np
from scipy import stats
from statsmodels.tsa import stattools
from scipy.interpolate import interp1d
from scipy.stats import chi2
from .Base import Base
from .features.irregular_autoregressive import IAR_phi, CIAR_phiR_beta
from .features.structure_function import SF_ML_amplitude, SF_ML_gamma
from .features.damped_random_walk import GP_DRW_sigma, GP_DRW_tau
from .features.harmonics import Harmonics
from .features.periods import Period_fit_v2, PeriodPowerRate, PeriodLS_v2
from .features.conditional_autoregressive import CAR_mean, CAR_sigma, CAR_tau
class Amplitude(Base):
"""Half the difference between the maximum and the minimum magnitude"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
n = len(magnitude)
sorted_mag = np.sort(magnitude)
return (np.median(sorted_mag[int(-math.ceil(0.05 * n)):]) -
np.median(sorted_mag[0:int(math.ceil(0.05 * n))])) / 2.0
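# Worked example (illustrative): for 100 magnitudes evenly spaced from 10 to 20 the
# medians of the top and bottom 5% are ~19.8 and ~10.2, giving an amplitude of ~4.8.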
class Rcs(Base):
"""Range of cumulative sum"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sigma = np.std(magnitude)
N = len(magnitude)
m = np.mean(magnitude)
s = np.cumsum(magnitude - m) * 1.0 / (N * sigma)
R = np.max(s) - np.min(s)
return R
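# Illustrative note (not part of the original code): Rcs is the range of the
# normalised cumulative sum S_i = (1 / (N * sigma)) * sum_{j<=i} (m_j - mean), so a
# light curve with a monotonic trend yields a larger R than pure noise of the same
# variance.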
class StetsonK(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'error']
def fit(self, data):
magnitude = data[0]
error = data[2]
n = len(magnitude)
mean_mag = (np.sum(magnitude/(error*error))/np.sum(1.0 / (error * error)))
sigmap = (np.sqrt(n * 1.0 / (n - 1)) *
(magnitude - mean_mag) / error)
k = (1 / np.sqrt(n * 1.0) * np.sum(np.abs(sigmap)) / np.sqrt(np.sum(sigmap ** 2)))
return k
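# Illustrative note (not part of the original code): for purely Gaussian residuals the
# expected value of Stetson K is sqrt(2 / pi) ~= 0.798.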
class Meanvariance(Base):
"""variability index"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
return np.std(magnitude) / np.mean(magnitude)
class Autocor_length(Base):
def __init__(self, shared_data, lags=100):
super().__init__(shared_data)
self.Data = ['magnitude']
self.nlags = lags
def fit(self, data):
magnitude = data[0]
ac = stattools.acf(magnitude, nlags=self.nlags, fft=False)
k = next((index for index, value in
enumerate(ac) if value < np.exp(-1)), None)
while k is None:
if self.nlags > len(magnitude):
warnings.warn('Setting autocorrelation length as light curve length')
return len(magnitude)
self.nlags = self.nlags + 100
ac = stattools.acf(magnitude, nlags=self.nlags, fft=False)
k = next((index for index, value in
enumerate(ac) if value < np.exp(-1)), None)
return k
class SlottedA_length(Base):
def __init__(self, shared_data, T=-99):
"""
lc: MACHO lightcurve in a pandas DataFrame
k: lag (default: 1)
T: tau (slot size in days. default: 4)
"""
super().__init__(shared_data)
self.Data = ['magnitude', 'time']
SlottedA_length.SAC = []
self.T = T
def slotted_autocorrelation(self, data, time, T, K,
second_round=False, K1=100):
slots = np.zeros((K, 1))
i = 1
# make time start from 0
time = time - np.min(time)
# subtract mean from mag values
m = np.mean(data)
data = data - m
prod = np.zeros((K, 1))
pairs = np.subtract.outer(time, time)
pairs[np.tril_indices_from(pairs)] = 10000000
ks = np.int64(np.floor(np.abs(pairs) / T + 0.5))
# We calculate the slotted autocorrelation for k=0 separately
idx = np.where(ks == 0)
prod[0] = ((sum(data ** 2) + sum(data[idx[0]] *
data[idx[1]])) / (len(idx[0]) + len(data)))
slots[0] = 0
# We calculate it for the rest of the ks
if second_round is False:
for k in np.arange(1, K):
idx = np.where(ks == k)
if len(idx[0]) != 0:
prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
slots[i] = k
i = i + 1
else:
prod[k] = np.infty
else:
for k in np.arange(K1, K):
idx = np.where(ks == k)
if len(idx[0]) != 0:
prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
slots[i - 1] = k
i = i + 1
else:
prod[k] = np.infty
np.trim_zeros(prod, trim='b')
slots = np.trim_zeros(slots, trim='b')
return prod / prod[0], np.int64(slots).flatten()
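    # Note on the method above: products of magnitude pairs are binned
    # ("slotted") by their time separation in units of the slot size T, giving
    # an autocorrelation estimate for irregularly sampled light curves; slot 0
    # is normalised separately so the returned curve starts at 1.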
def fit(self, data):
magnitude = data[0]
time = data[1]
N = len(time)
if self.T == -99:
deltaT = time[1:] - time[:-1]
sorted_deltaT = np.sort(deltaT)
self.T = sorted_deltaT[int(N * 0.05)+1]
K = 100
[SAC, slots] = self.slotted_autocorrelation(magnitude, time, self.T, K)
SAC2 = SAC[slots]
SlottedA_length.autocor_vector = SAC2
k = next((index for index, value in
enumerate(SAC2) if value < np.exp(-1)), None)
while k is None:
K = K+K
if K > (np.max(time) - np.min(time)) / self.T:
break
else:
[SAC, slots] = self.slotted_autocorrelation(magnitude,
time, self.T, K,
second_round=True,
K1=K/2)
SAC2 = SAC[slots]
k = next((index for index, value in
enumerate(SAC2) if value < np.exp(-1)), None)
return slots[k] * self.T
def getAtt(self):
return SlottedA_length.autocor_vector
class StetsonK_AC(SlottedA_length):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'error']
def fit(self, data):
try:
a = StetsonK_AC(self.shared_data)
autocor_vector = a.getAtt()
N_autocor = len(autocor_vector)
sigmap = (np.sqrt(N_autocor * 1.0 / (N_autocor - 1)) *
(autocor_vector - np.mean(autocor_vector)) /
np.std(autocor_vector))
K = (1 / np.sqrt(N_autocor * 1.0) *
np.sum(np.abs(sigmap)) / np.sqrt(np.sum(sigmap ** 2)))
return K
except:
print("error: please run SlottedA_length first to generate values for StetsonK_AC ")
class StetsonL(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'error', 'magnitude2', 'error2']
def fit(self, data):
aligned_magnitude = data[4]
aligned_magnitude2 = data[5]
aligned_error = data[7]
aligned_error2 = data[8]
N = len(aligned_magnitude)
mean_mag = (np.sum(aligned_magnitude/(aligned_error*aligned_error)) /
np.sum(1.0 / (aligned_error * aligned_error)))
mean_mag2 = (np.sum(aligned_magnitude2/(aligned_error2*aligned_error2)) /
np.sum(1.0 / (aligned_error2 * aligned_error2)))
sigmap = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude[:N] - mean_mag) /
aligned_error)
sigmaq = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude2[:N] - mean_mag2) /
aligned_error2)
sigma_i = sigmap * sigmaq
J = (1.0 / len(sigma_i) *
np.sum(np.sign(sigma_i) * np.sqrt(np.abs(sigma_i))))
K = (1 / np.sqrt(N * 1.0) *
np.sum(np.abs(sigma_i)) / np.sqrt(np.sum(sigma_i ** 2)))
return J * K / 0.798
class Con(Base):
"""Index introduced for selection of variable starts from OGLE database.
To calculate Con, we counted the number of three consecutive measurements
that are out of 2sigma range, and normalized by N-2
Pavlos not happy
"""
def __init__(self, shared_data, consecutiveStar=3):
super().__init__(shared_data)
self.Data = ['magnitude']
self.consecutiveStar = consecutiveStar
def fit(self, data):
magnitude = data[0]
N = len(magnitude)
if N < self.consecutiveStar:
return 0
sigma = np.std(magnitude)
m = np.mean(magnitude)
count = 0
for i in range(N - self.consecutiveStar + 1):
flag = 0
for j in range(self.consecutiveStar):
if (magnitude[i + j] > m + 2 * sigma
or magnitude[i + j] < m - 2 * sigma):
flag = 1
else:
flag = 0
break
if flag:
count = count + 1
return count * 1.0 / (N - self.consecutiveStar + 1)
class Color(Base):
"""Average color for each MACHO lightcurve
mean(B1) - mean(B2)
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'magnitude2']
def fit(self, data):
magnitude = data[0]
magnitude2 = data[3]
return np.mean(magnitude) - np.mean(magnitude2)
class Beyond1Std(Base):
"""Percentage of points beyond one st. dev. from the weighted
(by photometric errors) mean
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'error']
def fit(self, data):
magnitude = data[0]
error = data[2]
n = len(magnitude)
weighted_mean = np.average(magnitude, weights=1 / error ** 2)
# Standard deviation with respect to the weighted mean
var = sum((magnitude - weighted_mean) ** 2)
std = np.sqrt((1.0 / (n - 1)) * var)
count = np.sum(np.logical_or(magnitude > weighted_mean + std,
magnitude < weighted_mean - std))
return float(count) / n
class SmallKurtosis(Base):
"""Small sample kurtosis of the magnitudes.
See http://www.xycoon.com/peakedness_small_sample_test_1.htm
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
n = len(magnitude)
mean = np.mean(magnitude)
std = np.std(magnitude)
S = sum(((magnitude - mean) / std) ** 4)
c1 = float(n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))
c2 = float(3 * (n - 1) ** 2) / ((n - 2) * (n - 3))
return c1 * S - c2
class Std(Base):
"""Standard deviation of the magnitudes"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
return np.std(magnitude)
class Skew(Base):
"""Skewness of the magnitudes"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
return stats.skew(magnitude)
class StetsonJ(Base):
"""Stetson (1996) variability index, a robust standard deviation"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'error', 'magnitude2', 'error2']
#lc fields are [data, mjd, error, second_data, aligned_data, aligned_second_data, aligned_mjd]
def fit(self, data):
aligned_magnitude = data[4]
aligned_magnitude2 = data[5]
aligned_error = data[7]
aligned_error2 = data[8]
N = len(aligned_magnitude)
mean_mag = (np.sum(aligned_magnitude/(aligned_error*aligned_error)) /
np.sum(1.0 / (aligned_error * aligned_error)))
mean_mag2 = (np.sum(aligned_magnitude2 / (aligned_error2*aligned_error2)) /
np.sum(1.0 / (aligned_error2 * aligned_error2)))
sigmap = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude[:N] - mean_mag) /
aligned_error)
sigmaq = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude2[:N] - mean_mag2) /
aligned_error2)
sigma_i = sigmap * sigmaq
J = (1.0 / len(sigma_i) * np.sum(np.sign(sigma_i) *
np.sqrt(np.abs(sigma_i))))
return J
class MaxSlope(Base):
"""
Examining successive (time-sorted) magnitudes, the maximal first difference
(value of delta magnitude over delta time)
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time']
def fit(self, data):
magnitude = data[0]
time = data[1]
slope = np.abs(magnitude[1:] - magnitude[:-1]) / (time[1:] - time[:-1])
        return np.max(slope)
class MedianAbsDev(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
median = np.median(magnitude)
devs = (abs(magnitude - median))
return np.median(devs)
class MedianBRP(Base):
"""Median buffer range percentage
Fraction (<= 1) of photometric points within amplitude/10
of the median magnitude
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
median = np.median(magnitude)
amplitude = (np.max(magnitude) - np.min(magnitude)) / 10
n = len(magnitude)
count = np.sum(np.logical_and(magnitude < median + amplitude,
magnitude > median - amplitude))
return float(count) / n
class PairSlopeTrend(Base):
"""
Considering the last 30 (time-sorted) measurements of source magnitude,
the fraction of increasing first differences minus the fraction of
decreasing first differences.
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
data_last = magnitude[-30:]
return (float(len(np.where(np.diff(data_last) > 0)[0]) -
len(np.where(np.diff(data_last) <= 0)[0])) / 30)
class FluxPercentileRatioMid20(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_60_index = math.ceil(0.60 * lc_length)
F_40_index = math.ceil(0.40 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_40_60 = sorted_data[F_60_index] - sorted_data[F_40_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid20 = F_40_60 / F_5_95
return F_mid20
class FluxPercentileRatioMid35(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_325_index = math.ceil(0.325 * lc_length)
F_675_index = math.ceil(0.675 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_325_675 = sorted_data[F_675_index] - sorted_data[F_325_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid35 = F_325_675 / F_5_95
return F_mid35
class FluxPercentileRatioMid50(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_25_index = math.ceil(0.25 * lc_length)
F_75_index = math.ceil(0.75 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_25_75 = sorted_data[F_75_index] - sorted_data[F_25_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid50 = F_25_75 / F_5_95
return F_mid50
class FluxPercentileRatioMid65(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_175_index = math.ceil(0.175 * lc_length)
F_825_index = math.ceil(0.825 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_175_825 = sorted_data[F_825_index] - sorted_data[F_175_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid65 = F_175_825 / F_5_95
return F_mid65
class FluxPercentileRatioMid80(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
        sorted_data = np.sort(magnitude)
import numpy as np
from numpy import linalg as LA
class Hamiltonian:
"""
Contains: Matrix structure, elements,
consistency equations, total energy equation
and both static and dynamic parameters
Model_Params must be a dictionary and at least contain:
N_dim
N_cells
Filling
mat_dim
MF_Params must be a 1D np.array
The class must contain the methods:
update_variables
Mat_q_calc
Consistency
Calculate_Energy
All iterations done in HFA solver.
"""
def __init__(self, Model_params={}, MF_params=np.array([0, 0, 0, 0, 0])):
# MFPs Names
self.Dict = {0: 'Charge Modulation',
1: 'Ferromagnetism',
2: 'Orbital Disproportionation',
3: 'Anti Ferromagnetism',
4: 'Anti Ferroorbital'}
# K_patn for bandstructure
self.k_points = np.array([[0, 0, 0],
[np.pi, 0, 0],
[np.pi, np.pi/2, 0],
[0, 0, 0]])
self.k_labels = ['M', r'$\Gamma$', 'X', 'M']
# initiates Model parameters
self.mat_dim = 8
self.BZ_rot = 1
self.b = 0
self.k_res = 100
self.n_dim = 2
self.Filling = 0.25
self.stress = 0
self.Delta_CT = 0
self.eps = 0
self.t_1 = 1
self.t_2 = 0.15
self.t_4 = 0.05
self.U = 0
self.J = 0
for key, value in Model_params.items():
setattr(self, key, value)
# initiates Mean field parameters
self.MF_params = MF_params
# define some numbers
N = np.power(self.k_res, self.n_dim) * self.mat_dim
N_c = 8
N_k = N / N_c
self.N_ni = 2 * N_k
N_s = N_k * 4
# projectors for DOSs
Id = np.identity(8)
z2_projectors = Id[[0, 1, 4, 5]]
x2my2_projectors = Id[[2, 3, 6, 7]]
hom1_up = Id[[0, 2]]
hom2_up = Id[[1, 3]]
hom1_down = Id[[4, 6]]
hom2_down = Id[[5, 7]]
site1_up = (hom1_up+hom2_up)/np.sqrt(2.)
site1_down = (hom1_down+hom2_down)/np.sqrt(2.)
site2_up = (hom1_up-hom2_up)/np.sqrt(2.)
site2_down = (hom1_down-hom2_down)/np.sqrt(2.)
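        # The rows above recombine the two sublattice components with a
        # 1/sqrt(2) factor, giving site-resolved (rather than orbital-resolved)
        # projectors split by spin, used in the DOS configurations defined next.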
self.proj_configs = [
{"name": 'Orbit',
"title": '',
"proj_1": z2_projectors,
"proj_2": x2my2_projectors,
"normalizer": self.N_ni,
"/dE": True,
"label_1": r'$3z^2-r^2$',
"label_2": r'$x^2-y^2$'
},
{"name": 'Site 1',
"title": 'Site 1',
"proj_1": site1_up,
"proj_2": site1_down,
"normalizer": 0.5*self.N_ni,
"/dE": True,
"label_1": r'$\uparrow$',
"label_2": r'$\downarrow$'
},
{"name": 'Site 2',
"title": 'Site 2',
"proj_1": site2_up,
"proj_2": site2_down,
"normalizer": 0.5*self.N_ni,
"/dE": True,
"label_1": r'$\uparrow$',
"label_2": r'$\downarrow$'
},
# {"name": 'spins',
# "title": '',
# "proj_1": hom1_up + hom2_up,
# "proj_2": hom1_down + hom2_down,
# "normalizer": self.N_ni,
# "/dE": True,
# "label_1": r'$\uparrow$',
# "label_2": r'$\downarrow$'
# }
]
def func_tzz(self, Q):
qx, qy, qz = Q
B = self.b
return -2*self.t_1*(B*np.cos(qz) + 1/4*(np.cos(qx) + np.cos(qy)))\
- 2*self.t_4*(B*np.cos(2*qz) + 1/4*(np.cos(2*qx) + np.cos(2*qy)))\
- 2*self.t_2*(np.cos(qx)*np.cos(qy) - 2*B*np.cos(qz)*(np.cos(qy) + np.cos(qx)))
def func_tz_bz_b(self, Q):
qx, qy, qz = Q
return -3/2*self.t_1*(np.cos(qx) + np.cos(qy))\
- 3/2*self.t_4*(np.cos(2*qx) + np.cos(2*qy))\
+ 6*self.t_2*np.cos(qx)*np.cos(qy)
def func_tzz_b(self, Q):
qx, qy, qz = Q
B = self.b
return np.sqrt(3)/2*self.t_1*(np.cos(qx) - np.cos(qy))\
+ np.sqrt(3)/2*self.t_4*(np.cos(2*qx) - np.cos(2*qy))\
- 2*np.sqrt(3)*self.t_2*B*np.cos(qz)*(np.cos(qx) - np.cos(qy))
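    # The three functions above are tight-binding structure factors that enter
    # the Bloch Hamiltonian: t_1, t_2 and t_4 are hopping amplitudes (rescaled
    # by strain in static_variables) and self.b weights the qz-dependent terms.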
def static_variables(self):
# Static variables, these never change, may depend on momentum indices
# Strain Effect
decay = 1
self.f = 4*self.Filling
self.t_1 = self.t_1*np.exp(-decay*self.stress)
self.t_2 = self.t_2*np.exp(-decay*np.sqrt(2)*self.stress)
self.t_4 = self.t_4*np.exp(-decay*2*self.stress)
self.N_cells = np.power(self.k_res, self.n_dim)
# self.Q gets updated outside
qc = np.pi
Q = self.Q
Qc = self.Q + qc
self.tzz = self.func_tzz(Q)
self.tzz_c = self.func_tzz(Qc)
self.tz_bz_b = self.func_tz_bz_b(Q)
self.tz_bz_b_c = self.func_tz_bz_b(Qc)
self.tzz_b = self.func_tzz_b(Q)
self.tzz_b_c = self.func_tzz_b(Qc)
def dynamic_variables(self):
"""
Calculate dynamic variables
These depend on MFP, not on momentum
"""
self.U_0 = (self.U + self.J)/2
self.U_bar = (3*self.U - 5*self.J)/4
self.J_bar = (-self.U + 5*self.J)/2
# Distortion
alpha = 1
beta = 27/4*alpha*self.MF_params[0]**2
        if np.abs(self.MF_params[0]):
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_array_less)
from mne.inverse_sparse.mxne_optim import (mixed_norm_solver,
tf_mixed_norm_solver,
iterative_mixed_norm_solver,
iterative_tf_mixed_norm_solver,
norm_epsilon_inf, norm_epsilon,
_Phi, _PhiT, dgap_l21l1)
from mne.time_frequency._stft import stft_norm2
def _generate_tf_data():
n, p, t = 30, 40, 64
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
active_set = [0, 4]
times = np.linspace(0, 2 * np.pi, t)
X[0] = np.sin(times)
X[4] = -2 * np.sin(4 * times)
X[4, times <= np.pi / 2] = 0
X[4, times >= np.pi] = 0
M = np.dot(G, X)
M += 1 * rng.randn(*M.shape)
return M, G, active_set
def test_l21_mxne():
"""Test convergence of MxNE solver."""
n, p, t, alpha = 30, 40, 20, 1.
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
X[0] = 3
X[4] = -2
M = np.dot(G, X)
args = (M, G, alpha, 1000, 1e-8)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=None,
debias=True, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_cd, active_set, _, gap_cd = mixed_norm_solver(
*args, active_set_size=None,
debias=True, solver='cd', return_gap=True)
assert_array_less(gap_cd, 1e-8)
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_bcd, active_set, E, gap_bcd = mixed_norm_solver(
M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None,
debias=True, solver='bcd', return_gap=True)
assert_array_less(gap_bcd, 9.6e-9)
assert_array_equal(np.where(active_set)[0], [0, 4])
assert_allclose(X_hat_prox, X_hat_cd, rtol=1e-2)
assert_allclose(X_hat_prox, X_hat_bcd, rtol=1e-2)
assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_cd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 4])
assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=2, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=2, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
# suppress a coordinate-descent warning here
with pytest.warns(RuntimeWarning, match='descent'):
X_hat_cd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=2, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=5, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=5, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
with pytest.warns(RuntimeWarning, match='descent'):
X_hat_cd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=5, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
assert_array_equal(X_hat_bcd, X_hat_cd)
assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
def test_tf_mxne():
"""Test convergence of TF-MxNE solver."""
alpha_space = 10.
alpha_time = 5.
M, G, active_set = _generate_tf_data()
with pytest.warns(None): # CD
X_hat_tf, active_set_hat_tf, E, gap_tfmxne = tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
n_orient=1, tstep=4, wsize=32, return_gap=True)
assert_array_less(gap_tfmxne, 1e-8)
assert_array_equal(np.where(active_set_hat_tf)[0], active_set)
def test_norm_epsilon():
"""Test computation of espilon norm on TF coefficients."""
tstep = np.array([2])
wsize = np.array([4])
n_times = 10
n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs)
Y = np.zeros(n_steps * n_freqs)
l1_ratio = 0.03
assert_allclose(norm_epsilon(Y, l1_ratio, phi), 0.)
Y[0] = 2.
assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
l1_ratio = 1.
assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
# dummy value without random:
Y = np.arange(n_steps * n_freqs).reshape(-1, )
l1_ratio = 0.0
assert_allclose(norm_epsilon(Y, l1_ratio, phi) ** 2,
stft_norm2(Y.reshape(-1, n_freqs[0], n_steps[0])))
l1_ratio = 0.03
    # test that the vanilla epsilon norm matches the weighted version with all weights equal to 1
w_time = np.ones(n_coefs[0])
Y = np.abs(np.random.randn(n_coefs[0]))
assert_allclose(norm_epsilon(Y, l1_ratio, phi),
norm_epsilon(Y, l1_ratio, phi, w_time=w_time))
# scaling w_time and w_space by the same amount should divide
# epsilon norm by the same amount
Y = np.arange(n_coefs) + 1
mult = 2.
assert_allclose(
norm_epsilon(Y, l1_ratio, phi, w_space=1,
w_time=np.ones(n_coefs)) / mult,
norm_epsilon(Y, l1_ratio, phi, w_space=mult,
w_time=mult * np.ones(n_coefs)))
@pytest.mark.timeout(60) # ~30 sec on Travis OSX and Linux OpenBLAS
def test_dgapl21l1():
"""Test duality gap for L21 + L1 regularization."""
n_orient = 2
M, G, active_set = _generate_tf_data()
n_times = M.shape[1]
n_sources = G.shape[1]
tstep, wsize = np.array([4, 2]), np.array([64, 16])
n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs)
phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
for l1_ratio in [0.05, 0.1]:
alpha_max = norm_epsilon_inf(G, M, phi, l1_ratio, n_orient)
alpha_space = (1. - l1_ratio) * alpha_max
alpha_time = l1_ratio * alpha_max
Z = np.zeros([n_sources, phi.n_coefs.sum()])
# for alpha = alpha_max, Z = 0 is the solution so the dgap is 0
gap = dgap_l21l1(M, G, Z, np.ones(n_sources, dtype=bool),
alpha_space, alpha_time, phi, phiT,
n_orient, -np.inf)[0]
assert_allclose(0., gap)
# check that solution for alpha smaller than alpha_max is non 0:
X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
M, G, alpha_space / 1.01, alpha_time / 1.01, maxit=200, tol=1e-8,
verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
wsize=wsize, return_gap=True)
# allow possible small numerical errors (negative gap)
assert_array_less(-1e-10, gap)
assert_array_less(gap, 1e-8)
assert_array_less(1, len(active_set_hat_tf))
X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
M, G, alpha_space / 5., alpha_time / 5., maxit=200, tol=1e-8,
verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
wsize=wsize, return_gap=True)
assert_array_less(-1e-10, gap)
assert_array_less(gap, 1e-8)
assert_array_less(1, len(active_set_hat_tf))
def test_tf_mxne_vs_mxne():
"""Test equivalence of TF-MxNE (with alpha_time=0) and MxNE."""
alpha_space = 60.
alpha_time = 0.
M, G, active_set = _generate_tf_data()
X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, maxit=200, tol=1e-8,
verbose=True, debias=False, n_orient=1, tstep=4, wsize=32)
# Also run L21 and check that we get the same
X_hat_l21, _, _ = mixed_norm_solver(
M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1,
active_set_size=None, debias=False)
assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1)
def test_iterative_reweighted_mxne():
"""Test convergence of irMxNE solver."""
n, p, t, alpha = 30, 40, 20, 1
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
X[0] = 3
X[4] = -2
M = np.dot(G, X)
with pytest.warns(None): # CD
X_hat_l21, _, _ = mixed_norm_solver(
M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1,
active_set_size=None, debias=False, solver='bcd')
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
debias=False, solver='bcd')
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
debias=False, solver='prox')
assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3)
assert_allclose(X_hat_prox, X_hat_l21, rtol=1e-3)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
debias=True, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
debias=True, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
debias=True, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
#!/usr/bin/env python3
"""
corrections.py: Script to apply corrections to the images.
"""
import os
from argparse import ArgumentParser
from datetime import date, datetime
from typing import Optional, Sequence
import numpy as np
from astropy.io import fits
from dresscode.utils import load_config
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = ArgumentParser()
parser.add_argument(
"-c", "--config", help="path to config.txt", default="config.txt"
)
args = parser.parse_args(argv)
config = load_config(args.config)
galaxy = config["galaxy"]
path = config["path"] + galaxy + "/working_dir/"
years = config["years"]
# Loop over the different years.
for year in years:
print("Year: " + year)
yearpath = path + year + "/"
# PART 1: Apply a coincidence loss correction.
print("Applying coincidence loss corrections...")
if os.path.isfile(yearpath + "sum_um2_nm.img"):
coicorr(yearpath + "sum_um2_nm.img")
if os.path.isfile(yearpath + "sum_uw2_nm.img"):
coicorr(yearpath + "sum_uw2_nm.img")
if os.path.isfile(yearpath + "sum_uw1_nm.img"):
coicorr(yearpath + "sum_uw1_nm.img")
# PART 2: Apply a large scale sensitivity correction.
print("Applying large scale sensitivity corrections...")
if os.path.isfile(yearpath + "sum_um2_nm_coi.img"):
lsscorr(yearpath + "sum_um2_nm_coi.img")
if os.path.isfile(yearpath + "sum_uw2_nm_coi.img"):
lsscorr(yearpath + "sum_uw2_nm_coi.img")
if os.path.isfile(yearpath + "sum_uw1_nm_coi.img"):
lsscorr(yearpath + "sum_uw1_nm_coi.img")
# PART 3: Apply a zero point correction.
print("Applying zero point corrections...")
if os.path.isfile(yearpath + "sum_um2_nm_coilss.img"):
zeropoint(yearpath + "sum_um2_nm_coilss.img", -2.330e-3, -1.361e-3)
if os.path.isfile(yearpath + "sum_uw2_nm_coilss.img"):
zeropoint(yearpath + "sum_uw2_nm_coilss.img", 1.108e-3, -1.960e-3)
if os.path.isfile(yearpath + "sum_uw1_nm_coilss.img"):
zeropoint(yearpath + "sum_uw1_nm_coilss.img", 2.041e-3, -1.748e-3)
return 0
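# Usage sketch (assumes a config.txt providing at least 'galaxy', 'path' and
# 'years', as read by load_config):
#   python corrections.py -c config.txt
# The corrections are applied in sequence: coincidence loss (coicorr), large
# scale sensitivity (lsscorr) and zero point (zeropoint).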
# Functions for PART 1: Coincidence loss correction.
def coicorr(filename):
    # Open the image. Create NaN-filled arrays with the shape of the image.
hdulist = fits.open(filename)
data = hdulist[0].data
header = hdulist[0].header
    total_flux = np.full_like(data, np.nan, dtype=np.float64)
import colorspacious
import numpy as np
colorspace = 'CAM02-LCD'
def mask_rgb(rgb, a, b, mask):
'''
function that masks an rgb colormap with np.nan according to the string mask
Args:
rgb: (l,l,3) matrix
        a,b: values of a and b. If mask == 'circle', anything with sqrt(a**2+b**2) > 1 will be np.nan
mask: string:
'circle' -> masks everything outside a circle defined as where sqrt(a**2+b**2)>1
            'no_mask' -> do nothing
'unavailable'-> masks invalid rgb values (i.e. <0 or >1)
'''
if mask == 'unavailable':
rgb[rgb[:,:,:]<0] = np.nan
rgb[rgb[:,:,:]>1] = np.nan
mask = np.isnan(np.sum(rgb[:,:,:], axis = -1))
rgb[:,:,0][mask] = np.nan
rgb[:,:,1][mask] = np.nan
rgb[:,:,2][mask] = np.nan
elif mask == 'no_mask':
        pass
elif mask == 'circle':
l = rgb.shape[1]
a_1 = np.linspace(a[0],a[1],l)
b_1 = np.linspace(b[0],b[1],l)
ab = np.sqrt(a_1[:,np.newaxis]**2+b_1[np.newaxis,:]**2)
mask = ab > 1
rgb[:,:,0][mask] = np.nan
rgb[:,:,1][mask] = np.nan
rgb[:,:,2][mask] = np.nan
else:
raise ValueError("mask must be 'no_mask', 'unavailable' or 'circle'")
def set_ab_rot(Jab, ar, br, rot):
'''
sets the [:,:,1] and [:,:,2] axes of a Jab colormap to ar and br
then rotates the ab color plane according to the angle rot
Args:
Jab: (l,l,3) colormap
ar: 1d array, typically made by np.linspace()
br: 1d array, typically made by np.linspace()
rot: angle in degrees
returns:
None (but Jab changed in-place)
'''
if rot==0:
Jab[:,:,1] = ar[:,np.newaxis]
Jab[:,:,2] = br[np.newaxis,:]
else:
ab = np.sqrt(ar[:,np.newaxis]**2+br[np.newaxis,:]**2)
Jab[:,:,1] = ar[:,np.newaxis]
Jab[:,:,2] = br[np.newaxis,:]
phi = np.arctan2(Jab[:,:,1],Jab[:,:,2])+rot*np.pi/180
Jab[:,:,2] = ab*np.cos(phi)
Jab[:,:,1] = ab*np.sin(phi)
def get_const_J(J = 95, a = (-1,1), b = (-1,1), r = 33.0, l=256, mask = 'no_mask', rot = 0):
'''
    Generates an rgb colormap of shape (l,l,3) that attempts to keep a constant lightness in the CAM02-LCD colorspace
The colormap is based on the a-b plane of the Jab colorspace for a constant J.
Args:
        J: float (lightness), default 95, range approximately 1->128,
a: tuple of 2 floats, default (-1,1). The limit along the a-axis will be (a[0]*r,a[1]*r)
b: tuple of 2 floats, default (-1,1). The limit along the b-axis will be (b[0]*r,b[1]*r)
r: float, default 33.0. The saturation where a or b is 1. (named 'r' for radius in the a-b plane)
l: int, default 256. Size of the colormap.
mask: string, default 'no_mask'.
If 'circle' makes a circular mask, and everything outside will be np.nan
            If 'unavailable', colors that would have rgb < 0 or rgb > 1 when transformed to sRGB will be np.nan
rot: rotation of the hues on the a-b plane, in degrees
returns:
a (l,l,3) numpy array of rgb values
'''
Jab = np.zeros((l,l,3))
Jab[:,:,0] = J
ar = np.linspace(r*a[0], r*a[1],l)
br = np.linspace(r*b[0], r*b[1],l)
set_ab_rot(Jab, ar, br, rot)
rgb = colorspacious.cspace_convert(Jab, colorspace, "sRGB1")
mask_rgb(rgb, a, b, mask)
rgb[rgb[:,:,:]<0] = 0
rgb[rgb[:,:,:]>1] = 1
return rgb
def get_var_J(J = [95,128.5], a = (-1,1), b = (-1,1), r = 33.0, l=256, mask = 'no_mask', rot = 0, limit_sat = None):
'''
    Generates an rgb colormap of shape (l,l,3) whose lightness varies radially (from J[1] at the centre to J[0] at the edge) in the CAM02-LCD colorspace
The colormap is based on the a-b plane of the Jab colorspace for a constant J.
Args:
        J: (lightness) tuple of 2 floats, default [95,128.5], defining the range of lightness for the colormap;
max range of J approximately 1 to 128.5
a: tuple of 2 floats, default (-1,1). The limit along the a-axis will be (a[0]*r,a[1]*r)
b: tuple of 2 floats, default (-1,1). The limit along the b-axis will be (b[0]*r,b[1]*r)
r: float, default 33.0. The saturation where a or b is 1. (named 'r' for radius in the a-b plane)
l: int, default 256. Size of the colormap.
mask: string, default 'no_mask'.
If 'circle' makes a circular mask, and everything outside will be np.nan
            If 'unavailable', colors that would have rgb < 0 or rgb > 1 when transformed to sRGB will be np.nan
rot: rotation of the hues on the a-b plane, in degrees
returns:
a (l,l,3) numpy array of rgb values
'''
Jab = np.zeros((l,l,3))
ar = np.linspace(r*a[0], r*a[1],l)
br = np.linspace(r*b[0], r*b[1],l)
set_ab_rot(Jab, ar, br, rot)
ab = np.sqrt(ar[:,np.newaxis]**2+br[np.newaxis,:]**2)
a_1 = np.linspace(a[0], a[1],l)
b_1 = np.linspace(b[0], b[1],l)
Jab[:,:,0] = J[0] + (J[1]-J[0])*(1-np.sqrt(a_1[:,np.newaxis]**2+b_1[np.newaxis,:]**2))
Jab[Jab[:,:,0]<1,0] = 1
Jab[Jab[:,:,0]>128,0] = 128
if not (limit_sat is None):
apply_radial_sat_limit(Jab, limit_sat = limit_sat)
rgb = colorspacious.cspace_convert(Jab, colorspace, "sRGB1")
mask_rgb(rgb, a, b, mask)
rgb[rgb[:,:,:]<0] = 0
rgb[rgb[:,:,:]>1] = 1
#print(r)
return rgb
def parse_name_postfix(cmap, a, b):
'''
if a cmap name has a postfix that details the quadrant/side, this will translate that to ranges in a and/or b.
example: parse_name_postfix('cone tr', a, b) return a and b so that they span the top right quadrant
inputs a and b so that both can be returned even if only one is changed
Args:
cmap: string, potentially with a postfix detailing quadrant/side following a space, i.e. 'cone tr'.
The postfix translates:
'b' -> bottom,
't' -> top,
'l' -> left,
'r' -> right.
Any combination (b,t)+(l,r) is possible to select quadrants
a: current limits for a, not checked but should be a tuple of length 2
b: current limits for b, not checked but should be a tuple of length 2
returns:
tuple (cmap, a, b):
cmap (stripped of the postfix)
a, tuple of length 2
b, tuple of length 2
'''
# check if the cmap name has additional info regarding quadrant/side
if len(cmap.split(' '))>1:
param = cmap.split(' ')[1]
if 'b' in param: a = (0,1)
if 't' in param: a = (-1,0)
if 'r' in param: b = (0,1)
if 'l' in param: b = (-1,0)
cmap = cmap.split(' ')[0]
return cmap, a, b
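# Illustrative example of the postfix parsing above:
#   parse_name_postfix('cone tr', (-1, 1), (-1, 1)) -> ('cone', (-1, 0), (0, 1))
# i.e. 't' restricts the a-range to the top half and 'r' restricts the b-range
# to the right half.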
def get_cmap(name, l = None, rot = None, J = None, sat = None, limit_sat = None, a= None, b = None):
'''
getter function for named colormaps
the 'alt' colormaps are rotated 45 degrees
flat colormaps are: ---------------- 'flat', 'disk'
colormaps with a bright center: ---- 'peak', 'cone'
colormaps with a dark center: ------ 'abyss', 'funnel'
alternate with a bright center: ---- 'hsv', 'fourCorners', 'fourEdges', 'teuling0w' to 'teuling3w'
    colormaps with lightness on y axis: - 'barrel', 'cut', 'blues', 'reds', 'greens', 'yellows'
teuling colormaps: --------- 'teuling0f', 'teuling1f', 'teuling2f', 'teuling3f', 'teuling0w', 'teuling1w', 'teuling2w', 'teuling3w'
any matplotlib colormap can also be converted to a colormap with lighness on y-axis
Args:
name: string
For radial colormaps the name may have a postfix separated by a space, i.e. 'cone tr'
the postfix must be some combination of (t,b) and/or (l,r) which defines the quadrant/side of the colormap to include
t-> top, b-> bottom, r-> right, l-> left, and 'tr'-> top right, etc.
l: int, the size of the colormap will be (l,l), defaults to 256 if None
rot: float, rotation of the colormap (where applicable)
        J: array-like of length 2 (float,float), determines min and max luminosity where applicable
sat: float, maximum saturation where applicable
limit_sat: string, 'individual' or 'shared'. How saturation is limited for relevant colormaps when colors outside sRGB are required
'individual': each combination J, hue in the colormap has an individual limit to saturation
'shared': for each J, all hues share a limit, the maximum where all hues can be represented
a: range along a-axis, array-like [min,max]
Used to move the center of the colormap where applicable.
Defaults to (-1,1) which is then multiplied internally with sat
b: range along b-axis, see a.
returns:
a (l,l,3) numpy array of rgb values
'''
if l is None: l = 256
if rot is None: rot = 0
if sat is None: sat = 33.0
if a is None: a = (-1,1)
if b is None: b = (-1,1)
name, a, b = parse_name_postfix(name, a, b) # if there is a _ in the name, set a and b according to the postfix, and remove the postfix
if name == 'flat':
if J is None: J = [95]
return get_const_J( J = J[0], a = a, b = b, r = sat, l = l, rot = rot)
elif name == 'disk':
if J is None: J = [95]
return get_const_J(J = J[0], a = a, b = b, r = sat, l = l, rot = rot, mask = 'circle')
elif name == 'peak':
if J is None: J = [95,128.5]
return get_var_J(J = J, a = a, b = b, r = sat, l = l, rot = rot, limit_sat = limit_sat)
elif name == 'cone':
if J is None: J = [95,128.5]
return get_var_J(J = J, a = a, b = b, r = sat, l = l, rot = rot, mask = 'circle', limit_sat = limit_sat)
elif name == 'abyss':
if J is None: J = [95,1]
return get_var_J(J = J, a = a, b = b, r = sat, l = l, rot = rot, limit_sat = limit_sat)
elif name == 'funnel':
if J is None: J = [95,1]
return get_var_J(J = J, a = a, b = b, r = sat, l = l, rot = rot, mask = 'circle', limit_sat = limit_sat)
elif name == 'hsv':
hsv = np.ones((l,l,3))
ar = np.linspace(a[0],a[1],l)[:,np.newaxis]*np.ones((l,l))
br = np.linspace(b[0],b[1],l)[np.newaxis,:]*np.ones((l,l))
phi = np.arctan2(ar,br)+rot*np.pi/180
hsv[:,:,0] = phi/np.pi*0.5+0.5
hsv[:,:,1] = np.sqrt(ar**2+br**2)/np.sqrt(2)
hsv[:,:,2] = 1
RGB = matplotlib.colors.hsv_to_rgb(hsv)
return RGB
elif name == 'fourEdges':
return four_edges(l=l, a=a, b=b, rot = rot+90)
elif name == 'fourCorners':
return four_edges(l=l, a=(-0.85,0.85), b=(-0.85,0.85), rot = 45)
# these are shared by all that follow
if J is None: J = [15,120]
if limit_sat is None: limit_sat = 'shared'
# rest of colormaps
if name == 'barrel':
return barrel(sat = sat, phi = [-180,180], J =J, l = l, limit_sat = limit_sat)
elif name == 'cut':
return cut(a = a, sat = sat, rot = rot, J = J, l = l, limit_sat = limit_sat)
elif name == 'blues':
return cut(a = [0,1], sat = sat, rot = 180, J = J, l = l, limit_sat = limit_sat)
elif name == 'reds':
return cut(a = [0,1], sat = sat, rot = 90, J = J, l = l, limit_sat = limit_sat)
elif name == 'greens':
return cut(a = [0,1], sat = sat, rot = -90, J = J, l = l, limit_sat = limit_sat)
elif name == 'yellows':
return cut(a = [0,1], sat = sat, rot = 0, J = J, l = l, limit_sat = limit_sat)
elif name == 'teuling0f':
return teuling(l = l, a = 0.32, order = [0,1,2])
elif name == 'teuling1f':
return teuling(l = l, a = 0.72, order = [1,0,2])
elif name == 'teuling2f':
return teuling(l = l, a = 0.32, order = [1,0,2])
elif name == 'teuling3f':
return teuling(l = l, a = 0.32, order = [1,0,2], green_multiplier = 0.75)
elif name == 'teuling0w':
return teuling(l = l, a = 0.32, order = [0,1,2], white_center = True)
elif name == 'teuling1w':
return teuling(l = l, a = 0.72, order = [1,0,2], white_center = True)
elif name == 'teuling2w':
return teuling(l = l, a = 0.32, order = [1,0,2], white_center = True)
elif name == 'teuling3w':
return teuling(l = l, a = 0.32, order = [1,0,2], green_multiplier = 0.75, white_center = True)
elif name == 'orangeBlue':
return bilinear(l)
elif name == 'greenPurple':
return bilinear(l, c0 = [0.5,1,0], c1 = [0.5,0,1])
elif name == 'greenTealBlue':
return bilinear(l, c0 = [0,1,0], c1 = [0,0,1])
elif name == 'redPurpleBlue':
return bilinear(l, c0 = [1,0,0], c1 = [0,0,1])
elif name in mpl_cmaps:
return get_2dcmap_from_mpl(name, J = J, l = l, limit_sat = limit_sat)
else:
raise ValueError(f'colormap {name} not known')
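# Minimal usage sketch for get_cmap (hedged: assumes matplotlib is available;
# the colormap name and size below are illustrative):
#
#   import matplotlib.pyplot as plt
#   rgb = get_cmap('flat', l=128)   # (128, 128, 3) array of rgb values in [0, 1]
#   plt.imshow(rgb, origin='lower', extent=(-1, 1, -1, 1))
#   plt.show()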
def get_sat_limts():
'''
    returns a 2d matrix of approximate limits to sat (radius in a-b space) in terms of phi and J
'''
if not 'limit' in globals():
global limit, limit_ax_0_J, limit_ax_1_phi
phi = np.linspace(-np.pi, np.pi, 256+1)
J = np.linspace(1,130,128)
sat = np.linspace(0,70,256)
J_phi_sat = np.empty((len(J),len(phi),len(sat),3))
J_phi_sat[:,:,:,0] = J[:,np.newaxis,np.newaxis]
J_phi_sat[:,:,:,1] = phi[np.newaxis,:,np.newaxis]
J_phi_sat[:,:,:,2] = sat[np.newaxis,np.newaxis,:]
Jab = np.empty(J_phi_sat.shape)
Jab[:,:,:,0] = J_phi_sat[:,:,:,0]
Jab[:,:,:,1] = J_phi_sat[:,:,:,2]*np.sin(J_phi_sat[:,:,:,1])
Jab[:,:,:,2] = J_phi_sat[:,:,:,2]*np.cos(J_phi_sat[:,:,:,1])
rgb = colorspacious.cspace_convert(Jab, colorspace, "sRGB1")
rgb[rgb>1] = np.nan
rgb[rgb<0] = np.nan
flat_rgb = np.sum(rgb, axis = -1)
flat_rgb[:,:,0] = 0
        # there are some strange regions in the limits-overview because there are 'jumps' as we go through phi
# therefore limit the derivative in phi
for i, _ in enumerate(sat[:-1]):
flat_rgb[:,0,i] += flat_rgb[:,-1,i]
flat_rgb[:,-1,i] += flat_rgb[:,0,i]
flat_rgb[:,1:,i+1] += flat_rgb[:,:-1,i]
flat_rgb[:,:-1,i+1] += flat_rgb[:,1:,i]
flat_rgb[:,0,-1] += flat_rgb[:,-1,-1]
flat_rgb[:,-1,-1] += flat_rgb[:,0,-1]
valid = np.invert(np.isnan(flat_rgb)) + np.linspace(0,0.9,len(sat))[np.newaxis,np.newaxis,:]
valid_argmax = np.argmax(valid, axis = -1)
limit = sat[valid_argmax]
limit_ax_0_J = J
limit_ax_1_phi = phi
return limit, limit_ax_0_J, limit_ax_1_phi
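# Note: the limits grid computed above is cached in module-level globals
# (limit, limit_ax_0_J, limit_ax_1_phi), so the expensive sRGB scan over
# (J, phi, sat) only runs on the first call.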
import scipy.interpolate
import matplotlib.cm
def apply_sat_limit(Jab, limit_sat = 'shared'):
'''
apply a saturation limit to Jab in order to ensure valid saturation when the limit of the RGB colorspace is reached
Args:
Jab: np array of shape (n,m,3) encoded in the colorspace
limit_sat: 'shared' or 'individual'
if 'shared', all hues share same limit to saturation (the minimum where all saturation values present in the colormap can be represented)
            if 'individual', different hues have different saturation limits
returns:
None (Jab is modified in-place)
'''
#limit = sat[valid_argmax]
#limit_ax_0_J = J
#limit_ax_1_phi = phi
limit, limit_ax_0_J, limit_ax_1_phi = get_sat_limts()
inerpolator = scipy.interpolate.RectBivariateSpline(limit_ax_0_J, limit_ax_1_phi, limit)
phi = np.arctan2(Jab[:,:,1],Jab[:,:,2])
sat = np.sqrt(Jab[:,:,1]**2 + Jab[:,:,2]**2)
max_sat = inerpolator( Jab[:,:,0], phi, grid = False)
if limit_sat == 'shared':
max_sat[:,:] = np.min(max_sat, axis=1)[:,np.newaxis]
mask = sat>max_sat
#sat[mask] = max_sat[mask]
change = (max_sat[mask]+0.000000001)/(sat[mask]+0.000000001)
Jab[mask,1] *= change
Jab[mask,2] *= change
def apply_radial_sat_limit(Jab, limit_sat = 'shared'):
'''
apply a radial saturation limit to Jab in order to make the saturation radial when
the limit of the RGB colorspace is reached
the behaviour if limit_sat == 'shared' is different from apply_sat_limit()
in this function all possible hues are always included, but for apply_sat_limit() only present hues are considered
Args:
Jab: np array of shape (n,m,3) encoded in the colorspace
limit_sat: 'shared' or 'individual'
if 'shared', all hues share same limit to saturation (the minimum where all are present)
            if 'individual', different hues have different saturation limits
returns:
None (Jab is modified in-place)
'''
limit, limit_ax_0_J, limit_ax_1_phi = get_sat_limts()
if limit_sat == 'shared':
limit_shared = np.min(limit, axis=1)
inerpolator = scipy.interpolate.interp1d(limit_ax_0_J, limit_shared)
max_sat = inerpolator( Jab[:,:,0])
else:
inerpolator = scipy.interpolate.RectBivariateSpline(limit_ax_0_J, limit_ax_1_phi, limit)
phi = np.arctan2(Jab[:,:,1],Jab[:,:,2])
max_sat = inerpolator( Jab[:,:,0], phi, grid = False)
sat = np.sqrt(Jab[:,:,1]**2 + Jab[:,:,2]**2)
mask = sat>max_sat
#sat[mask] = max_sat[mask]
change = (max_sat[mask]+0.000000001)/(sat[mask]+0.000000001)
Jab[mask,1] *= change
Jab[mask,2] *= change
def get_2dcmap_from_mpl(string, J = [15,120], l = 256, limit_sat = 'shared'):
'''
Generates a 2d colormap from a 1d colormap found in matplotlib
Args:
string: name of the matplotlib colormap
        J: limits to lightness on the y-axis, array like of length 2, default [15,120]
l: desired size (l,l,3) of the colormap
        limit_sat: string, how to limit the saturation to stay within the limits of the RGB colorspace
'shared': all hues share same limits
'individual': different hues have different limits
returns:
a (l,l,3) numpy array of rgb values
'''
cmap = matplotlib.cm.get_cmap(string)
# make 2d cmap in Jab colorspace
rgb = np.zeros((l,l,3))
rgb[:,:,:] = cmap(np.linspace(0,1,l))[np.newaxis,:,:3]
Jab = colorspacious.cspace_convert(rgb, "sRGB1", colorspace)
J = np.linspace(J[0], J[1], l)
Jab[:,:,0] = J[:,np.newaxis]
# Jab now has colors that cannot be represented in rgb
# limit the 'saturation' defined as radius in a-b space for a given J according to get_max_ab(J):
apply_sat_limit(Jab, limit_sat = limit_sat)
# convert the now limited Jab colorspace to rgb
rgb = colorspacious.cspace_convert(Jab, colorspace,"sRGB1")
rgb[rgb<0] = 0
rgb[rgb>1] = 1
return rgb
mpl_cmaps = ['Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r', 'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r',
'CMRmap', 'CMRmap_r', 'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r', 'Greys', 'Greys_r',
'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r', 'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r',
'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBuGn', 'PuBuGn_r', 'PuBu_r', 'PuOr', 'PuOr_r', 'PuRd',
'PuRd_r', 'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r', 'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r',
'RdYlGn', 'RdYlGn_r', 'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r', 'Set3', 'Set3_r', 'Spectral', 'Spectral_r',
'Wistia', 'Wistia_r', 'YlGn', 'YlGnBu', 'YlGnBu_r', 'YlGn_r', 'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r', 'afmhot',
'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r', 'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r', 'cividis',
'cividis_r', 'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r', 'cubehelix', 'cubehelix_r', 'flag', 'flag_r',
'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r', 'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r', 'gist_rainbow',
'gist_rainbow_r', 'gist_stern', 'gist_stern_r', 'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r', 'gnuplot_r', 'gray',
'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r', 'inferno', 'inferno_r', 'jet', 'jet_r', 'magma', 'magma_r', 'nipy_spectral', 'nipy_spectral_r',
'ocean', 'ocean_r', 'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r', 'rainbow', 'rainbow_r', 'seismic', 'seismic_r', 'spring',
'spring_r', 'summer', 'summer_r', 'tab10', 'tab10_r', 'tab20', 'tab20_r', 'tab20b', 'tab20b_r', 'tab20c', 'tab20c_r', 'terrain', 'terrain_r',
'turbo', 'turbo_r', 'twilight', 'twilight_r', 'twilight_shifted', 'twilight_shifted_r', 'viridis', 'viridis_r', 'winter', 'winter_r']
def barrel(sat = 33, phi = [-180,180], J = [15,120], l = 256, limit_sat = 'shared'):
'''
    Generates a 2d colormap that cycles different hues on the x-axis and has lightness on the y-axis
Args:
sat: float, default 33. Desired saturation
phi: range for the hues on the x-axis in degrees, array like of length 2, default [-180,180]
        J: limits to lightness on the y-axis, array like of length 2, default [15,120]
l: desired size (l,l,3) of the colormap
        limit_sat: string, how to limit the saturation to stay within the limits of the RGB colorspace
'shared': all hues share same limits
'individual': different hues have different limits
returns:
a (l,l,3) numpy array of rgb values
'''
Jab = np.empty((l,l,3))
    J = np.linspace(J[0], J[1], l)
#!/usr/bin/env python
# coding: utf-8
# Python TOV solver
# <NAME>: Date 05/11/2020
''' Information about the code:
This code solves TOV equations for mass radius relations. This also can plot the mass radius curve.
USE: To use the code, here are the steps:
1) Include the file in your main code e.g. import tov_class as tc
2) Load the EoS using the ToV loader, tc.ToV(filename, arraysize)
3) call the solver as tc.ToV.mass_radius(min_pressure, max_pressure)
4) To plot, follow the code in main() on creating the dictionary of inputs
Updates: Version 0.0.1-1
Solves ToV, can only take inputs of pressure, energy density in MeV, baryon density in fm^-3
in ascending order.
'''
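# Usage sketch based on the steps above (file name, array size and pressure
# range are illustrative; the table is read by open_file() as columns of
# energy density, pressure [MeV/fm^3] and baryon density [fm^-3]):
#
#   import tov_class as tc
#   star = tc.ToV('eos_table.dat', 50)
#   star.open_file()
#   mr = star.mass_radius(1.0, 500.0)   # min/max central pressure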
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d
import pylab
from scipy.interpolate import InterpolatedUnivariateSpline
# constants
msol = 1.116e60 # Mass of sun in MeV
Ggrav = 1.324e-42 # Mev^-1*fm
rsol = 2.954e18  # Schwarzschild radius of the sun in fm
rhosol = msol * 3 / (4.0 * np.pi * rsol**3)  # reference density: solar mass inside a sphere of radius rsol
class EoS:
""" EoS Loader. Interpolates Energy, pressure and number density"""
alf = 41325.0
def __init__(self, filename):
self.file = filename
self.e_in = np.empty(1000)
self.p_in = np.empty(1000)
self.nb_in = np.empty(1000)
def open_file(self):
data = np.loadtxt(self.file)
self.e_in = data[:, 0]
self.p_in = data[:, 1]
self.nb_in = data[:, 2]
print(self.e_in[0], self.p_in[0], self.nb_in[0])
return self.e_in, self.p_in, self.nb_in
@staticmethod
def energy_from_pressure(self, pressure):
nidx = np.where(self.nb_in == 0.08)
pcrust = self.p_in[nidx]
plow = 1e-10
if pressure < plow:
return 2.6e-310
elif pressure < pcrust:
pres = [self.p_in[i] for i in range(48)]
eden = [self.e_in[i] for i in range(48)]
e1 = interp1d(pres, eden, axis=0, kind='linear', fill_value="extrapolate")
return e1(pressure)
else:
e1 = interp1d(self.p_in, self.e_in, axis=0, kind='linear', fill_value="extrapolate")
return e1(pressure)
@staticmethod
def pressure_from_energy(self, energy):
p1 = interp1d(self.e_in, self.p_in, axis=0, kind='cubic', fill_value="extrapolate")
return p1(energy)
@staticmethod
def baryon_from_energy(self, energy):
n1 = interp1d(self.e_in, self.nb_in, axis=0, kind='cubic', fill_value='extrapolate')
return n1(energy)
class ToV(EoS):
''' Solves TOV equations and gives data-table, mass-radius plot and max. mass, central pressure
and central density '''
alf = 41325.0
def __init__(self, filename, imax):
super().__init__(filename)
self.imax = imax
self.radius = np.empty(self.imax)
self.mass = np.empty(self.imax)
def tov_rhs(self, initial, x):
pres = initial[0]
mass = initial[1]
edn = EoS.energy_from_pressure(self, pres)
# print("edn", edn, mass, ToV.alf, x)
# Equations one: pressure, 2: mass
one = -0.5 * edn * mass * (1.0 + (pres / edn)) * (1. + (4. * np.pi / ToV.alf) * (pres / mass) * x**3) / (x**2 - x * mass)
two = 4.0 * np.pi * x**2 * edn / ToV.alf
f = [one, two]
return f
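    # tov_rhs above returns the dimensionless TOV right-hand sides: `one` is
    # dP/dx (pressure gradient with the relativistic correction factors) and
    # `two` is dm/dx (mass continuity), with alf absorbing the unit conversion.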
def tovsolve(self, pcent, xfinal):
eden = EoS.energy_from_pressure(self, pcent)
#print("Eden", pcent, eden)
dx = 0.001
x = np.arange(dx, xfinal, dx)
initial = pcent, 4 * np.pi * dx**3 / (3.0 * ToV.alf)
psol = odeint(self.tov_rhs, initial, x)
rstar = 0.
mstar = 0.
count = 0
for i in psol[:, 0]:
if i > 1.e-7:
# print("i =", i, count)
count += 1
rstar += 2.95 * dx
mstar = psol[count, 1]
return rstar, mstar
def mass_radius(self, pmin, pmax):
        pc = np.zeros(self.imax)
import numpy as np
from abc import ABCMeta, abstractmethod
import tensorflow as tf
import tqdm
from neural_nets import DuelingNetwork, QNetwork
from replay_memory import ReplayMemory, PrioritizedReplayMemory
from losses import weighted_mean_squared_error
from mixin import AgentMixin
class BaseDDQNAgent(AgentMixin, metaclass=ABCMeta):
@abstractmethod
def __init__(self, name, model, replay_memory,
loss_fn, optimizer, batch_size,
learning_rate, update_target_network_every,
update_prediction_network_every, replay_memory_capacity,
batches_before_training, gamma, eps_initial, eps_minimum,
eps_decay, input_shape, output_dims, pretrained_weights,
tf_summary_writer):
self.name = name
if tf_summary_writer:
self.create_tf_summary_writer()
# prediction network
self.model = model(output_dims=output_dims)
self.model.build(input_shape=input_shape)
if pretrained_weights:
self.model.load_weights(pretrained_weights)
#print(self.model.trainable_weights[0][0][0])
# target network
self.target_model = model(output_dims=output_dims)
self.target_model.build(input_shape=input_shape)
self.target_model.set_weights(self.model.get_weights())
# optimizer and loss function
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=learning_rate,
decay_steps=4096,
end_learning_rate=learning_rate * 0.1,
power=1,
cycle=False,
)
self.optimizer = optimizer(learning_rate_fn)
self.loss_fn = loss_fn
# replay memory
self.replay_memory = replay_memory(capacity=replay_memory_capacity)
self.batches_before_training = batches_before_training
self.batch_size = batch_size
self.update_target_network_every = update_target_network_every
self.update_prediction_network_every = update_prediction_network_every
self.gamma = gamma
self.epsilon = eps_initial
self.eps_minimum = eps_minimum
self.eps_decay = eps_decay
def _obtain_batch(self):
batch = self.replay_memory.sample(self.batch_size)
if len(batch) == 3:
# if prioritized replay memory
batch, importance, indices = batch
else:
# if vanilla replay memory
importance, indices = None, None
states = tf.convert_to_tensor(batch[0], dtype=tf.float32)
actions = tf.convert_to_tensor(batch[1], dtype=tf.int32)
rewards = tf.convert_to_tensor(batch[2], dtype=tf.float32)
next_states = tf.convert_to_tensor(batch[3], dtype=tf.float32)
dones = tf.convert_to_tensor(batch[4], dtype=tf.float32)
return (
(states, actions, rewards, next_states, dones), importance, indices
)
def _update_prediction_network(self):
(states, actions, rewards, next_states, dones), importance, indices = \
self._obtain_batch()
with tf.GradientTape() as tape:
# current states -> current Q-values
current_Q = self.model(states, training=True)
# reduce Q to an 1-d array corresponding to the actions taken
idx_to_gather = tf.stack(
[tf.range(actions.shape[0]), actions], axis=1)
current_Q = tf.gather_nd(current_Q, idx_to_gather)
# next states -> next Q-values
expected_Q = self.target_model(next_states).numpy()
# Q learning: obtain target Q-values
expected_Q = (
rewards + self.gamma
* tf.math.reduce_max(expected_Q, axis=1)
* (1 - dones)
)
loss = self.loss_fn(current_Q, expected_Q, importance)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(
zip(gradients, self.model.trainable_variables))
if hasattr(self.replay_memory, 'update'):
abs_td_error = np.abs((expected_Q - current_Q).numpy())
self.replay_memory.update(indices, abs_td_error)
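    # The update above uses the Q-learning target
    #   y = r + gamma * max_a' Q_target(s', a') * (1 - done)
    # evaluated with the target network; when a prioritized replay memory is
    # used, the sampled transitions are re-prioritized with |y - Q(s, a)|.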
def _train(self, env, num_episodes, random_seed):
tf.random.set_seed(random_seed)
best_reward = float('-inf')
# initialize accumulators
self.errors = []
self.num_actions = []
self.rewards = []
self.td_errors = []
pbar = tqdm.tqdm(range(num_episodes), desc=' episodes')
for _ in pbar:
np.random.seed(random_seed)
random_seed += 1
state = env.reset()
episode_td_error = []
while True:
q_values = self.model(state[np.newaxis], training=True)
q_values = np.squeeze(q_values.numpy())
                if np.random.random() < self.epsilon:  # epsilon-greedy exploration; threshold direction assumed
#!/usr/bin/python
import sys
import time
import threading
import numpy
import string
import copy
from scipy.optimize import curve_fit
from math import sqrt,exp,log,pi,acos,atan,cos,asin
def g_CIMP(x):
x=x[0,:]
g=numpy.zeros(len(x))
#g=2.691*(1-0.2288964/x)*1/((1+0.16*x)*(1+1.35*numpy.exp(-x/0.2)))
if (max(x)<1):
g=-18.743261164767357+101.6507221241339*x**(0.33333333)-104.59646433814892*numpy.sqrt(x)+33.73393945878933*x-10.325598001906716*x**(1.5)
elif (min(x)>1):
g=1.1976693536243692*(1-0.2660904859953754/x)*1/((1+0.04920104690300144*x)*(1-0.5874697493344921*numpy.exp(-x*0.09913039025775051)))
else:
g=2.691*(1-0.2288964/x)*1/((1+0.16*x)*(1+1.35*numpy.exp(-x/0.2)))
return g
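# g_CIMP above is a piecewise fit to the auxiliary scattering function of the
# CIMP (completely integrated modified Piwinski) intra-beam scattering model;
# the branches cover argument vectors that lie entirely below 1, entirely
# above 1, or straddle 1.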
def gauss_function(x, a, x0, sigma):
return a*numpy.exp(-(x-x0)**2/(2*sigma**2))
def calc_sigma(pos,profile):
max_val=max(profile)
min_val=min(profile)
profile=(profile-min_val)/max_val
aux=profile*pos
ycm=numpy.sum(aux)/numpy.sum(profile)
aux=profile*(pos-ycm)**2
yvar=sqrt(numpy.sum(aux)/numpy.sum(profile))
return (ycm,yvar)
def Calc_Growth(twiss,param):
#Define parameters
brel=sqrt(1-1/param['gamma']**2)
ex=param['exi']
ey=param['eyi']
ss=param['ssi']
sp=param['spi']
#Define twiss arrays
s=numpy.zeros(len(twiss))
betax=numpy.zeros(len(twiss))
alphax=numpy.zeros(len(twiss))
betay=numpy.zeros(len(twiss))
alphay=numpy.zeros(len(twiss))
Dx=numpy.zeros(len(twiss))
Dpx=numpy.zeros(len(twiss))
Dy=numpy.zeros(len(twiss))
Dpy=numpy.zeros(len(twiss))
s=twiss[:,0]
betax=twiss[:,2]
alphax=twiss[:,3]
betay=twiss[:,6]
alphay=twiss[:,7]
Dx=twiss[:,4]
Dpx=twiss[:,5]
Dy=twiss[:,8]
Dpy=twiss[:,9]
#Calculate the parameters
A=param['cluz']*param['Np']*param['r0']**2/(64*numpy.pi**2*brel**3*param['gamma']**4*ex*ey*ss*sp)
logCIMP=numpy.log(param['gamma']**2*ex*numpy.sqrt(betay*ey)/(param['r0']*betax))
Hx=1/betax*[Dx**2+(betax*Dpx+alphax*Dx)**2]
Hy=1/betay*[Dy**2+(betay*Dpy+alphay*Dy)**2]
SigH=numpy.sqrt(1/sp**2+Hx/ex+Hy/ey)**(-1)
aCIMP=SigH/param['gamma']*numpy.sqrt(betax/ex)
bCIMP=SigH/param['gamma']*numpy.sqrt(betay/ey)
#Calculate Function g
g_ab=g_CIMP(aCIMP/bCIMP)
g_ba=g_CIMP(bCIMP/aCIMP)
#Saves values for the ration a/b and b/a
#f=open('RatioAB.txt','w')
#for j in range(len(aCIMP[0,:])):
# f.write(str(aCIMP[0,j]/bCIMP[0,j])+'\t\t'+str(bCIMP[0,j]/aCIMP[0,j])+'\n')
#f.close()
#Calculate Growth Rates
fp=A*logCIMP*(SigH**2/sp**2)*(g_ba/aCIMP+g_ab/bCIMP)
fx=A*logCIMP*(-aCIMP*g_ba+Hx*SigH**2/ex*(g_ba/aCIMP+g_ab/bCIMP))
fy=A*logCIMP*(-bCIMP*g_ab+Hy*SigH**2/ey*(g_ba/aCIMP+g_ab/bCIMP))
#Integrate along the s coordinate
invTp=2*pi**(3.0/2.0)*numpy.trapz(fp,s)
invTx=2*pi**(3.0/2.0)*numpy.trapz(fx,s)
invTy=2*pi**(3.0/2.0)*numpy.trapz(fy,s)
#Calculate growth
Tp=invTp
Tx=invTx
Ty=invTy
return (Tx,Ty,Tp)
# Function that iterates emittances for the case with no harmonic system (simple calculation of the bunch length)
def Iterate_emittances(twiss,param):
#Define differences
i=1
time=0
diff1=1
diff2=1
diff3=1
diff4=1
difftot=diff1+diff2+diff3+diff4
#Calculate U0
U0=param['Cgamma']/(2*pi)*(param['En']/1e+9)**4*param['I2']*1e+9
#print U0
#Calculate damping partition numbers
Jx=1-param['I4']/param['I2']
Jy=1
Jp=2+param['I4']/param['I2']
#print Jx,Jy,Jp
    # Calculate damping times
taux=(2*param['En']*param['C'])/(Jx*U0*param['cluz'])
tauy=(2*param['En']*param['C'])/(Jy*U0*param['cluz'])
taup=(2*param['En']*param['C'])/(Jp*U0*param['cluz'])
#print taux,tauy,taup
#Define step for iteration
tt=taux/5
# Synchrotron tune
Qs0=sqrt(param['ap']*param['hh']*sqrt(param['Vrf']**2-U0**2)/(2*pi*param['En']))
#Creates an array that's a subgroup of param
inter={}
inter['exi']=param['ex0']
inter['eyi']=(param['k_dw']+param['k_beta'])*param['ex0']
inter['ssi']=param['ss0']
inter['spi']=param['sp0']
inter['gamma']=param['gamma']
inter['r0']=param['r0']
inter['Np']=param['Np']
inter['cluz']=param['cluz']
while (difftot>10**(-7)):
(Tx,Ty,Tp)=Calc_Growth(twiss,inter)
Tx=float(Tx)/param['C']
Ty=float(Ty)/param['C']
Tp=float(Tp)/param['C']
#print Tx,Ty,Tp
exx=(-param['ex0']+exp(2*tt*(Tx-1/taux))*(param['ex0']+inter['exi']*(-1+Tx*taux)))/(-1+Tx*taux)
eyy=(-(param['k_dw']*param['ex0']+param['k_beta']*exx*(1-tauy/Ty))+exp(2*tt*(Ty-1/tauy))*((param['k_dw']*param['ex0']+param['k_beta']*exx*(1-tauy/Ty))+inter['eyi']*(-1+Ty*tauy)))/(-1+Ty*tauy)
spp=(-param['sp0']+exp(tt*(Tp-1/taup))*(param['sp0']+inter['spi']*(-1+Tp*taup)))/(-1+Tp*taup)
# Accelerating cavity system only
sss=inter['spi']*param['C']*sqrt(param['ap']*param['En']/(2*pi*param['hh']*(param['Vrf']**2-U0**2)**0.5));
#print exx,eyy,spp,sss
diff1=abs(exx-inter['exi'])/inter['exi']
diff2=abs(eyy-inter['eyi'])/inter['eyi']
diff3=abs(spp-inter['spi'])/inter['spi']
diff4=abs(sss-inter['ssi'])/inter['ssi']
difftot=diff1+diff2+diff3+diff4
#print difftot
inter['exi']=exx;
inter['eyi']=eyy;
inter['spi']=spp;
inter['ssi']=sss;
time=i*tt;
i=i+1
return (exx,eyy,spp,sss)
# Function that iterates emittances using the results from tracking to calculate bunch length
def Iterate_emittances3HC(twiss,param,phimain,Vmain,phiharm,Vharm):
#Define differences
i=1
time=0
diff1=1
diff2=1
diff3=1
diff4=1
difftot=diff1+diff2+diff3+diff4
#Calculate U0
U0=param['Cgamma']/(2*pi)*(param['En']/1e+9)**4*param['I2']*1e+9
#Calculate synchronous phase
Phi_sync_nat=asin(U0/param['Vrf'])
#Calculate damping partition numbers
Jx=1-param['I4']/param['I2']
Jy=1
Jp=2+param['I4']/param['I2']
#print Jx,Jy,Jp
# Calculate damping times
taux=(2*param['En']*param['C'])/(Jx*U0*param['cluz'])
tauy=(2*param['En']*param['C'])/(Jy*U0*param['cluz'])
taup=(2*param['En']*param['C'])/(Jp*U0*param['cluz'])
#print taux,tauy,taup
#Define step for iteration
tt=taux/5
#RF frequency
w_rf =2*pi*(param['hh']*param['cluz']/param['C']-param['Detune0']) #Generator Frequency
#Creates arrays for 3HC calculation
posz=numpy.zeros(5000)
perfil= | numpy.zeros(5000) | numpy.zeros |
import numpy as np
import cvxpy as cp
from scipy.special import softmax
from pprint import pformat
import random
class ControlledRangeVariance:
def __init__(self, seed, wsupport, expwsq, rvala=1, rvalb=1, tv=None):
wmax = max(wsupport)
assert wmax > 1
assert wmax >= expwsq
assert min(wsupport) < 1
self.wsupport = np.sort(np.array(wsupport))
wnice = self.wsupport / wmax
A = | np.array([wnice, wnice * wnice]) | numpy.array |
from builtins import range
import pandas as pd
import numpy as np
from chemml.chem import Molecule
class CoulombMatrix(object):
"""
The implementation of coulomb matrix descriptors by <NAME> et. al. 2012, PRL (All 3 different variations).
Parameters
----------
CMtype: str, optional (default='SC')
The coulomb matrix type, one of the following types:
* 'Unsorted_Matrix' or 'UM'
* 'Unsorted_Triangular' or 'UT'
* 'Eigenspectrum' or 'E'
* 'Sorted_Coulomb' or 'SC'
* 'Random_Coulomb' or 'RC'
max_n_atoms: int or 'auto', optional (default = 'auto')
Set the maximum number of atoms per molecule (to which all representations will be padded).
If 'auto', we find it based on all input molecules.
nPerm: int, optional (default = 3)
Number of permutation of coulomb matrix per molecule for Random_Coulomb (RC)
type of representation.
const: float, optional (default = 1)
The constant value for coordinates unit conversion to atomic unit
example: atomic unit -> const=1, Angstrom -> const=0.529
const/|Ri-Rj|, which denominator is the euclidean distance
between atoms i and j
"""
def __init__(self, CMtype='SC', max_n_atoms = 'auto', nPerm=3, const=1):
self.CMtype = CMtype
self.max_n_atoms = max_n_atoms
self.nPerm = nPerm
self.const = const
def __cal_coul_mat(self, mol):
"""
Parameters
----------
mol: molecule object
Returns
-------
"""
if isinstance(mol, Molecule):
if mol.xyz is None:
msg = "The molecule must be a chemml.chem.Molecule object with xyz information."
raise ValueError(msg)
else:
msg = "The molecule must be a chemml.chem.Molecule object."
raise ValueError(msg)
mol = np.append(mol.xyz.atomic_numbers,mol.xyz.geometry, axis=1)
cm = []
for i in range(len(mol)):
vect = []
for k in range(0,i):
vect.append(cm[k][i])
for j in range(i,len(mol)):
if i==j:
vect.append(0.5*mol[i,0]**2.4)
else:
vect.append((mol[i,0]*mol[j,0]*self.const)/np.linalg.norm(mol[i,1:]-mol[j,1:]))
for m in range(len(mol),self.max_n_atoms):
vect.append(0.0)
cm.append(vect)
for m in range(len(mol),self.max_n_atoms):
cm.append([0]*self.max_n_atoms)
return np.array(cm) #shape nAtoms*nAtoms
def represent(self, molecules):
"""
provides coulomb matrix representation for input molecules.
Parameters
----------
molecules: chemml.chem.Molecule object or list
If list, it must be a list of chemml.chem.Molecule objects, otherwise we raise a ValueError.
In addition, all the molecule objects must provide the XYZ information. Please make sure the XYZ geometry has been
stored or optimized in advance.
Returns
-------
Pandas DataFrame
A data frame with same number of rows as number of molecules will be returned.
The exact shape of the dataframe depends on the type of CM as follows:
- shape of Unsorted_Matrix (UM): (n_molecules, max_n_atoms**2)
- shape of Unsorted_Triangular (UT): (n_molecules, max_n_atoms*(max_n_atoms+1)/2)
- shape of eigenspectrums (E): (n_molecules, max_n_atoms)
- shape of Sorted_Coulomb (SC): (n_molecules, max_n_atoms*(max_n_atoms+1)/2)
- shape of Random_Coulomb (RC): (n_molecules, nPerm * max_n_atoms * (max_n_atoms+1)/2)
"""
if isinstance(molecules, list):
molecules = | np.array(molecules) | numpy.array |
# Modified from sklearn GP example.
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>s
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x) + x * np.cos(2 * x) + 10 * | np.sin(x) | numpy.sin |
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import cunumeric as num
fns = ["bartlett", "blackman", "hamming", "hanning"]
def test():
for fn in fns:
print(f"Testing cunumeric.{fn}")
np_fn = getattr(np, fn)
num_fn = getattr(num, fn)
out_np = np_fn(0)
out_num = num_fn(0)
assert np.allclose(out_np, out_num)
out_np = np_fn(1)
out_num = num_fn(1)
assert np.allclose(out_np, out_num)
out_np = np_fn(10)
out_num = num_fn(10)
assert np.allclose(out_np, out_num)
out_np = np_fn(100)
out_num = num_fn(100)
assert np.allclose(out_np, out_num)
print("Testing cunumeric.kaiser")
out_np = np.kaiser(0, 0)
out_num = num.kaiser(0, 0)
assert np.allclose(out_np, out_num)
out_np = | np.kaiser(1, 0) | numpy.kaiser |
# -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:53
# @Author : zhoujun
import math
import random
import pyclipper
import numpy as np
import cv2
from .augment import DataAugment
data_aug = DataAugment()
def check_and_validate_polys(polys, xxx_todo_changeme):
'''
check so that the text poly is in the same direction,
and also filter some invalid polygons
:param polys:
:param tags:
:return:
'''
(h, w) = xxx_todo_changeme
if polys.shape[0] == 0:
return polys
polys[:, :, 0] = np.clip(polys[:, :, 0], 0, w - 1) # x coord not max w-1, and not min 0
polys[:, :, 1] = np.clip(polys[:, :, 1], 0, h - 1) # y coord not max h-1, and not min 0
validated_polys = []
for poly in polys:
p_area = cv2.contourArea(poly)
if abs(p_area) < 1:
continue
validated_polys.append(poly)
return np.array(validated_polys)
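# Illustrative sketch, not part of the original source: on a made-up 100x200
# image, out-of-range coordinates are clipped to the image bounds and a
# degenerate polygon (area < 1 px) is filtered out.
def _example_check_polys():
    polys = np.array([
        [[-10.0, 5.0], [250.0, 5.0], [250.0, 60.0], [-10.0, 60.0]],      # clipped to x in [0, 199]
        [[10.0, 10.0], [10.5, 10.0], [10.5, 10.5], [10.0, 10.5]],        # area 0.25 px -> dropped
    ], dtype=np.float32)
    return check_and_validate_polys(polys, (100, 200))  # only the first polygon survives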
def unshrink_offset(poly,ratio):
area = cv2.contourArea(poly)
peri = cv2.arcLength(poly, True)
a = 8
b = peri - 4
c = 1-0.5 * peri - area/ratio
return quadratic(a,b,c)
def quadratic(a, b, c):
if (b * b - 4 * a * c) < 0:
return 'None'
Delte = math.sqrt(b * b - 4 * a * c)
if Delte > 0:
x = (- b + Delte) / (2 * a)
y = (- b - Delte) / (2 * a)
return x, y
else:
x = (- b) / (2 * a)
return x
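# Illustrative sketch, not part of the original source: quadratic() returns
# both roots when the discriminant is positive, e.g. x**2 - 3x + 2 = 0.
def _example_quadratic():
    return quadratic(1, -3, 2)  # (2.0, 1.0)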
def generate_rbox(im_size, text_polys, text_tags,training_mask, shrink_ratio):
"""
Generate the mask image: white regions are text, black is background
:param im_size: h, w of the image
:param text_polys: coordinates of the text boxes
:param text_tags: whether each annotated text box participates in training
:param training_mask: mask that ignores regions annotated as DO NOT CARE
:return: the generated mask image
"""
h, w = im_size
score_map = np.zeros((h, w), dtype=np.uint8)
for i, (poly, tag) in enumerate(zip(text_polys, text_tags)):
try:
poly = poly.astype(np.int)
# d_i = cv2.contourArea(poly) * (1 - shrink_ratio * shrink_ratio) / cv2.arcLength(poly, True)
d_i = cv2.contourArea(poly) * (1 - shrink_ratio) / cv2.arcLength(poly, True) + 0.5
pco = pyclipper.PyclipperOffset()
pco.AddPath(poly, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
shrinked_poly = np.array(pco.Execute(-d_i))
cv2.fillPoly(score_map, shrinked_poly, i + 1)
if not tag:
cv2.fillPoly(training_mask, shrinked_poly, 0)
except:
print(poly)
return score_map, training_mask
def augmentation(im: np.ndarray, text_polys: np.ndarray, scales: np.ndarray, degrees: int) -> tuple:
# the images are rescaled with ratio {0.5, 1.0, 2.0, 3.0} randomly
# im, text_polys = data_aug.random_scale(im, text_polys, scales)
# the images are horizontally fliped and rotated in range [−10◦, 10◦] randomly
if random.random() < 0.5:
im, text_polys = data_aug.vertical_flip(im, text_polys)
if random.random() < 0.5:
im, text_polys = data_aug.random_rotate_img_bbox(im, text_polys, degrees)
return im, text_polys
def image_label(im: np.ndarray, text_polys: np.ndarray, text_tags: list, input_size: int = 640,
shrink_ratio: float = 0.5, degrees: int = 10, train: bool = True,
scales: np.ndarray = np.array([0.5, 1, 2.0, 3.0])) -> tuple:
"""
Read the image and generate the label maps
:param im: image
:param text_polys: text annotation boxes
:param text_tags: flag marking whether the text is ignored: true = ignore, false = do not ignore
:param input_size: size of the output image
:param shrink_ratio: shrink ratio applied to the ground truth
:param degrees: range of the random rotation angle
:param scales: random scaling factors
:return:
"""
h, w, _ = im.shape
# clip out-of-bounds coordinates
text_polys = check_and_validate_polys(text_polys, (h, w))
if train:
im, text_polys = augmentation(im, text_polys, scales, degrees)
h, w, _ = im.shape
short_edge = min(h, w)
if short_edge < input_size:
# ensure the short edge >= input_size
scale = input_size / short_edge
im = cv2.resize(im, dsize=None, fx=scale, fy=scale)
text_polys *= scale
h, w, _ = im.shape
training_mask = np.ones((h, w), dtype=np.uint8)
score_maps = []
for i in (1, shrink_ratio):
score_map, training_mask = generate_rbox((h, w), text_polys, text_tags,training_mask, i)
score_maps.append(score_map)
score_maps = | np.array(score_maps, dtype=np.float32) | numpy.array |
import numpy as np
import pandas as pd
import os
import cv2
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
import torch
from efficientnet_pytorch import EfficientNet
import pickle
import pydicom
import glob
def window(x, WL=50, WW=350):
upper, lower = WL+WW//2, WL-WW//2
x = np.clip(x, lower, upper)
x = x - np.min(x)
x = x / np.max(x)
return x
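# Illustrative sketch, not part of the original source: apply the default
# soft-tissue window (WL=50, WW=350) to a toy array of Hounsfield-style values.
# The input values are made up.
def _example_window():
    hu = np.array([-500.0, 0.0, 50.0, 120.0, 400.0])
    return window(hu)  # clipped to [-125, 225], then rescaled to [0, 1]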
class BboxDataset(Dataset):
def __init__(self, series_list):
self.series_list = series_list
def __len__(self):
return len(self.series_list)
def __getitem__(self,index):
return index
class BboxCollator(object):
def __init__(self, series_list):
self.series_list = series_list
def _load_dicom_array(self, f):
dicom_files = glob.glob(os.path.join(f, '*.dcm'))
dicoms = [pydicom.dcmread(d) for d in dicom_files]
M = np.float32(dicoms[0].RescaleSlope)
B = np.float32(dicoms[0].RescaleIntercept)
z_pos = [float(d.ImagePositionPatient[-1]) for d in dicoms]
sorted_idx = | np.argsort(z_pos) | numpy.argsort |
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.functional as tf
import torch.utils.data
import time
from tqdm import tqdm
import model
import argparse
try:
import nvidia_smi
NVIDIA_SMI = True
except:
NVIDIA_SMI = False
import sys
import os
import pathlib
class Dataset(torch.utils.data.Dataset):
"""
Dataset class that will provide data during training. Modify it accordingly
for your dataset. This one shows how to do augmenting during training for a
very simple training set
"""
def __init__(self, n_training):
"""
Very simple training set made of 200 Gaussians of width between 0.5 and 1.5
We later augment this with a velocity and amplitude.
Args:
n_training (int): number of training examples including augmenting
"""
super(Dataset, self).__init__()
self.n_training = n_training
x = np.linspace(-5.0, 5.0, 100)
self.sigma = 1.0 * np.random.rand(200) + 0.5
self.y = np.exp(-x[None,:]**2 / self.sigma[:,None]**2)
self.indices = np.random.randint(low=0, high=200, size=self.n_training)
self.amplitude = 2.0 * np.random.rand(self.n_training) + 0.1
def __getitem__(self, index):
amplitude = self.amplitude[index]
sigma = self.sigma[self.indices[index]]
inp = amplitude * self.y[self.indices[index],:]
out = | np.array([sigma, amplitude]) | numpy.array |
import os
import gzip
import argparse
import pickle
import numpy as np
import tensorflow as tf
import tensorflow.keras as K
import tensorflow.contrib.eager as tfe
from pathlib import Path
from model import Model
lr_start = 1e-5
lr_end = 1e-5
max_epochs = 50
def load_instance(filename):
with gzip.open(filename, 'rb') as file:
sample = pickle.load(file)
features = tf.convert_to_tensor(sample['solving_stats'], dtype=tf.float32)
response = tf.convert_to_tensor(sample['nb_nodes_left'], dtype=tf.float32)
instance = sample['instance_path']
return features, response, instance
def load_batch(batch_filenames):
batch_features, batch_responses, batch_instances = [], [], []
for count, filename in enumerate(batch_filenames):
features, response, instance = load_instance(filename)
batch_features.append(features)
batch_responses.append(response)
batch_instances.append(instance)
batch_features = tf.stack(batch_features, axis=0)
batch_responses = tf.stack(batch_responses, axis=0)
batch_instances = tf.stack(batch_instances, axis=0)
return batch_features, batch_responses, batch_instances
def get_feature_stats(data, folder):
outfile = folder/"feature_stats.pickle"
if outfile.exists():
with outfile.open('rb') as file:
feature_stats = pickle.load(file)
feature_means = feature_stats['feature_means']
feature_stds = feature_stats['feature_stds']
else:
feature_means, feature_stds = [], []
for features, _, _ in data:
feature_means.append(tf.reduce_mean(features, axis=(0, 1)))
mean = tf.expand_dims(tf.expand_dims(feature_means[-1], axis=0), axis=0)
std = tf.reduce_mean(tf.reduce_mean((features - mean) ** 2, axis=0), axis=0)
feature_stds.append(tf.sqrt(std))
feature_means = tf.reduce_mean(tf.stack(feature_means, axis=0), axis=0).numpy()
feature_stds = tf.reduce_mean(tf.stack(feature_stds, axis=0), axis=0).numpy()
feature_stds[feature_stds < 1e-5] = 1.
with outfile.open('wb') as file:
pickle.dump({'feature_means': feature_means, 'feature_stds': feature_stds}, file)
return feature_means, feature_stds
def learning_rate(episode):
return (lr_start-lr_end) / np.e ** episode + lr_end
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'problem',
help='Setcover or cauctions',
type=str,
choices=['setcover', 'cauctions'],
)
parser.add_argument(
'-g', '--gpu',
help='CUDA GPU id (-1 for CPU).',
type=int,
default=0,
)
args = parser.parse_args()
if args.gpu == -1:
print(f"Using CPU")
os.environ['CUDA_VISIBLE_DEVICES'] = ''
else:
print(f"Using GPU {args.gpu}")
os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'
tfconfig = tf.ConfigProto()
tfconfig.gpu_options.allow_growth = True
tf.enable_eager_execution(tfconfig)
tf.set_random_seed(seed=0)
rng = np.random.RandomState(0)
data_folder = Path('data/bnb_size_prediction')/args.problem
train_folder = data_folder/"train"
valid_folder = data_folder/"valid"
output_folder = Path('results')/args.problem
output_folder.mkdir(parents=True, exist_ok=True)
train_filenames = [str(filename) for filename in train_folder.glob('sample*.pkl')]
train_data = tf.data.Dataset.from_tensor_slices(train_filenames).batch(32)
train_data = train_data.map(lambda x: tf.py_func(load_batch, [x], [tf.float32, tf.float32, tf.string]))
train_data = train_data.prefetch(1)
with (train_folder/"benchmark.pkl").open("rb") as file:
train_benchmark = pickle.load(file)
valid_filenames = [str(filename) for filename in valid_folder.glob('sample*.pkl')]
valid_data = tf.data.Dataset.from_tensor_slices(valid_filenames).batch(128)
valid_data = valid_data.map(lambda x: tf.py_func(load_batch, [x], [tf.float32, tf.float32, tf.string]))
valid_data = valid_data.prefetch(1)
with (valid_folder/"benchmark.pkl").open("rb") as file:
valid_benchmark = pickle.load(file)
feature_means, feature_stds = get_feature_stats(train_data, train_folder)
model = Model(feature_means, feature_stds)
optimizer = tf.train.AdamOptimizer(lambda: lr)
best_valid_loss = np.inf
for epoch in range(max_epochs):
K.backend.set_learning_phase(1) # Set train
epoch_train_filenames = rng.choice(train_filenames, len(train_filenames), replace=False)
train_data = tf.data.Dataset.from_tensor_slices(epoch_train_filenames).batch(32)
train_data = train_data.map(lambda x: tf.py_func(load_batch, [x], [tf.float32, tf.float32, tf.string]))
train_data = train_data.prefetch(1)
train_loss = []
for count, (features, responses, instances) in enumerate(train_data):
response_centers, response_scales = [], []
for instance in instances:
instance = instance.numpy().decode('utf-8')
response_centers.append(np.mean(train_benchmark[instance]['nb_nodes']))
response_scales.append(np.mean(train_benchmark[instance]['nb_nodes'])/np.sqrt(12) + | np.std(train_benchmark[instance]['nb_nodes']) | numpy.std |
import random
import os
import time
import threading
import sys
import subprocess
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
import gzip
import os.path
import shutil
import urllib.request as request
import requests
import zipfile
import io
currentdir = os.path.dirname(os.path.realpath(__file__))
def getChromosomeNum(string):
if string == 'X':
return 23
elif string == 'Y':
return 24
else:
try:
return int(string)
except:
return None
def parseConfigFile(configFile):
config = open(configFile)
settings = {}
for line in config:
if line[0] == '#':
continue
line = line.strip()
if len(line) == 0:
continue
line = line.split('\t')
settings[line[0]] = line[1]
return settings
def doWork(st, sema,shell):
try:
print (st)
t = time.time()
lst = st.split()
pipe = None
if lst[-2] == '>':
pipe = lst[-1]
lst = lst[:-2]
if shell:
lst = ' '.join(lst)
if not pipe:
p = subprocess.Popen(lst,shell=shell)
print(p.communicate())
else:
with open(pipe,'w') as output:
p = subprocess.Popen(lst,stdout=output,shell=shell)
print (p.communicate())
print ('Time = ' + str(time.time()-t))
except Exception as ex:
print(ex)
pass
sema.release()
def runLsts(lsts,threads,shell=False):
print('start')
for i in range(0,len(lsts)):
lst = lsts[i]
numThreads = threads[i]
sema = threading.Semaphore(numThreads) #max number of threads at once
for item in lst:
sema.acquire()
t = threading.Thread(target=doWork, args=(item,sema,shell))
t.start()
for i in range(0,numThreads):
sema.acquire()
for i in range(0,numThreads):
sema.release()
def calcPrecisionRecallLsts(lst):
#finalR = []
#finalP = []
#finalR.append([])
#finalP.append([])
lst = np.asarray(lst)
ind = np.argsort(-lst[:,0])
lst = lst[ind,:]
#get total true and cumulative sum of true
totalPositive = np.sum(lst[:,1])
totalNegative = lst.shape[0]-totalPositive
finalR = np.cumsum(lst[:,1])
FP = np.arange(1,finalR.shape[0]+1)-finalR
#create precision array (recall counts / total counts)
finalP = finalR/np.arange(1,lst.shape[0]+1)
#find ties
x = np.arange(finalR.shape[0]-1)
ties = list(np.where(lst[x,0] == lst[x+1,0])[0])
for idx in range(len(ties)-1,-1,-1):
finalR[ties[idx]] = finalR[ties[idx]+1]
finalP[ties[idx]] = finalP[ties[idx]+1]
FP[ties[idx]] = FP[ties[idx]+1]
TN = totalNegative - FP
ACC = (TN + finalR)/finalR.shape[0]
TNR = TN/totalNegative
#scale recall from 0 to 1
finalR = finalR / totalPositive
return (finalP,finalR,ACC,TNR)
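# Illustrative sketch, not part of the original source: precision/recall/ACC/TNR
# arrays for a toy ranking of six (score, true-label) pairs. The scores and
# labels are made up.
def _example_precision_recall():
    scored = [[0.9, 1], [0.8, 0], [0.7, 1], [0.6, 1], [0.3, 0], [0.1, 0]]
    precision, recall, acc, tnr = calcPrecisionRecallLsts(scored)
    return precision, recall, acc, tnr  # recall is scaled to [0, 1]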
def calcAndPlotCurvesLsts(predictionsLst,classLst,datanames,fileName,title,curveType,lineStyleLst=None,legFont=1,lineWidthLst=None,font=None,removeMargins=False,xMax=None,yMax=None,markerLst=None,colorLst=None,size=None,fig=None,dpi=300,reducePoints=None,frameon=True):
xAxis = []
yAxis = []
for i in range(0,len(predictionsLst)):
(prec,recall,acc,tnr) = calcPrecisionRecallLsts(np.hstack((np.expand_dims(predictionsLst[i],0).T, | np.expand_dims(classLst[i],0) | numpy.expand_dims |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: <NAME>
# contact: <EMAIL>
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import SimpleITK as sitk
import numpy as np
import nibabel as nib
from torch.autograd import Variable
from skimage.transform import resize
from torchvision import transforms
from time import gmtime, strftime
from tqdm import tqdm
import pdb
import os
from . import maybe_download
from ..helpers import utils
from ..helpers import postprocessing
from ..helpers import preprocessing
from .. import brainmask
from os.path import expanduser
home = expanduser("~")
#========================================================================================
class tumorSeg():
"""
This class performs segmentation for a given sequence of patient data.
Two main platforms for segmentation mask estimation:
one for patient data in BraTS format,
the other for any arbitrary format.
Steps followed in the estimation of the segmentation mask:
1. ABLnet for reducing false positives outside the brain
Air Brain Lesion model (2D model, 103 layers)
2. BNet3Dnet, a 3D network for inner-class classification
dual-pathway network
3. Tir3Dnet, a 57-layer 3D convolutional network for inner-class
classification
more on training details and network information:
(https://link.springer.com/chapter/10.1007/978-3-030-11726-9_43)
=========================
quick: True (just evaluates the dual-pathway network, BNet3D);
otherwise computes an ensemble over all the networks
"""
def __init__(self,
quick = False,
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")):
map_location = device
#========================================================================================
ckpt_tir3D = os.path.join(home, '.DeepBrainSeg/BestModels/tumor_Tramisu_FC57_3D.pth.tar')
ckpt_BNET3D = os.path.join(home, '.DeepBrainSeg/BestModels/tumor_BrainNet_3D.pth.tar')
ckpt_ABL = os.path.join(home, '.DeepBrainSeg/BestModels/tumor_ABL_2D.pth.tar')
#========================================================================================
# air brain lesion segmentation..............
from .models.modelABL import FCDenseNet103
self.ABLnclasses = 3
self.ABLnet = FCDenseNet103(n_classes = self.ABLnclasses) ## intialize the graph
maybe_download(ckpt_ABL)
saved_parms=torch.load(ckpt_ABL, map_location=map_location)
self.ABLnet.load_state_dict(saved_parms['state_dict']) ## fill the model with trained params
print ("================================ ABLNET2D Loaded ==============================")
self.ABLnet.eval()
self.ABLnet = self.ABLnet.to(device)
#========================================================================================
# Tir3D model...................
from .models.modelTir3D import FCDenseNet57
self.T3Dnclasses = 5
self.Tir3Dnet = FCDenseNet57(self.T3Dnclasses)
maybe_download(ckpt_tir3D)
ckpt = torch.load(ckpt_tir3D, map_location=map_location)
self.Tir3Dnet.load_state_dict(ckpt['state_dict'])
print ("=============================== TIRNET2D Loaded ==============================")
self.Tir3Dnet.eval()
self.Tir3Dnet = self.Tir3Dnet.to(device)
if not quick:
# BrainNet3D model......................
from .models.model3DBNET import BrainNet_3D_Inception
self.B3Dnclasses = 5
self.BNET3Dnet = BrainNet_3D_Inception()
maybe_download(ckpt_BNET3D)
ckpt = torch.load(ckpt_BNET3D, map_location=map_location)
self.BNET3Dnet.load_state_dict(ckpt['state_dict'])
print ("================================ KAMNET3D Loaded ==============================")
self.BNET3Dnet.eval()
self.BNET3Dnet = self.BNET3Dnet.to(device)
#========================================================================================
self.device = device
self.quick = quick
def get_localization(self, t1, t1ce, t2, flair, brain_mask):
"""
ABLnet output: finds the brain and the whole-tumor region
t1_v = t1 volume (numpy array)
t1c_v = t1c volume (numpy array)
t2_v = t2 volume (numpy array)
flair_v = flair volume (numpy array)
brain_mask = brain, whole tumor mask (numpy array, output of the ANTs pipeline)
"""
t1 = preprocessing.standardize(t1, brain_mask)
t1ce = preprocessing.standardize(t1ce, brain_mask)
t2 = preprocessing.standardize(t2, brain_mask)
flair = preprocessing.standardize(flair, brain_mask)
generated_output_logits = np.empty((self.ABLnclasses, flair.shape[0], flair.shape[1], flair.shape[2]))
for _slice_ in tqdm(range(flair.shape[2])):
flair_slice = np.transpose(flair[:,:,_slice_])
t2_slice = np.transpose(t2[:,:,_slice_])
t1ce_slice = np.transpose(t1ce[:,:,_slice_])
t1_slice = np.transpose(t1[:,:,_slice_])
array = np.zeros((flair_slice.shape[0],flair_slice.shape[1],4))
array[:,:,0] = flair_slice
array[:,:,1] = t2_slice
array[:,:,2] = t1ce_slice
array[:,:,3] = t1_slice
transformed_array = torch.from_numpy(utils.convert_image(array)).float()
transformed_array = transformed_array.unsqueeze(0) ## neccessary if batch size == 1
transformed_array = transformed_array.to(self.device)
logits = self.ABLnet(transformed_array).detach().cpu().numpy()# 3 x 240 x 240
generated_output_logits[:,:,:, _slice_] = logits.transpose(0, 1, 3, 2)
final_pred = utils.apply_argmax_to_logits(generated_output_logits)
final_pred = postprocessing.class_wise_cc(final_pred)
final_pred = utils.adjust_classes_air_brain_tumour(np.uint8(final_pred))
return np.uint8(final_pred)
def inner_class_classification_with_logits_NCube(self, t1,
t1ce, t2, flair,
brain_mask, mask, N = 64):
"""
output of 3D tiramisu model (tir3Dnet)
mask = numpy array output of ABLnet
N = patch size during inference
"""
t1 = preprocessing.standardize(t1, brain_mask)
t1ce = preprocessing.standardize(t1ce, brain_mask)
t2 = preprocessing.standardize(t2, brain_mask)
flair = preprocessing.standardize(flair, brain_mask)
vol = {}
vol['t1'] = t1
vol['t2'] = t2
vol['t1ce'] = t1ce
vol['flair'] = flair
s = N//4
for key in vol.keys():
vol[key] = np.pad(vol[key], ((s, s), (s, s), (s,s)))
shape = vol['t1'].shape # to exclude batch_size
final_prediction = np.zeros((self.T3Dnclasses, shape[0], shape[1], shape[2]))
x_min, x_max, y_min, y_max, z_min, z_max = 0, shape[0], 0, shape[1], 0, shape[2]
x_min, x_max, y_min, y_max, z_min, z_max = x_min, min(shape[0] - N, x_max), y_min, min(shape[1] - N, y_max), z_min, min(shape[2] - N, z_max)
with torch.no_grad():
for x in tqdm(range(x_min, x_max, N//2)):
for y in range(y_min, y_max, N//2):
for z in range(z_min, z_max, N//2):
high = | np.zeros((1, 4, N, N, N)) | numpy.zeros |
import numpy as np
from gym.spaces import Box
import pyflex
from softgym.envs.fluid_env import FluidEnv
import copy
from softgym.utils.misc import rotate_rigid_object, quatFromAxisAngle
from shapely.geometry import Polygon
import random, math
class PourWaterPosControlEnv(FluidEnv):
def __init__(self, observation_mode, action_mode,
config=None, cached_states_path='pour_water_init_states.pkl', **kwargs):
'''
This class implements a pouring water task.
observation_mode: "cam_rgb" or "point_cloud" or "key_point"
action_mode: "rotation_bottom, rotation_top"
'''
assert observation_mode in ['cam_rgb', 'point_cloud', 'key_point']
assert action_mode in ['rotation_bottom', 'rotation_top']
if action_mode == 'rotation_top':
cached_states_path = 'pour_water_init_states_top.pkl'
self.observation_mode = observation_mode
self.action_mode = action_mode
self.wall_num = 5 # number of glass walls. floor/left/right/front/back
super().__init__(**kwargs)
self.get_cached_configs_and_states(cached_states_path, self.num_variations)
if observation_mode in ['point_cloud', 'key_point']:
if observation_mode == 'key_point':
obs_dim = 0
obs_dim += 13 # Pos (x, z, theta) and shape (w, h, l) of the two cups and the water height.
else:
max_particle_num = 13 * 13 * 13 * 4
obs_dim = max_particle_num * 3
self.particle_obs_dim = obs_dim
# z and theta of the second cup (poured_glass) does not change and thus are omitted.
# add: frac of water in control cup, frac of water in target cup
self.observation_space = Box(low=np.array([-np.inf] * obs_dim), high=np.array([np.inf] * obs_dim), dtype=np.float32)
elif observation_mode == 'cam_rgb':
self.observation_space = Box(low=-np.inf, high=np.inf, shape=(self.camera_height, self.camera_width, 3),
dtype=np.float32)
default_config = self.get_default_config()
border = default_config['glass']['border']
if action_mode in ["rotation_bottom", "rotation_top"]:
self.action_direct_dim = 3
# control the (x, y) corrdinate of the floor center, and theta its rotation angle.
action_low = np.array([-0.01, -0.01, -0.015])
action_high = np.array([0.01, 0.01, 0.015])
self.action_space = Box(action_low, action_high, dtype=np.float32)
else:
raise NotImplementedError
self.prev_reward = 0
self.reward_min = 0
self.reward_max = 1
self.reward_range = self.reward_max - self.reward_min
def get_default_config(self):
config = {
'fluid': {
'radius': 0.033,
'rest_dis_coef': 0.55,
'cohesion': 0.1, # not actually used, instead, is computed as viscosity * 0.01
'viscosity': 2,
'surfaceTension': 0,
'adhesion': 0.0, # not actually used, instead, is computed as viscosity * 0.001
'vorticityConfinement': 40,
'solidpressure': 0.,
'dim_x': 8,
'dim_y': 18,
'dim_z': 8,
},
'glass': {
'border': 0.045,
'height': 0.6,
'glass_distance': 1.0,
'poured_border': 0.04,
'poured_height': 0.6,
},
'camera_name': 'default_camera',
}
return config
def generate_env_variation(self, num_variations=5, config=None, **kwargs):
dim_xs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
dim_zs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
self.cached_configs = []
self.cached_init_states = []
if config is None:
config = self.get_default_config()
config_variations = [copy.deepcopy(config) for _ in range(num_variations)]
for idx in range(num_variations):
print("pour water generate env variations {}".format(idx))
dim_x = random.choice(dim_xs)
dim_z = random.choice(dim_zs)
m = min(dim_x, dim_z)
p = np.random.rand()
water_radius = config['fluid']['radius'] * config['fluid']['rest_dis_coef']
if p < 0.5: # medium water volumes
print("generate env variation: medium volume water")
dim_y = int(3.5 * m)
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 2
glass_height = h + (np.random.rand() - 0.5) * 0.001 + config['glass']['border']
else:
print("generate env variation: large volume water")
dim_y = 4 * m
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 3
glass_height = h + (m + np.random.rand()) * 0.001 + config['glass']['border']
config_variations[idx]['fluid']['dim_x'] = dim_x
config_variations[idx]['fluid']['dim_y'] = dim_y
config_variations[idx]['fluid']['dim_z'] = dim_z
# if you want to change viscosity also, uncomment this
# config_variations[idx]['fluid']['viscosity'] = self.rand_float(2.0, 10.0)
config_variations[idx]['glass']['height'] = glass_height
config_variations[idx]['glass']['poured_height'] = glass_height + np.random.rand() * 0.1
config_variations[idx]['glass']['glass_distance'] = self.rand_float(0.05 * m, 0.09 * m) + (dim_x + 4) * water_radius / 2.
config_variations[idx]['glass']['poured_border'] = 0.03
self.set_scene(config_variations[idx])
init_state = copy.deepcopy(self.get_state())
self.cached_configs.append(config_variations[idx])
self.cached_init_states.append(init_state)
combined = [self.cached_configs, self.cached_init_states]
return self.cached_configs, self.cached_init_states
def get_config(self):
if self.deterministic:
config_idx = 0
else:
config_idx = np.random.randint(len(self.config_variations))
self.config = self.config_variations[config_idx]
return self.config
def _reset(self):
'''
reset the environment to the initial state.
return the initial observation.
'''
self.inner_step = 0
self.performance_init = None
info = self._get_info()
self.performance_init = info['performance']
pyflex.step(render=True)
return self._get_obs()
def get_state(self):
'''
get the positions and velocities of flex particles, and the positions of flex shapes.
'''
particle_pos = pyflex.get_positions()
particle_vel = pyflex.get_velocities()
shape_position = pyflex.get_shape_states()
return {'particle_pos': particle_pos, 'particle_vel': particle_vel, 'shape_pos': shape_position,
'glass_x': self.glass_x, 'glass_y': self.glass_y, 'glass_rotation': self.glass_rotation,
'glass_states': self.glass_states, 'poured_glass_states': self.poured_glass_states,
'glass_params': self.glass_params, 'config_id': self.current_config_id}
def set_state(self, state_dic):
'''
set the positions and velocities of flex particles, and the positions of flex shapes.
'''
pyflex.set_positions(state_dic["particle_pos"])
pyflex.set_velocities(state_dic["particle_vel"])
pyflex.set_shape_states(state_dic["shape_pos"])
self.glass_x = state_dic['glass_x']
self.glass_y = state_dic['glass_y']
self.glass_rotation = state_dic['glass_rotation']
self.glass_states = state_dic['glass_states']
self.poured_glass_states = state_dic['poured_glass_states']
for _ in range(5):
pyflex.step()
def initialize_camera(self):
self.camera_params = {
'default_camera': {'pos': np.array([1.4, 1.5, 0.1]),
'angle': np.array([0.45 * np.pi, -60 / 180. * np.pi, 0]),
'width': self.camera_width,
'height': self.camera_height},
'cam_2d': {'pos': np.array([0.5, .7, 4.]),
'angle': np.array([0, 0, 0.]),
'width': self.camera_width,
'height': self.camera_height}
}
def set_poured_glass_params(self, config):
params = config
self.glass_distance = params['glass_distance']
self.poured_border = params['poured_border']
self.poured_height = params['poured_height']
fluid_radis = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
self.poured_glass_dis_x = self.fluid_params['dim_x'] * fluid_radis + 0.07 # glass floor length
self.poured_glass_dis_z = self.fluid_params['dim_z'] * fluid_radis + 0.07 # glass width
params['poured_glass_dis_x'] = self.poured_glass_dis_x
params['poured_glass_dis_z'] = self.poured_glass_dis_z
params['poured_glass_x_center'] = self.x_center + params['glass_distance']
self.glass_params.update(params)
def set_pouring_glass_params(self, config):
params = config
self.border = params['border']
self.height = params['height']
fluid_radis = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
self.glass_dis_x = self.fluid_params['dim_x'] * fluid_radis + 0.1 # glass floor length
self.glass_dis_z = self.fluid_params['dim_z'] * fluid_radis + 0.1 # glass width
params['glass_dis_x'] = self.glass_dis_x
params['glass_dis_z'] = self.glass_dis_z
params['glass_x_center'] = self.x_center
self.glass_params = params
def set_scene(self, config, states=None, create_only=False):
'''
Construct the pouring water scene.
'''
# create fluid
super().set_scene(config) # do not sample fluid parameters, as it's very likely to generate very strange fluid
# compute glass params
if states is None:
self.set_pouring_glass_params(config["glass"])
self.set_poured_glass_params(config["glass"])
else:
glass_params = states['glass_params']
self.border = glass_params['border']
self.height = glass_params['height']
self.glass_dis_x = glass_params['glass_dis_x']
self.glass_dis_z = glass_params['glass_dis_z']
self.glass_distance = glass_params['glass_distance']
self.poured_border = glass_params['poured_border']
self.poured_height = glass_params['poured_height']
self.poured_glass_dis_x = glass_params['poured_glass_dis_x']
self.poured_glass_dis_z = glass_params['poured_glass_dis_z']
self.glass_params = glass_params
# create pouring glass & poured glass
self.create_glass(self.glass_dis_x, self.glass_dis_z, self.height, self.border)
self.create_glass(self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
# move pouring glass to be at ground
self.glass_states = self.init_glass_state(self.x_center, 0, self.glass_dis_x, self.glass_dis_z, self.height, self.border)
# move poured glass to be at ground
self.poured_glass_states = self.init_glass_state(self.x_center + self.glass_distance, 0,
self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
self.set_shape_states(self.glass_states, self.poured_glass_states)
# record glass floor center x, y, and rotation
self.glass_x = self.x_center
if self.action_mode == 'rotation_bottom':
self.glass_y = 0
elif self.action_mode == 'rotation_top':
self.glass_y = 0.5 * self.border + self.height
self.glass_rotation = 0
# only create the glass and water, without setting their states
# this is only used in the pourwater amount env.
if create_only:
return
# no cached init states passed in
if states is None:
fluid_pos = np.ones((self.particle_num, self.dim_position))
# move water all inside the glass
fluid_radius = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
fluid_dis = np.array([1.0 * fluid_radius, fluid_radius * 0.5, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 2. + self.glass_params['border']
lower_z = -self.glass_params['glass_dis_z'] / 2 + self.glass_params['border']
lower_y = self.glass_params['border']
if self.action_mode in ['sawyer', 'franka']:
lower_y += 0.56 # NOTE: robotics table
lower = np.array([lower_x, lower_y, lower_z])
cnt = 0
rx = int(self.fluid_params['dim_x'] * 1)
ry = int(self.fluid_params['dim_y'] * 1)
rz = int(self.fluid_params['dim_z'] / 1)
for x in range(rx):
for y in range(ry):
for z in range(rz):
fluid_pos[cnt][:3] = lower + np.array([x, y, z]) * fluid_dis # + np.random.rand() * 0.01
cnt += 1
pyflex.set_positions(fluid_pos)
print("stabilize water!")
for _ in range(100):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = np.sum(not_in_glass)
while not_total_num > 0:
max_height_now = np.max(water_state[:, 1])
fluid_dis = np.array([1.0 * fluid_radius, fluid_radius * 1, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 4
lower_z = -self.glass_params['glass_dis_z'] / 4
lower_y = max_height_now
lower = np.array([lower_x, lower_y, lower_z])
cnt = 0
dim_x = config['fluid']['dim_x']
dim_z = config['fluid']['dim_z']
for w_idx in range(len(water_state)):
if not in_glass[w_idx]:
water_state[w_idx][:3] = lower + fluid_dis * np.array([cnt % dim_x, cnt // (dim_x * dim_z), (cnt // dim_x) % dim_z])
cnt += 1
pyflex.set_positions(water_state)
for _ in range(40):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = np.sum(not_in_glass)
for _ in range(30):
pyflex.step()
else: # set to passed-in cached init states
self.set_state(states)
def _get_obs(self):
'''
return the observation based on the current flex state.
'''
if self.observation_mode == 'cam_rgb':
return self.get_image(self.camera_width, self.camera_height)
elif self.observation_mode == 'point_cloud':
particle_pos = np.array(pyflex.get_positions()).reshape([-1, 4])[:, :3].flatten()
pos = np.zeros(shape=self.particle_obs_dim, dtype=np.float)
pos[:len(particle_pos)] = particle_pos
return pos.flatten()
elif 'key_point' in self.observation_mode:
pos = np.empty(0, dtype=np.float)
water_state = pyflex.get_positions().reshape([-1, 4])
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
in_poured_glass = float(np.sum(in_poured_glass)) / len(water_state)
in_control_glass = float(np.sum(in_control_glass)) / len(water_state)
cup_state = np.array([self.glass_x, self.glass_y, self.glass_rotation, self.glass_dis_x, self.glass_dis_z, self.height,
self.glass_distance + self.glass_x, self.poured_height, self.poured_glass_dis_x, self.poured_glass_dis_z,
self._get_current_water_height(), in_poured_glass, in_control_glass])
return np.hstack([pos, cup_state]).flatten()
else:
raise NotImplementedError
def compute_reward(self, obs=None, action=None, set_prev_reward=False):
"""
The reward is computed as the fraction of water in the poured glass.
NOTE: the obs and action params are made here to be compatible with the MultiTask env wrapper.
"""
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = np.sum(good_water)
reward = float(good_water_num) / water_num
return reward
def _get_info(self):
# Duplicate of the compute reward function!
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = np.sum(good_water)
performance = float(good_water_num) / water_num
performance_init = performance if self.performance_init is None else self.performance_init # Use the original performance
return {
'normalized_performance': (performance - performance_init) / (self.reward_max - performance_init),
'performance': performance
}
def _step(self, action):
'''
action: np.ndarray of dim 1x3, (x, y, theta). (x, y) specifies the floor center coordinate, and theta
specifies the rotation.
'''
# treat the action as an increment and clip its range
move = action[:2]
rotate = action[2]
move = np.clip(move, a_min=self.action_space.low[0], a_max=self.action_space.high[0])
rotate = np.clip(rotate, a_min=self.action_space.low[2], a_max=self.action_space.high[2])
dx, dy, dtheta = move[0], move[1], rotate
x, y, theta = self.glass_x + dx, self.glass_y + dy, self.glass_rotation + dtheta
# check if the movement of the pouring glass collides with the poured glass.
# the action only take effects if there is no collision
new_states = self.rotate_glass(self.glass_states, x, y, theta)
if not self.judge_glass_collide(new_states, theta) and self.above_floor(new_states, theta):
self.glass_states = new_states
self.glass_x, self.glass_y, self.glass_rotation = x, y, theta
else: # invalid move, old state becomes the same as the current state
self.glass_states[:, 3:6] = self.glass_states[:, :3].copy()
self.glass_states[:, 10:] = self.glass_states[:, 6:10].copy()
# pyflex takes a step to update the glass and the water fluid
self.set_shape_states(self.glass_states, self.poured_glass_states)
pyflex.step(render=True)
self.inner_step += 1
def create_glass(self, glass_dis_x, glass_dis_z, height, border):
"""
the glass is a box, with each wall of it being a very thin box in Flex.
each wall of the real box is represented by a box object in Flex with really small thickness (determined by the param border)
dis_x: the length of the glass
dis_z: the width of the glass
height: the height of the glass.
border: the thickness of the glass wall.
the halfEdge determines the center point of each wall.
Note: this is merely setting the length of each dimension of the wall, but not the actual position of them.
That's why left and right walls have exactly the same params, and so do front and back walls.
"""
center = np.array([0., 0., 0.])
quat = quatFromAxisAngle([0, 0, -1.], 0.)
boxes = []
# floor
halfEdge = np.array([glass_dis_x / 2. + border, border / 2., glass_dis_z / 2. + border])
boxes.append([halfEdge, center, quat])
# left wall
halfEdge = np.array([border / 2., (height) / 2., glass_dis_z / 2. + border])
boxes.append([halfEdge, center, quat])
# right wall
boxes.append([halfEdge, center, quat])
# back wall
halfEdge = np.array([(glass_dis_x) / 2., (height) / 2., border / 2.])
boxes.append([halfEdge, center, quat])
# front wall
boxes.append([halfEdge, center, quat])
for i in range(len(boxes)):
halfEdge = boxes[i][0]
center = boxes[i][1]
quat = boxes[i][2]
pyflex.add_box(halfEdge, center, quat)
return boxes
def rotate_glass(self, prev_states, x, y, theta):
'''
given the previous states of the glass, rotate it with angle theta.
update the states of the 5 boxes that form the box: floor, left/right wall, back/front wall.
rotate the glass, where the center point is the center of the floor or the top.
state:
0-3: current (x, y, z) coordinate of the center point
3-6: previous (x, y, z) coordinate of the center point
6-10: current quat
10-14: previous quat
'''
dis_x, dis_z = self.glass_dis_x, self.glass_dis_z
quat_curr = quatFromAxisAngle([0, 0, -1.], theta)
border = self.border
# states of 5 walls
states = np.zeros((5, self.dim_shape_state))
for i in range(5):
states[i][3:6] = prev_states[i][:3]
states[i][10:] = prev_states[i][6:10]
x_center = x
# rotation center is the floor center
rotate_center = np.array([x_center, y, 0.])
if self.action_mode == 'rotation_bottom':
# floor: center position does not change
states[0, :3] = np.array([x_center, y, 0.])
# left wall: center must move right and move down.
relative_coord = np.array([-(dis_x+ border) / 2., (self.height) / 2., 0.])
states[1, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# right wall
relative_coord = np.array([(dis_x+ border) / 2., (self.height) / 2., 0.])
states[2, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# back wall
relative_coord = np.array([0, (self.height) / 2., -(dis_z+ border) / 2.])
states[3, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# front wall
relative_coord = np.array([0, (self.height) / 2., (dis_z+ border) / 2.])
states[4, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
elif self.action_mode == 'rotation_top':
# floor
relative_coord = np.array([0, -self.height, 0.])
states[0, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# left wall
relative_coord = np.array([-(dis_x+ border) / 2., -self.height / 2., 0.])
states[1, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# right wall
relative_coord = np.array([(dis_x+ border) / 2., -self.height / 2., 0.])
states[2, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# back wall
relative_coord = np.array([0, -self.height / 2., -(dis_z+ border) / 2.])
states[3, :3] = rotate_rigid_object(center=rotate_center, axis= | np.array([0, 0, -1]) | numpy.array |
import pandas as pd
from matplotlib import pyplot as plt
import PIL
from PIL import ImageDraw
import numpy as np
from sklearn import preprocessing
csv = pd.read_csv("output/results/results.csv")
# print(result_csv)
# result_csv.hist(column="06_MUCOSA", bins=50)
# result_csv.hist(column="07_ADIPOSE", bins=50)
# result_csv.hist(column="04_LYMPHO", bins=50)
# result_csv.hist(column="03_COMPLEX", bins=50)
# result_csv.hist(column="02_STROMA", bins=50)
# result_csv.hist(column="01_TUMOR", bins=50)
# result_csv.hist(column="uncertainty", bins=50)
# plt.show()
result_csv = csv.iloc[2145:]
width = np.max(result_csv["patch_x"]) - np.min(result_csv["patch_x"])
width_ = width+width*0.3
height = np.max(result_csv["patch_y"]) - np.min(result_csv["patch_y"])
height_ = height+height*0.3
img = PIL.Image.new("RGB",(int(width_) , int(height_)), (255, 255, 255))
Drawer = ImageDraw.Draw(img)
colors = ["green", "red", "blue", "purple", "yellow", "white", "black", "pink"]
# min_max_scaler = preprocessing.MinMaxScaler()
# scaled = (result_csv["uncertainty"]-result_csv["uncertainty"].min())/(result_csv["uncertainty"].max()-result_csv["uncertainty"].min())
for patch in range(len(result_csv)):
# print(result_csv.iloc[patch])
x = result_csv.iloc[patch]["patch_x"] - np.min(result_csv["patch_x"])
y = result_csv.iloc[patch]["patch_y"] - | np.min(result_csv["patch_y"]) | numpy.min |
import cv2
import librosa
import numpy as np
from support.data_model import CLASSES
FRAME_DIMS = [120, 160]
MIN_LEFT_COLUMN = 1
MIN_TOP_ROW = 1
MAX_RIGHT_COLUMN = FRAME_DIMS[1] - 1
MAX_BOTTOM_ROW = FRAME_DIMS[0] - 1
MAX_DIMENSION = min(MAX_BOTTOM_ROW-MIN_TOP_ROW, MAX_RIGHT_COLUMN-MIN_LEFT_COLUMN)
def extract_hdf5_frames(hdf5_frames):
"""
Extract frames from HDF5 dataset. This converts the frames to a list.
:param hdf5_frames: original video frames
:return [frame] list of frames
"""
frames = []
for i in range(len(hdf5_frames)):
hdf5_frame = hdf5_frames[str(i)]
assert len(hdf5_frame) == 120
frame_rows = []
for rnum in range(len(hdf5_frame)):
row = hdf5_frame[rnum]
frame_rows.append(row)
frames.append(np.array(frame_rows))
return frames
def extract_hdf5_crops(hdf5_crops):
"""
Extract crops from HDF5 dataset. This converts the crops to a list.
:param hdf5_crops: stored crops
:return [crop] list of crops
"""
crops = []
for i in range(len(hdf5_crops)):
crops.append(hdf5_crops[str(i)][1])
return crops
def center_position(low_bound, high_bound, low_limit, high_limit, space):
"""
Center bounds within available space.
:param low_bound: current lower bound
:param high_bound: current upper bound
:param low_limit: minimum allowed bound
:param high_limit: maximum allowed bound
:param space: available space
:return: centered low bound, centered high bound
"""
size = high_bound - low_bound
extra = space - size
if extra > 0:
if low_bound == low_limit:
return 0, size
elif high_bound == high_limit:
return space - size, space
else:
leading_pad = extra // 2
adjusted_low_bound = low_bound - leading_pad
if adjusted_low_bound < low_limit:
leading_pad = low_bound - low_limit
adjusted_high_bound = low_bound - leading_pad + space
if adjusted_high_bound > high_limit:
leading_pad = space - size - (high_limit - high_bound)
return leading_pad, leading_pad + size
else:
return 0, size
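# Illustrative sketch, not part of the original source: centre a 10-wide span
# (columns 40-50) inside 20 columns of output space when it is not flush
# against either frame edge.
def _example_center_position():
    return center_position(40, 50, MIN_LEFT_COLUMN, MAX_RIGHT_COLUMN, 20)  # (5, 15)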
def square_bounds(bounds, size):
l, t, r, b = bounds
if b-t != size or r-l != size:
# pad or crop bounds to square
size = min(size, MAX_DIMENSION)
dc0, dc1 = center_position(l, r, MIN_LEFT_COLUMN, MAX_RIGHT_COLUMN, size)
dr0, dr1 = center_position(t, b, MIN_TOP_ROW, MAX_BOTTOM_ROW, size)
# adjust values for cropping frames
t -= dr0
b = t + size
l -= dc0
r = l + size
assert t >= MIN_TOP_ROW and b <= MAX_BOTTOM_ROW and l >= MIN_LEFT_COLUMN and r <= MAX_RIGHT_COLUMN, \
f'square_cropped original bounds {bounds} with output bounds {(l, t, r, b)}'
return (l, t, r, b)
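# Illustrative sketch, not part of the original source: pad a 20x10 bounding
# box out to a 20x20 square, respecting the frame limits defined above.
def _example_square_bounds():
    return square_bounds((30, 40, 50, 50), 20)  # (30, 35, 50, 55)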
def normalize(x):
x -= x.min()
xmax = x.max()
if xmax > 0:
x = x.astype(np.float32) * (255 / xmax)
return x
def clip_and_scale(frame):
"""
Clip values in frame below max(frame min, mean - 1.5*std), and scale the result to the 0-255 range.
:param frame: frame
:return: clipped and scaled frame
"""
frame_min = np.min(frame)
frame_max = np.max(frame)
if frame_max > frame_min:
frame_mean = np.mean(frame)
frame_std = np.std(frame)
use_min = max(frame_min, frame_mean - 3 * frame_std // 2)
use_range = frame_max - use_min
assert use_range > 0, f'clipping with use_min {use_min}, frame_max {frame_max}'
frame = np.clip(frame, use_min, frame_max) - use_min
return (frame / use_range) * 255
else:
return frame
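# Illustrative sketch, not part of the original source: clip_and_scale maps a
# toy thermal frame into the 0-255 range after clipping the low tail at
# mean - 1.5*std. The pixel values are made up.
def _example_clip_and_scale():
    frame = np.array([[0.0, 10.0], [20.0, 200.0]])
    return clip_and_scale(frame)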
def prune_frames(icrops, iframes, ibounds, imasses, clip_key, track_key, keepfn=lambda a, b: True):
"""
Drop useless frames, based on the cropped frame data. If a crop is either all zero, or the same bounds as the prior
crop and with no significant change in pixel values, it's dropped from the list (along with the matching frame,
bound, and mass). For all frames retained a count of non-zero pixels in the cropped frame data is calculated and
returned. All returned values, except for the crops and frames, are in the form of numpy arrays.
:param icrops: dataset crops
:param iframes: original video frame
:param ibounds: input bounds
:param imasses: input masses
:param clip_key: clip identifier
:param track_key: track identifier
:param keepfn: function (sum of pixels in frame, total difference in frames) to decide whether frame is kept
:return: (crops, frames, bounds, masses, pixels)
"""
ocrops = []
oframes = []
obounds = []
omasses = []
opixels = []
zero_count = 0
static_count = 0
last_crop = None
last_bound = None
for crop, frame, bound, mass in zip(icrops, iframes, ibounds, imasses):
if not np.any(crop):
zero_count += 1
else:
bound = np.array(bound)
if last_crop is not None and np.array_equal(bound, last_bound) and not keepfn(np.sum(crop), np.sum(np.abs(crop - last_crop))):
static_count += 1
else:
last_crop = crop
last_bound = bound
ocrops.append(crop)
oframes.append(frame)
obounds.append(bound)
omasses.append(mass)
opixels.append((crop > 0).sum())
if zero_count > 0 or static_count > 0:
print(f'{clip_key}-{track_key} dropped {zero_count} zero crops and {static_count} static crops leaving {len(ocrops)} frames')
return ocrops, oframes, np.array(obounds), np.array(omasses), np.array(opixels)
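# Illustrative sketch, not part of the original source: prune_frames drops an
# all-zero crop and, with a keepfn that requires some pixel change, a crop that
# repeats the previous bounds unchanged. The clip/track keys and the arrays are
# made up for the example.
def _example_prune_frames():
    crop_zero = np.zeros((4, 4), dtype=np.float32)
    crop_live = np.ones((4, 4), dtype=np.float32)
    crop_static = crop_live.copy()
    crops = [crop_zero, crop_live, crop_static]
    frames = [np.zeros((120, 160), dtype=np.float32) for _ in crops]
    bounds = [(1, 1, 5, 5)] * 3
    masses = [0.0, 16.0, 16.0]
    return prune_frames(crops, frames, bounds, masses, 'clip0', 'track0',
                        keepfn=lambda total, diff: diff > 0)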
def stepped_resizer(resize_ratios):
def calculate_resize_stepped(maxdim):
return resize_ratios[maxdim]
return calculate_resize_stepped
def smooth_resizer(sample_dim):
def calculate_smooth_resize(maxdim):
ratio = maxdim / sample_dim
if ratio < 0:
return min(1, ratio * 1.5)
else:
return ratio
return calculate_smooth_resize
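# Illustrative sketch, not part of the original source: smooth_resizer returns
# the ratio of the crop's largest dimension to the target sample size, which
# convert_frames below uses to pick the resize interpolation.
def _example_smooth_resizer():
    resize = smooth_resizer(sample_dim=64)
    return resize(32), resize(64), resize(128)  # (0.5, 1.0, 2.0)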
def convert_frames(iframes, background, ibounds, out_dim, resize_calculatorfn):
"""
Convert input crops and raw frames to standardized form for use with a model. The largest dimension of the crop is
used to lookup a resize ratio. The portion of frame data used as input to the resizing is centered on the crop area
(respecting the borders of the frame), and is first adjusted by subtracting the background and normalizing.
:param iframes: original video frame
:param background: background frame
:param ibounds: input bounds
:param out_dim: output size (same dimension for both rows and columns)
:param resize_calculatorfn: function to calculate ratio for resizing images
:return: (aframes, obounds, ratios)
"""
aframes = []
obounds = []
ratios = []
for frame, bound in zip(iframes, ibounds):
af = frame - background
af *= af > 0
l, t, r, b = bound
maxdim = max(b-t, r-l)
resize_ratio = resize_calculatorfn(maxdim)
usedim = int(resize_ratio * out_dim)
bnds = bound
# dc0, dc1 = center_position(l, r, MIN_LEFT_COLUMN, MAX_RIGHT_COLUMN, usedim)
# dr0, dr1 = center_position(t, b, MIN_TOP_ROW, MAX_BOTTOM_ROW, usedim)
# expanded_crop = np.zeros((usedim,usedim), np.float32)
# expanded_crop[dr0:dr1,dc0:dc1] = crop
# crop = expanded_crop
bnds = square_bounds(bnds, usedim)
l, t, r, b = bnds
af = clip_and_scale(af[t:b, l:r].astype(np.float32))
if not af.max() < 255.5:
print(f'convert_frames invalid af.max()={af.max()} after initial clip_and_scale, maxdim={maxdim}, bounds={bound}')
resize_inter = cv2.INTER_AREA if resize_ratio > 1 else cv2.INTER_CUBIC if resize_ratio < 1 else None
if resize_inter is not None:
resize_dims = (out_dim, out_dim)
#crop = cv2.resize(crop, resize_dims, interpolation=resize_inter)
af = cv2.resize(af, resize_dims, interpolation=resize_inter)
afscaled = normalize(librosa.power_to_db(af, ref=np.max))
# crop = normalize(crop)
# if not crop.max() < 255.5:
# print(f'convert_frames invalid crop.max()={crop.max()} after normalize, maxdim={maxdim}, bounds={bounds}')
af = normalize(af)
if not af.max() < 255.5:
print(f'convert_frames invalid af.max()={af.max()} after normalize, maxdim={maxdim}, bounds={bound}')
# crop = np.round(crop).astype(np.uint8)
# ocrops.append(crop)
        af = np.round(af).astype(np.uint8)
        aframes.append(af)
        obounds.append(bnds)
        ratios.append(resize_ratio)
    return aframes, obounds, ratios
"""
stscan
~~~~~~
Implements the "prospective" space-time permutation scan statistic algorithm.
This was originally described in (1) in reference to disease outbreak
detection. The algorithm is implemented in the software package (2). We
apply it to crime prediction as in (3).
We look at events which have occurred in the past, and try to detect "clusters"
which persist up to the current time. To do this, a simple statistic
which measures deviation from expected randomness is computed for every
possible space/time "cylinder": events which occur in a circular disk in space,
in an interval of time (always ending at the point of prediction). The space/
time cylinder with the largest statistic is deemed the most likely "cluster".
Further clusters are computed by finding the next most likely cluster which
does not intersect (in space only) the existing cluster.
As detailed in (1) and (2) it is possible to use monte-carlo methods to
estimate the p-value of the primary cluster, but for prediction purposes this
is not necessary. As adapted from (3), we use the clusters in order to find
a relative "risk" of crime.
References
~~~~~~~~~~
1. Kulldorff et al, "A Space–Time Permutation Scan Statistic for Disease
Outbreak Detection", PLoS Med 2(3): e59, DOI:10.1371/journal.pmed.0020059
2. Kulldorff M. and Information Management Services, Inc. SaTScanTM v8.0:
Software for the spatial and space-time scan statistics.
http://www.satscan.org/, 2016.
3. Adepeju, <NAME>, "Novel evaluation metrics for sparse spatiotemporal
point process hotspot predictions - a crime case study", International
Journal of Geographical Information Science, 30:11, 2133-2154,
DOI:10.1080/13658816.2016.1159684
"""
from . import predictors
from . import data
import numpy as _np
import collections as _collections
import datetime as _datetime
Cluster = _collections.namedtuple("Cluster", ["centre", "radius"])
def _possible_start_times(timestamps, max_interval_length, end_time):
times = _np.datetime64(end_time) - timestamps
zerotime = _np.timedelta64(0,"s")
times = timestamps[(zerotime <= times) & (times <= max_interval_length)]
if len(times) <= 1:
return times
deltas = times[1:] - times[:-1]
return _np.hstack(([times[0]],times[1:][deltas > zerotime]))
def _possible_space_clusters(points, max_radius=_np.inf):
discs = []
for pt in points.T:
distances = pt[:,None] - points
distances = _np.sqrt(_np.sum(distances**2, axis=0))
distances.sort()
discs.extend(Cluster(pt, r*1.00001) for r in distances if r <= max_radius)
# Reduce number
# Use a tuple here so we can use a set; this is _much_ faster
allmasks = [tuple(_np.sum((points - cluster.centre[:,None])**2, axis=0) <= cluster.radius**2)
for cluster in discs]
masks = []
set_masks = set()
for i,m in enumerate(allmasks):
if m not in set_masks:
masks.append(i)
set_masks.add(m)
return [discs[i] for i in masks]
def grid_timed_points(timed_points, region, grid_size):
"""Return a new instance of :class:`TimedPoints` where each space
coordinate is moved to the centre of each grid cell.
:param timed_points: Input data.
:param region: A `data.RectangularRegion` instance giving the
region to grid to. Only the x,y offset is used.
:param grid_size: The width and height of each grid cell.
"""
offset = _np.array([region.xmin, region.ymin])
newcoords = _np.floor((timed_points.coords - offset[:,None]) / grid_size) + 0.5
newcoords = newcoords * grid_size + offset[:,None]
return data.TimedPoints(timed_points.timestamps, newcoords)
def bin_timestamps(timed_points, offset, bin_length):
"""Return a new instance of :class:`TimedPoints` where each timestamped is
adjusted. Any timestamp between `offset` and `offset + bin_length` is
mapped to `offset`; timestamps between `offset + bin_length` and
`offset + 2 * bin_length` are mapped to `offset + bin_length`, and so
forth.
:param timed_points: Input data.
:param offset: A datetime-like object which is the start of the binning.
:param bin_length: A timedelta-like object which is the length of each bin.
"""
offset = _np.datetime64(offset)
bin_length = _np.timedelta64(bin_length)
new_times = _np.floor((timed_points.timestamps - offset) / bin_length)
new_times = offset + new_times * bin_length
return data.TimedPoints(new_times, timed_points.coords)
class _STSTrainerBase(predictors.DataTrainer):
"""Internal class, abstracting out some common features."""
def __init__(self):
self.geographic_population_limit = 0.5
self.geographic_radius_limit = 3000
self.time_population_limit = 0.5
self.time_max_interval = _np.timedelta64(12, "W")
self.data = None
self.region = None
@property
def region(self):
"""The :class:`data.RectangularRegion` which contains the data; used
by the output to generate grids etc. If set to `None` then will
automatically be the bounding-box of the input data.
"""
if self._region is None:
self.region = None
return self._region
@region.setter
def region(self, value):
if value is None and self.data is not None:
value = self.data.bounding_box
self._region = value
@property
def geographic_population_limit(self):
"""No space disc can contain more than this fraction of the total
number of events.
"""
return self._geo_pop_limit
@geographic_population_limit.setter
def geographic_population_limit(self, value):
if value < 0 or value > 1:
raise ValueError("Should be fraction of total population, so value between 0 and 1")
self._geo_pop_limit = value
@property
def geographic_radius_limit(self):
"""The maximum radius of the space discs."""
return self._geo_max_radius
@geographic_radius_limit.setter
def geographic_radius_limit(self, value):
self._geo_max_radius = value
@property
def time_population_limit(self):
"""No time interval can contain more than this fraction of the total
        number of events.
"""
return self._time_pop_limit
@time_population_limit.setter
def time_population_limit(self, value):
if value < 0 or value > 1:
raise ValueError("Should be fraction of total population, so value between 0 and 1")
self._time_pop_limit = value
@property
def time_max_interval(self):
"""The maximum length of a time interval."""
return self._time_max_len
@time_max_interval.setter
def time_max_interval(self, value):
self._time_max_len = _np.timedelta64(value)
def _copy_settings(self, other):
other.geographic_population_limit = self.geographic_population_limit
other.geographic_radius_limit = self.geographic_radius_limit
other.time_population_limit = self.time_population_limit
other.time_max_interval = self.time_max_interval
def bin_timestamps(self, offset, bin_length):
"""Returns a new instance with the underlying timestamped data
adjusted. Any timestamp between `offset` and `offset + bin_length`
is mapped to `offset`; timestamps between `offset + bin_length`
and `offset + 2 * bin_length` are mapped to `offset + bin_length`,
and so forth.
:param offset: A datetime-like object which is the start of the
binning.
:param bin_length: A timedelta-like object which is the length of
each bin.
"""
new = self.clone()
new.data = bin_timestamps(self.data, offset, bin_length)
return new
def grid_coords(self, region, grid_size):
"""Returns a new instance with the underlying coordinate data
adjusted to always be the centre point of grid cells.
:param region: A `data.RectangularRegion` instance giving the
region to grid to. Only the x,y offset is used.
:param grid_size: The width and height of each grid cell.
"""
new = self.clone()
new.data = grid_timed_points(self.data, region, grid_size)
return new
@staticmethod
def _statistic(actual, expected, total):
"""Calculate the log likelihood"""
stat = actual * (_np.log(actual) - _np.log(expected))
stat += (total - actual) * (_np.log(total - actual) - _np.log(total - expected))
return stat
def maximise_clusters(self, clusters, time=None):
"""The prediction method will return the smallest clusters (subject
to each cluster being centred on the coordinates of an event). This
        method will enlarge each cluster to the maximum radius it can be
without including further events.
:param clusters: List-like object of :class:`Cluster` instances.
:param time: Only data up to and including this time is used when
computing clusters. If `None` then use the last timestamp of the
data.
:return: Array of clusters with larger radii.
"""
events, time = self._events_time(time)
out = []
for disc in clusters:
distances = _np.sum((events.coords - disc.centre[:,None])**2, axis=0)
rr = disc.radius ** 2
new_radius = _np.sqrt(min( dd for dd in distances if dd > rr ))
out.append(Cluster(disc.centre, new_radius))
return out
def to_satscan(self, filename):
"""Writes the training data to two SaTScan compatible files. Does
*not* currently write settings, so these will need to be entered
manually.
:param filename: Saves files "filename.geo" and "filename.cas"
        containing the geometry and "cases" respectively.
"""
def timeformatter(t):
t = _np.datetime64(t, "s")
return str(t)
unique_coords = list(set( (x,y) for x,y in self.data.coords.T ))
with open(filename + ".geo", "w") as geofile:
for i, (x,y) in enumerate(unique_coords):
print("{}\t{}\t{}".format(i+1, x, y), file=geofile)
unique_times = list(set( t for t in self.data.timestamps ))
with open(filename + ".cas", "w") as casefile:
for i, (t) in enumerate(unique_times):
pts = self.data.coords.T[self.data.timestamps == t]
pts = [ (x,y) for x,y in pts ]
import collections
c = collections.Counter(pts)
for pt in c:
index = unique_coords.index(pt)
print("{}\t{}\t{}".format(index+1, c[pt], timeformatter(t)), file=casefile)
def _events_time(self, time=None):
"""If time is `None` set to last event in data. Return data clamped to
time range, and timestamp actually used."""
events = self.data.events_before(time)
if time is None:
time = self.data.timestamps[-1]
return events, time
from . import stscan2 as _stscan2
class STSTrainer(_STSTrainerBase):
"""From past events, produce an instance of :class:`STSResult` which
stores details of the found clusters. Contains a variety of properties
which may be changed to affect the prediction behaviour.
This version uses numpy code, and is far faster. As the *exact order* we
consider regions in is not stable, the clusters found will be slightly
different.
"""
def __init__(self):
super().__init__()
def clone(self):
"""Return a new instance which has all the underlying settings but with
no data.
"""
new = STSTrainer()
self._copy_settings(new)
return new
_TIME_UNIT = _np.timedelta64(1, "ms")
def to_scanner(self, time=None):
"""Transform the input data into the "abstract representation". For
testing.
:param time: Timestamp of the prediction point. Only data up to
and including this time is used when computing clusters. If `None`
then use the last timestamp of the data.
:return: An instance of :class:`STScanNumpy`.
"""
events, time = self._events_time(time)
times_into_past = (time - events.timestamps) / self._TIME_UNIT
scanner = _stscan2.STScanNumpy(events.coords, times_into_past)
self._copy_settings(scanner)
scanner.time_max_interval = self.time_max_interval / self._TIME_UNIT
return scanner, time
def predict(self, time=None, max_clusters=None):
"""Make a prediction.
:param time: Timestamp of the prediction point. Only data up to
and including this time is used when computing clusters. If `None`
then use the last timestamp of the data.
:param max_clusters: If not `None` then return at most this many
clusters.
        :return: An instance of :class:`STSResult` giving the found clusters.
"""
scanner, time = self.to_scanner(time)
clusters = []
time_regions = []
stats = []
for cluster in scanner.find_all_clusters():
clusters.append(Cluster(cluster.centre, cluster.radius))
start_time = time - cluster.time * self._TIME_UNIT
time_regions.append((start_time, time))
stats.append(cluster.statistic)
max_clusters = self.maximise_clusters(clusters, time)
return STSResult(self.region, clusters, max_clusters,
time_ranges=time_regions, statistics=stats)
class STSTrainerSlow(_STSTrainerBase):
"""From past events, produce an instance of :class:`STSResult` which
stores details of the found clusters. Contains a variety of properties
which may be changed to affect the prediction behaviour.
"""
def __init__(self):
super().__init__()
def clone(self):
"""Return a new instance which has all the underlying settings but with
no data.
"""
new = STSTrainerSlow()
self._copy_settings(new)
return new
def _possible_start_times(self, end_time, timestamps):
"""A generator returing all possible start times"""
N = len(timestamps)
times = _np.unique(timestamps)
for st in times:
events_in_time = (timestamps >= st) & (timestamps <= end_time)
count = _np.sum(events_in_time)
if count <= self.time_population_limit * N:
yield st, count, events_in_time
def _disc_generator(self, discs, events):
"""A generator which yields triples `(disc, count, mask)` where `disc`
is a :class:`Cluster` giving the space disk, `count` is the number of
events in this disc, and `mask` is the boolean mask of which events are
in the disc.
:param discs: An iterable giving the discs
"""
for disc in discs:
space_counts = ( _np.sum((events.coords - disc.centre[:,None])**2, axis=0)
<= disc.radius ** 2 )
count = _np.sum(space_counts)
yield disc, count, space_counts
def _possible_discs(self, events):
"""Yield all possible discs which satisfy our limits"""
all_discs = _possible_space_clusters(events.coords, self.geographic_radius_limit)
N = events.number_data_points
for disc, count, space_counts in self._disc_generator(all_discs, events):
if count <= N * self.geographic_population_limit:
yield disc, count, space_counts
def _time_regions(self, disc_times, events, end_time, N, times_lookup):
times = _np.unique(disc_times)
for start_time in times:
if end_time - start_time > self.time_max_interval:
continue
total_count = times_lookup.get(start_time)
if total_count is None or total_count > self.time_population_limit * N:
continue
count = _np.sum(disc_times >= start_time)
yield start_time, total_count, count
def _scan_all(self, end_time, events, discs_generator, disc_output=None, timestamps=None):
if timestamps is None:
timestamps = events.timestamps
best = (None, -_np.inf, None)
N = events.number_data_points
times_lookup = { time:count for time, count, _ in
self._possible_start_times(end_time, timestamps) }
for disc, space_count, space_mask in discs_generator:
if disc_output is not None:
disc_output.append(disc)
for start, time_count, actual in self._time_regions(
events.timestamps[space_mask], events, end_time, N, times_lookup):
expected = time_count * space_count / N
if actual > expected and actual > 1:
stat = self._statistic(actual, expected, N)
if stat > best[1]:
best = (disc, stat, start)
return best
def _remove_intersecting(self, all_discs, disc):
return [ d for d in all_discs
if _np.sum((d.centre - disc.centre)**2) > (d.radius + disc.radius)**2
]
def predict(self, time=None, max_clusters=None):
"""Make a prediction.
:param time: Timestamp of the prediction point. Only data up to
and including this time is used when computing clusters. If `None`
then use the last timestamp of the data.
:param max_clusters: If not `None` then return at most this many
clusters.
        :return: An instance of :class:`STSResult` giving the found clusters.
"""
events, time = self._events_time(time)
all_discs = []
clusters = []
best_disc, stat, start_time = self._scan_all(time, events,
self._possible_discs(events), all_discs)
while best_disc is not None:
clusters.append((best_disc, stat, start_time))
all_discs = self._remove_intersecting(all_discs, best_disc)
if len(all_discs) == 0:
break
if max_clusters is not None and len(clusters) >= max_clusters:
break
best_disc, stat, start_time = self._scan_all(time, events,
self._disc_generator(all_discs, events))
clusters, stats, start_times = zip(*clusters)
time_regions = [(s,time) for s in start_times]
max_clusters = self.maximise_clusters(clusters, time)
return STSResult(self.region, clusters, max_clusters,
time_ranges=time_regions, statistics=stats)
def monte_carlo_simulate(self, time=None, runs=999):
"""Perform a monte carlo simulation for the purposes of estimating
p-values. We repeatedly shuffle the timestamps of the data and then
find the most likely cluster for each new dataset. This method is
more efficient than calling :method:`predict` repeatedly with
shuffled data.
:param time: Optionally restrict the data to before this time, as for
:method:`predict`
:param runs: The number of samples to take, by default 999
:return: An ordered list of statistics.
"""
events, time = self._events_time(time)
all_discs = []
best_disc, stat, start_time = self._scan_all(time, events,
self._possible_discs(events), all_discs)
timestamps = _np.array(events.timestamps)
stats = []
for _ in range(runs):
_np.random.shuffle(timestamps)
_,stat,_ = self._scan_all(time, events,
self._disc_generator(all_discs, events), timestamps = timestamps)
stats.append(stat)
stats = _np.asarray(stats)
stats.sort()
return stats
class STSContinuousPrediction(predictors.ContinuousPrediction):
"""A :class:`predictors.ContinuousPrediction` which uses the computed
clusters and a user-defined weight to generate a continuous "risk"
prediction. Set the :attr:`weight` to change weight.
It is not clear that the generated "risk" has much to do with reality!
    We, by default, use enlarged cluster sizes (which removes the problem of
clusters with zero radius!) which can lead to overlapping clusters.
:param clusters: List of computed clusters.
"""
def __init__(self, clusters):
super().__init__()
self.weight = self.quatric_weight
self.clusters = clusters
pass
@staticmethod
def quatric_weight(t):
return (1 - t * t) ** 2
@property
def weight(self):
"""A function-like object which when called with a float between 0 and
1 (interpreted as the distance to the edge of a unit disc) returns a
        float between 0 and 1, the "intensity". Default is the quartic
function :math:`t \mapsto (1-t^2)^2`.
"""
return self._weight
@weight.setter
def weight(self, value):
self._weight = value
def _vectorised_weight(self, values):
"""Allows values to be a one-dimensional array. Returns 0 is the
value is not in the interval [0,1).
"""
values = _np.asarray(values)
allowed = (values >= 0) & (values < 1)
if len(values.shape) > 0:
return _np.asarray([self.weight(x) if a else 0.0 for x, a in zip(values,allowed)])
return self.weight(values) if allowed else 0.0
def risk(self, x, y):
"""The relative "risk", varying between 0 and `n`, the number of
clusters detected.
"""
pt = _np.array([x,y])
if len(pt.shape) == 1:
pt = pt[:,None]
risk = _np.zeros(pt.shape[1])
for n, cluster in enumerate(self.clusters):
dist = ( _np.sqrt(_np.sum((pt - _np.asarray(cluster.centre)[:,None])**2, axis=0))
/ cluster.radius )
weights = self._vectorised_weight(dist)
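            # Each disc contributes a base level of (number of clusters - n - 1)
            # plus the weight, and only for points inside the disc (weights > 0),
            # so earlier (more likely) clusters contribute more risk than later ones.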
risk += (len(self.clusters) - n - 1 + weights) * (weights > 0)
return risk
class STSResult():
"""Stores the computed clusters from :class:`STSTrainer`. These can be
used to produce gridded or continuous "risk" predictions.
:param region: The rectangular region enclosing the data.
:param clusters: A list of :class:`Cluster` instances describing the found
clusters.
:param max_clusters: A list of :class:`Cluster` instances describing the
clusters with radii enlarged to the maximal extent.
:param time_ranges: The time range associated with each cluster.
:param statistics: The value of the log likelihood for each cluster.
:param pvalues: (Optionally) the estimated p-values.
"""
def __init__(self, region, clusters, max_clusters=None, time_ranges=None,
statistics=None, pvalues=None):
self.region = region
self.clusters = clusters
if max_clusters is None:
max_clusters = clusters
self.max_clusters = max_clusters
self.time_ranges = time_ranges
self.statistics = statistics
self.pvalues = pvalues
pass
def _add_cluster(self, cluster, risk_matrix, grid_size, base_risk):
"""Adds risk in base_risk + (0,1]"""
cells = []
for y in range(risk_matrix.shape[0]):
for x in range(risk_matrix.shape[1]):
xcoord = (x + 0.5) * grid_size + self.region.xmin
ycoord = (y + 0.5) * grid_size + self.region.ymin
distance = _np.sqrt((xcoord - cluster.centre[0]) ** 2 +
(ycoord - cluster.centre[1]) ** 2)
if distance <= cluster.radius:
cells.append((x,y,distance))
cells.sort(key = lambda triple : triple[2], reverse=True)
for i, (x,y,d) in enumerate(cells):
risk_matrix[y][x] = base_risk + (i+1) / len(cells)
def grid_prediction(self, grid_size, use_maximal_clusters=False):
"""Using the grid size, construct a grid from the region and
produce an instance of :class:`predictors.GridPredictionArray` which
contains the relative "risk".
We treat each cluster in order, so that the primary cluster has higher
risk than the secondary cluster, and so on. Within each cluster,
cells near the centre have a higher risk than cells near the boundary.
        A grid cell is considered to be "in" the cluster if the centre of the
        grid cell is inside the cluster.
:param grid_size: The size of resulting grid.
:param use_maximal_clusters: If `True` then use the largest possible
radii for each cluster.
"""
xs, ys = self.region.grid_size(grid_size)
        risk_matrix = _np.zeros((ys, xs))
import numpy as np
import warnings
def evaluate(env, agent, writer, all_rewards, all_deviations_mean, all_deviations_std, over_episodes=100):
done, ep_reward, values, values_std, action_values, entropies, qval_delta = False, [], [], [], [], [], []
state = env.reset()
while not done:
action, value, entropy = agent.policy(state, eval=True)
state, reward, done, _ = env.step(action)
ep_reward.append(reward)
values.append(value.numpy().mean())
values_std.append(value.numpy().std())
if len(value.view(-1)) > action:
action_values.append(value.view(-1)[action].item())
else:
action_values.append(np.nan)
entropies.append(entropy)
# calculate target discounted reward
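    # backwards recursion over the episode: G[i] = r[i] + discount * G[i+1],
    # so each stored action value can be compared against the realised return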
cum_reward, cr = np.zeros_like(ep_reward), 0
for i in reversed(range(len(ep_reward))):
cr = cr + ep_reward[i]
cum_reward[i] = cr
cr *= agent.discount
for i, qval in enumerate(action_values):
# compare action value with real outcome
qval_delta.append(qval - cum_reward[i])
all_rewards.append(sum(ep_reward))
all_deviations_mean.append(np.mean(qval_delta))
all_deviations_std.append(np.std(qval_delta))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
writer.add_scalar("eval/Reward", sum(ep_reward), len(all_rewards))
writer.add_scalar("eval/Reward (SMA)", np.nanmean(all_rewards[-over_episodes:]), len(all_rewards))
writer.add_scalar("eval/Action-Value deviation (mean)", np.nanmean(qval_delta), len(all_rewards))
writer.add_scalar("eval/Action-Value deviation (mean) (SMA)", np.nanmean(all_deviations_mean[-over_episodes:]),
len(all_rewards))
writer.add_scalar("eval/Action-Value deviation (std)", np.nanstd(qval_delta), len(all_rewards))
writer.add_scalar("eval/Action-Value deviation (std) (SMA)", np.nanmean(all_deviations_std[-over_episodes:]),
len(all_rewards))
writer.add_scalar("eval/Max-Action-Value (mean)", np.nanmean(action_values), len(all_rewards))
writer.add_scalar("eval/Max-Action-Value (std)", np.nanstd(action_values), len(all_rewards))
writer.add_scalar("eval/Values", np.nanmean(values), len(all_rewards))
writer.add_scalar("eval/Action-Values std", np.nanmean(values_std), len(all_rewards))
writer.add_scalar("eval/Entropy", | np.nanmean(entropies) | numpy.nanmean |
"""
Tests for Univariate Hawkes processes
"""
import unittest as ut
import mock
import numpy as np
import os
from hawkeslib.model.uv_exp import UnivariateExpHawkesProcess as UVHP
class UVExpSamplerTests(ut.TestCase):
T = 10000
def setUp(self):
self.uv = UVHP()
self.uv.set_params(.5, .2, 10.)
def test_branching_correct_number_samples(self):
smp = self.uv.sample(self.T, method="branching")
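        # expected event count of a stationary Hawkes process: E[N] = mu * T / (1 - alpha),
        # here with baseline mu = 0.5 and branching ratio alpha = 0.2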
EN = .5 * self.T / (1 - self.uv._alpha)
N = float(len(smp))
assert abs(N - EN) / N < .05
@mock.patch('hawkeslib.model.uv_exp.uv_exp_sample_branching')
def test_branching_calls_correct_cython(self, mock_method):
smp = self.uv.sample(self.T, method="branching")
mock_method.assert_called_with(self.T, .5, .2, 10.)
def test_ogata_correct_number_samples(self):
smp = self.uv.sample(self.T, method="ogata")
EN = .5 * self.T / (1 - self.uv._alpha)
N = float(len(smp))
assert abs(N - EN) / N < .05
@mock.patch('hawkeslib.model.uv_exp.uv_exp_sample_ogata')
def test_ogata_calls_correct_cython(self, mock_method):
smp = self.uv.sample(self.T, method="ogata")
mock_method.assert_called_with(self.T, .5, .2, 10.)
class UVExpSetterGetterTests(ut.TestCase):
def test_params_start_none(self):
uv = UVHP()
self.assertIsNone(uv._alpha)
self.assertIsNone(uv._mu)
self.assertIsNone(uv._theta)
def test_getter(self):
uv = UVHP()
uv._mu, uv._alpha, uv._theta = .5, .4, .3
pars1 = np.array(uv.get_params())
pars2 = np.array([.5, .4, .3])
np.testing.assert_allclose(pars1, pars2)
def test_setter(self):
uv = UVHP()
uv.set_params(.5, .4, .2)
pars1 = np.array(uv.get_params())
        pars2 = np.array([.5, .4, .2])
        np.testing.assert_allclose(pars1, pars2)
# Digital Signal Processing - Lab 1 - Part 4 (BONUS)
# <NAME> - 03117037
# <NAME> - 03117165
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import librosa
import sounddevice as sd
plt.close('all')
counter = 0
# Part 4 (Bonus)
#4.1 Open .wav file of salsa music signal 1
salsa1, fs = librosa.load('salsa_excerpt1.mp3')
sd.play(salsa1, fs) #great track :)
Ts = 1/fs # fs = 22050Hz sampling frequency
segment = salsa1[10000:75536] #segment of 2^16=65536 samples
t = np.arange(0,np.size(segment)*Ts, Ts) #time index
counter = counter+1
plt.figure(counter)
plt.plot(t,segment, 'b', label = 'Samples L=2^16')
plt.xlabel('Time [sec]')
plt.ylabel('Amplitude')
plt.title('Segment of "salsa_excerpt1.mp3"')
plt.legend()
#4.2 Discrete Wavelet Transform
from pywt import wavedec
coeffs = wavedec(segment, 'db1', level=7)/np.sqrt(2)
ya7, yd7, yd6, yd5, yd4, yd3, yd2, yd1 = coeffs
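#db1 (Haar) decomposition at 7 levels splits the 2^16-sample segment into the
#detail bands yd1..yd7 (32768 samples down to 512, halving per level) and the
#approximation ya7 (512 samples)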
#4.3 Envelope Detection
#(a) Absolute Value
absolutes = np.abs(coeffs)
za7 = absolutes[0]
zd7 = absolutes[1]
zd6 = absolutes[2]
zd5 = absolutes[3]
zd4 = absolutes[4]
zd3 = absolutes[5]
zd2 = absolutes[6]
zd1 = absolutes[7]
#(b) Lowpass Filter
a0 = 0.006
a = np.zeros(7)
for i in range(1,8):
a[i-1] = a0*(2**(i+1))
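#one-pole lowpass (leaky integrator) on the rectified subband:
# x[n] = (1-a)*x[n-1] + a*|y[n]|, with the mean removed afterwards;
#the coefficient a = a0*2^(i+1) is scaled up for the coarser, lower-rate bands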
def envelope(signal, absolute, a):
x = np.zeros(np.size(signal))
x[0] = a*absolute[0]
for i in range(1,np.size(x)):
x[i] = (1-a)*x[i-1] + a*absolute[i]
x = x - np.mean(x)
return x
xa7 = envelope(ya7, za7, a[6])
xd7 = envelope(yd7, zd7, a[6])
xd6 = envelope(yd6, zd6, a[5])
xd5 = envelope(yd5, zd5, a[4])
xd4 = envelope(yd4, zd4, a[3])
xd3 = envelope(yd3, zd3, a[2])
xd2 = envelope(yd2, zd2, a[1])
xd1 = envelope(yd1, zd1, a[0])
n = np.arange(0,np.size(yd3),1) #number of samples
counter=counter+1
plt.figure(counter)
plt.plot(n, yd3, 'b', label = 'Detal yd3[n]')
plt.plot(n, xd3, 'r', label = 'Envelope xd3[n]')
plt.xlabel('Samples (2^13 = 8192)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd3')
plt.show()
plt.legend()
counter=counter+1
plt.figure(counter)
n = np.arange(0,np.size(yd6),1) #number of samples
plt.plot(n, yd6, 'b', label = 'Detail yd6[n]')
plt.plot(n, xd6, 'r', label = 'Envelope xd6[n]')
plt.xlabel('Samples (2^10 = 1024)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd6')
plt.show()
plt.legend()
#4.4 Sum of Envelopes and Autocorrelation
nvalues = np.arange(0, 32768, 1)
n = np.arange(0, 32768, 1)
xd1 = np.interp(nvalues, n, xd1)
n = np.arange(0, 16384, 1)
xd2 = np.interp(nvalues, n, xd2)
n = np.arange(0, 8192, 1)
xd3 = np.interp(nvalues, n, xd3)
n = np.arange(0, 4096, 1)
xd4 = np.interp(nvalues, n, xd4)
n = np.arange(0, 2048, 1)
xd5 = np.interp(nvalues, n, xd5)
n = np.arange(0, 1024, 1)
xd6 = np.interp(nvalues, n, xd6)
n = np.arange(0, 512, 1)
xd7 = np.interp(nvalues, n, xd7)
n = np.arange(0, 512, 1)
xa7 = np.interp(nvalues, n, xa7)
xsum = xd1+xd2+xd3+xd4+xd5+xd6+xd7+xa7
autocorrelation = np.correlate(xsum,xsum, 'full')[len(xsum)-1:]
autocorrelation = sp.ndimage.filters.gaussian_filter1d(autocorrelation,150)
counter = counter+1
plt.figure(counter)
t = np.arange(Ts,np.size(autocorrelation)*Ts*2, 2*Ts) #time index
plt.plot(t, autocorrelation)
plt.xlabel('Time [sec]')
plt.title('Autocorrelation of Salsa Excerpt 1')
#Find the maximums of Autocorrelation
maximums = np.array(sp.signal.argrelextrema(autocorrelation, np.greater))
#Keep every two of them - Maximums of great amplitude will show as the beat
maximums = maximums[0,::2]
#Calculate number of samples between every two peaks of autocorrelation
samplesbetween = np.zeros(np.size(maximums))
for i in range(1,np.size(maximums)):
samplesbetween[i] = maximums[i]-maximums[i-1]
samplesbetween = samplesbetween[1:(np.size(samplesbetween))]
#Find the mean number of samples between every two peaks of autocorrelation
samplebeat = np.mean(samplesbetween)
print('Salsa1: Autocorrelation peaks every %i samples.' %samplebeat)
#Convert to time
timebeat = samplebeat*2*Ts*1000 #msec
print('Salsa1: Autocorrelation peaks approximately every %d msec.' %timebeat)
#Calculate BPM os salsa1
bpm_rate = 60*(1000/(timebeat))
print('Salsa1: Beats Per Minute Rate = %d bpm.' %bpm_rate)
#Visualise BPM of salsa1 with help of plotting
counter = counter+1
plt.figure(counter)
plt.plot(60/t,autocorrelation)
plt.xlim(20, 180)
plt.xlabel('Beats Per Minute (BPM)')
plt.ylabel('Autocorrelation')
plt.title('BPM of Salsa Excerpt 1')
#################### SALSA 2 #####################
#4.1 Open .wav file of salsa music signal 2
salsa2, fs = librosa.load('salsa_excerpt2.mp3')
#sd.play(salsa2, fs)
Ts = 1/fs # fs = 22050Hz sampling frequency
segment = salsa2[60000:125536] #segment of 2^16=65536 samples
t = np.arange(0,np.size(segment)*Ts, Ts) #time index
counter = counter+1
plt.figure(counter)
plt.plot(t,segment, 'b', label = 'Samples L=2^16')
plt.xlabel('Time [sec]')
plt.ylabel('Amplitude')
plt.title('Segment of "salsa_excerpt2.mp3"')
plt.legend()
#4.2 Discrete Wavelet Transform
from pywt import wavedec
coeffs = wavedec(segment, 'db1', level=7)/np.sqrt(2)
ya7, yd7, yd6, yd5, yd4, yd3, yd2, yd1 = coeffs
#4.3 Envelope Detection
#(a) Absolute Value
absolutes = np.abs(coeffs)
za7 = absolutes[0]
zd7 = absolutes[1]
zd6 = absolutes[2]
zd5 = absolutes[3]
zd4 = absolutes[4]
zd3 = absolutes[5]
zd2 = absolutes[6]
zd1 = absolutes[7]
#(b) Lowpass Filter
a0 = 0.003
a = np.zeros(7)
for i in range(1,8):
a[i-1] = a0*(2**(i+1))
def envelope(signal, absolute, a):
x = np.zeros(np.size(signal))
x[0] = a*absolute[0]
for i in range(1,np.size(x)):
x[i] = (1-a)*x[i-1] + a*absolute[i]
x = x - np.mean(x)
return x
xa7 = envelope(ya7, za7, a[6])
xd7 = envelope(yd7, zd7, a[6])
xd6 = envelope(yd6, zd6, a[5])
xd5 = envelope(yd5, zd5, a[4])
xd4 = envelope(yd4, zd4, a[3])
xd3 = envelope(yd3, zd3, a[2])
xd2 = envelope(yd2, zd2, a[1])
xd1 = envelope(yd1, zd1, a[0])
n = np.arange(0,np.size(yd3),1) #number of samples
counter=counter+1
plt.figure(counter)
plt.plot(n, yd3, 'b', label = 'Detal yd3[n]')
plt.plot(n, xd3, 'r', label = 'Envelope xd3[n]')
plt.xlabel('Samples (2^13 = 8192)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd3')
plt.show()
plt.legend()
counter=counter+1
plt.figure(counter)
n = np.arange(0,np.size(yd6),1) #number of samples
plt.plot(n, yd6, 'b', label = 'Detail yd6[n]')
plt.plot(n, xd6, 'r', label = 'Envelope xd6[n]')
plt.xlabel('Samples (2^10 = 1024)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd6')
plt.show()
plt.legend()
#4.4 Sum of Envelopes and Autocorrelation
nvalues = np.arange(0, 32768, 1)
n = np.arange(0, 32768, 1)
xd1 = np.interp(nvalues, n, xd1)
n = np.arange(0, 16384, 1)
xd2 = np.interp(nvalues, n, xd2)
n = np.arange(0, 8192, 1)
xd3 = np.interp(nvalues, n, xd3)
n = np.arange(0, 4096, 1)
xd4 = np.interp(nvalues, n, xd4)
n = np.arange(0, 2048, 1)
xd5 = np.interp(nvalues, n, xd5)
n = np.arange(0, 1024, 1)
xd6 = np.interp(nvalues, n, xd6)
n = np.arange(0, 512, 1)
xd7 = np.interp(nvalues, n, xd7)
n = np.arange(0, 512, 1)
xa7 = np.interp(nvalues, n, xa7)
xsum = xd1+xd2+xd3+xd4+xd5+xd6+xd7+xa7
autocorrelation = np.correlate(xsum,xsum, 'full')[len(xsum)-1:]
autocorrelation = sp.ndimage.filters.gaussian_filter1d(autocorrelation,130)
counter = counter+1
plt.figure(counter)
t = np.arange(Ts,np.size(autocorrelation)*Ts*2, 2*Ts) #time index
plt.plot(t, autocorrelation)
plt.xlabel('Time [sec]')
plt.title('Autocorrelation of Salsa Excerpt 2')
#Find the maximums of Autocorrelation
maximums = np.array(sp.signal.argrelextrema(autocorrelation, np.greater))
#Keep every two of them - Maximums of great amplitude will show as the beat
maximums = maximums[0,::2]
#Calculate number of samples between every two peaks of autocorrelation
samplesbetween = np.zeros(np.size(maximums))
for i in range(1,np.size(maximums)):
samplesbetween[i] = maximums[i]-maximums[i-1]
samplesbetween = samplesbetween[1:(np.size(samplesbetween))]
#Find the mean number of samples between every two peaks of autocorrelation
samplebeat = np.mean(samplesbetween)
print('Salsa2: Autocorrelation peaks every %i samples.' %samplebeat)
#Convert to time
timebeat = samplebeat*2*Ts*1000 #msec
print('Salsa2: Autocorrelation peaks approximately every %d msec.' %timebeat)
#Calculate BPM os salsa1
bpm_rate = 60*(1000/(timebeat))
print('Salsa2: Beats Per Minute Rate = %d bpm.' %bpm_rate)
#Visualise BPM of salsa1 with help of plotting
counter = counter+1
plt.figure(counter)
plt.plot(60/t,autocorrelation)
plt.xlim(20, 180)
plt.xlabel('Beats Per Minute (BPM)')
plt.ylabel('Autocorrelation')
plt.title('BPM of Salsa Excerpt 2')
#################### RUMBA #####################
#4.1 Open .wav file of rumba music signal
rumba, fs = librosa.load('rumba_excerpt.mp3')
#sd.play(rumba,fs)
Ts = 1/fs # fs = 22050Hz sampling frequency
segment = rumba[350000:415536] #segment of 2^16=65536 samples
t = np.arange(0,np.size(segment)*Ts, Ts) #time index
counter = counter+1
plt.figure(counter)
plt.plot(t,segment, 'b', label = 'Samples L=2^16')
plt.xlabel('Time [sec]')
plt.ylabel('Amplitude')
plt.title('Segment of "rumba_excerpt.mp3"')
plt.legend()
#4.2 Discrete Wavelet Transform
from pywt import wavedec
coeffs = wavedec(segment, 'db1', level=7)/np.sqrt(2)
ya7, yd7, yd6, yd5, yd4, yd3, yd2, yd1 = coeffs
#4.3 Envelope Detection
#(a) Absolute Value
absolutes = np.abs(coeffs)
za7 = absolutes[0]
zd7 = absolutes[1]
zd6 = absolutes[2]
zd5 = absolutes[3]
zd4 = absolutes[4]
zd3 = absolutes[5]
zd2 = absolutes[6]
zd1 = absolutes[7]
#(b) Lowpass Filter
a0 = 0.0005
a = np.zeros(7)
for i in range(1,8):
a[i-1] = a0*(2**(i+1))
def envelope(signal, absolute, a):
x = np.zeros(np.size(signal))
x[0] = a*absolute[0]
for i in range(1,np.size(x)):
x[i] = (1-a)*x[i-1] + a*absolute[i]
x = x - np.mean(x)
return x
xa7 = envelope(ya7, za7, a[6])
xd7 = envelope(yd7, zd7, a[6])
xd6 = envelope(yd6, zd6, a[5])
xd5 = envelope(yd5, zd5, a[4])
xd4 = envelope(yd4, zd4, a[3])
xd3 = envelope(yd3, zd3, a[2])
xd2 = envelope(yd2, zd2, a[1])
xd1 = envelope(yd1, zd1, a[0])
n = np.arange(0,np.size(yd3),1) #number of samples
counter=counter+1
plt.figure(counter)
plt.plot(n, yd3, 'b', label = 'Detal yd3[n]')
plt.plot(n, xd3, 'r', label = 'Envelope xd3[n]')
plt.xlabel('Samples (2^13 = 8192)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd3')
plt.show()
plt.legend()
counter=counter+1
plt.figure(counter)
n = np.arange(0,np.size(yd6),1) #number of samples
plt.plot(n, yd6, 'b', label = 'Detail yd6[n]')
plt.plot(n, xd6, 'r', label = 'Envelope xd6[n]')
plt.xlabel('Samples (2^10 = 1024)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd6')
plt.show()
plt.legend()
#4.4 Sum of Envelopes and Autocorrelation
nvalues = np.arange(0, 32768, 1)
n = np.arange(0, 32768, 1)
xd1 = np.interp(nvalues, n, xd1)
n = np.arange(0, 16384, 1)
xd2 = np.interp(nvalues, n, xd2)
n = np.arange(0, 8192, 1)
xd3 = np.interp(nvalues, n, xd3)
from glob import glob
import os
import fitsio
import numpy as np
from scipy.ndimage.filters import median_filter
from scipy.ndimage.morphology import binary_dilation
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.multiproc import multiproc
dirprefix = '/global/cfs/cdirs/cosmo/staging/'
def one_file(fn):
T = fits_table()
T.filename = []
T.ext = []
T.ccdname = []
T.expnum = []
T.obsid = []
T.acqnam = []
T.filter = []
T.wcs_ok = []
T.n_masked = []
T.n_dilated_masked = []
T.oow_min = []
T.oow_max = []
T.oow_median = []
T.oow_percentiles = []
T.oow_unmasked_min = []
T.oow_unmasked_max = []
T.oow_unmasked_median = []
T.oow_unmasked_percentiles = []
T.oow_dilated_min = []
T.oow_dilated_max = []
T.oow_dilated_median = []
T.oow_dilated_percentiles = []
T.oow_m3_min = []
T.oow_m3_max = []
T.oow_m3_median = []
T.oow_m3_percentiles = []
T.oow_m5_min = []
T.oow_m5_max = []
T.oow_m5_median = []
T.oow_m5_percentiles = []
print(fn)
F = fitsio.FITS(fn)
phdr = F[0].read_header()
D = fitsio.FITS(fn.replace('_oow_', '_ood_'))
wcs_ok = (phdr.get('WCSCAL', '').strip().lower().startswith('success') or
phdr.get('SCAMPFLG', -1) == 0)
#print(len(F), 'extensions')
for ext in range(1, len(F)):
oow = F[ext].read()
hdr = F[ext].read_header()
ood = D[ext].read()
pct = np.arange(101)
T.filename.append(fn.replace(dirprefix, ''))
T.wcs_ok.append(wcs_ok)
T.ext.append(ext)
T.ccdname.append(hdr['EXTNAME'])
expnum = phdr.get('EXPNUM', 0)
# /global/cfs/cdirs/cosmo/staging/mosaic/CP/V4.3/CP20160124/k4m_160125_104535_oow_zd_ls9.fits.fz
# has EXPNUM blank -> becomes None
if expnum is None:
expnum = 0
print(fn, ext, expnum)
T.expnum.append(expnum)
T.obsid.append(phdr.get('OBSID', ''))
T.acqnam.append(phdr.get('DTACQNAM', ''))
T.filter.append(phdr.get('FILTER'))
bad = (ood > 0)
T.n_masked.append(np.sum(bad))
dbad = binary_dilation(bad, structure=np.ones((3,3),bool))
T.n_dilated_masked.append(np.sum(dbad))
uw = oow[np.logical_not(dbad)]
if len(uw) == 0:
med = np.median(oow)
T.oow_dilated_min.append(0.)
T.oow_dilated_max.append(0.)
T.oow_dilated_median.append(0.)
T.oow_dilated_percentiles.append(np.zeros(len(pct), np.float32))
else:
med = np.median(uw)
T.oow_dilated_min.append(uw.min())
T.oow_dilated_max.append(uw.max())
T.oow_dilated_median.append(np.median(uw))
T.oow_dilated_percentiles.append(np.percentile(uw, pct, interpolation='nearest').astype(np.float32))
T.oow_min.append(oow.min())
T.oow_max.append(oow.max())
T.oow_median.append(np.median(oow))
T.oow_percentiles.append(np.percentile(oow, pct, interpolation='nearest').astype(np.float32))
uw = oow[ood == 0]
if len(uw) == 0:
med = np.median(oow)
T.oow_unmasked_min.append(0.)
T.oow_unmasked_max.append(0.)
T.oow_unmasked_median.append(0.)
T.oow_unmasked_percentiles.append(np.zeros(len(pct), np.float32))
else:
med = np.median(uw)
T.oow_unmasked_min.append(uw.min())
T.oow_unmasked_max.append(uw.max())
T.oow_unmasked_median.append(np.median(uw))
T.oow_unmasked_percentiles.append(np.percentile(uw, pct, interpolation='nearest').astype(np.float32))
# Fill masked OOW pixels with the median value.
oow[ood > 0] = med
# Median filter
m3 = median_filter(oow, 3, mode='constant', cval=med)
m5 = median_filter(oow, 5, mode='constant', cval=med)
T.oow_m3_min.append(m3.min())
T.oow_m3_max.append(m3.max())
        T.oow_m3_median.append(np.median(m3))
# -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate.toolkits._tf_utils as _utils
from .._tf_model import TensorFlowModel
import numpy as _np
from turicreate._deps.minimal_package import _minimal_package_import_check
def _lazy_import_tensorflow():
_tf = _minimal_package_import_check("tensorflow")
return _tf
# Constant parameters for the neural network
CONV_H = 64
LSTM_H = 200
DENSE_H = 128
class ActivityTensorFlowModel(TensorFlowModel):
def __init__(
self,
net_params,
batch_size,
num_features,
num_classes,
prediction_window,
seq_len,
seed,
):
_utils.suppress_tensorflow_warnings()
self.num_classes = num_classes
self.batch_size = batch_size
tf = _lazy_import_tensorflow()
keras = tf.keras
#############################################
# Define the Neural Network
#############################################
inputs = keras.Input(shape=(prediction_window * seq_len, num_features))
        # First convolution layer
dense = keras.layers.Conv1D(
filters=CONV_H,
kernel_size=(prediction_window),
padding='same',
strides=prediction_window,
use_bias=True,
activation='relu',
)
cur_outputs = dense(inputs)
# First dropout layer
dropout = keras.layers.Dropout(
rate=0.2,
seed=seed,
)
cur_outputs = dropout(cur_outputs)
# LSTM layer
lstm = keras.layers.LSTM(
units=LSTM_H,
return_sequences=True,
use_bias=True,
)
cur_outputs = lstm(cur_outputs)
# Second dense layer
dense2 = keras.layers.Dense(DENSE_H)
cur_outputs = dense2(cur_outputs)
# Batch norm layer
batch_norm = keras.layers.BatchNormalization()
cur_outputs = batch_norm(cur_outputs)
# ReLU layer
relu = keras.layers.ReLU()
cur_outputs = relu(cur_outputs)
# Final dropout layer
dropout = keras.layers.Dropout(rate=0.5, seed=seed)
cur_outputs = dropout(cur_outputs)
# Final dense layer
dense3 = keras.layers.Dense(num_classes, use_bias=False)
cur_outputs = dense3(cur_outputs)
# Softmax layer
softmax = keras.layers.Softmax()
cur_outputs = softmax(cur_outputs)
self.model = keras.Model(inputs=inputs, outputs=cur_outputs)
self.model.compile(
loss=tf.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
sample_weight_mode="temporal"
)
#############################################
# Load the Weights of the Neural Network
#############################################
for key in net_params.keys():
net_params[key] = _utils.convert_shared_float_array_to_numpy(net_params[key])
        # Set weights for the first convolution layer
l = self.model.layers[1]
l.set_weights(
(_utils.convert_conv1d_coreml_to_tf(net_params["conv_weight"]),
net_params["conv_bias"])
)
# Set LSTM weights
i2h, h2h, bias = [], [], []
for i in ('i', 'f', 'c', 'o'):
            i2h.append(net_params["lstm_i2h_%s_weight" % i])
            h2h.append(net_params["lstm_h2h_%s_weight" % i])
            bias.append(net_params["lstm_h2h_%s_bias" % i])
i2h = _np.concatenate(i2h, axis=0)
h2h = _np.concatenate(h2h, axis=0)
bias = _np.concatenate(bias, axis=0)
i2h = _np.swapaxes(i2h, 1, 0)
h2h = _np.swapaxes(h2h, 1, 0)
l = self.model.layers[3]
l.set_weights((i2h, h2h, bias))
# Set weight for second dense layer
l = self.model.layers[4]
l.set_weights(
(
net_params['dense0_weight'].reshape(DENSE_H, LSTM_H).swapaxes(0, 1),
net_params['dense0_bias']
)
)
# Set batch Norm weights
l = self.model.layers[5]
l.set_weights(
(
net_params['bn_gamma'],
net_params['bn_beta'],
net_params['bn_running_mean'],
net_params['bn_running_var']
)
)
# Set weights for last dense layer
l = self.model.layers[8]
l.set_weights(
(
net_params['dense1_weight'].reshape((self.num_classes, DENSE_H)).swapaxes(0,1),
)
)
def train(self, feed_dict):
"""
Run session for training with new batch of data (inputs, labels and weights)
Parameters
----------
feed_dict: Dictionary
Dictionary to store a batch of input data, corresponding labels and weights. This is currently
passed from the ac_data_iterator.cpp file when a new batch of data is sent.
Returns
-------
result: Dictionary
Loss per batch and probabilities
"""
for key in feed_dict.keys():
feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
feed_dict[key] = _np.squeeze(feed_dict[key], axis=1)
feed_dict[key] = _np.reshape(
feed_dict[key],
(
feed_dict[key].shape[0],
feed_dict[key].shape[1],
feed_dict[key].shape[2],
),
)
keras = _lazy_import_tensorflow().keras
loss = self.model.train_on_batch(
x=feed_dict['input'],
y=keras.utils.to_categorical(feed_dict['labels'], num_classes=self.num_classes),
sample_weight=_np.reshape(feed_dict['weights'], (self.batch_size, 20))
)
prob = self.model.predict(feed_dict['input'])
probabilities = _np.reshape(
prob, (prob.shape[0], prob.shape[1] * prob.shape[2])
)
result = {"loss": _np.array(loss), "output": _np.array(probabilities)}
return result
def predict(self, feed_dict):
"""
Run session for predicting with new batch of validation data (inputs, labels and weights) as well as test data (inputs)
Parameters
----------
feed_dict: Dictionary
Dictionary to store a batch of input data, corresponding labels and weights. This is currently
passed from the ac_data_iterator.cpp file when a new batch of data is sent.
Returns
-------
result: Dictionary
Loss per batch and probabilities (in case of validation data)
Probabilities (in case only inputs are provided)
"""
# Convert input
for key in feed_dict.keys():
feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
feed_dict[key] = _np.squeeze(feed_dict[key], axis=1)
feed_dict[key] = _np.reshape(
feed_dict[key],
(
feed_dict[key].shape[0],
feed_dict[key].shape[1],
feed_dict[key].shape[2],
),
)
# Generate predictions
prob = self.model.predict(feed_dict['input'])
probabilities = _np.reshape(
prob, (prob.shape[0], prob.shape[1] * prob.shape[2])
)
result = {"output": probabilities}
if "labels" in feed_dict.keys(): # Validation data?
keras = _lazy_import_tensorflow().keras
labels = keras.utils.to_categorical(feed_dict['labels'], num_classes=self.num_classes)
loss = self.model.loss(y_true=labels, y_pred=prob)
loss = keras.backend.get_value(loss)
weights = feed_dict["weights"].reshape(loss.shape)
loss = loss * weights
loss = _np.sum(loss, axis=1)
result["loss"] = loss
return result
def export_weights(self):
"""
Function to store TensorFlow weights back to into a dict in CoreML format to be used
by the C++ implementation
Returns
-------
tf_export_params: Dictionary
Dictionary of weights from TensorFlow stored as {weight_name: weight_value}
"""
tf_export_params = {}
        # First convolution layer
l = self.model.layers[1]
tf_export_params["conv_weight"], tf_export_params["conv_bias"] = l.get_weights()
tf_export_params["conv_weight"] = _utils.convert_conv1d_tf_to_coreml(
tf_export_params["conv_weight"]
)
# LSTM layer
l = self.model.layers[3]
i2h, h2h, bias = l.get_weights()
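        # Keras stores the four LSTM gates (i, f, c, o) concatenated along the
        # output axis of each kernel; split them back into per-gate blocks and
        # transpose from the Keras (input, output) layout for the CoreML-side dict.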
biases = _np.split(bias, 4)
i2h = _np.swapaxes(i2h, 0, 1)
        i2h = _np.split(i2h, 4)
#stats.py
#catalog creation for heliocats
#https://github.com/cmoestl/heliocats
import numpy as np
import pandas as pd
import scipy
from sunpy.time import parse_time
import copy
import matplotlib.dates as mdates
import matplotlib
import seaborn as sns
import datetime
import urllib
import json
import os
import pdb
import scipy.io
import pickle
import sys
import astropy
from astropy.constants import au
import importlib
import cdflib
import matplotlib.pyplot as plt
import heliosat
import heliopy.data.spice as spicedata
import heliopy.spice as spice
from astropy.io.votable import parse_single_table
from config import data_path
from heliocats import data as hd
importlib.reload(hd) #reload again while debugging
#define AU in km
AU=au.value/1e3
######################## general position functions
# def get_mars_position_array():
# ############### Mars position
# planet_kernel=spicedata.get_kernel('planet_trajectories')
# starttime = datetime.datetime(2007, 1, 1)
# endtime = datetime.datetime(2020, 12, 31)
# res_in_hours=1
# mars_time = []
# while starttime < endtime:
# mars_time.append(starttime)
# starttime += datetime.timedelta(hours=res_in_hours)
# mars=spice.Trajectory('4')
# frame='HEEQ'
# mars.generate_positions(mars_time,'Sun',frame)
# mars.change_units(astropy.units.AU)
# [mars_r, mars_lat, mars_lon]=hd.cart2sphere(mars.x,mars.y,mars.z)
# print('mars position done')
# mars_time=np.array(mars_time)
# mars_r=np.array(mars_r)
# mars_lat=np.array(mars_lat)
# mars_lon=np.array(mars_lon)
# return [mars_time,mars_r,np.degrees(mars_lat),np.degrees(mars_lon)]
################################ HI arrival catalog ARRCAT operations ##############################
def load_higeocat_vot(file):
#read HIGEOCAT from https://www.helcats-fp7.eu/catalogues/wp3_cat.html
#https://docs.astropy.org/en/stable/io/votable/
    table = parse_single_table(file)
higeocat = table.array
#usage e.g.
#higeocat['Date']=parse_time(higeocat['Date'][10]).datetime
#access data
#a=table.array['HM HEEQ Long'][10]
return higeocat
def get_insitu_position_time(time1,insitu_location_string,insitu_str,insitu_kernel):
insitu_exist=True
if insitu_location_string=='PSP':
#exclude if time before launch time
if parse_time(time1).plot_date < parse_time(datetime.datetime(2018, 8, 13)).plot_date:
insitu_exist=False
if insitu_location_string=='Solo':
if parse_time(time1).plot_date < parse_time(datetime.datetime(2020, 3, 1)).plot_date:
insitu_exist=False
if insitu_location_string=='Bepi':
if parse_time(time1).plot_date < parse_time(datetime.datetime(2018, 10, 24)).plot_date:
insitu_exist=False
if insitu_location_string=='STB':
if parse_time(time1).plot_date > parse_time(datetime.datetime(2014, 9, 27)).plot_date:
insitu_exist=False
if insitu_location_string=='Ulysses':
#cut off ulysses when no decent in situ data is available anymore
if parse_time(time1).plot_date > parse_time(datetime.datetime(2008, 5, 1)).plot_date:
insitu_exist=False
if insitu_exist == True:
#insitu_kernel=spicedata.get_kernel('insitu_trajectories')
#this needs to be an array, so make two similar times and take the first entry later
insitu_time=[parse_time(time1).datetime,parse_time(time1).datetime]
insitu=spice.Trajectory(insitu_str)
frame='HEEQ'
insitu.generate_positions(insitu_time,'Sun',frame)
insitu.change_units(astropy.units.AU)
[insitu_r, insitu_lat, insitu_lon]=hd.cart2sphere(insitu.x,insitu.y,insitu.z)
#Earth position to Earth L1
if insitu_str=='3': insitu_r[0]=insitu_r[0]-1.5*1e6/AU
insitu_time=np.array(insitu_time)[0]
insitu_r=np.array(insitu_r)[0]
insitu_lat=np.array(insitu_lat)[0]
insitu_lon=np.array(insitu_lon)[0]
else:
insitu_time=np.nan
insitu_r=np.nan
insitu_lat=np.nan
insitu_lon=np.nan
return [insitu_time,insitu_r,np.degrees(insitu_lat),np.degrees(insitu_lon)]
def calculate_arrival(vsse,delta,lamda,rdist,t0_num):
#calculate arrival time after Möstl and Davies 2013 but using ta=t0+Ri/Visse equivalent to ta=t0+Risse/Vsse
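    #SSEF geometry: V_isse = V_sse * ( cos(delta) + sqrt( sin(lambda)^2 - sin(delta)^2 ) ) / ( 1 + sin(lambda) ),
    #with delta the separation of the apex direction from the in situ target and
    #lambda the half width of the circular front (30 deg for SSEF30)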
visse=vsse * ( np.cos(np.radians(delta)) \
+ np.sqrt( np.sin(np.radians(lamda))**2-np.sin(np.radians(delta))**2 ) ) \
/(1+np.sin(np.radians(lamda)) )
#arrival time: convert AU to km and seconds to days
ta=t0_num+(rdist*AU/visse)/(3600*24)
return [mdates.num2date(ta),visse]
def make_arrival_catalog_insitu_ssef30(higeocat,arrcat,ac_old, insitu_location_string, column_list):
#get parameters from HIGEOCAT for arrival catalog
higeocat_time=parse_time(higeocat['Date']).datetime #first HI observation
higeocat_t0=parse_time(higeocat['SSE Launch']).datetime #backprojected launch time
higeocat_t0_num=parse_time(higeocat_t0).plot_date
higeocat_vsse=np.array(higeocat['SSE Speed'])
higeocat_vsse_err=np.array(higeocat['SSE Speed Err'])
higeocat_sse_lon=np.array(higeocat['SSE HEEQ Long' ])
higeocat_sse_lat=np.array(higeocat['SSE HEEQ Lat' ])
higeocat_id=np.array(higeocat['ID'])
higeocat_sc=np.array(higeocat['SC'])
higeocat_pan=np.array(higeocat['PA-N'])
higeocat_pas=np.array(higeocat['PA-S'])
higeocat_pafit=np.array(higeocat['PA-fit'])
higeocat_pacenter=abs((higeocat_pan+higeocat_pas)/2)
#load spice here once for each spacecraft
if insitu_location_string=='STB':
insitu_str='-235'
insitu_kernel=spicedata.get_kernel('stereo_b')
target_name='STEREO-B'
if insitu_location_string=='STA':
insitu_str='-234'
insitu_kernel=spicedata.get_kernel('stereo_a_pred')
insitu_kernel2=spicedata.get_kernel('stereo_a')
spice.furnish(insitu_kernel2)
target_name='STEREO-A'
if insitu_location_string=='Mercury':
insitu_str='1'
insitu_kernel=spicedata.get_kernel('planet_trajectories')
target_name='Mercury'
if insitu_location_string=='Venus':
insitu_str='2'
insitu_kernel=spicedata.get_kernel('planet_trajectories')
target_name='Venus'
if insitu_location_string=='Earth':
insitu_str='3'
insitu_kernel=spicedata.get_kernel('planet_trajectories')
target_name='Earth_L1'
if insitu_location_string=='Mars':
insitu_str='4'
insitu_kernel=spicedata.get_kernel('planet_trajectories')
target_name='Mars'
if insitu_location_string=='PSP':
insitu_str='-96'
insitu_kernel=spicedata.get_kernel('psp_pred')
target_name='PSP'
if insitu_location_string=='Solo':
insitu_str='Solar Orbiter'
insitu_kernel=spicedata.get_kernel('solo_2020')
target_name='SolarOrbiter'
if insitu_location_string=='Bepi':
insitu_str='BEPICOLOMBO MPO'
insitu_kernel=spicedata.get_kernel('bepi_pred')
target_name='BepiColombo'
if insitu_location_string=='Ulysses':
insitu_str='ulysses'
insitu_kernel=spicedata.get_kernel('ulysses')
target_name='Ulysses'
spice.furnish(insitu_kernel)
#half width for SSEF30
lamda=30.0
#new version of ARRCAT with iteration
arrcat_insitu_list = []
#old version without iteration
arrcat_insitu_list_old = []
#go through all HIGEOCAT CME events and check for hit at insitu, with 4 iterations in total
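    #the target keeps moving between CME launch and arrival, so the arrival time is
    #re-derived with the target position evaluated at the previous estimate, and the
    #event is only kept while the apex-target separation stays below the 30 deg half width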
for i in np.arange(len(higeocat_time)):
#get insitu position for launch time t0
[insitu_time,insitu_r,insitu_lat,insitu_lon]=get_insitu_position_time(higeocat_t0[i], insitu_location_string,insitu_str, insitu_kernel)
delta=abs(higeocat_sse_lon[i]-insitu_lon)
#print([insitu_time,insitu_r,insitu_lat,insitu_lon])
if delta < 30:
#calculate arrival time
#print(delta,lamda,insitu_r)
[ta,visse]=calculate_arrival(higeocat_vsse[i],delta, lamda, insitu_r,higeocat_t0_num[i])
#make old version of ARRCAT without iteration and errors
list_old=[higeocat_id[i].decode(),higeocat_sc[i].decode(),target_name,\
parse_time(higeocat_t0[i]).iso[:-7],parse_time(ta).iso[:-7],0,\
np.round(insitu_r,3), np.round(insitu_lon,2), np.round(insitu_lat,2),np.round(insitu_lon-higeocat_sse_lon[i],1),\
higeocat_sse_lon[i],higeocat_sse_lat[i],higeocat_vsse[i],\
higeocat_vsse_err[i], int(np.rint(visse)),0,higeocat_pafit[i],higeocat_pan[i],higeocat_pas[i],higeocat_pacenter[i]]
#print(list1)
arrcat_insitu_list_old.append(list_old)
[insitu_time2,insitu_r2,insitu_lat2,insitu_lon2]=get_insitu_position_time(ta, insitu_location_string,insitu_str, insitu_kernel)
#print(insitu_lon-insitu_lon2)
delta2=abs(higeocat_sse_lon[i]-insitu_lon2)
if delta2 <30:
[ta2,visse2]=calculate_arrival(higeocat_vsse[i],delta2, lamda, insitu_r2,higeocat_t0_num[i])
#print(int((parse_time(ta2).plot_date-parse_time(ta).plot_date)*24))
[insitu_time3,insitu_r3,insitu_lat3,insitu_lon3]=get_insitu_position_time(ta2, insitu_location_string,insitu_str, insitu_kernel)
delta3=abs(higeocat_sse_lon[i]-insitu_lon3)
if delta3 <30:
[ta3,visse3]=calculate_arrival(higeocat_vsse[i],delta3, lamda, insitu_r3,higeocat_t0_num[i])
#print(np.round((parse_time(ta3).plot_date-parse_time(ta2).plot_date)*24,1),int(delta3))
[insitu_time4,insitu_r4,insitu_lat4,insitu_lon4]=get_insitu_position_time(ta3, insitu_location_string,insitu_str, insitu_kernel)
delta4=abs(higeocat_sse_lon[i]-insitu_lon4)
if delta4 <30:
#calculate finally iterated arrival time
[ta4,visse4]=calculate_arrival(higeocat_vsse[i],delta4, lamda, insitu_r4,higeocat_t0_num[i])
#print(np.round((parse_time(ta4).plot_date-parse_time(ta3).plot_date)*24,1),int(delta4))
#print(int(delta4-delta))
#estimate error bar on arrival time adding or subtracting the error in the Vsse speed
[ta4_low,visse4_low]=calculate_arrival(higeocat_vsse[i]-higeocat_vsse_err[i],delta4, lamda, insitu_r4,higeocat_t0_num[i])
[ta4_high,visse4_high]=calculate_arrival(higeocat_vsse[i]+higeocat_vsse_err[i],delta4, lamda, insitu_r4,higeocat_t0_num[i])
#calculate difference in ours high / low to original arrival time and convert to hours
ta4_err_low=abs(parse_time(ta4).plot_date-parse_time(ta4_low).plot_date)*24
ta4_err_high=abs(parse_time(ta4).plot_date-parse_time(ta4_high).plot_date)*24
ta4_err=np.round(np.mean([ta4_err_high,ta4_err_low]),1)
#print(ta4_err_low,ta4_err_high,ta4_err)
#same for arrival speed error
visse4_err_low=abs(visse4_low-visse4)
visse4_err_high=abs(visse4_high-visse4)
visse4_err=int(np.rint(np.mean([visse4_err_high,visse4_err_low])))
#print(visse4_err_low,visse4_err_high,visse4_err,higeocat_vsse_err[i])
#print()
list1=[higeocat_id[i].decode(),higeocat_sc[i].decode(),target_name,\
parse_time(higeocat_t0[i]).iso[:-7],parse_time(ta4).iso[:-7],ta4_err,\
np.round(insitu_r4,3), np.round(insitu_lon4,2), np.round(insitu_lat4,2),np.round(insitu_lon4-higeocat_sse_lon[i],1),\
higeocat_sse_lon[i],higeocat_sse_lat[i],higeocat_vsse[i],\
higeocat_vsse_err[i], int(np.rint(visse4)),visse4_err,higeocat_pafit[i],higeocat_pan[i],higeocat_pas[i],higeocat_pacenter[i]]
#print(list1)
arrcat_insitu_list.append(list1)
#arrcat_insitu=np.array(arrcat_insitu_list)
#print(arrcat_insitu_list)
#make dataframe out of list
ac_old1 = pd.DataFrame(arrcat_insitu_list_old, columns = column_list)
ac_old=ac_old.append(ac_old1)
#make dataframe out of list
ac1 = pd.DataFrame(arrcat_insitu_list, columns = column_list)
arrcat=arrcat.append(ac1)
print('SSEF30 events: ',len(arrcat_insitu_list) )
print(insitu_location_string,' SSEF30 arrival catalog finished.')
print()
return [arrcat,ac_old]
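# Illustrative sketch (hypothetical helper, not called anywhere in this module):
# the four unrolled iterations above follow a simple fixed-point scheme - predict the
# arrival time, re-evaluate the target position at that time, and repeat while the
# longitudinal separation stays inside the SSEF30 half width. This assumes
# calculate_arrival() and get_insitu_position_time() behave exactly as they are used above.
def _ssef30_iterate_arrival_sketch(vsse, sse_lon, t0, t0_num, location_string, insitu_str, kernel, lamda=30.0, n_iter=4):
    ta, visse = None, np.nan
    t_query = t0
    for _ in range(n_iter):
        _, r, _, lon = get_insitu_position_time(t_query, location_string, insitu_str, kernel)
        delta = abs(sse_lon - lon)
        if delta >= lamda:
            #target has left the SSEF30 cone, no arrival predicted
            return None, np.nan
        ta, visse = calculate_arrival(vsse, delta, lamda, r, t0_num)
        #re-evaluate the target position at the newly predicted arrival time
        t_query = ta
    return ta, visse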
###################################### SIRCAT operations ################################
def load_helio4cast_sircat_master_from_excel(file):
''' convert excel master file to pandas dataframe and convert times
to datetime objects
'''
print('load HELCATS SIRCAT from file:', file)
sc=pd.read_excel(file)
sc=sc.drop(columns='Unnamed: 0')
#get beginning of tags for STA to identify allen and jian events
tag_list=[]
for i in np.arange(0,len(sc)):
tag_list.append(sc.sircat_id[i][13]) #14th character is 'J' for Jian or 'A' for Allen events
#convert all times to datetime objects
for i in np.arange(0,sc.shape[0]):
#for STEREO and MAVEN same
if sc.sc_insitu[i] == 'STEREO-A':
#jian events
if tag_list[i] =='J':
#remove leading and ending blank spaces if any and write datetime object into dataframe
sc.at[i,'sir_start_time']= parse_time(str(sc.sir_start_time[i]).strip()).datetime
sc.at[i,'hss_start_time']= parse_time(str(sc.hss_start_time[i]).strip()).datetime
sc.at[i,'sir_end_time']= parse_time(str(sc.sir_end_time[i]).strip()).datetime
#allen events
if tag_list[i] =='A':
#remove leading and ending blank spaces if any and write datetime object into dataframe
sc.at[i,'hss_start_time']= parse_time(str(sc.hss_start_time[i]).strip()).datetime
sc.at[i,'hss_end_time']= parse_time(str(sc.hss_end_time[i]).strip()).datetime
#for Wind and PSP the conversion differs - check the different PSP/Wind sources if needed (Allen and Grandin)
if sc.sc_insitu[i] == 'Wind':
sc.at[i,'hss_start_time']= parse_time(str(sc.hss_start_time[i]).strip()).datetime
sc.at[i,'hss_end_time']=parse_time(str(sc.hss_end_time[i]).strip()).datetime
if sc.sc_insitu[i] == 'PSP':
sc.at[i,'hss_start_time']= parse_time(str(sc.hss_start_time[i]).strip()).datetime
sc.at[i,'hss_end_time']=parse_time(str(sc.hss_end_time[i]).strip()).datetime
if sc.sc_insitu[i] == 'STEREO-B':
#remove leading and ending blank spaces if any and write datetime object into dataframe
sc.at[i,'sir_start_time']= parse_time(str(sc.sir_start_time[i]).strip()).datetime
sc.at[i,'hss_start_time']= parse_time(str(sc.hss_start_time[i]).strip()).datetime
sc.at[i,'sir_end_time']= parse_time(str(sc.sir_end_time[i]).strip()).datetime
if sc.sc_insitu[i] == 'MAVEN':
#remove leading and ending blank spaces if any and write datetime object into dataframe
sc.at[i,'sir_start_time']= parse_time(str(sc.sir_start_time[i]).strip()).datetime
sc.at[i,'hss_start_time']= parse_time(str(sc.hss_start_time[i]).strip()).datetime
sc.at[i,'sir_end_time']= parse_time(str(sc.sir_end_time[i]).strip()).datetime
return sc
def get_sircat_parameters(sc, sci, scat, name):
'''
get parameters
sc - spacecraft data recarray
sci - indices for this spacecraft in sircat
scat - sircat pandas dataframe
'''
fileind='sircat/indices_sircat/SIRCAT_indices_'+name+'.p'
################ extract indices of ICMEs in the respective data (time consuming, so do it once and save)
if os.path.isfile(fileind) == False:
print('extract indices of SIRs in '+ name+ ' data')
#### get all ICMECAT times for this spacecraft as datenum
sc_sir_start=scat.sir_start_time[sci]
sc_hss_start=scat.hss_start_time[sci]
sc_sir_end=scat.sir_end_time[sci]
sc_hss_end=scat.hss_end_time[sci]
### arrays containing the indices of where the SIRs are in the data
sir_start_ind=np.zeros(len(sci),dtype=int)
hss_start_ind=np.zeros(len(sci),dtype=int)
sir_end_ind=np.zeros(len(sci),dtype=int)
hss_end_ind=np.zeros(len(sci),dtype=int)
#check where vt is < or > 450 km/s
vt_lt_450=np.where(sc.vt < 450)[0]
vt_gt_450=np.where(sc.vt > 450)[0]
#check where vt is < or > 350 km/s
vt_lt_350=np.where(sc.vt < 350)[0]
vt_gt_350=np.where(sc.vt > 350)[0]
#this takes some time, get indices in data for each SIRCAT time
for i in np.arange(sci[0],sci[-1]+1):
print(i-sci[0])
if (name== 'STEREO-A'):
tag=scat.sircat_id[i][13]
if tag=='J': #Jian events
print('J', sc_sir_start[i] )
sir_start_ind[i-sci[0]]=np.where(sc.time > sc_sir_start[i])[0][0]-1
hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
sir_end_ind[i-sci[0]]=np.where(sc.time > sc_sir_end[i])[0][0]-1
if tag=='A': #Allen events
print('A', sc_sir_start[i])
hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
hss_end_ind[i-sci[0]]=np.where(sc.time > sc_hss_end[i])[0][0]-1
if (name== 'STEREO-B'):
sir_start_ind[i-sci[0]]=np.where(sc.time > sc_sir_start[i])[0][0]-1
hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
sir_end_ind[i-sci[0]]=np.where(sc.time > sc_sir_end[i])[0][0]-1
#here the hss_end_time needs to be extracted - criteria similar to Grandin et al. 2018
#where stream goes back to (< 450 km/s) after hss start time
#check the indices in the 450 array that are greater than the hss_start index +0.5 days
#24*60 data points
#and take the first one
#take next data point > 450 km/s after hss_start + 6 hours (for getting rid of rapid variations)
#next450=np.where(vt_gt_450 > hss_start_ind[i-sci[0]])[0][0]+6*60
#print(hss_start_ind[i-sci[0]],vt_gt_450[next450])
#then take next data point below 450 after this
#hss_end_ind[i-sci[0]]=vt_lt_450[ np.where(vt_lt_450 > vt_gt_450[next450])[0][0] ]
#print('hss duration in hours ',(hss_end_ind[i-sci[0]]-hss_start_ind[i-sci[0]])/60)
#print(hss_start_ind[i-sci[0]],hss_end_ind[i-sci[0]])
if name== 'MAVEN':
sir_start_ind[i-sci[0]]=np.where(sc.time > sc_sir_start[i])[0][0]-1
hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
sir_end_ind[i-sci[0]]=np.where(sc.time > sc_sir_end[i])[0][0]-1
#hss_end_ind[i-sci[0]]=vt_lt_450[np.where(vt_lt_450 > sir_end_ind[i-sci[0]])[0][0] ]
#take next data point > 450 km/s after hss_start + 2 orbits (for getting rid of rapid variations)
#next350=np.where(vt_gt_350 > hss_start_ind[i-sci[0]])[0][0]+2
#print(hss_start_ind[i-sci[0]],vt_gt_450[next450])
#then take next data point below 450 after this
#hss_end_ind[i-sci[0]]=vt_lt_350[ np.where(vt_lt_350 > vt_gt_350[next350])[0][0] ]
#print('hss duration in hours ',(hss_end_ind[i-sci[0]]-hss_start_ind[i-sci[0]])*4.5)
#print(hss_start_ind[i-sci[0]],hss_end_ind[i-sci[0]])
if name=='Wind':
#here only hss start and hss end exist
hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
hss_end_ind[i-sci[0]]=np.where(sc.time > sc_hss_end[i])[0][0]-1
#future update: set hss_start as sir_start, and add time for hss_start by pt max after sir_start
if name=='PSP':
#here only hss start and hss end exist
hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
hss_end_ind[i-sci[0]]=np.where(sc.time > sc_hss_end[i])[0][0]-1
#future update: set hss_start as sir_start, and add time for hss_start by pt max after sir_start
pickle.dump([sir_start_ind,hss_start_ind,sir_end_ind,hss_end_ind], open(fileind, 'wb'))
############################################
[sir_start_ind, hss_start_ind,sir_end_ind,hss_end_ind]=pickle.load(open(fileind, 'rb'))
#first make hss end time for STEREO-A/B from hss_end_ind index
#if (name== 'STEREO-A') or (name== 'STEREO-B') or (name== 'MAVEN'):
# for i in np.arange(len(sci))-1:
# scat.at[sci[i],'hss_end_time']=sc.time[hss_end_ind[i]]
print('Get parameters for ',name)
####### position
print('position')
#SIR heliodistance
for i in np.arange(len(sci))-1:
scat.at[sci[i],'sc_heliodistance']=np.round(sc.r[hss_start_ind[i]],4)
#SIR longitude
scat.at[sci[i],'sc_long_heeq']=np.round(sc.lon[hss_start_ind[i]],2)
##SIR latitude
scat.at[sci[i],'sc_lat_heeq']=np.round(sc.lat[hss_start_ind[i]],2)
print('hss')
if (name=='PSP'):
sci_istart=mdates.date2num(scat.hss_start_time[sci])
sci_hss_iend=mdates.date2num(scat.hss_end_time[sci])
scat.at[sci,'hss_duration']=np.round((sci_hss_iend-sci_istart)*24,2)
for i in np.arange(0,len(sci)):
#print(i)
#print('hss duration in hours ',(hss_end_ind[i]-hss_start_ind[i])/60)
#v_max
scat.at[sci[i],'hss_vtmax']=np.nan
try:
vmax=np.round(np.nanmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
#if vmax ok:
if np.isnan(vmax)==False:
scat.at[sci[i],'hss_vtmax']=vmax
#vtmaxtime - search for index in sliced array and at beginning of array to see the index in the whole dataset
scat.at[sci[i],'hss_vtmax_time']=sc.time[np.nanargmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]])+hss_start_ind[i]]
except:
print('vmax nan')
# v_mean
try:
scat.at[sci[i],'hss_vtmean']=np.round(np.nanmean(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
except:
print()
#v_bstd
try:
scat.at[sci[i],'hss_vtstd']=np.round(np.nanstd(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
except:
print()
try:
#B_max
scat.at[sci[i],'hss_btmax']=np.round(np.nanmax(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
# B_mean
scat.at[sci[i],'hss_btmean']=np.round(np.nanmean(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
#bstd
scat.at[sci[i],'hss_btstd']=np.round(np.nanstd(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
#bz
scat.at[sci[i],'hss_bzmin']=np.round(np.nanmin(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
scat.at[sci[i],'hss_bzmean']=np.round(np.nanmean(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
scat.at[sci[i],'hss_bzstd']=np.round(np.nanstd(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
except:
print()
if (name== 'Wind'):
############ HSS duration
sci_istart=mdates.date2num(scat.hss_start_time[sci])
sci_hss_iend=mdates.date2num(scat.hss_end_time[sci])
scat.at[sci,'hss_duration']=np.round((sci_hss_iend-sci_istart)*24,2)
for i in np.arange(0,len(sci)):
#print(i)
#print('hss duration in hours ',(hss_end_ind[i]-hss_start_ind[i])/60)
tag=scat.sircat_id[i][13]
#v_max
scat.at[sci[i],'hss_vtmax']=np.round(np.nanmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
#vtmaxtime - search for index in sliced array and at beginning of array to see the index in the whole dataset
scat.at[sci[i],'hss_vtmax_time']=sc.time[np.nanargmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]])+hss_start_ind[i]]
# v_mean
scat.at[sci[i],'hss_vtmean']=np.round(np.nanmean(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
#v_bstd
scat.at[sci[i],'hss_vtstd']=np.round(np.nanstd(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
#B_max
scat.at[sci[i],'hss_btmax']=np.round(np.nanmax(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
# B_mean
scat.at[sci[i],'hss_btmean']=np.round(np.nanmean(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
#bstd
scat.at[sci[i],'hss_btstd']=np.round(np.nanstd(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
#bz
scat.at[sci[i],'hss_bzmin']=np.round(np.nanmin(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
scat.at[sci[i],'hss_bzmean']=np.round(np.nanmean(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
scat.at[sci[i],'hss_bzstd']=np.round(np.nanstd(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
print('sir')
###SIR parameters only for STEREO and MAVEN
############ SIR duration
if (name== 'STEREO-B') or (name== 'MAVEN'):
sci_istart=mdates.date2num(scat.hss_start_time[sci]) ##*** possible error? should this be sir_start_time?
sci_iend=mdates.date2num(scat.sir_end_time[sci])
scat.at[sci,'sir_duration']=np.round((sci_iend-sci_istart)*24,2)
########## SIR general parameters
for i in np.arange(0,len(sci)):
#v_max
scat.at[sci[i],'sir_vtmax']=np.round(np.nanmax(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)
# v_mean
scat.at[sci[i],'sir_vtmean']=np.round(np.nanmean(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)
#v_bstd
scat.at[sci[i],'sir_vtstd']=np.round(np.nanstd(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)
#B_max
scat.at[sci[i],'sir_btmax']=np.round(np.nanmax(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)
# B_mean
scat.at[sci[i],'sir_btmean']=np.round(np.nanmean(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)
#bstd
scat.at[sci[i],'sir_btstd']=np.round(np.nanstd(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)
#bz
scat.at[sci[i],'sir_bzmin']=np.round(np.nanmin(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)
scat.at[sci[i],'sir_bzmean']=np.round(np.nanmean(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)
scat.at[sci[i],'sir_bzstd']=np.round(np.nanstd(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)
if (name== 'STEREO-A'):
for i in np.arange(0,len(sci)):
#check which catalog
tag=scat.sircat_id[sci[i]][13]
if tag=='J': #Jian events
sci_istart=mdates.date2num(scat.sir_start_time[sci[i]])
sci_iend=mdates.date2num(scat.sir_end_time[sci[i]])
scat.at[sci[i],'sir_duration']=np.round((sci_iend-sci_istart)*24,2)
#v_max
scat.at[sci[i],'sir_vtmax']=np.round(np.nanmax(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)
# v_mean
scat.at[sci[i],'sir_vtmean']=np.round(np.nanmean(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)
#v_bstd
scat.at[sci[i],'sir_vtstd']=np.round(np.nanstd(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)
#B_max
scat.at[sci[i],'sir_btmax']=np.round(np.nanmax(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)
# B_mean
scat.at[sci[i],'sir_btmean']=np.round(np.nanmean(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)
#bstd
scat.at[sci[i],'sir_btstd']=np.round(np.nanstd(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)
#bz
scat.at[sci[i],'sir_bzmin']=np.round(np.nanmin(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)
scat.at[sci[i],'sir_bzmean']=np.round(np.nanmean(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)
scat.at[sci[i],'sir_bzstd']=np.round(np.nanstd(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)
if tag=='A': #Allen events
############ HSS duration
sci_istart=mdates.date2num(scat.hss_start_time[sci[i]])
sci_hss_iend=mdates.date2num(scat.hss_end_time[sci[i]])
scat.at[sci[i],'hss_duration']=np.round((sci_hss_iend-sci_istart)*24,2)
#v_max
scat.at[sci[i],'hss_vtmax']=np.round(np.nanmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
#vtmaxtime - search for index in sliced array and at beginning of array to see the index in the whole dataset
scat.at[sci[i],'hss_vtmax_time']=sc.time[np.nanargmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]])+hss_start_ind[i]]
# v_mean
scat.at[sci[i],'hss_vtmean']=np.round(np.nanmean(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
#v_bstd
scat.at[sci[i],'hss_vtstd']=np.round(np.nanstd(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
#B_max
scat.at[sci[i],'hss_btmax']=np.round(np.nanmax(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
# B_mean
scat.at[sci[i],'hss_btmean']=np.round(np.nanmean(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
#bstd
scat.at[sci[i],'hss_btstd']=np.round(np.nanstd(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)
#bz
scat.at[sci[i],'hss_bzmin']=np.round(np.nanmin(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
scat.at[sci[i],'hss_bzmean']=np.round(np.nanmean(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
scat.at[sci[i],'hss_bzstd']=np.round(np.nanstd(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
return scat
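# Illustrative sketch (hypothetical helper with made-up numbers, not used elsewhere):
# the index extraction above repeatedly uses np.where(sc.time > t)[0][0]-1 to find the
# last data point at or before a catalog time t. For a sorted time axis this is
# equivalent to a searchsorted lookup, which avoids building the full boolean mask.
def _time_to_index_sketch():
    times = np.array([0.0, 1.0, 2.0, 3.0, 4.0])             #hypothetical, sorted time axis
    t = 2.5
    i_where = np.where(times > t)[0][0] - 1                  # -> 2, the pattern used above
    i_sorted = np.searchsorted(times, t, side='right') - 1   # -> 2, same result
    return i_where, i_sorted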
###################################### ICMECAT operations ################################
def load_helcats_icmecat_master_from_excel(file):
''' convert excel master file to pandas dataframe and convert times
to datetime objects
'''
print('load HELCATS ICMECAT from file:', file)
ic=pd.read_excel(file)
#convert all times to datetime objects
for i in np.arange(0,ic.shape[0]):
#remove leading and ending blank spaces if any and write datetime object into dataframe
ic.at[i,'icme_start_time']= parse_time(str(ic.icme_start_time[i]).strip()).datetime
ic.at[i,'mo_start_time']=parse_time(str(ic.mo_start_time[i]).strip()).datetime
ic.at[i,'mo_end_time']=parse_time(str(ic.mo_end_time[i]).strip()).datetime
return ic
def pdyn(density, speed):
'''
make dynamic pressure from density [cm^-3] and speed [km/s]
assume pdyn is only due to protons
returns pdyn in nanoPascal
'''
proton_mass=1.6726219*1e-27 #kg
pdyn=np.multiply(np.square(speed*1e3),density)*1e6*proton_mass*1e9 #in nanoPascal
return pdyn
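# Illustrative sketch (hypothetical helper): worked example for pdyn() - typical slow
# solar wind with 5 protons cm^-3 at 500 km/s gives
# p_dyn = m_p * n * v^2 = 1.6726e-27 kg * 5e6 m^-3 * (5e5 m/s)^2, i.e. about 2.1 nPa.
def _example_pdyn():
    return pdyn(5.0, 500.0)   # approximately 2.09 nPa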
def load_pickle(file):
ic=pickle.load( open(file, 'rb'))
return ic
def get_cat_parameters(sc, sci, ic, name):
'''
get parameters
sc - spacecraft data recarray
sci - indices for this spacecraft in icmecat
ic - icmecat pandas dataframe
'''
fileind='icmecat/indices_icmecat/ICMECAT_indices_'+name+'.p'
#### extract indices of ICMEs in the respective data (time consuming, so do it once)
if os.path.isfile(fileind) == False:
print('extract indices of ICMEs in '+ name+ ' data')
#### get all ICMECAT times for this spacecraft as datenum
sc_icme_start=ic.icme_start_time[sci]
sc_mo_start=ic.mo_start_time[sci]
sc_mo_end=ic.mo_end_time[sci]
### arrays containing the indices of where the ICMEs are in the data
icme_start_ind=np.zeros(len(sci),dtype=int)
mo_start_ind=np.zeros(len(sci),dtype=int)
mo_end_ind=np.zeros(len(sci),dtype=int)
#this takes some time, get indices in data for each ICMECAT
for i in np.arange(sci[0],sci[-1]+1):
print(i-sci[0])
icme_start_ind[i-sci[0]]=np.where(sc.time > sc_icme_start[i])[0][0]-1
#print(icme_start_ind[i])
mo_start_ind[i-sci[0]]=np.where(sc.time > sc_mo_start[i])[0][0]-1
mo_end_ind[i-sci[0]]=np.where(sc.time > sc_mo_end[i])[0][0]-1
pickle.dump([icme_start_ind, mo_start_ind,mo_end_ind], open(fileind, 'wb'))
############################################
[icme_start_ind, mo_start_ind,mo_end_ind]=pickle.load(open(fileind, 'rb'))
#plasma available?
if name=='Wind': plasma=True
if name=='STEREO-A': plasma=True
if name=='STEREO-B': plasma=True
if name=='ULYSSES': plasma=True
if name=='MAVEN': plasma=True
if name=='PSP': plasma=True
if name=='VEX': plasma=False
if name=='MESSENGER': plasma=False
if name=='SolarOrbiter': plasma=False
if name=='BepiColombo': plasma=False
print('Get parameters for ',name)
####### position
#MO heliodistance
for i in np.arange(len(sci))-1:
ic.at[sci[i],'mo_sc_heliodistance']=np.round(sc.r[mo_start_ind[i]],4)
#MO longitude
ic.at[sci[i],'mo_sc_long_heeq']=np.round(sc.lon[mo_start_ind[i]],2)
#MO latitude
ic.at[sci[i],'mo_sc_lat_heeq']=np.round(sc.lat[mo_start_ind[i]],2)
############ ICME
# ICME duration
sci_istart=mdates.date2num(ic.icme_start_time[sci])
sci_iend=mdates.date2num(ic.mo_end_time[sci])
ic.at[sci,'icme_duration']=np.round((sci_iend-sci_istart)*24,2)
for i in np.arange(0,len(sci)):
#ICME B_max
ic.at[sci[i],'icme_bmax']=np.round(np.nanmax(sc.bt[icme_start_ind[i]:mo_end_ind[i]]),1)
#ICME B_mean
ic.at[sci[i],'icme_bmean']=np.round(np.nanmean(sc.bt[icme_start_ind[i]:mo_end_ind[i]]),1)
#icme_bstd
ic.at[sci[i],'icme_bstd']=np.round(np.nanstd(sc.bt[icme_start_ind[i]:mo_end_ind[i]]),1)
if plasma==True:
#ICME speed_mean and std
for i in np.arange(len(sci))-1:
ic.at[sci[i],'icme_speed_mean']=np.round(np.nanmean(sc.vt[icme_start_ind[i]:mo_end_ind[i]]),1)
ic.at[sci[i],'icme_speed_std']=np.round(np.nanstd(sc.vt[icme_start_ind[i]:mo_end_ind[i]]),1)
else: #set nan
for i in np.arange(len(sci))-1:
ic.at[sci[i],'icme_speed_mean']=np.nan
ic.at[sci[i],'icme_speed_std']=np.nan
########### MO
# MO duration
sci_istart=mdates.date2num(ic.mo_start_time[sci])
sci_iend=mdates.date2num(ic.mo_end_time[sci])
ic.at[sci,'mo_duration']=np.round((sci_iend-sci_istart)*24,2)
#print(sci_istart)
#print(sci_iend)
#print(mo_start_ind[i])
#print(mo_end_ind[i])
for i in np.arange(len(sci))-1:
#MO B_max
ic.at[sci[i],'mo_bmax']=np.round(np.nanmax(sc.bt[mo_start_ind[i]:mo_end_ind[i]]),1)
#MO B_mean
ic.at[sci[i],'mo_bmean']=np.round(np.nanmean(sc.bt[mo_start_ind[i]:mo_end_ind[i]]),1)
#MO B_std
ic.at[sci[i],'mo_bstd']=np.round(np.nanstd(sc.bt[mo_start_ind[i]:mo_end_ind[i]]),1)
#MO Bz_mean
ic.at[sci[i],'mo_bzmean']=np.round(np.nanmean(sc.bz[mo_start_ind[i]:mo_end_ind[i]]),1)
#MO Bz_min
ic.at[sci[i],'mo_bzmin']=np.round(np.nanmin(sc.bz[mo_start_ind[i]:mo_end_ind[i]]),1)
#MO Bz_std
ic.at[sci[i],'mo_bzstd']=np.round(np.nanstd(sc.bz[mo_start_ind[i]:mo_end_ind[i]]),1)
# coding=utf-8
"""This module contains feasible region classes for the experiements."""
from abc import ABC, abstractmethod
import logging
import math
from cvxopt import matrix, sparse, solvers
import networkx as nx
import numpy as np
from scipy.optimize import linprog
from scipy.sparse.linalg import eigsh
from pflacg.experiments.experiments_helper import max_vertex
from gurobipy import GRB, read, Column
run_config_gurobi = {
'solution_only': True,
'verbosity': 'normal',
'OutputFlag': 0,
'dual_gap_acc': 1e-06,
'runningTimeLimit': None,
'use_LPSep_oracle': True,
'max_lsFW': 100000,
'strict_dropSteps': True,
'max_stepsSub': 100000,
'max_lsSub': 100000,
'LPsolver_timelimit': 100000,
'K': 1
}
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format="%(levelname)s :: %(asctime)s :: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
LOGGER = logging.getLogger()
# Helper functions
# Generate a valid DAG such that we can solve the shortest path problem.
def generateRandomGraph(n, p):
DG = nx.gnr_graph(n, p)
return DG
# Graph with a source and a sink, and a number of layers specified by layers
# and a number of nodes per layer equal to nodes_per_layer.
def generateStructuredGraph(layers, nodes_per_layer):
m = layers
s = nodes_per_layer
DG = nx.DiGraph()
DG.add_nodes_from(range(0, m * s + 1))
# Add first edges between source
DG.add_edges_from([(0, x + 1) for x in range(s)])
# Add all the edges in the subsequent layers.
for i in range(m - 1):
DG.add_edges_from(
[(x + 1 + s * i, y + 1 + s * (i + 1)) for x in range(s) for y in range(s)]
)
DG.add_edges_from([(x + 1 + s * (m - 1), m * s + 1) for x in range(s)])
return DG
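# Illustrative sketch (hypothetical helper): generateStructuredGraph(layers=2, nodes_per_layer=2)
# builds a DAG with source node 0, two layers {1, 2} and {3, 4}, and sink node 5, where every
# node of one layer is connected to every node of the next layer:
#   0 -> 1, 0 -> 2, 1 -> 3, 1 -> 4, 2 -> 3, 2 -> 4, 3 -> 5, 4 -> 5
def _example_structured_graph():
    DG = generateStructuredGraph(2, 2)
    return sorted(DG.edges())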
# Core classes
class _AbstractFeasibleRegion(ABC):
"""An abstract class to construct feasible region objects."""
def __init__(self, *args, **kwargs):
"""Initialise abstract feasible region class."""
pass
@property
def initial_point(self):
raise NotImplementedError(
"Initial point has not been set for this feasible region!"
)
@property
def initial_active_set(self):
raise NotImplementedError(
"Initial active set has not been set for this feasible region!"
)
@abstractmethod
def lp_oracle(self, d):
"""
Compute the linear oracle.
Parameters
----------
d : np.ndarray
The direction.
Returns
-------
np.ndarray
"""
pass
@abstractmethod
def away_oracle(self, d, point_x):
"""
Compute the away oracle.
Parameters
----------
d: np.ndarray
The direction.
point_x: Point
Point x with its proper support.
Returns
-------
Point
"""
pass
def projection(self, x, accuracy):
raise NotImplementedError(
"Projection has not been implemented for this feasible region!"
)
class ConvexHull(_AbstractFeasibleRegion):
"""Convex hull given a set of vertice."""
def __init__(self, vertices):
self.vertices = vertices
@property
def initial_point(self):
return self.vertices[0]
@property
def initial_active_set(self):
return [self.vertices[0]]
def lp_oracle(self, d):
val, index = d.dot(self.vertices[0]), 0
for _index, vertex in enumerate(self.vertices):
_val = d.dot(vertex)
if _val < val:
val, index = _val, _index
return self.vertices[index]
def away_oracle(self, d, point_x):
return max_vertex(d, point_x.support)
def projection(self, x, accuracy):
pass
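# Illustrative sketch (hypothetical helper): the linear minimization oracle of ConvexHull
# simply returns the vertex with the smallest inner product with the direction d. With the
# three unit vectors as vertices and d = (1, 0, -1), the oracle returns e_3, since
# <d, e_3> = -1 is the minimum.
def _example_convex_hull_lp_oracle():
    vertices = [np.array([1.0, 0.0, 0.0]),
                np.array([0.0, 1.0, 0.0]),
                np.array([0.0, 0.0, 1.0])]
    region = ConvexHull(vertices)
    return region.lp_oracle(np.array([1.0, 0.0, -1.0]))   # -> array([0., 0., 1.])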
class gurobi_MIP(_AbstractFeasibleRegion):
"""LP model implemented via Gurobi."""
def __init__(self, modelFilename):
model = read(modelFilename)
model.params.TimeLimit = run_config_gurobi['LPsolver_timelimit']
model.setParam('OutputFlag', False)
model.params.threads = 4
model.params.MIPFocus = 0
model.update()
self.dim = len(model.getVars())
self.model = model
return
@property
def initial_point(self):
v = np.ones(self.dim)
return self.lp_oracle(v)
@property
def initial_active_set(self):
return [self.initial_point()]
def lp_oracle(self, cc):
m = self.model
for it, v in enumerate(m.getVars()):
v.setAttr(GRB.attr.Obj, cc[it])
#Update the model with the new attributes.
m.update()
m.optimize(lambda mod, where: self.fakeCallback(mod, where, GRB.INFINITY))
# Status checking
status = m.getAttr(GRB.Attr.Status)
if status == GRB.INF_OR_UNBD or \
status == GRB.INFEASIBLE or \
status == GRB.UNBOUNDED:
assert False, "The model cannot be solved because it is infeasible or unbounded"
if status != GRB.OPTIMAL:
print(status)
assert False, "Optimization was stopped."
#Store the solution that will be outputted.
solution = np.array([v.x for v in m.getVars()], dtype=float)[:]
#Check that the initial number of constraints and the final number is the same.
return solution
def away_oracle(self, grad, point_x):
return max_vertex(grad, point_x.support)
def fakeCallback(self, model, where, value):
ggEps = 1e-08
if where == GRB.Callback.MIPSOL:
obj = model.cbGet(GRB.Callback.MIPSOL_OBJ)
if where == GRB.Callback.MIP:
objBnd = model.cbGet(GRB.Callback.MIP_OBJBND)
if objBnd >= value + ggEps:
pass
class BirkhoffPolytope(_AbstractFeasibleRegion):
def __init__(self, dim):
self.dim = dim
self.mat_dim = int(np.sqrt(dim))
@property
def initial_point(self):
return np.identity(self.mat_dim).flatten()
@property
def initial_active_set(self):
return [self.initial_point()]
def lp_oracle(self, d):
from scipy.optimize import linear_sum_assignment
objective = d.reshape((self.mat_dim, self.mat_dim))
matching = linear_sum_assignment(objective)
solution = np.zeros((self.mat_dim, self.mat_dim))
solution[matching] = 1
return solution.reshape(self.dim)
def away_oracle(self, grad, point_x):
return max_vertex(grad, point_x.support)
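# Illustrative sketch (hypothetical helper): the Birkhoff LP oracle solves an assignment
# problem over the flattened cost vector. For the 2x2 cost matrix [[1, 2], [3, 0]] the
# cheapest permutation is the identity (cost 1 + 0 = 1), so the oracle returns the
# flattened identity matrix.
def _example_birkhoff_lp_oracle():
    polytope = BirkhoffPolytope(dim=4)
    d = np.array([1.0, 2.0, 3.0, 0.0])   # row-major 2x2 cost matrix
    return polytope.lp_oracle(d)          # -> array([1., 0., 0., 1.])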
class ConstrainedBirkhoffPolytope(_AbstractFeasibleRegion):
def __init__(
self,
dim,
const_vector_ineq=None,
const_matrix_ineq=None,
const_matrix_eq=None,
const_vector_eq=None,
linear_equality_vector=None,
scipy_solver="revised simplex",
):
self.dim = dim
self.matdim = int(np.sqrt(dim))
self.scipy_solver = scipy_solver
self.A = np.zeros((2 * self.matdim - 1, self.dim))
# Condition on the columns
for j in range(self.matdim):
for i in range(self.matdim):
self.A[j, int(i * self.matdim) + j] = 1.0
# Condition on the rows
for j in range(self.matdim - 1):
for i in range(self.matdim):
self.A[self.matdim + j, int(j * self.matdim) + i] = 1.0
if linear_equality_vector is not None:
self.b = linear_equality_vector
else:
self.b = np.ones(2 * self.matdim - 1)
if const_matrix_ineq is not None and const_vector_ineq is not None:
num_ineq_constraints, dim_ineq_constraints = const_matrix_ineq.shape
if not dim_ineq_constraints == self.dim:
raise ValueError(
"Dimension of the inequality constraints does not match the dimensionality of the problem."
)
self.G = const_matrix_ineq
self.h = const_vector_ineq
else:
self.G = None
self.h = None
if const_matrix_eq is not None and const_vector_eq is not None:
num_eq_constraints, dim_eq_constraints = const_matrix_eq.shape
if not dim_eq_constraints == self.dim:
raise ValueError(
"Dimension of the equality constraints does not match the dimensionality of the problem."
)
self.A = np.vstack(
(
self.A,
const_matrix_eq,
)
)
self.b = np.append(self.b, const_vector_eq).tolist()
@property
def initial_point(self):
c = np.ones(self.dim)
return self.lp_oracle(c)
@property
def initial_active_set(self):
return [self.initial_point()]
def lp_oracle(self, x):
res = linprog(
x,
A_ub=self.G,
b_ub=self.h,
A_eq=self.A,
b_eq=self.b,
method=self.scipy_solver,
bounds=(0.0, np.inf),
)
if not res.status == 0:
raise Exception("LP oracle did not return succesfully.")
optimum = np.array(res.x)
return optimum.flatten()
def away_oracle(self, grad, point_x):
return max_vertex(grad, point_x.support)
class ProbabilitySimplexPolytope(_AbstractFeasibleRegion):
def __init__(self, dim):
self.dim = dim
@property
def initial_point(self):
v = np.zeros(self.dim)
v[0] = 1.0
return v
@property
def initial_active_set(self):
return [self.initial_point()]
def lp_oracle(self, x):
v = np.zeros(len(x), dtype=float)
v[np.argmin(x)] = 1.0
return v
# #This is a faster implementation of the away oracle without having to loop through active set.
# def away_oracle(self, grad, x):
# aux = np.multiply(grad, np.sign(x))
# indices = np.where(x > 0.0)[0]
# v = np.zeros(len(x), dtype=float)
# index_max = indices[np.argmax(aux[indices])]
# v[index_max] = 1.0
# return v, index_max
def away_oracle(self, grad, point_x):
return max_vertex(grad, point_x.support)
def projection(self, x):
(n,) = x.shape # will raise ValueError if x is not 1-D
if x.sum() == 1.0 and np.alltrue(x >= 0):
return x
v = x - np.max(x)
u = np.sort(v)[::-1]
cssv = np.cumsum(u)
rho = np.count_nonzero(u * np.arange(1, n + 1) > (cssv - 1.0)) - 1
theta = float(cssv[rho] - 1.0) / (rho + 1)
w = (v - theta).clip(min=0)
return w
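# Illustrative sketch (hypothetical helper): worked example for the sort-based simplex
# projection above - projecting x = (0.5, 0.8) removes the excess mass 0.3 evenly,
# giving (0.35, 0.65), which sums to one and stays non-negative.
def _example_simplex_projection():
    polytope = ProbabilitySimplexPolytope(dim=2)
    return polytope.projection(np.array([0.5, 0.8]))   # -> array([0.35, 0.65])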
class L1UnitBallPolytope(_AbstractFeasibleRegion):
def __init__(self, dim):
self.dim = dim
@property
def initial_point(self):
v = np.zeros(self.dim)
v[0] = 1.0
return v
@property
def initial_active_set(self):
return [self.initial_point()]
def lp_oracle(self, x):
v = np.zeros(len(x), dtype=float)
max_ind = np.argmax(np.abs(x))
v[max_ind] = -1.0 * np.sign(x[max_ind])
return v
def away_oracle(self, grad, point_x):
return max_vertex(grad, point_x.support)
def projection(self, x):
u = np.abs(x)
if u.sum() <= 1.0:
return x
w = self.projectionSimplex(u)
w *= np.sign(x)
return w
def projectionSimplex(self, x):
(n,) = x.shape # will raise ValueError if x is not 1-D
if x.sum() == 1.0 and np.alltrue(x >= 0):
return x
v = x - np.max(x)
u = np.sort(v)[::-1]
cssv = np.cumsum(u)
rho = np.count_nonzero(u * np.arange(1, n + 1) > (cssv - 1.0)) - 1
theta = float(cssv[rho] - 1.0) / (rho + 1)
w = (v - theta).clip(min=0)
return w
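# Illustrative sketch (hypothetical helper): the l1-ball LP oracle returns a signed unit
# vector at the coordinate with the largest absolute entry of the direction. For
# x = (0.3, -2.0, 1.0) that coordinate is index 1, and since sign(x[1]) = -1 the oracle
# returns the positive unit vector at that index.
def _example_l1_ball_lp_oracle():
    polytope = L1UnitBallPolytope(dim=3)
    return polytope.lp_oracle(np.array([0.3, -2.0, 1.0]))   # -> array([0., 1., 0.])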
class ConstrainedL1BallPolytope(_AbstractFeasibleRegion):
def __init__(
self,
l1_regularization,
dim,
const_matrix_ineq=None,
const_vector_ineq=None,
const_matrix_eq=None,
const_vector_eq=None,
solver_type="cvxopt",
scipy_solver="revised simplex",
sparse_solver=False,
):
self.dim = dim
self.l1_regularization = l1_regularization
self.solver_type = solver_type
if not (solver_type == "cvxopt" or solver_type == "scipy"):
raise TypeError("Wrong solver type")
if solver_type == "cvxopt":
solvers.options["show_progress"] = False
else:
self.scipy_solver = scipy_solver
if sparse_solver and not solver_type == "cvxopt":
raise TypeError("scipy solver cannot handle sparse matrices.")
simplex_dimensionality = int(2 * dim)
if const_matrix_ineq is not None and const_vector_ineq is not None:
num_ineq_constraints, dim_ineq_constraints = const_matrix_ineq.shape
if not (dim_ineq_constraints == self.dim):
raise ValueError(
"Dimension of the inequality constraints does not match the dimensionality of the problem."
)
self.G = np.vstack(
(
np.hstack((const_matrix_ineq, -const_matrix_ineq)),
-np.identity(simplex_dimensionality),
)
)
self.h = np.append(const_vector_ineq, np.zeros(simplex_dimensionality))
if solver_type == "cvxopt":
self.G = matrix(
self.G,
(
simplex_dimensionality + num_ineq_constraints,
simplex_dimensionality,
),
)
if sparse_solver:
self.G = sparse(self.G)
self.h = matrix(
self.h, (simplex_dimensionality + num_ineq_constraints, 1)
)
else:
self.G = -np.identity(simplex_dimensionality)
self.h = np.zeros(simplex_dimensionality)
if solver_type == "cvxopt":
self.G = matrix(
self.G,
)
self.h = matrix(self.h, (simplex_dimensionality, 1))
if sparse_solver:
self.G = sparse(self.G)
if const_matrix_eq is not None and const_vector_eq is not None:
num_eq_constraints, dim_eq_constraints = const_matrix_eq.shape
if not dim_eq_constraints == self.dim:
raise ValueError(
"Dimension of the equality constraints does not match the dimensionality of the problem."
)
self.A = np.vstack(
(
np.hstack((const_matrix_eq, -const_matrix_eq)),
np.ones(simplex_dimensionality),
)
)
self.b = np.append(const_vector_eq, self.l1_regularization).tolist()
if solver_type == "cvxopt":
self.A = matrix(
self.A, (1 + num_eq_constraints, simplex_dimensionality)
)
self.b = matrix(self.b, (1 + len(const_vector_eq), 1), "d")
if sparse_solver:
self.A = sparse(self.A)
else:
self.A = np.ones(simplex_dimensionality)
self.b = self.l1_regularization
if solver_type == "cvxopt":
self.A = matrix(self.A, (1, simplex_dimensionality))
self.b = matrix(self.b)
else:
self.A = np.ones(simplex_dimensionality).reshape(
(simplex_dimensionality, 1)
)
self.b = np.asarray(self.b).reshape((1,))
@property
def initial_point(self):
c = np.ones(self.dim)
return self.lp_oracle(c)
@property
def initial_active_set(self):
return [self.initial_point()]
def lp_oracle(self, x):
cost_vector = np.hstack((x, -x))
import os
import joblib
import numpy as np
import pandas as pd
from Fuzzy_clustering.version2.common_utils.logging import create_logger
def compute_area_grid(lat, long, resolution, round_coord, levels):
lat_range = np.arange(np.around(lat, round_coord) - 20, np.around(lat, round_coord) + 20,
resolution)
lat1 = lat_range[np.abs(lat_range - lat).argmin()] - resolution / 10
lat2 = lat_range[np.abs(lat_range - lat).argmin()] + resolution / 10
long_range = np.arange(np.around(long, round_coord) - 20, np.around(long, round_coord) + 20,
resolution)
long1 = long_range[np.abs(long_range - long).argmin()] - resolution / 10
long2 = long_range[np.abs(long_range - long).argmin()] + resolution / 10
return [[lat1 - resolution * levels, long1 - resolution * levels],
[lat2 + resolution * levels, long2 + resolution * levels]]
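# Illustrative sketch (hypothetical helper with made-up coordinates): for a park at
# (lat, long) = (52.17, 14.12) with a 0.5 deg NWP resolution, round_coord=0 and levels=2,
# the point is snapped to the 0.5 deg grid cell around (52.0, 14.0) and padded by 2 cells
# on each side, giving approximately [[50.95, 12.95], [53.05, 15.05]]
# (the 0.05 offsets come from resolution / 10).
def _example_compute_area_grid():
    return compute_area_grid(52.17, 14.12, resolution=0.5, round_coord=0, levels=2)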
class ProjectGroupInit:
"""
Class responsible for managing and loading the
power output or load.
"""
def __init__(self, static_data):
"""
Parameters
----------
static_data: python dict
contains all the information required to load the power measurement for specific project(s).
"""
self.static_data = static_data # dict containing information about project paths, model structure and training
# params, input file, see in util_database_timos.py and config_timos.py
self.file_data = static_data['data_file_name'] # input .csv file PROBLEM_TYPE + '_ts.csv' i.e. wind_ts.csv
self.project_owner = static_data[
'project_owner'] # Name of project owner or research program i.e. my_projects or CROSSBOW
self.projects_group = static_data['projects_group'] # Name of the country
self.area_group = static_data['area_group'] # coordinates of the country
self.version_group = static_data['version_group']
self.version_model = static_data['version_model']
self.weather_in_data = static_data[
'weather_in_data'] # True if input file contains more columns than the power output column
self.nwp_model = static_data['NWP_model']
self.nwp_resolution = static_data['NWP_resolution']
self.data_variables = static_data['data_variables'] # Variable names used
self.projects = [] # list containing all the parks, we're interested in. Each park is considered as a project.
self.use_rated = True
self.model_type = self.static_data['type']
self.sys_folder = self.static_data['sys_folder']
self.path_nwp = self.static_data['path_nwp']
self.path_group = self.static_data['path_group']
self.path_nwp_group = self.static_data['path_nwp_group']
self.group_static_data = []
self.logger = create_logger(logger_name=f'ProjectInitManager_{self.model_type}', abs_path=self.path_group,
logger_path=f'log_{self.projects_group}.log', write_type='a')
def initialize(self):
if os.path.exists(os.path.join(os.path.dirname(self.file_data), 'coord_auto_' + self.model_type + '.csv')):
self.file_coord = os.path.join(os.path.dirname(self.file_data), 'coord_auto_' + self.model_type + '.csv')
else:
self.file_coord = os.path.join(os.path.dirname(self.file_data), 'coord_' + self.model_type + '.csv')
if not os.path.exists(self.file_coord) and not self.weather_in_data:
raise IOError('File with coordinates does not exist')
self.file_rated = os.path.join(os.path.dirname(self.file_data), 'rated_' + self.model_type + '.csv')
if not os.path.exists(self.file_rated):
if self.model_type in {'wind', 'pv'} and self.projects_group not in {'IPTO'}:
raise ValueError('Provide rated_power for each project. The type of projects is %s', self.model_type)
self.use_rated = False
else:
self.use_rated = True
self.load_power_of_parks() # Loads power output, coordinates and rated power.
if len(self.projects) == 0:
raise ImportError('No project loaded. check the input file in configuration')
if self.check_project_names():
for project_name in self.projects:
path_project = self.path_group + '/' + project_name
if not os.path.exists(path_project):
os.makedirs(path_project)
path_model = path_project + '/model_ver' + str(self.version_model)
if not os.path.exists(path_model):
os.makedirs(path_model)
path_backup = self.path_group + '/backup_models/' + project_name + '/model_ver' + str(
self.version_model)
if not os.path.exists(path_backup):
os.makedirs(path_backup)
path_data = path_model + '/DATA'
if not os.path.exists(path_data):
os.makedirs(path_data)
path_fuzzy_models = path_model + '/fuzzy_models'
if not os.path.exists(path_fuzzy_models):
os.makedirs(path_fuzzy_models)
if self.use_rated:
if project_name == self.projects_group + '_' + self.model_type and project_name not in self.rated.index.to_list():
rated = self.rated.sum().to_list()[0]
else:
rated = self.rated.loc[project_name].to_list()[0]
else:
rated = None
if hasattr(self, 'coord'):
if project_name == 'APE_net' or self.model_type == 'load' or project_name == self.projects_group + '_' + self.model_type:
coord = dict()
for name, lat_long in self.coord.iterrows():
coord[name] = lat_long.values.tolist()
else:
coord = self.coord.loc[project_name].to_list() # [lat, long]
else:
coord = None
area = self.create_area(coord)
temp = {'_id': project_name,
'owner': self.project_owner,
'project_group': self.projects_group,
'type': self.model_type,
'location': coord,
'areas': area,
'rated': rated,
'path_project': path_project,
'path_model': path_model,
'path_group': self.path_group,
'version_group': self.version_group,
'version_model': self.version_model,
'path_backup': path_backup,
'path_data': path_data,
'pathnwp': self.path_nwp_group,
'path_fuzzy_models': path_fuzzy_models,
'run_on_platform': False,
}
static_data = dict()
for key, value in self.static_data.items():
static_data[key] = value
for key, value in temp.items():
static_data[key] = value
self.group_static_data.append({'_id': project_name, 'static_data': static_data})
joblib.dump(static_data, os.path.join(path_model, 'static_data.pickle'))
with open(os.path.join(path_model, 'static_data.txt'), 'w') as file:
for k, v in static_data.items():
if not isinstance(v, dict):
file.write(str(k) + ' >>> ' + str(v) + '\n\n')
else:
file.write(str(k) + ' >>> ' + '\n')
for kk, vv in v.items():
file.write('\t' + str(kk) + ' >>> ' + str(vv) + '\n')
joblib.dump(self.group_static_data, os.path.join(self.path_group, 'static_data_projects.pickle'))
self.logger.info('Static data of all projects created')
def check_project_names(self):
flag = True
if self.model_type in {'wind', 'pv'}:
for name in self.projects:
if name not in self.coord.index.to_list() and name != self.projects_group + '_' + self.model_type and name != 'APE_net':
flag = False
self.logger.info('There is inconsistency to files data and coord for the project %s', name)
if not flag:
raise ValueError('Inconsistency in project names between data and coord')
if self.use_rated:
for name in self.projects:
if name not in self.rated.index.to_list() and name != self.projects_group + '_' + self.model_type:
flag = False
self.logger.info('There is inconsistency to files data and rated for the project %s', name)
if not flag:
raise ValueError('Inconsistency in project names between data and rated')
return flag
def load_power_of_parks(self):
try:
# Data containing power output or load. Each column refers to a different wind, pv park.
self.data = pd.read_csv(self.file_data, header=0, index_col=0, parse_dates=True, dayfirst=True)
except Exception:
self.logger.info(f'Cannot import timeseries from the file {self.file_data}')
raise IOError(f'Cannot import timeseries from the file {self.file_data}')
self.logger.info('Timeseries imported successfully from the file %s', self.file_data)
if 'total' in self.data.columns: # In some cases, the total output of all parks is included.
self.data = self.data.rename(
columns={'total': self.projects_group + '_' + self.model_type}) # e.g group = 'Greece'
if self.static_data['Evaluation_start']:
valid_combination = True
time_offset = pd.DateOffset(hours=0)
if self.model_type == 'fa':
time_offset = pd.DateOffset(days=372)
elif self.model_type == 'load':
if self.data.columns[0] == 'lv_load':
time_offset = pd.DateOffset(days=9001)
else:
if self.static_data['horizon'] == 'short-term':
time_offset = pd.DateOffset(hours = 350)
else:
if self.static_data['ts_resolution'] == 'hourly':
time_offset = pd.DateOffset(hours = 9001)
elif self.static_data['ts_resolution'] == '15min':
time_offset = pd.DateOffset(minutes = 60 * 9001)
if valid_combination:
try:
eval_date = pd.to_datetime(self.static_data['Evaluation_start'], format='%d%m%Y %H:%M')
self.data_eval = self.data.iloc[np.where(self.data.index > eval_date - time_offset)]
self.data = self.data.iloc[np.where(self.data.index <= eval_date)]
except Exception:
raise ValueError('Wrong date format, use %d%m%Y %H:%M. Or the date does not exist in the dataset')
if self.model_type == 'load':
self.projects.append(self.data.columns[0])
elif self.model_type == 'fa':
if self.version_model == 0:
self.projects.append('fa_curr_morning')
elif self.version_model == 1:
self.projects.append('fa_ahead_morning')
else:
raise ValueError(
'Version model should be 0 for current day and 1 for day ahead otherwise choose another group version')
else:
for name in self.data.columns:
var = f'{self.projects_group}_{self.model_type}' if name == 'total' else name
self.projects.append(var)
if not self.weather_in_data:
try:
# For each of the park, load its coordinates. (lat,long) single tuple
self.coord = pd.read_csv(self.file_coord, header=None, index_col=0)
except Exception:
self.logger.info('Cannot import coordinates from the file %s', self.file_coord)
raise IOError('Cannot import coordinates from the file %s', self.file_coord)
self.logger.info('Coordinates imported successfully from the file %s', self.file_coord)
else:
self.logger.info('Coordinates in the data')
if self.use_rated:
try:
# For each park, read its rated (maximum) power.
self.rated = pd.read_csv(self.file_rated, header=None, index_col=0)
except Exception:
self.logger.info('Cannot import Rated Power from the file %s', self.file_rated)
raise IOError('Cannot import Rated Power from the file %s', self.file_rated)
self.logger.info('Rated Power imported successfully from the file %s', self.file_rated)
self.logger.info('Data loaded successfully')
def create_area(self, coord):
levels = 4 if self.nwp_resolution == 0.05 else 2
round_coord = 1 if self.nwp_resolution == 0.05 else 0
if coord is None:
area = dict()
elif isinstance(coord, list):
if len(coord) == 2:
lat, long = coord[0], coord[1]
area = compute_area_grid(lat, long, self.nwp_resolution, round_coord, levels)
elif len(coord) == 4:
area = list(np.array(coord).reshape(2, 2))
else:
raise ValueError(
'Wrong coordinates. Should be point (lat, long) or area [lat1, long1, lat2, long2]')
elif isinstance(coord, dict):
area = dict()
for key, value in coord.items():
if len(value) == 2:
lat, long = value[0], value[1]
area[key] = compute_area_grid(lat, long, self.nwp_resolution, round_coord, levels)
else:
area[key] = np.array(value)
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
This class contains several methods that are providing the same or similar
returns to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
#duplicates now that are subclassing ArmaProcess
super(ArmaFft, self).__init__(ar, ma)
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.nobs = n
#could make the polynomials into cached attributes
self.arpoly = np.polynomial.Polynomial(ar)
self.mapoly = np.polynomial.Polynomial(ma)
self.nar = len(ar) #1d only currently
self.nma = len(ma)
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : boolean
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
return np.r_[np.zeros(maxlag-len(arr)), arr]
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
extended MA polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
return arpad, mapad
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ar, n))
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftma : ndarray
fft of zero-padded ma polynomial
'''
if n is None:
n = len(self.ma)
return fft.fft(self.padarr(self.ma, n))
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
return (self.fftma(n) / self.fftar(n))
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #doesn't show in plot
return (hw*hw.conj()).real * 0.5 / np.pi, w
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
"""smp_base.models_actinf
..moduleauthor:: <NAME>, 2016-2017
Active inference models based on :mod:`smp.actinf` project code.
This file contains the models_learners which can be used as adaptive models
of sensorimotor contexts designed for an active inference
approach. Currently implemented models are
- k nearest neighbours (knn)
- sparse online gaussian process models powered by Harold Soh's OTL library (soesgp, storkgp)
- gaussian mixture model based on pypr's gmm (gmm)
- hebbian connected SOM via bruno lara, guido schillaci (hebbsom)
- incremental gaussian mixtures (igmm via juan acevedo-valle)
- SOMs connected with hebbian associative links
TODO:
- consolidate calling convention / api for all model types
-- init with single argument config dictionary
-- predict, fit, sample, conditionals, visualize
-- common test code
- implement missing models
- missing: single hidden layer networks: linear/elm/res with RLS/FORCE/MDN/EH, merge with otl
- missing: imol/models.py
- missing: im/models.py
- missing: smp/models_seq.py
- missing: smp/models_karpmdn.py
- MDN model: florens, karpathy, hardmaru, amjad, cbonnett, edward
- including 'predict_naive' and 'predict_full' methods that would capture returning confidences about the current prediction
- other variables that might be used by the context to modulate exploration, learning and behaviour
- disambiguate static and dynamic (conditional inference types) idim/odim
- consistent sampling from probabilistic models (gmm, hebbsom, ...): sample from prior, stick with last sample's vicinity
- model visualization
- def visualize for all models
- plot current / final som configuration
- plot densities
- hebbsom
- som track residual error from map training
- som use residual for adjusting rbf width
- som extend sampling to sample actual prediction from gaussian with unit's mu and sigma
"""
import pickle
from functools import partial
import numpy as np
import scipy.sparse as sparse
import scipy.stats as stats
import pylab as pl
import matplotlib.gridspec as gridspec
import pandas as pd
from pandas.plotting import scatter_matrix
from smp_base.models import smpModelInit, smpModel
from smp_base.plot_utils import savefig
from smp_base.plot_models import plot_nodes_over_data_1d_components_fig, plot_nodes_over_data_1d_components
# KNN
from sklearn.neighbors import KNeighborsRegressor
# Online Gaussian Processes
try:
from otl_oesgp import OESGP
from otl_storkgp import STORKGP
HAVE_SOESGP = True
except ImportError as e:
print("couldn't import online GP models:", e)
HAVE_SOESGP = False
# Gaussian mixtures PyPR
try:
import pypr.clustering.gmm as gmm
except ImportError as e:
print("Couldn't import pypr.clustering.gmm", e)
# hebbsom
try:
from kohonen.kohonen import Map, Parameters, ExponentialTimeseries, ConstantTimeseries
from kohonen.kohonen import Gas, GrowingGas, GrowingGasParameters, Filter
from kohonen.kohonen import argsample
except ImportError as e:
print("Couldn't import lmjohns3's kohonon SOM lib", e)
# IGMM
try:
from igmm_cond import IGMM_COND
except ImportError as e:
print("Couldn't import IGMM lib", e)
# requirements: otl, kohonen, pypr, igmm
from smp_base.models_reservoirs import LearningRules
import logging
from smp_base.common import get_module_logger
logger = get_module_logger(modulename = 'models_actinf', loglevel = logging.DEBUG)
saveplot = False # True
model_classes = ["KNN", "SOESGP", "STORKGP", "GMM", "HebbSOM", "IGMM", "all"]
class smpKNN(smpModel):
"""smpKNN
k-NN function approximator smpmodel originally used for the active
inference developmental model but generally reusable.
"""
defaults = {
'idim': 1,
'odim': 1,
'n_neighbors': 5,
'prior': 'random', # ['random', 'linear']
'prior_width': 0.01,
}
@smpModelInit()
def __init__(self, conf):
"""smpKNN.__init__
init
"""
smpModel.__init__(self, conf)
# comply
if not hasattr(self, 'modelsize'):
self.modelsize = 1000 # self.n_neighbors
# the scikit base model
self.fwd = KNeighborsRegressor(n_neighbors = self.n_neighbors)
# the data store
self.X_ = []
self.y_ = []
self.hidden_dist = np.zeros((1, self.n_neighbors))
self.hidden_dist_sum = np.zeros((1, 1))
self.hidden_dist_sum_avg = np.zeros((1, 1))
self.hidden_idx = np.zeros((1, self.n_neighbors))
# bootstrap the model with prior
self.bootstrap()
def get_params(self, *args, **kwargs):
if 'param' in kwargs:
if 'w_norm' in kwargs['param']:
# return np.tile(np.array([(len(self.X_) + len(self.y_))/2.0]), (self.odim, 1))
return np.tile(np.array([len(self.y_)]), (self.odim, 1))
return self.fwd.get_params()
def visualize(self):
pass
def bootstrap(self):
"""smpKNN.bootstrap
Bootstrap the model with some initial dummy samples to prepare it for inference after init
"""
# bootstrap model
self.n_samples_bootstrap = max(10, self.n_neighbors)
logger.info("%s.bootstrapping with %s prior" % (self.__class__.__name__, self.prior))
if self.prior == 'random':
for i in range(self.n_samples_bootstrap):
if self.idim == self.odim:
self.X_.append(np.ones((self.idim, )) * i * 0.1)
self.y_.append(np.ones((self.odim, )) * i * 0.1)
else:
noise_amp = self.prior_width
self.X_.append(np.random.uniform(
-noise_amp, noise_amp, (self.idim,)))
self.y_.append(np.random.uniform(
-noise_amp, noise_amp, (self.odim,)))
elif self.prior == 'linear':
for i in range(self.n_samples_bootstrap):
p_ = -self.prior_width/2.0 + float(i)/self.n_samples_bootstrap
X = np.ones((self.idim, )) * p_ + np.random.uniform(-0.01, 0.01)
y = np.ones((self.odim, )) * p_ + np.random.uniform(-0.01, 0.01)
self.X_.append(X)
self.y_.append(y)
# print(self.X_, self.y_)
self.fwd.fit(self.X_, self.y_)
def predict(self, X):
"""smpKNN.predict
Predict Y using X on the current model state
"""
# FIXME: change scikit to store intermediate query results
# or: fully local predict def
self.hidden_dist, self.hidden_idx = self.fwd.kneighbors(X)
self.hidden_dist_sum = np.mean(self.hidden_dist)
self.hidden_dist_sum_avg = 0.1 * self.hidden_dist_sum + 0.9 * self.hidden_dist_sum_avg
# self.hidden_idx_norm = self.hidden_idx.astype(np.float) * self.hidden_dist_sum_avg/1000.0
self.hidden_idx_norm = self.hidden_idx.astype(np.float) * 1e-3
# logger.debug('hidden dist = %s, idx = %s', self.hidden_dist, self.hidden_idx)
return self.fwd.predict(X)
def fit(self, X, y):
"""smpKNN.fit
Single fit Y to X step. If the input is a batch of data, fit
that entire batch and forgetting existing data in X' and
Y'. If the input is a single data point, append to X' and Y'
and refit the model to that new data.
"""
if X.shape[0] > 1: # batch of data
# self.modelsize = X.shape[0]
return self.fit_batch(X, y)
# logger.debug("%s.fit[%d] len(X_) = %d, len(y_) = %d, modelsize = %d", self.__class__.__name__, self.cnt, len(self.X_), len(self.y_), self.modelsize)
self.cnt += 1
# if len(self.X_) > self.modelsize: return
self.X_.append(X[0,:])
# self.y_.append(self.m[0,:])
# self.y_.append(self.goal[0,:])
self.y_.append(y[0,:])
self.fwd.fit(self.X_, self.y_)
def fit_batch(self, X, y):
"""smpKNN.fit
Batch fit Y to X
"""
self.X_ = X.tolist()
self.y_ = y.tolist()
self.fwd.fit(self.X_, self.y_)
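# Illustrative sketch (hypothetical helper with made-up data): smpKNN.predict() relies on
# sklearn's kneighbors() call, which returns the distances and indices of the k nearest
# stored samples; those are what populate hidden_dist and hidden_idx above.
def _example_kneighbors():
    X = np.linspace(0.0, 1.0, 20).reshape(-1, 1)     # hypothetical inputs
    y = np.sin(X).ravel()                            # hypothetical targets
    knn = KNeighborsRegressor(n_neighbors=5).fit(X, y)
    dist, idx = knn.kneighbors(np.array([[0.42]]))   # shapes (1, 5) and (1, 5)
    return knn.predict(np.array([[0.42]])), dist, idx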
################################################################################
# ActiveInference OTL library based model, base class implementing predict,
# predict_step (otl can't handle batches), fit, save and load methods
class smpOTLModel(smpModel):
"""smpOTLModel
Sparse online echo state gaussian process function approximator
for active inference
"""
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'soesgp',
'otlmodel': None,
'memory': 1,
'lag_off': 1,
}
@smpModelInit()
def __init__(self, conf):
# if conf is None: conf = self.defaults
smpModel.__init__(self, conf)
# self.otlmodel_type = "soesgp"
# self.otlmodel = None
# introspection
self.cnt = 0
# explicit short term memory needed for tapping across lag gaps
self.r_l = []
print( "otlmodel.memory", self.memory)
self.r_ = np.zeros((self.modelsize, self.memory))
# self.r_ = np.random.uniform(-1, 1, (self.modelsize, self.memory)) * 1.0
# output variables arrays
self.pred = np.zeros((self.odim, 1))
self.var = np.zeros((self.odim, 1))
# output variables lists
self.pred_l = []
self.var_l = []
def update(self, X_):
# update state
self.otlmodel.update(X_)
# store state
self.r_ = np.roll(self.r_, shift = -1, axis = -1)
self.otlmodel.getState(self.r_l)
tmp = np.array([self.r_l]).T
# print("%s r_ = %s, r[...,[-1] = %s, tmp = %s" % (self.__class__.__name__, self.r_.shape, self.r_[...,[-1]].shape, tmp.shape))
self.r_[...,[-1]] = tmp.copy()
def predict(self, X,rollback = False):
# row vector input
if X.shape[0] > 1: # batch input
ret = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
ret[i] = self.predict_step(X[i].flatten().tolist(), rollback = rollback)
return ret
else:
X_ = X.flatten().tolist()
return self.predict_step(X_, rollback = rollback)
def predict_step(self, X_, rollback = False):
# update state and store it
self.update(X_)
# predict output variables from state
self.otlmodel.predict(self.pred_l, self.var_l)
# return np.zeros((1, self.odim))
# set prediction variables
self.pred = np.array(self.pred_l)
self.var = np.abs(np.array(self.var_l))
# roll back the reservoir state if rollback on
if rollback:
self.r_ = np.roll(self.r_, shift = 1, axis = -1)
self.otlmodel.setState(self.r_[...,[-1]].copy().flatten().tolist())
self.cnt += 1
return self.pred.reshape((1, self.odim))
def fit(self, X, y, update = True):
"""smpOTLModel.fit
Fit model to data X, y
"""
if self.cnt < self.memory: return
if X.shape[0] > 1: # batch of data
return self.fit_batch(X, y)
if update:
X_ = X.flatten().tolist()
self.update(X_)
# print("X.shape", X.shape, len(X_), X_)
# self.otlmodel.update(X_)
# copy state into predefined structure
# self.otlmodel.getState(self.r)
# consider lag and restore respective state
# print("otlmodel.fit lag_off", self.lag_off)
r_lagged = self.r_[...,[-self.lag_off]]
# print ("r_lagged", r_lagged.shape)
self.otlmodel.setState(r_lagged.flatten().tolist())
# prepare target and fit
# print("soesgp.fit y", type(y))
y_ = y.flatten().tolist()
self.otlmodel.train(y_)
# restore chronologically most recent state
r_lagged = self.r_[...,[-1]]
self.otlmodel.setState(r_lagged.flatten().tolist())
def fit_batch(self, X, y):
for i in range(X.shape[0]):
self.fit(X[[i]], y[[i]])
def save(self, filename):
otlmodel_ = self.otlmodel
self.otlmodel.save(filename + "_%s_model" % self.otlmodel_type)
print("otlmodel", otlmodel_)
self.otlmodel = None
print("otlmodel", otlmodel_)
pickle.dump(self, open(filename, "wb"))
self.otlmodel = otlmodel_
print("otlmodel", self.otlmodel)
@classmethod
def load(cls, filename):
# otlmodel_ = cls.otlmodel
otlmodel_wrap = pickle.load(open(filename, "rb"))
print("%s.load cls.otlmodel filename = %s, otlmodel_wrap.otlmodel_type = %s" % (cls.__name__, filename, otlmodel_wrap.otlmodel_type))
if otlmodel_wrap.otlmodel_type == "soesgp":
otlmodel_cls = OESGP
elif otlmodel_wrap.otlmodel_type == "storkgp":
otlmodel_cls = STORKGP
else:
otlmodel_cls = OESGP
otlmodel_wrap.otlmodel = otlmodel_cls()
print("otlmodel_wrap.otlmodel", otlmodel_wrap.otlmodel)
otlmodel_wrap.otlmodel.load(filename + "_%s_model" % otlmodel_wrap.otlmodel_type)
# print("otlmodel_wrap.otlmodel", dir(otlmodel_wrap.otlmodel))
# cls.bootstrap(otlmodel_wrap)
# otlmodel_wrap.otlmodel = otlmodel_
return otlmodel_wrap
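# Save/load round-trip sketch for smpOTLModel subclasses (assumption: the OTL
# python bindings providing OESGP/STORKGP are importable, since load()
# re-instantiates the wrapped model; the filename is illustrative).
def _example_smpOTLModel_persistence(mdl, filename='/tmp/otlmodel.pkl'):
    """Illustrative usage sketch."""
    # save() pickles the wrapper without the OTL object and stores the OTL
    # model separately under filename + '_<otlmodel_type>_model'
    mdl.save(filename)
    # load() unpickles the wrapper, re-creates the OTL object and restores it
    return smpOTLModel.load(filename)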
################################################################################
# Sparse Online Echo State Gaussian Process (SOESGP) OTL library model
class smpSOESGP(smpOTLModel):
"""smpSOESGP
Sparse online echo state gaussian process function approximator
for active inference
"""
# # for input modulation style
# defaults = {
# 'idim': 1,
# 'odim': 1,
# 'otlmodel_type': 'soesgp',
# 'otlmodel': None,
# 'modelsize': 300,
# 'input_weight': 2.0,
# 'output_feedback_weight': 0.0,
# 'activation_function': 1,
# 'leak_rate': 0.8, # 0.9,
# 'connectivity': 0.1,
# 'spectral_radius': 0.99, # 0.999,
# # 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# # 'noise': 0.01,
# # 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# # 'noise': 1.0, # 0.01,
# 'kernel_params': [2.0, 2.0], # [2.0, 2.0],
# 'noise': 5e-2, # 0.01,
# 'epsilon': 1e-3,
# 'capacity': 100, # 10
# 'random_seed': 101,
# 'visualize': False,
# }
# for self-sampling style
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'soesgp',
'otlmodel': None,
'memory': 1,
'lag_off': 1,
'modelsize': 200,
'output_feedback_weight': 0.0,
'use_inputs_in_state': False,
'activation_function': 0,
'connectivity': 0.1,
# 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# 'noise': 0.01,
# 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# 'noise': 1.0, # 0.01,
# pointmass
'input_weight': 1.0,
'kernel_params': [10.0, 1.5],
'noise': 5e-3, #8e-2, # 0.01,
'leak_rate': 0.1, # 0.9,
'spectral_radius': 0.9,
# # barrel
# 'input_weight': 1.0,
# 'kernel_params': [1.2, 1.2], # [2.0, 2.0],
# 'noise': 1e-2,
# 'leak_rate': 0.9, # 0.9,
# 'spectral_radius': 0.99, # 0.999,
'epsilon': 1e-4,
'capacity': 200, # 10
'random_seed': 106,
'visualize': False,
}
@smpModelInit()
def __init__(self, conf):
smpOTLModel.__init__(self, conf = conf)
# self.otlmodel_type = "soesgp"
self.otlmodel = OESGP()
# self.res_size = 100 # 20
# self.input_weight = 1.0 # 1.0
# self.output_feedback_weight = 0.0
# self.activation_function = 1
# # leak_rate: x <= (1-lr) * input + lr * x
# self.leak_rate = 0.96 # 0.05 # 0.0 # 0.1 # 0.3
# self.connectivity = 0.1
# self.spectral_radius = 0.99
# # covariances
# self.kernel_params = [2.0, 2.0]
# # self.kernel_params = [1.0, 1.0]
# # self.kernel_params = [0.1, 0.1]
# self.noise = 0.05
# self.epsilon = 1e-3
# self.capacity = 100
# self.random_seed = 100 # FIXME: constant?
# self.X_ = []
# self.y_ = []
self.bootstrap()
def bootstrap(self):
from .models_reservoirs import res_input_matrix_random_sparse
self.otlmodel.init(self.idim, self.odim, self.modelsize, self.input_weight,
self.output_feedback_weight, self.activation_function,
self.leak_rate, self.connectivity, self.spectral_radius,
False, self.kernel_params, self.noise, self.epsilon,
self.capacity, self.random_seed)
im = res_input_matrix_random_sparse(self.idim, self.modelsize, 0.2) * self.input_weight
# print("im", type(im))
self.otlmodel.setInputWeights(im.tolist())
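# Online learning loop sketch for smpSOESGP (assumptions: the OTL 'OESGP'
# binding and models_reservoirs are importable; the sine target is a toy
# stand-in for real sensorimotor data).
def _example_smpSOESGP_online():
    """Illustrative usage sketch."""
    conf = dict(smpSOESGP.defaults)
    conf.update({'idim': 1, 'odim': 1})
    mdl = smpSOESGP(conf)
    y_pred = np.zeros((1, conf['odim']))
    for t in range(200):
        x = np.random.uniform(-1, 1, (1, conf['idim']))
        y = np.sin(x)             # toy target
        y_pred = mdl.predict(x)   # updates the reservoir state, then predicts
        mdl.fit(x, y)             # trains on the state lagged by lag_off
    return y_pred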
################################################################################
# StorkGP OTL based model
class smpSTORKGP(smpOTLModel):
"""smpSTORKGP
Sparse online echo state gaussian process function approximator
for active inference
"""
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'storkgp',
'otlmodel': None,
'modelsize': 50,
'memory': 1,
'lag_off': 1,
'input_weight': 1.0,
'output_feedback_weight': 0.0,
'activation_function': 1,
'leak_rate': 0.96,
'connectivity': 0.1,
'spectral_radius': 0.99,
'kernel_params': [2.0, 2.0],
'noise': 0.05,
'epsilon': 1e-3,
'capacity': 100,
'random_seed': 100,
'visualize': False,
}
@smpModelInit()
def __init__(self, conf):
smpOTLModel.__init__(self, conf = conf)
# self.otlmodel_type = "storkgp"
self.otlmodel = STORKGP()
# self.res_size = self.modelsize # 100 # 20
self.bootstrap()
def bootstrap(self):
self.otlmodel.init(
self.idim, self.odim,
self.modelsize, # window size
0, # kernel type
[0.5, 0.99, 1.0, self.idim],
1e-4,
1e-4,
100 # seed
)
self.otlmodel.getState(self.r_l)
# print("|self.r_l| = ", len(self.r_l))
self.r_ = np.zeros((len(self.r_l), self.memory))
################################################################################
# inference type multivalued models: GMM, SOMHebb, MDN
# these are somewhat different in operation than the models above
# - fit vs. fit_batch
# - can create conditional submodels
# GMM - gaussian mixture model
class smpGMM(smpModel):
"""smpGMM
Gaussian mixture model based on PyPR's gmm
"""
defaults = {
'idim': 1, 'odim': 1, 'K': 10, 'fit_interval': 100,
'numepisodes': 10, 'visualize': False, 'em_max_iter': 1000}
@smpModelInit()
def __init__(self, conf):
"""smpGMM.__init__
"""
smpModel.__init__(self, conf)
self.cdim = self.idim + self.odim
# data
self.Xy_ = []
self.X_ = []
self.y_ = []
self.Xy = np.zeros((1, self.cdim))
# fitting configuration
# self.fit_interval = 100
self.fitted = False
# number of mixture components
# self.K = K
# list of K component idim x 1 centroid vectors
# self.cen_lst = []
self.cen_lst = [] # np.random.uniform(-1, 1, (self.K,)).tolist()
# list of K component idim x idim covariances
self.cov_lst = [] # [np.eye(self.cdim) * 0.1 for _ in range(self.K)]
# K mixture coeffs
# self.p_k = None
self.p_k = None # [1.0/self.K for _ in range(self.K)]
# log loss after training
self.logL = 0
print("%s.__init__, idim = %d, odim = %d" % (self.__class__.__name__, self.idim, self.odim))
def fit(self, X, y):
"""smpGMM.fit
Single step fit: X, y are single patterns
"""
# print("%s.fit" % (self.__class__.__name__), X.shape, y.shape)
if X.shape[0] == 1:
            # single step update: add to internal data and refit if the length matches the update interval
self.Xy_.append(np.hstack((X[0], y[0])))
self.X_.append(X[0])
self.y_.append(y[0])
if len(self.Xy_) % self.fit_interval == 0:
# print("len(Xy_)", len(self.Xy_), self.Xy_[99])
# pl.plot(self.Xy_)
# pl.show()
# self.fit_batch(self.Xy)
self.fit_batch(self.X_, self.y_)
else:
# batch fit, just fit model to the input data batch
self.Xy_ += np.hstack((X, y)).tolist()
# self.X_ += X.tolist()
# self.y_ += y.tolist()
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# print("X_, y_", self.X_, self.y_)
self.fit_batch(X, y)
def fit_batch(self, X, y):
"""smpGMM.fit_batch
Fit the GMM model with batch data
"""
# print("%s.fit X.shape = %s, y.shape = %s" % (self.__class__.__name__, X.shape, y.shape))
# self.Xy = np.hstack((X[:,3:], y[:,:]))
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# self.Xy = Xy
# X = np.asarray(X_)
# y = np.asarray(y_)
self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
print("%s.fit_batch self.Xy.shape = %s" % (self.__class__.__name__, self.Xy.shape))
# fit gmm
# max_iter = 10
try:
self.cen_lst, self.cov_lst, self.p_k, self.logL = gmm.em_gm(
self.Xy, K = self.K, max_iter = self.em_max_iter,
verbose = False, iter_call = None)
self.fitted = True
except Exception as e:
print( "%s.fit_batch fit failed with %s" % (self.__class__.__name__, e.args ,))
# sys.exit()
print("%s.fit_batch Log likelihood (how well the data fits the model) = %f" % (self.__class__.__name__, self.logL))
def predict(self, X, rollback = False):
"""smpGMM.predict
Predict Y from X by forwarding to default sample call
"""
return self.sample(X, rollback = rollback)
def sample(self, X, rollback = False):
"""smpGMM.sample
        Default sampling function.
        Assumes the input X (with idim columns) occupies the first part of
        the combined conditional-inference input vector; the full conditioning
        vector is constructed from this reduced input by padding the output
        dimensions with NaN.
"""
print("%s.sample: X.shape = %s, idim = %d" % (self.__class__.__name__, X.shape, self.idim))
assert X.shape[1] == self.idim
# cond = np.zeros((, self.cdim))
uncond = np.empty((X.shape[0], self.odim))
uncond[:] = np.nan
# print("%s.sample: uncond.shape = %s" % (self.__class__.__name__, uncond.shape))
# np.array([np.nan for i in range(self.odim)])
cond = np.hstack((X, uncond))
# cond[:self.idim] = X.copy()
# cond[self.idim:] = np.nan
# print("%s.sample: cond.shape = %s" % (self.__class__.__name__, cond.shape))
if X.shape[0] > 1: # batch
return self.sample_batch(cond)
return self.sample_cond(cond)
def sample_cond(self, X):
"""smpGMM.sample_cond
Single sample from the GMM model with conditioning on single input pattern X
TODO: function conditional_dist, make predict/sample comply with sklearn and use the lowlevel
cond_dist for advanced uses like dynamic conditioning
"""
        # gmm.cond_dist wants a (n, ) shape, not (1, n)
if len(X.shape) > 1:
cond = X[0]
else:
cond = X
# print("%s.sample_cond: cond.shape = %s" % (self.__class__.__name__, cond.shape))
if not self.fitted:
# return np.zeros((3,1))
# model has not been bootstrapped, return random goal
cond_sample = np.random.uniform(-1.0, 1.0, (1, self.odim)) # FIXME hardcoded shape
# cen_con = self.cen_lst
# cov_con = self.cov_lst
# new_p_k = self.p_k
else:
(cen_con, cov_con, new_p_k) = gmm.cond_dist(cond, self.cen_lst, self.cov_lst, self.p_k)
# print( "cen_con", cen_con, "cov_con", cov_con, "p_k", new_p_k)
cond_sample = gmm.sample_gaussian_mixture(cen_con, cov_con, new_p_k, samples = 1)
# print("%s.sample_cond: cond_sample.shape = %s" % (self.__class__.__name__, cond_sample.shape))
return cond_sample
def sample_batch(self, X):
"""smpGMM.sample_batch
If X has more than one rows, return batch of samples for
every condition row in X
"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
# def sample_batch_legacy(self, X, cond_dims = [0], out_dims = [1], resample_interval = 1):
# """smpGMM.sample_batch_legacy
# Sample from gmm model with conditioning batch input X legacy function
# """
# # compute conditional
# sampmax = 20
# numsamplesteps = X.shape[0]
# odim = len(out_dims) # self.idim - X.shape[1]
# self.y_sample_ = np.zeros((odim,))
# self.y_sample = np.zeros((odim,))
# self.y_samples_ = np.zeros((sampmax, numsamplesteps, odim))
# self.y_samples = np.zeros((numsamplesteps, odim))
# self.cond = np.zeros_like(X[0])
# print("%s.sample_batch: y_samples_.shape = %s" % (self.__class__.__name__, self.y_samples_.shape))
# for i in range(numsamplesteps):
# # if i % 100 == 0:
# if i % resample_interval == 0:
# # print("%s.sample_batch: sampling gmm cond prob at step %d" % (self.__class__.__name__, i))
# ref_interval = 1
# # self.cond = self.logs["EP"][(i+ref_interval) % self.logs["EP"].shape[0]] # self.X__[i,:3]
# self.cond = X[(i+ref_interval) % numsamplesteps] # self.X__[i,:3]
# # self.cond = np.array()
# # self.cond[:2] = X_
# # print(self.cond, out_dims, X.shape)
# self.cond[out_dims] = np.nan
# (self.cen_con, self.cov_con, self.new_p_k) = gmm.cond_dist(self.cond, self.cen_lst, self.cov_lst, self.p_k)
# # print "run_hook_e2p_sample gmm.cond_dist:", np.array(self.cen_con).shape, np.array(self.cov_con).shape, self.new_p_k.shape
# samperr = 1e6
# j = 0
# while samperr > 0.1 and j < sampmax:
# self.y_sample = gmm.sample_gaussian_mixture(self.cen_con, self.cov_con, self.new_p_k, samples = 1)
# self.y_samples_[j,i] = self.y_sample
# samperr_ = np.linalg.norm(self.y_sample - X[(i+1) % numsamplesteps,:odim], 2)
# if samperr_ < samperr:
# samperr = samperr_
# self.y_sample_ = self.y_sample
# j += 1
# # print "sample/real err", samperr
# print("sampled", j, "times")
# else:
# # retain samples from last sampling interval boundary
# self.y_samples_[:,i] = self.y_samples_[:,i-1]
# # return sample array
# self.y_samples[i] = self.y_sample_
# return self.y_samples, self.y_samples_
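# Conditional-inference sketch for smpGMM (assumptions: PyPR's `gmm` module is
# importable; generate_inverted_sinewave_dataset below supplies toy data).
def _example_smpGMM_conditional():
    """Illustrative usage sketch."""
    conf = dict(smpGMM.defaults)
    conf.update({'idim': 1, 'odim': 1})
    mdl = smpGMM(conf)
    X, Y = generate_inverted_sinewave_dataset(N=500)
    mdl.fit(X, Y)          # batch fit of the joint (X, Y) density
    # sample()/predict() condition the joint GMM on X by padding the output
    # dimensions with NaN internally
    return mdl.sample(X[:10])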
# IGMM - incremental gaussian mixture model, from juan
class smpIGMM(smpModel):
"""smpIGMM
Gaussian mixture model based on PyPR's gmm
"""
defaults = {'idim': 1, 'odim': 1, 'K': 10, 'numepisodes': 10, 'visualize': False}
@smpModelInit()
def __init__(self, conf):
"""smpIGMM.__init__
"""
smpModel.__init__(self, conf)
self.cdim = self.idim + self.odim
# number of mixture components
# self.K = K
# list of K component idim x 1 centroid vectors
self.cen_lst = []
# list of K component idim x idim covariances
self.cov_lst = []
# K mixture coeffs
self.p_k = None
self.cen_lst = np.random.uniform(-1, 1, (self.K,)).tolist()
# list of K component idim x idim covariances
self.cov_lst = [np.eye(self.cdim) * 0.1 for _ in range(self.K)]
# K mixture coeffs
# self.p_k = None
self.p_k = [1.0/self.K for _ in range(self.K)]
# log loss after training
self.logL = 0
# data
self.Xy_ = []
self.X_ = []
self.y_ = []
self.Xy = np.zeros((1, self.cdim))
# fitting configuration
self.fit_interval = 100
self.fitted = False
self.model = IGMM_COND(min_components=3, forgetting_factor=0.5)
# print("%s.__init__, idim = %d, odim = %d" % (self.__class__.__name__, self.idim, self.odim))
def fit(self, X, y):
"""smpIGMM.fit
Single step fit: X, y are single patterns
"""
# print("%s.fit" % (self.__class__.__name__), X.shape, y.shape)
if X.shape[0] == 1:
            # single step update: add to internal data and refit if the length matches the update interval
self.Xy_.append(np.hstack((X[0], y[0])))
self.X_.append(X[0])
self.y_.append(y[0])
if len(self.Xy_) % self.fit_interval == 0:
# print("len(Xy_)", len(self.Xy_), self.Xy_[99])
# pl.plot(self.Xy_)
# pl.show()
# self.fit_batch(self.Xy)
self.fit_batch(self.X_, self.y_)
self.Xy_ = []
self.X_ = []
self.y_ = []
else:
# batch fit, just fit model to the input data batch
self.Xy_ += np.hstack((X, y)).tolist()
# self.X_ += X.tolist()
# self.y_ += y.tolist()
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# print("X_, y_", self.X_, self.y_)
self.fit_batch(X, y)
def fit_batch(self, X, y):
"""smpIGMM.fit_batch
Fit the IGMM model with batch data
"""
# print("%s.fit X.shape = %s, y.shape = %s" % (self.__class__.__name__, X.shape, y.shape))
# self.Xy = np.hstack((X[:,3:], y[:,:]))
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# self.Xy = Xy
# X = np.asarray(X_)
# y = np.asarray(y_)
self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
print("%s.fit_batch self.Xy.shape = %s" % (self.__class__.__name__, self.Xy.shape))
# fit gmm
# self.cen_lst, self.cov_lst, self.p_k, self.logL = gmm.em_gm(self.Xy, K = self.K, max_iter = 1000,
# verbose = False, iter_call = None)
self.model.train(self.Xy)
self.fitted = True
# print("%s.fit_batch Log likelihood (how well the data fits the model) = %f" % (self.__class__.__name__, self.logL))
def predict(self, X):
"""smpIGMM.predict
Predict Y from X by forwarding to default sample call
"""
# print("IGMM.predict X.shape", X.shape, X)
return self.sample(X)
def sample(self, X):
"""smpIGMM.sample
        Default sampling function.
        Assumes the input X (with idim columns) occupies the first part of
        the combined conditional-inference input vector; the full conditioning
        vector is constructed from this reduced input by padding the output
        dimensions with NaN.
"""
# print("%s.sample: X.shape = %s, idim = %d" % (self.__class__.__name__, X.shape, self.idim))
assert X.shape[1] == self.idim
# cond = np.zeros((, self.cdim))
uncond = np.empty((X.shape[0], self.odim))
uncond[:] = np.nan
# print("%s.sample: uncond.shape = %s, %s" % (self.__class__.__name__, uncond.shape, uncond))
cond = np.hstack((X, uncond))
# cond[:self.idim] = X.copy()
# cond[self.idim:] = np.nan
# print("%s.sample: cond.shape = %s, %s" % (self.__class__.__name__, cond.shape, cond))
if X.shape[0] > 1: # batch
return self.sample_batch(cond)
sample = self.sample_cond(cond)
# print("%s.sample sample = %s, X = %s" % (self.__class__.__name__, sample.shape, X.shape))
# FIXME: fix that inference configuration
if sample.shape[1] == self.odim:
return sample
else:
return sample[...,X.shape[1]:]
def sample_cond(self, X):
"""smpIGMM.sample_cond
Single sample from the IGMM model with conditioning on single input pattern X
TODO: function conditional_dist, make predict/sample comply with sklearn and use the lowlevel
cond_dist for advanced uses like dynamic conditioning
"""
if not self.fitted:
# return np.zeros((3,1))
# model has not been bootstrapped, return random prediction
return np.random.uniform(-0.1, 0.1, (1, self.odim)) # FIXME hardcoded shape
        # gmm.cond_dist wants a (n, ) shape, not (1, n)
if len(X.shape) > 1:
cond = X[0]
else:
cond = X
# print("%s.sample_cond: cond.shape = %s" % (self.__class__.__name__, cond.shape))
# (cen_con, cov_con, new_p_k) = gmm.cond_dist(cond, self.cen_lst, self.cov_lst, self.p_k)
# cond_sample = gmm.sample_gaussian_mixture(cen_con, cov_con, new_p_k, samples = 1)
cond_sample = self.model.sample_cond_dist(cond, 1)
# print("%s.sample_cond: cond_sample.shape = %s, %s" % (self.__class__.__name__, cond_sample.shape, cond_sample))
return cond_sample
def sample_batch(self, X):
"""smpIGMM.sample_batch
If X has more than one rows, return batch of samples for
every condition row in X
"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
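# Single-step fitting sketch for smpIGMM (assumption: the IGMM_COND class from
# the igmm package is importable; the model is refit every fit_interval calls).
def _example_smpIGMM_usage():
    """Illustrative usage sketch."""
    conf = dict(smpIGMM.defaults)
    conf.update({'idim': 1, 'odim': 1})
    mdl = smpIGMM(conf)
    X, Y = generate_inverted_sinewave_dataset(N=300)
    for i in range(X.shape[0]):
        mdl.fit(X[[i]], Y[[i]])
    return mdl.predict(X[:5])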
################################################################################
# Hebbian SOM model: connect to SOMs with hebbian links
class smpHebbianSOM(smpModel):
"""smpHebbianSOM class
Hebbian SOM model
FIXME: conf: kohonen/map.Map init distribution and scaling
FIXME: conf: fit_hebb onset delay
    FIXME: conf: sampling mode (weights, gaussian(wgts, sigmas), ...)
"""
defaults = {
'idim': 1, 'odim': 1, 'numepisodes': 100, 'visualize': False, 'mapsize_e': 10, 'mapsize_p': 10, 'som_lr': 1e-0,
'som_nhs': 3, 'init_range': (-1.0, 1.0)}
@smpModelInit()
def __init__(self, conf):
"""smpHebbianSOM
        Two SOMs coding the input and output spaces, connected by associative Hebbian links
"""
smpModel.__init__(self, conf)
# SOMs training self assessment
self.cnt_fit = 0
self.cnt_predict = 0
self.fitted = False
self.soms_cnt_fit = 0
self.soms_cnt_predict = 0
self.soms_fitted = False
self.hebb_cnt_fit = 0
self.hebb_cnt_predict = 0
self.hebb_fitted = False
self.decay_const = -1e-5
# learning rate proxy
self.ET = ExponentialTimeseries
self.CT = ConstantTimeseries
self.mapsize = 10 ** 2 # 100
# self.mapsize_e = mapsize_e # 100 # int(np.sqrt(self.mapsize)) # max(10, self.idim * 3)
# self.mapsize_p = mapsize_p # 150 # int(np.sqrt(self.mapsize)) # max(10, self.odim * 3)
self.numepisodes_som = self.numepisodes
self.numepisodes_hebb = self.numepisodes
# FIXME: make neighborhood_size decrease with time
# som_lr = som_lr # 1e0
# som_lr = 1e-1 # Haykin, p475
# som_lr = 5e-1
# som_lr = 5e-4
# self.som_nhs = 3 # 1.5
maptype = "som"
# maptype = "gas"
# SOM exteroceptive stimuli 2D input
if maptype == "som":
if self.idim == 1:
mapshape_e = (self.mapsize_e, )
else:
mapshape_e = (self.mapsize_e, self.mapsize_e)
# 1D better?
# mapshape_e = (self.mapsize_e, )
self.kw_e = self.kwargs(
shape = mapshape_e, dimension = self.idim, lr_init = self.som_lr,
neighborhood_size = self.som_nhs, init_variance = 1.0) #, z = 0.001)
# self.kw_e = self.kwargs(shape = (self.mapsize_e, self.mapsize_e), dimension = self.idim, lr_init = 0.5, neighborhood_size = 0.6)
self.som_e = Map(Parameters(**self.kw_e))
elif maptype == "gas":
self.kw_e = self.kwargs_gas(shape = (self.mapsize_e ** 2, ), dimension = self.idim, lr_init = self.som_lr, neighborhood_size = 0.5)
self.som_e = Gas(Parameters(**self.kw_e))
# SOM proprioceptive stimuli 3D input
if maptype == "som":
if self.idim == 1:
mapshape_p = (self.mapsize_p, )
else:
mapshape_p = (int(self.mapsize_p), int(self.mapsize_p))
# 1D better?
mapshape_p = (self.mapsize_p, )
self.kw_p = self.kwargs(shape = mapshape_p, dimension = self.odim, lr_init = self.som_lr,
neighborhood_size = self.som_nhs, init_variance = 0.2) #, z = 0.001)
# self.kw_p = self.kwargs(shape = (int(self.mapsize_p * 1.5), int(self.mapsize_p * 1.5)), dimension = self.odim, lr_init = 0.5, neighborhood_size = 0.7)
self.som_p = Map(Parameters(**self.kw_p))
elif maptype == "gas":
self.kw_p = self.kwargs_gas(shape = (self.mapsize_p ** 2, ), dimension = self.odim, lr_init = self.som_lr, neighborhood_size = 0.5)
self.som_p = Gas(Parameters(**self.kw_p))
print("HebbianSOM mapsize_e,p", self.mapsize_e, self.mapsize_p)
# FIXME: there was a nice trick for node distribution init in _some_ recently added paper
# create "filter" using existing SOM_e, filter computes activation on distance
self.filter_e = Filter(self.som_e, history=lambda: 0.0)
# print("neurons_e", self.filter_e.map.neurons)
self.filter_e.reset()
# print("neurons_e", self.filter_e.map.neurons)
self.filter_e_lr = self.filter_e.map._learning_rate
# kw_f_p = kwargs(shape = (mapsize * 3, mapsize * 3), dimension = 3, neighborhood_size = 0.5, lr_init = 0.1)
# filter_p = Filter(Map(Parameters(**kw_f_p)), history=lambda: 0.01)
# create "filter" using existing SOM_p, filter computes activation on distance
self.filter_p = Filter(self.som_p, history=lambda: 0.0)
self.filter_p.reset()
self.filter_p_lr = self.filter_p.map._learning_rate
# Hebbian links
# hebblink_som = np.random.uniform(-1e-4, 1e-4, (np.prod(som_e._shape), np.prod(som_p._shape)))
# hebblink_filter = np.random.uniform(-1e-4, 1e-4, (np.prod(filter_e.map._shape), np.prod(filter_p.map._shape)))
self.hebblink_som = np.zeros((np.prod(self.som_e._shape), np.prod(self.som_p._shape)))
# self.hebblink_filter = np.zeros((np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
self.hebblink_filter = np.random.normal(0, 1e-6, (np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
# # sparse hebblink
# self.hebblink_filter = sparse.rand(m = np.prod(self.filter_e.map._shape),
# n = np.prod(self.filter_p.map._shape)) * 1e-3
self.hebblink_use_activity = True # use activation or distance
# Hebbian learning rate
if self.hebblink_use_activity:
# self.hebblink_et = ExponentialTimeseries(self.decay_const, 1e-0, 0)
self.hebblink_et = ConstantTimeseries(1e-0)
# self.hebblink_et = ConstantTimeseries(0.0)
else:
self.hebblink_et = ConstantTimeseries(1e-12)
# visualization
if self.visualize:
self.figs.append(plot_nodes_over_data_1d_components_fig(title = self.__class__.__name__, numplots = self.idim + self.odim))
# SOM argument dict
def kwargs(self, shape=(10, 10), z=0.001, dimension=2, lr_init = 1.0, neighborhood_size = 1, init_variance = 1.0):
"""smpHebbianSOM params function for Map"""
return dict(
dimension = dimension,
shape = shape,
neighborhood_size = self.ET(self.decay_const, neighborhood_size, 0.1), # 1.0),
learning_rate=self.ET(self.decay_const, lr_init, 0.0),
# learning_rate=self.CT(lr_init),
noise_variance=z,
init_variance = init_variance)
def kwargs_gas(self, shape=(100,), z=0.001, dimension=3, lr_init = 1.0, neighborhood_size = 1):
"""smpHebbianSOM params function for Gas"""
return dict(
dimension=dimension,
shape=shape,
neighborhood_size = self.ET(self.decay_const, neighborhood_size, 1.0),
learning_rate=self.ET(self.decay_const, lr_init, 0.0),
noise_variance=z)
def visualize_model(self):
"""smpHebbianSOM.visualize_model
Plot the model state visualization
"""
e_nodes, p_nodes = hebbsom_get_map_nodes(self, self.idim, self.odim)
e_nodes_cov = np.tile(np.eye(self.idim) * 0.05, e_nodes.shape[0]).T.reshape((e_nodes.shape[0], self.idim, self.idim))
p_nodes_cov = np.tile(np.eye(self.odim) * 0.05, p_nodes.shape[0]).T.reshape((p_nodes.shape[0], self.odim, self.odim))
X = np.vstack(self.Xhist)
Y = np.vstack(self.Yhist)
# print(X.shape)
plot_nodes_over_data_1d_components(
fig = self.figs[0], X = X, Y = Y, mdl = self,
e_nodes = e_nodes, p_nodes = p_nodes, e_nodes_cov = e_nodes_cov, p_nodes_cov = p_nodes_cov,
saveplot = False
)
def set_learning_rate_constant(self, c = 0.0):
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(c)
self.filter_p.map._learning_rate = self.CT(c)
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
def fit_soms(self, X, y):
"""smpHebbianSOM"""
# print("%s.fit_soms fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
# if X.shape[0] != 1, r
# e = EP[i,:dim_e]
# p = EP[i,dim_e:]
self.filter_e.map._learning_rate = self.filter_e_lr
self.filter_p.map._learning_rate = self.filter_p_lr
# don't learn twice
# som_e.learn(e)
# som_p.learn(p)
# TODO for j in numepisodes
if X.shape[0] > 1:
numepisodes = self.numepisodes_som
else:
numepisodes = 1
if X.shape[0] > 100:
print("%s.fit_soms batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
i = 0
j = 0
eps_convergence = 0.01
# eps_convergence = 0.005
dWnorm_e_ = 1 # short horizon
dWnorm_p_ = 1
dWnorm_e__ = dWnorm_e_ + 2 * eps_convergence # long horizon
dWnorm_p__ = dWnorm_p_ + 2 * eps_convergence
idx_shuffle = np.arange(X.shape[0])
# for j in range(numepisodes):
# (dWnorm_e_ == 0 and dWnorm_p_ == 0) or
# while (dWnorm_e_ > 0.05 and dWnorm_p_ > 0.05):
do_convergence = True
while (do_convergence) and (np.abs(dWnorm_e__ - dWnorm_e_) > eps_convergence and np.abs(dWnorm_p__ - dWnorm_p_) > eps_convergence): # and j < 10:
if j > 0 and j % 10 == 0:
print("%s.fit_soms episode %d / %d" % (self.__class__.__name__, j, numepisodes))
if X.shape[0] == 1:
# print("no convergence")
do_convergence = False
dWnorm_e = 0
dWnorm_p = 0
np.random.shuffle(idx_shuffle)
# print("neurons_e 1", self.filter_e.map.neurons.flatten())
for i in range(X.shape[0]):
# lidx = idx_shuffle[i]
lidx = i
self.filter_e.learn(X[lidx])
dWnorm_e += np.linalg.norm(self.filter_e.map.delta)
self.filter_p.learn(y[lidx])
dWnorm_p += np.linalg.norm(self.filter_p.map.delta)
# print("neurons_e 2", self.filter_e.map.neurons.flatten(), X, X[lidx])
dWnorm_e /= X.shape[0]
dWnorm_e /= self.filter_e.map.numunits
dWnorm_p /= X.shape[0]
dWnorm_p /= self.filter_p.map.numunits
# short
dWnorm_e_ = 0.8 * dWnorm_e_ + 0.2 * dWnorm_e
dWnorm_p_ = 0.8 * dWnorm_p_ + 0.2 * dWnorm_p
# long
dWnorm_e__ = 0.83 * dWnorm_e__ + 0.17 * dWnorm_e_
dWnorm_p__ = 0.83 * dWnorm_p__ + 0.17 * dWnorm_p_
# print("%s.fit_soms batch e |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_e, dWnorm_e_, dWnorm_e__))
# print("%s.fit_soms batch p |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_p, dWnorm_p_, dWnorm_p__))
j += 1
if True and self.soms_cnt_fit % 100 == 0:
print("%s.fit_soms batch e mean error = %f, min = %f, max = %f" % (
self.__class__.__name__,
np.asarray(self.filter_e.distances_).mean(),
np.asarray(self.filter_e.distances_[-1]).min(),
np.asarray(self.filter_e.distances_).max() ))
print("%s.fit_soms batch p mean error = %f, min = %f, max = %f" % (
self.__class__.__name__,
np.asarray(self.filter_p.distances_).mean(),
np.asarray(self.filter_p.distances_[-1]).min(),
np.asarray(self.filter_p.distances_).max() ))
# print np.argmin(som_e.distances(e)) # , som_e.distances(e)
self.soms_cnt_fit += 1
def fit_hebb(self, X, y):
"""smpHebbianSOM"""
# print("%s.fit_hebb fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
if X.shape[0] == 1 and self.soms_cnt_fit < 200: # 200: # 1500:
return
# numepisodes_hebb = 1
if X.shape[0] > 100:
print("%s.fit_hebb batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
numsteps = X.shape[0]
################################################################################
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(0.0)
self.filter_p.map._learning_rate = self.CT(0.0)
e_shape = (np.prod(self.filter_e.map._shape), 1)
p_shape = (np.prod(self.filter_p.map._shape), 1)
eps_convergence = 0.05
z_err_coef_1 = 0.8
z_err_coef_2 = 0.83
z_err_norm_ = 1 # fast
z_err_norm__ = z_err_norm_ + 2 * eps_convergence # slow
Z_err_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
Z_err_norm_ = np.zeros((self.numepisodes_hebb*numsteps,1))
W_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
# # plotting
# pl.ion()
# fig = pl.figure()
# fig2 = pl.figure()
# TODO for j in numepisodes
# j = 0
if X.shape[0] > 1:
numepisodes = self.numepisodes_hebb
else:
numepisodes = 1
i = 0
dWnorm_ = 10.0
j = 0
# for j in range(numepisodes):
do_convergence = True
while do_convergence and z_err_norm_ > eps_convergence and np.abs(z_err_norm__ - z_err_norm_) > eps_convergence: # and j < 20:
if j > 0 and j % 10 == 0:
print("%s.fit_hebb episode %d / %d" % (self.__class__.__name__, j, numepisodes))
if X.shape[0] == 1:
# print("no convergence")
do_convergence = False
for i in range(X.shape[0]):
# just activate
self.filter_e.learn(X[i])
self.filter_p.learn(y[i])
# fetch data induced activity
if self.hebblink_use_activity:
p_ = self.filter_p.activity.reshape(p_shape)
# print(p_.shape)
else:
                    p_ = self.filter_p.distances(y[i]).flatten().reshape(p_shape)
p__ = p_.copy()
# p_ = p_ ** 2
p_ = (p_ == np.max(p_)) * 1.0
e_ = self.filter_e.activity.reshape(e_shape) # flatten()
e__ = e_.copy()
# e_ = e_ ** 2
e_ = (e_ == np.max(e_)) * 1.0
# compute prediction for p using e activation and hebbian weights
if self.hebblink_use_activity:
# print(self.hebblink_filter.T.shape, self.filter_e.activity.reshape(e_shape).shape)
# p_bar = np.dot(self.hebblink_filter.T, self.filter_e.activity.reshape(e_shape))
# e_act = e_.reshape(e_shape)
# e_act
p_bar = np.dot(self.hebblink_filter.T, e_.reshape(e_shape))
# # sparse
# p_bar = self.hebblink_filter.T.dot(e_.reshape(e_shape))
# print("p_bar", type(p_bar))
else:
                    p_bar = np.dot(self.hebblink_filter.T, self.filter_e.distances(X[i]).flatten().reshape(e_shape))
p_bar_ = p_bar.copy()
p_bar = (p_bar == np.max(p_bar)) * 1.0
# print("p_bar", type(p_bar), type(p_bar_))
# # plotting
# ax1 = fig.add_subplot(411)
# ax1.cla()
# ax1.plot(e_ * np.max(e__))
# ax1.plot(e__)
# ax2 = fig.add_subplot(412)
# ax2.cla()
# ax2.plot(p_ * np.max(p_bar_))
# ax2.plot(p__)
# ax2.plot(p_bar * np.max(p_bar_))
# ax2.plot(p_bar_)
# ax3 = fig.add_subplot(413)
# ax3.cla()
# ax3.plot(self.filter_e.distances_[-1])
# ax4 = fig.add_subplot(414)
# ax4.cla()
# ax4.plot(self.filter_p.distances_[-1])
# pl.pause(0.001)
# pl.draw()
# inject activity prediction
p_bar_sum = p_bar.sum()
if p_bar_sum > 0:
p_bar_normed = p_bar / p_bar_sum
else:
p_bar_normed = np.zeros(p_bar.shape)
# compute prediction error: data induced activity - prediction
# print("p_", np.linalg.norm(p_))
# print("p_bar", np.linalg.norm(p_bar))
z_err = p_ - p_bar
idx = np.argmax(p_bar_)
# print("sum E", np.sum(z_err))
# print("idx", p_bar_, idx, z_err[idx])
# z_err = (p_[idx] - p_bar[idx]) * np.ones_like(p_)
# z_err = np.ones_like(p_) *
# print("z_err", z_err)
# z_err = p_bar - p_
# z_err_norm = np.linalg.norm(z_err, 2)
z_err_norm = np.sum(np.abs(z_err))
# if j == 0 and i == 0:
# z_err_norm_ = z_err_norm
# else:
z_err_norm_ = z_err_coef_1 * z_err_norm_ + (1 - z_err_coef_1) * z_err_norm
z_err_norm__ = z_err_coef_2 * z_err_norm__ + (1 - z_err_coef_2) * z_err_norm
w_norm = np.linalg.norm(self.hebblink_filter)
# logidx = (j*numsteps) + i
# Z_err_norm [logidx] = z_err_norm
# Z_err_norm_[logidx] = z_err_norm_
# W_norm [logidx] = w_norm
# z_err = p_bar - self.filter_p.activity.reshape(p_bar.shape)
# print "p_bar.shape", p_bar.shape
# print "self.filter_p.activity.flatten().shape", self.filter_p.activity.flatten().shape
# if i % 100 == 0:
# print("%s.fit_hebb: iter %d/%d: z_err.shape = %s, |z_err| = %f, |W| = %f, |p_bar_normed| = %f" % (self.__class__.__name__, logidx, (self.numepisodes_hebb*numsteps), z_err.shape, z_err_norm_, w_norm, np.linalg.norm(p_bar_normed)))
# d_hebblink_filter = et() * np.outer(self.filter_e.activity.flatten(), self.filter_p.activity.flatten())
eta = self.hebblink_et()
if eta > 0.0:
if False and self.hebblink_use_activity:
# eta = 5e-4
# outer = np.outer(self.filter_e.activity.flatten(), np.clip(z_err, 0, 1))
# outer = np.outer(e_, np.clip(z_err, 0, 1))
# outer = np.outer(e_, p_)
# outer = np.outer(e_, p__ * np.clip(z_err, 0, 1))
# FIXME: this can be optimized with sparsity
# print("e_", e_, e__, p_)
outer = np.outer(e_ * e__, p_)
# print(outer.shape, self.hebblink_filter.shape)
# print("outer", outer)
# print("modulator", z_err[idx])
# d_hebblink_filter = eta * outer * (-1e-3 - z_err[idx])
# d_hebblink_filter = eta * np.outer(z_err, self.filter_e.activity.flatten()).T
# d_hebblink_filter = eta * outer * np.abs((z_err_norm_ - z_err_norm))
# d_hebblink_filter = eta * outer * (z_err_norm - z_err_norm_)
d_hebblink_filter = eta * outer
# # plotting
# f2ax1 = fig2.add_subplot(111)
# f2ax1.imshow(self.hebblink_filter.T, interpolation="none")
# # im = f2ax1.imshow(outer, interpolation="none")
# # f2ax2 = pl.colorbar(im, ax=f2ax1)
# pl.pause(1e-5)
# pl.draw()
elif self.hebblink_use_activity:
e_idx = np.argmax(e_)
p_idx = np.argmax(p_)
# print("e_", e_idx, "p_", p_idx)
d_hebblink_filter = np.zeros_like(self.hebblink_filter)
else:
                        d_hebblink_filter = eta * np.outer(self.filter_e.distances(X[i]), z_err)
# does what?
self.hebblink_filter[e_idx, p_idx] += eta * e__[e_idx]
dWnorm = np.linalg.norm(d_hebblink_filter)
dWnorm_ = 0.8 * dWnorm_ + 0.2 * dWnorm
# print ("dWnorm", dWnorm)
# self.hebblink_filter += d_hebblink_filter
# print("hebblink_filter type", type(self.hebblink_filter))
# print("np.linalg.norm(self.hebblink_filter, 2)", np.linalg.norm(self.hebblink_filter, 2))
self.hebblink_filter /= np.linalg.norm(self.hebblink_filter, 2)
j += 1
if False and self.hebb_cnt_fit % 100 == 0:
# print("hebblink_filter type", type(self.hebblink_filter))
# print(Z_err_norm)
# print("%s.fit_hebb error p/p_bar %f" % (self.__class__.__name__, np.array(Z_err_norm)[:logidx].mean()))
print("%s.fit_hebb |dW| = %f, |W| = %f, mean err = %f / %f" % (self.__class__.__name__, dWnorm_, w_norm, np.min(z_err), np.max(z_err)))
# z_err_norm_, z_err_norm__))
# print("%s.fit_hebb |W| = %f" % (self.__class__.__name__, w_norm))
self.hebb_cnt_fit += 1
def fit(self, X, y):
"""smpHebbianSOM
Fit model to data
"""
# print("%s.fit fitting X = %s, y = %s" % (self.__class__.__name__, X, y))
        # if X, y have more than one row, do batch training on SOMs and links
# otherwise do single step update on both or just the latter?
self.fit_soms(X, y)
self.fit_hebb(X, y)
self.fitted = True
# if self.visualize:
# self.Xhist.append(X)
# self.Yhist.append(y)
# if self.cnt_fit % 100 == 0:
# self.visualize_model()
self.cnt_fit += 1
def predict(self, X):
"""smpHebbianSOM"""
return self.sample(X)
def sample(self, X):
"""smpHebbianSOM.sample"""
# print("%s.sample X.shape = %s, %d" % (self.__class__.__name__, X.shape, 0))
if len(X.shape) == 2 and X.shape[0] > 1: # batch
return self.sample_batch(X)
return self.sample_cond(X)
def sample_cond(self, X):
"""smpHebbianSOM.sample_cond: draw single sample from model conditioned on X"""
# print("%s.sample_cond X.shape = %s, %d" % (self.__class__.__name__, X.shape, 0))
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(0.0)
self.filter_p.map._learning_rate = self.CT(0.0)
e_shape = (np.prod(self.filter_e.map._shape), 1)
p_shape = (np.prod(self.filter_p.map._shape), 1)
# activate input network
self.filter_e.learn(X)
# pl.plot(self.filter_e.
# propagate activation via hebbian associative links
if self.hebblink_use_activity:
e_ = self.filter_e.activity.reshape((np.prod(self.filter_e.map._shape), 1))
e_ = (e_ == np.max(e_)) * 1.0
e2p_activation = np.dot(self.hebblink_filter.T, e_)
# print("e2p_activation", e2p_activation)
self.filter_p.activity = np.clip((e2p_activation / (np.sum(e2p_activation) + 1e-9)).reshape(self.filter_p.map._shape), 0, np.inf)
else:
            e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.distances(X).flatten().reshape(e_shape))
# sample the output network with
sidxs = self.filter_p.sample(100)
# print("sidxs", stats.mode(sidxs)[0], sidxs)
# sidx = self.filter_p.sample(1)[0]
# find the mode (most frequent realization) of distribution
sidx = stats.mode(sidxs)[0][0]
e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(sidx))
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(np.argmax(self.filter_p.activity)))
# ret = np.random.normal(e2p_w_p_weights, self.filter_p.sigmas[sidx], (1, self.odim))
ret = np.random.normal(e2p_w_p_weights, np.sqrt(self.filter_p.sigmas[sidx]), (1, self.odim))
# ret = np.random.normal(e2p_w_p_weights, 0.01, (1, self.odim))
# print("hebbsom sample", sidx, e2p_w_p_weights) # , sidxs) # , self.filter_p.sigmas[sidx])
# ret = e2p_w_p_weights.reshape((1, self.odim))
return ret
def sample_prior(self):
"""smpHebbianSOM.sample_prior
Sample from input map prior distribution
"""
# print("pr")
# pass
# print("prior", self.filter_e.map.prior)
# sidxs = argsample(self.filter_e.map.prior, n = 1)
sidxs = argsample(np.sum(self.filter_e.sigmas, axis = 1), n = 1)
prior_sample_mu = self.filter_e.neuron(self.filter_e.flat_to_coords(sidxs[0]))
# print ('prior_sample_mu', prior_sample_mu.shape, self.filter_e.sigmas[sidxs[0]].shape)
# prior_sample = np.random.normal(prior_sample_mu, self.filter_e.sigmas[sidxs[0]]).reshape((self.idim, 1))
prior_sample = prior_sample_mu.reshape((self.idim, 1))
# print("prior_sample", prior_sample)
return prior_sample
# def sample_cond_legacy(self, X):
# """smpHebbianSOM.sample_cond: sample from model conditioned on X"""
# sampling_search_num = 100
# e_shape = (np.prod(self.filter_e.map._shape), 1)
# p_shape = (np.prod(self.filter_p.map._shape), 1)
# # P_ = np.zeros((X.shape[0], self.odim))
# # E_ = np.zeros((X.shape[0], self.idim))
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(self.filter_p.sample(1)[0]))
# for i in range(X.shape[0]):
# # e = EP[i,:dim_e]
# # p = EP[i,dim_e:]
# e = X[i]
# # print np.argmin(som_e.distances(e)), som_e.distances(e)
# self.filter_e.learn(e)
# # print "self.filter_e.winner(e)", self.filter_e.winner(e)
# # filter_p.learn(p)
# # print "self.filter_e.activity.shape", self.filter_e.activity.shape
# # import pdb; pdb.set_trace()
# if self.hebblink_use_activity:
# e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.activity.reshape((np.prod(self.filter_e.map._shape), 1)))
# self.filter_p.activity = np.clip((e2p_activation / np.sum(e2p_activation)).reshape(self.filter_p.map._shape), 0, np.inf)
# else:
# e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.distances(e).flatten().reshape(e_shape))
# # print "e2p_activation.shape, np.sum(e2p_activation)", e2p_activation.shape, np.sum(e2p_activation)
# # print "self.filter_p.activity.shape", self.filter_p.activity.shape
# # print "np.sum(self.filter_p.activity)", np.sum(self.filter_p.activity), (self.filter_p.activity >= 0).all()
# # self.filter_p.learn(p)
# # emodes: 0, 1, 2
# emode = 0 #
# if i % 1 == 0:
# if emode == 0:
# e2p_w_p_weights_ = []
# for k in range(sampling_search_num):
# # filter.sample return the index of the sampled unit
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(self.filter_p.sample(1)[0]))
# e2p_w_p_weights_.append(e2p_w_p_weights)
# pred = np.array(e2p_w_p_weights_)
# # print "pred", pred
# # # if we can compare against something
# # pred_err = np.linalg.norm(pred - p, 2, axis=1)
# # # print "np.linalg.norm(e2p_w_p_weights - p, 2)", np.linalg.norm(e2p_w_p_weights - p, 2)
# # e2p_w_p = np.argmin(pred_err)
# # if not pick any
# e2p_w_p = np.random.choice(pred.shape[0])
# # print("pred_err", e2p_w_p, pred_err[e2p_w_p])
# e2p_w_p_weights = e2p_w_p_weights_[e2p_w_p]
# elif emode == 1:
# if self.hebblink_use_activity:
# e2p_w_p = np.argmax(e2p_activation)
# else:
# e2p_w_p = np.argmin(e2p_activation)
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(e2p_w_p))
# elif emode == 2:
# e2p_w_p = self.filter_p.winner(p)
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(e2p_w_p))
# # P_[i] = e2p_w_p_weights
# # E_[i] = environment.compute_sensori_effect(P_[i])
# # print("e2p shape", e2p_w_p_weights.shape)
# return e2p_w_p_weights.reshape((1, self.odim))
def sample_batch(self, X):
"""smpHebbianSOM.sample_batch: If X has more than one rows, return batch of samples for
every condition row in X"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
def sample_batch_legacy(self, X, cond_dims = [0], out_dims = [1], resample_interval = 1):
"""smpHebbianSOM"""
print("%s.sample_batch_legacy data X = %s" % (self.__class__.__name__, X))
sampmax = 20
numsamplesteps = X.shape[0]
odim = len(out_dims) # self.idim - X.shape[1]
self.y_sample_ = np.zeros((odim,))
self.y_sample = np.zeros((odim,))
self.y_samples_ = np.zeros((sampmax, numsamplesteps, odim))
self.y_samples = np.zeros((numsamplesteps, odim))
self.cond = np.zeros_like(X[0])
return self.y_samples, self.y_samples_
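# Usage sketch for smpHebbianSOM (assumption: the kohonen-style Map/Filter
# classes and the ExponentialTimeseries/ConstantTimeseries helpers used above
# are importable in this module's namespace).
def _example_smpHebbianSOM_usage():
    """Illustrative usage sketch."""
    conf = dict(smpHebbianSOM.defaults)
    conf.update({'idim': 2, 'odim': 1, 'numepisodes': 10})
    mdl = smpHebbianSOM(conf)
    X = np.random.uniform(-1, 1, (500, 2))
    Y = np.random.uniform(-1, 1, (500, 1))
    mdl.fit(X, Y)          # batch fit: trains both SOMs, then the Hebbian links
    return mdl.sample(X[:5])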
################################################################################
# models_actinf: model testing and plotting code
################################################################################
def hebbsom_get_map_nodes(mdl, idim, odim):
"""hebbsom_get_map_nodes
Get all the nodes of the coupled SOM maps
"""
e_nodes = mdl.filter_e.map.neurons
p_nodes = mdl.filter_p.map.neurons
# print("e_nodes", e_nodes.shape, "p_nodes", p_nodes.shape)
e_nodes = e_nodes.reshape((-1,idim))
p_nodes = p_nodes.reshape((-1,odim))
# print("e_nodes", e_nodes.shape, "p_nodes", p_nodes.shape)
return (e_nodes, p_nodes)
def hebbsom_predict_full(X, Y, mdl):
"""hebbsom_predict_full
Predict using a HebbSOM and return full internal activations as tuple
    - (predictions (samples), distances (SOM distance func), activations (distances after act. func))
"""
distances = []
activities = []
predictions = np.zeros_like(Y)
# have to loop over single steps until we generalize predict function to also yield distances and activities
for h in range(X.shape[0]):
# X_ = (Y[h]).reshape((1, odim))
X_ = X[h]
# print("X_", X_.shape, X_)
# predict proprio 3D from extero 2D
predictions[h] = mdl.predict(X_)
# print("X_.shape = %s, %d" % (X_.shape, 0))
# print("prediction.shape = %s, %d" % (prediction.shape, 0))
distances.append(mdl.filter_e.distances(X_).flatten())
activities.append(mdl.filter_e.activity.flatten())
activities_sorted = activities[-1].argsort()
# print("Y[h]", h, Y[h].shape, prediction.shape)
return (predictions, distances, activities)
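# Sketch tying the HebbSOM helpers together (assumes a fitted smpHebbianSOM
# `mdl` and matching data X, Y; the plotting helper is defined further below).
def _example_hebbsom_diagnostics(mdl, X, Y):
    """Illustrative usage sketch."""
    predictions, distances, activities = hebbsom_predict_full(X, Y, mdl)
    plot_hebbsom_links_distances_activations(
        X, Y, mdl, predictions, distances, activities, saveplot=False)
    return predictions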
def plot_nodes_over_data_scattermatrix(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov, saveplot = False):
"""plot_nodes_over_data_scattermatrix
    Plot SOM node locations over the input data as a scatter matrix of all X
    components against all Y components.
"""
idim = X.shape[1]
odim = Y.shape[1]
numplots = idim + odim
# e_nodes, p_nodes = hebbsom_get_map_nodes(mdl, idim, odim)
dfcols = []
dfcols += ["e_%d" % i for i in range(idim)]
dfcols += ["p_%d" % i for i in range(odim)]
# X_plus_e_nodes = np.vstack((X, e_nodes))
# Y_plus_p_nodes = np.vstack((Y, p_nodes))
# df = pd.DataFrame(np.hstack((X_plus_e_nodes, Y_plus_p_nodes)), columns=dfcols)
df = pd.DataFrame(np.hstack((X, Y)), columns=dfcols)
sm = scatter_matrix(df, alpha=0.2, figsize=(5,5), diagonal="hist")
# print("sm = %s" % (sm))
# loop over i/o components
idims = list(range(idim))
odims = list(range(idim, idim+odim))
for i in range(numplots):
for j in range(numplots):
if i != j and i in idims and j in idims:
# center = np.array()
# x1, x2 = gmm.gauss_ellipse_2d(centroids[i], ccov[i])
sm[i,j].plot(e_nodes[:,j], e_nodes[:,i], "ro", alpha=0.5, markersize=8)
if i != j and i in odims and j in odims:
sm[i,j].plot(p_nodes[:,j-idim], p_nodes[:,i-idim], "ro", alpha=0.5, markersize=8)
# if i != j and i in idims and j in odims:
# sm[i,j].plot(p_nodes[:,j-idim], e_nodes[:,i], "go", alpha=0.5, markersize=8)
# if i != j and i in odims and j in idims:
# sm[i,j].plot(e_nodes[:,j], p_nodes[:,i-idim], "go", alpha=0.5, markersize=8)
# get figure reference from axis and show
fig = sm[0,0].get_figure()
fig.suptitle("Predictions over data scattermatrix (%s)" % (mdl.__class__.__name__))
if saveplot:
filename = "plot_nodes_over_data_scattermatrix_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_nodes_over_data_scattermatrix_hexbin(X, Y, mdl, predictions, distances, activities, saveplot = False):
"""models_actinf.plot_nodes_over_data_scattermatrix_hexbin
    Plot the model's nodes (if applicable) over the hexbinned data,
    expanding the dimensions as a scatter matrix.
"""
idim = X.shape[1]
odim = Y.shape[1]
numplots = idim * odim + 2
fig = pl.figure()
fig.suptitle("Predictions over data xy scattermatrix/hexbin (%s)" % (mdl.__class__.__name__))
gs = gridspec.GridSpec(idim, odim)
figaxes = []
for i in range(idim):
figaxes.append([])
for o in range(odim):
figaxes[i].append(fig.add_subplot(gs[i,o]))
err = 0
# colsa = ["k", "r", "g", "c", "m", "y"]
# colsb = ["k", "r", "g", "c", "m", "y"]
colsa = ["k" for col in range(idim)]
colsb = ["r" for col in range(odim)]
for i in range(odim): # odim * 2
for j in range(idim):
# pl.subplot(numplots, 1, (i*idim)+j+1)
ax = figaxes[j][i]
# target = Y[h,i]
# X__ = X_[j] # X[h,j]
# err += np.sum(np.square(target - prediction))
# ax.plot(X__, [target], colsa[j] + ".", alpha=0.25, label="target_%d" % i)
# ax.plot(X__, [prediction[0,i]], colsb[j] + "o", alpha=0.25, label="pred_%d" % i)
# ax.plot(X[:,j], Y[:,i], colsa[j] + ".", alpha=0.25, label="target_%d" % i)
ax.hexbin(X[:,j], Y[:,i], gridsize = 20, alpha=0.75, cmap=pl.get_cmap("gray"))
ax.plot(X[:,j], predictions[:,i], colsb[j] + "o", alpha=0.15, label="pred_%d" % i, markersize=8)
# pred1 = mdl.filter_e.neuron(mdl.filter_e.flat_to_coords(activities_sorted[-1]))
# ax.plot(X__, [pred1], "ro", alpha=0.5)
# pred2 = mdl.filter_e.neuron(mdl.filter_e.flat_to_coords(activities_sorted[-2]))
# ax.plot(X__, [pred2], "ro", alpha=0.25)
# print("accum total err = %f" % (err / X.shape[0] / (idim * odim)))
if saveplot:
filename = "plot_nodes_over_data_scattermatrix_hexbin_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_hebbsom_links_distances_activations(X, Y, mdl, predictions, distances, activities, saveplot = False):
"""plot the hebbian link matrix, and all node distances and activities for all inputs"""
hebblink_log = np.log(mdl.hebblink_filter.T + 1.0)
fig = pl.figure()
fig.suptitle("Debugging SOM: hebbian links, distances, activities (%s)" % (mdl.__class__.__name__))
gs = gridspec.GridSpec(4, 1)
# pl.plot(X, Y, "k.", alpha=0.5)
# pl.subplot(numplots, 1, numplots-1)
ax1 = fig.add_subplot(gs[0])
ax1.set_title('hebbian associative links')
# im1 = ax1.imshow(mdl.hebblink_filter, interpolation="none", cmap=pl.get_cmap("gray"))
im1 = ax1.pcolormesh(hebblink_log, cmap=pl.get_cmap("gray"))
ax1.set_xlabel("in (e)")
ax1.set_ylabel("out (p)")
cbar = fig.colorbar(mappable = im1, ax=ax1, orientation="horizontal")
ax2 = fig.add_subplot(gs[1])
ax2.set_title('distances over time')
distarray = np.array(distances)
# print("distarray.shape", distarray.shape)
pcm = ax2.pcolormesh(distarray.T)
cbar = fig.colorbar(mappable = pcm, ax=ax2, orientation="horizontal")
# pl.subplot(numplots, 1, numplots)
ax3 = fig.add_subplot(gs[2])
ax3.set_title('activations propagated via hebbian links')
actarray = np.array(activities)
# print("actarray.shape", actarray.shape)
pcm = ax3.pcolormesh(actarray.T)
cbar = fig.colorbar(mappable = pcm, ax=ax3, orientation="horizontal")
ax4 = fig.add_subplot(gs[3])
ax4.set_title('flattened link table')
ax4.plot(hebblink_log.flatten())
# print("hebblink_log", hebblink_log)
if saveplot:
filename = "plot_hebbsom_links_distances_activations_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_mdn_mues_over_data_scan(X, Y, mdl, saveplot = False):
mues = []
sigs = []
pis = []
print("plot_mdn_mues_over_data_scan: X", X.shape)
fig = pl.figure()
gs = gridspec.GridSpec(2, 2)
dim = Y.shape[1]
xscan = np.linspace(-np.pi, np.pi, 101).reshape((-1, 1))
num_mu = mdl.mixcomps * dim
# num_sig = mixcomps * d ** 2
    num_sig = ((dim ** 2 - dim) // 2 + dim) * mdl.mixcomps
num_pi = mdl.mixcomps
if X.shape[1] > 1:
xscan = np.hstack((xscan, xscan))
print("xscan", xscan.shape)
xscan = X[:100]
for xs in xscan:
# print("xs", xs)
xs = np.atleast_2d(xs)
print("xs", xs)
y = mdl.predict(xs)
# mues.append(mdl.model.z[:mdl.mixcomps,0])
# sigs.append(np.exp(mdl.model.z[mdl.mixcomps:(2*mdl.mixcomps),0]))
# pis.append(mdl.lr.softmax(mdl.model.z[(2*mdl.mixcomps):,0]))
mues.append(mdl.model.z[:num_mu])
sigs.append(np.exp(mdl.model.z[num_mu:num_mu + num_sig]))
pis.append(mdl.lr.softmax(mdl.model.z[-num_pi:]))
# print("xs", xs, "ys", y)
# print("mues", mues)
numpoints = xscan.shape[0]
mues = np.vstack(mues).reshape((numpoints, mdl.mixcomps, dim))
    sigs = np.vstack(sigs).reshape((numpoints, mdl.mixcomps, num_sig // mdl.mixcomps))
pis = np.vstack(pis).reshape((numpoints, mdl.mixcomps))
print("mues", mues.shape)
print("sigs", sigs.shape)
print("pis", pis.shape)
colors = ['r', 'g', 'b', 'c', 'y', 'm']
for h in range(dim):
# ax = fig.add_subplot(dim, 2, h + 1)
ax = fig.add_subplot(gs[h,0])
for i in range(mdl.mixcomps):
for j in range(xscan.shape[0]):
# print("mues", mues[[j],[i]], "pis", pis[j,i])
ax.plot(
xscan[[j]], mues[[j],[i],[h]],
marker = 'o', markerfacecolor = colors[i % len(colors)],
markeredgecolor = colors[i % len(colors)],
alpha = pis[j,i])
# ax.plot(xscan[[j]], mues[[j],[i],[h]] - sigs[[j],[i],[h]], "bo", alpha = pis[j,i], markersize = 2.5)
# ax.plot(xscan[[j]], mues[[j],[i],[h]] + sigs[[j],[i],[h]], "bo", alpha = pis[j,i], markersize = 2.5)
ax = fig.add_subplot(gs[0,1])
if dim == 1:
plot_predictions_over_data(X, Y, mdl, saveplot, ax = ax, datalim = 1000)
else:
plot_predictions_over_data_2D(X, Y, mdl, saveplot, ax = ax, datalim = 1000)
for i in range(mdl.mixcomps):
ax.plot(mues[:,i,0], mues[:,i,1], linestyle = "none", marker = 'o', markerfacecolor = colors[i % len(colors)], alpha = np.mean(pis[:,i]))
# ax.plot(xscan, mues - sigs, "bo", alpha = 0.5, markersize = 2.0)
# ax.plot(xscan, mues + sigs, "bo", alpha = 0.5, markersize = 2.0)
# ax.plot(xscan, mues, "ro", alpha = 0.5)
# ax.plot(mues, xscan, "ro", alpha = 0.5)
if saveplot:
filename = "plot_mdn_mues_over_data_scan_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_predictions_over_data(X, Y, mdl, saveplot = False, ax = None, datalim = 1000):
do_hexbin = False
if X.shape[0] > 4000:
do_hexbin = False # True
X = X[-4000:]
Y = Y[-4000:]
# plot prediction
idim = X.shape[1]
odim = Y.shape[1]
numsamples = 1 # 2
Y_samples = []
for i in range(numsamples):
Y_samples.append(mdl.predict(X))
# print("Y_samples[0]", Y_samples[0])
fig = pl.figure()
fig.suptitle("Predictions over data xy (numsamples = %d, (%s)" % (numsamples, mdl.__class__.__name__))
gs = gridspec.GridSpec(odim, 1)
for i in range(odim):
ax = fig.add_subplot(gs[i])
target = Y[:,i]
if do_hexbin:
ax.hexbin(X, Y, gridsize = 20, alpha=1.0, cmap=pl.get_cmap("gray"))
else:
ax.plot(X, target, "k.", label="Y_", alpha=0.5)
for j in range(numsamples):
prediction = Y_samples[j][:,i]
# print("X", X.shape, "prediction", prediction.shape)
# print("X", X, "prediction", prediction)
if do_hexbin:
ax.hexbin(X[:,i], prediction, gridsize = 30, alpha=0.6, cmap=pl.get_cmap("Reds"))
else:
ax.plot(X[:,i], prediction, "r.", label="Y_", alpha=0.25)
# get limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
error = target - prediction
mse = np.mean(np.square(error))
mae = np.mean(np.abs(error))
xran = xlim[1] - xlim[0]
yran = ylim[1] - ylim[0]
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
if saveplot:
filename = "plot_predictions_over_data_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_predictions_over_data_2D(X, Y, mdl, saveplot = False, ax = None, datalim = 1000):
do_hexbin = False
if X.shape[0] > datalim:
do_hexbin = False # True
X = X[-datalim:]
Y = Y[-datalim:]
# plot prediction
idim = X.shape[1]
odim = Y.shape[1]
numsamples = 1 # 2
Y_samples = []
for i in range(numsamples):
Y_samples.append(mdl.predict(X))
# print("Y_samples[0]", Y_samples[0].shape)
# Y_samples
if ax is None:
fig = pl.figure()
fig.suptitle("Predictions over data xy (numsamples = %d, (%s)" % (numsamples, mdl.__class__.__name__))
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0])
else:
fig = None
ax.plot(Y[:,0], Y[:,1], 'ko', alpha = 0.1)
ax.plot(Y_samples[0][:,0], Y_samples[0][:,1], 'r.', alpha = 0.1)
ax.set_aspect(1)
# for i in range(odim):
# ax = fig.add_subplot(gs[i])
# target = Y[:,i]
# if do_hexbin:
# ax.hexbin(X, Y, gridsize = 20, alpha=1.0, cmap=pl.get_cmap("gray"))
# else:
# ax.plot(X, target, "k.", label="Y_", alpha=0.5)
# for j in range(numsamples):
# prediction = Y_samples[j][:,i]
# # print("X", X.shape, "prediction", prediction.shape)
# # print("X", X, "prediction", prediction)
# if do_hexbin:
# ax.hexbin(X[:,i], prediction, gridsize = 30, alpha=0.6, cmap=pl.get_cmap("Reds"))
# else:
# ax.plot(X[:,i], prediction, "r.", label="Y_", alpha=0.25)
# # get limits
# xlim = ax.get_xlim()
# ylim = ax.get_ylim()
# error = target - prediction
# mse = np.mean(np.square(error))
# mae = np.mean(np.abs(error))
# xran = xlim[1] - xlim[0]
# yran = ylim[1] - ylim[0]
# ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
# ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
if fig is not None:
if saveplot:
filename = "plot_predictions_over_data_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_predictions_over_data_ts(X, Y, mdl, saveplot = False):
# plot prediction
idim = X.shape[1]
odim = Y.shape[1]
numsamples = 2
Y_samples = []
print("Xxx", X.shape)
for i in range(numsamples):
Y_samples.append(mdl.predict(X))
print("Y_samples[0]", Y_samples[0])
fig = pl.figure()
fig.suptitle("Predictions over data timeseries (numsamples = %d), (%s)" % (numsamples, mdl.__class__.__name__))
gs = gridspec.GridSpec(odim, 1)
for i in range(odim):
# pl.subplot(odim, 2, (i*2)+1)
ax = fig.add_subplot(gs[i])
target = Y[:,i]
ax.plot(target, "k.", label="Y_", alpha=0.5)
# pl.subplot(odim, 2, (i*2)+2)
# prediction = Y_[:,i]
# pl.plot(target, "k.", label="Y")
mses = []
maes = []
errors = []
for j in range(numsamples):
prediction = Y_samples[j][:,i]
error = target - prediction
errors.append(error)
mse = np.mean(np.square(error))
mae = np.mean(np.abs(error))
mses.append(mse)
maes.append(mae)
# pl.plot(prediction, target, "r.", label="Y_", alpha=0.25)
ax.plot(prediction, "r.", label="Y_", alpha=0.25)
errors = np.asarray(errors)
# print("errors.shape", errors.shape)
aes = np.min(np.abs(errors), axis=0)
ses = np.min(np.square(errors), axis=0)
mae = np.mean(aes)
mse = np.mean(ses)
# get limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xran = xlim[1] - xlim[0]
yran = ylim[1] - ylim[0]
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
# pl.plot(X[:,i], Y[:,i], "k.", alpha=0.25)
if saveplot:
filename = "plot_predictions_over_data_ts_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
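# Minimal sketch of the mdl interface assumed by the plotting helpers above:
# any object exposing predict(X) -> array of shape (N, odim) can be passed as
# mdl. The stand-in below just predicts zeros and is only meant for smoke
# testing the plots; it is not part of the original model zoo.
class _ConstantPredictor(object):
    def __init__(self, odim = 1):
        self.odim = odim
    def predict(self, X):
        # constant (zero) prediction with the expected output shape
        return np.zeros((X.shape[0], self.odim))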
def get_class_from_name(name = "KNN"):
"""models_actinf.get_class_from_name
Get a class by a common name string.
"""
if name == "KNN":
cls = smpKNN
elif name == "SOESGP":
cls = smpSOESGP
elif name == "STORKGP":
cls = smpSTORKGP
elif name == "GMM":
cls = partial(smpGMM, K = 20)
elif name == "IGMM":
cls = partial(smpIGMM, K = 20)
elif name == "HebbSOM":
cls = smpHebbianSOM
elif name == 'resRLS':
from smp_base.models_learners import smpSHL
cls = smpSHL
else:
cls = smpKNN
return cls
def generate_inverted_sinewave_dataset(N = 1000, f = 1.0, p = 0.0, a1 = 1.0, a2 = 0.3):
"""models_actinf.generate_inverted_sinewave_dataset
Generate the inverted sine dataset used in Bishop's (Bishop96)
mixture density paper
Returns:
- matrices X, Y
"""
X = np.linspace(0,1,N)
# FIXME: include phase p
Y = a1 * X + a2 * np.sin(f * (2 * 3.1415926) * X) + np.random.uniform(-0.1, 0.1, N)
X,Y = Y[:,np.newaxis],X[:,np.newaxis]
# pl.subplot(211)
# pl.plot(Y, X, "ko", alpha=0.25)
# pl.subplot(212)
# pl.plot(X, Y, "ko", alpha=0.25)
# pl.show()
return X,Y
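# Quick sketch (assumes the module-level aliases np and pl used above): draw one
# realization of the inverted sinewave dataset and scatter-plot it, mirroring
# the commented-out plotting code in generate_inverted_sinewave_dataset.
def plot_inverted_sinewave_example(N = 1000):
    X, Y = generate_inverted_sinewave_dataset(N = N)
    fig = pl.figure()
    fig.suptitle("Inverted sinewave dataset (Bishop96), N = %d" % N)
    ax = fig.add_subplot(111)
    ax.plot(X, Y, "ko", alpha = 0.25)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    fig.show()
    return X, Y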
def generate_2devensimpler_component(x):
"""models_actinf.generate_2devensimpler_component
Generate a two-dimensional correspondence dataset to test
covariance learning of the multivariate mixture density learning
rule.
Returns:
- matrix X
"""
y1_1 = np.sin(x * 10.0) * 0.5 + x * 0.3 + x ** 2 * 0.05
y1_1 += np.random.normal(0, np.abs(x - np.mean(x)) * 0.3)
y1_2 = np.sin(x * 5.0) * 0.3 + x * 0.5 - x ** 2 * 0.2
y1_2 += np.random.normal(0, np.abs(x - np.mean(x)) * 0.3)
print(y1_1.shape, y1_2.shape)
    return np.vstack((y1_1, y1_2))
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree
"""Common visualization utilities."""
import PIL
import numpy as np
from qiskit.converters import circuit_to_dag
from qiskit.tools.visualization._error import VisualizationError
def _validate_input_state(quantum_state):
"""Validates the input to state visualization functions.
Args:
quantum_state (ndarray): Input state / density matrix.
Returns:
rho: A 2d numpy array for the density matrix.
Raises:
VisualizationError: Invalid input.
"""
rho = np.asarray(quantum_state)
if rho.ndim == 1:
        rho = np.outer(rho, np.conj(rho))
from __future__ import print_function
try:
import cv2
except ModuleNotFoundError:
print("Please install opencv-python module using following command:\npip3 install opencv-python")
import stmpy
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.optimize as opt
import scipy.ndimage as snd
from scipy.interpolate import interp1d, interp2d
from skimage import transform as tf
from skimage.feature import peak_local_max
from pprint import pprint
import types
'''
REFERENCES:
[1] <NAME>, et al. "Picometer registration of zinc impurity states in Bi2Sr2CaCu2O8+d for phase determination in intra-unit-cell Fourier transform STM", New J. Phys. 14, 053017 (2012).
[2] <NAME>, PhD thesis (Ch. 3), http://davisgroup.lassp.cornell.edu/theses/Thesis_JamesSlezak.pdf
History:
2017-04-28 CREATED BY <NAME>
04/29/2019 RL : Add documents for all functions. Add another method to calculate phasemap.
Add inverse FFT method to apply the drift field.
03/25/2021 RL : Change the whole drift corr library to function based library
'''
##################################################################################
######################### Wrapped functions for easy use #########################
##################################################################################
def find_drift_parameter(A, r=None, w=None, mask3=None, cut1=None, cut2=None, bp_angle=None, orient=None, bp_c=None,\
sigma=10, method='lockin', even_out=False, show=True, **kwargs):
'''
This method find drift parameters from a 2D map automatically.
Input:
A - Required : 2D array of topo or LIY in real space.
r - Optional : width of the gaussian mask, ratio to the full map size, to remove low-q noise, =r*width
Set r=None will disable this mask.
w - Optional : width of the mask that filters out noise along qx=0 and qy=0 lines.
Set w=None will disable this mask.
mask3 - Optional : Tuple for custom-defined mask. mask3 = [n, offset, width], where n is order of symmetry,
offset is initial angle, width is the width of the mask. e.g., mask3 = [4, np.pi/4, 5],
or mask3 = [6, 0, 10].
Set mask3=None will disable this mask.
        even_out    - Optional : Boolean, if True then Bragg peaks will be rounded to make sure there is an even number of lattice sites
cut1 - Optional : List of length 1 or length 4, specifying how much bad area or area with too large drift to be cut
cut2 - Optional : List of length 1 or length 4, specifying after local drift correction how much to crop on the edge
angle_offset- Optional : The min offset angle of the Bragg peak to the x-axis, in unit of rad
bp_angle - Optional : The angle between neighboring Bragg peaks, if not given, it will be computed based on all Bragg peaks
orient - Optional : The orientation of the Bragg peaks with respect to the x-axis
bp_c - Optional : The correct Bragg peak position that user wants after the drift correction
sigma - Optional : Floating number specifying the size of mask to be used in phasemap()
method - Optional : Specifying which method to apply the drift correction
"lockin": Interpolate A and then apply it to a new set of coordinates, (x-ux, y-uy)
"convolution": Used inversion fft to apply the drift fields
show - Optional : Boolean, if True then A and Bragg peaks will be plotted out.
**kwargs - Optional : key word arguments for findBraggs function
Returns:
p : A dict of parameters that can be directly applied to drift correct orther 2D or 3D datasets
Usage:
p = find_drift(z, sigma=4, cut1=None, cut2=[0,7,0,7], show=True)
History:
06/23/2020 - RL : Initial commit.
'''
p = {}
if cut1 is not None:
A = cropedge(A, n=cut1)
# find the Bragg peak before the drift correction
bp1 = findBraggs(A, r=r, w=w, mask3=mask3, show=show, **kwargs)
bp1 = sortBraggs(bp1, s=np.shape(A))
if bp_c is None:
# Find the angle between each Bragg peaks
if bp_angle is None:
N = len(bp1)
Q = bp_to_q(bp1, A)
angles = []
for i in range(N-1):
angles.append(np.arctan2(*Q[i+1]) - np.arctan2(*Q[i]))
            # Here are the commonly used angles in the real world
angle_list = np.array([0, np.pi/6, np.pi/4, np.pi/3, np.pi/2])
offset = np.absolute(np.mean(angles) - angle_list)
index = np.argmin(offset)
bp_angle = angle_list[index]
if orient is None:
orient = np.absolute(np.arctan2(*Q[0]))
# Calculate the correction position of each Bragg peak
bp_c = generate_bp(A, bp1, angle=bp_angle, orient= orient, even_out=even_out)
# Find the phasemap
thetax, thetay, Q1, Q2 = phasemap(A, bp=bp_c, method=method, sigma=sigma)
phix = fixphaseslip(thetax, method='unwrap')
phiy = fixphaseslip(thetay, method='unwrap')
ux, uy = driftmap(phix, phiy, Q1, Q2, method=method)
z_temp = driftcorr(A, ux, uy, method=method, interpolation='cubic')
# This part interpolates the drift corrected maps
if cut2 is None:
z_c = z_temp
else:
bp3 = findBraggs(z_temp, r=r, w=w, mask3=mask3, **kwargs)
z_c = cropedge(z_temp, n=cut2, bp=bp3, force_commen=True)
p['bp3'] = bp3
# This part displays the intermediate maps in the process of drift correction
if show is True:
fig, ax = plt.subplots(1, 2, figsize=[8, 4])
c = np.mean(phix)
s = np.std(phix)
fig.suptitle('Phasemaps after fixing phase slips:')
ax[0].imshow(phix, origin='lower', clim=[c-5*s, c+5*s])
ax[1].imshow(phiy, origin='lower', clim=[c-5*s, c+5*s])
A_fft = stmpy.tools.fft(A, zeroDC=True)
B_fft = stmpy.tools.fft(z_c, zeroDC=True)
c1 = np.mean(A_fft)
s1 = np.std(A_fft)
c2 = np.mean(A)
s2 = np.std(A)
fig, ax = plt.subplots(2, 2, figsize=[8, 8])
fig.suptitle('Maps before and after drift correction:')
ax[0,0].imshow(A, cmap=stmpy.cm.blue2, origin='lower', clim=[c2-5*s2, c2+5*s2])
ax[0,1].imshow(A_fft, cmap=stmpy.cm.gray_r, origin='lower', clim=[0, c1+5*s1])
ax[1,0].imshow(z_c, cmap=stmpy.cm.blue2, origin='lower', clim=[c2-5*s2, c2+5*s2])
ax[1,1].imshow(B_fft, cmap=stmpy.cm.gray_r, origin='lower', clim=[0, c1+5*s1])
p['cut1'] = cut1
p['cut2'] = cut2
p['r'] = r
p['w'] = w
p['mask3'] = mask3
p['sigma'] = sigma
p['method'] = method
p['even_out'] = even_out
p['bp_c'] = bp_c
p['bp_angle'] = bp_angle
p['orient'] = orient
p['bp1'] = bp1
p['phix'] = phix
p['phiy'] = phiy
p['ux'] = ux
p['uy'] = uy
return z_c, p
def apply_drift_parameter(A, p, **kwargs):
'''
    Apply the drift correction parameters p to the 2D or 3D map A.
Input:
A - Required : 2D or 3D map to be drift corrected
p - Required : A collection of parameters to be used in drift correction.
Use parameters (those parameters should be generated by find_drift_parameter automatically)
                                cut1   : edge pixels cropped before the correction
                                cut2   : edge pixels cropped (and made commensurate) after the correction
                                ux     : drift field in the x direction
                                uy     : drift field in the y direction
                                method : 'lockin' or 'convolution'
                                bp3    : Bragg peaks of the drift-corrected reference map
        **kwargs - Optional : reserved, currently unused
Returns:
A_c : 2D or 3D map with drift removed.
Usage:
A_c = apply_drift_parameter(A, p)
History:
06/23/2020 - RL : Initial commit.
'''
data_c = np.copy(A)
if p['cut1'] is None:
data_c = data_c
else:
data_c = cropedge(data_c, n=p['cut1'])
data_corr = driftcorr(data_c, ux=p['ux'], uy=p['uy'], method=p['method'], interpolation='cubic')
if p['cut2'] is None:
data_out = data_corr
else:
data_out = cropedge(data_corr, bp=p['bp3'], n=p['cut2'], force_commen=True)
return data_out
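# End-to-end sketch of the two wrapped functions above. `topo` (2D) and `liy`
# (3D, same field of view) are assumed inputs; the keyword values below are
# illustrative only and should be tuned per dataset.
def drift_correct_topo_and_map_example(topo, liy):
    # find the drift parameters once on the topograph ...
    topo_c, p = find_drift_parameter(topo, r=0.1, w=0.05, sigma=10,
                                     cut1=None, cut2=[0, 7, 0, 7],
                                     method='lockin', show=False)
    # ... then re-use them on the spectroscopic map
    liy_c = apply_drift_parameter(liy, p)
    return topo_c, liy_c, p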
##################################################################################
###################### Basic building blocks for OOD use #########################
##################################################################################
def get_para(A, a0=None, size=None, angle=np.pi/2, orient=np.pi/4,
pixels=None, even_out=False, use_a0=False):
'''
Get parameters that are useful for the drift correction
Input:
A - Required : Spy object of topo (2D) or map (3D).
a0 - Optional : Lattice constant in the unit of nm.
size - Optional : Size of the map in the unit of nm. If not offered, it'll be created
automatically from header file.
        angle       - Optional : Angle of the lattice. If the lattice has n-fold symmetry, then angle = 2*pi/n
orient - Optional : Angle of the 1st Bragg peak. It's actually the orientation of the scan frame
with respect to the Lattice.
pixels - Optional : Number of pixels of the topo/map. If not offered, it'll be created
automatically from header file.
        even_out    - Optional : Boolean, if True then Bragg peaks will be rounded to make sure there is an even number of lattice sites
Returns:
        N/A - the parameter dict is stored on A as A.dfc_para
Usage:
import stmpy.driftcorr as dfc
        dfc.get_para(topo, a0=a0)
'''
if size is None:
try:
size = A.header['scan_range'][-2:]
except KeyError:
try:
#size = float(A.header['Grid settings'].split(";")[-2])
size = [float(k)
for k in A.header['Grid settings'].split(";")[-2:]]
except:
print(
"Error: Cannot find map size from header. Please input it manually.")
if pixels is None:
try:
#pixels = int(A.header['scan_pixels'][-1])
pixels = [int(k) for k in A.header['scan_pixels'][-2:]]
except KeyError:
try:
pixels = int(A.header['Grid dim'].split()[-1][:-1])
except:
print(
"Error: Cannot find number of pixels from header. Please input it manually.")
if not isinstance(size, list):
sizex, sizey = size, size
else:
sizex, sizey = size
if not isinstance(pixels, list):
pixelx, pixely = pixels, pixels
else:
pixelx, pixely = pixels
if a0 is None:
use_a0 = False
a0 = 1
# parameters related to the map itself
A.dfc_para = {
'a0': a0,
'size': np.array([sizex, sizey]),
'pixels': np.array([pixelx, pixely]),
'qmag': np.array([sizex, sizey]) / a0,
'qscale': np.array([pixelx, pixely]) / (2*np.array([sizex, sizey]) / a0),
'angle': angle,
'orient': orient,
'use_a0': use_a0,
'even_out': even_out,
}
def find_drift(self, A, r=None, w=None, mask3=None, cut1=None, cut2=None, \
sigma=10, method='convolution', even_out=False, show=True, **kwargs):
'''
This method find drift field from a 2D map automatically.
Input:
A - Required : 2D array of topo or LIY in real space.
r - Optional : width of the gaussian mask to remove low-q noise, =r*width
Set r=None will disable this mask.
w - Optional : width of the mask that filters out noise along qx=0 and qy=0 lines.
Set w=None will disable this mask.
mask3 - Optional : Tuple for custom-defined mask. mask3 = [n, offset, width], where n is order of symmetry, offset is initial angle, width is
the width of the mask. e.g., mask3 = [4, np.pi/4, 5], or mask3 = [6, 0, 10]
Set mask3=None will disable this mask.
        even_out    - Optional : Boolean, if True then Bragg peaks will be rounded to make sure there is an even number of lattice sites
cut1 - Optional : List of length 1 or length 4, specifying after global shear correction how much to crop on the edge
cut2 - Optional : List of length 1 or length 4, specifying after local drift correction how much to crop on the edge
sigma - Optional : Floating number specifying the size of mask to be used in phasemap()
method - Optional : Specifying which method to apply the drift correction
"lockin": Interpolate A and then apply it to a new set of coordinates,
(x-ux, y-uy)
"convolution": Used inversion fft to apply the drift fields
show - Optional : Boolean, if True then A and Bragg peaks will be plotted out.
**kwargs - Optional : key word arguments for findBraggs function
Returns:
        N/A - results (bp1, bp2, bp3, phix, phiy, ux, uy, zc) are stored as attributes of self
Usage:
t.find_drift(t.z, sigma=4, cut1=None, cut2=[0,7,0,7], show=True)
History:
06/09/2020 - RL : Initial commit.
'''
if not hasattr(self, 'parameters'):
self = getAttrs(self, a0=None, size=None, angle=np.pi/2, orient=np.pi/4, pixels=np.shape(A)[::-1], \
even_out=even_out, use_a0=None)
# Find Bragg peaks that will be used in the drift correction part
self.dfcPara = {
'cut1': cut1,
'cut2': cut2,
'method': method,
'sigma': sigma,
}
if cut1 is not None:
A = cropedge(A, n=cut1)
if not hasattr(self, 'bp_parameters'):
self.bp1 = findBraggs(A, r=r, w=w, mask3=mask3, update_obj=True, obj=self, \
show=show, even_out=even_out, **kwargs)
else:
self.bp1 = findBraggs(A, r=r, w=w, mask3=mask3, update_obj=True, obj=self, \
show=show, even_out=even_out, **kwargs)
# self.bp1 = findBraggs(A, obj=self, show=show)
self.bp1 = sortBraggs(self.bp1, s=np.shape(A))
if self.parameters['angle'] is None:
N = len(self.bp1)
Q = bp_to_q(self.bp1, A)
angles = []
for i in range(N-1):
angles.append(np.arctan2(*Q[i+1]) - np.arctan2(*Q[i]))
# Here are the commonly used angles in the real world
angle_list = np.array([0, np.pi/6, np.pi/4, np.pi/3, np.pi/2])
offset = np.absolute(np.mean(angles) - angle_list)
index = np.argmin(offset)
self.parameters['angle'] = angle_list[index]
if self.parameters['orient'] is None:
orient = np.absolute(np.arctan2(*Q[0]))
self.parameters['orient'] = orient
# This is the correct value for the Bragg peak
self.bp2 = generate_bp(A, self.bp1, angle=self.parameters['angle'], orient= self.parameters['orient'],
even_out=self.parameters['even_out'], obj=self)
# This part corrects for the drift
thetax, thetay, Q1, Q2 = phasemap(A, bp=self.bp2, method=method, sigma=sigma)
self.phix = fixphaseslip(thetax, method='unwrap')
self.phiy = fixphaseslip(thetay, method='unwrap')
self.ux, self.uy = driftmap(self.phix, self.phiy, Q1, Q2, method=method)
ztemp = driftcorr(A, self.ux, self.uy, method=method, interpolation='cubic')
# This part interpolates the drift corrected maps
self.bp3 = findBraggs(ztemp, obj=self)
if cut2 is None:
cut2 = 0
force_commen = False
else:
force_commen = True
self.zc = cropedge(ztemp, n=cut2, bp=self.bp3, force_commen=force_commen)
# This part displays the intermediate maps in the process of drift correction
if show is True:
fig, ax = plt.subplots(1, 2, figsize=[8, 4])
c = np.mean(self.phix)
s = np.std(self.phix)
fig.suptitle('Phasemaps after fixing phase slips:')
ax[0].imshow(self.phix, origin='lower', clim=[c-5*s, c+5*s])
ax[1].imshow(self.phiy, origin='lower', clim=[c-5*s, c+5*s])
A_fft = stmpy.tools.fft(A, zeroDC=True)
B_fft = stmpy.tools.fft(self.zc, zeroDC=True)
c1 = np.mean(A_fft)
s1 = np.std(A_fft)
c2 = np.mean(A)
s2 = np.std(A)
fig, ax = plt.subplots(2, 2, figsize=[8, 8])
fig.suptitle('Maps before and after drift correction:')
ax[0,0].imshow(A, cmap=stmpy.cm.blue2, origin='lower', clim=[c2-5*s2, c2+5*s2])
ax[0,1].imshow(A_fft, cmap=stmpy.cm.gray_r, origin='lower', clim=[0, c1+5*s1])
ax[1,0].imshow(self.zc, cmap=stmpy.cm.blue2, origin='lower', clim=[c2-5*s2, c2+5*s2])
ax[1,1].imshow(B_fft, cmap=stmpy.cm.gray_r, origin='lower', clim=[0, c1+5*s1])
self.bp = findBraggs(self.zc, obj=self)
def correct(self, use):
'''
    Use attributes of object "self" to correct the 3D map "use".
Input:
self - Required : Spy object of topo (2D) or map (3D).
use - Required : 3D map to be corrected with attributes of the object.
Returns:
N/A
Usage:
d.correct(d.LIY)
History:
06/09/2020 - RL : Initial commit.
'''
data_c = np.copy(use)
if self.dfcPara['cut1'] is None:
data_c = data_c
else:
data_c = cropedge(data_c, n=self.dfcPara['cut1'])
data_corr = driftcorr(data_c, ux=self.ux, uy=self.uy,
method=self.dfcPara['method'], interpolation='cubic')
if self.dfcPara['cut2'] is None:
data_out = cropedge(data_corr, bp=self.bp3, n=0, force_commen=False)
else:
data_out = cropedge(data_corr, bp=self.bp3, n=self.dfcPara['cut2'], force_commen=True)
self.liy_c = data_out
def __update_parameters(obj, a0=None, bp=None, pixels=None, size=None, use_a0=True):
if use_a0 is True:
center = (np.array(pixels)-1) // 2
Q = bp - center
q1, q2, q3, q4, *_ = Q
delta_qx = (np.absolute(q1[0]-q3[0])+np.absolute(q2[0]-q4[0])) / 2
delta_qy = (np.absolute(q1[1]-q3[1])+np.absolute(q2[1]-q4[1])) / 2
sizex = np.absolute(
delta_qx / (4 * a0 * np.cos(obj.parameters['angle'])))
sizey = np.absolute(
delta_qy / (4 * a0 * np.cos(obj.parameters['angle'])))
bp_x = np.min(bp[:, 0])
ext_x = pixels[0] / (pixels[0] - 2*bp_x)
bp_y = np.min(bp[:, 1])
ext_y = pixels[1] / (pixels[1] - 2*bp_y)
obj.parameters['size'] = np.array([sizex, sizey])
obj.parameters['pixels'] = np.array(pixels)
obj.parameters['qscale'] = np.array([ext_x, ext_y])
obj.qx = bp[0] - center
obj.qy = bp[1] - center
else:
center = (np.array(pixels)-1) // 2
bp_x = np.min(bp[:, 0])
ext_x = pixels[0] / (pixels[0] - 2*bp_x)
bp_y = np.min(bp[:, 1])
ext_y = pixels[1] / (pixels[1] - 2*bp_y)
obj.parameters['size'] = np.array(
pixels) / obj.parameters['pixels'] * obj.parameters['size']
obj.parameters['pixels'] = np.array(pixels)
obj.parameters['qscale'] = np.array([ext_x, ext_y])
obj.qx = bp[0] - center
obj.qy = bp[1] - center
##################################################################################
################## Basic building blocks for drift correction ####################
##################################################################################
#1 - findBraggs
def findBraggs(A, rspace=True, min_dist=5, thres=0.25, r=None,
w=None, mask3=None, even_out=False, precise=False,
width=10, p0=None, show=False):
'''
Find Bragg peaks in the unit of pixels of topo or FT pattern A using peak_local_max. If obj is offered,
an attribute of bp will be created for obj.
Input:
A - Required : 2D array of topo in real space, or FFT in q space.
min_dist - Optional : Minimum distance (in pixels) between peaks. Default: 5
thres - Optional : Minimum intensity of Bragg peaks relative to max value. Default: 0.25
rspace - Optional : Boolean indicating if A is real or Fourier space image. Default: True
r - Optional : width of the gaussian mask to remove low-q noise, =r*width
Set r=None will disable this mask.
w - Optional : width of the mask that filters out noise along qx=0 and qy=0 lines.
Set w=None will disable this mask.
mask3 - Optional : Tuple for custom-defined mask. mask3 = [n, offset, width], where n is order of symmetry, offset is initial angle, width is
the width of the mask. e.g., mask3 = [4, np.pi/4, 5], or mask3 = [6, 0, 10]
Set mask3=None will disable this mask.
        even_out    - Optional : Boolean, if True then Bragg peaks will be rounded to make sure there is an even number of lattice sites
precise - Optional : Boolean, if True then a 2D Gaussian fit will be used to find the precise location of Bragg peaks
width - Optional : Integer, defines how large the 2D Gaussian fit will be performed around each Bragg peaks
p0 - Optional : List of initial parameters for fitting. Default: p0 = [amplitude,x0,y0,sigmaX,sigmaY,offset]=[1, width, width, 1, 1, 0]
show - Optional : Boolean, if True then data A and Bragg peaks will be plotted.
Returns:
        coords      - (4x2) array containing the Bragg peaks in the format [[x1,y1],[x2,y2],...,[x4,y4]]
Usage:
import stmpy.driftcorr as dfc
bp = dfc.findBraggs(A, min_dist=10, thres=0.2, rspace=True, show=True)
History:
04/28/2017 JG : Initial commit.
04/29/2019 RL : Add maskon option, add outAll option, and add documents.
'''
if rspace is True:
F = stmpy.tools.fft(A, zeroDC=True)
else:
F = np.copy(A)
# Remove low-q high intensity data with multiple masks
*_, Y, X = np.shape(A)
if r is not None:
Lx = X * r
Ly = Y * r
x = np.arange(X)
y = np.arange(Y)
p0 = [int(X/2), int(Y/2), Lx, Ly, 1, np.pi/2]
G = 1-stmpy.tools.gauss2d(x, y, p=p0)
else:
G = 1
if w is not None:
mask2 = np.ones([Y, X])
mask2[Y//2-int(Y*w):Y//2+int(Y*w), :] = 0
mask2[:, X//2-int(X*w):X//2+int(X*w)] = 0
else:
mask2 = 1
if mask3 is None:
mask3 = 1
else:
mask3 = mask_bp(A, p=mask3)
F *= G * mask2 * mask3
coords = peak_local_max(F, min_distance=min_dist, threshold_rel=thres)
coords = np.fliplr(coords)
# This part is to make sure the Bragg peaks are located at even number of pixels
if even_out is not False:
coords = __even_bp(coords, s=np.shape(A))
if precise is not False:
coords = np.asarray(coords, dtype='float32')
if p0 is None:
p0 = [1, width, width, 1, 1, 0]
for i in range(len(coords)):
area = stmpy.tools.crop(F/np.sum(F), cen=[int(k) for k in coords[i]], width=width)
popt, g = fitGaussian2d(area, p0=p0)
coords[i][0] += popt[1] - width
coords[i][1] += popt[2] - width
# This part shows the Bragg peak positions
if show is not False:
plt.figure(figsize=[4, 4])
c = np.mean(F)
s = np.std(F)
plt.imshow(F, cmap=plt.cm.gray_r, interpolation='None',
origin='lower', clim=[0, c+5*s], aspect=1)
plt.plot(coords[:, 0], coords[:, 1], 'r.')
plt.gca().set_aspect(1)
plt.axis('tight')
center = (np.array(np.shape(A)[::-1])-1) // 2
print('The coordinates of the Bragg peaks are:')
pprint(coords)
print()
print('The coordinates of the Q vectors are:')
pprint(coords-center)
return coords
# help function: fitting 2D gaussian peaks around Bragg peaks
def fitGaussian2d(data, p0):
''' Fit a 2D gaussian to the data with initial parameters p0. '''
data = np.array(data)
def gauss(xy,amplitude,x0,y0,sigmaX,sigmaY,offset):
x,y = xy
        theta = np.pi / 2  # 90 degrees, expressed in radians for np.cos/np.sin
x0=float(x0);y0=float(y0)
a = 0.5*(np.cos(theta)/sigmaX)**2 + 0.5*(np.sin(theta)/sigmaY)**2
b = -np.sin(2*theta)/(2*sigmaX)**2 + np.sin(2*theta)/(2*sigmaY)**2
c = 0.5*(np.sin(theta)/sigmaX)**2 + 0.5*(np.cos(theta)/sigmaY)**2
g = offset+amplitude*np.exp(-( a*(x-x0)**2 -2*b*(x-x0)*(y-y0) + c*(y-y0)**2 ))
return g.ravel()
x = np.arange(data.shape[0]); y = np.arange(data.shape[1])
X,Y = np.meshgrid(x,y)
popt, pcov = opt.curve_fit(gauss, (X,Y), data.ravel(), p0=p0)
return popt, gauss((X,Y),*popt).reshape(data.shape)
# help function: custom mask to remove unwanted Bragg peaks
def mask_bp(A, p):
n, offset, thres, *_ = p
s = np.shape(A)[-1]
t = np.arange(s)
x, y = np.meshgrid(t, t)
center = (np.array([s, s])-1) // 2
mask = np.ones_like(x)
theta = 2 * np.pi / n
for i in range(n):
angle = theta * i + offset
index = np.where(np.absolute(np.cos(angle)*(y-center[1]) - \
np.sin(angle)*(x-center[0])) < thres)
mask[index] = 0
return mask
# help function: make sure the Bragg peaks are located on even pixels
def __even_bp(bp, s):
'''
This internal function rounds the Bragg peaks to their nearest even number of Q vectors.
'''
*_, s2, s1 = s
center = (np.array([s1, s2])-1) // 2
bp_temp = bp - center
for i, ix in enumerate(bp_temp):
for j, num in enumerate(ix):
if (num % 2) != 0:
if num > 0:
bp_temp[i, j] = num + 1
elif num <0:
bp_temp[i, j] = num - 1
else:
pass
bp_even = bp_temp + center
return bp_even
#2 - cropedge
def cropedge(A, n, bp=None, c1=2, c2=2,
a1=None, a2=None, force_commen=False):
"""
Crop out bad pixels or highly drifted regions from topo/dos map.
Inputs:
A - Required : 2D or 3D array of image to be cropped.
n - Required : List of integers specifying how many bad pixels to crop on each side.
Order: [left, right, down, up].
force_commen- Optional : Boolean determining if the atomic lattice is commensurate with
the output image.
Returns:
A_crop - 2D or 3D array of image after cropping.
Usage:
import stmpy.driftcorr as dfc
A_crop = dfc.cropedge(A, n=5)
History:
06/04/2019 RL : Initial commit.
11/30/2019 RL : Add support for non-square dataset
"""
if not isinstance(n, list):
n = [n]
if force_commen is False:
B = _rough_cut(A, n=n)
print('Shape before crop:', end=' ')
print(A.shape)
print('Shape after crop:', end=' ')
print(B.shape)
return B
else:
if n != 0:
B = _rough_cut(A, n)
else:
B = np.copy(A)
*_, L2, L1 = np.shape(A)
if bp is None:
bp = findBraggs(A, show=False)
bp = sortBraggs(bp, s=np.shape(A))
bp_new = bp - (np.array([L1, L2])-1) // 2
N1 = compute_dist(bp_new[0], bp_new[1])
N2 = compute_dist(bp_new[0], bp_new[-1])
if a1 is None:
a1 = c1 * L1 / N1
if a2 is None:
a2 = a1
#a2 = c2 * L2 / N2
*_, L2, L1 = np.shape(B)
L_new1 = a1 * ((L1)//(a1))
L_new2 = a2 * ((L2)//(a2))
t1 = np.arange(L1)
t2 = np.arange(L2)
if len(np.shape(A)) == 2:
f = interp2d(t1, t2, B, kind='cubic')
t_new1 = np.linspace(0, L_new1, num=L1+1)
t_new2 = np.linspace(0, L_new2, num=L2+1)
z_new = f(t_new1[:-1], t_new2[:-1])
elif len(np.shape(A)) == 3:
z_new = np.zeros([np.shape(A)[0], L2, L1])
for i in range(len(A)):
f = interp2d(t1, t2, B[i], kind='cubic')
t_new1 = np.linspace(0, L_new1, num=L1+1)
t_new2 = np.linspace(0, L_new2, num=L2+1)
z_new[i] = f(t_new1[:-1], t_new2[:-1])
else:
print('ERR: Input must be 2D or 3D numpy array!')
return z_new
# help function: crop edge without any interpolation
def _rough_cut(A, n):
B = np.copy(A)
if len(n) == 1:
n1 = n2 = n3 = n4 = n[0]
else:
n1, n2, n3, n4, *_ = n
    if len(B.shape) == 2:
if n2 == 0:
n2 = -B.shape[1]
if n4 == 0:
n4 = -B.shape[0]
return B[n3:-n4, n1:-n2]
    elif len(B.shape) == 3:
if n2 == 0:
n2 = -B.shape[2]
if n4 == 0:
n4 = -B.shape[1]
return B[:, n3:-n4, n1:-n2]
# 4. phasemap
def phasemap(A, bp, sigma=10, method="lockin"):
'''
Calculate local phase and phase shift maps. Two methods are available now: spatial lockin or Gaussian mask convolution
Input:
A - Required : 2D arrays after global shear correction with bad pixels cropped on the edge
bp - Required : Coords of Bragg peaks of FT(A), can be computed by findBraggs(A)
sigma - Optional : width of DC filter in lockin method or len(A)/s
method - Optional : Specify which method to use to calculate phase map.
"lockin": Spatial lock-in method to find phase map
"convolution": Gaussian mask convolution method to find phase map
Returns:
thetax - 2D array, Phase shift map in x direction, relative to perfectly generated cos lattice
thetay - 2D array, Phase shift map in y direction, relative to perfectly generated cos lattice
Q1 - Coordinates of 1st Bragg peak
Q2 - Coordinates of 2nd Bragg peak
Usage:
import stmpy.driftcorr as dfc
thetax, thetay, Q1, Q2 = dfc.phasemap(A, bp, sigma=10, method='lockin')
History:
04/28/2017 JG : Initial commit.
04/29/2019 RL : Add "convolution" method, and add documents.
11/30/2019 RL : Add support for non-square dataset
'''
*_, s2, s1 = A.shape
if not isinstance(sigma, list):
sigma = [sigma]
if len(sigma) == 1:
sigmax = sigmay = sigma[0]
else:
sigmax, sigmay, *_ = sigma
s = np.minimum(s1, s2)
bp = sortBraggs(bp, s=np.shape(A))
t1 = np.arange(s1, dtype='float')
t2 = np.arange(s2, dtype='float')
x, y = np.meshgrid(t1, t2)
Q1 = 2*np.pi*np.array([(bp[0][0]-int((s1-1)/2))/s1,
(bp[0][1]-int((s2-1)/2))/s2])
Q2 = 2*np.pi*np.array([(bp[1][0]-int((s1-1)/2))/s1,
(bp[1][1]-int((s2-1)/2))/s2])
if method is "lockin":
Axx = A * np.sin(Q1[0]*x+Q1[1]*y)
Axy = A * np.cos(Q1[0]*x+Q1[1]*y)
Ayx = A * np.sin(Q2[0]*x+Q2[1]*y)
Ayy = A * np.cos(Q2[0]*x+Q2[1]*y)
Axxf = FTDCfilter(Axx, sigmax, sigmay)
Axyf = FTDCfilter(Axy, sigmax, sigmay)
Ayxf = FTDCfilter(Ayx, sigmax, sigmay)
Ayyf = FTDCfilter(Ayy, sigmax, sigmay)
thetax = np.arctan2(Axxf, Axyf)
thetay = np.arctan2(Ayxf, Ayyf)
return thetax, thetay, Q1, Q2
elif method is "convolution":
t_x = np.arange(s1)
t_y = np.arange(s2)
xcoords, ycoords = np.meshgrid(t_x, t_y)
# (2.* np.pi/s)*(Q1[0] * xcoords + Q1[1] * ycoords)
exponent_x = (Q1[0] * xcoords + Q1[1] * ycoords)
# (2.* np.pi/s)*(Q2[0] * xcoords + Q2[1] * ycoords)
exponent_y = (Q2[0] * xcoords + Q2[1] * ycoords)
        A_x = A * np.exp(-1j * exponent_x)
        A_y = A * np.exp(-1j * exponent_y)
# sx = sigma
# sy = sigma * s1 / s2
sx = sigmax
sy = sigmay
Amp = 1/(4*np.pi*sx*sy)
p0 = [int((s-1)/2), int((s-1)/2), sx, sy, Amp, np.pi/2]
G = stmpy.tools.gauss2d(t_x, t_y, p=p0, symmetric=True)
T_x = sp.signal.fftconvolve(A_x, G, mode='same',)
T_y = sp.signal.fftconvolve(A_y, G, mode='same',)
R_x = np.abs(T_x)
R_y = np.abs(T_y)
phi_y = np.angle(T_y)
phi_x = np.angle(T_x)
return phi_x, phi_y, Q1, Q2
else:
print('Only two methods are available now:\n1. lockin\n2. convolution')
#5 - fixphaseslip
def fixphaseslip(A, thres=None, maxval=None, method='unwrap', orient=0):
'''
Fix phase slip by adding 2*pi at phase jump lines.
Inputs:
A - Required : 2D arrays of phase shift map, potentially containing phase slips
thres - Optional : Float number, specifying threshold for finding phase jumps in diff(A). Default: None
method - Optional : Specifying which method to fix phase slips.
"unwrap": fix phase jumps line by line in x direction and y direction, respectively
"spiral": fix phase slip in phase shift maps by flattening A into a 1D array in a spiral way
orient - Optional : Used in "spiral" phase fixing method. 0 for clockwise and 1 for counter-clockwise
Returns:
phase_corr - 2D arrays of phase shift map with phase slips corrected
Usage:
import stmpy.driftcorr as dfc
thetaxf = dfc.fixphaseslip(thetax, method='unwrap')
History:
04/28/2017 JG : Initial commit.
04/29/2019 RL : Add "unwrap" method, and add documents.
'''
output = np.copy(A[::-1, ::-1])
    maxval = 2 * np.pi if maxval is None else maxval
tol = 0.25 * maxval
if len(np.shape(A)) == 2:
*_, s2, s1 = np.shape(A)
mid2 = s2 // 2
mid1 = s1 // 2
for i in range(s2):
output[i, :] = unwrap_phase(
output[i, :], tolerance=thres, maxval=maxval)
for i in range(s1):
output[:, i] = unwrap_phase(
output[:, i], tolerance=thres, maxval=maxval)
linex = output[:, mid1]
liney = output[mid2, :]
dphx = np.diff(linex)
dphy = np.diff(liney)
dphx[np.where(np.abs(dphx) < tol)] = 0
dphx[np.where(dphx < -tol)] = 1
dphx[np.where(dphx > tol)] = -1
dphy[np.where(np.abs(dphy) < tol)] = 0
dphy[np.where(dphy < -tol)] = 1
dphy[np.where(dphy > tol)] = -1
for i in range(s2):
output[i, 1:] += 2*np.pi * np.cumsum(dphy)
for i in range(s1):
output[1:, i] += 2*np.pi * np.cumsum(dphx)
return output[::-1, ::-1]
#6 - unwrap_phase
def unwrap_phase(ph, tolerance=None, maxval=None):
maxval = 2 * np.pi if maxval is None else maxval
tol = 0.25*maxval if tolerance is None else tolerance*maxval
if len(ph) < 2:
return ph
dph = np.diff(ph)
dph[np.where(np.abs(dph) < tol)] = 0
dph[np.where(dph < -tol)] = 1
dph[np.where(dph > tol)] = -1
ph[1:] += maxval * np.cumsum(dph)
return ph
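# Minimal sanity sketch for unwrap_phase: a smooth ramp wrapped into [0, 2*pi)
# is recovered (up to floating-point rounding), since every wrap shows up as a
# jump of about -2*pi in the first difference. Note unwrap_phase modifies its
# input in place, so a copy is passed here.
def _unwrap_phase_example():
    true_phase = np.linspace(0, 6 * np.pi, 200)
    wrapped = np.mod(true_phase, 2 * np.pi)
    recovered = unwrap_phase(np.copy(wrapped))
    assert np.allclose(recovered, true_phase)
    return recovered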
def unwrap_phase_2d(A, thres=None):
output = np.copy(A[::-1, ::-1])
if len(np.shape(A)) == 2:
n = np.shape(A)[-1]
for i in range(n):
output[i, :] = unwrap_phase(output[i, :], tolerance=thres)
for i in range(n):
output[:, i] = unwrap_phase(output[:, i], tolerance=thres)
return output[::-1, ::-1]
#7 - driftmap
def driftmap(phix=None, phiy=None, Q1=None, Q2=None, method="lockin"):
'''
Calculate drift fields based on phase shift maps, with Q1 and Q2 generated by phasemap.
Inputs:
phix - Optional : 2D arrays of phase shift map in x direction with phase slips corrected
phiy - Optional : 2D arrays of phase shift map in y direction with phase slips corrected
Q1 - Optional : Coordinates of 1st Bragg peak, generated by phasemap
Q2 - Optional : Coordinates of 2nd Bragg peak, generated by phasemap
method - Optional : Specifying which method to use.
"lockin": Used for phase shift map generated by lockin method
"convolution": Used for phase shift map generated by lockin method
Returns:
ux - 2D array of drift field in x direction
uy - 2D array of drift field in y direction
Usage:
import stmpy.driftcorr as dfc
ux, uy = dfc.driftmap(thetaxf, thetayf, Q1, Q2, method='lockin')
History:
04/28/2017 JG : Initial commit.
04/29/2019 RL : Add "lockin" method, and add documents.
11/30/2019 RL : Add support for non-square dataset
'''
if method is "lockin":
tx = np.copy(phix)
ty = np.copy(phiy)
ux = -(Q2[1]*tx - Q1[1]*ty) / (Q1[0]*Q2[1]-Q1[1]*Q2[0])
uy = -(Q2[0]*tx - Q1[0]*ty) / (Q1[1]*Q2[0]-Q1[0]*Q2[1])
return ux, uy
elif method is "convolution":
#s = np.shape(thetax)[-1]
Qx_mag = np.sqrt((Q1[0])**2 + (Q1[1])**2)
Qy_mag = np.sqrt((Q2[0])**2 + (Q2[1])**2)
Qx_ang = np.arctan2(Q1[1], Q1[0]) # in radians
Qy_ang = np.arctan2(Q2[1], Q2[0]) # in radians
Qxdrift = 1/(Qx_mag) * phix # s/(2*np.pi*Qx_mag) * thetax
Qydrift = 1/(Qy_mag) * phiy # s/(2*np.pi*Qy_mag) * thetay
ux = Qxdrift * np.cos(Qx_ang) - Qydrift * np.sin(Qy_ang-np.pi/2)
uy = Qxdrift * np.sin(Qx_ang) + Qydrift * np.cos(Qy_ang-np.pi/2)
return -ux, -uy
else:
print("Only two methods are available now:\n1. lockin\n2. convolution")
#8. - driftcorr
def driftcorr(A, ux=None, uy=None, method="lockin", interpolation='cubic'):
'''
Correct the drift in the topo according to drift fields
Inputs:
A - Required : 2D or 3D arrays of topo to be drift corrected
ux - Optional : 2D arrays of drift field in x direction, generated by driftmap()
uy - Optional : 2D arrays of drift field in y direction, generated by driftmap()
method - Optional : Specifying which method to use.
"lockin": Interpolate A and then apply it to a new set of coordinates,
(x-ux, y-uy)
"convolution": Used inversion fft to apply the drift fields
interpolation - Optional : Specifying which method to use for interpolating
Returns:
A_corr - 2D or 3D array of topo with drift corrected
Usage:
import stmpy.driftcorr as dfc
        A_corr = dfc.driftcorr(A, ux=ux, uy=uy, method='lockin', interpolation='cubic')
History:
04/28/2017 JG : Initial commit.
04/29/2019 RL : Add "invfft" method, and add documents.
11/30/2019 RL : Add support for non-square dataset
'''
if method is "lockin":
A_corr = np.zeros_like(A)
*_, s2, s1 = np.shape(A)
t1 = np.arange(s1, dtype='float')
t2 = np.arange(s2, dtype='float')
x, y = np.meshgrid(t1, t2)
xnew = (x - ux).ravel()
ynew = (y - uy).ravel()
tmp = np.zeros(s1*s2)
        if len(A.shape) == 2:
tmp_f = interp2d(t1, t2, A, kind=interpolation)
for ix in range(tmp.size):
tmp[ix] = tmp_f(xnew[ix], ynew[ix])
A_corr = tmp.reshape(s2, s1)
return A_corr
        elif len(A.shape) == 3:
for iz, layer in enumerate(A):
tmp_f = interp2d(t1, t2, layer, kind=interpolation)
for ix in range(tmp.size):
tmp[ix] = tmp_f(xnew[ix], ynew[ix])
A_corr[iz] = tmp.reshape(s2, s1)
print('Processing slice %d/%d...' %
(iz+1, A.shape[0]), end='\r')
return A_corr
else:
print('ERR: Input must be 2D or 3D numpy array!')
elif method is "convolution":
A_corr = np.zeros_like(A)
        if len(A.shape) == 2:
return _apply_drift_field(A, ux=ux, uy=uy, zeroOut=True)
        elif len(A.shape) == 3:
for iz, layer in enumerate(A):
A_corr[iz] = _apply_drift_field(
layer, ux=ux, uy=uy, zeroOut=True)
print('Processing slice %d/%d...' %
(iz+1, A.shape[0]), end='\r')
return A_corr
else:
print('ERR: Input must be 2D or 3D numpy array!')
# help function: apply drift field using inverse FT method
def _apply_drift_field(A, ux, uy, zeroOut=True):
A_corr = np.copy(A)
*_, s2, s1 = np.shape(A)
t1 = np.arange(s1, dtype='float')
t2 = np.arange(s2, dtype='float')
x, y = np.meshgrid(t1, t2)
xshifted = x - ux
yshifted = y - uy
if zeroOut is True:
A_corr[np.where(xshifted < 0)] = 0
A_corr[np.where(yshifted < 0)] = 0
A_corr[np.where(xshifted > s1)] = 0
A_corr[np.where(yshifted > s2)] = 0
qcoordx = (2*np.pi/s1)*(np.arange(s1)-int(s1/2))
qcoordy = (2*np.pi/s2)*(np.arange(s2)-int(s2/2))
#qcoord = (2*np.pi/s)*(np.arange(s)-(s/2))
xshifted = np.reshape(xshifted, [1, s1*s2])
yshifted = np.reshape(yshifted, [1, s1*s2])
qcoordx = np.reshape(qcoordx, [s1, 1])
qcoordy = np.reshape(qcoordy, [s2, 1])
xphase = np.exp(-1j*(np.matmul(xshifted.T, qcoordx.T).T))
yphase = np.exp(-1j*(np.matmul(yshifted.T, qcoordy.T).T))
avgData = np.mean(A_corr)
A_corr -= avgData
A_corr = np.reshape(A_corr, s1*s2)
data_temp = np.zeros([s2, s1*s2])
for i in range(s2):
data_temp[i] = A_corr
FT = np.matmul(data_temp * xphase, yphase.T).T
invFT = np.fft.ifft2(np.fft.fftshift(FT)) + avgData
return np.real(invFT)
#9
def generate_bp(A, bp, angle=np.pi/2, orient=np.pi/4, even_out=False, obj=None):
'''
    Generate Bragg peaks with given q-vectors
Input:
A - Required : 2D array of topo in real space, or FFT in q space.
bp - Required : Bragg peaks associated with A, to be checked
        angle   - Optional : Angle of the lattice. If the lattice has n-fold symmetry, then angle = 2*pi/n
orient - Optional : Initial angle of Bragg peak, or orientation of the scan. Default is np.pi/4
obj - Optional : Data object that has bp_parameters with it,
Return:
bp_new : new Bragg peak generated from q-vectors
Usage:
        bp_new = dfc.generate_bp(A, bp, angle=np.pi/2, orient=np.pi/4, obj=obj)
History:
05-25-2020 RL : Initial commit.
06-08-2020 RL : Add the ability to compute correct Bragg peaks automatically
'''
*_, s2, s1 = np.shape(A)
bp = sortBraggs(bp, s=np.shape(A))
center = (np.array([s1, s2])-1) // 2
Q1, Q2, Q3, Q4, *_ = bp
Qx_mag = compute_dist(Q1, center)
Qy_mag = compute_dist(Q2, center)
Q_corr = np.mean([Qx_mag, Qy_mag])
Qc1 = np.array([int(k) for k in Q_corr*np.array([np.cos(orient+np.pi), np.sin(orient+np.pi)])])
Qc2 = np.array([int(k) for k in Q_corr*np.array([np.cos(-angle+orient+np.pi), np.sin(-angle+orient+np.pi)])])
bp_out = np.array([Qc1, Qc2, -Qc1, -Qc2]) + center
if even_out is not False:
bp_out = __even_bp(bp_out, s=np.shape(A))
if obj is not None:
pixels = np.shape(A)[::-1]
__update_parameters(obj, a0=obj.parameters['a0'], bp=bp_out, pixels=pixels,
size=obj.parameters['size'], use_a0=obj.parameters['use_a0'])
return sortBraggs(bp_out, s=np.shape(A))
##################################################################################
####################### Useful functions in the processing #######################
##################################################################################
def sortBraggs(bp, s):
''' Sort the Bragg peaks in the order of "lower left, lower right, upper right, and upper left" '''
*_, s2, s1 = s
center = np.array([(s1 - 1) // 2, (s2 - 1) // 2])
out = np.array(sorted(bp-center, key=lambda x: np.arctan2(*x))) + center
return out
def Gaussian2d(x, y, sigma_x, sigma_y, theta, x0, y0, Amp):
'''
x, y: ascending 1D array
x0, y0: center
'''
a = np.cos(theta)**2/2/sigma_x**2 + np.sin(theta)**2/2/sigma_y**2
    b = -np.sin(2*theta)/4/sigma_x**2 + np.sin(2*theta)/4/sigma_y**2
c = np.sin(theta)**2/2/sigma_x**2 + np.cos(theta)**2/2/sigma_y**2
z = np.zeros((len(x), len(y)))
X, Y = np.meshgrid(x, y)
z = Amp * np.exp(-(a*(X-x0)**2 + 2*b*(X-x0)*(Y-y0) + c*(Y-y0)**2))
return z
def FTDCfilter(A, sigma1, sigma2):
'''
Filtering DC component of Fourier transform and inverse FT, using a gaussian with one parameter sigma
A is a 2D array, sigma is in unit of px
'''
*_, s2, s1 = A.shape
m1, m2 = np.arange(s1, dtype='float'), np.arange(s2, dtype='float')
    c1, c2 = float((s1-1)/2), float((s2-1)/2)
# sigma1 = sigma
# sigma2 = sigma * s1 / s2
g = Gaussian2d(m1, m2, sigma1, sigma2, 0, c1, c2, 1)
ft_A = np.fft.fftshift(np.fft.fft2(A))
ft_Af = ft_A * g
Af = np.fft.ifft2(np.fft.ifftshift(ft_Af))
return np.real(Af)
def compute_dist(x1, x2, p=None):
'''
Compute the distance between point x1 and x2.
'''
if p is None:
p1, p2 = 1, 1
else:
p1, p2 = p
return np.sqrt(((x1[0]-x2[0])*p1)**2+((x1[1]-x2[1])*p2)**2)
def bp_to_q(bp, A):
'''
Convert the Bragg peaks to Q vectors by subtracting the center of the image.
Input:
bp - Required : Array of Bragg peaks
        A  - Required : 2D array (e.g. topo or its FFT) whose shape defines the image center
'''
center = (np.array(np.shape(A)[::-1])-1) // 2
return bp - center
#15. - display
def display(A, B=None, sigma=3, clim_same=True):
'''
Display or compare images in both real space and q-space.
Inputs:
A - Required : Real space image to display.
B - Optional : Another real space image to be compared with A.
sigma - Optional : sigma for the color limit.
clim_same - Optional : If True, then both FT of A and B will be displayed under the
same color limit (determined by A).
Returns:
N/A
Usage:
import stmpy.driftcorr as dfc
dfc.display(topo.z)
'''
if B is None:
A_fft = stmpy.tools.fft(A, zeroDC=True)
c = np.mean(A_fft)
s = np.std(A_fft)
fig, ax = plt.subplots(1, 2, figsize=[8, 4])
ax[0].imshow(A, cmap=stmpy.cm.blue2, origin='lower')
ax[1].imshow(A_fft, cmap=stmpy.cm.gray_r,
origin='lower', clim=[0, c+sigma*s])
for ix in ax:
ix.set_aspect(1)
else:
A_fft = stmpy.tools.fft(A, zeroDC=True)
B_fft = stmpy.tools.fft(B, zeroDC=True)
c1 = np.mean(A_fft)
s1 = np.std(A_fft)
if clim_same is True:
c2 = c1
s2 = s1
else:
c2 = np.mean(B_fft)
s2 = np.std(B_fft)
fig, ax = plt.subplots(2, 2, figsize=[8, 8])
ax[0, 0].imshow(A, cmap=stmpy.cm.blue2, origin='lower')
ax[0, 1].imshow(A_fft, cmap=stmpy.cm.gray_r,
origin='lower', clim=[0, c1+sigma*s1])
ax[1, 0].imshow(B, cmap=stmpy.cm.blue2, origin='lower')
ax[1, 1].imshow(B_fft, cmap=stmpy.cm.gray_r,
origin='lower', clim=[0, c2+sigma*s2])
for ix in ax.flatten():
ix.set_aspect(1)
def quick_linecut(A, width=2, n=4, bp=None, ax=None, thres=3):
"""
Take four linecuts automatically, horizontal, vertical, and two diagonal.
Inputs:
A - Required : FT space image to take linecuts.
width - Optional : Number of pixels for averaging.
bp - Optional : Bragg peaks
thres - Optional : threshold for displaying FT
Returns:
N/A
Usage:
import stmpy.driftcorr as dfc
r, cut = dfc.quick_linecut(A)
"""
Y = np.shape(A)[-2] / 2
X = np.shape(A)[-1] / 2
r = []
cut = []
start = [[0, Y], [X, 0], [0, 0], [0, Y*2]]
end = [[X*2, Y], [X, Y*2], [X*2, Y*2], [X*2, 0]]
color = ['r', 'g', 'b', 'k']
plt.figure(figsize=[4, 4])
if len(np.shape(A)) == 3:
if bp is None:
            bp_x = np.min(findBraggs(np.mean(A, axis=0)))
## Tutorial 03: Solving for Himmelblau function
# After you successfully install the package and activate a conda environment
from optimizer.gradient_free import NelderMeadSimplex
import numpy as np
import matplotlib.pyplot as plt
class Himmelblau():
def __init__(self, x_ranges, y_ranges):
self.x_limit = np.arange(x_ranges[0],x_ranges[1], x_ranges[-1])
self.y_limit = np.arange(y_ranges[0],y_ranges[1], y_ranges[-1])
self.z_mat = np.zeros((self.x_limit.size, self.y_limit.size))
counter_x = 0
for x in self.x_limit:
counter_y = 0
for y in self.y_limit:
                self.z_mat[counter_x, counter_y] = np.log10(self.compute_z(np.array([x, y])))
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement private clustering."""
import dataclasses
import typing
from absl import logging
import numpy as np
import sklearn.cluster
from clustering import clustering_params
from clustering import default_clustering_params
from clustering import lsh_tree
from clustering import private_outputs
class ClusteringMetrics():
"""Class for computing various clustering quality metrics.
Note: This class is relevant only for data with specified ground truth labels.
1. Dominant Label Accuracy: For each cluster, as indicated by cluster
labels, the accuracy is computed for the labeling of the points that assigns
the most frequently occurring ground truth label to each cluster.
2. True Non-matches: Fraction of pairs of points with the same ground truth
label, which get assigned to different clusters.
The number of pairs of points with the same true label present in different
clusters is computed as follows: For a single ground truth label with a
histogram of cluster labels as (n_1, ... , n_k), the number of pairs of points
in different clusters is given as
((n_1 + ... + n_k)^2 - (n_1^2 + ... + n_k^2))/2.
3. False Matches: Fraction of pairs of points with different ground truth
labels, which get assigned to the same cluster.
The number of pairs of points with different true labels in the same cluster
can also be computed similarly as above.
Attributes:
cross_label_histogram: 2D histogram of (cluster label, true label) pairs.
num_points: total number of points
dominant_label_correct_count: number of labels correctly predicted
dominant_label_accuracy: ratio of dominant_label_correct_count and
num_points
true_pairs: number of pairs of points with the same true label.
true_nonmatch_count: number of pairs of points with same true label, but
assigned to different clusters.
true_nonmatch_frac: ratio of true_nonmatch_count and true_pairs
false_pairs: number of pairs of points with different true labels.
false_match_count: number of pairs of points with different true labels, but
assigned to the same cluster.
false_match_frac: ratio of false_match_count and false_pairs
"""
cross_label_histogram: np.ndarray
num_points: int
dominant_label_correct_count: int
dominant_label_accuracy: float
true_pairs: int
true_nonmatch_count: int
true_nonmatch_frac: float
false_pairs: int
false_match_count: int
false_match_frac: float
def __init__(self, cross_label_histogram: np.ndarray):
self.cross_label_histogram = cross_label_histogram
self.num_points = np.sum(cross_label_histogram)
hist_square_sum = np.sum(cross_label_histogram**2)
num_pairs = self.num_points * (self.num_points - 1) / 2
# Dominant Label Accuracy
self.dominant_label_correct_count = np.sum(
np.max(cross_label_histogram, axis=1))
self.dominant_label_accuracy = (
self.dominant_label_correct_count / self.num_points)
# True Non-matches
true_label_count = np.sum(cross_label_histogram, axis=0)
self.true_pairs = np.sum(true_label_count * (true_label_count - 1) / 2)
self.true_nonmatch_count = (
(np.sum(true_label_count**2) - hist_square_sum) / 2)
self.true_nonmatch_frac = self.true_nonmatch_count / self.true_pairs
# False Matches
cluster_label_count = np.sum(cross_label_histogram, axis=1)
self.false_pairs = num_pairs - self.true_pairs
self.false_match_count = (
(np.sum(cluster_label_count**2) - hist_square_sum) / 2)
self.false_match_frac = self.false_match_count / self.false_pairs
@dataclasses.dataclass(frozen=True)
class ClusteringResult():
"""Result of labelling the data using the centers.
Attributes:
data: Data that is being labelled.
centers: Cluster centers.
labels: Indices of the closest center for each datapoint.
loss: The k-means objective with respect to the centers, i.e., sum of
squared distances of the data to their closest center.
"""
data: clustering_params.Data
centers: clustering_params.Points
labels: typing.Optional[np.ndarray] = None
loss: typing.Optional[float] = None
def __post_init__(self):
def closest_center(datapoint: np.ndarray):
"""Returns closest center to data point and the squared distance from it.
Args:
datapoint: 1D np.ndarray containing a single datapoint
"""
squared_distances = np.sum((self.centers - datapoint)**2, axis=1)
min_index = np.argmin(squared_distances)
return (min_index, squared_distances[min_index])
if self.labels is None and self.loss is None:
result = [closest_center(datapoint) for datapoint in self.data.datapoints]
object.__setattr__(self, "labels",
np.array([res[0] for res in result], dtype=int))
object.__setattr__(self, "loss", sum([res[1] for res in result]))
if self.labels is None or self.loss is None:
raise ValueError("Only one of labels or loss was initialized; "
"either both should be initialized or none.")
if self.data.num_points != len(self.labels):
raise ValueError(f"number of labels ({self.labels.shape[0]}) is not "
f"equal to number of points ({self.data.num_points})")
num_clusters, centers_dim = self.centers.shape
if centers_dim != self.data.dim:
raise ValueError(f"Dimension of cluster centers ({centers_dim}) is not "
f"equal to dimension of data points ({self.data.dim})")
if not all([label in list(range(num_clusters)) for label in self.labels]):
raise ValueError("Labels in incorrect format. Each entry of label must "
"be an integer between 0 and number of clusters - 1")
def cross_label_histogram(self) -> np.ndarray:
"""Returns 2D histogram of (cluster label, true label) pairs.
Example:
For cluster labels (self.labels) = [0, 0, 1, 1, 2, 2], and
true labels (self.data.labels) = [0, 0, 0, 1, 1, 1]
the 2D histogram is given as [[2, 0],
[1, 1],
[0, 2]]
This is computed using np.histogram2d with bins
[-0.5, 0.5, 1.5, 2.5] for cluster labels and
[-0.5, 0.5, 1.5] for true labels.
Raises:
ValueError: if data does not have any specified true labels.
"""
if self.data.labels is None:
raise ValueError("Cross label histogram is undefined since data does not "
"have any specified labels")
bin_start = -0.5
cluster_label_bins = np.arange(bin_start, np.max(self.labels) + 1, 1)
true_label_bins = np.arange(bin_start, np.max(self.data.labels) + 1, 1)
hist, _, _ = np.histogram2d(self.labels, self.data.labels,
bins=(cluster_label_bins, true_label_bins))
return hist.astype(int)
def get_clustering_metrics(self) -> ClusteringMetrics:
"""Returns various clustering quality metrics, when data labels are given.
Raises:
ValueError: if data does not have any specified true labels.
"""
return ClusteringMetrics(self.cross_label_histogram())
def private_lsh_clustering(
k: int,
data: clustering_params.Data,
privacy_param: clustering_params.DifferentialPrivacyParam,
privacy_budget_split: typing.Optional[
clustering_params.PrivacyBudgetSplit] = None,
tree_param: typing.Optional[clustering_params.TreeParam] = None,
short_description: str = "ClusteringParam") -> ClusteringResult:
"""Clusters data into k clusters.
Args:
k: Number of clusters to divide the data into.
data: Data to find centers for. Centering the data around the origin
beforehand may provide performance improvements.
privacy_param: Differential privacy parameters.
privacy_budget_split: Optional privacy budget split between operations in
the clustering algorithm for fine-tuning.
tree_param: Optional tree parameters for generating the LSH net tree for
fine-tuning.
short_description: Optional description to identify this parameter
configuration.
Returns:
ClusteringResult with differentially private centers. The rest of
ClusteringResult is nonprivate, and only provided for convenience.
"""
# Initialize the parameters.
if privacy_budget_split is None:
privacy_budget_split = clustering_params.PrivacyBudgetSplit()
private_count = None
if tree_param is None:
# Saves the private count to re-use for the root node of the tree.
tree_param, private_count = default_clustering_params.default_tree_param(
k, data, privacy_param, privacy_budget_split)
clustering_param = clustering_params.ClusteringParam(privacy_param,
privacy_budget_split,
tree_param,
short_description,
data.radius)
logging.debug("clustering_param: %s", clustering_param)
# To guarantee privacy, enforce the radius provided.
clipped_data = clustering_params.Data(data.clip_by_radius(), data.radius,
data.labels)
coreset: private_outputs.PrivateWeightedData = get_private_coreset(
clipped_data, clustering_param, private_count)
k = min(k, len(coreset.datapoints))
logging.debug(
"Starting k-means++ computation on private coreset with k=%d. This may "
"be less than the original if generated coreset data ended up with "
"less than k unique points.", k)
kmeans = sklearn.cluster.KMeans(
n_clusters=k, init="k-means++").fit(
coreset.datapoints, sample_weight=coreset.weights)
# Calculate the result relative to the original data.
# Note: the calculations besides the centers are nonprivate.
return ClusteringResult(data, kmeans.cluster_centers_)
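# Usage sketch (the exact constructor arguments of Data and
# DifferentialPrivacyParam are assumptions here; see clustering_params for the
# real signatures):
#   data = clustering_params.Data(points, radius)
#   privacy_param = clustering_params.DifferentialPrivacyParam(epsilon=1.0, delta=1e-6)
#   result = private_lsh_clustering(k=8, data=data, privacy_param=privacy_param)
#   centers, labels, loss = result.centers, result.labels, result.loss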
def get_private_coreset(
data: clustering_params.Data,
clustering_param: clustering_params.ClusteringParam,
private_count: typing.Optional[int],
) -> private_outputs.PrivateWeightedData:
"""Returns private coreset, when clustered it approximates data clustering.
Args:
data: Data to approximate with the coreset.
clustering_param: Parameters for generating the coreset.
private_count: Optional private count. If None, the private count will be
computed.
"""
logging.debug("Starting process to get private coreset.")
root = lsh_tree.root_node(data, clustering_param, private_count)
# Root node must have private count >= 1.
root.private_count = max(1, root.private_count)
leaves = lsh_tree.LshTree(root).leaves
coreset_points = []
coreset_point_weights = []
for leaf in leaves:
coreset_points.append(leaf.get_private_average())
coreset_point_weights.append(leaf.private_count)
# To improve accuracy, we can clip the coreset points to the provided radius.
  coreset_points = data.clip_by_radius(np.array(coreset_points))
import numpy as np
import random
import time
import os.path
from os import path
import matplotlib.pyplot as plt
import scipy.interpolate
from nodeeditor.say import *
print ("reloaded: "+ __file__)
# tools for geometry ...
def data2vecs(data):
'''convert data to vector or list of vectors or other more complex vectorstructure'''
ndat=np.array(data).flatten()
say(ndat)
if ndat.shape==(3,):
say("vector")
return FreeCAD.Vector(*ndat)
else:
classes=set([x.__class__.__name__ for x in data])
if not 'list' in classes :
say("simple values")
            _points = np.array(data)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:00:33 2018
@author: jdkern
"""
from __future__ import division
from sklearn import linear_model
from statsmodels.tsa.api import VAR
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
######################################################################
# LOAD
######################################################################
#import data
df_load = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='hourly_load',header=0)
df_weather = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='weather',header=0)
BPA_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='BPA_location_weights',header=0)
CAISO_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='CAISO_location_weights',header=0)
Name_list=pd.read_csv('Synthetic_demand_pathflows/Covariance_Calculation.csv')
Name_list=list(Name_list.loc['SALEM_T':])
Name_list=Name_list[1:]
df_wind=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_years = int(len(df_wind)/8760) + 3
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0,index_col=0)
sim_weather = sim_weather.iloc[0:365*sim_years,:]
sim_weather = sim_weather.iloc[365:len(sim_weather)-730,:]
sim_weather = sim_weather.reset_index(drop=True)
#weekday designation
dow = df_weather.loc[:,'Weekday']
#generate simulated day of the week assuming starts from monday
count=0
sim_dow= np.zeros(len(sim_weather))
for i in range(0,len(sim_weather)):
count = count +1
if count <=5:
sim_dow[i]=1
elif count > 5:
sim_dow[i]=0
if count ==7:
count =0
#Generate a datelist
datelist=pd.date_range(start='2017-01-01',periods=365).tolist()
sim_month=np.zeros(len(sim_weather))
sim_day=np.zeros(len(sim_weather))
sim_year=np.zeros(len(sim_weather))
count=0
for i in range(0,len(sim_weather)):
if count <=364:
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
else:
count=0
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
count=count+1
######################################################################
# BPAT
######################################################################
#Find the simulated data at the sites
col_BPA_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T']
col_BPA_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W']
BPA_sim_T=sim_weather[col_BPA_T].values
BPA_sim_W=sim_weather[col_BPA_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
###########################################
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*BPA_weights.loc[0,i]
Wind[:,j] = df_weather.loc[:,n3]
weighted_SimT[:,0] = weighted_SimT[:,0] + BPA_sim_T[:,j]*BPA_weights.loc[0,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
BPA_sim_T_F=(BPA_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-BPA_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,BPA_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(BPA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(BPA_sim_W,binary_HDD_sim)
#convert load to array
BPA_load = df_load.loc[:,'BPA'].values
#remove NaNs
a = np.argwhere(np.isnan(BPA_load))
for i in a:
BPA_load[i] = BPA_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(BPA_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X70p = M[(M[:,0] >= 70),2:]
y70p = M[(M[:,0] >= 70),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),2:]
y40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),1]
X30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),2:]
y30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),1]
X25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),2:]
y25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),1]
X25m = M[(M[:,0] < 25),2:]
y25m = M[(M[:,0] < 25),1]
X70p_Sim = M_sim[(M_sim[:,0] >= 70),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X40_50_Sim = M_sim[(M_sim[:,0] >= 40) & (M_sim[:,0] < 50),1:]
X30_40_Sim = M_sim[(M_sim[:,0] >= 30) & (M_sim[:,0] < 40),1:]
X25_30_Sim = M_sim[(M_sim[:,0] >= 25) & (M_sim[:,0] < 30),1:]
X25m_Sim = M_sim[(M_sim[:,0] < 25),1:]
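# Piecewise regression by weighted temperature: a separate linear model of daily
# peak load is fit within each temperature bin, with the weekday flag, per-city
# HDD/CDD, and their wind-interaction terms as predictors.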
#multivariate regression
#Create linear regression object
reg70p = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg40_50 = linear_model.LinearRegression()
reg30_40 = linear_model.LinearRegression()
reg25_30 = linear_model.LinearRegression()
reg25m = linear_model.LinearRegression()
# Train the model using the training sets
if len(y70p) > 0:
reg70p.fit(X70p,y70p)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y40_50) > 0:
reg40_50.fit(X40_50,y40_50)
if len(y30_40) > 0:
reg30_40.fit(X30_40,y30_40)
if len(y25_30) > 0:
reg25_30.fit(X25_30,y25_30)
if len(y25m) > 0:
reg25m.fit(X25m,y25m)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=70:
y_hat = reg70p.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] >= 40 and M[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M[i,0] >= 30 and M[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M[i,0] >= 25 and M[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M[i,0] < 25:
y_hat = reg25m.predict(s)
predicted = np.append(predicted,y_hat)
BPA_p = predicted.reshape((len(predicted),1))
#Simulate using the regression above
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=70:
y_hat = reg70p.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] >= 40 and M_sim[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M_sim[i,0] >= 30 and M_sim[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M_sim[i,0] >= 25 and M_sim[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M_sim[i,0] < 25:
y_hat = reg25m.predict(s)
simulated = np.append(simulated,y_hat)
BPA_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks.ravel(),BPA_p.ravel())  # pearsonr expects 1-D arrays
print(a[0]**2, a[1])
# Residuals
BPAresiduals = BPA_p - peaks
BPA_y = peaks
# RMSE
RMSE = (np.sum((BPAresiduals**2))/len(BPAresiduals))**.5
output = np.column_stack((BPA_p,peaks))
#########################################################################
# CAISO
#########################################################################
#Find the simulated data at the sites
col_CAISO_T = ['FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_CAISO_W = ['FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
CAISO_sim_T=sim_weather[col_CAISO_T].values
CAISO_sim_W=sim_weather[col_CAISO_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
#find average temps
cities = ['Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
Wind[:,j] = df_weather.loc[:,n3]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*CAISO_weights.loc[1,i]
weighted_SimT[:,0] = weighted_SimT[:,0] + CAISO_sim_T[:,j]*CAISO_weights.loc[1,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
CAISO_sim_T_F=(CAISO_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-CAISO_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,CAISO_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
CDD_wind_sim = np.multiply(CAISO_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CAISO_sim_W,binary_HDD_sim)
###########################
# CAISO - SDGE
###########################
#convert load to array
SDGE_load = df_load.loc[:,'SDGE'].values
#remove NaNs
a = np.argwhere(np.isnan(SDGE_load))
for i in a:
SDGE_load[i] = SDGE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SDGE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SDGE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
#
simulated = np.append(simulated,y_hat)
SDGE_sim = simulated.reshape((len(simulated),1))
# Residuals
SDGEresiduals = SDGE_p - peaks
SDGE_y = peaks
#a=st.pearsonr(peaks,SDGE_p)
#print a[0]**2
# RMSE
RMSE = (np.sum((SDGEresiduals**2))/len(SDGEresiduals))**.5
###########################
# CAISO - SCE
###########################
#convert load to array
SCE_load = df_load.loc[:,'SCE'].values
#remove NaNs
a = np.argwhere(np.isnan(SCE_load))
for i in a:
SCE_load[i] = SCE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SCE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SCE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
SCE_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,SCE_p)
#print a[0]**2
# Residuals
SCEresiduals = SCE_p - peaks
SCE_y = peaks
# RMSE
RMSE = (np.sum((SCEresiduals**2))/len(SCEresiduals))**.5
###########################
# CAISO - PG&E Valley
###########################
#convert load to array
PGEV_load = df_load.loc[:,'PGE_V'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEV_load))
for i in a:
PGEV_load[i] = PGEV_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEV_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEV_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
PGEV_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks.ravel(),PGEV_p.ravel())
print(a[0]**2, a[1])
# Residuals
PGEVresiduals = PGEV_p - peaks
PGEV_y = peaks
# RMSE
RMSE = (np.sum((PGEVresiduals**2))/len(PGEVresiduals))**.5
###########################
# CAISO - PG&E Bay
###########################
#convert load to array
PGEB_load = df_load.loc[:,'PGE_B'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEB_load))
for i in a:
PGEB_load[i] = PGEB_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEB_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEB_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s) #
simulated = np.append(simulated,y_hat)
PGEB_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,PGEB_p)
#print a[0]**2
# Residuals
PGEBresiduals = PGEB_p - peaks
PGEB_y = peaks
# RMSE
RMSE = (np.sum((PGEBresiduals**2))/len(PGEBresiduals))**.5
#Collect residuals from load regression
R = np.column_stack((BPAresiduals,SDGEresiduals,SCEresiduals,PGEVresiduals,PGEBresiduals))
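# Keep the first three years (1095 days) of load residuals so that all residual
# series stacked later share a common length.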
ResidualsLoad = R[0:3*365,:]
###################################
# PATH 46
###################################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/46_daily.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
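# Build the Path 46 design matrix: calendar and path-flow columns from the
# spreadsheet plus per-city HDD/CDD and their wind-interaction terms.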
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path46'}, inplace=True)
df_data.rename(columns={4:'Weekday'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
y = df_data.loc[:,'Path46']
#multivariate regression
jan_reg_46 = linear_model.LinearRegression()
feb_reg_46 = linear_model.LinearRegression()
mar_reg_46 = linear_model.LinearRegression()
apr_reg_46 = linear_model.LinearRegression()
may_reg_46 = linear_model.LinearRegression()
jun_reg_46 = linear_model.LinearRegression()
jul_reg_46 = linear_model.LinearRegression()
aug_reg_46 = linear_model.LinearRegression()
sep_reg_46 = linear_model.LinearRegression()
oct_reg_46 = linear_model.LinearRegression()
nov_reg_46 = linear_model.LinearRegression()
dec_reg_46 = linear_model.LinearRegression()
# Train the model using the training sets
jan_reg_46.fit(jan.loc[:,'Weekday':],jan.loc[:,'Path46'])
feb_reg_46.fit(feb.loc[:,'Weekday':],feb.loc[:,'Path46'])
mar_reg_46.fit(mar.loc[:,'Weekday':],mar.loc[:,'Path46'])
apr_reg_46.fit(apr.loc[:,'Weekday':],apr.loc[:,'Path46'])
may_reg_46.fit(may.loc[:,'Weekday':],may.loc[:,'Path46'])
jun_reg_46.fit(jun.loc[:,'Weekday':],jun.loc[:,'Path46'])
jul_reg_46.fit(jul.loc[:,'Weekday':],jul.loc[:,'Path46'])
aug_reg_46.fit(aug.loc[:,'Weekday':],aug.loc[:,'Path46'])
sep_reg_46.fit(sep.loc[:,'Weekday':],sep.loc[:,'Path46'])
oct_reg_46.fit(oct.loc[:,'Weekday':],oct.loc[:,'Path46'])
nov_reg_46.fit(nov.loc[:,'Weekday':],nov.loc[:,'Path46'])
dec_reg_46.fit(dec.loc[:,'Weekday':],dec.loc[:,'Path46'])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Weekday':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
Path46_p = predicted
# Residuals
residuals = predicted - y.values
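# Keep the last three years (1095 days) of Path 46 residuals so they align with
# the load residuals retained above.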
Residuals46 = np.reshape(residuals[730:],(1095,1))
Path46_y = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
##R2
#a=st.pearsonr(y,predicted)
#print a[0]**2
###############################
# NW PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/NW_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
H = df_data
#df_data.to_excel('Synthetic_demand_pathflows/cX.xlsx')
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path8'}, inplace=True)
df_data.rename(columns={4:'Path14'}, inplace=True)
df_data.rename(columns={5:'Path3'}, inplace=True)
df_data.rename(columns={6:'BPA_wind'}, inplace=True)
df_data.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data.rename(columns={8:'Weekday'}, inplace=True)
df_data.rename(columns={9:'Salem_HDD'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
NWPaths_p= np.zeros((len(cX),num_lines))
NWPaths_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name='jan_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='feb_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='mar_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='apr_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='may_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jun_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jul_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='aug_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='sep_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='oct_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='nov_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='dec_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
# Train the model using the training sets
name='jan_reg_NW' + str(line)
locals()[name].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
name='feb_reg_NW' + str(line)
locals()[name].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
name='mar_reg_NW' + str(line)
locals()[name].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
name='apr_reg_NW' + str(line)
locals()[name].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
name='may_reg_NW' + str(line)
locals()[name].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
name='jun_reg_NW' + str(line)
locals()[name].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
name='jul_reg_NW' + str(line)
locals()[name].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
name='aug_reg_NW' + str(line)
locals()[name].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
name='sep_reg_NW' + str(line)
locals()[name].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
name='oct_reg_NW' + str(line)
locals()[name].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
name='nov_reg_NW' + str(line)
locals()[name].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
name='dec_reg_NW' + str(line)
locals()[name].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jan_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='feb_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='mar_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='apr_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='may_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jun_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jul_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='aug_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='sep_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='oct_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='nov_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='dec_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
NWPaths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
NWPaths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsNWPaths = export_residuals
###############################
# Other CA PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/OtherCA_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path61'}, inplace=True)
df_data.rename(columns={4:'Path42'}, inplace=True)
df_data.rename(columns={5:'Path24'}, inplace=True)
df_data.rename(columns={6:'Path45'}, inplace=True)
df_data.rename(columns={7:'BPA_wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path61','Path42','Path24','Path45']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
OtherCA_Paths_p= np.zeros((len(cX),num_lines))
OtherCA_Paths_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_CA' + str(line)
name_2='feb_reg_CA' + str(line)
name_3='mar_reg_CA' + str(line)
name_4='apr_reg_CA' + str(line)
name_5='may_reg_CA' + str(line)
name_6='jun_reg_CA' + str(line)
name_7='jul_reg_CA' + str(line)
name_8='aug_reg_CA' + str(line)
name_9='sep_reg_CA' + str(line)
name_10='oct_reg_CA' + str(line)
name_11='nov_reg_CA' + str(line)
name_12='dec_reg_CA' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
OtherCA_Paths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
OtherCA_Paths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsOtherCA_Paths = export_residuals
##########################
# PATH 65 & 66
##########################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/Path65_66_regression_data.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path65'}, inplace=True)
df_data.rename(columns={4:'Path66'}, inplace=True)
df_data.rename(columns={5:'Wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path65','Path66']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
Path65_66_p = np.zeros((len(cX),num_lines))
Path65_66_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'Wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'Wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'Wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'Wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'Wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'Wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'Wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'Wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'Wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'Wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'Wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'Wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
Path65_66_p[:,line_index] = predicted
Path65_66_y[:,line_index] = y.values
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
#
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
#R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
Residuals65_66 = export_residuals[730:,:]
#####################################################################
# Residual Analysis
#####################################################################
R = np.column_stack((ResidualsLoad,ResidualsNWPaths,ResidualsOtherCA_Paths,Residuals46,Residuals65_66))
rc = np.shape(R)
cols = rc[1]
mus = np.zeros((cols,1))
stds = np.zeros((cols,1))
R_w = np.zeros(np.shape(R))
sim_days = len(R_w)
#whiten residuals
for i in range(0,cols):
mus[i] = np.mean(R[:,i])
stds[i] = np.std(R[:,i])
R_w[:,i] = (R[:,i] - mus[i])/stds[i]
#Vector autoregressive model on residuals
model = VAR(R_w)
results = model.fit(1)
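# Simulate cross-correlated residuals from the fitted VAR(1): each day's residual
# vector equals the estimated constant plus the lag-1 coefficients applied to the
# previous day's residuals, plus a multivariate normal innovation drawn with the
# estimated covariance matrix sigma_u.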
sim_residuals = np.zeros((sim_days,cols))
errors = np.zeros((sim_days,cols))
p = results.params
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,sim_days)
ys = np.zeros((cols,1))
# Generate cross correlated residuals
for i in range(0,sim_days):
for j in range(1,cols+1):
name='y' + str(j)
        locals()[name]= p[0,j-1] + p[1,j-1]*y_seeds[0]+ p[2,j-1]*y_seeds[1]+ p[3,j-1]*y_seeds[2]+ p[4,j-1]*y_seeds[3]+ p[5,j-1]*y_seeds[4]+ p[6,j-1]*y_seeds[5]+ p[7,j-1]*y_seeds[6]+ p[8,j-1]*y_seeds[7]+ p[9,j-1]*y_seeds[8]+ p[10,j-1]*y_seeds[9]+ p[11,j-1]*y_seeds[10]+ p[12,j-1]*y_seeds[11]+ p[13,j-1]*y_seeds[12]+ p[14,j-1]*y_seeds[13]+ p[15,j-1]*y_seeds[14]+E[i,j-1]
for j in range(1,cols+1):
name='y' + str(j)
y_seeds[j-1]=locals()[name]
sim_residuals[i,:] = [y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y13,y14,y15]
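# Rescale each simulated residual series to the historical standard deviation and
# re-center it on the historical mean before recombining with the regression
# predictions.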
for i in range(0,cols):
sim_residuals[:,i] = sim_residuals[:,i]*stds[i]*(1/np.std(sim_residuals[:,i])) + mus[i]
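# Optional sanity check (illustrative sketch only): the simulated residuals should
# roughly reproduce the historical cross-correlation structure captured by the VAR.
# corr_hist = np.corrcoef(R_w.T)
# corr_sim = np.corrcoef(sim_residuals.T)
# print(np.round(np.max(np.abs(corr_hist - corr_sim)),3))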
#validation
Y = np.column_stack((np.reshape(BPA_y[0:3*365],(1095,1)),np.reshape(SDGE_y[0:3*365],(1095,1)),np.reshape(SCE_y[0:3*365],(1095,1)),np.reshape(PGEV_y[0:3*365],(1095,1)),np.reshape(PGEB_y[0:3*365],(1095,1)),NWPaths_y,OtherCA_Paths_y,np.reshape(Path46_y[730:],(1095,1)),np.reshape(Path65_66_y[730:,:],(1095,2))))
combined_BPA = np.reshape(sim_residuals[:,0],(1095,1)) + np.reshape(BPA_p[0:3*365],(1095,1))
combined_SDGE = np.reshape(sim_residuals[:,1],(1095,1)) + np.reshape(SDGE_p[0:3*365],(1095,1))
combined_SCE = np.reshape(sim_residuals[:,2],(1095,1)) + np.reshape(SCE_p[0:3*365],(1095,1))
combined_PGEV = np.reshape(sim_residuals[:,3],(1095,1)) + np.reshape(PGEV_p[0:3*365],(1095,1))
combined_PGEB = np.reshape(sim_residuals[:,4],(1095,1)) + np.reshape(PGEB_p[0:3*365],(1095,1))
combined_Path8 = np.reshape(sim_residuals[:,5],(1095,1)) + np.reshape(NWPaths_p[:,0],(1095,1))
combined_Path14 = np.reshape(sim_residuals[:,6],(1095,1)) + np.reshape(NWPaths_p[:,1],(1095,1))
combined_Path3 = np.reshape(sim_residuals[:,7],(1095,1)) + np.reshape(NWPaths_p[:,2],(1095,1))
combined_Path61 = np.reshape(sim_residuals[:,8],(1095,1)) + np.reshape(OtherCA_Paths_p[:,0],(1095,1))
combined_Path42 = np.reshape(sim_residuals[:,9],(1095,1)) + np.reshape(OtherCA_Paths_p[:,1],(1095,1))
combined_Path24 = np.reshape(sim_residuals[:,10],(1095,1)) + np.reshape(OtherCA_Paths_p[:,2],(1095,1))
combined_Path45 = np.reshape(sim_residuals[:,11],(1095,1)) + np.reshape(OtherCA_Paths_p[:,3],(1095,1))
combined_Path46 = np.reshape(sim_residuals[:,12],(1095,1)) + np.reshape(Path46_p[730:],(1095,1))
combined_Path65 = np.reshape(sim_residuals[:,13],(1095,1)) + np.reshape(Path65_66_p[730:,0],(1095,1))
combined_Path66 = np.reshape(sim_residuals[:,14],(1095,1)) + np.reshape(Path65_66_p[730:,1],(1095,1))
combined = np.column_stack((combined_BPA,combined_SDGE,combined_SCE,combined_PGEV,combined_PGEB,combined_Path8,combined_Path14,combined_Path3,combined_Path61,combined_Path42,combined_Path24,combined_Path45,combined_Path46,combined_Path65,combined_Path66))
rc = np.shape(Y)
cols = rc[1]
names = ['BPA','SDGE','SCE','PGEV','PGEB','Path8','Path14','Path3','Path61','Path42','Path24','Path45','Path46','Path65','Path66']
#for n in names:
#
# n_index = names.index(n)
#
# plt.figure()
# plt.plot(combined[:,n_index],'r')
# plt.plot(Y[:,n_index],'b')
# plt.title(n)
#
##########################################################################################################################################################
#Simulating demand and path
#########################################################################################################################################################
#Sim Residual
simulation_length=len(sim_weather)
syn_residuals = np.zeros((simulation_length,cols))
errors = np.zeros((simulation_length,cols))
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,simulation_length)
ys = np.zeros((cols,1))
for i in range(0,simulation_length):
for n in range(0,cols):
ys[n] = p[0,n]
for m in range(0,cols):
            ys[n] = ys[n] + p[m+1,n]*y_seeds[m]  # coefficient on lagged variable m, as in the VAR equation above
ys[n] = ys[n] + E[i,n]
for n in range(0,cols):
y_seeds[n] = ys[n]
syn_residuals[i,:] = np.reshape([ys],(1,cols))
for i in range(0,cols):
syn_residuals[:,i] = syn_residuals[:,i]*stds[i]*(1/np.std(syn_residuals[:,i])) + mus[i]
##################################################
# PATH NW
##################################################
#This only uses BPA wind and hydro
col_nw_T =['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_nw_W =['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','<NAME>']
num_cities = len(col_nw_T)
NW_sim_T=sim_weather[col_nw_T].values
NW_sim_W=sim_weather[col_nw_W].values
NW_sim_T_F=(NW_sim_T * 9/5) +32
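# Convert simulated wind speeds from m/s to mph (presumably to match the units of
# the historical wind data).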
NW_sim_W =NW_sim_W *2.23694
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-NW_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,NW_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(NW_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(NW_sim_W,binary_HDD_sim)
#Need Month,Day,Year,8 14 3 BPA_wind,BPA_hydro
sim_BPA_hydro = pd.read_csv('PNW_hydro/FCRPS/Path_dams.csv',header=None)
sim_BPA_hydro=sim_BPA_hydro.values
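# Total FCRPS hydro across the listed dams, divided by 24 (presumably to convert
# daily energy to an average daily value consistent with the wind series).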
sim_BPA_hydro=np.sum(sim_BPA_hydro,axis=1)/24
#What is the common length
effect_sim_year=int(len(sim_BPA_hydro)/365)
sim_month=sim_month[:len(sim_BPA_hydro)]
sim_day=sim_day[:len(sim_BPA_hydro)]
sim_year=sim_year[:len(sim_BPA_hydro)]
sim_dow= sim_dow[:len(sim_BPA_hydro)]
sim_wind_power=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_BPA_wind_power= sim_wind_power.loc[:,'BPA']/24
sim_wind_daily = np.zeros((effect_sim_year*365,1))
for i in range(0,effect_sim_year*365):
    sim_wind_daily[i] = np.sum((sim_BPA_wind_power.loc[i*24:i*24+23]))  # pandas .loc slicing is end-inclusive, so this covers exactly 24 hourly values
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path8'}, inplace=True)
df_data_sim.rename(columns={4:'Path14'}, inplace=True)
df_data_sim.rename(columns={5:'Path3'}, inplace=True)
df_data_sim.rename(columns={6:'BPA_wind'}, inplace=True)
df_data_sim.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data_sim.rename(columns={8:'Weekday'}, inplace=True)
df_data_sim.rename(columns={9:'Salem_HDD'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
upper = [1900,1500,1900]
lower = [-600,-900,-2200]
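# Simulated NW path flows are clipped to these per-line [lower, upper] bounds
# (assumed MW), in the same order as `lines`.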
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'BPA_wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_NW' + str(line)
name_2='feb_reg_NW' + str(line)
name_3='mar_reg_NW' + str(line)
name_4='apr_reg_NW' + str(line)
name_5='may_reg_NW' + str(line)
name_6='jun_reg_NW' + str(line)
name_7='jul_reg_NW' + str(line)
name_8='aug_reg_NW' + str(line)
name_9='sep_reg_NW' + str(line)
name_10='oct_reg_NW' + str(line)
name_11='nov_reg_NW' + str(line)
name_12='dec_reg_NW' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper[line_index]:
predicted[i] = upper[line_index]
elif predicted[i] < lower[line_index]:
predicted[i] = lower[line_index]
name='predicted_' + str(line)
locals()[name]=predicted
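# Hedged sketch, not used by the script: the 12-branch if/elif dispatch above can be
# collapsed into a lookup. month_frames and month_regs are hypothetical dicts, e.g.
# {1: jan2, ..., 12: dec2} and {1: jan_reg_NWPath8, ..., 12: dec_reg_NWPath8}; this
# script keeps the fitted monthly regressions in locals() instead.
def predict_month(i, m, month_frames, month_regs, first_col, n):
    # pull the predictor row for day i from the month-specific frame and predict
    s = month_frames[int(m)].loc[i, first_col:]
    return month_regs[int(m)].predict(np.reshape(s.values, (1, n)))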
syn_Path8=predicted_Path8+syn_residuals[:effect_sim_year*365,5]
syn_Path14=predicted_Path14+syn_residuals[:effect_sim_year*365,6]
syn_Path3=predicted_Path3+syn_residuals[:effect_sim_year*365,7]
bias = np.mean(syn_Path8) - np.mean(NWPaths_y[:,0])
syn_Path8 = syn_Path8 - bias
bias = np.mean(syn_Path14) - np.mean(NWPaths_y[:,1])
syn_Path14 = syn_Path14 - bias
bias = np.mean(syn_Path3) - np.mean(NWPaths_y[:,2])
syn_Path3 = syn_Path3 - bias
S = df_data_sim.values
HO = H.values
stats = np.zeros((69,4))
for i in range(0,69):
stats[i,0] = np.mean(S[:,i])
stats[i,1] = np.mean(HO[:,i])
stats[i,2] = np.std(S[:,i])
stats[i,3] = np.std(HO[:,i])
################################################################################
###################################################
## PATH 65 & 66
###################################################
col_6566_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_6566_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_6566_T)
P6566_sim_T=sim_weather[col_6566_T].values
P6566_sim_W=sim_weather[col_6566_W].values
P6566_sim_W =P6566_sim_W*2.23694
sim_days = len(sim_weather)
P6566_sim_T_F=(P6566_sim_T * 9/5) +32
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-P6566_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,P6566_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(P6566_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(P6566_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,syn_Path3,syn_Path8,syn_Path14,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path65'}, inplace=True)
df_data_sim.rename(columns={4:'Path66'}, inplace=True)
df_data_sim.rename(columns={5:'Wind'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path65','Path66']
upper = [3100,4300]
lower = [-2210,-500]
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'Wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper[line_index]:
predicted[i] = upper[line_index]
elif predicted[i] < lower[line_index]:
predicted[i] = lower[line_index]
name='predicted_' + str(line)
locals()[name]=predicted
syn_Path65= predicted_Path65 + syn_residuals[:effect_sim_year*365,13]
syn_Path66 = predicted_Path66 + syn_residuals[:effect_sim_year*365,14]
bias = np.mean(syn_Path65) - np.mean(Path65_66_y[:,0])
syn_Path65 = syn_Path65 - bias
bias = np.mean(syn_Path66) - np.mean(Path65_66_y[:,1])
syn_Path66 = syn_Path66 - bias
###################################################
## PATH 46
###################################################
#Find the simulated data at the sites
col_46_T = ['TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_46_W = ['TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_46_T)
P46_sim_T=sim_weather[col_46_T].values
P46_sim_W=sim_weather[col_46_W].values
P46_sim_W =P46_sim_W *2.23694
sim_days = len(sim_weather)
P46_sim_T_F=(P46_sim_T * 9/5) +32
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-P46_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,P46_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(P46_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(P46_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
sim_Hoover = pd.read_csv('Synthetic_streamflows/synthetic_discharge_Hoover.csv',header=None)
sim_Hoover=sim_Hoover.values
sim_Hoover = sim_Hoover[:effect_sim_year*365]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),sim_dow,sim_Hoover,syn_Path65,syn_Path66))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path46'}, inplace=True)
df_data_sim.rename(columns={4:'Weekday'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
y = df_data_sim.loc[:,'Path46']
predicted_Path46 =[]
rc = np.shape(jan2.loc[:,'Weekday':])
n = rc[1]
upper = 185000
lower = 48000
predicted=[]
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper:
predicted[i] = upper
elif predicted[i] < lower:
predicted[i] = lower
predicted_Path46=predicted
syn_Path46=predicted_Path46+syn_residuals[:effect_sim_year*365,12]
bias = np.mean(syn_Path46) - np.mean(Path46_y)
syn_Path46 = syn_Path46 - bias
syn_Path46 = syn_Path46/24
#
################################
## Other CA PATHS
################################
col_ca_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_ca_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_ca_T)
CA_sim_T=sim_weather[col_ca_T].values
CA_sim_W=sim_weather[col_ca_W].values
CA_sim_W =CA_sim_W *2.23694
CA_sim_T_F=(CA_sim_T * 9/5) +32
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-CA_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,CA_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(CA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CA_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,sim_dow,syn_Path46,sim_Hoover,syn_Path65,syn_Path66))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path61'}, inplace=True)
df_data_sim.rename(columns={4:'Path42'}, inplace=True)
df_data_sim.rename(columns={5:'Path24'}, inplace=True)
df_data_sim.rename(columns={6:'Path45'}, inplace=True)
df_data_sim.rename(columns={7:'BPA_wind'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path61','Path42','Path24','Path45']
upper = [1940,98,92,340]
lower = [240,-400,-48,-190]
num_lines = len(lines)
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'BPA_wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_CA' + str(line)
name_2='feb_reg_CA' + str(line)
name_3='mar_reg_CA' + str(line)
name_4='apr_reg_CA' + str(line)
name_5='may_reg_CA' + str(line)
name_6='jun_reg_CA' + str(line)
name_7='jul_reg_CA' + str(line)
name_8='aug_reg_CA' + str(line)
name_9='sep_reg_CA' + str(line)
name_10='oct_reg_CA' + str(line)
name_11='nov_reg_CA' + str(line)
name_12='dec_reg_CA' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
# if predicted[i] > upper[line_index]:
# predicted[i] = upper[line_index]
# elif predicted[i] < lower[line_index]:
# predicted[i] = lower[line_index]
if predicted[i] > upper[line_index]:
predicted[i] = np.mean(OtherCA_Paths_y[:,line_index])
elif predicted[i] < lower[line_index]:
predicted[i] = np.mean(OtherCA_Paths_y[:,line_index])
name='predicted_' + str(line)
locals()[name]=predicted
syn_Path61= predicted_Path61 + syn_residuals[:effect_sim_year*365,8]
syn_Path42 = predicted_Path42 + syn_residuals[:effect_sim_year*365,9]
syn_Path24= predicted_Path24 + syn_residuals[:effect_sim_year*365,10]
syn_Path45 = predicted_Path45 + syn_residuals[:effect_sim_year*365,11]
bias = np.mean(syn_Path61) - np.mean(OtherCA_Paths_y[:,0])
syn_Path61 = syn_Path61 - bias
bias = np.mean(syn_Path42) - np.mean(OtherCA_Paths_y[:,1])
syn_Path42 = syn_Path42 - bias
bias = np.mean(syn_Path24) - np.mean(OtherCA_Paths_y[:,2])
syn_Path24 = syn_Path24 - bias
bias = np.mean(syn_Path45) - np.mean(OtherCA_Paths_y[:,3])
syn_Path45 = syn_Path45 - bias
############################################################################
syn_BPA= BPA_sim + np.reshape(syn_residuals[:,0],(len(BPA_sim),1))
#syn_BPA= syn_BPA[365:len(BPA_sim)-730]
syn_BPA=np.reshape(syn_BPA,(effect_sim_year*365))
syn_SDGE= SDGE_sim + np.reshape(syn_residuals[:,1],(len(BPA_sim),1))
#syn_SDGE= syn_SDGE[365:len(BPA_sim)-730]
syn_SDGE=np.reshape(syn_SDGE,(effect_sim_year*365))
syn_SCE= SCE_sim + np.reshape(syn_residuals[:,2],(len(BPA_sim),1))
#syn_SCE= syn_SCE[365:len(BPA_sim)-730]
syn_SCE=np.reshape(syn_SCE,(effect_sim_year*365))
syn_PGEV= PGEV_sim + np.reshape(syn_residuals[:,3],(len(BPA_sim),1))
#syn_PGEV= syn_PGEV[365:len(BPA_sim)-730]
syn_PGEV=np.reshape(syn_PGEV,(effect_sim_year*365))
syn_PGEB= PGEB_sim + np.reshape(syn_residuals[:,4],(len(BPA_sim),1))
#syn_PGEB= syn_PGEB[365:len(BPA_sim)-730]
syn_PGEB=np.reshape(syn_PGEB,(effect_sim_year*365))
###############################################################################
Demand_Path=pd.DataFrame()
Demand_Path['BPA_Load_sim']=syn_BPA.tolist()
Demand_Path['SDGE_Load_sim']= syn_SDGE.tolist()
Demand_Path['SCE_Load_sim']= syn_SCE.tolist()
Demand_Path['PGEV_Load_sim']= syn_PGEV.tolist()
Demand_Path['PGEB_Load_sim']=syn_PGEB.tolist()
Demand_Path['Path8_sim']=syn_Path8.tolist()
Demand_Path['Path3_sim']=syn_Path3.tolist()
Demand_Path['Path14_sim']=syn_Path14.tolist()
Demand_Path['Path65_sim']=syn_Path65.tolist()
Demand_Path['Path66_sim']=syn_Path66.tolist()
Demand_Path['Path46_sim']=syn_Path46.tolist()
Demand_Path['Path61_sim']=syn_Path61.tolist()
Demand_Path['Path42_sim']=syn_Path42.tolist()
Demand_Path['Path24_sim']=syn_Path24.tolist()
Demand_Path['Path45_sim']=syn_Path45.tolist()
Demand_Path.to_csv('Synthetic_demand_pathflows/Load_Path_Sim.csv')
######################################################################
## Hourly Demand Simulation
######################################################################
#
BPA_profile = np.zeros((24,365))
SDGE_profile = np.zeros((24,365))
SCE_profile = np.zeros((24,365))
PGEV_profile = np.zeros((24,365))
PGEB_profile = np.zeros((24,365))
# number of historical days
hist_days = len(SCE_load)/24
hist_years = int(hist_days/365)
sim_years = int(len(sim_weather)/365)
# create profiles
for i in range(0,hist_years):
for j in range(0,365):
# pull 24 hours of demand
BPA_sample = BPA_load[i*8760+j*24:i*8760+j*24+24]
SDGE_sample = SDGE_load[i*8760+j*24:i*8760+j*24+24]
SCE_sample = SCE_load[i*8760+j*24:i*8760+j*24+24]
PGEV_sample = PGEV_load[i*8760+j*24:i*8760+j*24+24]
PGEB_sample = PGEB_load[i*8760+j*24:i*8760+j*24+24]
# create fractional profile (relative to peak demand)
sample_peak = np.max(BPA_sample)
BPA_fraction = BPA_sample/sample_peak
BPA_profile[:,j] = BPA_profile[:,j] + BPA_fraction*(1/hist_years)
sample_peak = np.max(SDGE_sample)
SDGE_fraction = SDGE_sample/sample_peak
SDGE_profile[:,j] = SDGE_profile[:,j] + SDGE_fraction*(1/hist_years)
sample_peak = np.max(SCE_sample)
SCE_fraction = SCE_sample/sample_peak
SCE_profile[:,j] = SCE_profile[:,j] + SCE_fraction*(1/hist_years)
sample_peak = np.max(PGEV_sample)
PGEV_fraction = PGEV_sample/sample_peak
PGEV_profile[:,j] = PGEV_profile[:,j] + PGEV_fraction*(1/hist_years)
sample_peak = np.max(PGEB_sample)
PGEB_fraction = PGEB_sample/sample_peak
PGEB_profile[:,j] = PGEB_profile[:,j] + PGEB_fraction*(1/hist_years)
# simulate using synthetic peaks
BPA_hourly = np.zeros((8760*effect_sim_year,1))
SDGE_hourly = np.zeros((8760*effect_sim_year,1))
SCE_hourly = np.zeros((8760*effect_sim_year,1))
PGEV_hourly = np.zeros((8760*effect_sim_year,1))
PGEB_hourly = np.zeros((8760*effect_sim_year,1))
PNW_hourly = np.zeros((8760*effect_sim_year,1))
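# Hedged sketch (the original script is truncated at this point): one plausible next step
# is to scale each day's 24-hour fractional profile by that day's simulated peak demand.
# peaks_daily is a hypothetical array of length effect_sim_year*365; profile is one of the
# (24, 365) arrays built above.
def disaggregate_daily_peaks(peaks_daily, profile):
    hourly = np.zeros(len(peaks_daily) * 24)
    for d in range(len(peaks_daily)):
        # day-of-year profile times that day's peak gives the 24 hourly values
        hourly[d * 24:d * 24 + 24] = profile[:, d % 365] * peaks_daily[d]
    return hourly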
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from numpy.testing import assert_allclose
import numpy as np
import pytest
from collections import OrderedDict
from thermo.chemical import *
from thermo.mixture import Mixture
import thermo
from scipy.integrate import quad
from math import *
from thermo.utils import R
def test_Mixture():
Mixture(['water', 'ethanol'], ws=[.5, .5], T=320, P=1E5)
Mixture(['water', 'phosphoric acid'], ws=[.5, .5], T=320, P=1E5)
Mixture('air', T=320, P=1E5)
Mixture(['ethanol', 'water'], ws=[0.5, 0.5], T=500)
Mixture('water')
s = Mixture(['water', 'ethanol'], P=5200, zs=[0.5, 0.5])
assert_allclose(s.V_over_F, 0.3061646720256255, rtol=1E-3)
s = Mixture(['water', 'ethanol'], P=5200, zs=[0.5, 0.5])
assert_allclose(s.quality, 0.34745483870024646, rtol=1E-3)
with pytest.raises(Exception):
Mixture(['2,2-Dichloro-1,1,1-trifluoroethane'], T=276.15, P=37000, zs=[0.5, 0.5])
m = Mixture(['Na+', 'Cl-', 'water'], ws=[.01, .02, .97]).charge_balance
assert_allclose(m, -0.0023550338411239182)
def test_Mixture_input_forms():
# Run a test initializing a mixture from mole fractions, mass fractions,
# liquid fractions, gas fractions (liq/gas are with volumes of pure components at T and P)
kwargs = {'ws': [0.5, 0.5], 'zs': [0.7188789914193495, 0.2811210085806504],
'Vfls': [0.44054617180108374, 0.5594538281989162],
'Vfgs': [0.7229421485513368, 0.2770578514486633]}
for key, val in kwargs.items():
m = Mixture(['water', 'ethanol'], **{key:val})
assert_allclose(m.zs, kwargs['zs'], rtol=1E-6)
assert_allclose(m.zs, m.xs)
assert_allclose(m.Vfls(), kwargs['Vfls'], rtol=1E-5)
assert_allclose(m.Vfgs(), kwargs['Vfgs'])
# Ordered dict inputs
IDs = ['pentane', 'hexane', 'heptane']
kwargs = {'ws': [0.4401066297270966, 0.31540115235588945, 0.24449221791701395],
'zs': [.5, .3, .2],
'Vfls': [0.45711574619871703, 0.31076035223551646, 0.23212390156576654],
'Vfgs': [0.5127892380094016, 0.2979448661739439, 0.18926589581665448]}
for key, val in kwargs.items():
d = OrderedDict()
for i, j in zip(IDs, val):
d.update({i: j})
m = Mixture(**{key:d})
assert_allclose(m.zs, kwargs['zs'], rtol=1E-6)
assert_allclose(m.zs, m.xs)
assert_allclose(m.Vfls(), kwargs['Vfls'], rtol=1E-5)
assert_allclose(m.Vfgs(), kwargs['Vfgs'], rtol=2E-5)
# numpy array inputs
IDs = ['pentane', 'hexane', 'heptane']
kwargs = {'ws': np.array([0.4401066297270966, 0.31540115235588945, 0.24449221791701395]),
'zs': np.array([.5, .3, .2]),
'Vfls': np.array([0.45711574619871703, 0.31076035223551646, 0.23212390156576654]),
'Vfgs': np.array([0.5127892380094016, 0.2979448661739439, 0.18926589581665448])}
for key, val in kwargs.items():
m = Mixture(IDs, **{key:val})
assert_allclose(m.zs, kwargs['zs'], rtol=1E-6)
assert_allclose(m.zs, m.xs)
assert_allclose(m.Vfls(), kwargs['Vfls'], rtol=1E-5)
assert_allclose(m.Vfgs(), kwargs['Vfgs'], rtol=2E-5)
with pytest.raises(Exception):
Mixture(['water', 'ethanol'])
Mixture(['water'], ws=[1], T=300, P=1E5)
Mixture('water', ws=[1], T=365).SGl
def test_Mixture_input_vfs_TP():
# test against the default arguments of T and P
m0 = Mixture(['hexane', 'decane'], Vfls=[.5, .5])
m1 = Mixture(['hexane', 'decane'], Vfls=[.5, .5], Vf_TP=(298.15, None))
m2 = Mixture(['hexane', 'decane'], Vfls=[.5, .5], Vf_TP=(298.15, None))
m3 = Mixture(['hexane', 'decane'], Vfls=[.5, .5], Vf_TP=(None, 101325))
assert_allclose(m0.zs, m1.zs)
assert_allclose(m0.zs, m2.zs)
assert_allclose(m0.zs, m3.zs)
# change T, P slightly - check that it's still close to the result
# and do one rough test that the result is still working
m0 = Mixture(['hexane', 'decane'], Vfls=[.5, .5])
m1 = Mixture(['hexane', 'decane'], Vfls=[.5, .5], Vf_TP=(300, None))
m2 = Mixture(['hexane', 'decane'], Vfls=[.5, .5], Vf_TP=(300, 1E5))
m3 = Mixture(['hexane', 'decane'], Vfls=[.5, .5], Vf_TP=(None, 1E5))
assert_allclose(m0.zs, m1.zs, rtol=1E-3)
assert_allclose(m2.zs, [0.5979237361861229, 0.402076263813877], rtol=1E-4)
assert_allclose(m0.zs, m2.zs, rtol=1E-3)
assert_allclose(m0.zs, m3.zs, rtol=1E-3)
def test_Mixture_calculated_Vfs():
# Liquid standard fractions
S = Mixture(['hexane', 'decane'], zs=[0.25, 0.75])
Vfls = S.Vfls(298.16, 101326)
assert_allclose(Vfls, [0.18299723912903532, 0.8170027608709647])
assert_allclose(S.Vfls(), [0.18299676086285419, 0.8170032391371459])
assert_allclose(S.Vfls(P=1E6), [0.18291966593930253, 0.8170803340606975])
assert_allclose(S.Vfls(T=299.15), [0.18304482422114987, 0.8169551757788501])
# gas fractions
S = Mixture(['hexane', 'decane'], zs=[0.25, 0.75], T=699)
assert_allclose(S.Vfgs(700, 101326), [0.251236709756207, 0.748763290243793])
assert_allclose(S.Vfgs(), [0.25124363058052673, 0.7487563694194732])
assert_allclose(S.Vfgs(P=101326), [0.2512436429605387, 0.7487563570394613])
assert_allclose(S.Vfgs(T=699), [0.25124363058052673, 0.7487563694194732])
def test_Mixture_predefined():
for name in ['Air', 'air', u'Air', ['air']]:
air = Mixture(name)
assert air.CASs == ['7727-37-9', '7440-37-1', '7782-44-7']
assert_allclose(air.zs, [0.7811979754734807, 0.009206322604387548, 0.20959570192213187], rtol=1E-4)
assert_allclose(air.ws, [0.7557, 0.0127, 0.2316], rtol=1E-3)
R401A = Mixture('R401A')
assert R401A.CASs == ['75-45-6', '75-37-6', '2837-89-0']
assert_allclose(R401A.zs, [0.578852219944875, 0.18587468325478565, 0.2352730968003393], rtol=1E-4)
assert_allclose(R401A.ws, [0.53, 0.13, 0.34], rtol=1E-3)
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Common methods shared by MNIST and ImageNet experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import errno
import getpass
import numpy as np
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
# mkdir -p in Python >2.5
def mkdir_p(path):
try:
os.makedirs(path, mode=0o755)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
# Returns path to postfix under user's Unix home directory.
def make_experiment_dir(postfix):
home = os.path.expanduser('~')
exp_dir = os.path.join(home, postfix)
mkdir_p(exp_dir)
return exp_dir
# appends .png to file name
def save_fig(folder, filename):
if folder is None:
return
filename_out = os.path.join(folder, filename + '.png')
print('saving {}'.format(filename_out))
with open(filename_out, 'w') as out_file:
plt.savefig(out_file)
# appends .txt to file name
def save_array(x, folder, filename, formatting):
if folder is None:
return
filename_out = os.path.join(folder, filename + '.txt')
print('saving {}'.format(filename_out))
with open(filename_out, 'w') as out_file:
np.savetxt(out_file, x, fmt=formatting)
def load_array(filename):
with open(filename, 'r') as f:
return np.loadtxt(f)
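# Minimal usage sketch for the helpers above (directory and file names are illustrative):
# x = np.arange(6).reshape(3, 2)
# exp_dir = make_experiment_dir('tmp_experiment')
# save_array(x, exp_dir, 'x_values', '%.3f')
# x_back = load_array(os.path.join(exp_dir, 'x_values.txt'))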
import math
import os
import pathlib
import random
import re
import time
from queue import Queue
from threading import Thread
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from keras.callbacks import Callback
from sklearn.metrics import fbeta_score
from util import data_loader
from util import data_visualization as dv
from util import metrics
from util import path
import config
class KerasModelConfig(object):
def __init__(self,
k_fold_file,
model_path: str,
image_resolution,
data_type,
val_index=None,
input_norm=True,
downsampling=None,
data_visualization=False,
label_position=(1,),
label_color_augment=None,
label_up_sampling=None,
train_batch_size=(32,),
val_batch_size=32,
predict_batch_size=32,
epoch=(1,),
initial_epoch=0,
lr=(0.01,),
clr=False,
freeze_layers=(0,),
tta_flip=False,
tta_crop=False,
debug=False):
file_name = os.path.basename(model_path)
model_dir = os.path.dirname(model_path)
self.k_fold_file = k_fold_file
self.model_path = model_path
self.val_index = int("".join(filter(str.isdigit, file_name.split("_")[1]))) if val_index is None else val_index
self.image_resolution = image_resolution
self.image_size = (image_resolution, image_resolution)
self.image_shape = (image_resolution, image_resolution, 3)
self.input_norm = input_norm
self.data_type = data_type
self.record_dir = os.path.join(os.path.join(model_dir, "record"), file_name.split("_")[0])
self.record_dir = os.path.join(self.record_dir, "val%d" % self.val_index)
self.model_name = os.path.split(model_dir)[-1] + ": " + file_name
self.fit_img_record_dir = os.path.join(os.path.join(self.record_dir, "image"), "fit")
self.predict_img_record_dir = os.path.join(os.path.join(self.record_dir, "image"), "predict")
self.log_file = os.path.join(self.record_dir, "log.txt")
self.label_position = label_position
self.train_batch_size = train_batch_size
self.val_batch_size = val_batch_size
self.predict_batch_size = predict_batch_size
self.epoch = epoch
self.initial_epoch = initial_epoch
self.lr = lr
self.clr = clr
self.freeze_layers = freeze_layers
self.writer = tf.summary.FileWriter(self.record_dir)
self.current_epoch = initial_epoch
self.tta_flip = tta_flip
self.tta_crop = False  # the tta_crop constructor argument is ignored here, so crop-based TTA stays disabled
self.debug = debug
self.label_color_augment = label_color_augment
self.downsampling = downsampling
self.label_up_sampling = label_up_sampling
self.data_visualization = data_visualization
self.val_files = []
self.train_files = []
self.train_file_cnt = 0
self.val_file_cnt = 0
self.up_sampling_cnt = [0 for i in range(13)]
self.color_augment_cnt = 0
self.down_sampling_cnt = 0
# for i in self.data_type:
# train_files, val_files = data_loader.get_k_fold_files(self.k_fold_file, self.val_index, [i])
# self.val_files.append(val_files)
# self.val_file_cnt += len(val_files)
# self.train_files += train_files
#
# if label_color_augment is not None and config.DATA_TYPE_ORIGINAL in data_type:
#
# # build a dict of all image file names currently used for training
# train_file_dict = {}
# for train_file in self.train_files:
# train_file_dict.setdefault(os.path.split(train_file)[-1], None)
#
# from preprocess.augment import color
# augment_image_dirs = color.get_augment_image_dirs()
# labels = data_loader.get_labels(augment_image_dirs)
#
# augment_files = []
# for i in range(len(augment_image_dirs)):
# # if the augmented file name is not in the current training set, it is validation data, so skip it
# if os.path.split(augment_image_dirs[i])[-1] not in train_file_dict:
# continue
# label = labels[i]
# for j in label_color_augment:
# if label[j] == 1:
# augment_files.append(augment_image_dirs[i])
# break
# self.train_files += augment_files
# self.color_augment_cnt = len(augment_files)
# self.save_log("add %d color augmentation file" % self.color_augment_cnt)
#
# if label_up_sampling is not None:
# self.save_log("train files is %d before up sampling" % len(self.train_files))
# sampling_files = []
# labels = data_loader.get_labels(self.train_files)
# for i in range(len(labels)):
# for j in range(len(label_up_sampling)):
# label = labels[i]
# if label[j] > 0 and label_up_sampling[j] > 0:
# sampling_files += [self.train_files[i]] * label_up_sampling[j]
# self.up_sampling_cnt[j] += label_up_sampling[j]
#
# self.train_files += sampling_files
# self.save_log(
# "up sampling times: %s, totaol: %d" % (
# str([str(i) for i in self.up_sampling_cnt]), sum(self.up_sampling_cnt)))
# self.save_log("train files is %d after up sampling" % len(self.train_files))
#
# self.val_y = np.array(data_loader.get_labels(self.val_files[0]), np.bool)[:, self.label_position]
# if downsampling is not None:
# new_train_files = []
# for _ in self.train_files:
# _label = data_loader.get_label(_.split(os.sep)[-1])
# _labels = [
# ['0', '0', '1', '0', '0', '0', '0', '0', '1', '0', '0', '0', '0'],
# ['0', '0', '1', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0'],
# ['0', '0', '1', '0', '1', '0', '0', '0', '1', '0', '0', '0', '0'],
# ['0', '0', '0', '0', '0', '0', '0', '0', '1', '0', '0', '0', '0']
# ]
# if _label in _labels and random.random() > downsampling:
# self.down_sampling_cnt += 1
# continue
# else:
# new_train_files.append(_)
# self.train_files = new_train_files
#
# random.shuffle(self.train_files)
# self.train_file_cnt = len(self.train_files)
# self.train_files = np.array(self.train_files)
#
# if self.data_visualization:
# dv.show_label_calss_bar_per_epoch(self.train_files, self.record_dir)
#
# if debug:
# self.train_files = self.train_files[:64]
# for i in range(len(self.val_files)):
# self.val_files[i] = self.val_files[i][:64]
# self.val_y = self.val_y[:64]
self.image_mean_file = path.get_image_mean_file(self.k_fold_file, self.val_index,
data_type=self.data_type)
self.image_std_file = path.get_image_std_file(self.k_fold_file, self.val_index,
data_type=self.data_type)
#
# self.save_model_format = os.path.join(self.record_dir,
# "%sweights.{epoch:03d}.hdf5" % str([str(i) for i in self.label_position]))
# self.tem_model_file = os.path.join(self.record_dir, 'weights.hdf5')
# pathlib.Path(self.record_dir).mkdir(parents=True, exist_ok=True)
# pathlib.Path(self.fit_img_record_dir).mkdir(parents=True, exist_ok=True)
# pathlib.Path(self.predict_img_record_dir).mkdir(parents=True, exist_ok=True)
#
# print("##########load model config")
# print("##########file name is: %s" % file_name)
# print("##########val index is: %d" % self.val_index)
# print("##########model dir is: %s" % model_dir)
# print("##########record dir is: %s" % self.record_dir)
# self.save_log("train file: %d, val file: %d" % (len(self.train_files), len(self.val_y)))
def save_log(self, log):
log = time.strftime("%Y-%m-%d:%H:%M:%S") + ": " + log
print(log)
with open(self.log_file, "a") as f:
f.write(log)
f.write("\n")
def decrease_train_files(self, num):
self.train_files = self.train_files[:num]
def decrease_val_files(self, num):
for i in range(len(self.val_files)):
self.val_files[i] = self.val_files[i][:num]
self.val_y = self.val_y[:num]
def get_init_stage(self):
stage = 0
for i in range(len(self.epoch)):
stage = i
if self.initial_epoch + 1 <= self.epoch[i]:
break
return stage
def get_stage(self, epoch):
stage = 0
for i in range(len(self.epoch)):
stage = i
if epoch + 1 <= self.epoch[i]:
break
return stage
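# Illustrative trace of the stage helpers above (the epoch boundaries are hypothetical):
# config = KerasModelConfig(..., epoch=(5, 10, 20), initial_epoch=7, ...)
# config.get_init_stage()  # -> 1: training resumes in the second stage
# config.get_stage(12)     # -> 2: epoch 13 falls in the third stage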
def get_steps_per_epoch(self, stage):
return math.ceil(len(self.train_files) / self.train_batch_size[stage])
def get_weights_path(self, epoch):
return os.path.join(self.record_dir,
"%sweights.%03d.hdf5" % (str([str(j) for j in self.label_position]), epoch))
def predict_tta_all(self, model):
for epoch in range(1, self.epoch[-1]):
unique_path = re.match(r".*competition[\\/]*(.*)", self.get_weights_path(epoch)).group(1)
real_weight_file = os.path.join("E:\\backup\\jdfc", pathlib.Path(unique_path))
if not os.path.exists(real_weight_file):
self.save_log("weight not existed in %s" % real_weight_file)
continue
model.load_weights(real_weight_file)
predict = predict_tta(model, self)
save_prediction_file(predict, self.get_weights_path(epoch), True)
evaluate(self.val_y, predict, self.get_weights_path(epoch), self)
def dynamic_model_import(weights_file=None, model_path_in=None):
if model_path_in is None:
model_file = "_".join(re.match(r".*record\\(.*)\\\[", weights_file).group(1).split("\\"))
model_dir = re.match(r"(.*)\\record", weights_file).group(1)
model_path = os.path.join(model_dir, model_file)
else:
model_path = model_path_in
root_dir, type_dir, name = re.match(r".*competition\\(.*)", model_path).group(1).split("\\")
package = __import__(".".join([root_dir, type_dir, name]))
attr_get_model = getattr(getattr(getattr(package, type_dir), name), "get_model")
attr_model_config = getattr(getattr(getattr(package, type_dir), name), "model_config")
return attr_get_model, attr_model_config
def predict_tta(model: keras.Model, model_config: KerasModelConfig, verbose=1):
if model_config.input_norm:
model_config.save_log("use image norm during predict tta")
pre_datagen = data_loader.KerasGenerator(featurewise_center=True,
featurewise_std_normalization=True,
rescale=1. / 256,
model_config=model_config,
real_transform=True)
pre_datagen.check_mean_std_file(model_config)
pre_datagen.load_image_global_mean_std(model_config.image_mean_file, model_config.image_std_file)
else:
pre_datagen = data_loader.KerasGenerator(model_config=model_config, real_transform=True)
y_pred = None
start = time.time()
tta = data_loader.TestTimeAugmentation(crop=model_config.tta_crop, flip=model_config.tta_flip)
pre_datagen.tta = tta
predict_times = 0
for i in range(len(model_config.val_files)):
files = model_config.val_files[i]
for j in range(tta.tta_times):
predict_times += 1
model_config.save_log(
"start predict with tta index is %d, data type is %s" % (j, model_config.data_type[i]))
pre_flow = pre_datagen.flow_from_files(files, mode="predict",
target_size=model_config.image_size,
batch_size=model_config.predict_batch_size,
tta_index=j)
if y_pred is None:
y_pred = np.array(model.predict_generator(pre_flow, steps=len(files) / model_config.predict_batch_size,
verbose=verbose, workers=12))
else:
y_pred += np.array(model.predict_generator(pre_flow, steps=len(files) / model_config.predict_batch_size,
verbose=verbose, workers=12))
# assert y_pred.shape[0] == model_config.val_y.shape[0]
y_pred = y_pred / (len(model_config.data_type) * tta.tta_times)
print("####### predict %d times, spend %d seconds total ######" % (predict_times, time.time() - start))
return y_pred
def predict(model: keras.Model, model_config: KerasModelConfig, verbose=1):
if model_config.input_norm:
pre_datagen = data_loader.KerasGenerator(model_config=model_config, featurewise_center=True,
featurewise_std_normalization=True,
rescale=1. / 256)
print("use input norm in predict phase")
else:
pre_datagen = data_loader.KerasGenerator(model_config=model_config)
pre_datagen.check_mean_std_file(model_config)
pre_datagen.load_image_global_mean_std(model_config.image_mean_file, model_config.image_std_file)
y_pred = None
start = time.time()
for i in range(len(model_config.val_files)):
files = model_config.val_files[i]
model_config.save_log("start predict data type %s" % model_config.data_type[i])
pre_flow = pre_datagen.flow_from_files(files, mode="predict",
target_size=model_config.image_size,
batch_size=model_config.predict_batch_size)
if y_pred is None:
y_pred = np.array(model.predict_generator(pre_flow, steps=len(files) / model_config.predict_batch_size,
verbose=verbose, workers=16))
else:
y_pred += np.array(model.predict_generator(pre_flow, steps=len(files) / model_config.predict_batch_size,
verbose=verbose, workers=16))
# assert y_pred.shape[0] == model_config.val_y.shape[0]
y_pred = y_pred / len(model_config.data_type)
print("####### predict spend %d seconds ######" % (time.time() - start))
return y_pred
def summary_val_value(name, value, model_config):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
model_config.writer.add_summary(summary, model_config.current_epoch)
model_config.writer.flush()
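# Minimal usage sketch for summary_val_value (tag names and values are illustrative):
# one call per scalar per epoch writes to the TensorBoard log in model_config.record_dir.
# summary_val_value('val/f2_score', 0.87, model_config)
# summary_val_value('val/loss', 0.31, model_config)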
def evaluate(y, y_pred, weight_name, model_config: KerasModelConfig, search_times=100):
if len(model_config.label_position) > 1:
thread_f2_01 = fbeta_score(y, (np.array(y_pred) > 0.1).astype(np.int8), beta=2, average='samples')  # assumed completion: the source line is truncated here; the 0.1 threshold and F2 'samples' averaging are inferred from the variable name
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Commonly used utility functions."""
import re
import copy
import warnings
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
from scipy.spatial.distance import cdist
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
from astropy.coordinates import SkyCoord, Distance, EarthLocation
from astropy import units
import erfa
from . import _utils
__all__ = [
"POL_STR2NUM_DICT",
"POL_NUM2STR_DICT",
"CONJ_POL_DICT",
"JONES_STR2NUM_DICT",
"JONES_NUM2STR_DICT",
"LatLonAlt_from_XYZ",
"XYZ_from_LatLonAlt",
"rotECEF_from_ECEF",
"ECEF_from_rotECEF",
"ENU_from_ECEF",
"ECEF_from_ENU",
"phase_uvw",
"unphase_uvw",
"uvcalibrate",
"apply_uvflag",
"get_lst_for_time",
"polstr2num",
"polnum2str",
"jstr2num",
"jnum2str",
"parse_polstr",
"parse_jpolstr",
"conj_pol",
"reorder_conj_pols",
"baseline_to_antnums",
"antnums_to_baseline",
"baseline_index_flip",
"get_baseline_redundancies",
"get_antenna_redundancies",
"collapse",
"mean_collapse",
"absmean_collapse",
"quadmean_collapse",
"or_collapse",
"and_collapse",
]
# fmt: off
# polarization constants
# maps polarization strings to polarization integers
POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4,
"I": 1, "Q": 2, "U": 3, "V": 4, # support straight stokes names
"rr": -1, "ll": -2, "rl": -3, "lr": -4,
"xx": -5, "yy": -6, "xy": -7, "yx": -8}
# maps polarization integers to polarization strings
POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV",
-1: "rr", -2: "ll", -3: "rl", -4: "lr",
-5: "xx", -6: "yy", -7: "xy", -8: "yx"}
# maps how polarizations change when antennas are swapped
CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy",
"ee": "ee", "nn": "nn", "en": "ne", "ne": "en",
"rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl",
"I": "I", "Q": "Q", "U": "U", "V": "V",
"pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"}
# maps jones matrix element strings to jones integers
# Add entries that don't start with "J" to allow shorthand versions
JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8,
"xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8,
"Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4,
"rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4}
# maps jones integers to jones matrix element strings
JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr",
-5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"}
# maps uvdata pols to input feed polarizations
POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"],
"xy": ["x", "y"], "yx": ["y", "x"],
"ee": ["e", "e"], "nn": ["n", "n"],
"en": ["e", "n"], "ne": ["n", "e"],
"rr": ["r", "r"], "ll": ["l", "l"],
"rl": ["r", "l"], "lr": ["l", "r"]}
# fmt: on
def _get_iterable(x):
"""Return iterable version of input."""
if isinstance(x, Iterable):
return x
else:
return (x,)
def _fits_gethduaxis(hdu, axis):
"""
Make axis arrays for fits files.
Parameters
----------
hdu : astropy.io.fits HDU object
The HDU to make an axis array for.
axis : int
The axis number of interest (1-based).
Returns
-------
ndarray of float
Array of values for the specified axis.
"""
ax = str(axis)
axis_num = hdu.header["NAXIS" + ax]
val = hdu.header["CRVAL" + ax]
delta = hdu.header["CDELT" + ax]
index = hdu.header["CRPIX" + ax] - 1
return delta * (np.arange(axis_num) - index) + val
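# Worked example of the WCS arithmetic above (header values are illustrative): with
# NAXIS1=5, CRVAL1=100.0, CDELT1=2.0 and CRPIX1=3, the returned axis is
# 2.0 * (np.arange(5) - 2) + 100.0, i.e. array([ 96., 98., 100., 102., 104.]).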
def _fits_indexhdus(hdulist):
"""
Get a dict of table names and HDU numbers from a FITS HDU list.
Parameters
----------
hdulist : list of astropy.io.fits HDU objects
List of HDUs to get names for
Returns
-------
dict
dictionary with table names as keys and HDU number as values.
"""
tablenames = {}
for i in range(len(hdulist)):
try:
tablenames[hdulist[i].header["EXTNAME"]] = i
except (KeyError):
continue
return tablenames
def _get_fits_extra_keywords(header, keywords_to_skip=None):
"""
Get any extra keywords and return as dict.
Parameters
----------
header : FITS header object
header object to get extra_keywords from.
keywords_to_skip : list of str
list of keywords to not include in extra keywords in addition to standard
FITS keywords.
Returns
-------
dict
dict of extra keywords.
"""
# List standard FITS header items that are still should not be included in
# extra_keywords
# These are the beginnings of FITS keywords to ignore, the actual keywords
# often include integers following these names (e.g. NAXIS1, CTYPE3)
std_fits_substrings = [
"HISTORY",
"SIMPLE",
"BITPIX",
"EXTEND",
"BLOCKED",
"GROUPS",
"PCOUNT",
"BSCALE",
"BZERO",
"NAXIS",
"PTYPE",
"PSCAL",
"PZERO",
"CTYPE",
"CRVAL",
"CRPIX",
"CDELT",
"CROTA",
"CUNIT",
]
if keywords_to_skip is not None:
std_fits_substrings.extend(keywords_to_skip)
extra_keywords = {}
# find all the other header items and keep them as extra_keywords
for key in header:
# check if key contains any of the standard FITS substrings
if np.any([sub in key for sub in std_fits_substrings]):
continue
if key == "COMMENT":
extra_keywords[key] = str(header.get(key))
elif key != "":
extra_keywords[key] = header.get(key)
return extra_keywords
def _check_history_version(history, version_string):
"""Check if version_string is present in history string."""
if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""):
return True
else:
return False
def _check_histories(history1, history2):
"""Check if two histories are the same."""
if history1.replace("\n", "").replace(" ", "") == history2.replace(
"\n", ""
).replace(" ", ""):
return True
else:
return False
def _combine_history_addition(history1, history2):
"""
Find extra history to add to have minimal repeats.
Parameters
----------
history1 : str
First history.
history2 : str
Second history
Returns
-------
str
Extra history to add to first history.
"""
# first check if they're the same to avoid more complicated processing.
if _check_histories(history1, history2):
return None
hist2_words = history2.split(" ")
add_hist = ""
test_hist1 = " " + history1 + " "
for i, word in enumerate(hist2_words):
if " " + word + " " not in test_hist1:
add_hist += " " + word
keep_going = i + 1 < len(hist2_words)
while keep_going:
if (hist2_words[i + 1] == " ") or (
" " + hist2_words[i + 1] + " " not in test_hist1
):
add_hist += " " + hist2_words[i + 1]
del hist2_words[i + 1]
keep_going = i + 1 < len(hist2_words)
else:
keep_going = False
if add_hist == "":
add_hist = None
return add_hist
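# Worked example of the history helpers above (strings are illustrative):
# h1 = "Read in file A. Downselected to 2 antennas."
# h2 = "Read in file A. Downselected to 2 antennas. Combined data along baseline-time axis."
# _check_histories(h1, h2)           # -> False
# _combine_history_addition(h1, h2)  # -> " Combined data along baseline-time axis."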
def baseline_to_antnums(baseline, Nants_telescope):
"""
Get the antenna numbers corresponding to a given baseline number.
Parameters
----------
baseline : int or array_like of ints
baseline number
Nants_telescope : int
number of antennas
Returns
-------
int or array_like of int
first antenna number(s)
int or array_like of int
second antenna number(s)
"""
if Nants_telescope > 2048:
raise Exception(
"error Nants={Nants}>2048 not supported".format(Nants=Nants_telescope)
)
return_array = isinstance(baseline, (np.ndarray, list, tuple))
ant1, ant2 = _utils.baseline_to_antnums(
np.ascontiguousarray(baseline, dtype=np.int64)
)
if return_array:
return ant1, ant2
else:
return ant1.item(0), ant2.item(0)
def antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False):
"""
Get the baseline number corresponding to two given antenna numbers.
Parameters
----------
ant1 : int or array_like of int
first antenna number
ant2 : int or array_like of int
second antenna number
Nants_telescope : int
number of antennas
attempt256 : bool
Option to try to use the older 256 standard used in
many uvfits files (will use 2048 standard if there are more
than 256 antennas). Default is False.
Returns
-------
int or array of int
baseline number corresponding to the two antenna numbers.
"""
if Nants_telescope is not None and Nants_telescope > 2048:
raise Exception(
"cannot convert ant1, ant2 to a baseline index "
"with Nants={Nants}>2048.".format(Nants=Nants_telescope)
)
return_array = isinstance(ant1, (np.ndarray, list, tuple))
baseline = _utils.antnums_to_baseline(
np.ascontiguousarray(ant1, dtype=np.int64),
np.ascontiguousarray(ant2, dtype=np.int64),
attempt256=attempt256,
)
if return_array:
return baseline
else:
return baseline.item(0)
def baseline_index_flip(baseline, Nants_telescope):
"""Change baseline number to reverse antenna order."""
ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope)
return antnums_to_baseline(ant2, ant1, Nants_telescope)
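# Round-trip example for the baseline helpers above (antenna numbers are illustrative):
# bl = antnums_to_baseline(0, 3, Nants_telescope=128)
# baseline_to_antnums(bl, Nants_telescope=128)  # -> (0, 3)
# baseline_index_flip(bl, Nants_telescope=128)  # same as antnums_to_baseline(3, 0, 128)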
def _x_orientation_rep_dict(x_orientation):
"""Create replacement dict based on x_orientation."""
if x_orientation.lower() == "east" or x_orientation.lower() == "e":
return {"x": "e", "y": "n"}
elif x_orientation.lower() == "north" or x_orientation.lower() == "n":
return {"x": "n", "y": "e"}
else:
raise ValueError("x_orientation not recognized.")
def polstr2num(pol, x_orientation=None):
"""
Convert polarization str to number according to AIPS Memo 117.
Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
not true Stokes, but also supports 'I', 'Q', 'U', 'V'.
Parameters
----------
pol : str
polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
int
Number corresponding to string
Raises
------
ValueError
If the pol string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
dict_use = copy.deepcopy(POL_STR2NUM_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in POL_STR2NUM_DICT.items():
new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[new_key] = value
except ValueError:
warnings.warn("x_orientation not recognized.")
poldict = {k.lower(): v for k, v in dict_use.items()}
if isinstance(pol, str):
out = poldict[pol.lower()]
elif isinstance(pol, Iterable):
out = [poldict[key.lower()] for key in pol]
else:
raise ValueError(
"Polarization {p} cannot be converted to a polarization number.".format(
p=pol
)
)
return out
def polnum2str(num, x_orientation=None):
"""
Convert polarization number to str according to AIPS Memo 117.
Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
not true Stokes
Parameters
----------
num : int
polarization number
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to convert to
E/N strings. See corresponding parameter on UVData for more details.
Returns
-------
str
String corresponding to polarization number
Raises
------
ValueError
If the polarization number cannot be converted to a polarization string.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
dict_use = copy.deepcopy(POL_NUM2STR_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in POL_NUM2STR_DICT.items():
new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[key] = new_val
except ValueError:
warnings.warn("x_orientation not recognized.")
if isinstance(num, (int, np.int32, np.int64)):
out = dict_use[num]
elif isinstance(num, Iterable):
out = [dict_use[i] for i in num]
else:
raise ValueError(
"Polarization {p} cannot be converted to string.".format(p=num)
)
return out
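# Examples of the polarization mappings above (values follow POL_STR2NUM_DICT and
# POL_NUM2STR_DICT):
# polstr2num('xx')                       # -> -5
# polstr2num('pI')                       # -> 1
# polnum2str(-5)                         # -> 'xx'
# polnum2str(-5, x_orientation='east')   # -> 'ee'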
def jstr2num(jstr, x_orientation=None):
"""
Convert jones polarization str to number according to calfits memo.
Parameters
----------
jstr : str
antenna (jones) polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
int
antenna (jones) polarization number corresponding to string
Raises
------
ValueError
If the jones string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
dict_use = copy.deepcopy(JONES_STR2NUM_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in JONES_STR2NUM_DICT.items():
new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[new_key] = value
except ValueError:
warnings.warn("x_orientation not recognized.")
jdict = {k.lower(): v for k, v in dict_use.items()}
if isinstance(jstr, str):
out = jdict[jstr.lower()]
elif isinstance(jstr, Iterable):
out = [jdict[key.lower()] for key in jstr]
else:
raise ValueError(
"Jones polarization {j} cannot be converted to index.".format(j=jstr)
)
return out
def jnum2str(jnum, x_orientation=None):
"""
Convert jones polarization number to str according to calfits memo.
Parameters
----------
jnum : int
antenna (jones) polarization number
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to convert to
E/N strings. See corresponding parameter on UVData for more details.
Returns
-------
str
antenna (jones) polarization string corresponding to number
Raises
------
ValueError
If the jones polarization number cannot be converted to a jones
polarization string.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
dict_use = copy.deepcopy(JONES_NUM2STR_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in JONES_NUM2STR_DICT.items():
new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[key] = new_val
except ValueError:
warnings.warn("x_orientation not recognized.")
if isinstance(jnum, (int, np.int32, np.int64)):
out = dict_use[jnum]
elif isinstance(jnum, Iterable):
out = [dict_use[i] for i in jnum]
else:
raise ValueError(
"Jones polarization {j} cannot be converted to string.".format(j=jnum)
)
return out
def parse_polstr(polstr, x_orientation=None):
"""
Parse a polarization string and return pyuvdata standard polarization string.
See utils.POL_STR2NUM_DICT for options.
Parameters
----------
polstr : str
polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
str
AIPS Memo 117 standard string
Raises
------
ValueError
If the pol string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
return polnum2str(
polstr2num(polstr, x_orientation=x_orientation), x_orientation=x_orientation
)
def parse_jpolstr(jpolstr, x_orientation=None):
"""
Parse a Jones polarization string and return pyuvdata standard jones string.
See utils.JONES_STR2NUM_DICT for options.
Parameters
----------
jpolstr : str
Jones polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
str
calfits memo standard string
Raises
------
ValueError
If the jones string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation is not recognized.
"""
return jnum2str(
jstr2num(jpolstr, x_orientation=x_orientation), x_orientation=x_orientation
)
def conj_pol(pol):
"""
Return the polarization for the conjugate baseline.
For example, (1, 2, 'xy') = conj(2, 1, 'yx').
The returned polarization is determined by assuming the antenna pair is
reversed in the data, and finding the correct polarization correlation
which will yield the requested baseline when conjugated. Note this means
changing the polarization for linear cross-pols, but keeping auto-pol
(e.g. xx) and Stokes the same.
Parameters
----------
pol : str or int
Polarization string or integer.
Returns
-------
cpol : str or int
Polarization as if antennas are swapped (type matches input)
"""
cpol_dict = {k.lower(): v for k, v in CONJ_POL_DICT.items()}
if isinstance(pol, str):
cpol = cpol_dict[pol.lower()]
elif isinstance(pol, Iterable):
cpol = [conj_pol(p) for p in pol]
elif isinstance(pol, (int, np.int32, np.int64)):
cpol = polstr2num(cpol_dict[polnum2str(pol).lower()])
else:
raise ValueError("Polarization not recognized, cannot be conjugated.")
return cpol
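# Hedged sketch of conj_pol behavior (illustrative only). Assumes the AIPS
# numbering where -7 is "xy" and -8 is "yx".
def _example_conj_pol():
    # Cross-pols swap when the antenna pair is conjugated ...
    assert conj_pol("xy") == "yx"
    assert conj_pol(-7) == -8
    # ... while auto-pols are unchanged.
    assert conj_pol("xx") == "xx"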
def reorder_conj_pols(pols):
"""
Reorder multiple pols, swapping pols that are conjugates of one another.
For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
This is useful for the _key2inds function in the case where an antenna
pair is specified but the conjugate pair exists in the data. The conjugated
data should be returned in the order of the polarization axis, so after
conjugating the data, the pols need to be reordered.
For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
the user requests antpair (1, 0), they should get:
[(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]
Parameters
----------
pols : array_like of str or int
Polarization array (strings or ints).
Returns
-------
conj_order : ndarray of int
Indices to reorder polarization array.
"""
if not isinstance(pols, Iterable):
raise ValueError("reorder_conj_pols must be given an array of polarizations.")
cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where
conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]
if -1 in conj_order:
raise ValueError(
"Not all conjugate pols exist in the polarization array provided."
)
return conj_order
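# Hedged sketch (illustrative only) showing how reorder_conj_pols produces an
# index array that swaps conjugate cross-pols, matching the docstring example.
def _example_reorder_conj_pols():
    pols = np.array(["xx", "xy", "yx", "yy"])
    order = reorder_conj_pols(pols)
    assert list(order) == [0, 2, 1, 3]
    assert list(pols[order]) == ["xx", "yx", "xy", "yy"]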
def LatLonAlt_from_XYZ(xyz, check_acceptability=True):
"""
Calculate lat/lon/alt from ECEF x,y,z.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
check_acceptability : bool
Flag to check XYZ coordinates are reasonable.
Returns
-------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
"""
# convert to a numpy array
xyz = np.asarray(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
squeeze = xyz.ndim == 1
if squeeze:
xyz = xyz[np.newaxis, :]
xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
# checking for acceptable values
if check_acceptability:
norms = np.linalg.norm(xyz, axis=0)
if not all(np.logical_and(norms >= 6.35e6, norms <= 6.39e6)):
raise ValueError("xyz values should be ECEF x, y, z coordinates in meters")
# this helper function returns one 2D array because it is less overhead for cython
lla = _utils._lla_from_xyz(xyz)
if squeeze:
return lla[0, 0], lla[1, 0], lla[2, 0]
return lla[0], lla[1], lla[2]
def XYZ_from_LatLonAlt(latitude, longitude, altitude):
"""
Calculate ECEF x,y,z from lat/lon/alt values.
Parameters
----------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
latitude = np.ascontiguousarray(latitude, dtype=np.float64)
longitude = np.ascontiguousarray(longitude, dtype=np.float64)
altitude = np.ascontiguousarray(altitude, dtype=np.float64)
n_pts = latitude.size
if longitude.size != n_pts:
raise ValueError(
"latitude, longitude and altitude must all have the same length"
)
if altitude.size != n_pts:
raise ValueError(
"latitude, longitude and altitude must all have the same length"
)
xyz = _utils._xyz_from_latlonalt(latitude, longitude, altitude)
xyz = xyz.T
if n_pts == 1:
return xyz[0]
return xyz
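# Hedged round-trip sketch for the geodetic <-> ECEF helpers (illustrative
# only). The latitude/longitude/altitude values are arbitrary but chosen so
# the resulting ECEF magnitude passes the acceptability check.
def _example_latlonalt_xyz_roundtrip():
    lat, lon, alt = np.deg2rad(-30.72), np.deg2rad(21.43), 1050.0
    xyz = XYZ_from_LatLonAlt(lat, lon, alt)
    lat2, lon2, alt2 = LatLonAlt_from_XYZ(xyz)
    assert np.allclose([lat, lon, alt], [lat2, lon2, alt2])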
def rotECEF_from_ECEF(xyz, longitude):
"""
Get rotated ECEF positions such that the x-axis goes through the longitude.
Miriad and uvfits expect antenna positions in this frame
(with longitude of the array center/telescope location)
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
longitude : float
longitude in radians to rotate coordinates to
(usually the array center/telescope location).
Returns
-------
ndarray of float
Rotated ECEF coordinates, shape (Npts, 3).
"""
angle = -1 * longitude
rot_matrix = np.array(
[
[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
return rot_matrix.dot(xyz.T).T
def ECEF_from_rotECEF(xyz, longitude):
"""
Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
longitude : float
longitude in radians giving the x direction of the rotated coordinates
(usually the array center/telescope location).
Returns
-------
ndarray of float
ECEF coordinates, shape (Npts, 3).
"""
angle = longitude
rot_matrix = np.array(
[
[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
return rot_matrix.dot(xyz.T).T
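# Hedged sketch (illustrative only): rotECEF_from_ECEF followed by
# ECEF_from_rotECEF is an identity up to floating point, since the two apply
# opposite rotations about the z-axis. Positions and longitude are arbitrary.
def _example_rotecef_roundtrip():
    xyz = np.array([[6378137.0, 0.0, 0.0], [0.0, 6378137.0, 10.0]])
    lon = np.deg2rad(21.43)
    rot_xyz = rotECEF_from_ECEF(xyz, lon)
    assert np.allclose(ECEF_from_rotECEF(rot_xyz, lon), xyz)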
def ENU_from_ECEF(xyz, latitude, longitude, altitude):
"""
Calculate local ENU (east, north, up) coordinates from ECEF coordinates.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
Altitude of center of ENU coordinates in meters.
Returns
-------
ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates
"""
xyz = np.asarray(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
squeeze = False
if xyz.ndim == 1:
squeeze = True
xyz = xyz[np.newaxis, :]
xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
# check that these are sensible ECEF values -- their magnitudes need to be
# on the order of Earth's radius
ecef_magnitudes = np.linalg.norm(xyz, axis=0)
sensible_radius_range = (6.35e6, 6.39e6)
if np.any(ecef_magnitudes <= sensible_radius_range[0]) or np.any(
ecef_magnitudes >= sensible_radius_range[1]
):
raise ValueError(
"ECEF vector magnitudes must be on the order of the radius of the earth"
)
# the cython utility expects (3, Npts) for faster manipulation
# transpose after we get the array back to match the expected shape
enu = _utils._ENU_from_ECEF(
xyz,
np.ascontiguousarray(latitude, dtype=np.float64),
np.ascontiguousarray(longitude, dtype=np.float64),
np.ascontiguousarray(altitude, dtype=np.float64),
)
enu = enu.T
if squeeze:
enu = np.squeeze(enu)
return enu
def ECEF_from_ENU(enu, latitude, longitude, altitude):
"""
Calculate ECEF coordinates from local ENU (east, north, up) coordinates.
Parameters
----------
enu : ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
Altitude of center of ENU coordinates in meters.
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
enu = np.asarray(enu)
if enu.ndim > 1 and enu.shape[1] != 3:
raise ValueError("The expected shape of the ENU array is (Npts, 3).")
squeeze = False
if enu.ndim == 1:
squeeze = True
enu = enu[np.newaxis, :]
enu = np.ascontiguousarray(enu.T, dtype=np.float64)
# the cython utility expects (3, Npts) for faster manipulation
# transpose after we get the array back to match the expected shape
xyz = _utils._ECEF_from_ENU(
enu,
np.ascontiguousarray(latitude, dtype=np.float64),
np.ascontiguousarray(longitude, dtype=np.float64),
np.ascontiguousarray(altitude, dtype=np.float64),
)
xyz = xyz.T
if squeeze:
xyz = np.squeeze(xyz)
return xyz
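# Hedged round-trip sketch for ENU <-> ECEF (illustrative only). The center is
# built from geodetic coordinates so the magnitude check in ENU_from_ECEF is
# satisfied; the offsets are a few hundred meters, as for a compact array.
def _example_enu_ecef_roundtrip():
    lat, lon, alt = np.deg2rad(-30.72), np.deg2rad(21.43), 1050.0
    center = XYZ_from_LatLonAlt(lat, lon, alt)
    xyz = center + np.array([[100.0, -50.0, 20.0], [-300.0, 80.0, -10.0]])
    enu = ENU_from_ECEF(xyz, lat, lon, alt)
    assert np.allclose(ECEF_from_ENU(enu, lat, lon, alt), xyz)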
def phase_uvw(ra, dec, initial_uvw):
"""
Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame.
This code expects input uvws or positions relative to the telescope
location in the same frame that ra/dec are in (e.g. icrs or gcrs) and
returns phased ones in the same frame.
Note that this code is nearly identical to ENU_from_ECEF, except that it
uses an arbitrary phasing center rather than a coordinate center.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
initial_uvw : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
uvw : ndarray of float
uvw array in the same frame as initial_uvws, ra and dec.
"""
if initial_uvw.ndim == 1:
initial_uvw = initial_uvw[np.newaxis, :]
return _utils._phase_uvw(
np.float64(ra),
np.float64(dec),
np.ascontiguousarray(initial_uvw.T, dtype=np.float64),
).T
def unphase_uvw(ra, dec, uvw):
"""
Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.
This code expects phased uvws or positions in the same frame that ra/dec
are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
uvw : ndarray of float
Phased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
unphased_uvws : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
"""
if uvw.ndim == 1:
uvw = uvw[np.newaxis, :]
return _utils._unphase_uvw(
np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw.T, dtype=np.float64),
).T
def polar2_to_cart3(lon_array, lat_array):
"""
Convert 2D polar coordinates into 3D cartesian coordinates.
This is a simple routine for converting a set of spherical angular coordinates
into a set of 3D cartesian vectors, where the x-direction is set by the position (0, 0).
Parameters
----------
lon_array : float or ndarray
Longitude coordinates, which increase in the counter-clockwise direction.
Units of radians. Can either be a float or ndarray -- if the latter, must have
the same shape as lat_array.
lat_array : float or ndarray
Latitude coordinates, where 0 falls on the equator of the sphere. Units of
radians. Can either be a float or ndarray -- if the latter, must have the same
shape as lon_array.
Returns
-------
xyz_array : ndarray of float
Cartesian coordinates of the given longitude and latitude on a unit sphere.
Shape is (3, coord_shape), where coord_shape is the shape of lon_array and
lat_array if they were provided as type ndarray, otherwise (3,).
"""
# Check to make sure that we are not playing with mixed types
if type(lon_array) is not type(lat_array):
raise ValueError(
"lon_array and lat_array must either both be floats or ndarrays."
)
if isinstance(lon_array, np.ndarray):
if lon_array.shape != lat_array.shape:
raise ValueError("lon_array and lat_array must have the same shape.")
# Once we know that lon_array and lat_array are of the same shape,
# time to create our 3D set of vectors!
xyz_array = np.array(
[
np.cos(lon_array) * np.cos(lat_array),
np.sin(lon_array) * np.cos(lat_array),
np.sin(lat_array),
],
dtype=float,
)
return xyz_array
def cart3_to_polar2(xyz_array):
"""
Convert 3D cartesian coordinates into 2D polar coordinates.
This is a simple routine for converting a set of 3D cartesian vectors into
spherical coordinates, where the position (0, 0) lies along the x-direction.
Parameters
----------
xyz_array : ndarray of float
Cartesian coordinates, need not be of unit vector length. Shape is
(3, coord_shape).
Returns
-------
lon_array : ndarray of float
Longitude coordinates, which increase in the counter-clockwise direction.
Units of radians, shape is (coord_shape,).
lat_array : ndarray of float
Latitude coordinates, where 0 falls on the equator of the sphere. Units of
radians, shape is (coord_shape,).
"""
if not isinstance(xyz_array, np.ndarray):
raise ValueError("xyz_array must be an ndarray.")
if xyz_array.ndim == 0:
raise ValueError("xyz_array must have ndim > 0")
if xyz_array.shape[0] != 3:
raise ValueError("xyz_array must be length 3 across the zeroth axis.")
# The longitude coord is relatively easy to calculate, just take the X and Y
# components and find the arctan of the pair.
lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)
# If we _knew_ that xyz_array was always of length 1, then this call could be a much
# simpler one to arcsin. But to make this generic, we'll use the length of the XY
# component along with arctan2.
lat_array = np.arctan2(
xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
)
# Return the two arrays
return lon_array, lat_array
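# Hedged sketch (illustrative only): polar2_to_cart3 and cart3_to_polar2 are
# inverses on the unit sphere, with longitudes returned in [0, 2*pi).
def _example_polar_cart_roundtrip():
    lon = np.array([0.1, 1.5, 4.0])
    lat = np.array([-0.3, 0.0, 0.7])
    xyz = polar2_to_cart3(lon, lat)
    new_lon, new_lat = cart3_to_polar2(xyz)
    assert np.allclose(new_lon, lon) and np.allclose(new_lat, lat)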
def _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot):
"""
Apply a rotation matrix to a series of vectors.
This is a simple convenience function which wraps numpy's matmul function for use
with various vector rotation functions in this module. This code could, in
principle, be replaced by a cythonized piece of code, although the matmul function
is _pretty_ well optimized already. This function is not meant to be called by
users, but is instead used by multiple higher-level utility functions (namely those
that perform rotations).
Parameters
----------
xyz_array : ndarray of floats
Array of vectors to be rotated. When nrot > 1, shape may be (n_rot, 3, n_vec)
or (1, 3, n_vec), the latter is useful for when performing multiple rotations
on a fixed set of vectors. If nrot = 1, shape may be (1, 3, n_vec), (3, n_vec),
or (3,).
rot_matrix : ndarray of floats
Series of rotation matrices to be applied to the stack of vectors. Must be
of shape (n_rot, 3, 3)
n_rot : int
Number of individual rotation matrices to be applied.
Returns
-------
rotated_xyz : ndarray of floats
Array of vectors that have been rotated, of shape (n_rot, 3, n_vectors,).
"""
# Do a quick check to make sure that things look sensible
if rot_matrix.shape != (n_rot, 3, 3):
raise ValueError(
"rot_matrix must be of shape (n_rot, 3, 3), where n_rot=%i." % n_rot
)
if (xyz_array.ndim == 3) and (
(xyz_array.shape[0] not in [1, n_rot]) or (xyz_array.shape[-2] != 3)
):
raise ValueError("Misshaped xyz_array - expected shape (n_rot, 3, n_vectors).")
if (xyz_array.ndim < 3) and (xyz_array.shape[0] != 3):
raise ValueError("Misshaped xyz_array - expected shape (3, n_vectors) or (3,).")
rotated_xyz = np.matmul(rot_matrix, xyz_array)
return rotated_xyz
def _rotate_one_axis(xyz_array, rot_amount, rot_axis):
"""
Rotate an array of 3D positions around the a single axis (x, y, or z).
This function performs a basic rotation of 3D vectors about one of the principal
axes -- the x-axis, the y-axis, or the z-axis.
Note that the rotations here obey the right-hand rule -- that is to say, from the
perspective of the positive side of the axis of rotation, a positive rotation will
cause points on the plane intersecting this axis to move in a counter-clockwise
fashion.
Parameters
----------
xyz_array : ndarray of float
Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
rot_amount : float or ndarray of float
Amount (in radians) to rotate the given set of coordinates. Can either be a
single float (or ndarray of shape (1,)) if rotating all vectors by the same
amount, otherwise expected to be shape (Nrot,).
rot_axis : int
Axis around which the rotation is applied. 0 is the x-axis, 1 is the y-axis,
and 2 is the z-axis.
Returns
-------
rotated_xyz : ndarray of float
Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
"""
# If rot_amount is None or all zeros, then this is just one big old no-op.
if (rot_amount is None) or np.all(rot_amount == 0.0):
if np.ndim(xyz_array) == 1:
return deepcopy(xyz_array[np.newaxis, :, np.newaxis])
elif np.ndim(xyz_array) == 2:
return deepcopy(xyz_array[np.newaxis, :, :])
else:
return deepcopy(xyz_array)
# Check and see how big of a rotation matrix we need
n_rot = 1 if (not isinstance(rot_amount, np.ndarray)) else (rot_amount.shape[0])
n_vec = xyz_array.shape[-1]
# The promotion of values to float64 is to suppress numerical precision issues,
# since the matrix math can - in limited circumstances - introduce precision errors
# of order 10x the limiting numerical precision of the float. For a float32/single,
# that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
# a part in 1e15.
rot_matrix = np.zeros((3, 3, n_rot), dtype=np.float64)
# Figure out which pieces of the matrix we need to update
temp_jdx = (rot_axis + 1) % 3
temp_idx = (rot_axis + 2) % 3
# Fill in the rotation matrices accordingly
rot_matrix[rot_axis, rot_axis] = 1
rot_matrix[temp_idx, temp_idx] = np.cos(rot_amount, dtype=np.float64)
rot_matrix[temp_jdx, temp_jdx] = rot_matrix[temp_idx, temp_idx]
rot_matrix[temp_idx, temp_jdx] = np.sin(rot_amount, dtype=np.float64)
rot_matrix[temp_jdx, temp_idx] = -rot_matrix[temp_idx, temp_jdx]
# The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
# of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
# This is a special case where we allow the rotation axis to "expand" along
# the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
# but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
# swap the n_vector and n_rot axes, and then swap them back once everything
# else is done.
return np.transpose(
_rotate_matmul_wrapper(
np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
),
axes=[2, 1, 0],
)
else:
return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
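# Hedged sketch of the private single-axis rotation helper (illustrative
# only): a +90 degree right-handed rotation about the z-axis carries the x
# unit vector onto the y unit vector, and the output picks up a leading
# rotation axis of length n_rot = 1.
def _example_rotate_one_axis():
    xyz = np.array([[1.0], [0.0], [0.0]])  # shape (3, n_vectors)
    rotated = _rotate_one_axis(xyz, np.pi / 2, 2)
    assert rotated.shape == (1, 3, 1)
    assert np.allclose(rotated[0, :, 0], [0.0, 1.0, 0.0])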
def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2):
"""
Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z).
This function performs a sequential pair of basic rotations of 3D vectors about
the principal axes -- the x-axis, the y-axis, or the z-axis.
Note that the rotations here obey the right-hand rule -- that is to say, from the
perspective of the positive side of the axis of rotation, a positive rotation will
cause points on the plane intersecting this axis to move in a counter-clockwise
fashion.
Parameters
----------
xyz_array : ndarray of float
Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
rot_amount1 : float or ndarray of float
Amount (in radians) of rotation to apply during the first rotation of the
sequence, to the given set of coordinates. Can either be a single float (or
ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
expected to be shape (Nrot,).
rot_amount2 : float or ndarray of float
Amount (in radians) of rotation to apply during the second rotation of the
sequence, to the given set of coordinates. Can either be a single float (or
ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
expected to be shape (Nrot,).
rot_axis1 : int
Axis around which the first rotation is applied. 0 is the x-axis, 1 is the
y-axis, and 2 is the z-axis.
rot_axis2 : int
Axis around which the second rotation is applied. 0 is the x-axis, 1 is the
y-axis, and 2 is the z-axis.
Returns
-------
rotated_xyz : ndarray of float
Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
"""
# Capture some special cases upfront, where we can save ourselves a bit of work
no_rot1 = (rot_amount1 is None) or np.all(rot_amount1 == 0.0)
no_rot2 = (rot_amount2 is None) or np.all(rot_amount2 == 0.0)
if no_rot1 and no_rot2:
# If rot_amount is None, then this is just one big old no-op.
return deepcopy(xyz_array)
elif no_rot1:
# If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation
return _rotate_one_axis(xyz_array, rot_amount2, rot_axis2)
elif no_rot2:
# If rot_amount2 is None, then ignore it and just work w/ the 1st rotation
return _rotate_one_axis(xyz_array, rot_amount1, rot_axis1)
elif rot_axis1 == rot_axis2:
# Capture the case where someone wants to do a sequence of rotations on the same
# axis. Also known as just rotating a single axis.
return _rotate_one_axis(xyz_array, rot_amount1 + rot_amount2, rot_axis1)
# Figure out how many individual rotation matrices we need, accounting for the
# fact that these can either be floats or ndarrays.
n_rot = max(
rot_amount1.shape[0] if isinstance(rot_amount1, np.ndarray) else 1,
rot_amount2.shape[0] if isinstance(rot_amount2, np.ndarray) else 1,
)
n_vec = xyz_array.shape[-1]
# The promotion of values to float64 is to suppress numerical precision issues,
# since the matrix math can - in limited circumstances - introduce precision errors
# of order 10x the limiting numerical precision of the float. For a float32/single,
# that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
# a part in 1e15.
rot_matrix = np.empty((3, 3, n_rot), dtype=np.float64)
# There are two permutations per pair of axes -- when the pair is right-hand
# oriented vs left-hand oriented. Check here which one it is. For example,
# rotating first on the x-axis, second on the y-axis is considered a
# "right-handed" pair, whereas z-axis first, then y-axis would be considered
# a "left-handed" pair.
lhd_order = np.mod(rot_axis2 - rot_axis1, 3) != 1
temp_idx = [
np.mod(rot_axis1 - lhd_order, 3),
np.mod(rot_axis1 + 1 - lhd_order, 3),
np.mod(rot_axis1 + 2 - lhd_order, 3),
]
# We're using lots of sin and cos calculations -- doing them once upfront saves
# quite a bit of time by eliminating redundant calculations
sin_lo = np.sin(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
cos_lo = np.cos(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
sin_hi = np.sin(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
cos_hi = np.cos(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
# Take care of the diagonal terms first, since they aren't actually affected by the
# order of rotational operations
rot_matrix[temp_idx[0], temp_idx[0]] = cos_hi
rot_matrix[temp_idx[1], temp_idx[1]] = cos_lo
rot_matrix[temp_idx[2], temp_idx[2]] = cos_lo * cos_hi
# Now time for the off-diagonal terms, as a set of 3 pairs. The rotation matrix
# for a left-hand oriented pair of rotation axes (e.g., x-rot, then y-rot) is just
# a transpose of the right-hand orientation of the same pair (e.g., y-rot, then
# x-rot).
rot_matrix[temp_idx[0 + lhd_order], temp_idx[1 - lhd_order]] = sin_lo * sin_hi
rot_matrix[temp_idx[0 - lhd_order], temp_idx[lhd_order - 1]] = (
cos_lo * sin_hi * ((-1.0) ** lhd_order)
)
rot_matrix[temp_idx[1 - lhd_order], temp_idx[0 + lhd_order]] = 0.0
rot_matrix[temp_idx[1 + lhd_order], temp_idx[2 - lhd_order]] = sin_lo * (
(-1.0) ** (1 + lhd_order)
)
rot_matrix[temp_idx[lhd_order - 1], temp_idx[0 - lhd_order]] = sin_hi * (
(-1.0) ** (1 + lhd_order)
)
rot_matrix[temp_idx[2 - lhd_order], temp_idx[1 + lhd_order]] = (
sin_lo * cos_hi * ((-1.0) ** (lhd_order))
)
# The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
# of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
# This is a special case where we allow the rotation axis to "expand" along
# the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
# but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
# swap the n_vector and n_rot axes, and then swap them back once everything
# else is done.
return np.transpose(
_rotate_matmul_wrapper(
np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
),
axes=[2, 1, 0],
)
else:
return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
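# Hedged consistency sketch (illustrative only): a sequential two-axis
# rotation should match composing two single-axis rotations, here a z-axis
# rotation followed by a y-axis rotation applied to a few random vectors.
def _example_rotate_two_axis():
    rng = np.random.default_rng(42)
    xyz = rng.standard_normal((3, 5))
    combined = _rotate_two_axis(xyz, 0.4, -1.1, 2, 1)
    sequential = _rotate_one_axis(_rotate_one_axis(xyz, 0.4, 2), -1.1, 1)
    assert np.allclose(combined, sequential)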
def calc_uvw(
app_ra=None,
app_dec=None,
frame_pa=None,
lst_array=None,
use_ant_pos=True,
uvw_array=None,
antenna_positions=None,
antenna_numbers=None,
ant_1_array=None,
ant_2_array=None,
old_app_ra=None,
old_app_dec=None,
old_frame_pa=None,
telescope_lat=None,
telescope_lon=None,
from_enu=False,
to_enu=False,
):
"""
Calculate an array of baseline coordinates, in either uvw or ENU.
This routine is meant as a convenience function for producing baseline coordinates
under a few different circumstances:
1) Calculating ENU coordinates using antenna positions
2) Calculating uvw coordinates at a given sky position using antenna positions
3) Converting from ENU coordinates to uvw coordinates
4) Converting from uvw coordinate to ENU coordinates
5) Converting from uvw coordinates at one sky position to another sky position
Different conversion pathways have different parameters that are required.
Parameters
----------
app_ra : ndarray of float
Apparent RA of the target phase center, required if calculating baseline
coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are
radians.
app_dec : ndarray of float
Apparent declination of the target phase center, required if calculating
baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,),
units are radians.
frame_pa : ndarray of float
Position angle between the great circle of declination in the apparent frame
versus that of the reference frame, used for making sure that "North" on
the derived maps points towards a particular celestial pole (not just the
topocentric one). Required if not deriving baseline coordinates from antenna
positions, from_enu=False, and a value for old_frame_pa is given. Shape is
(Nblts,), units are radians.
old_app_ra : ndarray of float
Apparent RA of the previous phase center, required if not deriving baseline
coordinates from antenna positions and from_enu=False. Shape is (Nblts,),
units are radians.
old_app_dec : ndarray of float
Apparent declination of the previous phase center, required if not deriving
baseline coordinates from antenna positions and from_enu=False. Shape is
(Nblts,), units are radians.
old_frame_pa : ndarray of float
Frame position angle of the previous phase center, required if not deriving
baseline coordinates from antenna positions, from_enu=False, and a value
for frame_pa is supplied. Shape is (Nblts,), units are radians.
lst_array : ndarray of float
Local apparent sidereal time, required if deriving baseline coordinates from
antenna positions, or converting to/from ENU coordinates. Shape is (Nblts,).
use_ant_pos : bool
Switch to determine whether to derive uvw values from the antenna positions
(if set to True), or to use the previously calculated uvw coordinates to derive
the new baseline vectors (if set to False). Default is True.
uvw_array : ndarray of float
Array of previous baseline coordinates (in either uvw or ENU), required if
not deriving new coordinates from antenna positions. Shape is (Nblts, 3).
antenna_positions : ndarray of float
List of antenna positions relative to array center in ECEF coordinates,
required if not providing `uvw_array`. Shape is (Nants, 3).
antenna_numbers: ndarray of int
List of antenna numbers, ordered in the same way as `antenna_positions` (e.g.,
`antenna_numbers[0]` should give the number of the antenna that resides at ECEF
position given by `antenna_positions[0]`). Shape is (Nants,), required if not
providing `uvw_array`. Contains all unique entries of the joint set of
`ant_1_array` and `ant_2_array`.
ant_1_array : ndarray of int
Antenna number of the first antenna in the baseline pair, for all baselines
Required if not providing `uvw_array`, shape is (Nblts,).
ant_2_array : ndarray of int
Antenna number of the second antenna in the baseline pair, for all baselines
Required if not providing `uvw_array`, shape is (Nblts,).
telescope_lat : float
Latitude of the phase center, units radians, required if deriving baseline
coordinates from antenna positions, or converting to/from ENU coordinates.
telescope_lon : float
Longitude of the phase center, units radians, required if deriving baseline
coordinates from antenna positions, or converting to/from ENU coordinates.
from_enu : boolean
Set to True if uvw_array is expressed in ENU coordinates. Default is False.
to_enu : boolean
Set to True if you would like the output expressed in ENU coordinates. Default
is False.
Returns
-------
new_coords : ndarray of float64
Set of baseline coordinates, shape (Nblts, 3).
"""
if to_enu:
if lst_array is None and not use_ant_pos:
raise ValueError(
"Must include lst_array to calculate baselines in ENU coordinates!"
)
if telescope_lat is None:
raise ValueError(
"Must include telescope_lat to calculate baselines "
"in ENU coordinates!"
)
else:
if ((app_ra is None) or (app_dec is None)) and frame_pa is None:
raise ValueError(
"Must include both app_ra and app_dec, or frame_pa to calculate "
"baselines in uvw coordinates!"
)
if use_ant_pos:
# Assume at this point we are dealing w/ antenna positions
if antenna_positions is None:
raise ValueError("Must include antenna_positions if use_ant_pos=True.")
if (ant_1_array is None) or (ant_2_array is None) or (antenna_numbers is None):
raise ValueError(
"Must include ant_1_array, ant_2_array, and antenna_numbers "
"setting use_ant_pos=True."
)
if lst_array is None and not to_enu:
raise ValueError(
"Must include lst_array if use_ant_pos=True and not calculating "
"baselines in ENU coordinates."
)
if telescope_lon is None:
raise ValueError("Must include telescope_lon if use_ant_pos=True.")
ant_dict = {ant_num: idx for idx, ant_num in enumerate(antenna_numbers)}
ant_1_index = np.array([ant_dict[idx] for idx in ant_1_array], dtype=int)
ant_2_index = np.array([ant_dict[idx] for idx in ant_2_array], dtype=int)
N_ants = antenna_positions.shape[0]
# Use the app_ra, app_dec, and lst_array arrays to figure out how many unique
# rotations are actually needed. If the ratio of Nblts to number of unique
# entries is favorable, we can just rotate the antenna positions and save
# ourselves a bit of work.
if to_enu:
# If to_enu, skip all this -- there's only one unique ha + dec combo
unique_mask = np.zeros(len(ant_1_index), dtype=np.bool_)
unique_mask[0] = True
else:
unique_mask = np.append(
True,
(
((lst_array[:-1] - app_ra[:-1]) != (lst_array[1:] - app_ra[1:]))
| (app_dec[:-1] != app_dec[1:])
),
)
# GHA -> Hour Angle as measured at Greenwich (because antenna coords are
# centered such that x-plane intersects the meridian at longitude 0).
if to_enu:
# Unphased coordinates appear to be stored in ENU coordinates -- that's
# equivalent to calculating uvw's based on zenith. We can use that to our
# advantage and spoof the gha and dec based on telescope lon and lat
unique_gha = np.zeros(1) - telescope_lon
unique_dec = np.zeros(1) + telescope_lat
unique_pa = None
else:
unique_gha = (lst_array[unique_mask] - app_ra[unique_mask]) - telescope_lon
unique_dec = app_dec[unique_mask]
unique_pa = 0.0 if frame_pa is None else frame_pa[unique_mask]
# Transpose the ant vectors so that they are in the proper shape
ant_vectors = np.transpose(antenna_positions)[np.newaxis, :, :]
# Apply rotations, and then reorganize the ndarray so that you can access
# individual antenna vectors quickly.
ant_rot_vectors = np.reshape(
np.transpose(
_rotate_one_axis(
_rotate_two_axis(ant_vectors, unique_gha, unique_dec, 2, 1),
unique_pa,
0,
),
axes=[0, 2, 1],
),
(-1, 3),
)
unique_mask[0] = False
unique_map = np.cumsum(unique_mask) * N_ants
new_coords = (
ant_rot_vectors[unique_map + ant_2_index]
- ant_rot_vectors[unique_map + ant_1_index]
)
else:
if uvw_array is None:
raise ValueError("Must include uvw_array if use_ant_pos=False.")
if from_enu:
if to_enu:
# Well this was pointless... returning your uvws unharmed
return uvw_array
# Unphased coordinates appear to be stored in ENU coordinates -- that's
# equivalent to calculating uvw's based on zenith. We can use that to our
# advantage and spoof old_app_ra and old_app_dec based on lst_array and
# telescope_lat
if telescope_lat is None:
raise ValueError(
"Must include telescope_lat if moving between "
'ENU (i.e., "unphased") and uvw coordinates!'
)
if lst_array is None:
raise ValueError(
'Must include lst_array if moving between ENU (i.e., "unphased") '
"and uvw coordinates!"
)
else:
if (old_frame_pa is None) and not (frame_pa is None or to_enu):
raise ValueError(
"Must include old_frame_pa values if data are phased and "
"applying new position angle values (frame_pa)."
)
if ((old_app_ra is None) and not (app_ra is None or to_enu)) or (
(old_app_dec is None) and not (app_dec is None or to_enu)
):
raise ValueError(
"Must include old_app_ra and old_app_dec values when data are "
"already phased and phasing to a new position."
)
# For this operation, all we need is the delta-ha coverage, which _should_ be
# entirely encapsulated by the change in RA.
if (app_ra is None) and (old_app_ra is None):
gha_delta_array = 0.0
else:
gha_delta_array = (lst_array if from_enu else old_app_ra) - (
lst_array if to_enu else app_ra
)
# Notice below there's an axis re-orientation here, to go from uvw -> XYZ,
# where X is pointing in the direction of the source. This is mostly here
# for convenience and code legibility -- a slightly different pair of
# rotations would give you the same result w/o needing to cycle the axes.
# Up front, we want to trap the corner-case where the sky position you are
# phasing up to hasn't changed, just the position angle (i.e., which way is
# up on the map). This is a much easier transform to handle.
if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec):
new_coords = _rotate_one_axis(
uvw_array[:, [2, 0, 1], np.newaxis],
frame_pa - (0.0 if old_frame_pa is None else old_frame_pa),
0,
)[:, :, 0]
else:
new_coords = _rotate_two_axis(
_rotate_two_axis(  # Yo dawg, I heard you like rotation matrices...
uvw_array[:, [2, 0, 1], np.newaxis],
0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa),
(-telescope_lat) if from_enu else (-old_app_dec),
0,
1,
),
gha_delta_array,
telescope_lat if to_enu else app_dec,
2,
1,
)
# One final rotation applied here, to compensate for the fact that we want
# the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with
the chosen frame, if we are not in ENU coordinates
if not to_enu:
new_coords = _rotate_one_axis(new_coords, frame_pa, 0)
# Finally drop the now-vestigial last axis of the array
new_coords = new_coords[:, :, 0]
# There's one last task to do, which is to re-align the axes from projected
# XYZ -> uvw, where X (which points towards the source) falls on the w axis,
# and Y and Z fall on the u and v axes, respectively.
return new_coords[:, [1, 2, 0]]
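# Hedged sketch of calc_uvw in its simplest mode (illustrative only): derive
# ENU baseline vectors directly from antenna positions. The antenna layout,
# numbering, and telescope location below are made-up values.
def _example_calc_uvw_enu():
    antenna_positions = np.array(
        [[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [0.0, 20.0, 0.0]]
    )
    antenna_numbers = np.array([0, 1, 2])
    ant_1_array = np.array([0, 0, 1])
    ant_2_array = np.array([1, 2, 2])
    enu = calc_uvw(
        use_ant_pos=True,
        to_enu=True,
        antenna_positions=antenna_positions,
        antenna_numbers=antenna_numbers,
        ant_1_array=ant_1_array,
        ant_2_array=ant_2_array,
        telescope_lat=np.deg2rad(-30.72),
        telescope_lon=np.deg2rad(21.43),
    )
    # One baseline vector per (ant_1, ant_2) pair.
    assert enu.shape == (len(ant_1_array), 3)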
def transform_sidereal_coords(
lon,
lat,
in_coord_frame,
out_coord_frame,
in_coord_epoch=None,
out_coord_epoch=None,
time_array=None,
):
"""
Transform a given set of coordinates from one sidereal coordinate frame to another.
Uses astropy to convert from a coordinates from sidereal frame into another.
This function will support transforms from several frames, including GCRS,
FK5 (i.e., J2000), FK4 (i.e., B1950), Galactic, Supergalactic, CIRS, HCRS, and
a few others (basically anything that doesn't require knowing the observer's
location on Earth/other celestial body).
Parameters
----------
lon : float or ndarray of floats
Longitudinal coordinate to be transformed, typically expressed as the right
ascension, in units of radians. Can either be a float, or an ndarray of
floats with shape (Ncoords,). Must agree with lat.
lat : float or ndarray of floats
Latitudinal coordinate to be transformed, typically expressed as the
declination, in units of radians. Can either be a float, or an ndarray of
floats with shape (Ncoords,). Must agree with lon.
in_coord_frame : string
Reference frame for the provided coordinates. Expected to match a list of
those supported within the astropy SkyCoord object. An incomplete list includes
'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
out_coord_frame : string
Reference frame to output coordinates in. Expected to match a list of
those supported within the astropy SkyCoord object. An incomplete list includes
'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
in_coord_epoch : float
Epoch for the input coordinate frame. Optional parameter, only required
when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
in fractional years.
out_coord_epoch : float
Epoch for the output coordinate frame. Optional parameter, only required
when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
in fractional years.
time_array : float or ndarray of floats
Julian date(s) to which the coordinates correspond, only used in frames
with annual motion terms (e.g., aberration in GCRS). Can either be a float,
or an ndarray of floats with shape (Ntimes,), assuming that either both lat
and lon are floats, or that Ntimes == Ncoords.
Returns
-------
new_lon : float or ndarray of floats
Longitudinal coordinates, in units of radians. Output will be an ndarray
if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
new_lat : float or ndarray of floats
Latitudinal coordinates, in units of radians. Output will be an ndarray
if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
"""
lon_coord = lon * units.rad
lat_coord = lat * units.rad
# Check here to make sure that lat_coord and lon_coord are the same length,
# either 1 or len(time_array)
if lat_coord.shape != lon_coord.shape:
raise ValueError("lon and lat must be the same shape.")
if lon_coord.ndim == 0:
lon_coord.shape += (1,)
lat_coord.shape += (1,)
# Check to make sure that we have a properly formatted epoch for our in-bound
# coordinate frame
in_epoch = None
if isinstance(in_coord_epoch, str) or isinstance(in_coord_epoch, Time):
# If its a string or a Time object, we don't need to do anything more
in_epoch = Time(in_coord_epoch)
elif in_coord_epoch is not None:
if in_coord_frame.lower() in ["fk4", "fk4noeterms"]:
in_epoch = Time(in_coord_epoch, format="byear")
else:
in_epoch = Time(in_coord_epoch, format="jyear")
# Now do the same for the outbound frame
out_epoch = None
if isinstance(out_coord_epoch, str) or isinstance(out_coord_epoch, Time):
# If its a string or a Time object, we don't need to do anything more
out_epoch = Time(out_coord_epoch)
elif out_coord_epoch is not None:
if out_coord_frame.lower() in ["fk4", "fk4noeterms"]:
out_epoch = Time(out_coord_epoch, format="byear")
else:
out_epoch = Time(out_coord_epoch, format="jyear")
# Make sure that time array matched up with what we expect. Thanks to astropy
# weirdness, time_array has to be the same length as lat/lon coords
rep_time = False
rep_crds = False
if time_array is None:
time_obj_array = None
else:
if isinstance(time_array, Time):
time_obj_array = time_array
else:
time_obj_array = Time(time_array, format="jd", scale="utc")
if (time_obj_array.size != 1) and (lon_coord.size != 1):
if time_obj_array.shape != lon_coord.shape:
raise ValueError(
"Shape of time_array must be either that of "
" lat_coord/lon_coord if len(time_array) > 1."
)
else:
rep_crds = (time_obj_array.size != 1) and (lon_coord.size == 1)
rep_time = (time_obj_array.size == 1) and (lon_coord.size != 1)
if rep_crds:
lon_coord = np.repeat(lon_coord, len(time_array))
lat_coord = np.repeat(lat_coord, len(time_array))
if rep_time:
time_obj_array = Time(
np.repeat(time_obj_array.jd, len(lon_coord)), format="jd", scale="utc",
)
coord_object = SkyCoord(
lon_coord,
lat_coord,
frame=in_coord_frame,
equinox=in_epoch,
obstime=time_obj_array,
)
# Easiest, most general way to transform to the new frame is to create a dummy
# SkyCoord with all the attributes needed -- note that we particularly need this
# in order to use a non-standard equinox/epoch
new_coord = coord_object.transform_to(
SkyCoord(0, 0, unit="rad", frame=out_coord_frame, equinox=out_epoch)
)
return new_coord.spherical.lon.rad, new_coord.spherical.lat.rad
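# Hedged sketch (illustrative only): use transform_sidereal_coords to move a
# position from FK5 (J2000) to ICRS. The coordinates are arbitrary; the two
# frames differ only by the ~20 mas frame bias, well within the loose
# tolerance checked here. Requires astropy.
def _example_transform_sidereal_coords():
    new_lon, new_lat = transform_sidereal_coords(
        np.array([np.deg2rad(45.0)]),
        np.array([np.deg2rad(-30.0)]),
        "fk5",
        "icrs",
        in_coord_epoch=2000.0,
    )
    assert np.allclose(new_lon, np.deg2rad(45.0), rtol=0, atol=5e-6)
    assert np.allclose(new_lat, np.deg2rad(-30.0), rtol=0, atol=5e-6)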
def transform_icrs_to_app(
time_array,
ra,
dec,
telescope_loc,
epoch=2000.0,
pm_ra=None,
pm_dec=None,
vrad=None,
dist=None,
astrometry_library="erfa",
):
"""
Transform a set of coordinates in ICRS to topocentric/apparent coordinates.
This utility uses one of three libraries (astropy, NOVAS, or ERFA) to calculate
the apparent (i.e., topocentric) coordinates of a source at a given time and
location, given a set of coordinates expressed in the ICRS frame. These coordinates
are most typically used for defining the phase center of the array (i.e, calculating
baseline vectors).
As of astropy v4.2, the agreement between the three libraries is consistent down to
the level of better than 1 mas, with the values produced by astropy and pyERFA
consistent to better than 10 µas (this is not surprising, given that astropy uses
pyERFA under the hood for astrometry). ERFA is the default as it outputs
coordinates natively in the apparent frame (whereas NOVAS and astropy do not), as
well as the fact that of the three libraries, it produces results the fastest.
Parameters
----------
time_array : float or array-like of float
Julian dates to calculate coordinate positions for. Can either be a single
float, or an array-like of shape (Ntimes,).
ra : float or array-like of float
ICRS RA of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (with the exception of telescope location parameters).
dec : float or array-like of float
ICRS Dec of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (with the exception of telescope location parameters).
telescope_loc : array-like of floats or EarthLocation
ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
of the array. Can either be provided as an astropy EarthLocation, or a tuple
of shape (3,) containing (in order) the latitude, longitude, and altitude,
in units of radians, radians, and meters, respectively.
epoch : int or float or str or Time object
Epoch of the coordinate data supplied, only used when supplying proper motion
values. If supplying a number, it will be assumed to be in Julian years. Default
is J2000.0.
pm_ra : float or array-like of float
Proper motion in RA of the source, expressed in units of milliarcsec / year.
Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
values should be set to their expected values when the epoch is 2000.0).
Can either be a single float or array of shape (Ntimes,), although this must
be consistent with other parameters (namely ra_coord and dec_coord). Note that
units are in dRA/dt, not cos(Dec)*dRA/dt. Not required.
pm_dec : float or array-like of float
Proper motion in Dec of the source, expressed in units of milliarcsec / year.
Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
values should be set to their expected values when the epoch is 2000.0).
Can either be a single float or array of shape (Ntimes,), although this must
be consistent with other parameters (namely ra_coord and dec_coord). Not
required.
vrad : float or array-like of float
Radial velocity of the source, expressed in units of km / sec. Can either be
a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (namely ra_coord and dec_coord). Not required.
dist : float or array-like of float
Distance of the source, expressed in parsecs. Can either be a single
float or array of shape (Ntimes,), although this must be consistent with other
parameters (namely ra_coord and dec_coord). Not required.
astrometry_library : str
Library used for running the coordinate conversions. Allowed options are
'erfa' (which uses pyERFA), 'novas' (which uses the python-novas library),
and 'astropy' (which uses the astropy utilities). Default is erfa.
Returns
-------
app_ra : ndarray of floats
Apparent right ascension coordinates, in units of radians, of shape (Ntimes,).
app_dec : ndarray of floats
Apparent declination coordinates, in units of radians, of shape (Ntimes,).
"""
# Make sure that the library requested is actually permitted
if astrometry_library not in ["erfa", "novas", "astropy"]:
raise ValueError(
"Requested coordinate transformation library is not supported, please "
"select either 'erfa', 'novas', or 'astropy' for astrometry_library."
)
ra_coord = ra * units.rad
dec_coord = dec * units.rad
# Check here to make sure that ra_coord and dec_coord are the same length,
# either 1 or len(time_array)
multi_coord = ra_coord.size != 1
if ra_coord.shape != dec_coord.shape:
raise ValueError("ra and dec must be the same shape.")
pm_ra_coord = None if pm_ra is None else pm_ra * (units.mas / units.yr)
pm_dec_coord = None if pm_dec is None else pm_dec * (units.mas / units.yr)
d_coord = (
None if (dist is None or np.all(dist == 0.0)) else Distance(dist * units.pc)
)
v_coord = None if vrad is None else vrad * (units.km / units.s)
opt_list = [pm_ra_coord, pm_dec_coord, d_coord, v_coord]
opt_names = ["pm_ra", "pm_dec", "dist", "vrad"]
# Check the optional inputs, make sure that they're sensible
for item, name in zip(opt_list, opt_names):
if item is not None:
if ra_coord.shape != item.shape:
raise ValueError("%s must be the same shape as ra and dec." % name)
if isinstance(telescope_loc, EarthLocation):
site_loc = telescope_loc
else:
site_loc = EarthLocation.from_geodetic(
telescope_loc[1] * (180.0 / np.pi),
telescope_loc[0] * (180.0 / np.pi),
height=telescope_loc[2],
)
# Useful for both astropy and novas methods, the latter of which gives easy
# access to the IERS data that we want.
if isinstance(time_array, Time):
time_obj_array = time_array
else:
time_obj_array = Time(time_array, format="jd", scale="utc")
if time_obj_array.size != 1:
if (time_obj_array.shape != ra_coord.shape) and multi_coord:
raise ValueError(
"time_array must be of either of length 1 (single "
"float) or same length as ra and dec."
)
elif time_obj_array.ndim == 0:
# Make the array at least 1-dimensional so we don't run into indexing
# issues later.
time_obj_array = Time([time_obj_array])
# Check to make sure that we have a properly formatted epoch for our in-bound
# coordinate frame
coord_epoch = None
if isinstance(epoch, str) or isinstance(epoch, Time):
# If its a string or a Time object, we don't need to do anything more
coord_epoch = Time(epoch)
elif epoch is not None:
coord_epoch = Time(epoch, format="jyear")
# Note if time_array is a single element
multi_time = time_obj_array.size != 1
# Get IERS data, which is needed for NOVAS and ERFA
polar_motion_data = iers.earth_orientation_table.get()
pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
delta_x_array, delta_y_array = polar_motion_data.dcip_xy(time_obj_array)
pm_x_array = pm_x_array.to_value("arcsec")
pm_y_array = pm_y_array.to_value("arcsec")
delta_x_array = delta_x_array.to_value("marcsec")
delta_y_array = delta_y_array.to_value("marcsec")
# Catch the case where we don't have CIP delta values yet (they don't typically have
# predictive values like the polar motion does)
delta_x_array[np.isnan(delta_x_array)] = 0.0
delta_y_array[np.isnan(delta_y_array)] = 0.0
# If the source was instantiated w/ floats, it'll be a 0-dim object, which will
# throw errors if we try to treat it as an array. Reshape to a 1D array of len 1
# so that all the calls can be uniform
if ra_coord.ndim == 0:
ra_coord.shape += (1,)
dec_coord.shape += (1,)
if pm_ra_coord is not None:
    pm_ra_coord.shape += (1,)
if pm_dec_coord is not None:
    pm_dec_coord.shape += (1,)
if d_coord is not None:
d_coord.shape += (1,)
if v_coord is not None:
v_coord.shape += (1,)
# If there is an epoch and a proper motion, apply that motion now
if astrometry_library == "astropy":
# Astropy doesn't have (oddly enough) a way of getting at the apparent RA/Dec
# directly, but we can cheat this by going to AltAz, and then converting back
# to apparent RA/Dec using the telescope lat and LAST.
if (epoch is not None) and (pm_ra is not None) and (pm_dec is not None):
# astropy is a bit weird in how it handles proper motion, so rather than
# fight with it to do it all in one step, we separate it into two: first
# apply proper motion to ICRS, then transform to topocentric.
sky_coord = SkyCoord(
ra=ra_coord,
dec=dec_coord,
pm_ra_cosdec=pm_ra_coord * np.cos(dec_coord),
pm_dec=pm_dec_coord,
frame="icrs",
)
sky_coord = sky_coord.apply_space_motion(dt=(time_obj_array - coord_epoch))
ra_coord = sky_coord.ra
dec_coord = sky_coord.dec
if d_coord is not None:
d_coord = d_coord.repeat(ra_coord.size)
if v_coord is not None:
v_coord = v_coord.repeat(ra_coord.size)
sky_coord = SkyCoord(
ra=ra_coord,
dec=dec_coord,
distance=d_coord,
radial_velocity=v_coord,
frame="icrs",
)
azel_data = sky_coord.transform_to(
SkyCoord(
np.zeros_like(time_obj_array) * units.rad,
np.zeros_like(time_obj_array) * units.rad,
location=site_loc,
obstime=time_obj_array,
frame="altaz",
)
)
app_ha, app_dec = erfa.ae2hd(
azel_data.az.rad, azel_data.alt.rad, site_loc.lat.rad,
)
app_ra = np.mod(
time_obj_array.sidereal_time("apparent", longitude=site_loc.lon).rad
- app_ha,
2 * np.pi,
)
elif astrometry_library == "novas":
# Import the NOVAS library only if it's needed/available.
try:
from novas import compat as novas
from novas.compat import eph_manager
import novas_de405 # noqa
except ImportError as e: # pragma: no cover
raise ImportError(
"novas and/or novas_de405 are not installed but is required for "
"NOVAS functionality"
) from e
# Call is needed to load high-precision ephem data in NOVAS
jd_start, jd_end, number = eph_manager.ephem_open()
# Define the obs location, which is needed to calculate the diurnal aberration term
# and polar wobble corrections
site_loc = novas.make_on_surface(
site_loc.lat.deg, # latitude in deg
site_loc.lon.deg, # Longitude in deg
site_loc.height.to_value("m"), # Height in meters
0.0, # Temperature, set to 0 for now (no atm refrac)
0.0, # Pressure, set to 0 for now (no atm refrac)
)
# NOVAS wants things in terrestial time and UT1
tt_time_array = time_obj_array.tt.jd
ut1_time_array = time_obj_array.ut1.jd
gast_array = time_obj_array.sidereal_time("apparent", "greenwich").rad
if np.any(tt_time_array < jd_start) or np.any(tt_time_array > jd_end):
raise ValueError(
"No current support for JPL ephems outside of 1700 - 2300 AD. "
"Check back later (or possibly earlier)..."
)
app_ra = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape)
app_dec = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape)
for idx in range(len(app_ra)):
if multi_coord or (idx == 0):
# Create a catalog entry for the source in question
cat_entry = novas.make_cat_entry(
"dummy_name", # Dummy source name
"GKK", # Catalog ID, fixed for now
156, # Star ID number, fixed for now
ra_coord[idx].to_value("hourangle"),
dec_coord[idx].to_value("deg"),
0.0
if pm_ra is None
else (
pm_ra_coord.to_value("mas/yr")
* np.cos(dec_coord[idx].to_value("rad"))
),
0.0 if pm_dec is None else pm_dec_coord.to_value("mas/yr"),
0.0
if (dist is None or np.any(dist == 0.0))
else (d_coord.kiloparsec ** -1.0),
0.0 if (vrad is None) else v_coord.to_value("km/s"),
)
# Update polar wobble parameters for a given timestamp
if multi_time or (idx == 0):
gast = gast_array[idx]
pm_x = pm_x_array[idx] * np.cos(gast) + pm_y_array[idx] * np.sin(gast)
pm_y = pm_y_array[idx] * np.cos(gast) - pm_x_array[idx] * np.sin(gast)
tt_time = tt_time_array[idx]
ut1_time = ut1_time_array[idx]
novas.cel_pole(
tt_time, 2, delta_x_array[idx], delta_y_array[idx],
)
# Calculate topocentric RA/Dec values
[temp_ra, temp_dec] = novas.topo_star(
tt_time,
(tt_time - ut1_time) * 86400.0,
cat_entry,
site_loc,
accuracy=0,
)
xyz_array = polar2_to_cart3(
temp_ra * (np.pi / 12.0), temp_dec * (np.pi / 180.0)
)
xyz_array = novas.wobble(tt_time, pm_x, pm_y, xyz_array, 1)
app_ra[idx], app_dec[idx] = cart3_to_polar2(np.array(xyz_array))
elif astrometry_library == "erfa":
# liberfa wants things in radians
pm_x_array *= np.pi / (3600.0 * 180.0)
pm_y_array *= np.pi / (3600.0 * 180.0)
[_, _, _, app_dec, app_ra, eqn_org] = erfa.atco13(
ra_coord.to_value("rad"),
dec_coord.to_value("rad"),
0.0 if (pm_ra is None) else pm_ra_coord.to_value("rad/yr"),
0.0 if (pm_dec is None) else pm_dec_coord.to_value("rad/yr"),
0.0 if (dist is None or np.any(dist == 0.0)) else (d_coord.pc ** -1.0),
0.0 if (vrad is None) else v_coord.to_value("km/s"),
time_obj_array.utc.jd,
0.0,
time_obj_array.delta_ut1_utc,
site_loc.lon.rad,
site_loc.lat.rad,
site_loc.height.to_value("m"),
pm_x_array,
pm_y_array,
0,  # air pressure, used for refraction (ignored)
0, # amb temperature, used for refraction (ignored)
0, # rel humidity, used for refraction (ignored)
0, # wavelength, used for refraction (ignored)
)
app_ra = np.mod(app_ra - eqn_org, 2 * np.pi)
return app_ra, app_dec
def transform_app_to_icrs(
time_array, app_ra, app_dec, telescope_loc, astrometry_library="erfa",
):
"""
Transform a set of coordinates in topocentric/apparent to ICRS coordinates.
This utility uses either astropy or erfa to calculate the ICRS coordinates of
a given set of apparent source coordinates. These coordinates are most typically
used for defining the celestial/catalog position of a source. Note that at present,
this is only implemented in astropy and pyERFA, although it could hypothetically
be extended to NOVAS at some point.
Parameters
----------
time_array : float or ndarray of float
Julian dates to calculate coordinate positions for. Can either be a single
float, or an ndarray of shape (Ntimes,).
app_ra : float or ndarray of float
Apparent RA of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ncoord,). Note that if time_array is
not a singleton value, then Ncoord must be equal to Ntimes.
app_dec : float or ndarray of float
Apparent Dec of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ncoord,). Note that if time_array is
not a singleton value, then Ncoord must be equal to Ntimes.
telescope_loc : tuple of floats or EarthLocation
ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
of the array. Can either be provided as an astropy EarthLocation, or a tuple
of shape (3,) containing (in order) the latitude, longitude, and altitude,
in units of radians, radians, and meters, respectively.
Returns
-------
icrs_ra : ndarray of floats
ICRS right ascension coordinates, in units of radians, of either shape
(Ntimes,) if Ntimes >1, otherwise (Ncoord,).
icrs_dec : ndarray of floats
ICRS declination coordinates, in units of radians, of either shape
(Ntimes,) if Ntimes >1, otherwise (Ncoord,).
"""
# Make sure that the library requested is actually permitted
if astrometry_library not in ["erfa", "astropy"]:
raise ValueError(
"Requested coordinate transformation library is not supported, please "
"select either 'erfa' or 'astropy' for astrometry_library."
)
ra_coord = app_ra * units.rad
dec_coord = app_dec * units.rad
# Check here to make sure that ra_coord and dec_coord are the same length,
# either 1 or len(time_array)
multi_coord = ra_coord.size != 1
if ra_coord.shape != dec_coord.shape:
raise ValueError("app_ra and app_dec must be the same shape.")
if isinstance(telescope_loc, EarthLocation):
site_loc = telescope_loc
else:
site_loc = EarthLocation.from_geodetic(
telescope_loc[1] * (180.0 / np.pi),
telescope_loc[0] * (180.0 / np.pi),
height=telescope_loc[2],
)
if isinstance(time_array, Time):
time_obj_array = time_array
else:
time_obj_array = Time(time_array, format="jd", scale="utc")
if time_obj_array.size != 1:
if (time_obj_array.shape != ra_coord.shape) and multi_coord:
raise ValueError(
"time_array must be of either of length 1 (single "
"float) or same length as ra and dec."
)
elif time_obj_array.ndim == 0:
# Make the array at least 1-dimensional so we don't run into indexing
# issues later.
time_obj_array = Time([time_obj_array])
if astrometry_library == "astropy":
az_coord, el_coord = erfa.hd2ae(
np.mod(
time_obj_array.sidereal_time("apparent", longitude=site_loc.lon).rad
- ra_coord.to_value("rad"),
2 * np.pi,
),
dec_coord.to_value("rad"),
site_loc.lat.rad,
)
sky_coord = SkyCoord(
az_coord * units.rad,
el_coord * units.rad,
frame="altaz",
location=site_loc,
obstime=time_obj_array,
)
coord_data = sky_coord.transform_to("icrs")
icrs_ra = coord_data.ra.rad
icrs_dec = coord_data.dec.rad
elif astrometry_library == "erfa":
# Get IERS data, which is needed for highest precision
polar_motion_data = iers.earth_orientation_table.get()
pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
pm_x_array = pm_x_array.to_value("rad")
pm_y_array = pm_y_array.to_value("rad")
bpn_matrix = erfa.pnm06a(time_obj_array.tt.jd, 0.0)
cip_x, cip_y = erfa.bpn2xy(bpn_matrix)
cio_s = erfa.s06(time_obj_array.tt.jd, 0.0, cip_x, cip_y)
eqn_org = erfa.eors(bpn_matrix, cio_s)
# Observed to ICRS via ERFA
icrs_ra, icrs_dec = erfa.atoc13(
"r",
ra_coord.to_value("rad") + eqn_org,
dec_coord.to_value("rad"),
time_obj_array.utc.jd,
0.0, # Second half of the UT date, not needed
time_obj_array.delta_ut1_utc,
site_loc.lon.rad,
site_loc.lat.rad,
site_loc.height.value,
pm_x_array,
pm_y_array,
            0, # air pressure, used for refraction (ignored)
0, # amb temperature, used for refraction (ignored)
0, # rel humidity, used for refraction (ignored)
0, # wavelength, used for refraction (ignored)
)
# Return back the two RA/Dec arrays
return icrs_ra, icrs_dec
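# Illustrative usage sketch (not part of the original module): converts one set of
# apparent coordinates back to ICRS. The site tuple and coordinate values below are
# hypothetical placeholders, and running this requires pyERFA plus IERS data.
def _example_transform_app_to_icrs():  # pragma: no cover
    time_jd = np.array([2459000.5])
    app_ra = np.array([1.25])  # radians
    app_dec = np.array([-0.47])  # radians
    telescope_loc = (-0.53, 0.37, 1073.0)  # (lat rad, lon rad, alt m), hypothetical
    return transform_app_to_icrs(time_jd, app_ra, app_dec, telescope_loc)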
def calc_parallactic_angle(
app_ra, app_dec, lst_array, telescope_lat,
):
"""
Calculate the parallactic angle between RA/Dec and the AltAz frame.
Parameters
----------
app_ra : ndarray of floats
Array of apparent RA values in units of radians, shape (Ntimes,).
app_dec : ndarray of floats
Array of apparent dec values in units of radians, shape (Ntimes,).
telescope_lat : float
Latitude of the observatory, in units of radians.
lst_array : float or ndarray of float
        Array of local apparent sidereal times to calculate position angle values
for, in units of radians. Can either be a single float or an array of shape
(Ntimes,).
"""
    # This is just a simple wrapper around the pas function in ERFA
return erfa.pas(app_ra, app_dec, lst_array, telescope_lat)
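# Illustrative usage sketch (not part of the original module): parallactic angle for
# a single hypothetical pointing; all inputs are in radians.
def _example_calc_parallactic_angle():  # pragma: no cover
    return calc_parallactic_angle(
        app_ra=np.array([1.25]),
        app_dec=np.array([-0.47]),
        lst_array=np.array([1.30]),
        telescope_lat=-0.53,
    )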
def calc_frame_pos_angle(
time_array,
app_ra,
app_dec,
telescope_loc,
ref_frame,
ref_epoch=None,
offset_pos=(np.pi / 360.0),
):
"""
    Calculate a position angle given an apparent position and a reference frame.
This function is used to determine the position angle between the great
circle of declination in apparent coordinates, versus that in a given
    reference frame. Note that this is slightly different from the parallactic
    angle, which is the angle between the great circles of apparent declination
    and of elevation.
    Parameters
    ----------
time_array : float or ndarray of floats
Array of julian dates to calculate position angle values for, of shape
(Ntimes,).
app_ra : ndarray of floats
Array of apparent RA values in units of radians, shape (Ntimes,).
app_dec : ndarray of floats
Array of apparent dec values in units of radians, shape (Ntimes,).
telescope_loc : tuple of floats or EarthLocation
ITRF latitude, longitude, and altitude (rel to sea-level) of the observer.
Can either be provided as an astropy EarthLocation, or an array-like of shape
(3,) containing the latitude, longitude, and altitude, in that order, with units
of radians, radians, and meters, respectively.
offset_pos : float
Distance of the offset position used to calculate the frame PA. Default
        is 0.5 degrees, which should be sufficient for most applications.
ref_frame : str
Coordinate frame to calculate position angles for. Can be any of the
several supported frames in astropy (a limited list: fk4, fk5, icrs,
gcrs, cirs, galactic).
    ref_epoch : str or float
        Epoch of the coordinates, only used when ref_frame = fk4 or fk5. Given
        in units of fractional years, either as a float or as a string with
the epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0).
Returns
-------
frame_pa : ndarray of floats
Array of position angles, in units of radians.
"""
# Check to see if the position angles should default to zero
if (ref_frame is None) or (ref_frame == "topo"):
        # Nothing to do in this case, so just return zeros.
return np.zeros_like(time_array)
# This creates an array of unique entries of ra + dec + time, since the processing
# time for each element can be non-negligible, and entries along the Nblt axis can
# be highly redundant.
unique_mask = np.union1d(
np.union1d(
np.unique(app_ra, return_index=True)[1],
np.unique(app_dec, return_index=True)[1],
),
np.unique(time_array, return_index=True)[1],
)
# Pluck out the unique entries for each
unique_ra = app_ra[unique_mask]
unique_dec = app_dec[unique_mask]
unique_time = time_array[unique_mask]
# Figure out how many elements we need to transform
n_coord = len(unique_mask)
    # Offset north/south positions by offset_pos (0.5 deg by default), such that
    # the PA is determined over a 1 deg arc by default.
    up_dec = unique_dec + offset_pos
    dn_dec = unique_dec - offset_pos
    up_ra = unique_ra.copy()
    dn_ra = unique_ra.copy()
# Wrap the positions if they happen to go over the poles
up_ra[up_dec > (np.pi / 2.0)] = np.mod(
up_ra[up_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi
)
up_dec[up_dec > (np.pi / 2.0)] = np.pi - up_dec[up_dec > (np.pi / 2.0)]
    dn_ra[-dn_dec > (np.pi / 2.0)] = np.mod(
        dn_ra[-dn_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi
    )
    dn_dec[-dn_dec > (np.pi / 2.0)] = (-np.pi) - dn_dec[-dn_dec > (np.pi / 2.0)]
# Run the set of offset coordinates through the "reverse" transform. The two offset
# positions are concat'd together to help reduce overheads
ref_ra, ref_dec = calc_sidereal_coords(
np.tile(unique_time, 2),
np.concatenate((dn_ra, up_ra)),
np.concatenate((dn_dec, up_dec)),
telescope_loc,
ref_frame,
coord_epoch=ref_epoch,
)
# Use the pas function from ERFA to calculate the position angle. The negative sign
# is here because we're measuring PA of app -> frame, but we want frame -> app.
unique_pa = -erfa.pas(
ref_ra[:n_coord], ref_dec[:n_coord], ref_ra[n_coord:], ref_dec[n_coord:]
)
# Finally, we have to go back through and "fill in" the redundant entries
frame_pa = np.zeros_like(app_ra)
for idx in range(n_coord):
select_mask = np.logical_and(
np.logical_and(unique_ra[idx] == app_ra, unique_dec[idx] == app_dec,),
unique_time[idx] == time_array,
)
frame_pa[select_mask] = unique_pa[idx]
return frame_pa
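# Illustrative usage sketch (not part of the original module): frame position angles
# relative to ICRS for a few hypothetical times and pointings. Requires pyERFA and
# IERS data, so treat it purely as a sketch.
def _example_calc_frame_pos_angle():  # pragma: no cover
    time_array = np.array([2459000.1, 2459000.2, 2459000.3])
    app_ra = np.array([1.25, 1.26, 1.27])
    app_dec = np.array([-0.47, -0.47, -0.47])
    telescope_loc = (-0.53, 0.37, 1073.0)  # (lat rad, lon rad, alt m), hypothetical
    return calc_frame_pos_angle(time_array, app_ra, app_dec, telescope_loc, "icrs")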
def lookup_jplhorizons(
target_name,
time_array,
telescope_loc=None,
high_cadence=False,
force_indv_lookup=None,
):
"""
Lookup solar system body coordinates via the JPL-Horizons service.
This utility is useful for generating ephemerides, which can then be interpolated in
order to provide positional data for a target which is moving, such as planetary
bodies and other solar system objects. Use of this function requires the
installation of the `astroquery` module.
Parameters
----------
target_name : str
Name of the target to gather an ephemeris for. Must match the name
in the JPL-Horizons database.
time_array : array-like of float
Times in UTC Julian days to gather an ephemeris for.
telescope_loc : array-like of float
ITRF latitude, longitude, and altitude (rel to sea-level) of the observer. Must
be an array-like of shape (3,) containing the latitude, longitude, and
altitude, in that order, with units of radians, radians, and meters,
respectively.
high_cadence : bool
If set to True, will calculate ephemeris points every 3 minutes in time, as
opposed to the default of every 3 hours.
force_indv_lookup : bool
If set to True, will calculate coordinate values for each value found within
`time_array`. If False, a regularized time grid is sampled that encloses the
values contained within `time_array`. Default is False, unless `time_array` is
of length 1, in which the default is set to True.
Returns
-------
ephem_times : ndarray of float
Times for which the ephemeris values were calculated, in UTC Julian days.
ephem_ra : ndarray of float
ICRS Right ascension of the target at the values within `ephem_times`, in
units of radians.
ephem_dec : ndarray of float
ICRS Declination of the target at the values within `ephem_times`, in units
of radians.
ephem_dist : ndarray of float
Distance of the target relative to the observer, at the values within
`ephem_times`, in units of parsecs.
ephem_vel : ndarray of float
Velocity of the targets relative to the observer, at the values within
`ephem_times`, in units of km/sec.
"""
try:
from astroquery.jplhorizons import Horizons
except ImportError as err: # pragma: no cover
raise ImportError(
"astroquery is not installed but is required for "
"planet ephemeris functionality"
) from err
from pyuvdata.data import DATA_PATH
from os.path import join as path_join
from json import load as json_load
# Get the telescope location into a format that JPL-Horizons can understand,
# which is nominally a dict w/ entries for lon (units of deg), lat (units of
# deg), and elevation (units of km).
if isinstance(telescope_loc, EarthLocation):
site_loc = {
"lon": telescope_loc.lon.deg,
"lat": telescope_loc.lat.deg,
"elevation": telescope_loc.height.to_value(unit=units.km),
}
elif telescope_loc is None:
# Setting to None will report the geocentric position
site_loc = None
else:
site_loc = {
"lon": telescope_loc[1] * (180.0 / np.pi),
"lat": telescope_loc[0] * (180.0 / np.pi),
"elevation": telescope_loc[2] * (0.001), # m -> km
}
# If force_indv_lookup is True, or unset but only providing a single value, then
# just calculate the RA/Dec for the times requested rather than creating a table
# to interpolate from.
if force_indv_lookup or (
(np.array(time_array).size == 1) and (force_indv_lookup is None)
):
epoch_list = np.unique(time_array)
if len(epoch_list) > 50:
raise ValueError(
"Requesting too many individual ephem points from JPL-Horizons. This "
"can be remedied by setting force_indv_lookup=False or limiting the "
"number of values in time_array."
)
else:
        # When querying for multiple times, it's faster (and kinder to the
# good folks at JPL) to create a range to query, and then interpolate
# between values. The extra buffer of 0.001 or 0.25 days for high and
# low cadence is to give enough data points to allow for spline
# interpolation of the data.
if high_cadence:
start_time = np.min(time_array) - 0.001
stop_time = np.max(time_array) + 0.001
step_time = "3m"
n_entries = (stop_time - start_time) * (1440.0 / 3.0)
else:
# The start/stop time here are setup to maximize reusability of the
# data, since astroquery appears to cache the results from previous
# queries.
start_time = (0.25 * np.floor(4.0 * np.min(time_array))) - 0.25
stop_time = (0.25 * np.ceil(4.0 * np.max(time_array))) + 0.25
step_time = "3h"
n_entries = (stop_time - start_time) * (24.0 / 3.0)
# We don't want to overtax the JPL service, so limit ourselves to 1000
# individual queries at a time. Note that this is likely a conservative
# cap for JPL-Horizons, but there should be exceptionally few applications
# that actually require more than this.
if n_entries > 1000:
if (len(np.unique(time_array)) <= 50) and (force_indv_lookup is None):
# If we have a _very_ sparse set of epochs, pass that along instead
epoch_list = np.unique(time_array)
else:
# Otherwise, time to raise an error
raise ValueError(
"Too many ephem points requested from JPL-Horizons. This "
"can be remedied by setting high_cadance=False or limiting "
"the number of values in time_array."
)
else:
epoch_list = {
"start": Time(start_time, format="jd").isot,
"stop": Time(stop_time, format="jd").isot,
"step": step_time,
}
# Check to make sure dates are within the 1700-2200 time range,
# since not all targets are supported outside of this range
if (np.min(time_array) < 2341973.0) or (np.max(time_array) > 2524593.0):
raise ValueError(
"No current support for JPL ephems outside of 1700 - 2300 AD. "
"Check back later (or possibly earlier)..."
)
# JPL-Horizons has a separate catalog with what it calls 'major bodies',
# and will throw an error if you use the wrong catalog when calling for
# astrometry. We'll use the dict below to capture this behavior.
with open(path_join(DATA_PATH, "jpl_major_bodies.json"), "r") as fhandle:
major_body_dict = json_load(fhandle)
target_id = target_name
id_type = "smallbody"
# If we find the target in the major body database, then we can extract the
# target ID to make the query a bit more robust (otherwise JPL-Horizons will fail
# on account that id will find multiple partial matches: e.g., "Mars" will be
# matched with "Mars", "Mars Explorer", "Mars Barycenter"..., and JPL-Horizons will
# not know which to choose).
if target_name in major_body_dict.keys():
target_id = major_body_dict[target_name]
id_type = "majorbody"
query_obj = Horizons(
id=target_id, location=site_loc, epochs=epoch_list, id_type=id_type,
)
# If not in the major bodies catalog, try the minor bodies list, and if
# still not found, throw an error.
try:
ephem_data = query_obj.ephemerides(extra_precision=True)
except KeyError:
# This is a fix for an astroquery + JPL-Horizons bug, that's related to
# API change on JPL's side. In this case, the source is identified, but
# astroquery can't correctly parse the return message from JPL-Horizons.
# See astroquery issue #2169.
ephem_data = query_obj.ephemerides(extra_precision=False) # pragma: no cover
except ValueError as err:
query_obj._session.close()
raise ValueError(
"Target ID is not recognized in either the small or major bodies "
"catalogs, please consult the JPL-Horizons database for supported "
"targets (https://ssd.jpl.nasa.gov/?horizons)."
) from err
    # This is explicitly closed here to trap a bug that occasionally throws an
# unexpected warning, see astroquery issue #1807
query_obj._session.close()
# Now that we have the ephem data, extract out the relevant data
ephem_times = np.array(ephem_data["datetime_jd"])
ephem_ra = np.array(ephem_data["RA"]) * (np.pi / 180.0)
ephem_dec = np.array(ephem_data["DEC"]) * (np.pi / 180.0)
ephem_dist = np.array(ephem_data["delta"]) # AU
ephem_vel = np.array(ephem_data["delta_rate"]) # km/s
return ephem_times, ephem_ra, ephem_dec, ephem_dist, ephem_vel
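# Illustrative usage sketch (not part of the original module): a short Mars ephemeris
# for a hypothetical site. This performs a live JPL-Horizons query, so it needs
# astroquery and network access.
def _example_lookup_jplhorizons():  # pragma: no cover
    time_array = np.array([2459000.1, 2459000.4, 2459000.7])
    telescope_loc = (-0.53, 0.37, 1073.0)  # (lat rad, lon rad, alt m), hypothetical
    return lookup_jplhorizons("Mars", time_array, telescope_loc=telescope_loc)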
def interpolate_ephem(
time_array, ephem_times, ephem_ra, ephem_dec, ephem_dist=None, ephem_vel=None,
):
"""
Interpolates ephemerides to give positions for requested times.
    This is a simple tool for calculating interpolated RA and Dec positions, as well
    as distances and velocities, for a given ephemeris. Under the hood, the method
    uses a cubic spline interpolation to calculate values at the requested times,
provided that there are enough values to interpolate over to do so (requires
>= 4 points), otherwise a linear interpolation is used.
Parameters
----------
time_array : array-like of floats
Times to interpolate positions for, in UTC Julian days.
ephem_times : array-like of floats
        Times in UTC Julian days that correspond to the recorded positions
of the target. Must be array-like, of shape (Npts,), where Npts is the number
of ephemeris points.
ephem_ra : array-like of floats
        Right ascension of the target, at the times given in `ephem_times`. Units are
in radians, must have the same shape as `ephem_times`.
ephem_dec : array-like of floats
Declination of the target, at the times given in `ephem_times`. Units are
in radians, must have the same shape as `ephem_times`.
ephem_dist : array-like of floats
Distance of the target from the observer, at the times given in `ephem_times`.
Optional argument, in units of parsecs. Must have the same shape as
`ephem_times`.
ephem_vel : array-like of floats
Velocities of the target, at the times given in `ephem_times`. Optional
argument, in units of km/sec. Must have the same shape as `ephem_times`.
Returns
-------
ra_vals : ndarray of float
Interpolated RA values, returned as an ndarray of floats with
units of radians, and the same shape as `time_array`.
dec_vals : ndarray of float
Interpolated declination values, returned as an ndarray of floats with
units of radians, and the same shape as `time_array`.
dist_vals : None or ndarray of float
If `ephem_dist` was provided, an ndarray of floats (with same shape as
`time_array`) with the interpolated target distances, in units of parsecs.
If `ephem_dist` was not provided, this returns as None.
vel_vals : None or ndarray of float
If `ephem_vals` was provided, an ndarray of floats (with same shape as
`time_array`) with the interpolated target velocities, in units of km/sec.
If `ephem_vals` was not provided, this returns as None.
"""
# We're importing this here since it's only used for this one function
from scipy.interpolate import interp1d
ephem_shape = np.array(ephem_times).shape
# Make sure that things look reasonable
if np.array(ephem_ra).shape != ephem_shape:
raise ValueError("ephem_ra must have the same shape as ephem_times.")
if np.array(ephem_dec).shape != ephem_shape:
raise ValueError("ephem_dec must have the same shape as ephem_times.")
if (np.array(ephem_dist).shape != ephem_shape) and (ephem_dist is not None):
raise ValueError("ephem_dist must have the same shape as ephem_times.")
if (np.array(ephem_vel).shape != ephem_shape) and (ephem_vel is not None):
raise ValueError("ephem_vel must have the same shape as ephem_times.")
ra_vals = np.zeros_like(time_array, dtype=float)
dec_vals = np.zeros_like(time_array, dtype=float)
dist_vals = None if ephem_dist is None else np.zeros_like(time_array, dtype=float)
vel_vals = None if ephem_vel is None else np.zeros_like(time_array, dtype=float)
if len(ephem_times) == 1:
ra_vals += ephem_ra
dec_vals += ephem_dec
if ephem_dist is not None:
dist_vals += ephem_dist
if ephem_vel is not None:
vel_vals += ephem_vel
else:
if len(ephem_times) > 3:
interp_kind = "cubic"
else:
interp_kind = "linear"
# If we have values that line up perfectly, just use those directly
select_mask = np.isin(time_array, ephem_times)
if np.any(select_mask):
time_select = time_array[select_mask]
ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind="nearest")(
time_select
)
dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind="nearest")(
time_select
)
if ephem_dist is not None:
dist_vals[select_mask] = interp1d(
ephem_times, ephem_dist, kind="nearest"
)(time_select)
if ephem_vel is not None:
vel_vals[select_mask] = interp1d(
ephem_times, ephem_vel, kind="nearest"
)(time_select)
# If we have values lining up between grid points, use spline interpolation
# to calculate their values
select_mask = ~select_mask
if np.any(select_mask):
time_select = time_array[select_mask]
ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind=interp_kind)(
time_select
)
dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind=interp_kind)(
time_select
)
if ephem_dist is not None:
dist_vals[select_mask] = interp1d(
ephem_times, ephem_dist, kind=interp_kind
)(time_select)
if ephem_vel is not None:
vel_vals[select_mask] = interp1d(
ephem_times, ephem_vel, kind=interp_kind
)(time_select)
return (ra_vals, dec_vals, dist_vals, vel_vals)
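# Illustrative usage sketch (not part of the original module): interpolate a toy
# four-point ephemeris onto two requested times (the cubic spline branch is used
# since at least four ephemeris points are supplied).
def _example_interpolate_ephem():  # pragma: no cover
    ephem_times = np.array([2459000.00, 2459000.25, 2459000.50, 2459000.75])
    ephem_ra = np.array([1.20, 1.21, 1.22, 1.23])
    ephem_dec = np.array([-0.40, -0.41, -0.42, -0.43])
    time_array = np.array([2459000.1, 2459000.6])
    ra_vals, dec_vals, _, _ = interpolate_ephem(
        time_array, ephem_times, ephem_ra, ephem_dec
    )
    return ra_vals, dec_vals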
def calc_app_coords(
lon_coord,
lat_coord,
coord_frame="icrs",
coord_epoch=None,
coord_times=None,
coord_type="sidereal",
time_array=None,
lst_array=None,
telescope_loc=None,
pm_ra=None,
pm_dec=None,
vrad=None,
dist=None,
):
"""
Calculate apparent coordinates for several different coordinate types.
This function calculates apparent positions at the current epoch.
Parameters
----------
lon_coord : float or ndarray of float
Longitudinal (e.g., RA) coordinates, units of radians. Must match the same
shape as lat_coord.
lat_coord : float or ndarray of float
Latitudinal (e.g., Dec) coordinates, units of radians. Must match the same
shape as lon_coord.
coord_frame : string
The requested reference frame for the output coordinates, can be any frame
        that is presently supported by astropy. Default is 'icrs'.
coord_epoch : float or str or Time object
Epoch for ref_frame, nominally only used if converting to either the FK4 or
FK5 frames, in units of fractional years. If provided as a float and the
coord_frame is an FK4-variant, value will assumed to be given in Besselian
years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be
in Julian years.
coord_times : float or ndarray of float
Only used when `coord_type="ephem"`, the JD UTC time for each value of
`lon_coord` and `lat_coord`. These values are used to interpolate `lon_coord`
and `lat_coord` values to those times listed in `time_array`.
    coord_type : str
Type of source to calculate coordinates for. Must be one of:
"sidereal" (fixed RA/Dec),
"ephem" (RA/Dec that moves with time),
"driftscan" (fixed az/el position),
"unphased" (alias for "driftscan" with (Az, Alt) = (0 deg, 90 deg)).
time_array : float or ndarray of float or Time object
        Times for which the apparent coordinates are to be calculated, in UTC JD. If more
than a single element, must be the same shape as lon_coord and lat_coord if
both of those are arrays (instead of single floats).
telescope_loc : array-like of floats or EarthLocation
ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
of the array. Can either be provided as an astropy EarthLocation, or a tuple
        of shape (3,) containing (in order) the latitude, longitude, and altitude,
in units of radians, radians, and meters, respectively.
pm_ra : float or ndarray of float
Proper motion in RA of the source, expressed in units of milliarcsec / year.
Can either be a single float or array of shape (Ntimes,), although this must
        be consistent with other parameters (namely lon_coord and lat_coord). Not
required, motion is calculated relative to the value of `coord_epoch`.
pm_dec : float or ndarray of float
Proper motion in Dec of the source, expressed in units of milliarcsec / year.
Can either be a single float or array of shape (Ntimes,), although this must
        be consistent with other parameters (namely lon_coord and lat_coord). Not
required, motion is calculated relative to the value of `coord_epoch`.
vrad : float or ndarray of float
Radial velocity of the source, expressed in units of km / sec. Can either be
a single float or array of shape (Ntimes,), although this must be consistent
        with other parameters (namely lon_coord and lat_coord). Not required.
dist : float or ndarray of float
Distance of the source, expressed in milliarcseconds. Can either be a single
float or array of shape (Ntimes,), although this must be consistent with other
        parameters (namely lon_coord and lat_coord). Not required.
Returns
-------
app_ra : ndarray of floats
Apparent right ascension coordinates, in units of radians.
app_dec : ndarray of floats
Apparent declination coordinates, in units of radians.
"""
if isinstance(telescope_loc, EarthLocation):
site_loc = telescope_loc
else:
site_loc = EarthLocation.from_geodetic(
telescope_loc[1] * (180.0 / np.pi),
telescope_loc[0] * (180.0 / np.pi),
height=telescope_loc[2],
)
# Time objects and unique don't seem to play well together, so we break apart
# their handling here
if isinstance(time_array, Time):
unique_time_array, unique_mask = np.unique(time_array.utc.jd, return_index=True)
else:
unique_time_array, unique_mask = np.unique(time_array, return_index=True)
if coord_type in ["driftscan", "unphased"]:
if lst_array is None:
unique_lst = get_lst_for_time(
unique_time_array,
site_loc.lat.deg,
site_loc.lon.deg,
site_loc.height.to_value("m"),
)
else:
unique_lst = lst_array[unique_mask]
if coord_type == "sidereal":
# If the coordinates are not in the ICRS frame, go ahead and transform them now
if coord_frame != "icrs":
icrs_ra, icrs_dec = transform_sidereal_coords(
lon_coord,
lat_coord,
coord_frame,
"icrs",
in_coord_epoch=coord_epoch,
time_array=unique_time_array,
)
else:
icrs_ra = lon_coord
icrs_dec = lat_coord
unique_app_ra, unique_app_dec = transform_icrs_to_app(
unique_time_array,
icrs_ra,
icrs_dec,
site_loc,
pm_ra=pm_ra,
pm_dec=pm_dec,
vrad=vrad,
dist=dist,
)
elif coord_type == "driftscan":
# Use the ERFA function ae2hd, which will do all the heavy
# lifting for us
unique_app_ha, unique_app_dec = erfa.ae2hd(
lon_coord, lat_coord, site_loc.lat.rad
)
# The above returns HA/Dec, so we just need to rotate by
# the LST to get back app RA and Dec
unique_app_ra = np.mod(unique_app_ha + unique_lst, 2 * np.pi)
unique_app_dec = unique_app_dec + np.zeros_like(unique_app_ra)
elif coord_type == "ephem":
interp_ra, interp_dec, _, _ = interpolate_ephem(
unique_time_array, coord_times, lon_coord, lat_coord,
)
if coord_frame != "icrs":
icrs_ra, icrs_dec = transform_sidereal_coords(
interp_ra,
interp_dec,
coord_frame,
"icrs",
in_coord_epoch=coord_epoch,
time_array=unique_time_array,
)
else:
icrs_ra = interp_ra
icrs_dec = interp_dec
# TODO: Vel and distance handling to be integrated here, once they are are
# needed for velocity frame tracking
unique_app_ra, unique_app_dec = transform_icrs_to_app(
unique_time_array, icrs_ra, icrs_dec, site_loc, pm_ra=pm_ra, pm_dec=pm_dec,
)
elif coord_type == "unphased":
# This is the easiest one - this is just supposed to be ENU, so set the
# apparent coords to the current lst and telescope_lon.
unique_app_ra = unique_lst.copy()
unique_app_dec = np.zeros_like(unique_app_ra) + site_loc.lat.rad
else:
raise ValueError("Object type %s is not recognized." % coord_type)
# Now that we've calculated all the unique values, time to backfill through the
# "redundant" entries in the Nblt axis.
app_ra = np.zeros(np.array(time_array).shape)
app_dec = np.zeros(np.array(time_array).shape)
# Need this promotion in order to match entries
if isinstance(time_array, Time):
unique_time_array = Time(unique_time_array, format="jd", scale="utc")
for idx, unique_time in enumerate(unique_time_array):
select_mask = time_array == unique_time
app_ra[select_mask] = unique_app_ra[idx]
app_dec[select_mask] = unique_app_dec[idx]
return app_ra, app_dec
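# Illustrative usage sketch (not part of the original module): apparent coordinates
# for a fixed (sidereal) ICRS source at three hypothetical times. Requires pyERFA
# and IERS data.
def _example_calc_app_coords():  # pragma: no cover
    return calc_app_coords(
        lon_coord=1.25,
        lat_coord=-0.47,
        coord_type="sidereal",
        time_array=np.array([2459000.1, 2459000.2, 2459000.3]),
        telescope_loc=(-0.53, 0.37, 1073.0),  # (lat rad, lon rad, alt m), hypothetical
    )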
def calc_sidereal_coords(
time_array, app_ra, app_dec, telescope_loc, coord_frame, coord_epoch=None,
):
"""
Calculate sidereal coordinates given apparent coordinates.
This function calculates coordinates in the requested frame (at a given epoch)
from a set of apparent coordinates.
Parameters
----------
time_array : float or ndarray of float or Time object
Times for which the apparent coordinates were calculated, in UTC JD. Must
match the shape of app_ra and app_dec.
app_ra : float or ndarray of float
Array of apparent right ascension coordinates, units of radians. Must match
the shape of time_array and app_dec.
    app_dec : float or ndarray of float
        Array of apparent declination coordinates, units of radians. Must match
        the shape of time_array and app_ra.
telescope_loc : tuple of floats or EarthLocation
ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
of the array. Can either be provided as an astropy EarthLocation, or a tuple
        of shape (3,) containing (in order) the latitude, longitude, and altitude,
in units of radians, radians, and meters, respectively.
coord_frame : string
The requested reference frame for the output coordinates, can be any frame
that is presently supported by astropy. Default is ICRS.
coord_epoch : float or str or Time object
Epoch for ref_frame, nominally only used if converting to either the FK4 or
FK5 frames, in units of fractional years. If provided as a float and the
ref_frame is an FK4-variant, value will assumed to be given in Besselian
years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be
in Julian years.
Returns
-------
ref_ra : ndarray of floats
Right ascension coordinates in the requested frame, in units of radians.
Either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,).
ref_dec : ndarray of floats
Declination coordinates in the requested frame, in units of radians.
Either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,).
"""
# Check to make sure that we have a properly formatted epoch for our in-bound
# coordinate frame
epoch = None
if isinstance(coord_epoch, str) or isinstance(coord_epoch, Time):
# If its a string or a Time object, we don't need to do anything more
epoch = Time(coord_epoch)
elif coord_epoch is not None:
if coord_frame.lower() in ["fk4", "fk4noeterms"]:
epoch = Time(coord_epoch, format="byear")
else:
epoch = Time(coord_epoch, format="jyear")
icrs_ra, icrs_dec = transform_app_to_icrs(
time_array, app_ra, app_dec, telescope_loc
)
if coord_frame == "icrs":
ref_ra, ref_dec = (icrs_ra, icrs_dec)
else:
ref_ra, ref_dec = transform_sidereal_coords(
icrs_ra,
icrs_dec,
"icrs",
coord_frame,
out_coord_epoch=epoch,
time_array=time_array,
)
return ref_ra, ref_dec
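# Illustrative usage sketch (not part of the original module): convert apparent
# coordinates to FK5 at Julian epoch 2000.0 for a hypothetical site.
def _example_calc_sidereal_coords():  # pragma: no cover
    time_array = np.array([2459000.5])
    app_ra = np.array([1.25])
    app_dec = np.array([-0.47])
    telescope_loc = (-0.53, 0.37, 1073.0)  # (lat rad, lon rad, alt m), hypothetical
    return calc_sidereal_coords(
        time_array, app_ra, app_dec, telescope_loc, "fk5", coord_epoch=2000.0
    )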
def get_lst_for_time(
jd_array, latitude, longitude, altitude, astrometry_library="erfa"
):
"""
Get the local apparent sidereal time for a set of jd times at an earth location.
This function calculates the local apparent sidereal time (LAST), given a UTC time
    and a position on the Earth, using the erfa, astropy, or NOVAS libraries. It
    is important to note that there is an approximate 20 microsecond difference
    between the methods, presumably due to small differences in the apparent
    reference frame. These differences will cancel out when calculating coordinates
    in the TOPO frame, so long as apparent coordinates are calculated using the
    same library (i.e., erfa, astropy, or NOVAS). Failing to do so can introduce errors
up to ~1 mas in the horizontal coordinate system (i.e., AltAz).
Parameters
----------
jd_array : ndarray of float
JD times to get lsts for.
latitude : float
Latitude of location to get lst for in degrees.
longitude : float
Longitude of location to get lst for in degrees.
altitude : float
Altitude of location to get lst for in meters.
astrometry_library : str
Library used for running the LST calculations. Allowed options are 'erfa'
        (which uses pyERFA), 'novas' (which uses the python-novas library),
and 'astropy' (which uses the astropy utilities). Default is erfa.
Returns
-------
ndarray of float
LASTs in radians corresponding to the jd_array.
"""
if isinstance(jd_array, np.ndarray):
lst_array = np.zeros_like(jd_array)
else:
lst_array = np.zeros(1)
jd, reverse_inds = np.unique(jd_array, return_inverse=True)
times = Time(
jd,
format="jd",
scale="utc",
location=(Angle(longitude, unit="deg"), Angle(latitude, unit="deg"), altitude),
)
if iers.conf.auto_max_age is None: # pragma: no cover
delta, status = times.get_delta_ut1_utc(return_status=True)
if np.any(
np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))
):
warnings.warn(
"time is out of IERS range, setting delta ut1 utc to "
"extrapolated value"
)
times.delta_ut1_utc = delta
if astrometry_library == "erfa":
# This appears to be what astropy is using under the hood,
# so it _should_ be totally consistent.
gast_array = erfa.gst06a(times.ut1.jd, 0.0, times.tt.jd, 0.0)
lst_array = np.mod(gast_array + (longitude * (np.pi / 180.0)), 2.0 * np.pi)[
reverse_inds
]
elif astrometry_library == "astropy":
lst_array = times.sidereal_time("apparent").radian[reverse_inds]
elif astrometry_library == "novas":
# Import the NOVAS library only if it's needed/available.
try:
from novas import compat as novas
from novas.compat import eph_manager
import novas_de405 # noqa
except ImportError as e: # pragma: no cover
raise ImportError(
"novas and/or novas_de405 are not installed but is required for "
"NOVAS functionality"
) from e
jd_start, jd_end, number = eph_manager.ephem_open()
tt_time_array = times.tt.value
ut1_time_array = times.ut1.value
polar_motion_data = iers.earth_orientation_table.get()
delta_x_array = np.interp(
times.mjd,
polar_motion_data["MJD"].value,
polar_motion_data["dX_2000A_B"].value,
left=0.0,
right=0.0,
)
delta_y_array = np.interp(
times.mjd,
polar_motion_data["MJD"].value,
polar_motion_data["dY_2000A_B"].value,
left=0.0,
right=0.0,
)
# Catch the case where we don't have CIP delta values yet (they don't typically
# have predictive values like the polar motion does)
delta_x_array[np.isnan(delta_x_array)] = 0.0
delta_y_array[np.isnan(delta_y_array)] = 0.0
for idx in range(len(times)):
novas.cel_pole(
tt_time_array[idx], 2, delta_x_array[idx], delta_y_array[idx]
)
# The NOVAS routine will return Greenwich Apparent Sidereal Time (GAST),
# in units of hours
lst_array[reverse_inds == idx] = novas.sidereal_time(
ut1_time_array[idx],
0.0,
(tt_time_array[idx] - ut1_time_array[idx]) * 86400.0,
)
# Add the telescope lon to convert from GAST to LAST (local)
lst_array = np.mod(lst_array + (longitude / 15.0), 24.0)
# Convert from hours back to rad
lst_array *= np.pi / 12.0
return lst_array
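# Illustrative usage sketch (not part of the original module): local apparent sidereal
# times for two JDs at a hypothetical site (latitude/longitude in degrees, altitude
# in meters).
def _example_get_lst_for_time():  # pragma: no cover
    jd_array = np.array([2459000.25, 2459000.50])
    return get_lst_for_time(jd_array, latitude=-30.7, longitude=21.4, altitude=1073.0)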
def _adj_list(vecs, tol, n_blocks=None):
"""Identify neighbors of each vec in vecs, to distance tol."""
n_items = len(vecs)
max_items = 2 ** 10 # Max array size used is max_items**2. Avoid using > 1 GiB
if n_blocks is None:
n_blocks = max(n_items // max_items, 1)
# We may sort blocks so that some pairs of blocks may be skipped.
# Reorder vectors by x.
order = np.argsort(vecs[:, 0])
blocks = np.array_split(order, n_blocks)
adj = [{k} for k in range(n_items)] # Adjacency lists
for b1 in blocks:
for b2 in blocks:
v1, v2 = vecs[b1], vecs[b2]
# Check for no overlap, with tolerance.
xmin1 = v1[0, 0] - tol
xmax1 = v1[-1, 0] + tol
xmin2 = v2[0, 0] - tol
xmax2 = v2[-1, 0] + tol
if max(xmin1, xmin2) > min(xmax1, xmax2):
continue
adj_mat = cdist(vecs[b1], vecs[b2]) < tol
for bi, col in enumerate(adj_mat):
adj[b1[bi]] = adj[b1[bi]].union(b2[col])
return [frozenset(g) for g in adj]
def _find_cliques(adj, strict=False):
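    """Find isolated maximal cliques (fully connected groups) in an adjacency list."""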
n_items = len(adj)
loc_gps = []
visited = np.zeros(n_items, dtype=bool)
for k in range(n_items):
if visited[k]:
continue
a0 = adj[k]
visited[k] = True
if all(adj[it].__hash__() == a0.__hash__() for it in a0):
group = list(a0)
group.sort()
visited[list(a0)] = True
loc_gps.append(group)
# Require all adjacency lists to be isolated maximal cliques:
if strict:
if not all(sorted(st) in loc_gps for st in adj):
raise ValueError("Non-isolated cliques found in graph.")
return loc_gps
def find_clusters(location_ids, location_vectors, tol, strict=False):
"""
Find clusters of vectors (e.g. redundant baselines, times).
Parameters
----------
location_ids : array_like of int
ID labels for locations.
location_vectors : array_like of float
location vectors, can be multidimensional
tol : float
tolerance for clusters
strict : bool
Require that all adjacency lists be isolated maximal cliques.
This ensures that vectors do not fall into multiple clusters.
Default: False
Returns
-------
list of list of location_ids
"""
location_vectors = np.asarray(location_vectors)
location_ids = np.asarray(location_ids)
if location_vectors.ndim == 1:
location_vectors = location_vectors[:, np.newaxis]
adj = _adj_list(location_vectors, tol) # adj = list of sets
loc_gps = _find_cliques(adj, strict=strict)
loc_gps = [np.sort(location_ids[gp]).tolist() for gp in loc_gps]
return loc_gps
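# Illustrative usage sketch (not part of the original module): two well-separated
# clusters of 2-D vectors, grouped with a 0.5 tolerance.
def _example_find_clusters():  # pragma: no cover
    location_ids = np.array([0, 1, 2, 3])
    location_vectors = np.array([[0.0, 0.0], [0.1, 0.0], [10.0, 0.0], [10.1, 0.0]])
    # Expected grouping: [[0, 1], [2, 3]]
    return find_clusters(location_ids, location_vectors, tol=0.5)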
def get_baseline_redundancies(baselines, baseline_vecs, tol=1.0, with_conjugates=False):
"""
Find redundant baseline groups.
Parameters
----------
baselines : array_like of int
Baseline numbers, shape (Nbls,)
baseline_vecs : array_like of float
Baseline vectors in meters, shape (Nbls, 3)
tol : float
Absolute tolerance of redundancy, in meters.
with_conjugates : bool
Option to include baselines that are redundant when flipped.
Returns
-------
baseline_groups : list of lists of int
list of lists of redundant baseline numbers
vec_bin_centers : list of array_like of float
List of vectors describing redundant group centers
lengths : list of float
List of redundant group baseline lengths in meters
baseline_ind_conj : list of int
List of baselines that are redundant when reversed. Only returned if
with_conjugates is True
"""
Nbls = baselines.shape[0]
if not baseline_vecs.shape == (Nbls, 3):
raise ValueError("Baseline vectors must be shape (Nbls, 3)")
baseline_vecs = copy.copy(baseline_vecs) # Protect the vectors passed in.
if with_conjugates:
conjugates = []
for bv in baseline_vecs:
uneg = bv[0] < -tol
uzer = np.isclose(bv[0], 0.0, atol=tol)
vneg = bv[1] < -tol
vzer = np.isclose(bv[1], 0.0, atol=tol)
wneg = bv[2] < -tol
conjugates.append(uneg or (uzer and vneg) or (uzer and vzer and wneg))
conjugates = np.array(conjugates, dtype=bool)
baseline_vecs[conjugates] *= -1
baseline_ind_conj = baselines[conjugates]
bl_gps, vec_bin_centers, lens = get_baseline_redundancies(
baselines, baseline_vecs, tol=tol, with_conjugates=False
)
return bl_gps, vec_bin_centers, lens, baseline_ind_conj
try:
bl_gps = find_clusters(baselines, baseline_vecs, tol, strict=True)
except ValueError as exc:
raise ValueError(
"Some baselines are falling into multiple"
" redundant groups. Lower the tolerance to resolve ambiguity."
) from exc
n_unique = len(bl_gps)
vec_bin_centers = np.zeros((n_unique, 3))
for gi, gp in enumerate(bl_gps):
inds = [np.where(i == baselines)[0] for i in gp]
vec_bin_centers[gi] = np.mean(baseline_vecs[inds, :], axis=0)
lens = np.sqrt(np.sum(vec_bin_centers ** 2, axis=1))
return bl_gps, vec_bin_centers, lens
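# Illustrative usage sketch (not part of the original module): three baseline vectors,
# two of which are redundant to within the 1 m tolerance.
def _example_get_baseline_redundancies():  # pragma: no cover
    baselines = np.array([101, 102, 103])
    baseline_vecs = np.array([[10.0, 0.0, 0.0], [10.2, 0.0, 0.0], [0.0, 25.0, 0.0]])
    # Expected groups: [[101, 102], [103]]
    return get_baseline_redundancies(baselines, baseline_vecs, tol=1.0)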
def get_antenna_redundancies(
antenna_numbers, antenna_positions, tol=1.0, include_autos=False
):
"""
Find redundant baseline groups based on antenna positions.
Parameters
----------
antenna_numbers : array_like of int
Antenna numbers, shape (Nants,).
antenna_positions : array_like of float
Antenna position vectors in the ENU (topocentric) frame in meters,
shape (Nants, 3).
tol : float
Redundancy tolerance in meters.
include_autos : bool
Option to include autocorrelations.
Returns
-------
baseline_groups : list of lists of int
list of lists of redundant baseline numbers
vec_bin_centers : list of array_like of float
List of vectors describing redundant group centers
lengths : list of float
List of redundant group baseline lengths in meters
Notes
-----
The baseline numbers refer to antenna pairs (a1, a2) such that
the baseline vector formed from ENU antenna positions,
blvec = enu[a1] - enu[a2]
is close to the other baselines in the group.
This is achieved by putting baselines in a form of the u>0
convention, but with a tolerance in defining the signs of
vector components.
    To guarantee that the same baseline numbers are present in a UVData
    object, call ``UVData.conjugate_bls('u>0', uvw_tol=tol)``, where `tol` is
the tolerance used here.
"""
Nants = antenna_numbers.size
bls = []
bl_vecs = []
for aj in range(Nants):
mini = aj + 1
if include_autos:
mini = aj
for ai in range(mini, Nants):
anti, antj = antenna_numbers[ai], antenna_numbers[aj]
bidx = antnums_to_baseline(antj, anti, Nants)
bv = antenna_positions[ai] - antenna_positions[aj]
bl_vecs.append(bv)
bls.append(bidx)
bls = np.array(bls)
bl_vecs = np.array(bl_vecs)
gps, vecs, lens, conjs = get_baseline_redundancies(
bls, bl_vecs, tol=tol, with_conjugates=True
)
# Flip the baselines in the groups.
for gi, gp in enumerate(gps):
for bi, bl in enumerate(gp):
if bl in conjs:
gps[gi][bi] = baseline_index_flip(bl, Nants)
return gps, vecs, lens
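# Illustrative usage sketch (not part of the original module): redundant groups for
# three antennas on an east-west line with 5 m spacing (ENU positions in meters).
def _example_get_antenna_redundancies():  # pragma: no cover
    antenna_numbers = np.array([0, 1, 2])
    antenna_positions = np.array([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0], [10.0, 0.0, 0.0]])
    return get_antenna_redundancies(antenna_numbers, antenna_positions, tol=0.5)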
def mean_collapse(
arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
"""
Collapse by averaging data.
This is similar to np.average, except it handles infs (by giving them
zero weight) and zero weight axes (by forcing result to be inf with zero
output weight).
Parameters
----------
arr : array
Input array to process.
weights: ndarray, optional
weights for average. If none, will default to equal weight for all
non-infinite data.
axis : int or tuple, optional
Axis or axes to collapse (passed to np.sum). Default is all.
return_weights : bool
Whether to return sum of weights.
return_weights_square: bool
Whether to return the sum of the square of the weights. Default is False.
"""
arr = copy.deepcopy(arr) # avoid changing outside
if weights is None:
weights = np.ones_like(arr)
else:
weights = copy.deepcopy(weights)
weights = weights * np.logical_not(np.isinf(arr))
arr[np.isinf(arr)] = 0
weight_out = np.sum(weights, axis=axis)
if return_weights_square:
weights_square = weights ** 2
weights_square_out = np.sum(weights_square, axis=axis)
out = np.sum(weights * arr, axis=axis)
where = weight_out > 1e-10
out = np.true_divide(out, weight_out, where=where)
out = np.where(where, out, np.inf)
if return_weights and return_weights_square:
return out, weight_out, weights_square_out
elif return_weights:
return out, weight_out
elif return_weights_square:
return out, weights_square_out
else:
return out
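# Illustrative usage sketch (not part of the original module): weighted mean over the
# last axis; the inf entry is given zero weight automatically.
def _example_mean_collapse():  # pragma: no cover
    arr = np.array([[1.0, 2.0, np.inf], [4.0, 5.0, 6.0]])
    weights = np.array([[1.0, 3.0, 1.0], [1.0, 1.0, 1.0]])
    # Returns (array([1.75, 5.0]), array([4.0, 3.0]))
    return mean_collapse(arr, weights=weights, axis=1, return_weights=True)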
def absmean_collapse(
arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
"""
Collapse by averaging absolute value of data.
Parameters
----------
arr : array
Input array to process.
weights: ndarray, optional
weights for average. If none, will default to equal weight for all
non-infinite data.
axis : int or tuple, optional
Axis or axes to collapse (passed to np.sum). Default is all.
return_weights : bool
Whether to return sum of weights.
return_weights_square: bool
whether to return the sum of the squares of the weights. Default is False.
"""
return mean_collapse(
np.abs(arr),
weights=weights,
axis=axis,
return_weights=return_weights,
return_weights_square=return_weights_square,
)
def quadmean_collapse(
arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
"""
Collapse by averaging in quadrature.
Parameters
----------
arr : array
Input array to process.
weights: ndarray, optional
weights for average. If none, will default to equal weight for all
non-infinite data.
axis : int or tuple, optional
Axis or axes to collapse (passed to np.sum). Default is all.
return_weights : bool
Whether to return sum of weights.
return_weights_square: bool
whether to return the sum of the squares of the weights. Default is False.
"""
out = mean_collapse(
np.abs(arr) ** 2,
weights=weights,
axis=axis,
return_weights=return_weights,
return_weights_square=return_weights_square,
)
if return_weights and return_weights_square:
return np.sqrt(out[0]), out[1], out[2]
elif return_weights or return_weights_square:
return np.sqrt(out[0]), out[1]
else:
return np.sqrt(out)
def or_collapse(
arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
"""
Collapse using OR operation.
Parameters
----------
arr : array
Input array to process.
weights: ndarray, optional
NOT USED, but kept for symmetry with other collapsing functions.
axis : int or tuple, optional
Axis or axes to collapse (take OR over). Default is all.
return_weights : bool
Whether to return dummy weights array.
NOTE: the dummy weights will simply be an array of ones
return_weights_square: bool
NOT USED, but kept for symmetry with other collapsing functions.
"""
if arr.dtype != np.bool_:
raise ValueError("Input to or_collapse function must be boolean array")
out = np.any(arr, axis=axis)
if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
warnings.warn("Currently weights are not handled when OR-ing boolean arrays.")
if return_weights:
return out, np.ones_like(out, dtype=np.float64)
else:
return out
def and_collapse(
arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
"""
Collapse using AND operation.
Parameters
----------
arr : array
Input array to process.
weights: ndarray, optional
NOT USED, but kept for symmetry with other collapsing functions.
axis : int or tuple, optional
Axis or axes to collapse (take AND over). Default is all.
return_weights : bool
Whether to return dummy weights array.
NOTE: the dummy weights will simply be an array of ones
return_weights_square: bool
NOT USED, but kept for symmetry with other collapsing functions.
"""
if arr.dtype != np.bool_:
raise ValueError("Input to and_collapse function must be boolean array")
out = np.all(arr, axis=axis)
if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
warnings.warn("Currently weights are not handled when AND-ing boolean arrays.")
if return_weights:
return out, np.ones_like(out, dtype=np.float64)
else:
return out
def collapse(
arr, alg, weights=None, axis=None, return_weights=False, return_weights_square=False
):
"""
Parent function to collapse an array with a given algorithm.
Parameters
----------
arr : array
Input array to process.
alg : str
Algorithm to use. Must be defined in this function with
corresponding subfunction above.
weights: ndarray, optional
weights for collapse operation (e.g. weighted mean).
NOTE: Some subfunctions do not use the weights. See corresponding
doc strings.
axis : int or tuple, optional
Axis or axes to collapse. Default is all.
return_weights : bool
Whether to return sum of weights.
return_weights_square: bool
Whether to return the sum of the squares of the weights. Default is False.
"""
collapse_dict = {
"mean": mean_collapse,
"absmean": absmean_collapse,
"quadmean": quadmean_collapse,
"or": or_collapse,
"and": and_collapse,
}
try:
out = collapse_dict[alg](
arr,
weights=weights,
axis=axis,
return_weights=return_weights,
return_weights_square=return_weights_square,
)
except KeyError:
raise ValueError(
"Collapse algorithm must be one of: "
+ ", ".join(collapse_dict.keys())
+ "."
)
return out
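# Illustrative usage sketch (not part of the original module): dispatch to the "or"
# collapse over the first axis of a small boolean flag array.
def _example_collapse():  # pragma: no cover
    flags = np.array([[True, False], [False, False]])
    # Returns array([ True, False])
    return collapse(flags, "or", axis=0)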
def uvcalibrate(
uvdata,
uvcal,
inplace=True,
prop_flags=True,
Dterm_cal=False,
flip_gain_conj=False,
delay_convention="minus",
undo=False,
time_check=True,
ant_check=True,
):
"""
Calibrate a UVData object with a UVCal object.
Parameters
----------
uvdata : UVData object
UVData object to calibrate.
uvcal : UVCal object
UVCal object containing the calibration.
inplace : bool, optional
if True edit uvdata in place, else return a calibrated copy
prop_flags : bool, optional
        If True, propagate calibration flags to the data flags and do not use
        flagged gains. Otherwise, use flagged gains and do not propagate
        calibration flags to the data flags.
Dterm_cal : bool, optional
Calibrate the off-diagonal terms in the Jones matrix if present
in uvcal. Default is False. Currently not implemented.
flip_gain_conj : bool, optional
This function uses the UVData ant_1_array and ant_2_array to specify the
antennas in the UVCal object. By default, the conjugation convention, which
follows the UVData convention (i.e. ant2 - ant1), is that the applied
gain = ant1_gain * conjugate(ant2_gain). If the other convention is required,
set flip_gain_conj=True.
delay_convention : str, optional
Exponent sign to use in conversion of 'delay' to 'gain' cal_type
        if the input uvcal is not inherently 'gain' cal_type. Defaults to 'minus'.
undo : bool, optional
If True, undo the provided calibration. i.e. apply the calibration with
flipped gain_convention. Flag propagation rules apply the same.
time_check : bool
Option to check that times match between the UVCal and UVData
objects if UVCal has a single time or time range. Times are always
checked if UVCal has multiple times.
ant_check : bool
Option to check that all antennas with data on the UVData
object have calibration solutions in the UVCal object. If this option is
set to False, uvcalibrate will proceed without erroring and data for
antennas without calibrations will be flagged.
Returns
-------
UVData, optional
Returns if not inplace
"""
if not inplace:
uvdata = uvdata.copy()
# Check whether the UVData antennas *that have data associated with them*
# have associated data in the UVCal object
uvdata_unique_nums = np.unique(np.append(uvdata.ant_1_array, uvdata.ant_2_array))
uvdata.antenna_names = np.asarray(uvdata.antenna_names)
uvdata_used_antnames = np.array(
[
uvdata.antenna_names[np.where(uvdata.antenna_numbers == antnum)][0]
for antnum in uvdata_unique_nums
]
)
uvcal_unique_nums = np.unique(uvcal.ant_array)
uvcal.antenna_names = np.asarray(uvcal.antenna_names)
uvcal_used_antnames = np.array(
[
uvcal.antenna_names[np.where(uvcal.antenna_numbers == antnum)][0]
for antnum in uvcal_unique_nums
]
)
ant_arr_match = uvcal_used_antnames.tolist() == uvdata_used_antnames.tolist()
if not ant_arr_match:
# check more carefully
name_missing = []
for this_ant_name in uvdata_used_antnames:
wh_ant_match = np.nonzero(uvcal_used_antnames == this_ant_name)
if wh_ant_match[0].size == 0:
name_missing.append(this_ant_name)
if len(name_missing) > 0:
if len(name_missing) == uvdata_used_antnames.size:
# all antenna_names with data on UVData are missing on UVCal.
if not ant_check:
warnings.warn(
"All antenna names with data on UVData are missing "
"on UVCal. Since ant_check is False, calibration will "
"proceed but all data will be flagged."
)
else:
raise ValueError(
"All antenna names with data on UVData are missing "
"on UVCal. To continue with calibration "
"(and flag all the data), set ant_check=False."
)
else:
# Only some antenna_names with data on UVData are missing on UVCal
if not ant_check:
warnings.warn(
f"Antennas {name_missing} have data on UVData but are missing "
"on UVCal. Since ant_check is False, calibration will "
"proceed and the data for these antennas will be flagged."
)
else:
raise ValueError(
f"Antennas {name_missing} have data on UVData but "
"are missing on UVCal. To continue calibration and "
"flag the data from missing antennas, set ant_check=False."
)
uvdata_times = np.unique(uvdata.time_array)
downselect_cal_times = False
if uvcal.Ntimes > 1:
if uvcal.Ntimes < uvdata.Ntimes:
raise ValueError(
"The uvcal object has more than one time but fewer than the "
"number of unique times on the uvdata object."
)
uvcal_times = np.unique(uvcal.time_array)
try:
time_arr_match = np.allclose(
uvcal_times,
uvdata_times,
atol=uvdata._time_array.tols[1],
rtol=uvdata._time_array.tols[0],
)
except ValueError:
time_arr_match = False
if not time_arr_match:
# check more carefully
uvcal_times_to_keep = []
for this_time in uvdata_times:
wh_time_match = np.nonzero(
np.isclose(
uvcal.time_array - this_time,
0,
atol=uvdata._time_array.tols[1],
rtol=uvdata._time_array.tols[0],
)
)
if wh_time_match[0].size > 0:
uvcal_times_to_keep.append(uvcal.time_array[wh_time_match][0])
else:
raise ValueError(
f"Time {this_time} exists on UVData but not on UVCal."
)
if len(uvcal_times_to_keep) < uvcal.Ntimes:
downselect_cal_times = True
elif uvcal.time_range is None:
# only one UVCal time, no time_range.
# This cannot match if UVData.Ntimes > 1.
# If they are both NTimes = 1, then check if they're close.
if uvdata.Ntimes > 1 or not np.isclose(
uvdata_times,
uvcal.time_array,
atol=uvdata._time_array.tols[1],
rtol=uvdata._time_array.tols[0],
):
if not time_check:
warnings.warn(
"Times do not match between UVData and UVCal "
"but time_check is False, so calibration "
"will be applied anyway."
)
else:
raise ValueError(
"Times do not match between UVData and UVCal. "
"Set time_check=False to apply calibration anyway."
)
else:
# time_array is length 1 and time_range exists: check uvdata_times in time_range
if (
np.min(uvdata_times) < uvcal.time_range[0]
or np.max(uvdata_times) > uvcal.time_range[1]
):
if not time_check:
warnings.warn(
"Times do not match between UVData and UVCal "
"but time_check is False, so calibration "
"will be applied anyway."
)
else:
raise ValueError(
"Times do not match between UVData and UVCal. "
"Set time_check=False to apply calibration anyway. "
)
downselect_cal_freq = False
if uvdata.future_array_shapes:
uvdata_freq_arr_use = uvdata.freq_array
else:
uvdata_freq_arr_use = uvdata.freq_array[0, :]
try:
freq_arr_match = np.allclose(
np.sort(uvcal.freq_array[0, :]),
np.sort(uvdata_freq_arr_use),
atol=uvdata._freq_array.tols[1],
rtol=uvdata._freq_array.tols[0],
)
except ValueError:
freq_arr_match = False
if freq_arr_match is False:
# check more carefully
uvcal_freqs_to_keep = []
for this_freq in uvdata_freq_arr_use:
wh_freq_match = np.nonzero(
np.isclose(
uvcal.freq_array - this_freq,
0,
atol=uvdata._freq_array.tols[1],
rtol=uvdata._freq_array.tols[0],
)
)
if wh_freq_match[0].size > 0:
uvcal_freqs_to_keep.append(uvcal.freq_array[wh_freq_match][0])
else:
raise ValueError(
f"Frequency {this_freq} exists on UVData but not on UVCal."
)
if len(uvcal_freqs_to_keep) < uvcal.Nfreqs:
downselect_cal_freq = True
uvdata_pol_strs = polnum2str(
uvdata.polarization_array, x_orientation=uvdata.x_orientation
)
uvcal_pol_strs = jnum2str(uvcal.jones_array, x_orientation=uvcal.x_orientation)
uvdata_feed_pols = {
feed for pol in uvdata_pol_strs for feed in POL_TO_FEED_DICT[pol]
}
for feed in uvdata_feed_pols:
# get diagonal jones str
jones_str = parse_jpolstr(feed, x_orientation=uvcal.x_orientation)
if jones_str not in uvcal_pol_strs:
raise ValueError(
f"Feed polarization {feed} exists on UVData but not on UVCal. "
)
# downselect UVCal times, frequencies
if downselect_cal_freq or downselect_cal_times:
if not downselect_cal_times:
uvcal_times_to_keep = None
elif not downselect_cal_freq:
uvcal_freqs_to_keep = None
uvcal_use = uvcal.select(
times=uvcal_times_to_keep, frequencies=uvcal_freqs_to_keep, inplace=False
)
new_uvcal = True
else:
uvcal_use = uvcal
new_uvcal = False
# input checks
if uvcal_use.cal_type == "delay":
if not new_uvcal:
# make a copy to convert to gain
uvcal_use = uvcal_use.copy()
new_uvcal = True
uvcal_use.convert_to_gain(delay_convention=delay_convention)
# D-term calibration
if Dterm_cal:
# check for D-terms
if -7 not in uvcal_use.jones_array and -8 not in uvcal_use.jones_array:
raise ValueError(
"Cannot apply D-term calibration without -7 or -8"
"Jones polarization in uvcal object."
)
raise NotImplementedError("D-term calibration is not yet implemented.")
# No D-term calibration
else:
# key is number, value is name
uvdata_ant_dict = dict(zip(uvdata.antenna_numbers, uvdata.antenna_names))
# opposite: key is name, value is number
uvcal_ant_dict = dict(zip(uvcal.antenna_names, uvcal.antenna_numbers))
# iterate over keys
for key in uvdata.get_antpairpols():
# get indices for this key
blt_inds = uvdata.antpair2ind(key)
pol_ind = np.argmin(
np.abs(
uvdata.polarization_array - polstr2num(key[2], uvdata.x_orientation)
)
)
# try to get gains for each antenna
ant1_num = key[0]
ant2_num = key[1]
feed1, feed2 = POL_TO_FEED_DICT[key[2]]
try:
uvcal_ant1_num = uvcal_ant_dict[uvdata_ant_dict[ant1_num]]
except KeyError:
uvcal_ant1_num = None
try:
uvcal_ant2_num = uvcal_ant_dict[uvdata_ant_dict[ant2_num]]
except KeyError:
uvcal_ant2_num = None
uvcal_key1 = (uvcal_ant1_num, feed1)
uvcal_key2 = (uvcal_ant2_num, feed2)
if (uvcal_ant1_num is None or uvcal_ant2_num is None) or not (
uvcal_use._has_key(*uvcal_key1) and uvcal_use._has_key(*uvcal_key2)
):
if uvdata.future_array_shapes:
uvdata.flag_array[blt_inds, :, pol_ind] = True
else:
uvdata.flag_array[blt_inds, 0, :, pol_ind] = True
continue
if flip_gain_conj:
gain = (
np.conj(uvcal_use.get_gains(uvcal_key1))
* uvcal_use.get_gains(uvcal_key2)
                ).T # transpose to match uvdata shape
else:
gain = (
uvcal_use.get_gains(uvcal_key1)
* np.conj(uvcal_use.get_gains(uvcal_key2))
                ).T # transpose to match uvdata shape
flag = (uvcal_use.get_flags(uvcal_key1) | uvcal_use.get_flags(uvcal_key2)).T
# propagate flags
if prop_flags:
mask = np.isclose(gain, 0.0) | flag
gain[mask] = 1.0
if uvdata.future_array_shapes:
uvdata.flag_array[blt_inds, :, pol_ind] += mask
else:
uvdata.flag_array[blt_inds, 0, :, pol_ind] += mask
# apply to data
mult_gains = uvcal_use.gain_convention == "multiply"
if undo:
mult_gains = not mult_gains
if uvdata.future_array_shapes:
if mult_gains:
uvdata.data_array[blt_inds, :, pol_ind] *= gain
else:
uvdata.data_array[blt_inds, :, pol_ind] /= gain
else:
if mult_gains:
uvdata.data_array[blt_inds, 0, :, pol_ind] *= gain
else:
uvdata.data_array[blt_inds, 0, :, pol_ind] /= gain
# update attributes
uvdata.history += "\nCalibrated with pyuvdata.utils.uvcalibrate."
if undo:
uvdata.vis_units = "uncalib"
else:
if uvcal_use.gain_scale is not None:
uvdata.vis_units = uvcal_use.gain_scale
if not inplace:
return uvdata
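# Illustrative usage sketch (not part of the original module): apply a calibration to
# a copy of a UVData object and then undo it. The `uvd` and `uvc` arguments are
# assumed to be UVData/UVCal objects read in elsewhere.
def _example_uvcalibrate(uvd, uvc):  # pragma: no cover
    calibrated = uvcalibrate(uvd, uvc, inplace=False)
    roundtrip = uvcalibrate(calibrated, uvc, inplace=False, undo=True)
    return calibrated, roundtrip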
def apply_uvflag(
uvd, uvf, inplace=True, unflag_first=False, flag_missing=True, force_pol=True
):
"""
Apply flags from a UVFlag to a UVData instantiation.
Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across
that axis.
Parameters
----------
uvd : UVData object
UVData object to add flags to.
uvf : UVFlag object
A UVFlag object in flag mode.
inplace : bool
If True overwrite flags in uvd, otherwise return new object
unflag_first : bool
If True, completely unflag the UVData before applying flags.
Else, OR the inherent uvd flags with uvf flags.
flag_missing : bool
If input uvf is a baseline type and antpairs in uvd do not exist in uvf,
flag them in uvd. Otherwise leave them untouched.
force_pol : bool
If True, broadcast flags to all polarizations if they do not match.
Only works if uvf.Npols == 1.
Returns
-------
UVData
If not inplace, returns new UVData object with flags applied
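
    Examples
    --------
    Minimal usage sketch; ``uvd`` is an existing UVData object and ``uvf`` a
    UVFlag object already in flag mode::

        apply_uvflag(uvd, uvf, unflag_first=False, flag_missing=True)

        # or return a new object instead of editing uvd in place
        uvd_flagged = apply_uvflag(uvd, uvf, inplace=False)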
"""
# assertions
if uvf.mode != "flag":
raise ValueError("UVFlag must be flag mode")
if not inplace:
uvd = uvd.copy()
# make a deepcopy by default b/c it is generally edited inplace downstream
uvf = uvf.copy()
# convert to baseline type
if uvf.type != "baseline":
# edits inplace
uvf.to_baseline(uvd, force_pol=force_pol)
else:
# make sure polarizations match or force_pol
uvd_pols, uvf_pols = (
uvd.polarization_array.tolist(),
uvf.polarization_array.tolist(),
)
if set(uvd_pols) != set(uvf_pols):
if uvf.Npols == 1 and force_pol:
# if uvf is 1pol we can make them match: also edits inplace
uvf.polarization_array = uvd.polarization_array
uvf.Npols = len(uvf.polarization_array)
uvf_pols = uvf.polarization_array.tolist()
else:
raise ValueError("Input uvf and uvd polarizations do not match")
# make sure polarization ordering is correct: also edits inplace
uvf.polarization_array = uvf.polarization_array[
[uvd_pols.index(pol) for pol in uvf_pols]
]
# check time and freq shapes match: if Ntimes or Nfreqs is 1, allow
# implicit broadcasting
if uvf.Ntimes == 1:
mismatch_times = False
elif uvf.Ntimes == uvd.Ntimes:
        tdiff = np.unique(uvf.time_array) - np.unique(uvd.time_array)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import pandas as pd
from sklearn.metrics import confusion_matrix
from .confusion_matrix import pretty_plot_confusion_matrix
def create_ara(c, steps=10):
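    """Return `steps` evenly spaced values spanning [min(c), max(c)).

    Falls back to an array of ones when c has zero range.
    """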
mini = np.min(c)
maxi = np.max(c)
if (maxi-mini) > 0:
ara = np.arange(start=mini, stop=maxi, step=(maxi-mini)/steps)
else:
ara = np.ones(steps)
return ara
def plot_constant_prototypes(prototypes,
directory='figures\\',
title='',
xlabel='Features [-]',
ylabel='Value [-]',
size=[6.2992, 2],
fname='constant_prototypes',
save=False,
single_figures=False):
colo = matplotlib.cm.YlOrBr(255)
if single_figures:
for proto in prototypes:
plt.figure(figsize=size, tight_layout=True)
plt.plot(proto, c=colo)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if save:
plt.savefig(directory + fname + '.png', dpi=500)
else:
plt.figure(figsize=size, tight_layout=True)
marker = ['-', '--', ':', '-.']
for i, proto in enumerate(prototypes):
plt.plot(proto, linestyle=marker[i], c=colo)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if save:
plt.savefig(directory + fname + '.png', dpi=500)
def plot_prototypes_over_env_2d(
signal,
ara,
fname,
xlabel='Features [-]',
ylabel='$v [m/s]$',
zlabel='Value [-]',
        title='Prototypes sorted by velocity',
directory='figures\\',
size=[6.2992, 2],
save=False):
"""
Plot single Signal/Prototype per class
"""
plt.figure(tight_layout=True, figsize=size)
cm = matplotlib.cm
colormaps = [cm.YlOrBr, cm.Reds, cm.Greens]
c_offset = 30
index_c = np.arange(start=c_offset, stop=255 + (255-c_offset)//len(signal),
step=(255-c_offset)//len(signal))
c = [colormaps[0](i) for i in index_c]
for i in range(len(signal)):
plt.plot(signal[i], color=c[i+1], # color=c2+i/10*d_c,
label='v = ' + str(int(ara[i]))+'km/h')
plt.ylabel(ylabel, fontsize=11)
plt.xlabel(xlabel, fontsize=11)
if save:
plt.savefig(directory + fname + '_protos_vis_2d' + '.png', dpi=1200)
def plot_multi_prototypes_2d_over_env(
signal_list,
ara,
fname,
xlabel='$Features [-]$',
ylabel='$Value [-]$',
        title='Prototypes sorted by velocity',
directory='figures\\',
size=[6.2992, 2],
save=False):
"""
Plot multiple Signals/Prototypes per class
"""
plt.figure(figsize=size, tight_layout=True)
cm = matplotlib.cm
colormaps = [cm.Blues, cm.Reds, cm.Greens]
c_offset = 50
for j, signal in enumerate(signal_list):
# d_c = c[j] - c2[j]
index_c = np.arange(start=c_offset,
stop=255 + (255-c_offset)//len(signal),
step=(255-c_offset)//len(signal))
c = [colormaps[j](i) for i in index_c]
for i in range(len(signal)-1):
plt.plot(signal[i], color=c[i+1])
i = i + 1
plt.plot(signal[i], color=c[i+1],
label='class' + str(j))
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
if save:
plt.savefig(directory + fname + '_protos_vis_2d_multi' + '.png',
dpi=1200)
def plot_prototypes_over_env_3d(
signal,
ara,
fname,
xlabel='Features [-]',
ylabel='$v [m/s]$',
zlabel='Value [-]',
        title='Prototypes sorted by velocity',
directory='figures\\',
size=[20, 10],
save=False):
"""
Plot multiple Signals/Prototypes per class in 3d
"""
plt.figure(figsize=size, constrained_layout=True)
ax = plt.axes(projection='3d')
length = signal.shape[1]
    x, y = np.meshgrid(np.arange(length), ara)  # call truncated in the source; second argument (ara) assumed
import cv2
import numpy as np
def canny(image):
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
#canny = cv2.Canny(blur,low_threshold,high_threshold)
canny = cv2.Canny(blur,50,150)
return canny
def region_of_interest(image):
height = image.shape[0]
polygons = np.array([
[(200,height),(1100,height),(550,250)]
])
mask = np.zeros_like(image)
cv2.fillPoly(mask,polygons,255)
masked_image = cv2.bitwise_and(image,mask)
return masked_image
def average_slope_intercept(image,lines):
left_fit = []
right_fit = []
for line in lines:
print(line)
x1,y1,x2,y2 = line.reshape(4)
        parameters = np.polyfit((x1,x2),(y1,y2),1)
import numpy as np
import h5py as h
import matplotlib as mpl
mpl.use('Qt5Agg')
"""
USEFUL METHODS.
"""
def toPoint(point):
point = np.array(point)
point[1] *= -1
return point-250
def toIndex(point):
point = np.array(point)
point[1] *= -1
return point+250
"""
START OF MASK STUFF.
"""
import imageio as io
arr = io.read('../scratch/testMask.png').get_data(0)
arr = np.int64(np.all(arr[:, :, :3] == 0, axis=2))
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Circle,Rectangle
# N pixels per mm.
pixelSize = 10
# Beam height in mm converted to pixels.
beamHeight = 0.5
# Look at beam position (center position in mm).
_lookAt = 10
_beamB = _lookAt - beamHeight/2
_beamT = _lookAt + beamHeight/2
# Initialise mask start and stop positions.
# Start and stop are index positions of the array.
start = [0,0]
stop = [0,0]
# Find mask horizontal start and stop positions.
for row in range(arr.shape[0]):
# Find the first row with a 0 in it.
if np.sum(arr[row,:]) != arr.shape[0]:
# Find the middle position of all the values that are 0.
middle = np.argwhere(arr[row,:] == 0).mean()
# Store the start position.
start = [row,middle]
break
for row in reversed(range(arr.shape[0])):
if np.sum(arr[row,:]) != arr.shape[0]:
middle = np.argwhere(arr[row,:] == 0).mean()
stop = [row,middle]
break
# Set diameter for the mask (in mm).
_radius = 25
# Create datapoints for two half circles in degrees.
leftCircleAngle = np.linspace(90, 270, 2000)
rightCircleAngle = np.linspace(90, -90, 2000)
# Find the tangent values of the points in each half circle.
leftCircleTangent = np.tan(np.deg2rad(leftCircleAngle))
rightCircleTangent = np.tan(np.deg2rad(rightCircleAngle))
# Get subArray of mask.
# subArray = arr[b:t,:]
# Investigate beam area:
_bt = int( np.absolute(25-_beamT)*10 )
_bb = int( np.absolute(25-_beamB)*10 )
subArray = arr[_bt:_bb,:]
# Get the top and bottom line of the sub array.
line1 = subArray[0,:]
line2 = subArray[-1,:]
# Find the left and right most points for each line.
line1 = np.argwhere(line1 == 0)
line2 = np.argwhere(line2 == 0)
tl = line1.min()
tr = line1.max()
bl = line2.min()
br = line2.max()
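# The horizontal shift of each mask edge between the top and bottom of the beam
# window (converted to mm via pixelSize) over the beam height gives the slope of
# that edge; matching it against the precomputed circle tangents below locates
# the corresponding angular position on the circular mask aperture.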
# Calculate the tangent for each side.
left = np.arctan(((tl-bl)/pixelSize)/beamHeight)
right = np.arctan(((tr-br)/pixelSize)/beamHeight)
# Find the tangent condition that matches in the circle.
leftAngle = np.deg2rad(leftCircleAngle[ np.argmin(np.absolute(leftCircleTangent-left)) ])
rightAngle = np.deg2rad(rightCircleAngle[ np.argmin(np.absolute(rightCircleTangent-right)) ])
# Find the position of the mask that matches the tangent condition.
circleLeftPosition = np.array([_radius*np.cos(leftAngle),-_radius*np.sin(leftAngle)])
circleRightPosition = np.array([_radius*np.cos(rightAngle),-_radius*np.sin(rightAngle)])
# Get the position of the matched pixel.
x1 = (0 + np.min(np.array([tl,bl])) + np.absolute(tl-bl))  # truncated in the source after np.absolute(tl-bl); closing parenthesis assumed
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import openvino.runtime as ov
import openvino.runtime.opset8 as ops
from openvino.runtime import Model, Output, Type
from openvino.runtime.utils.decorators import custom_preprocess_function
from openvino.runtime import Core
from tests.runtime import get_runtime
from openvino.preprocess import PrePostProcessor, ColorFormat, ResizeAlgorithm
def test_ngraph_preprocess_mean():
shape = [2, 2]
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
model = parameter_a
function = Model(model, [parameter_a], "TestFunction")
p = PrePostProcessor(function)
inp = p.input()
prep = inp.preprocess()
prep.mean(1.0)
function = p.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32)
expected_output = np.array([[0, 1], [2, 3]]).astype(np.float32)
runtime = get_runtime()
computation = runtime.computation(function)
output = computation(input_data)
assert np.equal(output, expected_output).all()
def test_ngraph_preprocess_mean_vector():
shape = [2, 2]
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
model = parameter_a
function = Model(model, [parameter_a], "TestFunction")
layout = ov.Layout("NCHW")
p = PrePostProcessor(function)
p.input().tensor().set_layout(layout)
p.input().preprocess().mean([1., 2.])
function = p.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32)
expected_output = np.array([[0, 0], [2, 2]]).astype(np.float32)
runtime = get_runtime()
computation = runtime.computation(function)
output = computation(input_data)
assert np.equal(output, expected_output).all()
def test_ngraph_preprocess_scale_vector():
shape = [2, 2]
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
model = parameter_a
function = Model(model, [parameter_a], "TestFunction")
layout = ov.Layout("NCHW")
p = PrePostProcessor(function)
inp = p.input()
inp.tensor().set_layout(layout)
inp.preprocess().scale([0.5, 2.0])
function = p.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32)
expected_output = np.array([[2, 1], [6, 2]]).astype(np.float32)
runtime = get_runtime()
computation = runtime.computation(function)
output = computation(input_data)
    assert np.equal(output, expected_output).all()
import json
import argparse
import copy
import datetime
import logging
import os
import numpy as np
import torch
import wandb
from torch.utils.data import ConcatDataset, DataLoader
from algs.fedavg import train_nets
from algs.fednova import local_train_net_fednova
from algs.fedprox import local_train_net_fedprox
from algs.scaffold import local_train_net_scaffold
from data.dataloader import get_loaderargs
from data.partition import partition_data
from metrics.basic import compute_accuracy
from models.nets import init_nets
from utils import mkdirs, init_logger, check_disk_space, save_model
DATASETS = ['mnist', 'fmnist', 'cifar10', 'svhn', 'celeba', 'femnist', 'generated', 'rcv1', 'SUSY', 'covtype', 'a9a']
def get_args():
parser = argparse.ArgumentParser()
# Experiment setting
parser.add_argument('--name', type=str, required=True, help='Name of each experiment')
# Model & Dataset
parser.add_argument('--arch', type=str, default='MLP', help='Neural network architecture used in training')
parser.add_argument('--modeldir', type=str, required=False, default='./ckpt/', help='Model directory path')
parser.add_argument('--dataset', type=str, choices=DATASETS, help='dataset used for training')
parser.add_argument('--datadir', type=str, required=False, default='./data/', help='Data directory')
parser.add_argument('--save_round', type=int, default=10, help='Save model once in n comm rounds')
parser.add_argument('--save_local', action='store_true', help='Save local model for analysis')
parser.add_argument('--save_epoch', type=int, default=5, help='Save local model once in n epochs')
# Train, Hyperparams, Optimizer
parser.add_argument('--batch-size', type=int, default=64, help='input batch size for training (default: 64)')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)')
parser.add_argument('--epochs', type=int, default=5, help='number of local epochs')
parser.add_argument('--dropout', type=float, required=False, default=0.0, help='Dropout probability. Default=0.0')
parser.add_argument('--loss', type=str, choices=['ce', 'srip', 'ocnn'], default='ce', help='Loss function')
parser.add_argument('--optimizer', type=str, choices=['adam', 'amsgrad', 'sgd'], default='sgd', help='Optimizer')
parser.add_argument('--momentum', type=float, default=0, help='Parameter controlling the momentum SGD')
parser.add_argument('--nesterov', type=bool, default=True, help='nesterov momentum')
parser.add_argument('--mu', type=float, default=1, help='the mu parameter for fedprox')
parser.add_argument('--reg', type=float, default=1e-5, help='L2 losses strength')
parser.add_argument('--odecay', type=float, default=1e-2, help='Orth loss strength')
# Averaging algorithms
parser.add_argument('--alg', type=str, choices=['fedavg', 'fedprox', 'scaffold', 'fednova'],
help='communication strategy')
    parser.add_argument('--comm_round', type=int, default=50, help='maximum number of communication rounds')
parser.add_argument('--is_same_initial', type=int, default=1, help='All models with same parameters in fedavg')
# Data partitioning
parser.add_argument('--n_clients', type=int, default=2, help='number of workers in a distributed cluster')
parser.add_argument('--partition', type=str,
choices=['homo', 'noniid-labeldir', 'iid-diff-quantity', 'mixed', 'real', 'femnist'] \
+ ['noniid-#label' + str(i) for i in range(10)],
default='homo', help='the data partitioning strategy')
parser.add_argument('--beta', type=float, default=0.5,
help='The parameter for the dirichlet distribution for data partitioning')
parser.add_argument('--noise', type=float, default=0, help='how much noise we add to some party')
parser.add_argument('--noise_type', type=str, choices=['space', 'level'], default='level',
help='Different level of noise or different space of noise')
parser.add_argument('--sample', type=float, default=1, help='Sample ratio for each communication round')
# Misc.
    parser.add_argument('--num_workers', default=8, type=int, help='core usage (default: 8)')
parser.add_argument('--ngpu', default=1, type=int, help='total number of gpus (default: 1)')
parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')
parser.add_argument('--amp', action='store_true', help='Turn Automatic Mixed Precision on')
parser.add_argument('--init_seed', type=int, default=0, help='Random seed')
parser.add_argument('--logdir', type=str, required=False, default='./logs/', help='Log directory path')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
# Logging
mkdirs(args.logdir)
mkdirs(args.modeldir)
args_path = f'{args.name}_arguments-{datetime.datetime.now().strftime("%Y-%m-%d-%H:%M-%S")}.json'
with open(os.path.join(args.logdir, args_path), 'w') as f:
json.dump(str(args), f)
init_logger(args.name, args.logdir)
logger = logging.getLogger()
# Wandb
wandb.init(
name=args.name,
config=args.__dict__,
project='federated-learning',
tags=['train', args.arch, args.dataset, args.loss],
)
# Device
device = torch.device(args.device)
logger.info(f'Device: {device}')
# Set random seeds
logger.info(f'Seed: {args.init_seed}')
seed = args.init_seed
np.random.seed(seed)
torch.manual_seed(seed)
# Data partitioning
logger.info('Partitioning data...')
X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(
args.dataset, args.datadir, args.logdir, args.partition, args.n_clients, beta=args.beta)
# Prepare dataloader
logger.info('Creating dataloaders...')
if args.noise_type == 'space':
noise_level = lambda net_idx: 0 if net_idx == args.n_clients - 1 else args.noise
dl_args = lambda net_idx: {'net_id': net_idx, 'total': args.n_clients - 1}
else:
noise_level = lambda net_idx: args.noise / (args.n_clients - 1) * net_idx
dl_args = lambda net_idx: {}
trainargs, _, trainsets, testsets = [
{idx: obj for idx, obj in enumerate(tup)} for tup in list(zip(
*[get_loaderargs(
args.dataset, args.datadir, args.batch_size, 32,
net_dataidx_map[i], noise_level(i), **dl_args(i),
num_workers=(args.num_workers, args.num_workers)
) for i in range(args.n_clients)]
))
]
# if noise
if args.noise > 0:
trainset_global = ConcatDataset(trainsets)
testset_global = ConcatDataset(testsets)
trainargs_global = DataLoader(
dataset=trainset_global, batch_size=args.batch_size, shuffle=True,
pin_memory=True, num_workers=args.num_workers, persistent_workers=True
)
testargs_global = DataLoader(
dataset=testset_global, batch_size=args.batch_size, shuffle=False,
pin_memory=True, num_workers=args.num_workers, persistent_workers=True
)
else:
trainargs_global, testargs_global, trainset_global, testset_global = get_loaderargs(
args.dataset, args.datadir, args.batch_size, 32,
num_workers=(args.num_workers, args.num_workers)
)
logger.info(f'Global train size: {len(trainset_global)}')
logger.info(f'Global test size: {len(testset_global)}')
if args.alg == 'fedavg':
logger.info('Initializing nets...')
nets, local_model_meta_data, layer_type = init_nets(args.dropout, args.n_clients, args)
logger.info('Complete.')
global_models, global_model_meta_data, global_layer_type = init_nets(0, 1, args)
global_model = global_models[0]
# TODO
# commented out for consecutive run
# check_disk_space(
# global_model, args.n_clients, args.comm_round, args.save_round,
# args.save_local, args.epochs, args.save_epoch
# )
global_para = global_model.state_dict()
if args.is_same_initial:
for net_id, net in nets.items():
net.load_state_dict(global_para)
for round_ in range(1, args.comm_round + 1):
logger.info('=' * 58)
logger.info('Communication round: ' + str(round_))
arr = np.arange(args.n_clients)
np.random.shuffle(arr)
selected = arr[:int(args.n_clients * args.sample)]
global_para = global_model.state_dict()
if round_ == 1:
if args.is_same_initial:
for idx in selected:
nets[idx].load_state_dict(global_para)
else:
for idx in selected:
nets[idx].load_state_dict(global_para)
train_nets(nets, selected, args, net_dataidx_map, trainargs, round_, testargs=testargs_global,
device=device)
# update global model
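            # FedAvg aggregation: global weights = sum_k (n_k / n_total) * w_k,
            # where n_k is the number of training samples held by selected client k.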
total_data_points = sum([len(net_dataidx_map[r]) for r in selected])
fed_avg_freqs = [len(net_dataidx_map[r]) / total_data_points for r in selected]
for idx in range(len(selected)):
net_para = nets[selected[idx]].state_dict()
if idx == 0:
for key in net_para:
global_para[key] = net_para[key] * fed_avg_freqs[idx]
else:
for key in net_para:
global_para[key] += net_para[key] * fed_avg_freqs[idx]
global_model.load_state_dict(global_para)
global_model.to(device)
trainloader_global = DataLoader(**trainargs_global)
train_acc = compute_accuracy(global_model, trainloader_global, device=device)
del trainloader_global
testloader_global = DataLoader(**testargs_global)
test_acc, conf_matrix = compute_accuracy(
global_model, testloader_global, get_confusion_matrix=True, device=device
)
del testloader_global
global_model.cpu()
logger.info(f'>> Global Train accuracy: {train_acc * 100:5.2f} %')
logger.info(f'>> Global Test accuracy: {test_acc * 100:5.2f} %')
wandb.log(
data={
f'Global': {
'train': {'Accuracy': train_acc},
'test': {'Accuracy': test_acc},
},
'round': round_
},
)
# Save global model
if (round_ % args.save_round == 0) or round_ == args.comm_round:
save_model(global_model, args.name, args.modeldir, f'comm{round_:03}-GLOBAL')
logger.info('=' * 58)
elif args.alg == 'fedprox':
logger.info('Initializing nets')
nets, local_model_meta_data, layer_type = init_nets(args.dropout, args.n_clients, args)
global_models, global_model_meta_data, global_layer_type = init_nets(0, 1, args)
global_model = global_models[0]
global_para = global_model.state_dict()
if args.is_same_initial:
for net_id, net in nets.items():
net.load_state_dict(global_para)
for round_ in range(args.comm_round):
logger.info('Communication round: ' + str(round_))
arr = np.arange(args.n_clients)
np.random.shuffle(arr)
selected = arr[:int(args.n_clients * args.sample)]
global_para = global_model.state_dict()
if round_ == 0:
if args.is_same_initial:
for idx in selected:
nets[idx].load_state_dict(global_para)
else:
for idx in selected:
nets[idx].load_state_dict(global_para)
local_train_net_fedprox(nets, selected, global_model, args, net_dataidx_map, testargs=testargs_global,
device=device)
global_model.to('cpu')
# update global model
total_data_points = sum([len(net_dataidx_map[r]) for r in selected])
fed_avg_freqs = [len(net_dataidx_map[r]) / total_data_points for r in selected]
for idx in range(len(selected)):
net_para = nets[selected[idx]].state_dict()
if idx == 0:
for key in net_para:
global_para[key] = net_para[key] * fed_avg_freqs[idx]
else:
for key in net_para:
global_para[key] += net_para[key] * fed_avg_freqs[idx]
global_model.load_state_dict(global_para)
trainloader_global = DataLoader(**trainargs_global)
logger.info('global n_training: %d' % len(trainloader_global))
train_acc = compute_accuracy(global_model, trainloader_global, device=device)
del trainloader_global
testloader_global = DataLoader(**testargs_global)
logger.info('global n_test: %d' % len(testloader_global))
test_acc, conf_matrix = compute_accuracy(
global_model, testloader_global, get_confusion_matrix=True, device=device
)
del testloader_global
logger.info('>> Global Model Train accuracy: %f' % train_acc)
logger.info('>> Global Model Test accuracy: %f' % test_acc)
elif args.alg == 'scaffold':
logger.info('Initializing nets')
nets, local_model_meta_data, layer_type = init_nets(args.dropout, args.n_clients, args)
global_models, global_model_meta_data, global_layer_type = init_nets(0, 1, args)
global_model = global_models[0]
c_nets, _, _ = init_nets(args.dropout, args.n_clients, args)
c_globals, _, _ = init_nets(0, 1, args)
c_global = c_globals[0]
c_global_para = c_global.state_dict()
for net_id, net in c_nets.items():
net.load_state_dict(c_global_para)
global_para = global_model.state_dict()
if args.is_same_initial:
for net_id, net in nets.items():
net.load_state_dict(global_para)
for round_ in range(args.comm_round):
logger.info('Communication round: ' + str(round_))
            arr = np.arange(args.n_clients)
import unittest
import numpy as np
import mock
from activepipe import ActivePipeline
from corpus import Corpus
from featureforge.vectorizer import Vectorizer
from scipy.sparse import csr_matrix
from sklearn.preprocessing import normalize
testing_config = {
'features': Vectorizer([lambda x : x]),
'em_adding_instances': 3,
'u_corpus_f': 'test_files/unlabeled_corpus.pickle',
'test_corpus_f': 'test_files/test_corpus.pickle',
'training_corpus_f': 'test_files/training_corpus.pickle',
'feature_corpus_f': 'test_files/feature_corpus.pickle',
'dummy_config': None,
'number_of_features': 2,
}
X = [
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0],
[0.0, 1.0, 1.0]
]
Y = [[1], [0, 1] , [0, 0]]
U_vectors = [
[1.0, 1.0, 1.0],
[1.0, 1.0, 2.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
]
T_vectors = [
[1.0, 3.0, 5.0],
[3.0, 1.0, 0.0]
]
T_targets = [[1], [2]]
feat_corpus = [[-1, -1, 0], [1, -1, 0]]
class TestActivePipe(unittest.TestCase):
def setUp(self):
self.pipe = ActivePipeline(**testing_config)
self.instance_class_prob = np.array(
[[0.5, 0.5],
[0.25, 0.75],
[0.7, 0.3],
[0.1, 0.9],
[0.8, 0.2]]
)
self.instance_prob = np.array([0.02, 0.09, 0.01, 0.12, 0.08])
def test_em_feat_class_no_labeled(self):
"""Tests if the feature_log_prob matrix is calculated correctly.
P(fj|ck) = sum_i(P(xi) * fj(xi) * P(ck|xi))
P(f0|c0) = 0.5*0.02*1 + 0.25*0.09*1 + 0.7*0*0.01 + 0.1*1*0.12 + 0.8*2*0.08 = 0.1725
P(f0|c1) = 0.5*0.02*1 + 0.75*0.09*1 + 0.3*0*0.01 + 0.9*1*0.12 + 0.2*2*0.08 = 0.2175
P(f1|c0) = 0.5*0.02*1 + 0.25*0.09*1 + 0.7*0*0.01 + 0.1*0*0.12 + 0.8*2*0.08 = 0.1605
P(f1|c1) = 0.5*0.02*1 + 0.75*0.09*1 + 0.3*0*0.01 + 0.9*0*0.12 + 0.2*2*0.08 = 0.1095
P(f2|c0) = 0.5*0.02*1 + 0.25*0.09*2 + 0.7*1*0.01 + 0.1*0*0.12 + 0.8*2*0.08 = 0.19
P(f2|c1) = 0.5*0.02*1 + 0.75*0.09*2 + 0.3*1*0.01 + 0.9*0*0.12 + 0.2*2*0.08 = 0.18
"""
expected = np.array([[0.32982792, 0.30688337, 0.36328872],
[0.42899408, 0.21597633, 0.35502959]])
with mock.patch('featmultinomial.FeatMultinomialNB.predict_proba',
return_value=self.instance_class_prob) as mock_pred:
with mock.patch('featmultinomial.FeatMultinomialNB.instance_proba',
return_value=self.instance_prob) as mock_inst_p:
self.pipe.training_corpus = Corpus()
self.pipe._expectation_maximization()
np.testing.assert_array_almost_equal(
self.pipe.classifier.feature_log_prob_,
np.log(expected)
)
def test_em_class_no_labeled(self):
"""Tests if the class_log_prior_ matrix is calculated correctly.
P(ck) = sum_i(P(xi) * P(ck|xi))
P(c0) = 0.5*0.02 + 0.25*0.09 + 0.7*0.01 + 0.1*0.12 + 0.8*0.08 = 0.1155
P(c1) = 0.5*0.02 + 0.75*0.09 + 0.3*0.01 + 0.9*0.12 + 0.2*0.08 = 0.2045
"""
expected = np.array([0.3609375, 0.6390625])
with mock.patch('featmultinomial.FeatMultinomialNB.predict_proba',
return_value=self.instance_class_prob) as mock_pred:
with mock.patch('featmultinomial.FeatMultinomialNB.instance_proba',
return_value=self.instance_prob) as mock_inst_p:
self.pipe.training_corpus = Corpus()
self.pipe._expectation_maximization()
np.testing.assert_array_almost_equal(
self.pipe.classifier.class_log_prior_,
np.log(expected)
)
def test_em_feat_class(self):
"""
P(fj|ck) = Pu(fj|ck) * 0.1 + 0.9* sum_i(P(xl_i) * fj(xl_i) * {0,1})
P(f0|c0) = 0.1 * 0.1725 + 0.9 * (0.02*0*0 + 0.09*1*1 + 0.01*0*1) = 0.09825
P(f0|c1) = 0.1 * 0.2175 + 0.9 * (0.02*0*1 + 0.09*1*0 + 0.01*0*0) = 0.02175
P(f1|c0) = 0.1 * 0.1605 + 0.9 * (0.02*1*0 + 0.09*1*1 + 0.01*1*1) = 0.10605
P(f1|c1) = 0.1 * 0.1095 + 0.9 * (0.02*1*1 + 0.09*1*0 + 0.01*1*0) = 0.02895
P(f2|c0) = 0.1 * 0.19 + 0.9 * (0.02*0*0 + 0.09*1*1 + 0.01*1*1) = 0.109
P(f2|c1) = 0.1 * 0.18 + 0.9 * (0.02*0*1 + 0.09*1*0 + 0.01*1*0) = 0.018
"""
expected = np.array([[0.31359719, 0.3384934, 0.34790935],
[0.31659388, 0.421397379, 0.262008733]])
instance_prob_fun = lambda s, x: self.instance_prob[:x.shape[0]]
with mock.patch('featmultinomial.FeatMultinomialNB.predict_proba',
return_value=self.instance_class_prob) as mock_pred:
with mock.patch('featmultinomial.FeatMultinomialNB.instance_proba',
new=instance_prob_fun) as mock_inst_p:
self.pipe._expectation_maximization()
np.testing.assert_array_almost_equal(
self.pipe.classifier.feature_log_prob_,
np.log(expected)
)
def test_em_class(self):
"""Tests if the class_log_prior_ matrix is calculated correctly.
P(ck) = sum_i(P(xui) * P(ck|xui)) * 0.1 + sum_i(P(xli) * P(ck|xli)) * 0.9
P(c0) = 0.1155 * 0.1 + 0.9 * (0*0.02 + 1*0.09 + 1*0.01) = 0.10155
P(c1) = 0.2045 * 0.1 + 0.9 * (1*0.02 + 0*0.09 + 0*0.01) = 0.03845
"""
expected = np.array([0.725357142, 0.27464285714])
instance_prob_fun = lambda s, x: self.instance_prob[:x.shape[0]]
with mock.patch('featmultinomial.FeatMultinomialNB.predict_proba',
return_value=self.instance_class_prob) as mock_pred:
with mock.patch('featmultinomial.FeatMultinomialNB.instance_proba',
new=instance_prob_fun) as mock_inst_p:
self.pipe._expectation_maximization()
np.testing.assert_array_almost_equal(
self.pipe.classifier.class_log_prior_,
                    np.log(expected)
                )
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: <NAME> <<EMAIL>>
"""
"""
Tests some of the api in cuda4py.blas._cublas module.
"""
import cuda4py as cu
import cuda4py.blas as blas
import gc
import logging
import numpy
import os
import unittest
class Test(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.old_env = os.environ.get("CUDA_DEVICE")
if self.old_env is None:
os.environ["CUDA_DEVICE"] = "0"
self.ctx = cu.Devices().create_some_context()
self.blas = blas.CUBLAS(self.ctx)
self.path = os.path.dirname(__file__)
if not len(self.path):
self.path = "."
def tearDown(self):
if self.old_env is None:
del os.environ["CUDA_DEVICE"]
else:
os.environ["CUDA_DEVICE"] = self.old_env
del self.old_env
del self.blas
del self.ctx
gc.collect()
def test_constants(self):
self.assertEqual(blas.CUBLAS_OP_N, 0)
self.assertEqual(blas.CUBLAS_OP_T, 1)
self.assertEqual(blas.CUBLAS_OP_C, 2)
self.assertEqual(blas.CUBLAS_DATA_FLOAT, 0)
self.assertEqual(blas.CUBLAS_DATA_DOUBLE, 1)
self.assertEqual(blas.CUBLAS_DATA_HALF, 2)
self.assertEqual(blas.CUBLAS_DATA_INT8, 3)
self.assertEqual(blas.CUBLAS_POINTER_MODE_HOST, 0)
self.assertEqual(blas.CUBLAS_POINTER_MODE_DEVICE, 1)
self.assertEqual(blas.CUBLAS_STATUS_SUCCESS, 0)
self.assertEqual(blas.CUBLAS_STATUS_NOT_INITIALIZED, 1)
self.assertEqual(blas.CUBLAS_STATUS_ALLOC_FAILED, 3)
self.assertEqual(blas.CUBLAS_STATUS_INVALID_VALUE, 7)
self.assertEqual(blas.CUBLAS_STATUS_ARCH_MISMATCH, 8)
self.assertEqual(blas.CUBLAS_STATUS_MAPPING_ERROR, 11)
self.assertEqual(blas.CUBLAS_STATUS_EXECUTION_FAILED, 13)
self.assertEqual(blas.CUBLAS_STATUS_INTERNAL_ERROR, 14)
self.assertEqual(blas.CUBLAS_STATUS_NOT_SUPPORTED, 15)
self.assertEqual(blas.CUBLAS_STATUS_LICENSE_ERROR, 16)
def test_errors(self):
idx = cu.CU.ERRORS[blas.CUBLAS_STATUS_NOT_INITIALIZED].find(" | ")
self.assertGreater(idx, 0)
def _test_gemm(self, gemm, dtype):
for mode in (blas.CUBLAS_POINTER_MODE_HOST,
blas.CUBLAS_POINTER_MODE_DEVICE):
self._test_gemm_with_mode(gemm, dtype, mode)
def _test_gemm_with_mode(self, gemm, dtype, mode):
self.blas.set_pointer_mode(mode)
a = numpy.zeros([127, 353], dtype=dtype)
b = numpy.zeros([135, a.shape[1]], dtype=dtype)
c = numpy.zeros([a.shape[0], b.shape[0]], dtype=dtype)
try:
numpy.random.seed(123)
except AttributeError: # PyPy workaround
pass
a[:] = numpy.random.rand(a.size).astype(dtype).reshape(a.shape) - 0.5
b[:] = numpy.random.rand(b.size).astype(dtype).reshape(b.shape) - 0.5
gold_c = numpy.dot(a.astype(numpy.float64),
b.transpose().astype(numpy.float64))
a_buf = cu.MemAlloc(self.ctx, a.nbytes)
b_buf = cu.MemAlloc(self.ctx, b.nbytes)
c_buf = cu.MemAlloc(self.ctx, c.nbytes * 2)
alpha = numpy.ones(
1, dtype={numpy.float16: numpy.float32}.get(dtype, dtype))
beta = numpy.zeros(
1, dtype={numpy.float16: numpy.float32}.get(dtype, dtype))
if mode == blas.CUBLAS_POINTER_MODE_DEVICE:
alpha = cu.MemAlloc(self.ctx, alpha)
beta = cu.MemAlloc(self.ctx, beta)
a_buf.to_device_async(a)
b_buf.to_device_async(b)
c_buf.to_device_async(c)
c_buf.to_device_async(c, c.nbytes)
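        # cuBLAS is column-major, so the operands are passed in swapped order with
        # OP_T on b: the column-major product it writes, read back as a row-major
        # array, equals numpy.dot(a, b.T), i.e. gold_c computed above.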
gemm(blas.CUBLAS_OP_T, blas.CUBLAS_OP_N,
b.shape[0], a.shape[0], a.shape[1],
alpha, b_buf, a_buf, beta, c_buf)
c_buf.to_host(c)
max_diff = numpy.fabs(gold_c - c.astype(numpy.float64)).max()
logging.debug("Maximum difference is %.6f", max_diff)
self.assertLess(
max_diff, {numpy.float32: 1.0e-5, numpy.float64: 1.0e-13,
numpy.float16: 3.0e-3}[dtype])
c_buf.to_host(c, c.nbytes)
max_diff = numpy.fabs(c).max()
# To avoid destructor call before gemm completion
del beta
del alpha
self.assertEqual(max_diff, 0,
"Written some values outside of the target array")
def test_sgemm(self):
logging.debug("ENTER: test_sgemm")
with self.ctx:
self._test_gemm(self.blas.sgemm, numpy.float32)
logging.debug("EXIT: test_sgemm")
def test_dgemm(self):
logging.debug("ENTER: test_dgemm")
with self.ctx:
self._test_gemm(self.blas.dgemm, numpy.float64)
logging.debug("EXIT: test_dgemm")
def test_sgemm_ex(self):
logging.debug("ENTER: test_sgemm_ex")
with self.ctx:
self._test_gemm(self.blas.sgemm_ex, numpy.float16)
logging.debug("EXIT: test_sgemm_ex")
def test_kernel(self):
logging.debug("ENTER: test_kernel")
cap = self.ctx.device.compute_capability
if cap < (3, 5):
logging.debug("Requires compute capability >= (3, 5), got %s", cap)
logging.debug("EXIT: test_kernel")
return
with self.ctx:
module = cu.Module(
self.ctx, source_file=("%s/cublas.cu" % self.path),
nvcc_options2=cu.Module.OPTIONS_CUBLAS,
compute_capability=(cap[0], 0) if cap >= (6, 0) else cap)
# minor version of compute has to be set to 0
# to work on Pascal with CUDA 8.0
logging.debug("Compiled")
f = module.create_function("test")
logging.debug("Got function")
n = 256
a = numpy.random.rand(n, n).astype(numpy.float32)
b = numpy.random.rand(n, n).astype(numpy.float32)
            c = numpy.zeros_like(a)
# --------------------------------------------------------
# Multi-Epitope-Ligand Cartography (MELC) phase-contrast image based segmentation pipeline
#
#
# Written by <NAME>
# --------------------------------------------------------
import json
import tifffile
import cv2
import pandas as pd
import numpy as np
from os.path import join
# MELC packages
from MELC.utils.ptr import Pointer
class JSONDataset:
"""
Description of really cool class
...
Attributes
----------
attr1 : str
this is very cool attribute
Methods
-------
__init__(data_path)
very cool init function
"""
def __init__(self, image_path=None):
self.image_path = image_path
self.COCO_structure = {
'info': [],
'licences': [],
'images': [],
'annotations': [],
'categories': []
}
self.COCO_info = {
'info': [],
'licences': [],
'images': [],
'annotations': [],
'categories': []
}
self.COCO_categories = {
'subcategory': '',
'id': 0,
'name': ''
}
self.COCO_licences = list()
self.COCO_images = list()
self.number_of_annotations = 0
self.ImagesObj = list()
@classmethod
def fromdata(cls, image_path):
return cls(image_path)
@classmethod
def fromjson(cls, json_dict, image_path):
clsObj = cls(image_path)
annotations_pd = pd.DataFrame(json_dict['annotations'])
for image in json_dict['images']:
temp = JSONImage.fromjson(image, annotations_pd.loc[annotations_pd['image_id'] == image['id']].sort_values('id'))
clsObj.ImagesObj.append(temp)
return clsObj
def add_image(self, image, annotations, file_name):
ImageInputDict = {
'image': image,
'annotations': annotations,
'filename': file_name + '.tif',
'img_id': len(self.ImagesObj)
}
image = image.round().astype(np.uint16)
#print(join(self.image_path, ImageInputDict['filename']))
#print(image.dtype)
#print(image.shape)
#print(image)
tifffile.imsave(join(self.image_path, ImageInputDict['filename']), image)
JImg = JSONImage.fromdata(ImageInputDict)
JImg.set_annotation_base_id_ptr(self.number_of_annotations)
self.number_of_annotations += JImg.COCO_annotations.__len__()
self.ImagesObj.append(JImg)
def write_json(self, path_json_file):
self.COCO_structure['info'] = self.COCO_info
self.COCO_structure['licences'] = self.COCO_licences
self.COCO_structure['categories'] = [{"subcategory": "", "id": 1, "name": "phaseCell", "supercategory": "cell"}]
for ImgObj in self.ImagesObj:
img_json, annot_json = ImgObj.get_JSON()
self.COCO_structure['images'].append(img_json)
self.COCO_structure['annotations'] = self.COCO_structure['annotations'] + annot_json
with open(path_json_file, 'w') as f:
json.dump(self.COCO_structure, f)
return self.COCO_structure
# with open(json_file, 'w') as f:
# json.dump(json_file, f)
class JSONImage:
"""
    Wrapper around a single COCO ``images`` entry and its annotations.

    Attributes
    ----------
    file_name : str
        Image file name referenced from the COCO ``images`` list.
    COCO_annotations : list
        JSONAnnotation objects belonging to this image.

    Methods
    -------
    fromjson(Image_json, annotations_pdDataFrame)
        Rebuild the entry from an existing COCO dictionary.
    fromdata(Image)
        Build the entry from raw image data and annotations.
"""
def __init__(self):
#self._init_from_data(Image)
pass
@classmethod
def fromjson(cls, Image_json = {
'licenses': '',
'file_name': None,
'coco_url': '',
'height': None,
'width': None,
'date_captured': '',
'flickr_url': '',
'id': None
}, annotations_pdDataFrame = pd.DataFrame
):
clsObj = cls()
clsObj.license = Image_json['license']
clsObj.file_name = Image_json['file_name']
clsObj.coco_url = Image_json['coco_url']
clsObj.height = Image_json['height']
clsObj.width = Image_json['width']
clsObj.date_captured = Image_json['date_captured']
clsObj.flick_url = Image_json['flickr_url']
clsObj.PTR_id = Pointer(Image_json['id'])
clsObj.PTR_annotation_base_id = Pointer(annotations_pdDataFrame['id'].min())
clsObj.annotation_relative_id = annotations_pdDataFrame['id'].__len__()
clsObj.COCO_annotations = list()
for annotation in annotations_pdDataFrame.iterrows():
input_annotation_dict = {
'annotation_pdDataFrame': annotation[1],
'PTR_annotation_base_id': clsObj.PTR_annotation_base_id,
'annotation_relative_id': annotation[1]['id'] - clsObj.PTR_annotation_base_id.get(),
'PTR_image_id': clsObj.PTR_id,
}
clsObj.COCO_annotations.append(JSONAnnotation.fromjson(input_annotation_dict))
return clsObj
@classmethod
def fromdata(cls, Image={
        'image': np.array([]),  # remaining default keys truncated in the source
from __future__ import print_function
import math
import time
import numpy as np
import tensorflow as tf
def create_padding_dimension(n):
# Halve the padded dimension and round up the first result, and round down the second to create a correct
# padding dimension.
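    # e.g. n = 5 -> [3, 2]; any odd remainder goes to the leading side.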
return [int(math.ceil(n / 2.0)), int(math.floor(n / 2.0))]
def zero_pad(input, shape):
#print "input shape %s" % np.array(input.get_shape().as_list())
#print "target shape %s" % np.array(shape)
input_shape = input.get_shape().as_list()
    shape_disparity = np.array(shape) - np.array(input_shape)